/*
 * QTI Secure Execution Environment Communicator (QSEECOM) driver
 *
 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/qseecom.h>
#include <linux/elf.h>
#include <linux/firmware.h>
#include <linux/freezer.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/dma-mapping.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/socinfo.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
#include <soc/qcom/qseecomi.h>
#include <asm/cacheflush.h>
#include "qseecom_kernel.h"
#include <crypto/ice.h>
#include <linux/delay.h>

#include <linux/compat.h>
#include "compat_qseecom.h"
#include <linux/kthread.h>

#define QSEECOM_DEV "qseecom"
#define QSEOS_VERSION_14 0x14
#define QSEEE_VERSION_00 0x400000
#define QSEE_VERSION_01 0x401000
#define QSEE_VERSION_02 0x402000
#define QSEE_VERSION_03 0x403000
#define QSEE_VERSION_04 0x404000
#define QSEE_VERSION_05 0x405000
#define QSEE_VERSION_20 0x800000
#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */

#define QSEE_CE_CLK_100MHZ 100000000
#define CE_CLK_DIV 1000000

#define QSEECOM_MAX_SG_ENTRY 4096
#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
	(QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)

#define QSEECOM_INVALID_KEY_ID 0xff

/* Save partition image hash for authentication check */
#define SCM_SAVE_PARTITION_HASH_ID 0x01

/* Check if enterprise security is activated */
#define SCM_IS_ACTIVATED_ID 0x02

/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
#define SCM_MDTP_CIPHER_DIP 0x01

/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
#define MAX_DIP 0x20000

#define RPMB_SERVICE 0x2000
#define SSD_SERVICE 0x3000

#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
#define TWO 2
#define QSEECOM_UFS_ICE_CE_NUM 10
#define QSEECOM_SDCC_ICE_CE_NUM 20
#define QSEECOM_ICE_FDE_KEY_INDEX 0

#define PHY_ADDR_4G (1ULL<<32)

#define QSEECOM_STATE_NOT_READY 0
#define QSEECOM_STATE_SUSPEND 1
#define QSEECOM_STATE_READY 2
#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2

/*
 * Default the ce info unit to 0 for services which support only a single
 * instance. Most services fall in this category.
 */
#define DEFAULT_CE_INFO_UNIT 0
#define DEFAULT_NUM_CE_INFO_UNIT 1

#define FDE_FLAG_POS 4
#define ENABLE_KEY_WRAP_IN_KS (1 << FDE_FLAG_POS)

enum qseecom_clk_definitions {
	CLK_DFAB = 0,
	CLK_SFPB,
};

enum qseecom_ice_key_size_type {
	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
};

enum qseecom_client_handle_type {
	QSEECOM_CLIENT_APP = 1,
	QSEECOM_LISTENER_SERVICE,
	QSEECOM_SECURE_SERVICE,
	QSEECOM_GENERIC,
	QSEECOM_UNAVAILABLE_CLIENT_APP,
};

enum qseecom_ce_hw_instance {
	CLK_QSEE = 0,
	CLK_CE_DRV,
	CLK_INVALID,
};

enum qseecom_listener_unregister_kthread_state {
	LSNR_UNREG_KT_SLEEP = 0,
	LSNR_UNREG_KT_WAKEUP,
};

static struct class *driver_class;
static dev_t qseecom_device_no;

static DEFINE_MUTEX(qsee_bw_mutex);
static DEFINE_MUTEX(app_access_lock);
static DEFINE_MUTEX(clk_access_lock);
static DEFINE_MUTEX(listener_access_lock);


struct sglist_info {
	uint32_t indexAndFlags;
	uint32_t sizeOrCount;
};

/*
 * The 31st bit indicates whether there is a single physical address or
 * multiple physical addresses inside the request buffer. If it is set, the
 * index locates a single physical addr inside the request buffer, and
 * `sizeOrCount` is the size of the memory being shared at that physical
 * address.
 * Otherwise, the index locates an array of {start, len} pairs (a
 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
 * that array.
 *
 * The 30th bit indicates a 64- or 32-bit address; when it is set, physical
 * addr and scatter gather entry sizes are 64-bit values. Otherwise, they are
 * 32-bit values.
 *
 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
 */
#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
	((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
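
/*
 * Example (illustrative values only): SGLISTINFO_SET_INDEX_FLAG(1, 1, 0x20)
 * yields 0xC0000020, i.e. bit 31 set (single physical address), bit 30 set
 * (64-bit sizes) and offset 0x20 into the request buffer, matching the
 * layout described in the comment above.
 */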

#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)

#define FEATURE_ID_WHITELIST 15 /* whitelist feature id */

#define MAKE_WHITELIST_VERSION(major, minor, patch) \
	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
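
/*
 * For example, MAKE_WHITELIST_VERSION(1, 0, 0) packs major=1, minor=0,
 * patch=0 into (1 << 22) = 0x00400000; the driver presumably compares such
 * packed values against the version reported for FEATURE_ID_WHITELIST.
 */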

struct qseecom_registered_listener_list {
	struct list_head list;
	struct qseecom_register_listener_req svc;
	void *user_virt_sb_base;
	u8 *sb_virt;
	phys_addr_t sb_phys;
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	wait_queue_head_t rcv_req_wq;
	/* rcv_req_flag: 0: ready and empty; 1: received req */
	int rcv_req_flag;
	int send_resp_flag;
	bool listener_in_use;
	/* wq for thread blocked on this listener */
	wait_queue_head_t listener_block_app_wq;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
	int abort;
	bool unregister_pending;
};
207
208struct qseecom_unregister_pending_list {
209 struct list_head list;
210 struct qseecom_dev_handle *data;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700211};
212
213struct qseecom_registered_app_list {
214 struct list_head list;
215 u32 app_id;
216 u32 ref_cnt;
217 char app_name[MAX_APP_NAME_SIZE];
218 u32 app_arch;
219 bool app_blocked;
Zhen Kongdea10592018-07-30 17:50:10 -0700220 u32 check_block;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700221 u32 blocked_on_listener_id;
222};
223
224struct qseecom_registered_kclient_list {
225 struct list_head list;
226 struct qseecom_handle *handle;
227};
228
229struct qseecom_ce_info_use {
230 unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
231 unsigned int unit_num;
232 unsigned int num_ce_pipe_entries;
233 struct qseecom_ce_pipe_entry *ce_pipe_entry;
234 bool alloc;
235 uint32_t type;
236};
237
238struct ce_hw_usage_info {
239 uint32_t qsee_ce_hw_instance;
240 uint32_t num_fde;
241 struct qseecom_ce_info_use *fde;
242 uint32_t num_pfe;
243 struct qseecom_ce_info_use *pfe;
244};
245
246struct qseecom_clk {
247 enum qseecom_ce_hw_instance instance;
248 struct clk *ce_core_clk;
249 struct clk *ce_clk;
250 struct clk *ce_core_src_clk;
251 struct clk *ce_bus_clk;
252 uint32_t clk_access_cnt;
253};
254
struct qseecom_control {
	struct ion_client *ion_clnt; /* Ion client */
	struct list_head registered_listener_list_head;

	struct list_head registered_app_list_head;
	spinlock_t registered_app_list_lock;

	struct list_head registered_kclient_list_head;
	spinlock_t registered_kclient_list_lock;

	wait_queue_head_t send_resp_wq;
	int send_resp_flag;

	uint32_t qseos_version;
	uint32_t qsee_version;
	struct device *pdev;
	bool whitelist_support;
	bool commonlib_loaded;
	bool commonlib64_loaded;
	struct ce_hw_usage_info ce_info;

	int qsee_bw_count;
	int qsee_sfpb_bw_count;

	uint32_t qsee_perf_client;
	struct qseecom_clk qsee;
	struct qseecom_clk ce_drv;

	bool support_bus_scaling;
	bool support_fde;
	bool support_pfe;
	bool fde_key_size;
	uint32_t cumulative_mode;
	enum qseecom_bandwidth_request_mode current_mode;
	struct timer_list bw_scale_down_timer;
	struct work_struct bw_inactive_req_ws;
	struct cdev cdev;
	bool timer_running;
	bool no_clock_support;
	unsigned int ce_opp_freq_hz;
	bool appsbl_qseecom_support;
	uint32_t qsee_reentrancy_support;
	bool enable_key_wrap_in_ks;

	uint32_t app_block_ref_cnt;
	wait_queue_head_t app_block_wq;
	atomic_t qseecom_state;
	int is_apps_region_protected;
	bool smcinvoke_support;

	struct list_head unregister_lsnr_pending_list_head;
	wait_queue_head_t register_lsnr_pending_wq;
	struct task_struct *unregister_lsnr_kthread_task;
	wait_queue_head_t unregister_lsnr_kthread_wq;
	atomic_t unregister_lsnr_kthread_state;
};

struct qseecom_sec_buf_fd_info {
	bool is_sec_buf_fd;
	size_t size;
	void *vbase;
	dma_addr_t pbase;
};

struct qseecom_param_memref {
	uint32_t buffer;
	uint32_t size;
};

struct qseecom_client_handle {
	u32 app_id;
	u8 *sb_virt;
	phys_addr_t sb_phys;
	unsigned long user_virt_sb_base;
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;
	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
	bool from_smcinvoke;
};

struct qseecom_listener_handle {
	u32 id;
	bool unregister_pending;
	bool release_called;
};

static struct qseecom_control qseecom;

struct qseecom_dev_handle {
	enum qseecom_client_handle_type type;
	union {
		struct qseecom_client_handle client;
		struct qseecom_listener_handle listener;
	};
	bool released;
	int abort;
	wait_queue_head_t abort_wq;
	atomic_t ioctl_count;
	bool perf_enabled;
	bool fast_load_enabled;
	enum qseecom_bandwidth_request_mode mode;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
	bool use_legacy_cmd;
};

struct qseecom_key_id_usage_desc {
	uint8_t desc[QSEECOM_KEY_ID_SIZE];
};

struct qseecom_crypto_info {
	unsigned int unit_num;
	unsigned int ce;
	unsigned int pipe_pair;
};

static struct qseecom_key_id_usage_desc key_id_array[] = {
	{
		.desc = "Undefined Usage Index",
	},

	{
		.desc = "Full Disk Encryption",
	},

	{
		.desc = "Per File Encryption",
	},

	{
		.desc = "UFS ICE Full Disk Encryption",
	},

	{
		.desc = "SDCC ICE Full Disk Encryption",
	},
};

/* Function prototypes */
static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
			char *cmnlib_name);
static int qseecom_enable_ice_setup(int usage);
static int qseecom_disable_ice_setup(int usage);
static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
				void __user *argp);
static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
				void __user *argp);
static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
				void __user *argp);

static int get_qseecom_keymaster_status(char *str)
{
	get_option(&str, &qseecom.is_apps_region_protected);
	return 1;
}
__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
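
/*
 * With the __setup() hook above, a kernel command line entry such as
 * "androidboot.keymaster=1" is parsed by get_option() and stored in
 * qseecom.is_apps_region_protected (1 in this example) before the driver
 * is probed.
 */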


#define QSEECOM_SCM_EBUSY_WAIT_MS 30
#define QSEECOM_SCM_EBUSY_MAX_RETRY 67
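
/*
 * With the values above, __qseecom_scm_call2_locked() below keeps retrying
 * a busy secure world for roughly 2 seconds (67 retries * 30 ms) before
 * giving up and returning -EBUSY; its "busy for 1 second" warning
 * corresponds to 33 * 30 ms of accumulated waiting.
 */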

static int __qseecom_scm_call2_locked(uint32_t smc_id, struct scm_desc *desc)
{
	int ret = 0;
	int retry_count = 0;

	do {
		ret = scm_call2_noretry(smc_id, desc);
		if (ret == -EBUSY) {
			mutex_unlock(&app_access_lock);
			msleep(QSEECOM_SCM_EBUSY_WAIT_MS);
			mutex_lock(&app_access_lock);
		}
		if (retry_count == 33)
			pr_warn("secure world has been busy for 1 second!\n");
	} while (ret == -EBUSY &&
			(retry_count++ < QSEECOM_SCM_EBUSY_MAX_RETRY));
	return ret;
}

static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
			const void *req_buf, void *resp_buf)
{
	int ret = 0;
	uint32_t smc_id = 0;
	uint32_t qseos_cmd_id = 0;
	struct scm_desc desc = {0};
	struct qseecom_command_scm_resp *scm_resp = NULL;

	if (!req_buf || !resp_buf) {
		pr_err("Invalid buffer pointer\n");
		return -EINVAL;
	}
	qseos_cmd_id = *(uint32_t *)req_buf;
	scm_resp = (struct qseecom_command_scm_resp *)resp_buf;

	switch (svc_id) {
	case 6: {
		if (tz_cmd_id == 3) {
			smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
			desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
			desc.args[0] = *(uint32_t *)req_buf;
		} else {
			pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
				svc_id, tz_cmd_id);
			return -EINVAL;
		}
		ret = __qseecom_scm_call2_locked(smc_id, &desc);
		break;
	}
	case SCM_SVC_ES: {
		switch (tz_cmd_id) {
		case SCM_SAVE_PARTITION_HASH_ID: {
			u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
			struct qseecom_save_partition_hash_req *p_hash_req =
				(struct qseecom_save_partition_hash_req *)
				req_buf;
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, p_hash_req->digest,
				SHA256_DIGEST_LENGTH);
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
			desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
			desc.args[0] = p_hash_req->partition_id;
			desc.args[1] = virt_to_phys(tzbuf);
			desc.args[2] = SHA256_DIGEST_LENGTH;
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		default: {
			pr_err("tz_cmd_id %d is not supported by scm_call2\n",
				tz_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /* end of switch (tz_cmd_id) */
		break;
	} /* end of case SCM_SVC_ES */
	case SCM_SVC_TZSCHEDULER: {
		switch (qseos_cmd_id) {
		case QSEOS_APP_START_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_START_ID;
			desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
					(struct qseecom_load_app_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_APP_SHUTDOWN_COMMAND: {
			struct qseecom_unload_app_ireq *req;

			req = (struct qseecom_unload_app_ireq *)req_buf;
			smc_id = TZ_OS_APP_SHUTDOWN_ID;
			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
			desc.args[0] = req->app_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_LOOKUP_COMMAND: {
			struct qseecom_check_app_ireq *req;
			u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			req = (struct qseecom_check_app_ireq *)req_buf;
			pr_debug("Lookup app_name = %s\n", req->app_name);
			strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_APP_LOOKUP_ID;
			desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = strlen(req->app_name);
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_APP_REGION_NOTIFICATION: {
			struct qsee_apps_region_info_ireq *req;
			struct qsee_apps_region_info_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
			desc.arginfo =
				TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qsee_apps_region_info_ireq *)
					req_buf;
				desc.args[0] = req->addr;
				desc.args[1] = req->size;
			} else {
				req_64bit =
				(struct qsee_apps_region_info_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->addr;
				desc.args[1] = req_64bit->size;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
			struct qseecom_load_lib_image_ireq *req;
			struct qseecom_load_lib_image_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_lib_image_ireq *)
					req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_lib_image_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_REGISTER_LISTENER: {
			struct qseecom_register_listener_ireq *req;
			struct qseecom_register_listener_64bit_ireq *req_64bit;

			desc.arginfo =
				TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_register_listener_ireq *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->sb_ptr;
				desc.args[2] = req->sb_len;
			} else {
				req_64bit =
				(struct qseecom_register_listener_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->listener_id;
				desc.args[1] = req_64bit->sb_ptr;
				desc.args[2] = req_64bit->sb_len;
			}
			qseecom.smcinvoke_support = true;
			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			if (ret == -EIO) {
				/* smcinvoke is not supported */
				qseecom.smcinvoke_support = false;
				smc_id = TZ_OS_REGISTER_LISTENER_ID;
				ret = __qseecom_scm_call2_locked(smc_id, &desc);
			}
			break;
		}
		case QSEOS_DEREGISTER_LISTENER: {
			struct qseecom_unregister_listener_ireq *req;

			req = (struct qseecom_unregister_listener_ireq *)
				req_buf;
			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
			struct qseecom_client_listener_data_irsp *req;

			req = (struct qseecom_client_listener_data_irsp *)
				req_buf;
			smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
			desc.arginfo =
				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			desc.args[1] = req->status;
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
			struct qseecom_client_listener_data_irsp *req;
			struct qseecom_client_listener_data_64bit_irsp *req_64;

			smc_id =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req =
				(struct qseecom_client_listener_data_irsp *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->status;
				desc.args[2] = req->sglistinfo_ptr;
				desc.args[3] = req->sglistinfo_len;
			} else {
				req_64 =
			(struct qseecom_client_listener_data_64bit_irsp *)
					req_buf;
				desc.args[0] = req_64->listener_id;
				desc.args[1] = req_64->status;
				desc.args[2] = req_64->sglistinfo_ptr;
				desc.args[3] = req_64->sglistinfo_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_app_64bit_ireq *)req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}

		case QSEOS_CLIENT_SEND_DATA_COMMAND: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
			desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
			struct qseecom_client_send_service_ireq *req;

			req = (struct qseecom_client_send_service_ireq *)
				req_buf;
			smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
			desc.args[0] = req->key_type;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_ERASE_COMMAND: {
			smc_id = TZ_OS_RPMB_ERASE_ID;
			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_GENERATE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_GEN_KEY_ID;
			desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_DELETE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_DEL_KEY_ID;
			desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_SET_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_select_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_select_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
			desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_UPDATE_KEY_USERINFO: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_UPDATE_KEY_ID;
			desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
			desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_CLOSE_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_REQUEST_CANCELLATION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
			desc.arginfo =
				TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
			struct qseecom_continue_blocked_request_ireq *req =
				(struct qseecom_continue_blocked_request_ireq *)
				req_buf;
			if (qseecom.smcinvoke_support)
				smc_id =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
			else
				smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
			desc.arginfo =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
			desc.args[0] = req->app_or_session_id;
			ret = __qseecom_scm_call2_locked(smc_id, &desc);
			break;
		}
		default: {
			pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
				qseos_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /* end of switch (qseos_cmd_id) */
		break;
	} /* end of case SCM_SVC_TZSCHEDULER */
	default: {
		pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
			svc_id);
		ret = -EINVAL;
		break;
	}
	} /* end of switch (svc_id) */
	scm_resp->result = desc.ret[0];
	scm_resp->resp_type = desc.ret[1];
	scm_resp->data = desc.ret[2];
	pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
		svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
	pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
		scm_resp->result, scm_resp->resp_type, scm_resp->data);
	return ret;
}


static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
		size_t cmd_len, void *resp_buf, size_t resp_len)
{
	if (!is_scm_armv8())
		return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
				resp_buf, resp_len);
	else
		return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
}
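
/*
 * Minimal usage sketch (illustrative only, not part of the driver flow):
 * callers build a QSEOS request whose first uint32_t is the command id and
 * pass it together with a qseecom_command_scm_resp. Assuming the same ireq
 * layout used by the other QSEOS requests in this file, an app lookup for a
 * hypothetical TA name could look roughly like:
 *
 *	struct qseecom_check_app_ireq req;
 *	struct qseecom_command_scm_resp resp;
 *	int ret;
 *
 *	req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
 *	strlcpy(req.app_name, "sampleapp", MAX_APP_NAME_SIZE);
 *	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req, sizeof(req),
 *				&resp, sizeof(resp));
 *	if (!ret)
 *		pr_debug("lookup result = %d\n", resp.result);
 */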

static struct qseecom_registered_listener_list *__qseecom_find_svc(
		int32_t listener_id)
{
	struct qseecom_registered_listener_list *entry = NULL;

	list_for_each_entry(entry,
			&qseecom.registered_listener_list_head, list) {
		if (entry->svc.listener_id == listener_id)
			break;
	}
	if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
		pr_debug("Service id: %u is not found\n", listener_id);
		return NULL;
	}

	return entry;
}

static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
				struct qseecom_dev_handle *handle,
				struct qseecom_register_listener_req *listener)
{
	int ret = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_register_listener_64bit_ireq req_64bit;
	struct qseecom_command_scm_resp resp;
	ion_phys_addr_t pa;
	void *cmd_buf = NULL;
	size_t cmd_len;

	/* Get the handle of the shared fd */
	svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					listener->ifd_data_fd);
	if (IS_ERR_OR_NULL(svc->ihandle)) {
		pr_err("Ion client could not retrieve the handle\n");
		return -ENOMEM;
	}

	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
	if (ret) {
		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
			ret);
		return ret;
	}
	/* Populate the structure for sending scm call to load image */
	svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
	if (IS_ERR_OR_NULL(svc->sb_virt)) {
		pr_err("ION memory mapping for listener shared buffer failed\n");
		return -ENOMEM;
	}
	svc->sb_phys = (phys_addr_t)pa;

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
		req.listener_id = svc->svc.listener_id;
		req.sb_len = svc->sb_length;
		req.sb_ptr = (uint32_t)svc->sb_phys;
		cmd_buf = (void *)&req;
		cmd_len = sizeof(struct qseecom_register_listener_ireq);
	} else {
		req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
		req_64bit.listener_id = svc->svc.listener_id;
		req_64bit.sb_len = svc->sb_length;
		req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
		cmd_buf = (void *)&req_64bit;
		cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
	}

	resp.result = QSEOS_RESULT_INCOMPLETE;

	mutex_unlock(&listener_access_lock);
	mutex_lock(&app_access_lock);
	__qseecom_reentrancy_check_if_no_app_blocked(
				TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID);
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
					&resp, sizeof(resp));
	mutex_unlock(&app_access_lock);
	mutex_lock(&listener_access_lock);
	if (ret) {
		pr_err("qseecom_scm_call failed with err: %d\n", ret);
		return -EINVAL;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Error SB registration req: resp.result = %d\n",
			resp.result);
		return -EPERM;
	}
	return 0;
}

static int qseecom_register_listener(struct qseecom_dev_handle *data,
					void __user *argp)
{
	int ret = 0;
	struct qseecom_register_listener_req rcvd_lstnr;
	struct qseecom_registered_listener_list *new_entry;
	struct qseecom_registered_listener_list *ptr_svc;

	ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
	if (ret) {
		pr_err("copy_from_user failed\n");
		return ret;
	}
	if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
			rcvd_lstnr.sb_size))
		return -EFAULT;

	data->listener.id = rcvd_lstnr.listener_id;

	ptr_svc = __qseecom_find_svc(rcvd_lstnr.listener_id);
	if (ptr_svc) {
		if (ptr_svc->unregister_pending == false) {
			pr_err("Service %d is not unique\n",
				rcvd_lstnr.listener_id);
			data->released = true;
			return -EBUSY;
		} else {
			/* wait until listener is unregistered */
			pr_debug("register %d has to wait\n",
				rcvd_lstnr.listener_id);
			mutex_unlock(&listener_access_lock);
			ret = wait_event_interruptible(
				qseecom.register_lsnr_pending_wq,
				list_empty(
				&qseecom.unregister_lsnr_pending_list_head));
			if (ret) {
				pr_err("interrupted register_pending_wq %d\n",
					rcvd_lstnr.listener_id);
				mutex_lock(&listener_access_lock);
				return -ERESTARTSYS;
			}
			mutex_lock(&listener_access_lock);
		}
	}
	new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;
	memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
	new_entry->rcv_req_flag = 0;

	new_entry->svc.listener_id = rcvd_lstnr.listener_id;
	new_entry->sb_length = rcvd_lstnr.sb_size;
	new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
	if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
		pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
			rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
		kzfree(new_entry);
		return -ENOMEM;
	}

	init_waitqueue_head(&new_entry->rcv_req_wq);
	init_waitqueue_head(&new_entry->listener_block_app_wq);
	new_entry->send_resp_flag = 0;
	new_entry->listener_in_use = false;
	list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);

	pr_debug("Service %d is registered\n", rcvd_lstnr.listener_id);
	return ret;
}

static int __qseecom_unregister_listener(struct qseecom_dev_handle *data,
			struct qseecom_registered_listener_list *ptr_svc)
{
	int ret = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_command_scm_resp resp;
	struct ion_handle *ihandle = NULL; /* Retrieve phy addr */

	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
	req.listener_id = data->listener.id;
	resp.result = QSEOS_RESULT_INCOMPLETE;

	mutex_unlock(&listener_access_lock);
	mutex_lock(&app_access_lock);
	__qseecom_reentrancy_check_if_no_app_blocked(
				TZ_OS_DEREGISTER_LISTENER_ID);
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
					sizeof(req), &resp, sizeof(resp));
	mutex_unlock(&app_access_lock);
	mutex_lock(&listener_access_lock);
	if (ret) {
		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
				ret, data->listener.id);
		if (ret == -EBUSY)
			return ret;
		goto exit;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
				resp.result, data->listener.id);
		ret = -EPERM;
		goto exit;
	}

	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_interruptible(data->abort_wq,
				atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
		}
	}

exit:
	if (ptr_svc->sb_virt) {
		ihandle = ptr_svc->ihandle;
		if (!IS_ERR_OR_NULL(ihandle)) {
			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
			ion_free(qseecom.ion_clnt, ihandle);
		}
	}
	list_del(&ptr_svc->list);
	kzfree(ptr_svc);

	data->released = true;
	pr_debug("Service %d is unregistered\n", data->listener.id);
	return ret;
}

static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
{
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct qseecom_unregister_pending_list *entry = NULL;

	ptr_svc = __qseecom_find_svc(data->listener.id);
	if (!ptr_svc) {
		pr_err("Unregister invalid listener ID %d\n", data->listener.id);
		return -ENODATA;
	}
	/* stop CA thread waiting for listener response */
	ptr_svc->abort = 1;
	wake_up_interruptible_all(&qseecom.send_resp_wq);

	/* stop listener thread waiting for listener request */
	data->abort = 1;
	wake_up_all(&ptr_svc->rcv_req_wq);

	/* return directly if already pending */
	if (ptr_svc->unregister_pending)
		return 0;

	/* add unregistration into pending list */
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;
	entry->data = data;
	list_add_tail(&entry->list,
		&qseecom.unregister_lsnr_pending_list_head);
	ptr_svc->unregister_pending = true;
	pr_debug("unregister %d pending\n", data->listener.id);
	return 0;
}

static void __qseecom_processing_pending_lsnr_unregister(void)
{
	struct qseecom_unregister_pending_list *entry = NULL;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct list_head *pos;
	int ret = 0;

	mutex_lock(&listener_access_lock);
	while (!list_empty(&qseecom.unregister_lsnr_pending_list_head)) {
		pos = qseecom.unregister_lsnr_pending_list_head.next;
		entry = list_entry(pos,
				struct qseecom_unregister_pending_list, list);
		if (entry && entry->data) {
			pr_debug("process pending unregister %d\n",
					entry->data->listener.id);
			/* don't process if qseecom_release is not called */
			if (!entry->data->listener.release_called)
				break;
			ptr_svc = __qseecom_find_svc(
						entry->data->listener.id);
			if (ptr_svc) {
				ret = __qseecom_unregister_listener(
						entry->data, ptr_svc);
				if (ret == -EBUSY) {
					pr_debug("unregister %d pending again\n",
						entry->data->listener.id);
					mutex_unlock(&listener_access_lock);
					return;
				}
			} else
				pr_err("invalid listener %d\n",
					entry->data->listener.id);
			kzfree(entry->data);
		}
		list_del(pos);
		kzfree(entry);
	}
	mutex_unlock(&listener_access_lock);
	wake_up_interruptible(&qseecom.register_lsnr_pending_wq);
}

static void __wakeup_unregister_listener_kthread(void)
{
	atomic_set(&qseecom.unregister_lsnr_kthread_state,
				LSNR_UNREG_KT_WAKEUP);
	wake_up_interruptible(&qseecom.unregister_lsnr_kthread_wq);
}

static int __qseecom_unregister_listener_kthread_func(void *data)
{
	while (!kthread_should_stop()) {
		wait_event_interruptible(
			qseecom.unregister_lsnr_kthread_wq,
			atomic_read(&qseecom.unregister_lsnr_kthread_state)
				== LSNR_UNREG_KT_WAKEUP);
		pr_debug("kthread to unregister listener is called %d\n",
			atomic_read(&qseecom.unregister_lsnr_kthread_state));
		__qseecom_processing_pending_lsnr_unregister();
		atomic_set(&qseecom.unregister_lsnr_kthread_state,
				LSNR_UNREG_KT_SLEEP);
	}
	pr_warn("kthread to unregister listener stopped\n");
	return 0;
}

static int __qseecom_set_msm_bus_request(uint32_t mode)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qclk->ce_core_src_clk != NULL) {
		if (mode == INACTIVE) {
			__qseecom_disable_clk(CLK_QSEE);
		} else {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				pr_err("CLK enabling failed (%d) MODE (%d)\n",
							ret, mode);
		}
	}

	if ((!ret) && (qseecom.current_mode != mode)) {
		ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, mode);
		if (ret) {
			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
							ret, mode);
			if (qclk->ce_core_src_clk != NULL) {
				if (mode == INACTIVE) {
					ret = __qseecom_enable_clk(CLK_QSEE);
					if (ret)
						pr_err("CLK enable failed\n");
				} else
					__qseecom_disable_clk(CLK_QSEE);
			}
		}
		qseecom.current_mode = mode;
	}
	return ret;
}

static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
	mutex_lock(&app_access_lock);
	mutex_lock(&qsee_bw_mutex);
	if (qseecom.timer_running)
		__qseecom_set_msm_bus_request(INACTIVE);
	pr_debug("current_mode = %d, cumulative_mode = %d\n",
				qseecom.current_mode, qseecom.cumulative_mode);
	qseecom.timer_running = false;
	mutex_unlock(&qsee_bw_mutex);
	mutex_unlock(&app_access_lock);
}

static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}

static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
{
	struct qseecom_clk *qclk;
	int ret = 0;

	mutex_lock(&clk_access_lock);
	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	else
		qclk = &qseecom.ce_drv;

	if (qclk->clk_access_cnt > 0) {
		qclk->clk_access_cnt--;
	} else {
		pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
		ret = -EINVAL;
	}

	mutex_unlock(&clk_access_lock);
	return ret;
}

static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
{
	int32_t ret = 0;
	int32_t request_mode = INACTIVE;

	mutex_lock(&qsee_bw_mutex);
	if (mode == 0) {
		if (qseecom.cumulative_mode > MEDIUM)
			request_mode = HIGH;
		else
			request_mode = qseecom.cumulative_mode;
	} else {
		request_mode = mode;
	}

	ret = __qseecom_set_msm_bus_request(request_mode);
	if (ret) {
		pr_err("set msm bus request failed (%d),request_mode (%d)\n",
			ret, request_mode);
		goto err_scale_timer;
	}

	if (qseecom.timer_running) {
		ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
		if (ret) {
			pr_err("Failed to decrease clk ref count.\n");
			goto err_scale_timer;
		}
		del_timer_sync(&(qseecom.bw_scale_down_timer));
		qseecom.timer_running = false;
	}
err_scale_timer:
	mutex_unlock(&qsee_bw_mutex);
	return ret;
}


static int qseecom_unregister_bus_bandwidth_needs(
					struct qseecom_dev_handle *data)
{
	int32_t ret = 0;

	qseecom.cumulative_mode -= data->mode;
	data->mode = INACTIVE;

	return ret;
}

static int __qseecom_register_bus_bandwidth_needs(
		struct qseecom_dev_handle *data, uint32_t request_mode)
{
	int32_t ret = 0;

	if (data->mode == INACTIVE) {
		qseecom.cumulative_mode += request_mode;
		data->mode = request_mode;
	} else {
		if (data->mode != request_mode) {
			qseecom.cumulative_mode -= data->mode;
			qseecom.cumulative_mode += request_mode;
			data->mode = request_mode;
		}
	}
	return ret;
}
1576
1577static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1578{
1579 int ret = 0;
1580
1581 ret = qsee_vote_for_clock(data, CLK_DFAB);
1582 if (ret) {
1583 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1584 goto perf_enable_exit;
1585 }
1586 ret = qsee_vote_for_clock(data, CLK_SFPB);
1587 if (ret) {
1588 qsee_disable_clock_vote(data, CLK_DFAB);
1589 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1590 goto perf_enable_exit;
1591 }
1592
1593perf_enable_exit:
1594 return ret;
1595}
1596
1597static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1598 void __user *argp)
1599{
1600 int32_t ret = 0;
1601 int32_t req_mode;
1602
1603 if (qseecom.no_clock_support)
1604 return 0;
1605
1606 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1607 if (ret) {
1608 pr_err("copy_from_user failed\n");
 1609 return -EFAULT;
1610 }
1611 if (req_mode > HIGH) {
1612 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1613 return -EINVAL;
1614 }
1615
1616 /*
1617 * Register bus bandwidth needs if bus scaling feature is enabled;
1618 * otherwise, qseecom enable/disable clocks for the client directly.
1619 */
1620 if (qseecom.support_bus_scaling) {
1621 mutex_lock(&qsee_bw_mutex);
1622 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1623 mutex_unlock(&qsee_bw_mutex);
1624 } else {
1625 pr_debug("Bus scaling feature is NOT enabled\n");
1626 pr_debug("request bandwidth mode %d for the client\n",
1627 req_mode);
1628 if (req_mode != INACTIVE) {
1629 ret = qseecom_perf_enable(data);
1630 if (ret)
1631 pr_err("Failed to vote for clock with err %d\n",
1632 ret);
1633 } else {
1634 qsee_disable_clock_vote(data, CLK_DFAB);
1635 qsee_disable_clock_vote(data, CLK_SFPB);
1636 }
1637 }
1638 return ret;
1639}
1640
1641static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1642{
1643 if (qseecom.no_clock_support)
1644 return;
1645
1646 mutex_lock(&qsee_bw_mutex);
1647 qseecom.bw_scale_down_timer.expires = jiffies +
1648 msecs_to_jiffies(duration);
1649 mod_timer(&(qseecom.bw_scale_down_timer),
1650 qseecom.bw_scale_down_timer.expires);
1651 qseecom.timer_running = true;
1652 mutex_unlock(&qsee_bw_mutex);
1653}
1654
1655static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1656{
1657 if (!qseecom.support_bus_scaling)
1658 qsee_disable_clock_vote(data, CLK_SFPB);
1659 else
1660 __qseecom_add_bw_scale_down_timer(
1661 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1662}
1663
1664static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1665{
1666 int ret = 0;
1667
1668 if (qseecom.support_bus_scaling) {
1669 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1670 if (ret)
1671 pr_err("Failed to set bw MEDIUM.\n");
1672 } else {
1673 ret = qsee_vote_for_clock(data, CLK_SFPB);
1674 if (ret)
1675 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1676 }
1677 return ret;
1678}
1679
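/*
 * Import the client's ION shared buffer fd, map it into the kernel, and
 * record its physical/virtual base and length in the client handle.
 */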
1680static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1681 void __user *argp)
1682{
1683 ion_phys_addr_t pa;
1684 int32_t ret;
1685 struct qseecom_set_sb_mem_param_req req;
1686 size_t len;
1687
1688 /* Copy the relevant information needed for loading the image */
1689 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1690 return -EFAULT;
1691
1692 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1693 (req.sb_len == 0)) {
1694 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1695 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1696 return -EFAULT;
1697 }
1698 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1699 req.sb_len))
1700 return -EFAULT;
1701
1702 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001703 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001704 req.ifd_data_fd);
1705 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1706 pr_err("Ion client could not retrieve the handle\n");
1707 return -ENOMEM;
1708 }
1709 /* Get the physical address of the ION BUF */
1710 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1711 if (ret) {
1712
1713 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1714 ret);
1715 return ret;
1716 }
1717
1718 if (len < req.sb_len) {
1719 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1720 req.sb_len, len);
1721 return -EINVAL;
1722 }
1723 /* Populate the structure for sending scm call to load image */
1724 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1725 data->client.ihandle);
1726 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1727 pr_err("ION memory mapping for client shared buf failed\n");
1728 return -ENOMEM;
1729 }
1730 data->client.sb_phys = (phys_addr_t)pa;
1731 data->client.sb_length = req.sb_len;
1732 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1733 return 0;
1734}
1735
Zhen Kong26e62742018-05-04 17:19:06 -07001736static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
1737 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001738{
1739 int ret;
1740
1741 ret = (qseecom.send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001742 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001743}
1744
1745static int __qseecom_reentrancy_listener_has_sent_rsp(
1746 struct qseecom_dev_handle *data,
1747 struct qseecom_registered_listener_list *ptr_svc)
1748{
1749 int ret;
1750
1751 ret = (ptr_svc->send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001752 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001753}
1754
1755static void __qseecom_clean_listener_sglistinfo(
1756 struct qseecom_registered_listener_list *ptr_svc)
1757{
1758 if (ptr_svc->sglist_cnt) {
1759 memset(ptr_svc->sglistinfo_ptr, 0,
1760 SGLISTINFO_TABLE_SIZE);
1761 ptr_svc->sglist_cnt = 0;
1762 }
1763}
1764
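/*
 * Handle QSEOS_RESULT_INCOMPLETE: wake the listener named in the response,
 * wait for it to post its reply, then return that reply to QSEE via another
 * scm_call. Loops for as long as QSEE keeps returning INCOMPLETE.
 */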
1765static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
1766 struct qseecom_command_scm_resp *resp)
1767{
1768 int ret = 0;
1769 int rc = 0;
1770 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07001771 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
1772 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
1773 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001774 struct qseecom_registered_listener_list *ptr_svc = NULL;
1775 sigset_t new_sigset;
1776 sigset_t old_sigset;
1777 uint32_t status;
1778 void *cmd_buf = NULL;
1779 size_t cmd_len;
1780 struct sglist_info *table = NULL;
1781
Zhen Kongbcdeda22018-11-16 13:50:51 -08001782 qseecom.app_block_ref_cnt++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001783 while (resp->result == QSEOS_RESULT_INCOMPLETE) {
1784 lstnr = resp->data;
1785 /*
 1786 * Wake up blocking listener service with the lstnr id
1787 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08001788 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001789 list_for_each_entry(ptr_svc,
1790 &qseecom.registered_listener_list_head, list) {
1791 if (ptr_svc->svc.listener_id == lstnr) {
1792 ptr_svc->listener_in_use = true;
1793 ptr_svc->rcv_req_flag = 1;
1794 wake_up_interruptible(&ptr_svc->rcv_req_wq);
1795 break;
1796 }
1797 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001798
1799 if (ptr_svc == NULL) {
1800 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07001801 rc = -EINVAL;
1802 status = QSEOS_RESULT_FAILURE;
1803 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001804 }
1805
1806 if (!ptr_svc->ihandle) {
1807 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07001808 rc = -EINVAL;
1809 status = QSEOS_RESULT_FAILURE;
1810 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001811 }
1812
1813 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07001814 pr_err("Service %d does not exist\n",
1815 lstnr);
1816 rc = -ERESTARTSYS;
1817 ptr_svc = NULL;
1818 status = QSEOS_RESULT_FAILURE;
1819 goto err_resp;
1820 }
1821
1822 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08001823 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07001824 lstnr, ptr_svc->abort);
1825 rc = -ENODEV;
1826 status = QSEOS_RESULT_FAILURE;
1827 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001828 }
Zhen Kong25731112018-09-20 13:10:03 -07001829
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001830 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
1831
1832 /* initialize the new signal mask with all signals*/
1833 sigfillset(&new_sigset);
1834 /* block all signals */
1835 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
1836
Zhen Kongbcdeda22018-11-16 13:50:51 -08001837 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001838 do {
1839 /*
1840 * When reentrancy is not supported, check global
1841 * send_resp_flag; otherwise, check this listener's
1842 * send_resp_flag.
1843 */
1844 if (!qseecom.qsee_reentrancy_support &&
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301845 !wait_event_interruptible(qseecom.send_resp_wq,
Zhen Kong26e62742018-05-04 17:19:06 -07001846 __qseecom_listener_has_sent_rsp(
1847 data, ptr_svc))) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001848 break;
1849 }
1850
1851 if (qseecom.qsee_reentrancy_support &&
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301852 !wait_event_interruptible(qseecom.send_resp_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001853 __qseecom_reentrancy_listener_has_sent_rsp(
1854 data, ptr_svc))) {
1855 break;
1856 }
1857 } while (1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001858 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001859 /* restore signal mask */
1860 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07001861 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001862 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
1863 data->client.app_id, lstnr, ret);
1864 rc = -ENODEV;
1865 status = QSEOS_RESULT_FAILURE;
1866 } else {
1867 status = QSEOS_RESULT_SUCCESS;
1868 }
Zhen Kong26e62742018-05-04 17:19:06 -07001869err_resp:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001870 qseecom.send_resp_flag = 0;
Zhen Kong7d500032018-08-06 16:58:31 -07001871 if (ptr_svc) {
1872 ptr_svc->send_resp_flag = 0;
1873 table = ptr_svc->sglistinfo_ptr;
1874 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001875 if (qseecom.qsee_version < QSEE_VERSION_40) {
1876 send_data_rsp.listener_id = lstnr;
1877 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001878 if (table) {
1879 send_data_rsp.sglistinfo_ptr =
1880 (uint32_t)virt_to_phys(table);
1881 send_data_rsp.sglistinfo_len =
1882 SGLISTINFO_TABLE_SIZE;
1883 dmac_flush_range((void *)table,
1884 (void *)table + SGLISTINFO_TABLE_SIZE);
1885 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001886 cmd_buf = (void *)&send_data_rsp;
1887 cmd_len = sizeof(send_data_rsp);
1888 } else {
1889 send_data_rsp_64bit.listener_id = lstnr;
1890 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001891 if (table) {
1892 send_data_rsp_64bit.sglistinfo_ptr =
1893 virt_to_phys(table);
1894 send_data_rsp_64bit.sglistinfo_len =
1895 SGLISTINFO_TABLE_SIZE;
1896 dmac_flush_range((void *)table,
1897 (void *)table + SGLISTINFO_TABLE_SIZE);
1898 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001899 cmd_buf = (void *)&send_data_rsp_64bit;
1900 cmd_len = sizeof(send_data_rsp_64bit);
1901 }
Zhen Kong7d500032018-08-06 16:58:31 -07001902 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001903 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
1904 else
1905 *(uint32_t *)cmd_buf =
1906 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07001907 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001908 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
1909 ptr_svc->ihandle,
1910 ptr_svc->sb_virt, ptr_svc->sb_length,
1911 ION_IOC_CLEAN_INV_CACHES);
1912 if (ret) {
1913 pr_err("cache operation failed %d\n", ret);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001914 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001915 }
1916 }
1917
1918 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
1919 ret = __qseecom_enable_clk(CLK_QSEE);
1920 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08001921 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001922 }
1923
1924 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1925 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07001926 if (ptr_svc) {
1927 ptr_svc->listener_in_use = false;
1928 __qseecom_clean_listener_sglistinfo(ptr_svc);
1929 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001930 if (ret) {
1931 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1932 ret, data->client.app_id);
1933 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1934 __qseecom_disable_clk(CLK_QSEE);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001935 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001936 }
Zhen Kong26e62742018-05-04 17:19:06 -07001937 pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
1938 status, resp->result, data->client.app_id, lstnr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001939 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1940 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1941 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1942 resp->result, data->client.app_id, lstnr);
1943 ret = -EINVAL;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001944 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001945 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001946exit:
1947 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001948 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1949 __qseecom_disable_clk(CLK_QSEE);
1950
1951 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001952 qseecom.app_block_ref_cnt--;
Zhen Kongcc580932019-04-23 22:16:56 -07001953 wake_up_interruptible_all(&qseecom.app_block_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001954 if (rc)
1955 return rc;
1956
1957 return ret;
1958}
1959
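/*
 * Called when QSEE reports that an app/session is blocked on a busy
 * listener: sleep (with app_access_lock released) until the listener is
 * free, then ask QSEE to continue the blocked request.
 */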
Zhen Konga91aaf02018-02-02 17:21:04 -08001960static int __qseecom_process_reentrancy_blocked_on_listener(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001961 struct qseecom_command_scm_resp *resp,
1962 struct qseecom_registered_app_list *ptr_app,
1963 struct qseecom_dev_handle *data)
1964{
1965 struct qseecom_registered_listener_list *list_ptr;
1966 int ret = 0;
1967 struct qseecom_continue_blocked_request_ireq ireq;
1968 struct qseecom_command_scm_resp continue_resp;
Zhen Konga91aaf02018-02-02 17:21:04 -08001969 unsigned int session_id;
Zhen Kong3d1d92f2018-02-02 17:21:04 -08001970 sigset_t new_sigset;
1971 sigset_t old_sigset;
Zhen Konga91aaf02018-02-02 17:21:04 -08001972 unsigned long flags;
1973 bool found_app = false;
Zhen Kong0ea975d2019-03-12 14:40:24 -07001974 struct qseecom_registered_app_list dummy_app_entry = { {NULL} };
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001975
1976 if (!resp || !data) {
1977 pr_err("invalid resp or data pointer\n");
1978 ret = -EINVAL;
1979 goto exit;
1980 }
1981
1982 /* find app_id & img_name from list */
Zhen Kong0ea975d2019-03-12 14:40:24 -07001983 if (!ptr_app) {
1984 if (data->client.from_smcinvoke) {
1985 pr_debug("This request is from smcinvoke\n");
1986 ptr_app = &dummy_app_entry;
1987 ptr_app->app_id = data->client.app_id;
1988 } else {
1989 spin_lock_irqsave(&qseecom.registered_app_list_lock,
1990 flags);
1991 list_for_each_entry(ptr_app,
1992 &qseecom.registered_app_list_head, list) {
1993 if ((ptr_app->app_id == data->client.app_id) &&
1994 (!strcmp(ptr_app->app_name,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001995 data->client.app_name))) {
Zhen Kong0ea975d2019-03-12 14:40:24 -07001996 found_app = true;
1997 break;
1998 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001999 }
Zhen Kong0ea975d2019-03-12 14:40:24 -07002000 spin_unlock_irqrestore(
2001 &qseecom.registered_app_list_lock, flags);
2002 if (!found_app) {
2003 pr_err("app_id %d (%s) is not found\n",
2004 data->client.app_id,
2005 (char *)data->client.app_name);
2006 ret = -ENOENT;
2007 goto exit;
2008 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002009 }
2010 }
2011
Zhen Kongd8cc0052017-11-13 15:13:31 -08002012 do {
Zhen Konga91aaf02018-02-02 17:21:04 -08002013 session_id = resp->resp_type;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002014 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002015 list_ptr = __qseecom_find_svc(resp->data);
2016 if (!list_ptr) {
2017 pr_err("Invalid listener ID %d\n", resp->data);
2018 ret = -ENODATA;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002019 mutex_unlock(&listener_access_lock);
Zhen Konge7f525f2017-12-01 18:26:25 -08002020 goto exit;
2021 }
Zhen Konga91aaf02018-02-02 17:21:04 -08002022 ptr_app->blocked_on_listener_id = resp->data;
2023
2024 pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
2025 resp->data, list_ptr->listener_in_use,
2026 session_id, data->client.app_id);
2027
2028 /* sleep until listener is available */
2029 sigfillset(&new_sigset);
2030 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2031
2032 do {
2033 qseecom.app_block_ref_cnt++;
2034 ptr_app->app_blocked = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002035 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002036 mutex_unlock(&app_access_lock);
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302037 wait_event_interruptible(
Zhen Konga91aaf02018-02-02 17:21:04 -08002038 list_ptr->listener_block_app_wq,
2039 !list_ptr->listener_in_use);
2040 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002041 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002042 ptr_app->app_blocked = false;
2043 qseecom.app_block_ref_cnt--;
2044 } while (list_ptr->listener_in_use);
2045
2046 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2047
2048 ptr_app->blocked_on_listener_id = 0;
2049 pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
2050 resp->data, session_id, data->client.app_id);
2051
2052 /* notify TZ that listener is available */
2053 ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
2054
2055 if (qseecom.smcinvoke_support)
2056 ireq.app_or_session_id = session_id;
2057 else
2058 ireq.app_or_session_id = data->client.app_id;
2059
2060 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2061 &ireq, sizeof(ireq),
2062 &continue_resp, sizeof(continue_resp));
2063 if (ret && qseecom.smcinvoke_support) {
2064 /* retry with legacy cmd */
2065 qseecom.smcinvoke_support = false;
2066 ireq.app_or_session_id = data->client.app_id;
2067 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2068 &ireq, sizeof(ireq),
2069 &continue_resp, sizeof(continue_resp));
2070 qseecom.smcinvoke_support = true;
2071 if (ret) {
2072 pr_err("unblock app %d or session %d fail\n",
2073 data->client.app_id, session_id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002074 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002075 goto exit;
2076 }
2077 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08002078 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002079 resp->result = continue_resp.result;
2080 resp->resp_type = continue_resp.resp_type;
2081 resp->data = continue_resp.data;
2082 pr_debug("unblock resp = %d\n", resp->result);
2083 } while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);
2084
2085 if (resp->result != QSEOS_RESULT_INCOMPLETE) {
2086 pr_err("Unexpected unblock resp %d\n", resp->result);
2087 ret = -EINVAL;
Zhen Kong2f60f492017-06-29 15:22:14 -07002088 }
Zhen Kong2f60f492017-06-29 15:22:14 -07002089exit:
2090 return ret;
2091}
2092
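/*
 * Reentrant variant of __qseecom_process_incomplete_cmd(): app_access_lock
 * is dropped while waiting for the listener response, and a response of
 * QSEOS_RESULT_BLOCKED_ON_LISTENER is handled in place.
 */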
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002093static int __qseecom_reentrancy_process_incomplete_cmd(
2094 struct qseecom_dev_handle *data,
2095 struct qseecom_command_scm_resp *resp)
2096{
2097 int ret = 0;
2098 int rc = 0;
2099 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07002100 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
2101 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
2102 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002103 struct qseecom_registered_listener_list *ptr_svc = NULL;
2104 sigset_t new_sigset;
2105 sigset_t old_sigset;
2106 uint32_t status;
2107 void *cmd_buf = NULL;
2108 size_t cmd_len;
2109 struct sglist_info *table = NULL;
2110
Zhen Kong26e62742018-05-04 17:19:06 -07002111 while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002112 lstnr = resp->data;
2113 /*
 2114 * Wake up blocking listener service with the lstnr id
2115 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002116 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002117 list_for_each_entry(ptr_svc,
2118 &qseecom.registered_listener_list_head, list) {
2119 if (ptr_svc->svc.listener_id == lstnr) {
2120 ptr_svc->listener_in_use = true;
2121 ptr_svc->rcv_req_flag = 1;
2122 wake_up_interruptible(&ptr_svc->rcv_req_wq);
2123 break;
2124 }
2125 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002126
2127 if (ptr_svc == NULL) {
2128 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07002129 rc = -EINVAL;
2130 status = QSEOS_RESULT_FAILURE;
2131 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002132 }
2133
2134 if (!ptr_svc->ihandle) {
2135 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07002136 rc = -EINVAL;
2137 status = QSEOS_RESULT_FAILURE;
2138 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002139 }
2140
2141 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07002142 pr_err("Service %d does not exist\n",
2143 lstnr);
2144 rc = -ERESTARTSYS;
2145 ptr_svc = NULL;
2146 status = QSEOS_RESULT_FAILURE;
2147 goto err_resp;
2148 }
2149
2150 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08002151 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07002152 lstnr, ptr_svc->abort);
2153 rc = -ENODEV;
2154 status = QSEOS_RESULT_FAILURE;
2155 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002156 }
Zhen Kong25731112018-09-20 13:10:03 -07002157
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002158 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
2159
 2160 /* initialize the new signal mask with all signals */
2161 sigfillset(&new_sigset);
2162
2163 /* block all signals */
2164 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2165
 2166 /* unlock mutex between waking listener and sleep-wait */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002167 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002168 mutex_unlock(&app_access_lock);
2169 do {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302170 if (!wait_event_interruptible(qseecom.send_resp_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002171 __qseecom_reentrancy_listener_has_sent_rsp(
2172 data, ptr_svc))) {
2173 break;
2174 }
2175 } while (1);
2176 /* lock mutex again after resp sent */
2177 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002178 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002179 ptr_svc->send_resp_flag = 0;
2180 qseecom.send_resp_flag = 0;
2181
2182 /* restore signal mask */
2183 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07002184 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002185 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
2186 data->client.app_id, lstnr, ret);
2187 rc = -ENODEV;
2188 status = QSEOS_RESULT_FAILURE;
2189 } else {
2190 status = QSEOS_RESULT_SUCCESS;
2191 }
Zhen Kong26e62742018-05-04 17:19:06 -07002192err_resp:
Zhen Kong7d500032018-08-06 16:58:31 -07002193 if (ptr_svc)
2194 table = ptr_svc->sglistinfo_ptr;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002195 if (qseecom.qsee_version < QSEE_VERSION_40) {
2196 send_data_rsp.listener_id = lstnr;
2197 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002198 if (table) {
2199 send_data_rsp.sglistinfo_ptr =
2200 (uint32_t)virt_to_phys(table);
2201 send_data_rsp.sglistinfo_len =
2202 SGLISTINFO_TABLE_SIZE;
2203 dmac_flush_range((void *)table,
2204 (void *)table + SGLISTINFO_TABLE_SIZE);
2205 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002206 cmd_buf = (void *)&send_data_rsp;
2207 cmd_len = sizeof(send_data_rsp);
2208 } else {
2209 send_data_rsp_64bit.listener_id = lstnr;
2210 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002211 if (table) {
2212 send_data_rsp_64bit.sglistinfo_ptr =
2213 virt_to_phys(table);
2214 send_data_rsp_64bit.sglistinfo_len =
2215 SGLISTINFO_TABLE_SIZE;
2216 dmac_flush_range((void *)table,
2217 (void *)table + SGLISTINFO_TABLE_SIZE);
2218 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002219 cmd_buf = (void *)&send_data_rsp_64bit;
2220 cmd_len = sizeof(send_data_rsp_64bit);
2221 }
Zhen Kong7d500032018-08-06 16:58:31 -07002222 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002223 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
2224 else
2225 *(uint32_t *)cmd_buf =
2226 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07002227 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002228 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
2229 ptr_svc->ihandle,
2230 ptr_svc->sb_virt, ptr_svc->sb_length,
2231 ION_IOC_CLEAN_INV_CACHES);
2232 if (ret) {
2233 pr_err("cache operation failed %d\n", ret);
2234 return ret;
2235 }
2236 }
2237 if (lstnr == RPMB_SERVICE) {
2238 ret = __qseecom_enable_clk(CLK_QSEE);
2239 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08002240 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002241 }
2242
2243 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2244 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07002245 if (ptr_svc) {
2246 ptr_svc->listener_in_use = false;
2247 __qseecom_clean_listener_sglistinfo(ptr_svc);
2248 wake_up_interruptible(&ptr_svc->listener_block_app_wq);
2249 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002250
2251 if (ret) {
2252 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
2253 ret, data->client.app_id);
2254 goto exit;
2255 }
2256
2257 switch (resp->result) {
2258 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
2259 pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
2260 lstnr, data->client.app_id, resp->data);
2261 if (lstnr == resp->data) {
2262 pr_err("lstnr %d should not be blocked!\n",
2263 lstnr);
2264 ret = -EINVAL;
2265 goto exit;
2266 }
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002267 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002268 ret = __qseecom_process_reentrancy_blocked_on_listener(
2269 resp, NULL, data);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002270 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002271 if (ret) {
2272 pr_err("failed to process App(%d) %s blocked on listener %d\n",
2273 data->client.app_id,
2274 data->client.app_name, resp->data);
2275 goto exit;
2276 }
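 /* fall through: the unblocked response is re-evaluated by the enclosing loop */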
2277 case QSEOS_RESULT_SUCCESS:
2278 case QSEOS_RESULT_INCOMPLETE:
2279 break;
2280 default:
2281 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
2282 resp->result, data->client.app_id, lstnr);
2283 ret = -EINVAL;
2284 goto exit;
2285 }
2286exit:
Zhen Kongbcdeda22018-11-16 13:50:51 -08002287 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002288 if (lstnr == RPMB_SERVICE)
2289 __qseecom_disable_clk(CLK_QSEE);
2290
2291 }
2292 if (rc)
2293 return rc;
2294
2295 return ret;
2296}
2297
2298/*
2299 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2300 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
 2301 * So, first check that no app is blocked before sending an OS level scm call,
 2302 * and wait until all apps are unblocked.
2303 */
2304static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2305{
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002306 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2307 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2308 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2309 /* thread sleep until this app unblocked */
2310 while (qseecom.app_block_ref_cnt > 0) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002311 mutex_unlock(&app_access_lock);
Zhen Kongcc580932019-04-23 22:16:56 -07002312 wait_event_interruptible(qseecom.app_block_wq,
2313 (!qseecom.app_block_ref_cnt));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002314 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002315 }
2316 }
2317}
2318
2319/*
 2320 * scm_call of send data will fail if this TA is blocked or there is more
 2321 * than one TA requesting listener services; so, first check whether we need
 2322 * to wait.
2323 */
2324static void __qseecom_reentrancy_check_if_this_app_blocked(
2325 struct qseecom_registered_app_list *ptr_app)
2326{
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002327 if (qseecom.qsee_reentrancy_support) {
Zhen Kongdea10592018-07-30 17:50:10 -07002328 ptr_app->check_block++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002329 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2330 /* thread sleep until this app unblocked */
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002331 mutex_unlock(&app_access_lock);
Zhen Kongcc580932019-04-23 22:16:56 -07002332 wait_event_interruptible(qseecom.app_block_wq,
2333 (!ptr_app->app_blocked &&
2334 qseecom.app_block_ref_cnt <= 1));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002335 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002336 }
Zhen Kongdea10592018-07-30 17:50:10 -07002337 ptr_app->check_block--;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002338 }
2339}
2340
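/*
 * Look up an app by name: first in the locally registered app list, then
 * via an scm_call to QSEE. *app_id is left at 0 if the app is not loaded.
 */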
2341static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2342 uint32_t *app_id)
2343{
2344 int32_t ret;
2345 struct qseecom_command_scm_resp resp;
2346 bool found_app = false;
2347 struct qseecom_registered_app_list *entry = NULL;
2348 unsigned long flags = 0;
2349
2350 if (!app_id) {
2351 pr_err("Null pointer to app_id\n");
2352 return -EINVAL;
2353 }
2354 *app_id = 0;
2355
2356 /* check if app exists and has been registered locally */
2357 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2358 list_for_each_entry(entry,
2359 &qseecom.registered_app_list_head, list) {
2360 if (!strcmp(entry->app_name, req.app_name)) {
2361 found_app = true;
2362 break;
2363 }
2364 }
2365 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2366 if (found_app) {
2367 pr_debug("Found app with id %d\n", entry->app_id);
2368 *app_id = entry->app_id;
2369 return 0;
2370 }
2371
2372 memset((void *)&resp, 0, sizeof(resp));
2373
2374 /* SCM_CALL to check if app_id for the mentioned app exists */
2375 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2376 sizeof(struct qseecom_check_app_ireq),
2377 &resp, sizeof(resp));
2378 if (ret) {
2379 pr_err("scm_call to check if app is already loaded failed\n");
2380 return -EINVAL;
2381 }
2382
2383 if (resp.result == QSEOS_RESULT_FAILURE)
2384 return 0;
2385
2386 switch (resp.resp_type) {
2387 /*qsee returned listener type response */
2388 case QSEOS_LISTENER_ID:
2389 pr_err("resp type is of listener type instead of app");
2390 return -EINVAL;
2391 case QSEOS_APP_ID:
2392 *app_id = resp.data;
2393 return 0;
2394 default:
2395 pr_err("invalid resp type (%d) from qsee",
2396 resp.resp_type);
2397 return -ENODEV;
2398 }
2399}
2400
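/*
 * Load a trusted app: loads cmnlib/cmnlib64 if needed, checks whether the
 * app is already loaded (bumping its ref count); otherwise sends the app
 * image to QSEE and adds a new entry to the registered app list.
 */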
2401static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2402{
2403 struct qseecom_registered_app_list *entry = NULL;
2404 unsigned long flags = 0;
2405 u32 app_id = 0;
2406 struct ion_handle *ihandle; /* Ion handle */
2407 struct qseecom_load_img_req load_img_req;
2408 int32_t ret = 0;
2409 ion_phys_addr_t pa = 0;
2410 size_t len;
2411 struct qseecom_command_scm_resp resp;
2412 struct qseecom_check_app_ireq req;
2413 struct qseecom_load_app_ireq load_req;
2414 struct qseecom_load_app_64bit_ireq load_req_64bit;
2415 void *cmd_buf = NULL;
2416 size_t cmd_len;
2417 bool first_time = false;
2418
2419 /* Copy the relevant information needed for loading the image */
2420 if (copy_from_user(&load_img_req,
2421 (void __user *)argp,
2422 sizeof(struct qseecom_load_img_req))) {
2423 pr_err("copy_from_user failed\n");
2424 return -EFAULT;
2425 }
2426
2427 /* Check and load cmnlib */
2428 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2429 if (!qseecom.commonlib_loaded &&
2430 load_img_req.app_arch == ELFCLASS32) {
2431 ret = qseecom_load_commonlib_image(data, "cmnlib");
2432 if (ret) {
2433 pr_err("failed to load cmnlib\n");
2434 return -EIO;
2435 }
2436 qseecom.commonlib_loaded = true;
2437 pr_debug("cmnlib is loaded\n");
2438 }
2439
2440 if (!qseecom.commonlib64_loaded &&
2441 load_img_req.app_arch == ELFCLASS64) {
2442 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2443 if (ret) {
2444 pr_err("failed to load cmnlib64\n");
2445 return -EIO;
2446 }
2447 qseecom.commonlib64_loaded = true;
2448 pr_debug("cmnlib64 is loaded\n");
2449 }
2450 }
2451
2452 if (qseecom.support_bus_scaling) {
2453 mutex_lock(&qsee_bw_mutex);
2454 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2455 mutex_unlock(&qsee_bw_mutex);
2456 if (ret)
2457 return ret;
2458 }
2459
2460 /* Vote for the SFPB clock */
2461 ret = __qseecom_enable_clk_scale_up(data);
2462 if (ret)
2463 goto enable_clk_err;
2464
2465 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2466 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2467 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2468
2469 ret = __qseecom_check_app_exists(req, &app_id);
2470 if (ret < 0)
2471 goto loadapp_err;
2472
2473 if (app_id) {
2474 pr_debug("App id %d (%s) already exists\n", app_id,
2475 (char *)(req.app_name));
2476 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2477 list_for_each_entry(entry,
2478 &qseecom.registered_app_list_head, list){
2479 if (entry->app_id == app_id) {
2480 entry->ref_cnt++;
2481 break;
2482 }
2483 }
2484 spin_unlock_irqrestore(
2485 &qseecom.registered_app_list_lock, flags);
2486 ret = 0;
2487 } else {
2488 first_time = true;
2489 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2490 (char *)(load_img_req.img_name));
2491 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002492 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002493 load_img_req.ifd_data_fd);
2494 if (IS_ERR_OR_NULL(ihandle)) {
2495 pr_err("Ion client could not retrieve the handle\n");
2496 ret = -ENOMEM;
2497 goto loadapp_err;
2498 }
2499
2500 /* Get the physical address of the ION BUF */
2501 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2502 if (ret) {
2503 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2504 ret);
2505 goto loadapp_err;
2506 }
2507 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2508 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2509 len, load_img_req.mdt_len,
2510 load_img_req.img_len);
2511 ret = -EINVAL;
2512 goto loadapp_err;
2513 }
2514 /* Populate the structure for sending scm call to load image */
2515 if (qseecom.qsee_version < QSEE_VERSION_40) {
2516 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2517 load_req.mdt_len = load_img_req.mdt_len;
2518 load_req.img_len = load_img_req.img_len;
2519 strlcpy(load_req.app_name, load_img_req.img_name,
2520 MAX_APP_NAME_SIZE);
2521 load_req.phy_addr = (uint32_t)pa;
2522 cmd_buf = (void *)&load_req;
2523 cmd_len = sizeof(struct qseecom_load_app_ireq);
2524 } else {
2525 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2526 load_req_64bit.mdt_len = load_img_req.mdt_len;
2527 load_req_64bit.img_len = load_img_req.img_len;
2528 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2529 MAX_APP_NAME_SIZE);
2530 load_req_64bit.phy_addr = (uint64_t)pa;
2531 cmd_buf = (void *)&load_req_64bit;
2532 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2533 }
2534
2535 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2536 ION_IOC_CLEAN_INV_CACHES);
2537 if (ret) {
2538 pr_err("cache operation failed %d\n", ret);
2539 goto loadapp_err;
2540 }
2541
2542 /* SCM_CALL to load the app and get the app_id back */
2543 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2544 cmd_len, &resp, sizeof(resp));
2545 if (ret) {
2546 pr_err("scm_call to load app failed\n");
2547 if (!IS_ERR_OR_NULL(ihandle))
2548 ion_free(qseecom.ion_clnt, ihandle);
2549 ret = -EINVAL;
2550 goto loadapp_err;
2551 }
2552
2553 if (resp.result == QSEOS_RESULT_FAILURE) {
2554 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2555 if (!IS_ERR_OR_NULL(ihandle))
2556 ion_free(qseecom.ion_clnt, ihandle);
2557 ret = -EFAULT;
2558 goto loadapp_err;
2559 }
2560
2561 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2562 ret = __qseecom_process_incomplete_cmd(data, &resp);
2563 if (ret) {
2564 pr_err("process_incomplete_cmd failed err: %d\n",
2565 ret);
2566 if (!IS_ERR_OR_NULL(ihandle))
2567 ion_free(qseecom.ion_clnt, ihandle);
2568 ret = -EFAULT;
2569 goto loadapp_err;
2570 }
2571 }
2572
2573 if (resp.result != QSEOS_RESULT_SUCCESS) {
2574 pr_err("scm_call failed resp.result unknown, %d\n",
2575 resp.result);
2576 if (!IS_ERR_OR_NULL(ihandle))
2577 ion_free(qseecom.ion_clnt, ihandle);
2578 ret = -EFAULT;
2579 goto loadapp_err;
2580 }
2581
2582 app_id = resp.data;
2583
2584 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2585 if (!entry) {
2586 ret = -ENOMEM;
2587 goto loadapp_err;
2588 }
2589 entry->app_id = app_id;
2590 entry->ref_cnt = 1;
2591 entry->app_arch = load_img_req.app_arch;
2592 /*
2593 * keymaster app may be first loaded as "keymaste" by qseecomd,
2594 * and then used as "keymaster" on some targets. To avoid app
2595 * name checking error, register "keymaster" into app_list and
2596 * thread private data.
2597 */
2598 if (!strcmp(load_img_req.img_name, "keymaste"))
2599 strlcpy(entry->app_name, "keymaster",
2600 MAX_APP_NAME_SIZE);
2601 else
2602 strlcpy(entry->app_name, load_img_req.img_name,
2603 MAX_APP_NAME_SIZE);
2604 entry->app_blocked = false;
2605 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07002606 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002607
2608 /* Deallocate the handle */
2609 if (!IS_ERR_OR_NULL(ihandle))
2610 ion_free(qseecom.ion_clnt, ihandle);
2611
2612 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2613 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2614 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2615 flags);
2616
2617 pr_warn("App with id %u (%s) now loaded\n", app_id,
2618 (char *)(load_img_req.img_name));
2619 }
2620 data->client.app_id = app_id;
2621 data->client.app_arch = load_img_req.app_arch;
2622 if (!strcmp(load_img_req.img_name, "keymaste"))
2623 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2624 else
2625 strlcpy(data->client.app_name, load_img_req.img_name,
2626 MAX_APP_NAME_SIZE);
2627 load_img_req.app_id = app_id;
2628 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2629 pr_err("copy_to_user failed\n");
2630 ret = -EFAULT;
2631 if (first_time == true) {
2632 spin_lock_irqsave(
2633 &qseecom.registered_app_list_lock, flags);
2634 list_del(&entry->list);
2635 spin_unlock_irqrestore(
2636 &qseecom.registered_app_list_lock, flags);
2637 kzfree(entry);
2638 }
2639 }
2640
2641loadapp_err:
2642 __qseecom_disable_clk_scale_down(data);
2643enable_clk_err:
2644 if (qseecom.support_bus_scaling) {
2645 mutex_lock(&qsee_bw_mutex);
2646 qseecom_unregister_bus_bandwidth_needs(data);
2647 mutex_unlock(&qsee_bw_mutex);
2648 }
2649 return ret;
2650}
2651
2652static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2653{
2654 int ret = 1; /* Set unload app */
2655
2656 wake_up_all(&qseecom.send_resp_wq);
2657 if (qseecom.qsee_reentrancy_support)
2658 mutex_unlock(&app_access_lock);
2659 while (atomic_read(&data->ioctl_count) > 1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302660 if (wait_event_interruptible(data->abort_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002661 atomic_read(&data->ioctl_count) <= 1)) {
2662 pr_err("Interrupted from abort\n");
2663 ret = -ERESTARTSYS;
2664 break;
2665 }
2666 }
2667 if (qseecom.qsee_reentrancy_support)
2668 mutex_lock(&app_access_lock);
2669 return ret;
2670}
2671
2672static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2673{
2674 int ret = 0;
2675
2676 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2677 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2678 ion_free(qseecom.ion_clnt, data->client.ihandle);
jitendrathakarec7ff9e42019-09-12 19:46:48 +05302679 memset((void *)&data->client,
2680 0, sizeof(struct qseecom_client_handle));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002681 }
2682 return ret;
2683}
2684
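/*
 * Unload a trusted app: drops the local reference and, when this is the
 * last reference (or the app crashed), asks QSEE to shut the app down and
 * removes it from the registered app list. The keymaster app is never
 * unloaded here.
 */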
2685static int qseecom_unload_app(struct qseecom_dev_handle *data,
2686 bool app_crash)
2687{
2688 unsigned long flags;
2689 unsigned long flags1;
2690 int ret = 0;
2691 struct qseecom_command_scm_resp resp;
2692 struct qseecom_registered_app_list *ptr_app = NULL;
2693 bool unload = false;
2694 bool found_app = false;
2695 bool found_dead_app = false;
Zhen Kongf818f152019-03-13 12:31:32 -07002696 bool scm_called = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002697
2698 if (!data) {
2699 pr_err("Invalid/uninitialized device handle\n");
2700 return -EINVAL;
2701 }
2702
2703 if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
2704 pr_debug("Do not unload keymaster app from tz\n");
2705 goto unload_exit;
2706 }
2707
2708 __qseecom_cleanup_app(data);
2709 __qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
2710
2711 if (data->client.app_id > 0) {
2712 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2713 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
2714 list) {
2715 if (ptr_app->app_id == data->client.app_id) {
2716 if (!strcmp((void *)ptr_app->app_name,
2717 (void *)data->client.app_name)) {
2718 found_app = true;
Zhen Kong024798b2018-07-13 18:14:26 -07002719 if (ptr_app->app_blocked ||
2720 ptr_app->check_block)
Zhen Kongaf93d7a2017-10-13 14:01:48 -07002721 app_crash = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002722 if (app_crash || ptr_app->ref_cnt == 1)
2723 unload = true;
2724 break;
2725 }
2726 found_dead_app = true;
2727 break;
2728 }
2729 }
2730 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2731 flags);
2732 if (found_app == false && found_dead_app == false) {
2733 pr_err("Cannot find app with id = %d (%s)\n",
2734 data->client.app_id,
2735 (char *)data->client.app_name);
2736 ret = -EINVAL;
2737 goto unload_exit;
2738 }
2739 }
2740
2741 if (found_dead_app)
2742 pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
2743 (char *)data->client.app_name);
2744
2745 if (unload) {
2746 struct qseecom_unload_app_ireq req;
2747 /* Populate the structure for sending scm call to load image */
2748 req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
2749 req.app_id = data->client.app_id;
2750
2751 /* SCM_CALL to unload the app */
2752 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2753 sizeof(struct qseecom_unload_app_ireq),
2754 &resp, sizeof(resp));
Zhen Kongf818f152019-03-13 12:31:32 -07002755 scm_called = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002756 if (ret) {
2757 pr_err("scm_call to unload app (id = %d) failed\n",
2758 req.app_id);
2759 ret = -EFAULT;
Zhen Kongf818f152019-03-13 12:31:32 -07002760 goto scm_exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002761 } else {
2762 pr_warn("App id %d now unloaded\n", req.app_id);
2763 }
2764 if (resp.result == QSEOS_RESULT_FAILURE) {
2765 pr_err("app (%d) unload_failed!!\n",
2766 data->client.app_id);
2767 ret = -EFAULT;
Zhen Kongf818f152019-03-13 12:31:32 -07002768 goto scm_exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002769 }
2770 if (resp.result == QSEOS_RESULT_SUCCESS)
2771 pr_debug("App (%d) is unloaded!!\n",
2772 data->client.app_id);
2773 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2774 ret = __qseecom_process_incomplete_cmd(data, &resp);
2775 if (ret) {
2776 pr_err("process_incomplete_cmd fail err: %d\n",
2777 ret);
Zhen Kongf818f152019-03-13 12:31:32 -07002778 goto scm_exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002779 }
2780 }
2781 }
2782
Zhen Kongf818f152019-03-13 12:31:32 -07002783scm_exit:
2784 if (scm_called) {
2785 /* double check if this app_entry still exists */
2786 bool doublecheck = false;
2787
2788 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2789 list_for_each_entry(ptr_app,
2790 &qseecom.registered_app_list_head, list) {
2791 if ((ptr_app->app_id == data->client.app_id) &&
2792 (!strcmp((void *)ptr_app->app_name,
2793 (void *)data->client.app_name))) {
2794 doublecheck = true;
2795 break;
2796 }
2797 }
2798 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2799 flags1);
2800 if (!doublecheck) {
2801 pr_warn("app %d(%s) entry is already removed\n",
2802 data->client.app_id,
2803 (char *)data->client.app_name);
2804 found_app = false;
2805 }
2806 }
Zhen Kong7d500032018-08-06 16:58:31 -07002807unload_exit:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002808 if (found_app) {
2809 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2810 if (app_crash) {
2811 ptr_app->ref_cnt = 0;
2812 pr_debug("app_crash: ref_count = 0\n");
2813 } else {
2814 if (ptr_app->ref_cnt == 1) {
2815 ptr_app->ref_cnt = 0;
2816 pr_debug("ref_count set to 0\n");
2817 } else {
2818 ptr_app->ref_cnt--;
2819 pr_debug("Can't unload app(%d) inuse\n",
2820 ptr_app->app_id);
2821 }
2822 }
2823 if (unload) {
2824 list_del(&ptr_app->list);
2825 kzfree(ptr_app);
2826 }
2827 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2828 flags1);
2829 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002830 qseecom_unmap_ion_allocated_memory(data);
2831 data->released = true;
2832 return ret;
2833}
2834
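/*
 * Helpers translating a client user-space address within the registered
 * shared buffer into the corresponding physical / kernel virtual address.
 */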
2835static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2836 unsigned long virt)
2837{
2838 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2839}
2840
2841static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2842 unsigned long virt)
2843{
2844 return (uintptr_t)data->client.sb_virt +
2845 (virt - data->client.user_virt_sb_base);
2846}
2847
2848int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2849 struct qseecom_send_svc_cmd_req *req_ptr,
2850 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2851{
2852 int ret = 0;
2853 void *req_buf = NULL;
2854
2855 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2856 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2857 req_ptr, send_svc_ireq_ptr);
2858 return -EINVAL;
2859 }
2860
2861 /* Clients need to ensure req_buf is at base offset of shared buffer */
2862 if ((uintptr_t)req_ptr->cmd_req_buf !=
2863 data_ptr->client.user_virt_sb_base) {
2864 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2865 return -EINVAL;
2866 }
2867
2868 if (data_ptr->client.sb_length <
2869 sizeof(struct qseecom_rpmb_provision_key)) {
2870 pr_err("shared buffer is too small to hold key type\n");
2871 return -EINVAL;
2872 }
2873 req_buf = data_ptr->client.sb_virt;
2874
2875 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2876 send_svc_ireq_ptr->key_type =
2877 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2878 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2879 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2880 data_ptr, (uintptr_t)req_ptr->resp_buf));
2881 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2882
2883 return ret;
2884}
2885
2886int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2887 struct qseecom_send_svc_cmd_req *req_ptr,
2888 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2889{
2890 int ret = 0;
2891 uint32_t reqd_len_sb_in = 0;
2892
2893 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2894 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2895 req_ptr, send_svc_ireq_ptr);
2896 return -EINVAL;
2897 }
2898
2899 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2900 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2901 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2902 pr_err("Required: %u, Available: %zu\n",
2903 reqd_len_sb_in, data_ptr->client.sb_length);
2904 return -ENOMEM;
2905 }
2906
2907 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2908 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2909 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2910 data_ptr, (uintptr_t)req_ptr->resp_buf));
2911 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2912
2913 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2914 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2915
2916
2917 return ret;
2918}
2919
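/*
 * Sanity-check a send_svc_cmd request: cmd/resp buffers must lie entirely
 * within the registered shared buffer and the lengths must not overflow.
 */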
2920static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
2921 struct qseecom_send_svc_cmd_req *req)
2922{
2923 if (!req || !req->resp_buf || !req->cmd_req_buf) {
2924 pr_err("req or cmd buffer or response buffer is null\n");
2925 return -EINVAL;
2926 }
2927
2928 if (!data || !data->client.ihandle) {
2929 pr_err("Client or client handle is not initialized\n");
2930 return -EINVAL;
2931 }
2932
2933 if (data->client.sb_virt == NULL) {
2934 pr_err("sb_virt null\n");
2935 return -EINVAL;
2936 }
2937
2938 if (data->client.user_virt_sb_base == 0) {
2939 pr_err("user_virt_sb_base is null\n");
2940 return -EINVAL;
2941 }
2942
2943 if (data->client.sb_length == 0) {
2944 pr_err("sb_length is 0\n");
2945 return -EINVAL;
2946 }
2947
2948 if (((uintptr_t)req->cmd_req_buf <
2949 data->client.user_virt_sb_base) ||
2950 ((uintptr_t)req->cmd_req_buf >=
2951 (data->client.user_virt_sb_base + data->client.sb_length))) {
2952 pr_err("cmd buffer address not within shared bufffer\n");
2953 return -EINVAL;
2954 }
2955 if (((uintptr_t)req->resp_buf <
2956 data->client.user_virt_sb_base) ||
2957 ((uintptr_t)req->resp_buf >=
2958 (data->client.user_virt_sb_base + data->client.sb_length))) {
2959 pr_err("response buffer address not within shared bufffer\n");
2960 return -EINVAL;
2961 }
2962 if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
2963 (req->cmd_req_len > data->client.sb_length) ||
2964 (req->resp_len > data->client.sb_length)) {
2965 pr_err("cmd buf length or response buf length not valid\n");
2966 return -EINVAL;
2967 }
2968 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
2969 pr_err("Integer overflow detected in req_len & rsp_len\n");
2970 return -EINVAL;
2971 }
2972
2973 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
2974 pr_debug("Not enough memory to fit cmd_buf.\n");
2975 pr_debug("resp_buf. Required: %u, Available: %zu\n",
2976 (req->cmd_req_len + req->resp_len),
2977 data->client.sb_length);
2978 return -ENOMEM;
2979 }
2980 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
2981 pr_err("Integer overflow in req_len & cmd_req_buf\n");
2982 return -EINVAL;
2983 }
2984 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
2985 pr_err("Integer overflow in resp_len & resp_buf\n");
2986 return -EINVAL;
2987 }
2988 if (data->client.user_virt_sb_base >
2989 (ULONG_MAX - data->client.sb_length)) {
2990 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
2991 return -EINVAL;
2992 }
2993 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
2994 ((uintptr_t)data->client.user_virt_sb_base +
2995 data->client.sb_length)) ||
2996 (((uintptr_t)req->resp_buf + req->resp_len) >
2997 ((uintptr_t)data->client.user_virt_sb_base +
2998 data->client.sb_length))) {
2999 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3000 return -EINVAL;
3001 }
3002 return 0;
3003}
3004
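/*
 * Send a service command (RPMB or FSM key) to QSEE: builds the appropriate
 * ireq from the shared buffer, votes for bandwidth/clocks, performs the
 * scm_call, and processes any INCOMPLETE response.
 */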
3005static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
3006 void __user *argp)
3007{
3008 int ret = 0;
3009 struct qseecom_client_send_service_ireq send_svc_ireq;
3010 struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
3011 struct qseecom_command_scm_resp resp;
3012 struct qseecom_send_svc_cmd_req req;
3013 void *send_req_ptr;
3014 size_t req_buf_size;
3015
3016 /*struct qseecom_command_scm_resp resp;*/
3017
3018 if (copy_from_user(&req,
3019 (void __user *)argp,
3020 sizeof(req))) {
3021 pr_err("copy_from_user failed\n");
3022 return -EFAULT;
3023 }
3024
3025 if (__validate_send_service_cmd_inputs(data, &req))
3026 return -EINVAL;
3027
3028 data->type = QSEECOM_SECURE_SERVICE;
3029
3030 switch (req.cmd_id) {
3031 case QSEOS_RPMB_PROVISION_KEY_COMMAND:
3032 case QSEOS_RPMB_ERASE_COMMAND:
3033 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
3034 send_req_ptr = &send_svc_ireq;
3035 req_buf_size = sizeof(send_svc_ireq);
3036 if (__qseecom_process_rpmb_svc_cmd(data, &req,
3037 send_req_ptr))
3038 return -EINVAL;
3039 break;
3040 case QSEOS_FSM_LTEOTA_REQ_CMD:
3041 case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
3042 case QSEOS_FSM_IKE_REQ_CMD:
3043 case QSEOS_FSM_IKE_REQ_RSP_CMD:
3044 case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
3045 case QSEOS_FSM_OEM_FUSE_READ_ROW:
3046 case QSEOS_FSM_ENCFS_REQ_CMD:
3047 case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
3048 send_req_ptr = &send_fsm_key_svc_ireq;
3049 req_buf_size = sizeof(send_fsm_key_svc_ireq);
3050 if (__qseecom_process_fsm_key_svc_cmd(data, &req,
3051 send_req_ptr))
3052 return -EINVAL;
3053 break;
3054 default:
3055 pr_err("Unsupported cmd_id %d\n", req.cmd_id);
3056 return -EINVAL;
3057 }
3058
3059 if (qseecom.support_bus_scaling) {
3060 ret = qseecom_scale_bus_bandwidth_timer(HIGH);
3061 if (ret) {
3062 pr_err("Fail to set bw HIGH\n");
3063 return ret;
3064 }
3065 } else {
3066 ret = qseecom_perf_enable(data);
3067 if (ret) {
3068 pr_err("Failed to vote for clocks with err %d\n", ret);
3069 goto exit;
3070 }
3071 }
3072
3073 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3074 data->client.sb_virt, data->client.sb_length,
3075 ION_IOC_CLEAN_INV_CACHES);
3076 if (ret) {
3077 pr_err("cache operation failed %d\n", ret);
3078 goto exit;
3079 }
3080 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3081 (const void *)send_req_ptr,
3082 req_buf_size, &resp, sizeof(resp));
3083 if (ret) {
3084 pr_err("qseecom_scm_call failed with err: %d\n", ret);
3085 if (!qseecom.support_bus_scaling) {
3086 qsee_disable_clock_vote(data, CLK_DFAB);
3087 qsee_disable_clock_vote(data, CLK_SFPB);
3088 } else {
3089 __qseecom_add_bw_scale_down_timer(
3090 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3091 }
3092 goto exit;
3093 }
3094 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3095 data->client.sb_virt, data->client.sb_length,
3096 ION_IOC_INV_CACHES);
3097 if (ret) {
3098 pr_err("cache operation failed %d\n", ret);
3099 goto exit;
3100 }
3101 switch (resp.result) {
3102 case QSEOS_RESULT_SUCCESS:
3103 break;
3104 case QSEOS_RESULT_INCOMPLETE:
3105 pr_debug("qseos_result_incomplete\n");
3106 ret = __qseecom_process_incomplete_cmd(data, &resp);
3107 if (ret) {
3108 pr_err("process_incomplete_cmd fail with result: %d\n",
3109 resp.result);
3110 }
3111 if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
3112 pr_warn("RPMB key status is 0x%x\n", resp.result);
Brahmaji Kb33e26e2017-06-01 17:20:10 +05303113 if (put_user(resp.result,
3114 (uint32_t __user *)req.resp_buf)) {
3115 ret = -EINVAL;
3116 goto exit;
3117 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003118 ret = 0;
3119 }
3120 break;
3121 case QSEOS_RESULT_FAILURE:
3122 pr_err("scm call failed with resp.result: %d\n", resp.result);
3123 ret = -EINVAL;
3124 break;
3125 default:
3126 pr_err("Response result %d not supported\n",
3127 resp.result);
3128 ret = -EINVAL;
3129 break;
3130 }
3131 if (!qseecom.support_bus_scaling) {
3132 qsee_disable_clock_vote(data, CLK_DFAB);
3133 qsee_disable_clock_vote(data, CLK_SFPB);
3134 } else {
3135 __qseecom_add_bw_scale_down_timer(
3136 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3137 }
3138
3139exit:
3140 return ret;
3141}
3142
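/*
 * Sanity-check a send_cmd request: both the command and response buffers
 * must lie entirely within the client's registered shared buffer, their
 * combined length must fit, and every pointer/length sum is checked for
 * integer overflow before use.
 */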
3143static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
3144 struct qseecom_send_cmd_req *req)
3145
3146{
3147 if (!data || !data->client.ihandle) {
3148 pr_err("Client or client handle is not initialized\n");
3149 return -EINVAL;
3150 }
3151 if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
3152 (req->cmd_req_buf == NULL)) {
3153 pr_err("cmd buffer or response buffer is null\n");
3154 return -EINVAL;
3155 }
3156 if (((uintptr_t)req->cmd_req_buf <
3157 data->client.user_virt_sb_base) ||
3158 ((uintptr_t)req->cmd_req_buf >=
3159 (data->client.user_virt_sb_base + data->client.sb_length))) {
3160		pr_err("cmd buffer address not within shared buffer\n");
3161 return -EINVAL;
3162 }
3163 if (((uintptr_t)req->resp_buf <
3164 data->client.user_virt_sb_base) ||
3165 ((uintptr_t)req->resp_buf >=
3166 (data->client.user_virt_sb_base + data->client.sb_length))) {
3167		pr_err("response buffer address not within shared buffer\n");
3168 return -EINVAL;
3169 }
3170 if ((req->cmd_req_len == 0) ||
3171 (req->cmd_req_len > data->client.sb_length) ||
3172 (req->resp_len > data->client.sb_length)) {
3173 pr_err("cmd buf length or response buf length not valid\n");
3174 return -EINVAL;
3175 }
3176 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
3177 pr_err("Integer overflow detected in req_len & rsp_len\n");
3178 return -EINVAL;
3179 }
3180
3181 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
3182		pr_debug("Not enough memory to fit cmd_buf and resp_buf\n");
3183		pr_debug("Required: %u, Available: %zu\n",
3184			(req->cmd_req_len + req->resp_len),
3185			data->client.sb_length);
3186 return -ENOMEM;
3187 }
3188 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
3189 pr_err("Integer overflow in req_len & cmd_req_buf\n");
3190 return -EINVAL;
3191 }
3192 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
3193 pr_err("Integer overflow in resp_len & resp_buf\n");
3194 return -EINVAL;
3195 }
3196 if (data->client.user_virt_sb_base >
3197 (ULONG_MAX - data->client.sb_length)) {
3198 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
3199 return -EINVAL;
3200 }
3201 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3202 ((uintptr_t)data->client.user_virt_sb_base +
3203 data->client.sb_length)) ||
3204 (((uintptr_t)req->resp_buf + req->resp_len) >
3205 ((uintptr_t)data->client.user_virt_sb_base +
3206 data->client.sb_length))) {
3207 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3208 return -EINVAL;
3209 }
3210 return 0;
3211}
3212
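/*
 * Handle the TZ response for a command sent with reentrancy support
 * enabled: unblock the app if it is waiting on a listener, then drive any
 * QSEOS_RESULT_INCOMPLETE response to completion while the app is marked
 * blocked so that concurrent callers back off.
 */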
3213int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
3214 struct qseecom_registered_app_list *ptr_app,
3215 struct qseecom_dev_handle *data)
3216{
3217 int ret = 0;
3218
3219 switch (resp->result) {
3220 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
3221 pr_warn("App(%d) %s is blocked on listener %d\n",
3222 data->client.app_id, data->client.app_name,
3223 resp->data);
3224 ret = __qseecom_process_reentrancy_blocked_on_listener(
3225 resp, ptr_app, data);
3226 if (ret) {
3227			pr_err("failed to process App(%d) %s blocked on listener %d\n",
3228 data->client.app_id, data->client.app_name, resp->data);
3229 return ret;
3230 }
3231
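		/* fall through to process the incomplete response once unblocked */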
3232 case QSEOS_RESULT_INCOMPLETE:
3233 qseecom.app_block_ref_cnt++;
3234 ptr_app->app_blocked = true;
3235 ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
3236 ptr_app->app_blocked = false;
3237 qseecom.app_block_ref_cnt--;
Zhen Kongcc580932019-04-23 22:16:56 -07003238 wake_up_interruptible_all(&qseecom.app_block_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003239 if (ret)
3240 pr_err("process_incomplete_cmd failed err: %d\n",
3241 ret);
3242 return ret;
3243 case QSEOS_RESULT_SUCCESS:
3244 return ret;
3245 default:
3246 pr_err("Response result %d not supported\n",
3247 resp->result);
3248 return -EINVAL;
3249 }
3250}
3251
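/*
 * Core send-command path: look up the registered app, build the 32-bit or
 * 64-bit send_data request (including the flushed sglist info table), issue
 * the SCM call, and process INCOMPLETE/blocked results before invalidating
 * the shared buffer cache on the way out.
 */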
3252static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
3253 struct qseecom_send_cmd_req *req)
3254{
3255 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07003256 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003257 u32 reqd_len_sb_in = 0;
3258 struct qseecom_client_send_data_ireq send_data_req = {0};
3259 struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
3260 struct qseecom_command_scm_resp resp;
3261 unsigned long flags;
3262 struct qseecom_registered_app_list *ptr_app;
3263 bool found_app = false;
3264 void *cmd_buf = NULL;
3265 size_t cmd_len;
3266 struct sglist_info *table = data->sglistinfo_ptr;
3267
3268 reqd_len_sb_in = req->cmd_req_len + req->resp_len;
3269 /* find app_id & img_name from list */
3270 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
3271 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
3272 list) {
3273 if ((ptr_app->app_id == data->client.app_id) &&
3274 (!strcmp(ptr_app->app_name, data->client.app_name))) {
3275 found_app = true;
3276 break;
3277 }
3278 }
3279 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
3280
3281 if (!found_app) {
3282 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
3283 (char *)data->client.app_name);
3284 return -ENOENT;
3285 }
3286
3287 if (qseecom.qsee_version < QSEE_VERSION_40) {
3288 send_data_req.app_id = data->client.app_id;
3289 send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3290 data, (uintptr_t)req->cmd_req_buf));
3291 send_data_req.req_len = req->cmd_req_len;
3292 send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3293 data, (uintptr_t)req->resp_buf));
3294 send_data_req.rsp_len = req->resp_len;
3295 send_data_req.sglistinfo_ptr =
3296 (uint32_t)virt_to_phys(table);
3297 send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3298 dmac_flush_range((void *)table,
3299 (void *)table + SGLISTINFO_TABLE_SIZE);
3300 cmd_buf = (void *)&send_data_req;
3301 cmd_len = sizeof(struct qseecom_client_send_data_ireq);
3302 } else {
3303 send_data_req_64bit.app_id = data->client.app_id;
3304 send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
3305 (uintptr_t)req->cmd_req_buf);
3306 send_data_req_64bit.req_len = req->cmd_req_len;
3307 send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
3308 (uintptr_t)req->resp_buf);
3309 send_data_req_64bit.rsp_len = req->resp_len;
3310 /* check if 32bit app's phys_addr region is under 4GB.*/
3311 if ((data->client.app_arch == ELFCLASS32) &&
3312 ((send_data_req_64bit.req_ptr >=
3313 PHY_ADDR_4G - send_data_req_64bit.req_len) ||
3314 (send_data_req_64bit.rsp_ptr >=
3315 PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
3316 pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
3317 data->client.app_name,
3318 send_data_req_64bit.req_ptr,
3319 send_data_req_64bit.req_len,
3320 send_data_req_64bit.rsp_ptr,
3321 send_data_req_64bit.rsp_len);
3322 return -EFAULT;
3323 }
3324 send_data_req_64bit.sglistinfo_ptr =
3325 (uint64_t)virt_to_phys(table);
3326 send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3327 dmac_flush_range((void *)table,
3328 (void *)table + SGLISTINFO_TABLE_SIZE);
3329 cmd_buf = (void *)&send_data_req_64bit;
3330 cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
3331 }
3332
3333 if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
3334 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
3335 else
3336 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
3337
3338 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3339 data->client.sb_virt,
3340 reqd_len_sb_in,
3341 ION_IOC_CLEAN_INV_CACHES);
3342 if (ret) {
3343 pr_err("cache operation failed %d\n", ret);
3344 return ret;
3345 }
3346
3347 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
3348
3349 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3350 cmd_buf, cmd_len,
3351 &resp, sizeof(resp));
3352 if (ret) {
3353 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
3354 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07003355 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003356 }
3357
3358 if (qseecom.qsee_reentrancy_support) {
3359 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07003360 if (ret)
3361 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003362 } else {
3363 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
3364 ret = __qseecom_process_incomplete_cmd(data, &resp);
3365 if (ret) {
3366 pr_err("process_incomplete_cmd failed err: %d\n",
3367 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07003368 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003369 }
3370 } else {
3371 if (resp.result != QSEOS_RESULT_SUCCESS) {
3372 pr_err("Response result %d not supported\n",
3373 resp.result);
3374 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07003375 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003376 }
3377 }
3378 }
Zhen Kong4af480e2017-09-19 14:34:16 -07003379exit:
3380 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003381 data->client.sb_virt, data->client.sb_length,
3382 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07003383 if (ret2) {
3384 pr_err("cache operation failed %d\n", ret2);
3385 return ret2;
3386 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003387 return ret;
3388}
3389
3390static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3391{
3392 int ret = 0;
3393 struct qseecom_send_cmd_req req;
3394
3395 ret = copy_from_user(&req, argp, sizeof(req));
3396 if (ret) {
3397 pr_err("copy_from_user failed\n");
3398 return ret;
3399 }
3400
3401 if (__validate_send_cmd_inputs(data, &req))
3402 return -EINVAL;
3403
3404 ret = __qseecom_send_cmd(data, &req);
3405
3406 if (ret)
3407 return ret;
3408
3409 return ret;
3410}
3411
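/*
 * Ensure a modfd cmd_buf_offset leaves room for at least one 32-bit
 * physical address inside the command buffer (client apps) or the listener
 * response buffer (listener services).
 */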
3412int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3413 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3414			struct qseecom_dev_handle *data, int i)
3415{
3416 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3417 (req->ifd_data[i].fd > 0)) {
3418 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3419 (req->ifd_data[i].cmd_buf_offset >
3420 req->cmd_req_len - sizeof(uint32_t))) {
3421 pr_err("Invalid offset (req len) 0x%x\n",
3422 req->ifd_data[i].cmd_buf_offset);
3423 return -EINVAL;
3424 }
3425 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3426 (lstnr_resp->ifd_data[i].fd > 0)) {
3427 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3428 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3429 lstnr_resp->resp_len - sizeof(uint32_t))) {
3430 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3431 lstnr_resp->ifd_data[i].cmd_buf_offset);
3432 return -EINVAL;
3433 }
3434 }
3435 return 0;
3436}
3437
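/*
 * Patch the physical address (or scatter-gather entry list) of each ION fd
 * referenced by a modfd request into the command/response buffer at the
 * caller-supplied offset; with cleanup set, the same fields are zeroed and
 * the caches invalidated instead.
 */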
3438static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
3439 struct qseecom_dev_handle *data)
3440{
3441 struct ion_handle *ihandle;
3442 char *field;
3443 int ret = 0;
3444 int i = 0;
3445 uint32_t len = 0;
3446 struct scatterlist *sg;
3447 struct qseecom_send_modfd_cmd_req *req = NULL;
3448 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3449 struct qseecom_registered_listener_list *this_lstnr = NULL;
3450 uint32_t offset;
3451 struct sg_table *sg_ptr;
3452
3453 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3454 (data->type != QSEECOM_CLIENT_APP))
3455 return -EFAULT;
3456
3457 if (msg == NULL) {
3458 pr_err("Invalid address\n");
3459 return -EINVAL;
3460 }
3461 if (data->type == QSEECOM_LISTENER_SERVICE) {
3462 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3463 this_lstnr = __qseecom_find_svc(data->listener.id);
3464 if (IS_ERR_OR_NULL(this_lstnr)) {
3465 pr_err("Invalid listener ID\n");
3466 return -ENOMEM;
3467 }
3468 } else {
3469 req = (struct qseecom_send_modfd_cmd_req *)msg;
3470 }
3471
3472 for (i = 0; i < MAX_ION_FD; i++) {
3473 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3474 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003475 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003476 req->ifd_data[i].fd);
3477 if (IS_ERR_OR_NULL(ihandle)) {
3478 pr_err("Ion client can't retrieve the handle\n");
3479 return -ENOMEM;
3480 }
3481 field = (char *) req->cmd_req_buf +
3482 req->ifd_data[i].cmd_buf_offset;
3483 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3484 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003485 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003486 lstnr_resp->ifd_data[i].fd);
3487 if (IS_ERR_OR_NULL(ihandle)) {
3488 pr_err("Ion client can't retrieve the handle\n");
3489 return -ENOMEM;
3490 }
3491 field = lstnr_resp->resp_buf_ptr +
3492 lstnr_resp->ifd_data[i].cmd_buf_offset;
3493 } else {
3494 continue;
3495 }
3496 /* Populate the cmd data structure with the phys_addr */
3497 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3498 if (IS_ERR_OR_NULL(sg_ptr)) {
3499			pr_err("Ion client could not retrieve sg table\n");
3500 goto err;
3501 }
3502 if (sg_ptr->nents == 0) {
3503 pr_err("Num of scattered entries is 0\n");
3504 goto err;
3505 }
3506 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3507			pr_err("Num of scattered entries (%d) is greater than max supported %d\n",
3508				sg_ptr->nents,
3509				QSEECOM_MAX_SG_ENTRY);
3510 goto err;
3511 }
3512 sg = sg_ptr->sgl;
3513 if (sg_ptr->nents == 1) {
3514 uint32_t *update;
3515
3516 if (__boundary_checks_offset(req, lstnr_resp, data, i))
3517 goto err;
3518 if ((data->type == QSEECOM_CLIENT_APP &&
3519 (data->client.app_arch == ELFCLASS32 ||
3520 data->client.app_arch == ELFCLASS64)) ||
3521 (data->type == QSEECOM_LISTENER_SERVICE)) {
3522 /*
3523 * Check if sg list phy add region is under 4GB
3524 */
3525 if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
3526 (!cleanup) &&
3527 ((uint64_t)sg_dma_address(sg_ptr->sgl)
3528 >= PHY_ADDR_4G - sg->length)) {
3529 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3530 data->client.app_name,
3531 &(sg_dma_address(sg_ptr->sgl)),
3532 sg->length);
3533 goto err;
3534 }
3535 update = (uint32_t *) field;
3536 *update = cleanup ? 0 :
3537 (uint32_t)sg_dma_address(sg_ptr->sgl);
3538 } else {
3539 pr_err("QSEE app arch %u is not supported\n",
3540 data->client.app_arch);
3541 goto err;
3542 }
3543 len += (uint32_t)sg->length;
3544 } else {
3545 struct qseecom_sg_entry *update;
3546 int j = 0;
3547
3548 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3549 (req->ifd_data[i].fd > 0)) {
3550
3551 if ((req->cmd_req_len <
3552 SG_ENTRY_SZ * sg_ptr->nents) ||
3553 (req->ifd_data[i].cmd_buf_offset >
3554 (req->cmd_req_len -
3555 SG_ENTRY_SZ * sg_ptr->nents))) {
3556 pr_err("Invalid offset = 0x%x\n",
3557 req->ifd_data[i].cmd_buf_offset);
3558 goto err;
3559 }
3560
3561 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3562 (lstnr_resp->ifd_data[i].fd > 0)) {
3563
3564 if ((lstnr_resp->resp_len <
3565 SG_ENTRY_SZ * sg_ptr->nents) ||
3566 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3567 (lstnr_resp->resp_len -
3568 SG_ENTRY_SZ * sg_ptr->nents))) {
3569 goto err;
3570 }
3571 }
3572 if ((data->type == QSEECOM_CLIENT_APP &&
3573 (data->client.app_arch == ELFCLASS32 ||
3574 data->client.app_arch == ELFCLASS64)) ||
3575 (data->type == QSEECOM_LISTENER_SERVICE)) {
3576 update = (struct qseecom_sg_entry *)field;
3577 for (j = 0; j < sg_ptr->nents; j++) {
3578 /*
3579 * Check if sg list PA is under 4GB
3580 */
3581 if ((qseecom.qsee_version >=
3582 QSEE_VERSION_40) &&
3583 (!cleanup) &&
3584 ((uint64_t)(sg_dma_address(sg))
3585 >= PHY_ADDR_4G - sg->length)) {
3586 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3587 data->client.app_name,
3588 &(sg_dma_address(sg)),
3589 sg->length);
3590 goto err;
3591 }
3592 update->phys_addr = cleanup ? 0 :
3593 (uint32_t)sg_dma_address(sg);
3594 update->len = cleanup ? 0 : sg->length;
3595 update++;
3596 len += sg->length;
3597 sg = sg_next(sg);
3598 }
3599 } else {
3600 pr_err("QSEE app arch %u is not supported\n",
3601 data->client.app_arch);
3602 goto err;
3603 }
3604 }
3605
3606 if (cleanup) {
3607 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3608 ihandle, NULL, len,
3609 ION_IOC_INV_CACHES);
3610 if (ret) {
3611 pr_err("cache operation failed %d\n", ret);
3612 goto err;
3613 }
3614 } else {
3615 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3616 ihandle, NULL, len,
3617 ION_IOC_CLEAN_INV_CACHES);
3618 if (ret) {
3619 pr_err("cache operation failed %d\n", ret);
3620 goto err;
3621 }
3622 if (data->type == QSEECOM_CLIENT_APP) {
3623 offset = req->ifd_data[i].cmd_buf_offset;
3624 data->sglistinfo_ptr[i].indexAndFlags =
3625 SGLISTINFO_SET_INDEX_FLAG(
3626 (sg_ptr->nents == 1), 0, offset);
3627 data->sglistinfo_ptr[i].sizeOrCount =
3628 (sg_ptr->nents == 1) ?
3629 sg->length : sg_ptr->nents;
3630 data->sglist_cnt = i + 1;
3631 } else {
3632 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3633 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3634 (uintptr_t)this_lstnr->sb_virt);
3635 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3636 SGLISTINFO_SET_INDEX_FLAG(
3637 (sg_ptr->nents == 1), 0, offset);
3638 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3639 (sg_ptr->nents == 1) ?
3640 sg->length : sg_ptr->nents;
3641 this_lstnr->sglist_cnt = i + 1;
3642 }
3643 }
3644 /* Deallocate the handle */
3645 if (!IS_ERR_OR_NULL(ihandle))
3646 ion_free(qseecom.ion_clnt, ihandle);
3647 }
3648 return ret;
3649err:
3650 if (!IS_ERR_OR_NULL(ihandle))
3651 ion_free(qseecom.ion_clnt, ihandle);
3652 return -ENOMEM;
3653}
3654
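/*
 * When an fd maps to more scatter-gather entries than fit in the message
 * buffer, allocate a DMA-coherent side buffer, copy all entries into it,
 * and leave a version-2 sg-list header at the original offset pointing to
 * that buffer.
 */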
3655static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3656 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3657{
3658 struct scatterlist *sg = sg_ptr->sgl;
3659 struct qseecom_sg_entry_64bit *sg_entry;
3660 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3661 void *buf;
3662 uint i;
3663 size_t size;
3664 dma_addr_t coh_pmem;
3665
3666 if (fd_idx >= MAX_ION_FD) {
3667 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3668 return -ENOMEM;
3669 }
3670 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3671 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3672 /* Allocate a contiguous kernel buffer */
3673 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3674 size = (size + PAGE_SIZE) & PAGE_MASK;
3675 buf = dma_alloc_coherent(qseecom.pdev,
3676 size, &coh_pmem, GFP_KERNEL);
3677 if (buf == NULL) {
3678 pr_err("failed to alloc memory for sg buf\n");
3679 return -ENOMEM;
3680 }
3681 /* update qseecom_sg_list_buf_hdr_64bit */
3682 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3683 buf_hdr->new_buf_phys_addr = coh_pmem;
3684 buf_hdr->nents_total = sg_ptr->nents;
3685 /* save the left sg entries into new allocated buf */
3686 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3687 for (i = 0; i < sg_ptr->nents; i++) {
3688 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3689 sg_entry->len = sg->length;
3690 sg_entry++;
3691 sg = sg_next(sg);
3692 }
3693
3694 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3695 data->client.sec_buf_fd[fd_idx].vbase = buf;
3696 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3697 data->client.sec_buf_fd[fd_idx].size = size;
3698
3699 return 0;
3700}
3701
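/*
 * 64-bit variant of __qseecom_update_cmd_buf(): writes 64-bit physical
 * addresses / sg entries, falling back to a separately allocated sg-list
 * buffer when an fd carries more than QSEECOM_MAX_SG_ENTRY entries.
 */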
3702static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
3703 struct qseecom_dev_handle *data)
3704{
3705 struct ion_handle *ihandle;
3706 char *field;
3707 int ret = 0;
3708 int i = 0;
3709 uint32_t len = 0;
3710 struct scatterlist *sg;
3711 struct qseecom_send_modfd_cmd_req *req = NULL;
3712 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3713 struct qseecom_registered_listener_list *this_lstnr = NULL;
3714 uint32_t offset;
3715 struct sg_table *sg_ptr;
3716
3717 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3718 (data->type != QSEECOM_CLIENT_APP))
3719 return -EFAULT;
3720
3721 if (msg == NULL) {
3722 pr_err("Invalid address\n");
3723 return -EINVAL;
3724 }
3725 if (data->type == QSEECOM_LISTENER_SERVICE) {
3726 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3727 this_lstnr = __qseecom_find_svc(data->listener.id);
3728 if (IS_ERR_OR_NULL(this_lstnr)) {
3729 pr_err("Invalid listener ID\n");
3730 return -ENOMEM;
3731 }
3732 } else {
3733 req = (struct qseecom_send_modfd_cmd_req *)msg;
3734 }
3735
3736 for (i = 0; i < MAX_ION_FD; i++) {
3737 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3738 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003739 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003740 req->ifd_data[i].fd);
3741 if (IS_ERR_OR_NULL(ihandle)) {
3742 pr_err("Ion client can't retrieve the handle\n");
3743 return -ENOMEM;
3744 }
3745 field = (char *) req->cmd_req_buf +
3746 req->ifd_data[i].cmd_buf_offset;
3747 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3748 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003749 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003750 lstnr_resp->ifd_data[i].fd);
3751 if (IS_ERR_OR_NULL(ihandle)) {
3752 pr_err("Ion client can't retrieve the handle\n");
3753 return -ENOMEM;
3754 }
3755 field = lstnr_resp->resp_buf_ptr +
3756 lstnr_resp->ifd_data[i].cmd_buf_offset;
3757 } else {
3758 continue;
3759 }
3760 /* Populate the cmd data structure with the phys_addr */
3761 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3762 if (IS_ERR_OR_NULL(sg_ptr)) {
3763			pr_err("Ion client could not retrieve sg table\n");
3764 goto err;
3765 }
3766 if (sg_ptr->nents == 0) {
3767 pr_err("Num of scattered entries is 0\n");
3768 goto err;
3769 }
3770 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3771			pr_warn("Num of scattered entries (%d) is greater than %d\n",
3772				sg_ptr->nents,
3773				QSEECOM_MAX_SG_ENTRY);
3774 if (cleanup) {
3775 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3776 data->client.sec_buf_fd[i].vbase)
3777 dma_free_coherent(qseecom.pdev,
3778 data->client.sec_buf_fd[i].size,
3779 data->client.sec_buf_fd[i].vbase,
3780 data->client.sec_buf_fd[i].pbase);
3781 } else {
3782 ret = __qseecom_allocate_sg_list_buffer(data,
3783 field, i, sg_ptr);
3784 if (ret) {
3785 pr_err("Failed to allocate sg list buffer\n");
3786 goto err;
3787 }
3788 }
3789 len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
3790 sg = sg_ptr->sgl;
3791 goto cleanup;
3792 }
3793 sg = sg_ptr->sgl;
3794 if (sg_ptr->nents == 1) {
3795 uint64_t *update_64bit;
3796
3797 if (__boundary_checks_offset(req, lstnr_resp, data, i))
3798 goto err;
3799 /* 64bit app uses 64bit address */
3800 update_64bit = (uint64_t *) field;
3801 *update_64bit = cleanup ? 0 :
3802 (uint64_t)sg_dma_address(sg_ptr->sgl);
3803 len += (uint32_t)sg->length;
3804 } else {
3805 struct qseecom_sg_entry_64bit *update_64bit;
3806 int j = 0;
3807
3808 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3809 (req->ifd_data[i].fd > 0)) {
3810
3811 if ((req->cmd_req_len <
3812 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3813 (req->ifd_data[i].cmd_buf_offset >
3814 (req->cmd_req_len -
3815 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3816 pr_err("Invalid offset = 0x%x\n",
3817 req->ifd_data[i].cmd_buf_offset);
3818 goto err;
3819 }
3820
3821 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3822 (lstnr_resp->ifd_data[i].fd > 0)) {
3823
3824 if ((lstnr_resp->resp_len <
3825 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3826 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3827 (lstnr_resp->resp_len -
3828 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3829 goto err;
3830 }
3831 }
3832 /* 64bit app uses 64bit address */
3833 update_64bit = (struct qseecom_sg_entry_64bit *)field;
3834 for (j = 0; j < sg_ptr->nents; j++) {
3835 update_64bit->phys_addr = cleanup ? 0 :
3836 (uint64_t)sg_dma_address(sg);
3837 update_64bit->len = cleanup ? 0 :
3838 (uint32_t)sg->length;
3839 update_64bit++;
3840 len += sg->length;
3841 sg = sg_next(sg);
3842 }
3843 }
3844cleanup:
3845 if (cleanup) {
3846 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3847 ihandle, NULL, len,
3848 ION_IOC_INV_CACHES);
3849 if (ret) {
3850 pr_err("cache operation failed %d\n", ret);
3851 goto err;
3852 }
3853 } else {
3854 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3855 ihandle, NULL, len,
3856 ION_IOC_CLEAN_INV_CACHES);
3857 if (ret) {
3858 pr_err("cache operation failed %d\n", ret);
3859 goto err;
3860 }
3861 if (data->type == QSEECOM_CLIENT_APP) {
3862 offset = req->ifd_data[i].cmd_buf_offset;
3863 data->sglistinfo_ptr[i].indexAndFlags =
3864 SGLISTINFO_SET_INDEX_FLAG(
3865 (sg_ptr->nents == 1), 1, offset);
3866 data->sglistinfo_ptr[i].sizeOrCount =
3867 (sg_ptr->nents == 1) ?
3868 sg->length : sg_ptr->nents;
3869 data->sglist_cnt = i + 1;
3870 } else {
3871 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3872 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3873 (uintptr_t)this_lstnr->sb_virt);
3874 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3875 SGLISTINFO_SET_INDEX_FLAG(
3876 (sg_ptr->nents == 1), 1, offset);
3877 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3878 (sg_ptr->nents == 1) ?
3879 sg->length : sg_ptr->nents;
3880 this_lstnr->sglist_cnt = i + 1;
3881 }
3882 }
3883 /* Deallocate the handle */
3884 if (!IS_ERR_OR_NULL(ihandle))
3885 ion_free(qseecom.ion_clnt, ihandle);
3886 }
3887 return ret;
3888err:
3889 for (i = 0; i < MAX_ION_FD; i++)
3890 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3891 data->client.sec_buf_fd[i].vbase)
3892 dma_free_coherent(qseecom.pdev,
3893 data->client.sec_buf_fd[i].size,
3894 data->client.sec_buf_fd[i].vbase,
3895 data->client.sec_buf_fd[i].pbase);
3896 if (!IS_ERR_OR_NULL(ihandle))
3897 ion_free(qseecom.ion_clnt, ihandle);
3898 return -ENOMEM;
3899}
3900
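/*
 * Common implementation for the modfd send-command ioctls: validate the
 * request, translate the user buffers to kernel addresses, patch in the
 * ION fd physical addresses, send the command, then undo the patching.
 */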
3901static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3902 void __user *argp,
3903 bool is_64bit_addr)
3904{
3905 int ret = 0;
3906 int i;
3907 struct qseecom_send_modfd_cmd_req req;
3908 struct qseecom_send_cmd_req send_cmd_req;
3909
3910 ret = copy_from_user(&req, argp, sizeof(req));
3911 if (ret) {
3912 pr_err("copy_from_user failed\n");
3913 return ret;
3914 }
3915
3916 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3917 send_cmd_req.cmd_req_len = req.cmd_req_len;
3918 send_cmd_req.resp_buf = req.resp_buf;
3919 send_cmd_req.resp_len = req.resp_len;
3920
3921 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3922 return -EINVAL;
3923
3924 /* validate offsets */
3925 for (i = 0; i < MAX_ION_FD; i++) {
3926 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3927 pr_err("Invalid offset %d = 0x%x\n",
3928 i, req.ifd_data[i].cmd_buf_offset);
3929 return -EINVAL;
3930 }
3931 }
3932 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3933 (uintptr_t)req.cmd_req_buf);
3934 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3935 (uintptr_t)req.resp_buf);
3936
3937 if (!is_64bit_addr) {
3938 ret = __qseecom_update_cmd_buf(&req, false, data);
3939 if (ret)
3940 return ret;
3941 ret = __qseecom_send_cmd(data, &send_cmd_req);
3942 if (ret)
3943 return ret;
3944 ret = __qseecom_update_cmd_buf(&req, true, data);
3945 if (ret)
3946 return ret;
3947 } else {
3948 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3949 if (ret)
3950 return ret;
3951 ret = __qseecom_send_cmd(data, &send_cmd_req);
3952 if (ret)
3953 return ret;
3954 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3955 if (ret)
3956 return ret;
3957 }
3958
3959 return ret;
3960}
3961
3962static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3963 void __user *argp)
3964{
3965 return __qseecom_send_modfd_cmd(data, argp, false);
3966}
3967
3968static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
3969 void __user *argp)
3970{
3971 return __qseecom_send_modfd_cmd(data, argp, true);
3972}
3973
3974
3975
3976static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3977 struct qseecom_registered_listener_list *svc)
3978{
3979 int ret;
3980
Zhen Kongf5087172018-10-11 17:22:05 -07003981 ret = (svc->rcv_req_flag == 1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08003982 return ret || data->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003983}
3984
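/*
 * Block the calling listener until a request is flagged for it (or the
 * client is aborting), then clear rcv_req_flag and return.
 */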
3985static int qseecom_receive_req(struct qseecom_dev_handle *data)
3986{
3987 int ret = 0;
3988 struct qseecom_registered_listener_list *this_lstnr;
3989
Zhen Kongbcdeda22018-11-16 13:50:51 -08003990 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003991 this_lstnr = __qseecom_find_svc(data->listener.id);
3992 if (!this_lstnr) {
3993 pr_err("Invalid listener ID\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08003994 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003995 return -ENODATA;
3996 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08003997 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003998
3999 while (1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05304000 if (wait_event_interruptible(this_lstnr->rcv_req_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004001 __qseecom_listener_has_rcvd_req(data,
4002 this_lstnr))) {
Zhen Kong52ce9062018-09-24 14:33:27 -07004003 pr_debug("Interrupted: exiting Listener Service = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004004 (uint32_t)data->listener.id);
4005 /* woken up for different reason */
4006 return -ERESTARTSYS;
4007 }
4008
Zhen Kongbcdeda22018-11-16 13:50:51 -08004009 if (data->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004010 pr_err("Aborting Listener Service = %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07004011 (uint32_t)data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004012 return -ENODEV;
4013 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08004014 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004015 this_lstnr->rcv_req_flag = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08004016 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004017 break;
4018 }
4019 return ret;
4020}
4021
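/*
 * Basic ELF sanity checks on a TA .mdt image: magic bytes, a non-zero
 * program header count, and program headers contained within the file,
 * for both 32-bit and 64-bit images.
 */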
4022static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
4023{
4024 unsigned char app_arch = 0;
4025 struct elf32_hdr *ehdr;
4026 struct elf64_hdr *ehdr64;
4027
4028 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4029
4030 switch (app_arch) {
4031 case ELFCLASS32: {
4032 ehdr = (struct elf32_hdr *)fw_entry->data;
4033 if (fw_entry->size < sizeof(*ehdr)) {
4034 pr_err("%s: Not big enough to be an elf32 header\n",
4035 qseecom.pdev->init_name);
4036 return false;
4037 }
4038 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
4039 pr_err("%s: Not an elf32 header\n",
4040 qseecom.pdev->init_name);
4041 return false;
4042 }
4043 if (ehdr->e_phnum == 0) {
4044 pr_err("%s: No loadable segments\n",
4045 qseecom.pdev->init_name);
4046 return false;
4047 }
4048 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
4049 sizeof(struct elf32_hdr) > fw_entry->size) {
4050 pr_err("%s: Program headers not within mdt\n",
4051 qseecom.pdev->init_name);
4052 return false;
4053 }
4054 break;
4055 }
4056 case ELFCLASS64: {
4057 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4058 if (fw_entry->size < sizeof(*ehdr64)) {
4059 pr_err("%s: Not big enough to be an elf64 header\n",
4060 qseecom.pdev->init_name);
4061 return false;
4062 }
4063 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
4064 pr_err("%s: Not an elf64 header\n",
4065 qseecom.pdev->init_name);
4066 return false;
4067 }
4068 if (ehdr64->e_phnum == 0) {
4069 pr_err("%s: No loadable segments\n",
4070 qseecom.pdev->init_name);
4071 return false;
4072 }
4073 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
4074 sizeof(struct elf64_hdr) > fw_entry->size) {
4075 pr_err("%s: Program headers not within mdt\n",
4076 qseecom.pdev->init_name);
4077 return false;
4078 }
4079 break;
4080 }
4081 default: {
4082 pr_err("QSEE app arch %u is not supported\n", app_arch);
4083 return false;
4084 }
4085 }
4086 return true;
4087}
4088
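/*
 * Sum the sizes of <appname>.mdt and every <appname>.bNN split image to
 * determine the total firmware size, and report the app's ELF class.
 */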
4089static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
4090 uint32_t *app_arch)
4091{
4092 int ret = -1;
4093 int i = 0, rc = 0;
4094 const struct firmware *fw_entry = NULL;
4095 char fw_name[MAX_APP_NAME_SIZE];
4096 struct elf32_hdr *ehdr;
4097 struct elf64_hdr *ehdr64;
4098 int num_images = 0;
4099
4100 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4101 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4102 if (rc) {
4103 pr_err("error with request_firmware\n");
4104 ret = -EIO;
4105 goto err;
4106 }
4107 if (!__qseecom_is_fw_image_valid(fw_entry)) {
4108 ret = -EIO;
4109 goto err;
4110 }
4111 *app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4112 *fw_size = fw_entry->size;
4113 if (*app_arch == ELFCLASS32) {
4114 ehdr = (struct elf32_hdr *)fw_entry->data;
4115 num_images = ehdr->e_phnum;
4116 } else if (*app_arch == ELFCLASS64) {
4117 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4118 num_images = ehdr64->e_phnum;
4119 } else {
4120 pr_err("QSEE %s app, arch %u is not supported\n",
4121 appname, *app_arch);
4122 ret = -EIO;
4123 goto err;
4124 }
4125 pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
4126 release_firmware(fw_entry);
4127 fw_entry = NULL;
4128 for (i = 0; i < num_images; i++) {
4129 memset(fw_name, 0, sizeof(fw_name));
4130 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4131 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4132 if (ret)
4133 goto err;
4134 if (*fw_size > U32_MAX - fw_entry->size) {
4135 pr_err("QSEE %s app file size overflow\n", appname);
4136 ret = -EINVAL;
4137 goto err;
4138 }
4139 *fw_size += fw_entry->size;
4140 release_firmware(fw_entry);
4141 fw_entry = NULL;
4142 }
4143
4144 return ret;
4145err:
4146 if (fw_entry)
4147 release_firmware(fw_entry);
4148 *fw_size = 0;
4149 return ret;
4150}
4151
4152static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
4153 uint32_t fw_size,
4154 struct qseecom_load_app_ireq *load_req)
4155{
4156 int ret = -1;
4157 int i = 0, rc = 0;
4158 const struct firmware *fw_entry = NULL;
4159 char fw_name[MAX_APP_NAME_SIZE];
4160 u8 *img_data_ptr = img_data;
4161 struct elf32_hdr *ehdr;
4162 struct elf64_hdr *ehdr64;
4163 int num_images = 0;
4164 unsigned char app_arch = 0;
4165
4166 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4167 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4168 if (rc) {
4169 ret = -EIO;
4170 goto err;
4171 }
4172
4173 load_req->img_len = fw_entry->size;
4174 if (load_req->img_len > fw_size) {
4175 pr_err("app %s size %zu is larger than buf size %u\n",
4176 appname, fw_entry->size, fw_size);
4177 ret = -EINVAL;
4178 goto err;
4179 }
4180 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4181 img_data_ptr = img_data_ptr + fw_entry->size;
4182 load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/
4183
4184 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4185 if (app_arch == ELFCLASS32) {
4186 ehdr = (struct elf32_hdr *)fw_entry->data;
4187 num_images = ehdr->e_phnum;
4188 } else if (app_arch == ELFCLASS64) {
4189 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4190 num_images = ehdr64->e_phnum;
4191 } else {
4192 pr_err("QSEE %s app, arch %u is not supported\n",
4193 appname, app_arch);
4194 ret = -EIO;
4195 goto err;
4196 }
4197 release_firmware(fw_entry);
4198 fw_entry = NULL;
4199 for (i = 0; i < num_images; i++) {
4200 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4201 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4202 if (ret) {
4203 pr_err("Failed to locate blob %s\n", fw_name);
4204 goto err;
4205 }
4206 if ((fw_entry->size > U32_MAX - load_req->img_len) ||
4207 (fw_entry->size + load_req->img_len > fw_size)) {
4208 pr_err("Invalid file size for %s\n", fw_name);
4209 ret = -EINVAL;
4210 goto err;
4211 }
4212 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4213 img_data_ptr = img_data_ptr + fw_entry->size;
4214 load_req->img_len += fw_entry->size;
4215 release_firmware(fw_entry);
4216 fw_entry = NULL;
4217 }
4218 return ret;
4219err:
4220 release_firmware(fw_entry);
4221 return ret;
4222}
4223
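/*
 * Allocate an ION buffer (retrying on transient failure) to stage a TA
 * image, returning its kernel mapping and physical address.
 */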
4224static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
4225 u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
4226{
4227 size_t len = 0;
4228 int ret = 0;
4229 ion_phys_addr_t pa;
4230 struct ion_handle *ihandle = NULL;
4231 u8 *img_data = NULL;
Zhen Kong3dd92792017-12-08 09:47:15 -08004232 int retry = 0;
Zhen Konge30e1342019-01-22 08:57:02 -08004233 int ion_flag = ION_FLAG_CACHED;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004234
Zhen Kong3dd92792017-12-08 09:47:15 -08004235 do {
Zhen Kong5d02be92018-05-29 16:17:29 -07004236 if (retry++) {
4237 mutex_unlock(&app_access_lock);
Zhen Kong3dd92792017-12-08 09:47:15 -08004238 msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
Zhen Kong5d02be92018-05-29 16:17:29 -07004239 mutex_lock(&app_access_lock);
4240 }
Zhen Kong3dd92792017-12-08 09:47:15 -08004241 ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
Zhen Konge30e1342019-01-22 08:57:02 -08004242 SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), ion_flag);
Zhen Kong3dd92792017-12-08 09:47:15 -08004243 } while (IS_ERR_OR_NULL(ihandle) &&
4244 (retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004245
4246 if (IS_ERR_OR_NULL(ihandle)) {
4247 pr_err("ION alloc failed\n");
4248 return -ENOMEM;
4249 }
4250 img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
4251 ihandle);
4252
4253 if (IS_ERR_OR_NULL(img_data)) {
4254 pr_err("ION memory mapping for image loading failed\n");
4255 ret = -ENOMEM;
4256 goto exit_ion_free;
4257 }
4258 /* Get the physical address of the ION BUF */
4259 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
4260 if (ret) {
4261 pr_err("physical memory retrieval failure\n");
4262 ret = -EIO;
4263 goto exit_ion_unmap_kernel;
4264 }
4265
4266 *pihandle = ihandle;
4267 *data = img_data;
4268 *paddr = pa;
4269 return ret;
4270
4271exit_ion_unmap_kernel:
4272 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
4273exit_ion_free:
4274 ion_free(qseecom.ion_clnt, ihandle);
4275 ihandle = NULL;
4276 return ret;
4277}
4278
4279static void __qseecom_free_img_data(struct ion_handle **ihandle)
4280{
4281 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4282 ion_free(qseecom.ion_clnt, *ihandle);
4283 *ihandle = NULL;
4284}
4285
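/*
 * Load a TA from the firmware partition: make sure the matching cmnlib /
 * cmnlib64 is resident, stage the image in ION memory, and issue the
 * QSEOS_APP_START_COMMAND SCM call to obtain the application id.
 */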
4286static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4287 uint32_t *app_id)
4288{
4289 int ret = -1;
4290 uint32_t fw_size = 0;
4291 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4292 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4293 struct qseecom_command_scm_resp resp;
4294 u8 *img_data = NULL;
4295 ion_phys_addr_t pa = 0;
4296 struct ion_handle *ihandle = NULL;
4297 void *cmd_buf = NULL;
4298 size_t cmd_len;
4299 uint32_t app_arch = 0;
4300
4301 if (!data || !appname || !app_id) {
4302 pr_err("Null pointer to data or appname or appid\n");
4303 return -EINVAL;
4304 }
4305 *app_id = 0;
4306 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4307 return -EIO;
4308 data->client.app_arch = app_arch;
4309
4310 /* Check and load cmnlib */
4311 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4312 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4313 ret = qseecom_load_commonlib_image(data, "cmnlib");
4314 if (ret) {
4315 pr_err("failed to load cmnlib\n");
4316 return -EIO;
4317 }
4318 qseecom.commonlib_loaded = true;
4319 pr_debug("cmnlib is loaded\n");
4320 }
4321
4322 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4323 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4324 if (ret) {
4325 pr_err("failed to load cmnlib64\n");
4326 return -EIO;
4327 }
4328 qseecom.commonlib64_loaded = true;
4329 pr_debug("cmnlib64 is loaded\n");
4330 }
4331 }
4332
4333 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4334 if (ret)
4335 return ret;
4336
4337 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4338 if (ret) {
4339 ret = -EIO;
4340 goto exit_free_img_data;
4341 }
4342
4343 /* Populate the load_req parameters */
4344 if (qseecom.qsee_version < QSEE_VERSION_40) {
4345 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4346		/* mdt_len and img_len are already populated by __qseecom_get_fw_data() */
4348 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4349 load_req.phy_addr = (uint32_t)pa;
4350 cmd_buf = (void *)&load_req;
4351 cmd_len = sizeof(struct qseecom_load_app_ireq);
4352 } else {
4353 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4354 load_req_64bit.mdt_len = load_req.mdt_len;
4355 load_req_64bit.img_len = load_req.img_len;
4356 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4357 load_req_64bit.phy_addr = (uint64_t)pa;
4358 cmd_buf = (void *)&load_req_64bit;
4359 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4360 }
4361
4362 if (qseecom.support_bus_scaling) {
4363 mutex_lock(&qsee_bw_mutex);
4364 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4365 mutex_unlock(&qsee_bw_mutex);
4366 if (ret) {
4367 ret = -EIO;
4368 goto exit_free_img_data;
4369 }
4370 }
4371
4372 ret = __qseecom_enable_clk_scale_up(data);
4373 if (ret) {
4374 ret = -EIO;
4375 goto exit_unregister_bus_bw_need;
4376 }
4377
4378 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4379 img_data, fw_size,
4380 ION_IOC_CLEAN_INV_CACHES);
4381 if (ret) {
4382 pr_err("cache operation failed %d\n", ret);
4383 goto exit_disable_clk_vote;
4384 }
4385
4386 /* SCM_CALL to load the image */
4387 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4388 &resp, sizeof(resp));
4389 if (ret) {
Zhen Kong5d02be92018-05-29 16:17:29 -07004390 pr_err("scm_call to load failed : ret %d, result %x\n",
4391 ret, resp.result);
4392 if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
4393 ret = -EEXIST;
4394 else
4395 ret = -EIO;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004396 goto exit_disable_clk_vote;
4397 }
4398
4399 switch (resp.result) {
4400 case QSEOS_RESULT_SUCCESS:
4401 *app_id = resp.data;
4402 break;
4403 case QSEOS_RESULT_INCOMPLETE:
4404 ret = __qseecom_process_incomplete_cmd(data, &resp);
4405 if (ret)
4406 pr_err("process_incomplete_cmd FAILED\n");
4407 else
4408 *app_id = resp.data;
4409 break;
4410 case QSEOS_RESULT_FAILURE:
4411		pr_err("scm call failed with response QSEOS_RESULT_FAILURE\n");
4412 break;
4413 default:
4414		pr_err("scm call returned unknown response %d\n", resp.result);
4415 ret = -EINVAL;
4416 break;
4417 }
4418
4419exit_disable_clk_vote:
4420 __qseecom_disable_clk_scale_down(data);
4421
4422exit_unregister_bus_bw_need:
4423 if (qseecom.support_bus_scaling) {
4424 mutex_lock(&qsee_bw_mutex);
4425 qseecom_unregister_bus_bandwidth_needs(data);
4426 mutex_unlock(&qsee_bw_mutex);
4427 }
4428
4429exit_free_img_data:
4430 __qseecom_free_img_data(&ihandle);
4431 return ret;
4432}
4433
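/*
 * Stage and load a common library image (cmnlib/cmnlib64) into QSEE via
 * QSEOS_LOAD_SERV_IMAGE_COMMAND.
 */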
4434static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
4435 char *cmnlib_name)
4436{
4437 int ret = 0;
4438 uint32_t fw_size = 0;
4439 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4440 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4441 struct qseecom_command_scm_resp resp;
4442 u8 *img_data = NULL;
4443 ion_phys_addr_t pa = 0;
4444 void *cmd_buf = NULL;
4445 size_t cmd_len;
4446 uint32_t app_arch = 0;
Zhen Kong3bafb312017-10-18 10:27:20 -07004447 struct ion_handle *cmnlib_ion_handle = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004448
4449 if (!cmnlib_name) {
4450 pr_err("cmnlib_name is NULL\n");
4451 return -EINVAL;
4452 }
4453 if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
4454 pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
4455 cmnlib_name, strlen(cmnlib_name));
4456 return -EINVAL;
4457 }
4458
4459 if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
4460 return -EIO;
4461
Zhen Kong3bafb312017-10-18 10:27:20 -07004462 ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004463 &img_data, fw_size, &pa);
4464 if (ret)
4465 return -EIO;
4466
4467 ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
4468 if (ret) {
4469 ret = -EIO;
4470 goto exit_free_img_data;
4471 }
4472 if (qseecom.qsee_version < QSEE_VERSION_40) {
4473 load_req.phy_addr = (uint32_t)pa;
4474 load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4475 cmd_buf = (void *)&load_req;
4476 cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
4477 } else {
4478 load_req_64bit.phy_addr = (uint64_t)pa;
4479 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4480 load_req_64bit.img_len = load_req.img_len;
4481 load_req_64bit.mdt_len = load_req.mdt_len;
4482 cmd_buf = (void *)&load_req_64bit;
4483 cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
4484 }
4485
4486 if (qseecom.support_bus_scaling) {
4487 mutex_lock(&qsee_bw_mutex);
4488 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4489 mutex_unlock(&qsee_bw_mutex);
4490 if (ret) {
4491 ret = -EIO;
4492 goto exit_free_img_data;
4493 }
4494 }
4495
4496 /* Vote for the SFPB clock */
4497 ret = __qseecom_enable_clk_scale_up(data);
4498 if (ret) {
4499 ret = -EIO;
4500 goto exit_unregister_bus_bw_need;
4501 }
4502
Zhen Kong3bafb312017-10-18 10:27:20 -07004503 ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004504 img_data, fw_size,
4505 ION_IOC_CLEAN_INV_CACHES);
4506 if (ret) {
4507 pr_err("cache operation failed %d\n", ret);
4508 goto exit_disable_clk_vote;
4509 }
4510
4511 /* SCM_CALL to load the image */
4512 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4513 &resp, sizeof(resp));
4514 if (ret) {
4515 pr_err("scm_call to load failed : ret %d\n", ret);
4516 ret = -EIO;
4517 goto exit_disable_clk_vote;
4518 }
4519
4520 switch (resp.result) {
4521 case QSEOS_RESULT_SUCCESS:
4522 break;
4523 case QSEOS_RESULT_FAILURE:
4524		pr_err("scm call failed w/response result %d\n", resp.result);
4525 ret = -EINVAL;
4526 goto exit_disable_clk_vote;
4527 case QSEOS_RESULT_INCOMPLETE:
4528 ret = __qseecom_process_incomplete_cmd(data, &resp);
4529 if (ret) {
4530 pr_err("process_incomplete_cmd failed err: %d\n", ret);
4531 goto exit_disable_clk_vote;
4532 }
4533 break;
4534 default:
4535		pr_err("scm call returned unknown response %d\n", resp.result);
4536 ret = -EINVAL;
4537 goto exit_disable_clk_vote;
4538 }
4539
4540exit_disable_clk_vote:
4541 __qseecom_disable_clk_scale_down(data);
4542
4543exit_unregister_bus_bw_need:
4544 if (qseecom.support_bus_scaling) {
4545 mutex_lock(&qsee_bw_mutex);
4546 qseecom_unregister_bus_bandwidth_needs(data);
4547 mutex_unlock(&qsee_bw_mutex);
4548 }
4549
4550exit_free_img_data:
Zhen Kong3bafb312017-10-18 10:27:20 -07004551 __qseecom_free_img_data(&cmnlib_ion_handle);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004552 return ret;
4553}
4554
4555static int qseecom_unload_commonlib_image(void)
4556{
4557 int ret = -EINVAL;
4558 struct qseecom_unload_lib_image_ireq unload_req = {0};
4559 struct qseecom_command_scm_resp resp;
4560
4561 /* Populate the remaining parameters */
4562 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4563
4564 /* SCM_CALL to load the image */
4565 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4566 sizeof(struct qseecom_unload_lib_image_ireq),
4567 &resp, sizeof(resp));
4568 if (ret) {
4569 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4570 ret = -EIO;
4571 } else {
4572 switch (resp.result) {
4573 case QSEOS_RESULT_SUCCESS:
4574 break;
4575 case QSEOS_RESULT_FAILURE:
4576			pr_err("scm call failed with resp.result QSEOS_RESULT_FAILURE\n");
4577 break;
4578 default:
4579			pr_err("scm call returned unknown response %d\n",
4580 resp.result);
4581 ret = -EINVAL;
4582 break;
4583 }
4584 }
4585
4586 return ret;
4587}
4588
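/*
 * Kernel-client entry point: allocate a shared buffer of @size, load (or
 * reuse) the named TA, register the handle, and return it to the caller.
 */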
4589int qseecom_start_app(struct qseecom_handle **handle,
4590 char *app_name, uint32_t size)
4591{
4592 int32_t ret = 0;
4593 unsigned long flags = 0;
4594 struct qseecom_dev_handle *data = NULL;
4595 struct qseecom_check_app_ireq app_ireq;
4596 struct qseecom_registered_app_list *entry = NULL;
4597 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4598 bool found_app = false;
4599 size_t len;
4600 ion_phys_addr_t pa;
4601 uint32_t fw_size, app_arch;
4602 uint32_t app_id = 0;
4603
Zhen Kongc4c162a2019-01-23 12:07:12 -08004604 __wakeup_unregister_listener_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004605
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004606 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4607 pr_err("Not allowed to be called in %d state\n",
4608 atomic_read(&qseecom.qseecom_state));
4609 return -EPERM;
4610 }
4611 if (!app_name) {
4612 pr_err("failed to get the app name\n");
4613 return -EINVAL;
4614 }
4615
Zhen Kong64a6d7282017-06-16 11:55:07 -07004616 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004617 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004618 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004619 return -EINVAL;
4620 }
4621
4622 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4623 if (!(*handle))
4624 return -ENOMEM;
4625
4626 data = kzalloc(sizeof(*data), GFP_KERNEL);
4627 if (!data) {
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304628 ret = -ENOMEM;
4629 goto exit_handle_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004630 }
4631 data->abort = 0;
4632 data->type = QSEECOM_CLIENT_APP;
4633 data->released = false;
4634 data->client.sb_length = size;
4635 data->client.user_virt_sb_base = 0;
4636 data->client.ihandle = NULL;
4637
4638 init_waitqueue_head(&data->abort_wq);
4639
4640 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4641 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4642 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4643 pr_err("Ion client could not retrieve the handle\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304644 ret = -ENOMEM;
4645 goto exit_data_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004646 }
4647 mutex_lock(&app_access_lock);
4648
Zhen Kong5d02be92018-05-29 16:17:29 -07004649recheck:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004650 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4651 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4652 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4653 if (ret)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304654 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004655
4656 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4657 if (app_id) {
4658 pr_warn("App id %d for [%s] app exists\n", app_id,
4659 (char *)app_ireq.app_name);
4660 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4661 list_for_each_entry(entry,
4662 &qseecom.registered_app_list_head, list){
4663 if (entry->app_id == app_id) {
4664 entry->ref_cnt++;
4665 found_app = true;
4666 break;
4667 }
4668 }
4669 spin_unlock_irqrestore(
4670 &qseecom.registered_app_list_lock, flags);
4671 if (!found_app)
4672 pr_warn("App_id %d [%s] was loaded but not registered\n",
4673				app_id, (char *)app_ireq.app_name);
4674 } else {
4675 /* load the app and get the app_id */
4676		pr_debug("%s: Loading app for the first time\n",
4677 qseecom.pdev->init_name);
4678 ret = __qseecom_load_fw(data, app_name, &app_id);
Zhen Kong5d02be92018-05-29 16:17:29 -07004679 if (ret == -EEXIST) {
4680 pr_err("recheck if TA %s is loaded\n", app_name);
4681 goto recheck;
4682 } else if (ret < 0)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304683 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004684 }
4685 data->client.app_id = app_id;
4686 if (!found_app) {
4687 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4688 if (!entry) {
4689 pr_err("kmalloc for app entry failed\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304690 ret = -ENOMEM;
4691 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004692 }
4693 entry->app_id = app_id;
4694 entry->ref_cnt = 1;
4695 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4696 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4697 ret = -EIO;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304698 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004699 }
4700 entry->app_arch = app_arch;
4701 entry->app_blocked = false;
4702 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07004703 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004704 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4705 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4706 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4707 flags);
4708 }
4709
4710 /* Get the physical address of the ION BUF */
4711 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4712 if (ret) {
4713 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4714 ret);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304715 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004716 }
4717
4718 /* Populate the structure for sending scm call to load image */
4719 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4720 data->client.ihandle);
4721 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4722 pr_err("ION memory mapping for client shared buf failed\n");
4723 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304724 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004725 }
4726 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4727 data->client.sb_phys = (phys_addr_t)pa;
4728 (*handle)->dev = (void *)data;
4729 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4730 (*handle)->sbuf_len = data->client.sb_length;
4731
4732 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4733 if (!kclient_entry) {
4734 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304735 goto exit_ion_unmap_kernel;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004736 }
4737 kclient_entry->handle = *handle;
4738
4739 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4740 list_add_tail(&kclient_entry->list,
4741 &qseecom.registered_kclient_list_head);
4742 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4743
4744 mutex_unlock(&app_access_lock);
4745 return 0;
4746
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304747exit_ion_unmap_kernel:
4748 if (!IS_ERR_OR_NULL(data->client.ihandle))
4749 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
4750exit_entry_free:
4751 kfree(entry);
4752exit_ion_free:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004753 mutex_unlock(&app_access_lock);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304754 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
4755 ion_free(qseecom.ion_clnt, data->client.ihandle);
4756 data->client.ihandle = NULL;
4757 }
4758exit_data_free:
4759 kfree(data);
4760exit_handle_free:
4761 if (*handle) {
4762 kfree(*handle);
4763 *handle = NULL;
4764 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004765 return ret;
4766}
4767EXPORT_SYMBOL(qseecom_start_app);
4768
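/*
 * Kernel-client teardown: drop the handle from the registered kclient
 * list, unload the TA, and free the handle on success.
 */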
4769int qseecom_shutdown_app(struct qseecom_handle **handle)
4770{
4771 int ret = -EINVAL;
4772 struct qseecom_dev_handle *data;
4773
4774 struct qseecom_registered_kclient_list *kclient = NULL;
4775 unsigned long flags = 0;
4776 bool found_handle = false;
4777
Zhen Kongc4c162a2019-01-23 12:07:12 -08004778 __wakeup_unregister_listener_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004779
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004780 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4781 pr_err("Not allowed to be called in %d state\n",
4782 atomic_read(&qseecom.qseecom_state));
4783 return -EPERM;
4784 }
4785
4786 if ((handle == NULL) || (*handle == NULL)) {
4787 pr_err("Handle is not initialized\n");
4788 return -EINVAL;
4789 }
4790 data = (struct qseecom_dev_handle *) ((*handle)->dev);
4791 mutex_lock(&app_access_lock);
4792
4793 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4794 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
4795 list) {
4796 if (kclient->handle == (*handle)) {
4797 list_del(&kclient->list);
4798 found_handle = true;
4799 break;
4800 }
4801 }
4802 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4803 if (!found_handle)
4804 pr_err("Unable to find the handle, exiting\n");
4805 else
4806 ret = qseecom_unload_app(data, false);
4807
4808 mutex_unlock(&app_access_lock);
4809 if (ret == 0) {
4810 kzfree(data);
4811 kzfree(*handle);
4812 kzfree(kclient);
4813 *handle = NULL;
4814 }
4815
4816 return ret;
4817}
4818EXPORT_SYMBOL(qseecom_shutdown_app);
4819
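/*
 * Kernel-client command path: wrap the buffers in a send_cmd request,
 * vote for bus bandwidth / crypto clocks as needed, and forward to
 * __qseecom_send_cmd() under app_access_lock.
 */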
4820int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
4821 uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
4822{
4823 int ret = 0;
4824 struct qseecom_send_cmd_req req = {0, 0, 0, 0};
4825 struct qseecom_dev_handle *data;
4826 bool perf_enabled = false;
4827
Zhen Kongc4c162a2019-01-23 12:07:12 -08004828 __wakeup_unregister_listener_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004829
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004830 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4831 pr_err("Not allowed to be called in %d state\n",
4832 atomic_read(&qseecom.qseecom_state));
4833 return -EPERM;
4834 }
4835
4836 if (handle == NULL) {
4837 pr_err("Handle is not initialized\n");
4838 return -EINVAL;
4839 }
4840 data = handle->dev;
4841
4842 req.cmd_req_len = sbuf_len;
4843 req.resp_len = rbuf_len;
4844 req.cmd_req_buf = send_buf;
4845 req.resp_buf = resp_buf;
4846
4847 if (__validate_send_cmd_inputs(data, &req))
4848 return -EINVAL;
4849
4850 mutex_lock(&app_access_lock);
4851 if (qseecom.support_bus_scaling) {
4852 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
4853 if (ret) {
4854 pr_err("Failed to set bw.\n");
4855 mutex_unlock(&app_access_lock);
4856 return ret;
4857 }
4858 }
4859 /*
4860	 * On targets where the crypto clock is handled by HLOS,
4861	 * if clk_access_cnt is zero and perf_enabled is false,
4862	 * then the crypto clock was not enabled before sending the cmd
4863	 * to TZ, so qseecom enables the clock here to avoid a service failure.
4864 */
4865 if (!qseecom.no_clock_support &&
4866 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
4867 pr_debug("ce clock is not enabled!\n");
4868 ret = qseecom_perf_enable(data);
4869 if (ret) {
4870 pr_err("Failed to vote for clock with err %d\n",
4871 ret);
4872 mutex_unlock(&app_access_lock);
4873 return -EINVAL;
4874 }
4875 perf_enabled = true;
4876 }
4877 if (!strcmp(data->client.app_name, "securemm"))
4878 data->use_legacy_cmd = true;
4879
4880 ret = __qseecom_send_cmd(data, &req);
4881 data->use_legacy_cmd = false;
4882 if (qseecom.support_bus_scaling)
4883 __qseecom_add_bw_scale_down_timer(
4884 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
4885
4886 if (perf_enabled) {
4887 qsee_disable_clock_vote(data, CLK_DFAB);
4888 qsee_disable_clock_vote(data, CLK_SFPB);
4889 }
4890
4891 mutex_unlock(&app_access_lock);
4892
4893 if (ret)
4894 return ret;
4895
4896 pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
4897 req.resp_len, req.resp_buf);
4898 return ret;
4899}
4900EXPORT_SYMBOL(qseecom_send_command);
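/*
 * Typical kernel-client flow for the exported qseecom API (illustrative
 * sketch only: it assumes the qseecom_start_app() prototype declared in
 * qseecom_kernel.h, uses placeholder cmd/cmd_len/rsp_len values, and
 * omits error handling):
 *
 *	struct qseecom_handle *handle = NULL;
 *
 *	if (!qseecom_start_app(&handle, "sampleapp", 1024)) {
 *		memcpy(handle->sbuf, cmd, cmd_len);
 *		qseecom_send_command(handle, handle->sbuf, cmd_len,
 *				     handle->sbuf + cmd_len, rsp_len);
 *		qseecom_shutdown_app(&handle);
 *	}
 */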
4901
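/*
 * qseecom_set_bandwidth() - vote for (high == true) or release (high ==
 * false) crypto bandwidth on behalf of a kernel client, going through the
 * bus-scaling registration when support_bus_scaling is set and through
 * direct DFAB/SFPB clock votes otherwise.
 */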
4902int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4903{
4904 int ret = 0;
4905
4906 if ((handle == NULL) || (handle->dev == NULL)) {
4907 pr_err("No valid kernel client\n");
4908 return -EINVAL;
4909 }
4910 if (high) {
4911 if (qseecom.support_bus_scaling) {
4912 mutex_lock(&qsee_bw_mutex);
4913 __qseecom_register_bus_bandwidth_needs(handle->dev,
4914 HIGH);
4915 mutex_unlock(&qsee_bw_mutex);
4916 } else {
4917 ret = qseecom_perf_enable(handle->dev);
4918 if (ret)
4919 pr_err("Failed to vote for clock with err %d\n",
4920 ret);
4921 }
4922 } else {
4923 if (!qseecom.support_bus_scaling) {
4924 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4925 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4926 } else {
4927 mutex_lock(&qsee_bw_mutex);
4928 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4929 mutex_unlock(&qsee_bw_mutex);
4930 }
4931 }
4932 return ret;
4933}
4934EXPORT_SYMBOL(qseecom_set_bandwidth);
4935
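/*
 * qseecom_process_listener_from_smcinvoke() - service a listener request
 * that arrived via the smcinvoke driver. The scm_desc return words are
 * repacked into a qseecom_command_scm_resp, run through the reentrant or
 * legacy incomplete-command path, and the updated result/type/data are
 * written back into desc->ret[] for the caller.
 */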
4936int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
4937{
4938 struct qseecom_registered_app_list dummy_app_entry = { {0} };
4939 struct qseecom_dev_handle dummy_private_data = {0};
4940 struct qseecom_command_scm_resp resp;
4941 int ret = 0;
4942
4943 if (!desc) {
4944 pr_err("desc is NULL\n");
4945 return -EINVAL;
4946 }
4947
4948 resp.result = desc->ret[0]; /*req_cmd*/
Zhen Kong2f60f492017-06-29 15:22:14 -07004949 resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004950 resp.data = desc->ret[2]; /*listener_id*/
4951
Zhen Konge7f525f2017-12-01 18:26:25 -08004952 dummy_private_data.client.app_id = desc->ret[1];
Zhen Kong0ea975d2019-03-12 14:40:24 -07004953 dummy_private_data.client.from_smcinvoke = true;
Zhen Konge7f525f2017-12-01 18:26:25 -08004954 dummy_app_entry.app_id = desc->ret[1];
4955
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004956 mutex_lock(&app_access_lock);
Zhen Kong7458c2e2017-10-19 12:32:07 -07004957 if (qseecom.qsee_reentrancy_support)
4958 ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004959 &dummy_private_data);
Zhen Kong7458c2e2017-10-19 12:32:07 -07004960 else
4961 ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
4962 &resp);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004963 mutex_unlock(&app_access_lock);
4964 if (ret)
Zhen Kong2f60f492017-06-29 15:22:14 -07004965 pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004966 (int)desc->ret[0], (int)desc->ret[2],
4967 (int)desc->ret[1], ret);
4968 desc->ret[0] = resp.result;
4969 desc->ret[1] = resp.resp_type;
4970 desc->ret[2] = resp.data;
4971 return ret;
4972}
4973EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4974
4975static int qseecom_send_resp(void)
4976{
4977 qseecom.send_resp_flag = 1;
4978 wake_up_interruptible(&qseecom.send_resp_wq);
4979 return 0;
4980}
4981
4982static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4983{
4984 struct qseecom_registered_listener_list *this_lstnr = NULL;
4985
4986 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4987 this_lstnr = __qseecom_find_svc(data->listener.id);
4988 if (this_lstnr == NULL)
4989 return -EINVAL;
4990 qseecom.send_resp_flag = 1;
4991 this_lstnr->send_resp_flag = 1;
4992 wake_up_interruptible(&qseecom.send_resp_wq);
4993 return 0;
4994}
4995
4996static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
4997 struct qseecom_send_modfd_listener_resp *resp,
4998 struct qseecom_registered_listener_list *this_lstnr)
4999{
5000 int i;
5001
5002 if (!data || !resp || !this_lstnr) {
5003 pr_err("listener handle or resp msg is null\n");
5004 return -EINVAL;
5005 }
5006
5007 if (resp->resp_buf_ptr == NULL) {
5008 pr_err("resp buffer is null\n");
5009 return -EINVAL;
5010 }
5011 /* validate resp buf length */
5012 if ((resp->resp_len == 0) ||
5013 (resp->resp_len > this_lstnr->sb_length)) {
5014 pr_err("resp buf length %d not valid\n", resp->resp_len);
5015 return -EINVAL;
5016 }
5017
5018 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
5019 pr_err("Integer overflow in resp_len & resp_buf\n");
5020 return -EINVAL;
5021 }
5022 if ((uintptr_t)this_lstnr->user_virt_sb_base >
5023 (ULONG_MAX - this_lstnr->sb_length)) {
5024 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
5025 return -EINVAL;
5026 }
5027 /* validate resp buf */
5028 if (((uintptr_t)resp->resp_buf_ptr <
5029 (uintptr_t)this_lstnr->user_virt_sb_base) ||
5030 ((uintptr_t)resp->resp_buf_ptr >=
5031 ((uintptr_t)this_lstnr->user_virt_sb_base +
5032 this_lstnr->sb_length)) ||
5033 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
5034 ((uintptr_t)this_lstnr->user_virt_sb_base +
5035 this_lstnr->sb_length))) {
5036 pr_err("resp buf is out of shared buffer region\n");
5037 return -EINVAL;
5038 }
5039
5040 /* validate offsets */
5041 for (i = 0; i < MAX_ION_FD; i++) {
5042 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
5043 pr_err("Invalid offset %d = 0x%x\n",
5044 i, resp->ifd_data[i].cmd_buf_offset);
5045 return -EINVAL;
5046 }
5047 }
5048
5049 return 0;
5050}
5051
5052static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5053 void __user *argp, bool is_64bit_addr)
5054{
5055 struct qseecom_send_modfd_listener_resp resp;
5056 struct qseecom_registered_listener_list *this_lstnr = NULL;
5057
5058 if (copy_from_user(&resp, argp, sizeof(resp))) {
5059 pr_err("copy_from_user failed");
5060 return -EINVAL;
5061 }
5062
5063 this_lstnr = __qseecom_find_svc(data->listener.id);
5064 if (this_lstnr == NULL)
5065 return -EINVAL;
5066
5067 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
5068 return -EINVAL;
5069
5070 resp.resp_buf_ptr = this_lstnr->sb_virt +
5071 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
5072
5073 if (!is_64bit_addr)
5074 __qseecom_update_cmd_buf(&resp, false, data);
5075 else
5076 __qseecom_update_cmd_buf_64(&resp, false, data);
5077 qseecom.send_resp_flag = 1;
5078 this_lstnr->send_resp_flag = 1;
5079 wake_up_interruptible(&qseecom.send_resp_wq);
5080 return 0;
5081}
5082
5083static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5084 void __user *argp)
5085{
5086 return __qseecom_send_modfd_resp(data, argp, false);
5087}
5088
5089static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
5090 void __user *argp)
5091{
5092 return __qseecom_send_modfd_resp(data, argp, true);
5093}
5094
5095static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
5096 void __user *argp)
5097{
5098 struct qseecom_qseos_version_req req;
5099
5100 if (copy_from_user(&req, argp, sizeof(req))) {
5101 pr_err("copy_from_user failed");
5102 return -EINVAL;
5103 }
5104 req.qseos_version = qseecom.qseos_version;
5105 if (copy_to_user(argp, &req, sizeof(req))) {
5106 pr_err("copy_to_user failed");
5107 return -EINVAL;
5108 }
5109 return 0;
5110}
5111
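/*
 * __qseecom_enable_clk() - reference-counted enable of the CE core,
 * interface and bus clocks for the selected crypto instance (CLK_QSEE or
 * CLK_CE_DRV). Only the first caller actually prepares/enables the
 * clocks; subsequent callers just increment clk_access_cnt.
 */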
5112static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
5113{
5114 int rc = 0;
5115 struct qseecom_clk *qclk = NULL;
5116
5117 if (qseecom.no_clock_support)
5118 return 0;
5119
5120 if (ce == CLK_QSEE)
5121 qclk = &qseecom.qsee;
5122 if (ce == CLK_CE_DRV)
5123 qclk = &qseecom.ce_drv;
5124
5125 if (qclk == NULL) {
5126 pr_err("CLK type not supported\n");
5127 return -EINVAL;
5128 }
5129 mutex_lock(&clk_access_lock);
5130
5131 if (qclk->clk_access_cnt == ULONG_MAX) {
5132 pr_err("clk_access_cnt beyond limitation\n");
5133 goto err;
5134 }
5135 if (qclk->clk_access_cnt > 0) {
5136 qclk->clk_access_cnt++;
5137 mutex_unlock(&clk_access_lock);
5138 return rc;
5139 }
5140
5141 /* Enable CE core clk */
5142 if (qclk->ce_core_clk != NULL) {
5143 rc = clk_prepare_enable(qclk->ce_core_clk);
5144 if (rc) {
5145 pr_err("Unable to enable/prepare CE core clk\n");
5146 goto err;
5147 }
5148 }
5149 /* Enable CE clk */
5150 if (qclk->ce_clk != NULL) {
5151 rc = clk_prepare_enable(qclk->ce_clk);
5152 if (rc) {
5153 pr_err("Unable to enable/prepare CE iface clk\n");
5154 goto ce_clk_err;
5155 }
5156 }
5157 /* Enable AXI clk */
5158 if (qclk->ce_bus_clk != NULL) {
5159 rc = clk_prepare_enable(qclk->ce_bus_clk);
5160 if (rc) {
5161 pr_err("Unable to enable/prepare CE bus clk\n");
5162 goto ce_bus_clk_err;
5163 }
5164 }
5165 qclk->clk_access_cnt++;
5166 mutex_unlock(&clk_access_lock);
5167 return 0;
5168
5169ce_bus_clk_err:
5170 if (qclk->ce_clk != NULL)
5171 clk_disable_unprepare(qclk->ce_clk);
5172ce_clk_err:
5173 if (qclk->ce_core_clk != NULL)
5174 clk_disable_unprepare(qclk->ce_core_clk);
5175err:
5176 mutex_unlock(&clk_access_lock);
5177 return -EIO;
5178}
5179
5180static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5181{
5182 struct qseecom_clk *qclk;
5183
5184 if (qseecom.no_clock_support)
5185 return;
5186
5187 if (ce == CLK_QSEE)
5188 qclk = &qseecom.qsee;
5189 else
5190 qclk = &qseecom.ce_drv;
5191
5192 mutex_lock(&clk_access_lock);
5193
5194 if (qclk->clk_access_cnt == 0) {
5195 mutex_unlock(&clk_access_lock);
5196 return;
5197 }
5198
5199 if (qclk->clk_access_cnt == 1) {
5200 if (qclk->ce_clk != NULL)
5201 clk_disable_unprepare(qclk->ce_clk);
5202 if (qclk->ce_core_clk != NULL)
5203 clk_disable_unprepare(qclk->ce_core_clk);
5204 if (qclk->ce_bus_clk != NULL)
5205 clk_disable_unprepare(qclk->ce_bus_clk);
5206 }
5207 qclk->clk_access_cnt--;
5208 mutex_unlock(&clk_access_lock);
5209}
5210
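/*
 * qsee_vote_for_clock() - take a reference-counted DFAB or SFPB bandwidth
 * vote for this client through the msm bus-scaling client, enabling the
 * QSEE clocks first when a CE core source clock is present. Counts are
 * tracked in qsee_bw_count/qsee_sfpb_bw_count under qsee_bw_mutex.
 */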
5211static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
5212 int32_t clk_type)
5213{
5214 int ret = 0;
5215 struct qseecom_clk *qclk;
5216
5217 if (qseecom.no_clock_support)
5218 return 0;
5219
5220 qclk = &qseecom.qsee;
5221 if (!qseecom.qsee_perf_client)
5222 return ret;
5223
5224 switch (clk_type) {
5225 case CLK_DFAB:
5226 mutex_lock(&qsee_bw_mutex);
5227 if (!qseecom.qsee_bw_count) {
5228 if (qseecom.qsee_sfpb_bw_count > 0)
5229 ret = msm_bus_scale_client_update_request(
5230 qseecom.qsee_perf_client, 3);
5231 else {
5232 if (qclk->ce_core_src_clk != NULL)
5233 ret = __qseecom_enable_clk(CLK_QSEE);
5234 if (!ret) {
5235 ret =
5236 msm_bus_scale_client_update_request(
5237 qseecom.qsee_perf_client, 1);
5238 if ((ret) &&
5239 (qclk->ce_core_src_clk != NULL))
5240 __qseecom_disable_clk(CLK_QSEE);
5241 }
5242 }
5243 if (ret)
5244 pr_err("DFAB Bandwidth req failed (%d)\n",
5245 ret);
5246 else {
5247 qseecom.qsee_bw_count++;
5248 data->perf_enabled = true;
5249 }
5250 } else {
5251 qseecom.qsee_bw_count++;
5252 data->perf_enabled = true;
5253 }
5254 mutex_unlock(&qsee_bw_mutex);
5255 break;
5256 case CLK_SFPB:
5257 mutex_lock(&qsee_bw_mutex);
5258 if (!qseecom.qsee_sfpb_bw_count) {
5259 if (qseecom.qsee_bw_count > 0)
5260 ret = msm_bus_scale_client_update_request(
5261 qseecom.qsee_perf_client, 3);
5262 else {
5263 if (qclk->ce_core_src_clk != NULL)
5264 ret = __qseecom_enable_clk(CLK_QSEE);
5265 if (!ret) {
5266 ret =
5267 msm_bus_scale_client_update_request(
5268 qseecom.qsee_perf_client, 2);
5269 if ((ret) &&
5270 (qclk->ce_core_src_clk != NULL))
5271 __qseecom_disable_clk(CLK_QSEE);
5272 }
5273 }
5274
5275 if (ret)
5276 pr_err("SFPB Bandwidth req failed (%d)\n",
5277 ret);
5278 else {
5279 qseecom.qsee_sfpb_bw_count++;
5280 data->fast_load_enabled = true;
5281 }
5282 } else {
5283 qseecom.qsee_sfpb_bw_count++;
5284 data->fast_load_enabled = true;
5285 }
5286 mutex_unlock(&qsee_bw_mutex);
5287 break;
5288 default:
5289 pr_err("Clock type not defined\n");
5290 break;
5291 }
5292 return ret;
5293}
5294
5295static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5296 int32_t clk_type)
5297{
5298 int32_t ret = 0;
5299 struct qseecom_clk *qclk;
5300
5301 qclk = &qseecom.qsee;
5302
5303 if (qseecom.no_clock_support)
5304 return;
5305 if (!qseecom.qsee_perf_client)
5306 return;
5307
5308 switch (clk_type) {
5309 case CLK_DFAB:
5310 mutex_lock(&qsee_bw_mutex);
5311 if (qseecom.qsee_bw_count == 0) {
5312			pr_err("Client error. Extra call to disable DFAB clk\n");
5313 mutex_unlock(&qsee_bw_mutex);
5314 return;
5315 }
5316
5317 if (qseecom.qsee_bw_count == 1) {
5318 if (qseecom.qsee_sfpb_bw_count > 0)
5319 ret = msm_bus_scale_client_update_request(
5320 qseecom.qsee_perf_client, 2);
5321 else {
5322 ret = msm_bus_scale_client_update_request(
5323 qseecom.qsee_perf_client, 0);
5324 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5325 __qseecom_disable_clk(CLK_QSEE);
5326 }
5327 if (ret)
5328				pr_err("DFAB Bandwidth req fail (%d)\n",
5329					ret);
5330 else {
5331 qseecom.qsee_bw_count--;
5332 data->perf_enabled = false;
5333 }
5334 } else {
5335 qseecom.qsee_bw_count--;
5336 data->perf_enabled = false;
5337 }
5338 mutex_unlock(&qsee_bw_mutex);
5339 break;
5340 case CLK_SFPB:
5341 mutex_lock(&qsee_bw_mutex);
5342 if (qseecom.qsee_sfpb_bw_count == 0) {
5343			pr_err("Client error. Extra call to disable SFPB clk\n");
5344 mutex_unlock(&qsee_bw_mutex);
5345 return;
5346 }
5347 if (qseecom.qsee_sfpb_bw_count == 1) {
5348 if (qseecom.qsee_bw_count > 0)
5349 ret = msm_bus_scale_client_update_request(
5350 qseecom.qsee_perf_client, 1);
5351 else {
5352 ret = msm_bus_scale_client_update_request(
5353 qseecom.qsee_perf_client, 0);
5354 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5355 __qseecom_disable_clk(CLK_QSEE);
5356 }
5357 if (ret)
5358 pr_err("SFPB Bandwidth req fail (%d)\n",
5359 ret);
5360 else {
5361 qseecom.qsee_sfpb_bw_count--;
5362 data->fast_load_enabled = false;
5363 }
5364 } else {
5365 qseecom.qsee_sfpb_bw_count--;
5366 data->fast_load_enabled = false;
5367 }
5368 mutex_unlock(&qsee_bw_mutex);
5369 break;
5370 default:
5371 pr_err("Clock type not defined\n");
5372 break;
5373 }
5374
5375}
5376
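/*
 * qseecom_load_external_elf() - load an external ELF image, described by
 * an ion buffer fd from userspace, into QSEE. The buffer is validated,
 * cache-cleaned and handed to TZ with QSEOS_LOAD_EXTERNAL_ELF_COMMAND,
 * using the 32-bit or 64-bit request layout depending on the QSEE
 * version, with bandwidth/clock votes held around the SCM call.
 */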
5377static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5378 void __user *argp)
5379{
5380 struct ion_handle *ihandle; /* Ion handle */
5381 struct qseecom_load_img_req load_img_req;
5382 int uret = 0;
5383 int ret;
5384 ion_phys_addr_t pa = 0;
5385 size_t len;
5386 struct qseecom_load_app_ireq load_req;
5387 struct qseecom_load_app_64bit_ireq load_req_64bit;
5388 struct qseecom_command_scm_resp resp;
5389 void *cmd_buf = NULL;
5390 size_t cmd_len;
5391 /* Copy the relevant information needed for loading the image */
5392 if (copy_from_user(&load_img_req,
5393 (void __user *)argp,
5394 sizeof(struct qseecom_load_img_req))) {
5395 pr_err("copy_from_user failed\n");
5396 return -EFAULT;
5397 }
5398
5399 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005400 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005401 load_img_req.ifd_data_fd);
5402 if (IS_ERR_OR_NULL(ihandle)) {
5403 pr_err("Ion client could not retrieve the handle\n");
5404 return -ENOMEM;
5405 }
5406
5407 /* Get the physical address of the ION BUF */
5408 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5409 if (ret) {
5410 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5411 ret);
5412 return ret;
5413 }
5414 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5415 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5416 len, load_img_req.mdt_len,
5417 load_img_req.img_len);
5418		return -EINVAL;
5419 }
5420 /* Populate the structure for sending scm call to load image */
5421 if (qseecom.qsee_version < QSEE_VERSION_40) {
5422 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5423 load_req.mdt_len = load_img_req.mdt_len;
5424 load_req.img_len = load_img_req.img_len;
5425 load_req.phy_addr = (uint32_t)pa;
5426 cmd_buf = (void *)&load_req;
5427 cmd_len = sizeof(struct qseecom_load_app_ireq);
5428 } else {
5429 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5430 load_req_64bit.mdt_len = load_img_req.mdt_len;
5431 load_req_64bit.img_len = load_img_req.img_len;
5432 load_req_64bit.phy_addr = (uint64_t)pa;
5433 cmd_buf = (void *)&load_req_64bit;
5434 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5435 }
5436
5437 if (qseecom.support_bus_scaling) {
5438 mutex_lock(&qsee_bw_mutex);
5439 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5440 mutex_unlock(&qsee_bw_mutex);
5441 if (ret) {
5442 ret = -EIO;
5443 goto exit_cpu_restore;
5444 }
5445 }
5446
5447 /* Vote for the SFPB clock */
5448 ret = __qseecom_enable_clk_scale_up(data);
5449 if (ret) {
5450 ret = -EIO;
5451 goto exit_register_bus_bandwidth_needs;
5452 }
5453 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5454 ION_IOC_CLEAN_INV_CACHES);
5455 if (ret) {
5456 pr_err("cache operation failed %d\n", ret);
5457 goto exit_disable_clock;
5458 }
5459 /* SCM_CALL to load the external elf */
5460 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5461 &resp, sizeof(resp));
5462 if (ret) {
5463 pr_err("scm_call to load failed : ret %d\n",
5464 ret);
5465 ret = -EFAULT;
5466 goto exit_disable_clock;
5467 }
5468
5469 switch (resp.result) {
5470 case QSEOS_RESULT_SUCCESS:
5471 break;
5472 case QSEOS_RESULT_INCOMPLETE:
5473 pr_err("%s: qseos result incomplete\n", __func__);
5474 ret = __qseecom_process_incomplete_cmd(data, &resp);
5475 if (ret)
5476 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5477 break;
5478 case QSEOS_RESULT_FAILURE:
5479 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5480 ret = -EFAULT;
5481 break;
5482 default:
5483 pr_err("scm_call response result %d not supported\n",
5484 resp.result);
5485 ret = -EFAULT;
5486 break;
5487 }
5488
5489exit_disable_clock:
5490 __qseecom_disable_clk_scale_down(data);
5491
5492exit_register_bus_bandwidth_needs:
5493 if (qseecom.support_bus_scaling) {
5494 mutex_lock(&qsee_bw_mutex);
5495 uret = qseecom_unregister_bus_bandwidth_needs(data);
5496 mutex_unlock(&qsee_bw_mutex);
5497 if (uret)
5498 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5499 uret, ret);
5500 }
5501
5502exit_cpu_restore:
5503 /* Deallocate the handle */
5504 if (!IS_ERR_OR_NULL(ihandle))
5505 ion_free(qseecom.ion_clnt, ihandle);
5506 return ret;
5507}
5508
5509static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5510{
5511 int ret = 0;
5512 struct qseecom_command_scm_resp resp;
5513 struct qseecom_unload_app_ireq req;
5514
5515 /* unavailable client app */
5516 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5517
5518 /* Populate the structure for sending scm call to unload image */
5519 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5520
5521 /* SCM_CALL to unload the external elf */
5522 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5523 sizeof(struct qseecom_unload_app_ireq),
5524 &resp, sizeof(resp));
5525 if (ret) {
5526 pr_err("scm_call to unload failed : ret %d\n",
5527 ret);
5528 ret = -EFAULT;
5529 goto qseecom_unload_external_elf_scm_err;
5530 }
5531 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5532 ret = __qseecom_process_incomplete_cmd(data, &resp);
5533 if (ret)
5534 pr_err("process_incomplete_cmd fail err: %d\n",
5535 ret);
5536 } else {
5537 if (resp.result != QSEOS_RESULT_SUCCESS) {
5538 pr_err("scm_call to unload image failed resp.result =%d\n",
5539 resp.result);
5540 ret = -EFAULT;
5541 }
5542 }
5543
5544qseecom_unload_external_elf_scm_err:
5545
5546 return ret;
5547}
5548
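/*
 * qseecom_query_app_loaded() - ask TZ whether the named app is already
 * loaded. If it is, the app is added to the local registered-app list
 * when it was loaded earlier (e.g. by appsbl) without being registered,
 * the app id/arch are reported back to userspace, and -EEXIST is
 * returned; 0 means the app is not loaded.
 */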
5549static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5550 void __user *argp)
5551{
5552
5553 int32_t ret;
5554 struct qseecom_qseos_app_load_query query_req;
5555 struct qseecom_check_app_ireq req;
5556 struct qseecom_registered_app_list *entry = NULL;
5557 unsigned long flags = 0;
5558 uint32_t app_arch = 0, app_id = 0;
5559 bool found_app = false;
5560
5561 /* Copy the relevant information needed for loading the image */
5562 if (copy_from_user(&query_req,
5563 (void __user *)argp,
5564 sizeof(struct qseecom_qseos_app_load_query))) {
5565 pr_err("copy_from_user failed\n");
5566 return -EFAULT;
5567 }
5568
5569 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5570 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5571 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5572
5573 ret = __qseecom_check_app_exists(req, &app_id);
5574 if (ret) {
5575		pr_err("scm call to check if app is loaded failed\n");
5576 return ret; /* scm call failed */
5577 }
5578 if (app_id) {
5579 pr_debug("App id %d (%s) already exists\n", app_id,
5580 (char *)(req.app_name));
5581 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5582 list_for_each_entry(entry,
5583 &qseecom.registered_app_list_head, list){
5584 if (entry->app_id == app_id) {
5585 app_arch = entry->app_arch;
5586 entry->ref_cnt++;
5587 found_app = true;
5588 break;
5589 }
5590 }
5591 spin_unlock_irqrestore(
5592 &qseecom.registered_app_list_lock, flags);
5593 data->client.app_id = app_id;
5594 query_req.app_id = app_id;
5595 if (app_arch) {
5596 data->client.app_arch = app_arch;
5597 query_req.app_arch = app_arch;
5598 } else {
5599 data->client.app_arch = 0;
5600 query_req.app_arch = 0;
5601 }
5602 strlcpy(data->client.app_name, query_req.app_name,
5603 MAX_APP_NAME_SIZE);
5604 /*
5605 * If app was loaded by appsbl before and was not registered,
5606		 * register this app now.
5607 */
5608 if (!found_app) {
5609 pr_debug("Register app %d [%s] which was loaded before\n",
5610				app_id, (char *)query_req.app_name);
5611 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5612 if (!entry) {
5613 pr_err("kmalloc for app entry failed\n");
5614 return -ENOMEM;
5615 }
5616 entry->app_id = app_id;
5617 entry->ref_cnt = 1;
5618 entry->app_arch = data->client.app_arch;
5619 strlcpy(entry->app_name, data->client.app_name,
5620 MAX_APP_NAME_SIZE);
5621 entry->app_blocked = false;
5622 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07005623 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005624 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5625 flags);
5626 list_add_tail(&entry->list,
5627 &qseecom.registered_app_list_head);
5628 spin_unlock_irqrestore(
5629 &qseecom.registered_app_list_lock, flags);
5630 }
5631 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5632 pr_err("copy_to_user failed\n");
5633 return -EFAULT;
5634 }
5635 return -EEXIST; /* app already loaded */
5636 } else {
5637 return 0; /* app not loaded */
5638 }
5639}
5640
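/*
 * __qseecom_get_ce_pipe_info() - look up the crypto-engine numbers and
 * pipe pair for the given key-management usage (FDE or PFE) and unit,
 * filling the caller-provided ce_hw array and pipe value from the parsed
 * ce_info tables.
 */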
5641static int __qseecom_get_ce_pipe_info(
5642 enum qseecom_key_management_usage_type usage,
5643 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5644{
5645 int ret = -EINVAL;
5646 int i, j;
5647 struct qseecom_ce_info_use *p = NULL;
5648 int total = 0;
5649 struct qseecom_ce_pipe_entry *pcepipe;
5650
5651 switch (usage) {
5652 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5653 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5654 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5655 if (qseecom.support_fde) {
5656 p = qseecom.ce_info.fde;
5657 total = qseecom.ce_info.num_fde;
5658 } else {
5659 pr_err("system does not support fde\n");
5660 return -EINVAL;
5661 }
5662 break;
5663 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5664 if (qseecom.support_pfe) {
5665 p = qseecom.ce_info.pfe;
5666 total = qseecom.ce_info.num_pfe;
5667 } else {
5668 pr_err("system does not support pfe\n");
5669 return -EINVAL;
5670 }
5671 break;
5672 default:
5673 pr_err("unsupported usage %d\n", usage);
5674 return -EINVAL;
5675 }
5676
5677 for (j = 0; j < total; j++) {
5678 if (p->unit_num == unit) {
5679 pcepipe = p->ce_pipe_entry;
5680 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5681 (*ce_hw)[i] = pcepipe->ce_num;
5682 *pipe = pcepipe->ce_pipe_pair;
5683 pcepipe++;
5684 }
5685 ret = 0;
5686 break;
5687 }
5688 p++;
5689 }
5690 return ret;
5691}
5692
5693static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
5694 enum qseecom_key_management_usage_type usage,
5695 struct qseecom_key_generate_ireq *ireq)
5696{
5697 struct qseecom_command_scm_resp resp;
5698 int ret;
5699
5700 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5701 usage >= QSEOS_KM_USAGE_MAX) {
5702 pr_err("Error:: unsupported usage %d\n", usage);
5703 return -EFAULT;
5704 }
5705 ret = __qseecom_enable_clk(CLK_QSEE);
5706 if (ret)
5707 return ret;
5708
5709 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5710 ireq, sizeof(struct qseecom_key_generate_ireq),
5711 &resp, sizeof(resp));
5712 if (ret) {
5713 if (ret == -EINVAL &&
5714 resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5715 pr_debug("Key ID exists.\n");
5716 ret = 0;
5717 } else {
5718 pr_err("scm call to generate key failed : %d\n", ret);
5719 ret = -EFAULT;
5720 }
5721 goto generate_key_exit;
5722 }
5723
5724 switch (resp.result) {
5725 case QSEOS_RESULT_SUCCESS:
5726 break;
5727 case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
5728 pr_debug("Key ID exists.\n");
5729 break;
5730 case QSEOS_RESULT_INCOMPLETE:
5731 ret = __qseecom_process_incomplete_cmd(data, &resp);
5732 if (ret) {
5733 if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5734 pr_debug("Key ID exists.\n");
5735 ret = 0;
5736 } else {
5737 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5738 resp.result);
5739 }
5740 }
5741 break;
5742 case QSEOS_RESULT_FAILURE:
5743 default:
5744 pr_err("gen key scm call failed resp.result %d\n", resp.result);
5745 ret = -EINVAL;
5746 break;
5747 }
5748generate_key_exit:
5749 __qseecom_disable_clk(CLK_QSEE);
5750 return ret;
5751}
5752
5753static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
5754 enum qseecom_key_management_usage_type usage,
5755 struct qseecom_key_delete_ireq *ireq)
5756{
5757 struct qseecom_command_scm_resp resp;
5758 int ret;
5759
5760 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5761 usage >= QSEOS_KM_USAGE_MAX) {
5762 pr_err("Error:: unsupported usage %d\n", usage);
5763 return -EFAULT;
5764 }
5765 ret = __qseecom_enable_clk(CLK_QSEE);
5766 if (ret)
5767 return ret;
5768
5769 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5770 ireq, sizeof(struct qseecom_key_delete_ireq),
5771 &resp, sizeof(struct qseecom_command_scm_resp));
5772 if (ret) {
5773 if (ret == -EINVAL &&
5774 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5775 pr_debug("Max attempts to input password reached.\n");
5776 ret = -ERANGE;
5777 } else {
5778 pr_err("scm call to delete key failed : %d\n", ret);
5779 ret = -EFAULT;
5780 }
5781 goto del_key_exit;
5782 }
5783
5784 switch (resp.result) {
5785 case QSEOS_RESULT_SUCCESS:
5786 break;
5787 case QSEOS_RESULT_INCOMPLETE:
5788 ret = __qseecom_process_incomplete_cmd(data, &resp);
5789 if (ret) {
5790 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5791 resp.result);
5792 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5793 pr_debug("Max attempts to input password reached.\n");
5794 ret = -ERANGE;
5795 }
5796 }
5797 break;
5798 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5799 pr_debug("Max attempts to input password reached.\n");
5800 ret = -ERANGE;
5801 break;
5802 case QSEOS_RESULT_FAILURE:
5803 default:
5804 pr_err("Delete key scm call failed resp.result %d\n",
5805 resp.result);
5806 ret = -EINVAL;
5807 break;
5808 }
5809del_key_exit:
5810 __qseecom_disable_clk(CLK_QSEE);
5811 return ret;
5812}
5813
5814static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5815 enum qseecom_key_management_usage_type usage,
5816 struct qseecom_key_select_ireq *ireq)
5817{
5818 struct qseecom_command_scm_resp resp;
5819 int ret;
5820
5821 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5822 usage >= QSEOS_KM_USAGE_MAX) {
5823 pr_err("Error:: unsupported usage %d\n", usage);
5824 return -EFAULT;
5825 }
5826 ret = __qseecom_enable_clk(CLK_QSEE);
5827 if (ret)
5828 return ret;
5829
5830 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5831 ret = __qseecom_enable_clk(CLK_CE_DRV);
5832 if (ret)
5833 return ret;
5834 }
5835
5836 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5837 ireq, sizeof(struct qseecom_key_select_ireq),
5838 &resp, sizeof(struct qseecom_command_scm_resp));
5839 if (ret) {
5840 if (ret == -EINVAL &&
5841 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5842 pr_debug("Max attempts to input password reached.\n");
5843 ret = -ERANGE;
5844 } else if (ret == -EINVAL &&
5845 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5846 pr_debug("Set Key operation under processing...\n");
5847 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5848 } else {
5849 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5850 ret);
5851 ret = -EFAULT;
5852 }
5853 goto set_key_exit;
5854 }
5855
5856 switch (resp.result) {
5857 case QSEOS_RESULT_SUCCESS:
5858 break;
5859 case QSEOS_RESULT_INCOMPLETE:
5860 ret = __qseecom_process_incomplete_cmd(data, &resp);
5861 if (ret) {
5862 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5863 resp.result);
5864 if (resp.result ==
5865 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5866 pr_debug("Set Key operation under processing...\n");
5867 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5868 }
5869 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5870 pr_debug("Max attempts to input password reached.\n");
5871 ret = -ERANGE;
5872 }
5873 }
5874 break;
5875 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5876 pr_debug("Max attempts to input password reached.\n");
5877 ret = -ERANGE;
5878 break;
5879 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5880 pr_debug("Set Key operation under processing...\n");
5881 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5882 break;
5883 case QSEOS_RESULT_FAILURE:
5884 default:
5885 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5886 ret = -EINVAL;
5887 break;
5888 }
5889set_key_exit:
5890 __qseecom_disable_clk(CLK_QSEE);
5891 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5892 __qseecom_disable_clk(CLK_CE_DRV);
5893 return ret;
5894}
5895
5896static int __qseecom_update_current_key_user_info(
5897 struct qseecom_dev_handle *data,
5898 enum qseecom_key_management_usage_type usage,
5899 struct qseecom_key_userinfo_update_ireq *ireq)
5900{
5901 struct qseecom_command_scm_resp resp;
5902 int ret;
5903
5904 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5905 usage >= QSEOS_KM_USAGE_MAX) {
5906 pr_err("Error:: unsupported usage %d\n", usage);
5907 return -EFAULT;
5908 }
5909 ret = __qseecom_enable_clk(CLK_QSEE);
5910 if (ret)
5911 return ret;
5912
5913 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5914 ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
5915 &resp, sizeof(struct qseecom_command_scm_resp));
5916 if (ret) {
5917 if (ret == -EINVAL &&
5918 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5919			pr_debug("Update Key operation under processing...\n");
5920 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5921 } else {
5922 pr_err("scm call to update key userinfo failed: %d\n",
5923 ret);
5924 __qseecom_disable_clk(CLK_QSEE);
5925 return -EFAULT;
5926 }
5927 }
5928
5929 switch (resp.result) {
5930 case QSEOS_RESULT_SUCCESS:
5931 break;
5932 case QSEOS_RESULT_INCOMPLETE:
5933 ret = __qseecom_process_incomplete_cmd(data, &resp);
5934 if (resp.result ==
5935 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5936			pr_debug("Update Key operation under processing...\n");
5937 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5938 }
5939 if (ret)
5940 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5941 resp.result);
5942 break;
5943 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5944 pr_debug("Update Key operation under processing...\n");
5945 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5946 break;
5947 case QSEOS_RESULT_FAILURE:
5948 default:
5949 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5950 ret = -EINVAL;
5951 break;
5952 }
5953
5954 __qseecom_disable_clk(CLK_QSEE);
5955 return ret;
5956}
5957
5958
5959static int qseecom_enable_ice_setup(int usage)
5960{
5961 int ret = 0;
5962
5963 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5964 ret = qcom_ice_setup_ice_hw("ufs", true);
5965 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5966 ret = qcom_ice_setup_ice_hw("sdcc", true);
5967
5968 return ret;
5969}
5970
5971static int qseecom_disable_ice_setup(int usage)
5972{
5973 int ret = 0;
5974
5975 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5976 ret = qcom_ice_setup_ice_hw("ufs", false);
5977 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5978 ret = qcom_ice_setup_ice_hw("sdcc", false);
5979
5980 return ret;
5981}
5982
5983static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5984{
5985 struct qseecom_ce_info_use *pce_info_use, *p;
5986 int total = 0;
5987 int i;
5988
5989 switch (usage) {
5990 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5991 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5992 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5993 p = qseecom.ce_info.fde;
5994 total = qseecom.ce_info.num_fde;
5995 break;
5996 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5997 p = qseecom.ce_info.pfe;
5998 total = qseecom.ce_info.num_pfe;
5999 break;
6000 default:
6001 pr_err("unsupported usage %d\n", usage);
6002 return -EINVAL;
6003 }
6004
6005 pce_info_use = NULL;
6006
6007 for (i = 0; i < total; i++) {
6008 if (p->unit_num == unit) {
6009 pce_info_use = p;
6010 break;
6011 }
6012 p++;
6013 }
6014 if (!pce_info_use) {
6015		pr_err("cannot find unit %d\n", unit);
6016 return -EINVAL;
6017 }
6018 return pce_info_use->num_ce_pipe_entries;
6019}
6020
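/*
 * qseecom_create_key() - generate a key for the requested usage and set
 * it on every CE/pipe of the default CE unit. UFS/SDCC ICE usages use the
 * fixed ICE CE number and FDE key index, and the set-key SCM call is
 * retried while TZ reports a pending operation.
 */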
6021static int qseecom_create_key(struct qseecom_dev_handle *data,
6022 void __user *argp)
6023{
6024 int i;
6025 uint32_t *ce_hw = NULL;
6026 uint32_t pipe = 0;
6027 int ret = 0;
6028 uint32_t flags = 0;
6029 struct qseecom_create_key_req create_key_req;
6030 struct qseecom_key_generate_ireq generate_key_ireq;
6031 struct qseecom_key_select_ireq set_key_ireq;
6032 uint32_t entries = 0;
6033
6034 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
6035 if (ret) {
6036 pr_err("copy_from_user failed\n");
6037 return ret;
6038 }
6039
6040 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6041 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6042 pr_err("unsupported usage %d\n", create_key_req.usage);
6043 ret = -EFAULT;
6044 return ret;
6045 }
6046 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6047 create_key_req.usage);
6048 if (entries <= 0) {
6049		pr_err("no ce instance for usage %d unit %d\n",
6050			create_key_req.usage, DEFAULT_CE_INFO_UNIT);
6051 ret = -EINVAL;
6052 return ret;
6053 }
6054
6055 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6056 if (!ce_hw) {
6057 ret = -ENOMEM;
6058 return ret;
6059 }
6060 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
6061 DEFAULT_CE_INFO_UNIT);
6062 if (ret) {
6063 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6064 ret = -EINVAL;
6065 goto free_buf;
6066 }
6067
6068 if (qseecom.fde_key_size)
6069 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6070 else
6071 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6072
Jiten Patela7bb1d52018-05-11 12:34:26 +05306073 if (qseecom.enable_key_wrap_in_ks == true)
6074 flags |= ENABLE_KEY_WRAP_IN_KS;
6075
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006076 generate_key_ireq.flags = flags;
6077 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
6078 memset((void *)generate_key_ireq.key_id,
6079 0, QSEECOM_KEY_ID_SIZE);
6080 memset((void *)generate_key_ireq.hash32,
6081 0, QSEECOM_HASH_SIZE);
6082 memcpy((void *)generate_key_ireq.key_id,
6083 (void *)key_id_array[create_key_req.usage].desc,
6084 QSEECOM_KEY_ID_SIZE);
6085 memcpy((void *)generate_key_ireq.hash32,
6086 (void *)create_key_req.hash32,
6087 QSEECOM_HASH_SIZE);
6088
6089 ret = __qseecom_generate_and_save_key(data,
6090 create_key_req.usage, &generate_key_ireq);
6091 if (ret) {
6092 pr_err("Failed to generate key on storage: %d\n", ret);
6093 goto free_buf;
6094 }
6095
6096 for (i = 0; i < entries; i++) {
6097 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6098 if (create_key_req.usage ==
6099 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6100 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6101 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6102
6103 } else if (create_key_req.usage ==
6104 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6105 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6106 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6107
6108 } else {
6109 set_key_ireq.ce = ce_hw[i];
6110 set_key_ireq.pipe = pipe;
6111 }
6112 set_key_ireq.flags = flags;
6113
6114		/* set both PIPE_ENC and PIPE_ENC_XTS */
6115 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6116 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6117 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6118 memcpy((void *)set_key_ireq.key_id,
6119 (void *)key_id_array[create_key_req.usage].desc,
6120 QSEECOM_KEY_ID_SIZE);
6121 memcpy((void *)set_key_ireq.hash32,
6122 (void *)create_key_req.hash32,
6123 QSEECOM_HASH_SIZE);
6124 /*
6125		 * qseecom_enable_ice_setup() returns false (0) if this is a GPCE
6126		 * based crypto instance or if ICE is already set up properly.
6127 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006128 ret = qseecom_enable_ice_setup(create_key_req.usage);
6129 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006130 goto free_buf;
6131
6132 do {
6133 ret = __qseecom_set_clear_ce_key(data,
6134 create_key_req.usage,
6135 &set_key_ireq);
6136 /*
6137 * wait a little before calling scm again to let other
6138 * processes run
6139 */
6140 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6141 msleep(50);
6142
6143 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6144
6145 qseecom_disable_ice_setup(create_key_req.usage);
6146
6147 if (ret) {
6148 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
6149 pipe, ce_hw[i], ret);
6150 goto free_buf;
6151 } else {
6152 pr_err("Set the key successfully\n");
6153 if ((create_key_req.usage ==
6154 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
6155 (create_key_req.usage ==
6156 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
6157 goto free_buf;
6158 }
6159 }
6160
6161free_buf:
6162 kzfree(ce_hw);
6163 return ret;
6164}
6165
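/*
 * qseecom_wipe_key() - optionally delete the stored key for the given
 * usage, then clear it from each associated CE pipe by programming the
 * invalid key id, mirroring the CE/pipe selection done in
 * qseecom_create_key().
 */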
6166static int qseecom_wipe_key(struct qseecom_dev_handle *data,
6167 void __user *argp)
6168{
6169 uint32_t *ce_hw = NULL;
6170 uint32_t pipe = 0;
6171 int ret = 0;
6172 uint32_t flags = 0;
6173 int i, j;
6174 struct qseecom_wipe_key_req wipe_key_req;
6175 struct qseecom_key_delete_ireq delete_key_ireq;
6176 struct qseecom_key_select_ireq clear_key_ireq;
6177 uint32_t entries = 0;
6178
6179 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6180 if (ret) {
6181 pr_err("copy_from_user failed\n");
6182 return ret;
6183 }
6184
6185 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6186 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6187 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6188 ret = -EFAULT;
6189 return ret;
6190 }
6191
6192 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6193 wipe_key_req.usage);
6194 if (entries <= 0) {
6195		pr_err("no ce instance for usage %d unit %d\n",
6196			wipe_key_req.usage, DEFAULT_CE_INFO_UNIT);
6197 ret = -EINVAL;
6198 return ret;
6199 }
6200
6201 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6202 if (!ce_hw) {
6203 ret = -ENOMEM;
6204 return ret;
6205 }
6206
6207 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6208 DEFAULT_CE_INFO_UNIT);
6209 if (ret) {
6210 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6211 ret = -EINVAL;
6212 goto free_buf;
6213 }
6214
6215 if (wipe_key_req.wipe_key_flag) {
6216 delete_key_ireq.flags = flags;
6217 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6218 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6219 memcpy((void *)delete_key_ireq.key_id,
6220 (void *)key_id_array[wipe_key_req.usage].desc,
6221 QSEECOM_KEY_ID_SIZE);
6222 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6223
6224 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6225 &delete_key_ireq);
6226 if (ret) {
6227 pr_err("Failed to delete key from ssd storage: %d\n",
6228 ret);
6229 ret = -EFAULT;
6230 goto free_buf;
6231 }
6232 }
6233
6234 for (j = 0; j < entries; j++) {
6235 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6236 if (wipe_key_req.usage ==
6237 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6238 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6239 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6240 } else if (wipe_key_req.usage ==
6241 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6242 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6243 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6244 } else {
6245 clear_key_ireq.ce = ce_hw[j];
6246 clear_key_ireq.pipe = pipe;
6247 }
6248 clear_key_ireq.flags = flags;
6249 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6250 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6251 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6252 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6253
6254 /*
6255		 * qseecom_enable_ice_setup() returns false (0) if this is a GPCE
6256		 * based crypto instance or if ICE is already set up properly.
6257 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006258 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6259 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006260 goto free_buf;
6261
6262 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6263 &clear_key_ireq);
6264
6265 qseecom_disable_ice_setup(wipe_key_req.usage);
6266
6267 if (ret) {
6268 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6269 pipe, ce_hw[j], ret);
6270 ret = -EFAULT;
6271 goto free_buf;
6272 }
6273 }
6274
6275free_buf:
6276 kzfree(ce_hw);
6277 return ret;
6278}
6279
6280static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6281 void __user *argp)
6282{
6283 int ret = 0;
6284 uint32_t flags = 0;
6285 struct qseecom_update_key_userinfo_req update_key_req;
6286 struct qseecom_key_userinfo_update_ireq ireq;
6287
6288 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6289 if (ret) {
6290 pr_err("copy_from_user failed\n");
6291 return ret;
6292 }
6293
6294 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6295 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6296 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6297 return -EFAULT;
6298 }
6299
6300 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6301
6302 if (qseecom.fde_key_size)
6303 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6304 else
6305 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6306
6307 ireq.flags = flags;
6308 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6309 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6310 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6311 memcpy((void *)ireq.key_id,
6312 (void *)key_id_array[update_key_req.usage].desc,
6313 QSEECOM_KEY_ID_SIZE);
6314 memcpy((void *)ireq.current_hash32,
6315 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6316 memcpy((void *)ireq.new_hash32,
6317 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6318
6319 do {
6320 ret = __qseecom_update_current_key_user_info(data,
6321 update_key_req.usage,
6322 &ireq);
6323 /*
6324 * wait a little before calling scm again to let other
6325 * processes run
6326 */
6327 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6328 msleep(50);
6329
6330 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6331 if (ret) {
6332 pr_err("Failed to update key info: %d\n", ret);
6333 return ret;
6334 }
6335 return ret;
6336
6337}
6338static int qseecom_is_es_activated(void __user *argp)
6339{
Zhen Kong26e62742018-05-04 17:19:06 -07006340 struct qseecom_is_es_activated_req req = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006341 struct qseecom_command_scm_resp resp;
6342 int ret;
6343
6344 if (qseecom.qsee_version < QSEE_VERSION_04) {
6345 pr_err("invalid qsee version\n");
6346 return -ENODEV;
6347 }
6348
6349 if (argp == NULL) {
6350 pr_err("arg is null\n");
6351 return -EINVAL;
6352 }
6353
6354 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6355 &req, sizeof(req), &resp, sizeof(resp));
6356 if (ret) {
6357 pr_err("scm_call failed\n");
6358 return ret;
6359 }
6360
6361 req.is_activated = resp.result;
6362 ret = copy_to_user(argp, &req, sizeof(req));
6363 if (ret) {
6364 pr_err("copy_to_user failed\n");
6365 return ret;
6366 }
6367
6368 return 0;
6369}
6370
6371static int qseecom_save_partition_hash(void __user *argp)
6372{
6373 struct qseecom_save_partition_hash_req req;
6374 struct qseecom_command_scm_resp resp;
6375 int ret;
6376
6377 memset(&resp, 0x00, sizeof(resp));
6378
6379 if (qseecom.qsee_version < QSEE_VERSION_04) {
6380 pr_err("invalid qsee version\n");
6381 return -ENODEV;
6382 }
6383
6384 if (argp == NULL) {
6385 pr_err("arg is null\n");
6386 return -EINVAL;
6387 }
6388
6389 ret = copy_from_user(&req, argp, sizeof(req));
6390 if (ret) {
6391 pr_err("copy_from_user failed\n");
6392 return ret;
6393 }
6394
6395 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6396 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6397 if (ret) {
6398 pr_err("qseecom_scm_call failed\n");
6399 return ret;
6400 }
6401
6402 return 0;
6403}
6404
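/*
 * qseecom_mdtp_cipher_dip() - encrypt or decrypt an MDTP Data Integrity
 * Partition buffer. The user buffers are bounced through page-aligned
 * kernel buffers, flushed for TZ access, and passed to the
 * TZ_MDTP_CIPHER_DIP_ID SCM call with the QSEE clock enabled.
 */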
6405static int qseecom_mdtp_cipher_dip(void __user *argp)
6406{
6407 struct qseecom_mdtp_cipher_dip_req req;
6408 u32 tzbuflenin, tzbuflenout;
6409 char *tzbufin = NULL, *tzbufout = NULL;
6410 struct scm_desc desc = {0};
6411 int ret;
6412
6413 do {
6414 /* Copy the parameters from userspace */
6415 if (argp == NULL) {
6416 pr_err("arg is null\n");
6417 ret = -EINVAL;
6418 break;
6419 }
6420
6421 ret = copy_from_user(&req, argp, sizeof(req));
6422 if (ret) {
6423 pr_err("copy_from_user failed, ret= %d\n", ret);
6424 break;
6425 }
6426
6427 if (req.in_buf == NULL || req.out_buf == NULL ||
6428 req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
6429 req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
6430 req.direction > 1) {
6431 pr_err("invalid parameters\n");
6432 ret = -EINVAL;
6433 break;
6434 }
6435
6436 /* Copy the input buffer from userspace to kernel space */
6437 tzbuflenin = PAGE_ALIGN(req.in_buf_size);
6438 tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
6439 if (!tzbufin) {
6440 pr_err("error allocating in buffer\n");
6441 ret = -ENOMEM;
6442 break;
6443 }
6444
6445 ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
6446 if (ret) {
6447 pr_err("copy_from_user failed, ret=%d\n", ret);
6448 break;
6449 }
6450
6451 dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
6452
6453 /* Prepare the output buffer in kernel space */
6454 tzbuflenout = PAGE_ALIGN(req.out_buf_size);
6455 tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
6456 if (!tzbufout) {
6457 pr_err("error allocating out buffer\n");
6458 ret = -ENOMEM;
6459 break;
6460 }
6461
6462 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6463
6464 /* Send the command to TZ */
6465 desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
6466 desc.args[0] = virt_to_phys(tzbufin);
6467 desc.args[1] = req.in_buf_size;
6468 desc.args[2] = virt_to_phys(tzbufout);
6469 desc.args[3] = req.out_buf_size;
6470 desc.args[4] = req.direction;
6471
6472 ret = __qseecom_enable_clk(CLK_QSEE);
6473 if (ret)
6474 break;
6475
Zhen Kong03f220d2019-02-01 17:12:34 -08006476 ret = __qseecom_scm_call2_locked(TZ_MDTP_CIPHER_DIP_ID, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006477
6478 __qseecom_disable_clk(CLK_QSEE);
6479
6480 if (ret) {
6481 pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
6482 ret);
6483 break;
6484 }
6485
6486 /* Copy the output buffer from kernel space to userspace */
6487 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6488 ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
6489 if (ret) {
6490 pr_err("copy_to_user failed, ret=%d\n", ret);
6491 break;
6492 }
6493 } while (0);
6494
6495 kzfree(tzbufin);
6496 kzfree(tzbufout);
6497
6498 return ret;
6499}
6500
6501static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
6502 struct qseecom_qteec_req *req)
6503{
6504 if (!data || !data->client.ihandle) {
6505 pr_err("Client or client handle is not initialized\n");
6506 return -EINVAL;
6507 }
6508
6509 if (data->type != QSEECOM_CLIENT_APP)
6510 return -EFAULT;
6511
6512 if (req->req_len > UINT_MAX - req->resp_len) {
6513 pr_err("Integer overflow detected in req_len & rsp_len\n");
6514 return -EINVAL;
6515 }
6516
6517 if (req->req_len + req->resp_len > data->client.sb_length) {
6518 pr_debug("Not enough memory to fit cmd_buf.\n");
6519 pr_debug("resp_buf. Required: %u, Available: %zu\n",
6520 (req->req_len + req->resp_len), data->client.sb_length);
6521 return -ENOMEM;
6522 }
6523
6524 if (req->req_ptr == NULL || req->resp_ptr == NULL) {
6525 pr_err("cmd buffer or response buffer is null\n");
6526 return -EINVAL;
6527 }
6528 if (((uintptr_t)req->req_ptr <
6529 data->client.user_virt_sb_base) ||
6530 ((uintptr_t)req->req_ptr >=
6531 (data->client.user_virt_sb_base + data->client.sb_length))) {
6532		pr_err("cmd buffer address not within shared buffer\n");
6533 return -EINVAL;
6534 }
6535
6536 if (((uintptr_t)req->resp_ptr <
6537 data->client.user_virt_sb_base) ||
6538 ((uintptr_t)req->resp_ptr >=
6539 (data->client.user_virt_sb_base + data->client.sb_length))) {
6540		pr_err("response buffer address not within shared buffer\n");
6541 return -EINVAL;
6542 }
6543
6544 if ((req->req_len == 0) || (req->resp_len == 0)) {
6545		pr_err("cmd buf length/response buf length not valid\n");
6546 return -EINVAL;
6547 }
6548
6549 if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
6550 pr_err("Integer overflow in req_len & req_ptr\n");
6551 return -EINVAL;
6552 }
6553
6554 if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
6555 pr_err("Integer overflow in resp_len & resp_ptr\n");
6556 return -EINVAL;
6557 }
6558
6559 if (data->client.user_virt_sb_base >
6560 (ULONG_MAX - data->client.sb_length)) {
6561 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
6562 return -EINVAL;
6563 }
6564 if ((((uintptr_t)req->req_ptr + req->req_len) >
6565 ((uintptr_t)data->client.user_virt_sb_base +
6566 data->client.sb_length)) ||
6567 (((uintptr_t)req->resp_ptr + req->resp_len) >
6568 ((uintptr_t)data->client.user_virt_sb_base +
6569 data->client.sb_length))) {
6570 pr_err("cmd buf or resp buf is out of shared buffer region\n");
6571 return -EINVAL;
6572 }
6573 return 0;
6574}
6575
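/*
 * __qseecom_qteec_handle_pre_alc_fd() - build the scatter-gather
 * descriptor for a pre-allocated (secure-heap) fd: a dma-coherent buffer
 * is filled with the entry count followed by each entry's physical
 * address and length, and is tracked in sec_buf_fd[] for later cleanup.
 */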
6576static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6577 uint32_t fd_idx, struct sg_table *sg_ptr)
6578{
6579 struct scatterlist *sg = sg_ptr->sgl;
6580 struct qseecom_sg_entry *sg_entry;
6581 void *buf;
6582 uint i;
6583 size_t size;
6584 dma_addr_t coh_pmem;
6585
6586 if (fd_idx >= MAX_ION_FD) {
6587 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6588 return -ENOMEM;
6589 }
6590 /*
6591	 * Allocate a buffer, populate it with the number of entries plus
6592 * each sg entry's phy addr and length; then return the
6593 * phy_addr of the buffer.
6594 */
6595 size = sizeof(uint32_t) +
6596 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6597 size = (size + PAGE_SIZE) & PAGE_MASK;
6598 buf = dma_alloc_coherent(qseecom.pdev,
6599 size, &coh_pmem, GFP_KERNEL);
6600 if (buf == NULL) {
6601 pr_err("failed to alloc memory for sg buf\n");
6602 return -ENOMEM;
6603 }
6604 *(uint32_t *)buf = sg_ptr->nents;
6605 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6606 for (i = 0; i < sg_ptr->nents; i++) {
6607 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6608 sg_entry->len = sg->length;
6609 sg_entry++;
6610 sg = sg_next(sg);
6611 }
6612 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6613 data->client.sec_buf_fd[fd_idx].vbase = buf;
6614 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6615 data->client.sec_buf_fd[fd_idx].size = size;
6616 return 0;
6617}
6618
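/*
 * __qseecom_update_qteec_req_buf() - patch (or, when cleanup is set,
 * undo) the ion fd references embedded in a QTEEC request buffer: each
 * fd's placeholder at cmd_buf_offset is replaced with the buffer's
 * physical address or with a pre-allocated sg-list descriptor, with the
 * required cache maintenance and sglistinfo bookkeeping.
 */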
6619static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6620 struct qseecom_dev_handle *data, bool cleanup)
6621{
6622 struct ion_handle *ihandle;
6623 int ret = 0;
6624 int i = 0;
6625 uint32_t *update;
6626 struct sg_table *sg_ptr = NULL;
6627 struct scatterlist *sg;
6628 struct qseecom_param_memref *memref;
6629
6630 if (req == NULL) {
6631 pr_err("Invalid address\n");
6632 return -EINVAL;
6633 }
6634 for (i = 0; i < MAX_ION_FD; i++) {
6635 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006636 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006637 req->ifd_data[i].fd);
6638 if (IS_ERR_OR_NULL(ihandle)) {
6639 pr_err("Ion client can't retrieve the handle\n");
6640 return -ENOMEM;
6641 }
6642 if ((req->req_len < sizeof(uint32_t)) ||
6643 (req->ifd_data[i].cmd_buf_offset >
6644 req->req_len - sizeof(uint32_t))) {
6645 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6646 req->req_len,
6647 req->ifd_data[i].cmd_buf_offset);
6648 return -EINVAL;
6649 }
6650 update = (uint32_t *)((char *) req->req_ptr +
6651 req->ifd_data[i].cmd_buf_offset);
6652 if (!update) {
6653 pr_err("update pointer is NULL\n");
6654 return -EINVAL;
6655 }
6656 } else {
6657 continue;
6658 }
6659 /* Populate the cmd data structure with the phys_addr */
6660 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6661 if (IS_ERR_OR_NULL(sg_ptr)) {
6662			pr_err("Ion client could not retrieve sg table\n");
6663 goto err;
6664 }
6665 sg = sg_ptr->sgl;
6666 if (sg == NULL) {
6667 pr_err("sg is NULL\n");
6668 goto err;
6669 }
6670 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6671			pr_err("Num of scat entr (%d) or length (%d) invalid\n",
6672 sg_ptr->nents, sg->length);
6673 goto err;
6674 }
6675 /* clean up buf for pre-allocated fd */
6676 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6677 (*update)) {
6678 if (data->client.sec_buf_fd[i].vbase)
6679 dma_free_coherent(qseecom.pdev,
6680 data->client.sec_buf_fd[i].size,
6681 data->client.sec_buf_fd[i].vbase,
6682 data->client.sec_buf_fd[i].pbase);
6683 memset((void *)update, 0,
6684 sizeof(struct qseecom_param_memref));
6685 memset(&(data->client.sec_buf_fd[i]), 0,
6686 sizeof(struct qseecom_sec_buf_fd_info));
6687 goto clean;
6688 }
6689
6690 if (*update == 0) {
6691 /* update buf for pre-allocated fd from secure heap*/
6692 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6693 sg_ptr);
6694 if (ret) {
6695 pr_err("Failed to handle buf for fd[%d]\n", i);
6696 goto err;
6697 }
6698 memref = (struct qseecom_param_memref *)update;
6699 memref->buffer =
6700 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6701 memref->size =
6702 (uint32_t)(data->client.sec_buf_fd[i].size);
6703 } else {
6704 /* update buf for fd from non-secure qseecom heap */
6705 if (sg_ptr->nents != 1) {
6706 pr_err("Num of scat entr (%d) invalid\n",
6707 sg_ptr->nents);
6708 goto err;
6709 }
6710 if (cleanup)
6711 *update = 0;
6712 else
6713 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6714 }
6715clean:
6716 if (cleanup) {
6717 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6718 ihandle, NULL, sg->length,
6719 ION_IOC_INV_CACHES);
6720 if (ret) {
6721 pr_err("cache operation failed %d\n", ret);
6722 goto err;
6723 }
6724 } else {
6725 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6726 ihandle, NULL, sg->length,
6727 ION_IOC_CLEAN_INV_CACHES);
6728 if (ret) {
6729 pr_err("cache operation failed %d\n", ret);
6730 goto err;
6731 }
6732 data->sglistinfo_ptr[i].indexAndFlags =
6733 SGLISTINFO_SET_INDEX_FLAG(
6734 (sg_ptr->nents == 1), 0,
6735 req->ifd_data[i].cmd_buf_offset);
6736 data->sglistinfo_ptr[i].sizeOrCount =
6737 (sg_ptr->nents == 1) ?
6738 sg->length : sg_ptr->nents;
6739 data->sglist_cnt = i + 1;
6740 }
6741 /* Deallocate the handle */
6742 if (!IS_ERR_OR_NULL(ihandle))
6743 ion_free(qseecom.ion_clnt, ihandle);
6744 }
6745 return ret;
6746err:
6747 if (!IS_ERR_OR_NULL(ihandle))
6748 ion_free(qseecom.ion_clnt, ihandle);
6749 return -ENOMEM;
6750}
6751
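/*
 * __qseecom_qteec_issue_cmd() - common path for the GP open-session,
 * close-session and request-cancellation ioctls. Validates the request,
 * looks up the loaded app, converts the user request/response pointers to
 * kernel/physical addresses, builds a 32-bit or 64-bit ireq depending on
 * qseecom.qsee_version, and issues the SCM call, handling reentrancy and
 * QSEOS_RESULT_INCOMPLETE responses plus cache maintenance on the shared
 * buffer.
 */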
6752static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
6753 struct qseecom_qteec_req *req, uint32_t cmd_id)
6754{
6755 struct qseecom_command_scm_resp resp;
6756 struct qseecom_qteec_ireq ireq;
6757 struct qseecom_qteec_64bit_ireq ireq_64bit;
6758 struct qseecom_registered_app_list *ptr_app;
6759 bool found_app = false;
6760 unsigned long flags;
6761 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07006762 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006763 uint32_t reqd_len_sb_in = 0;
6764 void *cmd_buf = NULL;
6765 size_t cmd_len;
6766 struct sglist_info *table = data->sglistinfo_ptr;
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306767 void *req_ptr = NULL;
6768 void *resp_ptr = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006769
6770 ret = __qseecom_qteec_validate_msg(data, req);
6771 if (ret)
6772 return ret;
6773
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306774 req_ptr = req->req_ptr;
6775 resp_ptr = req->resp_ptr;
6776
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006777 /* find app_id & img_name from list */
6778 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6779 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6780 list) {
6781 if ((ptr_app->app_id == data->client.app_id) &&
6782 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6783 found_app = true;
6784 break;
6785 }
6786 }
6787 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6788 if (!found_app) {
6789 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6790 (char *)data->client.app_name);
6791 return -ENOENT;
6792 }
6793
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306794 req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6795 (uintptr_t)req->req_ptr);
6796 req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6797 (uintptr_t)req->resp_ptr);
6798
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006799 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6800 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
6801 ret = __qseecom_update_qteec_req_buf(
6802 (struct qseecom_qteec_modfd_req *)req, data, false);
6803 if (ret)
6804 return ret;
6805 }
6806
6807 if (qseecom.qsee_version < QSEE_VERSION_40) {
6808 ireq.app_id = data->client.app_id;
6809 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306810 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006811 ireq.req_len = req->req_len;
6812 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306813 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006814 ireq.resp_len = req->resp_len;
6815 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6816 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6817 dmac_flush_range((void *)table,
6818 (void *)table + SGLISTINFO_TABLE_SIZE);
6819 cmd_buf = (void *)&ireq;
6820 cmd_len = sizeof(struct qseecom_qteec_ireq);
6821 } else {
6822 ireq_64bit.app_id = data->client.app_id;
6823 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306824 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006825 ireq_64bit.req_len = req->req_len;
6826 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306827 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006828 ireq_64bit.resp_len = req->resp_len;
6829 if ((data->client.app_arch == ELFCLASS32) &&
6830 ((ireq_64bit.req_ptr >=
6831 PHY_ADDR_4G - ireq_64bit.req_len) ||
6832 (ireq_64bit.resp_ptr >=
6833 PHY_ADDR_4G - ireq_64bit.resp_len))){
6834 pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
6835 data->client.app_name, data->client.app_id);
6836 pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
6837 ireq_64bit.req_ptr, ireq_64bit.req_len,
6838 ireq_64bit.resp_ptr, ireq_64bit.resp_len);
6839 return -EFAULT;
6840 }
6841 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6842 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6843 dmac_flush_range((void *)table,
6844 (void *)table + SGLISTINFO_TABLE_SIZE);
6845 cmd_buf = (void *)&ireq_64bit;
6846 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6847 }
6848 if (qseecom.whitelist_support == true
6849 && cmd_id == QSEOS_TEE_OPEN_SESSION)
6850 *(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
6851 else
6852 *(uint32_t *)cmd_buf = cmd_id;
6853
6854 reqd_len_sb_in = req->req_len + req->resp_len;
6855 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6856 data->client.sb_virt,
6857 reqd_len_sb_in,
6858 ION_IOC_CLEAN_INV_CACHES);
6859 if (ret) {
6860 pr_err("cache operation failed %d\n", ret);
6861 return ret;
6862 }
6863
6864 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6865
6866 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6867 cmd_buf, cmd_len,
6868 &resp, sizeof(resp));
6869 if (ret) {
6870 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6871 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07006872 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006873 }
6874
6875 if (qseecom.qsee_reentrancy_support) {
6876 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07006877 if (ret)
6878 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006879 } else {
6880 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6881 ret = __qseecom_process_incomplete_cmd(data, &resp);
6882 if (ret) {
6883 pr_err("process_incomplete_cmd failed err: %d\n",
6884 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07006885 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006886 }
6887 } else {
6888 if (resp.result != QSEOS_RESULT_SUCCESS) {
6889 pr_err("Response result %d not supported\n",
6890 resp.result);
6891 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07006892 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006893 }
6894 }
6895 }
Zhen Kong4af480e2017-09-19 14:34:16 -07006896exit:
6897 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006898 data->client.sb_virt, data->client.sb_length,
6899 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07006900 if (ret2) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006901		pr_err("cache operation failed %d\n", ret2);
Zhen Kong4af480e2017-09-19 14:34:16 -07006902 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006903 }
6904
6905 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6906 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
Zhen Kong4af480e2017-09-19 14:34:16 -07006907 ret2 = __qseecom_update_qteec_req_buf(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006908 (struct qseecom_qteec_modfd_req *)req, data, true);
Zhen Kong4af480e2017-09-19 14:34:16 -07006909 if (ret2)
6910 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006911 }
Zhen Kong4af480e2017-09-19 14:34:16 -07006912 return ret;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006913}
6914
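/* Handler for QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: copy the modfd request
 * from user space and issue QSEOS_TEE_OPEN_SESSION.
 */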
6915static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6916 void __user *argp)
6917{
6918 struct qseecom_qteec_modfd_req req;
6919 int ret = 0;
6920
6921 ret = copy_from_user(&req, argp,
6922 sizeof(struct qseecom_qteec_modfd_req));
6923 if (ret) {
6924 pr_err("copy_from_user failed\n");
6925 return ret;
6926 }
6927 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6928 QSEOS_TEE_OPEN_SESSION);
6929
6930 return ret;
6931}
6932
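/* Handler for QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: copy the request from
 * user space and issue QSEOS_TEE_CLOSE_SESSION.
 */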
6933static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6934 void __user *argp)
6935{
6936 struct qseecom_qteec_req req;
6937 int ret = 0;
6938
6939 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6940 if (ret) {
6941 pr_err("copy_from_user failed\n");
6942 return ret;
6943 }
6944 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6945 return ret;
6946}
6947
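/*
 * Handler for QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ. Largely mirrors
 * __qseecom_qteec_issue_cmd() but is open-coded for QSEOS_TEE_INVOKE_COMMAND:
 * it additionally validates every ifd_data[] cmd_buf_offset against req_len
 * before the request buffer is patched with ION addresses.
 */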
6948static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
6949 void __user *argp)
6950{
6951 struct qseecom_qteec_modfd_req req;
6952 struct qseecom_command_scm_resp resp;
6953 struct qseecom_qteec_ireq ireq;
6954 struct qseecom_qteec_64bit_ireq ireq_64bit;
6955 struct qseecom_registered_app_list *ptr_app;
6956 bool found_app = false;
6957 unsigned long flags;
6958 int ret = 0;
6959 int i = 0;
6960 uint32_t reqd_len_sb_in = 0;
6961 void *cmd_buf = NULL;
6962 size_t cmd_len;
6963 struct sglist_info *table = data->sglistinfo_ptr;
6964 void *req_ptr = NULL;
6965 void *resp_ptr = NULL;
6966
6967 ret = copy_from_user(&req, argp,
6968 sizeof(struct qseecom_qteec_modfd_req));
6969 if (ret) {
6970 pr_err("copy_from_user failed\n");
6971 return ret;
6972 }
6973 ret = __qseecom_qteec_validate_msg(data,
6974 (struct qseecom_qteec_req *)(&req));
6975 if (ret)
6976 return ret;
6977 req_ptr = req.req_ptr;
6978 resp_ptr = req.resp_ptr;
6979
6980 /* find app_id & img_name from list */
6981 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6982 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6983 list) {
6984 if ((ptr_app->app_id == data->client.app_id) &&
6985 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6986 found_app = true;
6987 break;
6988 }
6989 }
6990 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6991 if (!found_app) {
6992 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6993 (char *)data->client.app_name);
6994 return -ENOENT;
6995 }
6996
6997 /* validate offsets */
6998 for (i = 0; i < MAX_ION_FD; i++) {
6999 if (req.ifd_data[i].fd) {
7000 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
7001 return -EINVAL;
7002 }
7003 }
7004 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
7005 (uintptr_t)req.req_ptr);
7006 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
7007 (uintptr_t)req.resp_ptr);
7008 ret = __qseecom_update_qteec_req_buf(&req, data, false);
7009 if (ret)
7010 return ret;
7011
7012 if (qseecom.qsee_version < QSEE_VERSION_40) {
7013 ireq.app_id = data->client.app_id;
7014 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7015 (uintptr_t)req_ptr);
7016 ireq.req_len = req.req_len;
7017 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7018 (uintptr_t)resp_ptr);
7019 ireq.resp_len = req.resp_len;
7020 cmd_buf = (void *)&ireq;
7021 cmd_len = sizeof(struct qseecom_qteec_ireq);
7022 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
7023 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7024 dmac_flush_range((void *)table,
7025 (void *)table + SGLISTINFO_TABLE_SIZE);
7026 } else {
7027 ireq_64bit.app_id = data->client.app_id;
7028 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7029 (uintptr_t)req_ptr);
7030 ireq_64bit.req_len = req.req_len;
7031 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7032 (uintptr_t)resp_ptr);
7033 ireq_64bit.resp_len = req.resp_len;
7034 cmd_buf = (void *)&ireq_64bit;
7035 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
7036 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
7037 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7038 dmac_flush_range((void *)table,
7039 (void *)table + SGLISTINFO_TABLE_SIZE);
7040 }
7041 reqd_len_sb_in = req.req_len + req.resp_len;
7042 if (qseecom.whitelist_support == true)
7043 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
7044 else
7045 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
7046
7047 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7048 data->client.sb_virt,
7049 reqd_len_sb_in,
7050 ION_IOC_CLEAN_INV_CACHES);
7051 if (ret) {
7052 pr_err("cache operation failed %d\n", ret);
7053 return ret;
7054 }
7055
7056 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
7057
7058 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
7059 cmd_buf, cmd_len,
7060 &resp, sizeof(resp));
7061 if (ret) {
7062 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
7063 ret, data->client.app_id);
7064 return ret;
7065 }
7066
7067 if (qseecom.qsee_reentrancy_support) {
7068 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
7069 } else {
7070 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
7071 ret = __qseecom_process_incomplete_cmd(data, &resp);
7072 if (ret) {
7073 pr_err("process_incomplete_cmd failed err: %d\n",
7074 ret);
7075 return ret;
7076 }
7077 } else {
7078 if (resp.result != QSEOS_RESULT_SUCCESS) {
7079 pr_err("Response result %d not supported\n",
7080 resp.result);
7081 ret = -EINVAL;
7082 }
7083 }
7084 }
7085 ret = __qseecom_update_qteec_req_buf(&req, data, true);
7086 if (ret)
7087 return ret;
7088
7089 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7090 data->client.sb_virt, data->client.sb_length,
7091 ION_IOC_INV_CACHES);
7092 if (ret) {
7093 pr_err("cache operation failed %d\n", ret);
7094 return ret;
7095 }
7096 return 0;
7097}
7098
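/* Handler for QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: copy the modfd
 * request from user space and issue QSEOS_TEE_REQUEST_CANCELLATION.
 */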
7099static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
7100 void __user *argp)
7101{
7102 struct qseecom_qteec_modfd_req req;
7103 int ret = 0;
7104
7105 ret = copy_from_user(&req, argp,
7106 sizeof(struct qseecom_qteec_modfd_req));
7107 if (ret) {
7108 pr_err("copy_from_user failed\n");
7109 return ret;
7110 }
7111 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
7112 QSEOS_TEE_REQUEST_CANCELLATION);
7113
7114 return ret;
7115}
7116
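/* Reset the cached scatter/gather list info once a modfd command completes. */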
7117static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
7118{
7119 if (data->sglist_cnt) {
7120 memset(data->sglistinfo_ptr, 0,
7121 SGLISTINFO_TABLE_SIZE);
7122 data->sglist_cnt = 0;
7123 }
7124}
7125
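/*
 * qseecom_ioctl() - main dispatcher for the /dev/qseecom character device.
 * App-facing commands serialize on app_access_lock, listener commands on
 * listener_access_lock; each request bumps data->ioctl_count and wakes
 * abort_wq on completion. Bus bandwidth and crypto clock votes are taken
 * around the send-cmd paths when the client has not registered its own
 * performance needs.
 */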
AnilKumar Chimataa312d342019-01-25 12:43:23 +05307126static long qseecom_ioctl(struct file *file,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007127 unsigned int cmd, unsigned long arg)
7128{
7129 int ret = 0;
7130 struct qseecom_dev_handle *data = file->private_data;
7131 void __user *argp = (void __user *) arg;
7132 bool perf_enabled = false;
7133
7134 if (!data) {
7135 pr_err("Invalid/uninitialized device handle\n");
7136 return -EINVAL;
7137 }
7138
7139 if (data->abort) {
7140 pr_err("Aborting qseecom driver\n");
7141 return -ENODEV;
7142 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007143 if (cmd != QSEECOM_IOCTL_RECEIVE_REQ &&
7144 cmd != QSEECOM_IOCTL_SEND_RESP_REQ &&
7145 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP &&
7146 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64)
Zhen Kongc4c162a2019-01-23 12:07:12 -08007147 __wakeup_unregister_listener_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007148
7149 switch (cmd) {
7150 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
7151 if (data->type != QSEECOM_GENERIC) {
7152 pr_err("reg lstnr req: invalid handle (%d)\n",
7153 data->type);
7154 ret = -EINVAL;
7155 break;
7156 }
7157 pr_debug("ioctl register_listener_req()\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08007158 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007159 atomic_inc(&data->ioctl_count);
7160 data->type = QSEECOM_LISTENER_SERVICE;
7161 ret = qseecom_register_listener(data, argp);
7162 atomic_dec(&data->ioctl_count);
7163 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007164 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007165 if (ret)
7166 pr_err("failed qseecom_register_listener: %d\n", ret);
7167 break;
7168 }
Neeraj Sonib30ac1f2018-04-17 14:48:42 +05307169 case QSEECOM_IOCTL_SET_ICE_INFO: {
7170 struct qseecom_ice_data_t ice_data;
7171
7172 ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
7173 if (ret) {
7174 pr_err("copy_from_user failed\n");
7175 return -EFAULT;
7176 }
7177 qcom_ice_set_fde_flag(ice_data.flag);
7178 break;
7179 }
7180
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007181 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
7182 if ((data->listener.id == 0) ||
7183 (data->type != QSEECOM_LISTENER_SERVICE)) {
7184 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
7185 data->type, data->listener.id);
7186 ret = -EINVAL;
7187 break;
7188 }
7189 pr_debug("ioctl unregister_listener_req()\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08007190 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007191 atomic_inc(&data->ioctl_count);
7192 ret = qseecom_unregister_listener(data);
7193 atomic_dec(&data->ioctl_count);
7194 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007195 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007196 if (ret)
7197 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7198 break;
7199 }
7200 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7201 if ((data->client.app_id == 0) ||
7202 (data->type != QSEECOM_CLIENT_APP)) {
7203 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7204 data->type, data->client.app_id);
7205 ret = -EINVAL;
7206 break;
7207 }
7208 /* Only one client allowed here at a time */
7209 mutex_lock(&app_access_lock);
7210 if (qseecom.support_bus_scaling) {
7211 /* register bus bw in case the client doesn't do it */
7212 if (!data->mode) {
7213 mutex_lock(&qsee_bw_mutex);
7214 __qseecom_register_bus_bandwidth_needs(
7215 data, HIGH);
7216 mutex_unlock(&qsee_bw_mutex);
7217 }
7218 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7219 if (ret) {
7220 pr_err("Failed to set bw.\n");
7221 ret = -EINVAL;
7222 mutex_unlock(&app_access_lock);
7223 break;
7224 }
7225 }
7226 /*
7227 * On targets where crypto clock is handled by HLOS,
7228 * if clk_access_cnt is zero and perf_enabled is false,
7229 * then the crypto clock was not enabled before sending cmd to
7230 * tz, qseecom will enable the clock to avoid service failure.
7231 */
7232 if (!qseecom.no_clock_support &&
7233 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7234 pr_debug("ce clock is not enabled!\n");
7235 ret = qseecom_perf_enable(data);
7236 if (ret) {
7237 pr_err("Failed to vote for clock with err %d\n",
7238 ret);
7239 mutex_unlock(&app_access_lock);
7240 ret = -EINVAL;
7241 break;
7242 }
7243 perf_enabled = true;
7244 }
7245 atomic_inc(&data->ioctl_count);
7246 ret = qseecom_send_cmd(data, argp);
7247 if (qseecom.support_bus_scaling)
7248 __qseecom_add_bw_scale_down_timer(
7249 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7250 if (perf_enabled) {
7251 qsee_disable_clock_vote(data, CLK_DFAB);
7252 qsee_disable_clock_vote(data, CLK_SFPB);
7253 }
7254 atomic_dec(&data->ioctl_count);
7255 wake_up_all(&data->abort_wq);
7256 mutex_unlock(&app_access_lock);
7257 if (ret)
7258 pr_err("failed qseecom_send_cmd: %d\n", ret);
7259 break;
7260 }
7261 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7262 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7263 if ((data->client.app_id == 0) ||
7264 (data->type != QSEECOM_CLIENT_APP)) {
7265 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7266 data->type, data->client.app_id);
7267 ret = -EINVAL;
7268 break;
7269 }
7270 /* Only one client allowed here at a time */
7271 mutex_lock(&app_access_lock);
7272 if (qseecom.support_bus_scaling) {
7273 if (!data->mode) {
7274 mutex_lock(&qsee_bw_mutex);
7275 __qseecom_register_bus_bandwidth_needs(
7276 data, HIGH);
7277 mutex_unlock(&qsee_bw_mutex);
7278 }
7279 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7280 if (ret) {
7281 pr_err("Failed to set bw.\n");
7282 mutex_unlock(&app_access_lock);
7283 ret = -EINVAL;
7284 break;
7285 }
7286 }
7287 /*
7288 * On targets where crypto clock is handled by HLOS,
7289 * if clk_access_cnt is zero and perf_enabled is false,
7290 * then the crypto clock was not enabled before sending cmd to
7291 * tz, qseecom will enable the clock to avoid service failure.
7292 */
7293 if (!qseecom.no_clock_support &&
7294 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7295 pr_debug("ce clock is not enabled!\n");
7296 ret = qseecom_perf_enable(data);
7297 if (ret) {
7298 pr_err("Failed to vote for clock with err %d\n",
7299 ret);
7300 mutex_unlock(&app_access_lock);
7301 ret = -EINVAL;
7302 break;
7303 }
7304 perf_enabled = true;
7305 }
7306 atomic_inc(&data->ioctl_count);
7307 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7308 ret = qseecom_send_modfd_cmd(data, argp);
7309 else
7310 ret = qseecom_send_modfd_cmd_64(data, argp);
7311 if (qseecom.support_bus_scaling)
7312 __qseecom_add_bw_scale_down_timer(
7313 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7314 if (perf_enabled) {
7315 qsee_disable_clock_vote(data, CLK_DFAB);
7316 qsee_disable_clock_vote(data, CLK_SFPB);
7317 }
7318 atomic_dec(&data->ioctl_count);
7319 wake_up_all(&data->abort_wq);
7320 mutex_unlock(&app_access_lock);
7321 if (ret)
7322 pr_err("failed qseecom_send_cmd: %d\n", ret);
7323 __qseecom_clean_data_sglistinfo(data);
7324 break;
7325 }
7326 case QSEECOM_IOCTL_RECEIVE_REQ: {
7327 if ((data->listener.id == 0) ||
7328 (data->type != QSEECOM_LISTENER_SERVICE)) {
7329 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7330 data->type, data->listener.id);
7331 ret = -EINVAL;
7332 break;
7333 }
7334 atomic_inc(&data->ioctl_count);
7335 ret = qseecom_receive_req(data);
7336 atomic_dec(&data->ioctl_count);
7337 wake_up_all(&data->abort_wq);
7338 if (ret && (ret != -ERESTARTSYS))
7339 pr_err("failed qseecom_receive_req: %d\n", ret);
7340 break;
7341 }
7342 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7343 if ((data->listener.id == 0) ||
7344 (data->type != QSEECOM_LISTENER_SERVICE)) {
7345 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7346 data->type, data->listener.id);
7347 ret = -EINVAL;
7348 break;
7349 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007350 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007351 atomic_inc(&data->ioctl_count);
7352 if (!qseecom.qsee_reentrancy_support)
7353 ret = qseecom_send_resp();
7354 else
7355 ret = qseecom_reentrancy_send_resp(data);
7356 atomic_dec(&data->ioctl_count);
7357 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007358 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007359 if (ret)
7360 pr_err("failed qseecom_send_resp: %d\n", ret);
7361 break;
7362 }
7363 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7364 if ((data->type != QSEECOM_CLIENT_APP) &&
7365 (data->type != QSEECOM_GENERIC) &&
7366 (data->type != QSEECOM_SECURE_SERVICE)) {
7367 pr_err("set mem param req: invalid handle (%d)\n",
7368 data->type);
7369 ret = -EINVAL;
7370 break;
7371 }
7372 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7373 mutex_lock(&app_access_lock);
7374 atomic_inc(&data->ioctl_count);
7375 ret = qseecom_set_client_mem_param(data, argp);
7376 atomic_dec(&data->ioctl_count);
7377 mutex_unlock(&app_access_lock);
7378 if (ret)
7379			pr_err("failed qseecom_set_mem_param request: %d\n",
7380 ret);
7381 break;
7382 }
7383 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7384 if ((data->type != QSEECOM_GENERIC) &&
7385 (data->type != QSEECOM_CLIENT_APP)) {
7386 pr_err("load app req: invalid handle (%d)\n",
7387 data->type);
7388 ret = -EINVAL;
7389 break;
7390 }
7391 data->type = QSEECOM_CLIENT_APP;
7392 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7393 mutex_lock(&app_access_lock);
7394 atomic_inc(&data->ioctl_count);
7395 ret = qseecom_load_app(data, argp);
7396 atomic_dec(&data->ioctl_count);
7397 mutex_unlock(&app_access_lock);
7398 if (ret)
7399 pr_err("failed load_app request: %d\n", ret);
7400 break;
7401 }
7402 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7403 if ((data->client.app_id == 0) ||
7404 (data->type != QSEECOM_CLIENT_APP)) {
7405 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7406 data->type, data->client.app_id);
7407 ret = -EINVAL;
7408 break;
7409 }
7410 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7411 mutex_lock(&app_access_lock);
7412 atomic_inc(&data->ioctl_count);
7413 ret = qseecom_unload_app(data, false);
7414 atomic_dec(&data->ioctl_count);
7415 mutex_unlock(&app_access_lock);
7416 if (ret)
7417 pr_err("failed unload_app request: %d\n", ret);
7418 break;
7419 }
7420 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7421 atomic_inc(&data->ioctl_count);
7422 ret = qseecom_get_qseos_version(data, argp);
7423 if (ret)
7424 pr_err("qseecom_get_qseos_version: %d\n", ret);
7425 atomic_dec(&data->ioctl_count);
7426 break;
7427 }
7428 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7429 if ((data->type != QSEECOM_GENERIC) &&
7430 (data->type != QSEECOM_CLIENT_APP)) {
7431 pr_err("perf enable req: invalid handle (%d)\n",
7432 data->type);
7433 ret = -EINVAL;
7434 break;
7435 }
7436 if ((data->type == QSEECOM_CLIENT_APP) &&
7437 (data->client.app_id == 0)) {
7438 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7439 data->type, data->client.app_id);
7440 ret = -EINVAL;
7441 break;
7442 }
7443 atomic_inc(&data->ioctl_count);
7444 if (qseecom.support_bus_scaling) {
7445 mutex_lock(&qsee_bw_mutex);
7446 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7447 mutex_unlock(&qsee_bw_mutex);
7448 } else {
7449 ret = qseecom_perf_enable(data);
7450 if (ret)
7451 pr_err("Fail to vote for clocks %d\n", ret);
7452 }
7453 atomic_dec(&data->ioctl_count);
7454 break;
7455 }
7456 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7457 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7458 (data->type != QSEECOM_CLIENT_APP)) {
7459 pr_err("perf disable req: invalid handle (%d)\n",
7460 data->type);
7461 ret = -EINVAL;
7462 break;
7463 }
7464 if ((data->type == QSEECOM_CLIENT_APP) &&
7465 (data->client.app_id == 0)) {
7466 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7467 data->type, data->client.app_id);
7468 ret = -EINVAL;
7469 break;
7470 }
7471 atomic_inc(&data->ioctl_count);
7472 if (!qseecom.support_bus_scaling) {
7473 qsee_disable_clock_vote(data, CLK_DFAB);
7474 qsee_disable_clock_vote(data, CLK_SFPB);
7475 } else {
7476 mutex_lock(&qsee_bw_mutex);
7477 qseecom_unregister_bus_bandwidth_needs(data);
7478 mutex_unlock(&qsee_bw_mutex);
7479 }
7480 atomic_dec(&data->ioctl_count);
7481 break;
7482 }
7483
7484 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7485 /* If crypto clock is not handled by HLOS, return directly. */
7486 if (qseecom.no_clock_support) {
7487 pr_debug("crypto clock is not handled by HLOS\n");
7488 break;
7489 }
7490 if ((data->client.app_id == 0) ||
7491 (data->type != QSEECOM_CLIENT_APP)) {
7492 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7493 data->type, data->client.app_id);
7494 ret = -EINVAL;
7495 break;
7496 }
7497 atomic_inc(&data->ioctl_count);
7498 ret = qseecom_scale_bus_bandwidth(data, argp);
7499 atomic_dec(&data->ioctl_count);
7500 break;
7501 }
7502 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7503 if (data->type != QSEECOM_GENERIC) {
7504 pr_err("load ext elf req: invalid client handle (%d)\n",
7505 data->type);
7506 ret = -EINVAL;
7507 break;
7508 }
7509 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7510 data->released = true;
7511 mutex_lock(&app_access_lock);
7512 atomic_inc(&data->ioctl_count);
7513 ret = qseecom_load_external_elf(data, argp);
7514 atomic_dec(&data->ioctl_count);
7515 mutex_unlock(&app_access_lock);
7516 if (ret)
7517 pr_err("failed load_external_elf request: %d\n", ret);
7518 break;
7519 }
7520 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7521 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7522 pr_err("unload ext elf req: invalid handle (%d)\n",
7523 data->type);
7524 ret = -EINVAL;
7525 break;
7526 }
7527 data->released = true;
7528 mutex_lock(&app_access_lock);
7529 atomic_inc(&data->ioctl_count);
7530 ret = qseecom_unload_external_elf(data);
7531 atomic_dec(&data->ioctl_count);
7532 mutex_unlock(&app_access_lock);
7533 if (ret)
7534 pr_err("failed unload_app request: %d\n", ret);
7535 break;
7536 }
7537 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
Zhen Kong677362c2019-08-30 10:50:25 -07007538 if ((data->type != QSEECOM_GENERIC) &&
7539 (data->type != QSEECOM_CLIENT_APP)) {
7540 pr_err("app loaded query req: invalid handle (%d)\n",
7541 data->type);
7542 ret = -EINVAL;
7543 break;
7544 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007545 data->type = QSEECOM_CLIENT_APP;
7546 mutex_lock(&app_access_lock);
7547 atomic_inc(&data->ioctl_count);
7548 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7549 ret = qseecom_query_app_loaded(data, argp);
7550 atomic_dec(&data->ioctl_count);
7551 mutex_unlock(&app_access_lock);
7552 break;
7553 }
7554 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7555 if (data->type != QSEECOM_GENERIC) {
7556 pr_err("send cmd svc req: invalid handle (%d)\n",
7557 data->type);
7558 ret = -EINVAL;
7559 break;
7560 }
7561 data->type = QSEECOM_SECURE_SERVICE;
7562 if (qseecom.qsee_version < QSEE_VERSION_03) {
7563 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7564 qseecom.qsee_version);
7565 return -EINVAL;
7566 }
7567 mutex_lock(&app_access_lock);
7568 atomic_inc(&data->ioctl_count);
7569 ret = qseecom_send_service_cmd(data, argp);
7570 atomic_dec(&data->ioctl_count);
7571 mutex_unlock(&app_access_lock);
7572 break;
7573 }
7574 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7575 if (!(qseecom.support_pfe || qseecom.support_fde))
7576 pr_err("Features requiring key init not supported\n");
7577 if (data->type != QSEECOM_GENERIC) {
7578 pr_err("create key req: invalid handle (%d)\n",
7579 data->type);
7580 ret = -EINVAL;
7581 break;
7582 }
7583 if (qseecom.qsee_version < QSEE_VERSION_05) {
7584 pr_err("Create Key feature unsupported: qsee ver %u\n",
7585 qseecom.qsee_version);
7586 return -EINVAL;
7587 }
7588 data->released = true;
7589 mutex_lock(&app_access_lock);
7590 atomic_inc(&data->ioctl_count);
7591 ret = qseecom_create_key(data, argp);
7592 if (ret)
7593 pr_err("failed to create encryption key: %d\n", ret);
7594
7595 atomic_dec(&data->ioctl_count);
7596 mutex_unlock(&app_access_lock);
7597 break;
7598 }
7599 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7600 if (!(qseecom.support_pfe || qseecom.support_fde))
7601 pr_err("Features requiring key init not supported\n");
7602 if (data->type != QSEECOM_GENERIC) {
7603 pr_err("wipe key req: invalid handle (%d)\n",
7604 data->type);
7605 ret = -EINVAL;
7606 break;
7607 }
7608 if (qseecom.qsee_version < QSEE_VERSION_05) {
7609 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7610 qseecom.qsee_version);
7611 return -EINVAL;
7612 }
7613 data->released = true;
7614 mutex_lock(&app_access_lock);
7615 atomic_inc(&data->ioctl_count);
7616 ret = qseecom_wipe_key(data, argp);
7617 if (ret)
7618 pr_err("failed to wipe encryption key: %d\n", ret);
7619 atomic_dec(&data->ioctl_count);
7620 mutex_unlock(&app_access_lock);
7621 break;
7622 }
7623 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7624 if (!(qseecom.support_pfe || qseecom.support_fde))
7625 pr_err("Features requiring key init not supported\n");
7626 if (data->type != QSEECOM_GENERIC) {
7627 pr_err("update key req: invalid handle (%d)\n",
7628 data->type);
7629 ret = -EINVAL;
7630 break;
7631 }
7632 if (qseecom.qsee_version < QSEE_VERSION_05) {
7633 pr_err("Update Key feature unsupported in qsee ver %u\n",
7634 qseecom.qsee_version);
7635 return -EINVAL;
7636 }
7637 data->released = true;
7638 mutex_lock(&app_access_lock);
7639 atomic_inc(&data->ioctl_count);
7640 ret = qseecom_update_key_user_info(data, argp);
7641 if (ret)
7642 pr_err("failed to update key user info: %d\n", ret);
7643 atomic_dec(&data->ioctl_count);
7644 mutex_unlock(&app_access_lock);
7645 break;
7646 }
7647 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7648 if (data->type != QSEECOM_GENERIC) {
7649 pr_err("save part hash req: invalid handle (%d)\n",
7650 data->type);
7651 ret = -EINVAL;
7652 break;
7653 }
7654 data->released = true;
7655 mutex_lock(&app_access_lock);
7656 atomic_inc(&data->ioctl_count);
7657 ret = qseecom_save_partition_hash(argp);
7658 atomic_dec(&data->ioctl_count);
7659 mutex_unlock(&app_access_lock);
7660 break;
7661 }
7662 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7663 if (data->type != QSEECOM_GENERIC) {
7664 pr_err("ES activated req: invalid handle (%d)\n",
7665 data->type);
7666 ret = -EINVAL;
7667 break;
7668 }
7669 data->released = true;
7670 mutex_lock(&app_access_lock);
7671 atomic_inc(&data->ioctl_count);
7672 ret = qseecom_is_es_activated(argp);
7673 atomic_dec(&data->ioctl_count);
7674 mutex_unlock(&app_access_lock);
7675 break;
7676 }
7677 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7678 if (data->type != QSEECOM_GENERIC) {
7679 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7680 data->type);
7681 ret = -EINVAL;
7682 break;
7683 }
7684 data->released = true;
7685 mutex_lock(&app_access_lock);
7686 atomic_inc(&data->ioctl_count);
7687 ret = qseecom_mdtp_cipher_dip(argp);
7688 atomic_dec(&data->ioctl_count);
7689 mutex_unlock(&app_access_lock);
7690 break;
7691 }
7692 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7693 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7694 if ((data->listener.id == 0) ||
7695 (data->type != QSEECOM_LISTENER_SERVICE)) {
7696			pr_err("send modfd resp: invalid handle (%d), lid(%d)\n",
7697 data->type, data->listener.id);
7698 ret = -EINVAL;
7699 break;
7700 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007701 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007702 atomic_inc(&data->ioctl_count);
7703 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7704 ret = qseecom_send_modfd_resp(data, argp);
7705 else
7706 ret = qseecom_send_modfd_resp_64(data, argp);
7707 atomic_dec(&data->ioctl_count);
7708 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007709 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007710 if (ret)
7711 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7712 __qseecom_clean_data_sglistinfo(data);
7713 break;
7714 }
7715 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7716 if ((data->client.app_id == 0) ||
7717 (data->type != QSEECOM_CLIENT_APP)) {
7718 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7719 data->type, data->client.app_id);
7720 ret = -EINVAL;
7721 break;
7722 }
7723 if (qseecom.qsee_version < QSEE_VERSION_40) {
7724 pr_err("GP feature unsupported: qsee ver %u\n",
7725 qseecom.qsee_version);
7726 return -EINVAL;
7727 }
7728 /* Only one client allowed here at a time */
7729 mutex_lock(&app_access_lock);
7730 atomic_inc(&data->ioctl_count);
7731 ret = qseecom_qteec_open_session(data, argp);
7732 atomic_dec(&data->ioctl_count);
7733 wake_up_all(&data->abort_wq);
7734 mutex_unlock(&app_access_lock);
7735 if (ret)
7736 pr_err("failed open_session_cmd: %d\n", ret);
7737 __qseecom_clean_data_sglistinfo(data);
7738 break;
7739 }
7740 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7741 if ((data->client.app_id == 0) ||
7742 (data->type != QSEECOM_CLIENT_APP)) {
7743 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7744 data->type, data->client.app_id);
7745 ret = -EINVAL;
7746 break;
7747 }
7748 if (qseecom.qsee_version < QSEE_VERSION_40) {
7749 pr_err("GP feature unsupported: qsee ver %u\n",
7750 qseecom.qsee_version);
7751 return -EINVAL;
7752 }
7753 /* Only one client allowed here at a time */
7754 mutex_lock(&app_access_lock);
7755 atomic_inc(&data->ioctl_count);
7756 ret = qseecom_qteec_close_session(data, argp);
7757 atomic_dec(&data->ioctl_count);
7758 wake_up_all(&data->abort_wq);
7759 mutex_unlock(&app_access_lock);
7760 if (ret)
7761 pr_err("failed close_session_cmd: %d\n", ret);
7762 break;
7763 }
7764 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7765 if ((data->client.app_id == 0) ||
7766 (data->type != QSEECOM_CLIENT_APP)) {
7767 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7768 data->type, data->client.app_id);
7769 ret = -EINVAL;
7770 break;
7771 }
7772 if (qseecom.qsee_version < QSEE_VERSION_40) {
7773 pr_err("GP feature unsupported: qsee ver %u\n",
7774 qseecom.qsee_version);
7775 return -EINVAL;
7776 }
7777 /* Only one client allowed here at a time */
7778 mutex_lock(&app_access_lock);
7779 atomic_inc(&data->ioctl_count);
7780 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7781 atomic_dec(&data->ioctl_count);
7782 wake_up_all(&data->abort_wq);
7783 mutex_unlock(&app_access_lock);
7784 if (ret)
7785 pr_err("failed Invoke cmd: %d\n", ret);
7786 __qseecom_clean_data_sglistinfo(data);
7787 break;
7788 }
7789 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7790 if ((data->client.app_id == 0) ||
7791 (data->type != QSEECOM_CLIENT_APP)) {
7792 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7793 data->type, data->client.app_id);
7794 ret = -EINVAL;
7795 break;
7796 }
7797 if (qseecom.qsee_version < QSEE_VERSION_40) {
7798 pr_err("GP feature unsupported: qsee ver %u\n",
7799 qseecom.qsee_version);
7800 return -EINVAL;
7801 }
7802 /* Only one client allowed here at a time */
7803 mutex_lock(&app_access_lock);
7804 atomic_inc(&data->ioctl_count);
7805 ret = qseecom_qteec_request_cancellation(data, argp);
7806 atomic_dec(&data->ioctl_count);
7807 wake_up_all(&data->abort_wq);
7808 mutex_unlock(&app_access_lock);
7809 if (ret)
7810 pr_err("failed request_cancellation: %d\n", ret);
7811 break;
7812 }
7813 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7814 atomic_inc(&data->ioctl_count);
7815 ret = qseecom_get_ce_info(data, argp);
7816 if (ret)
7817			pr_err("failed get ce pipe info: %d\n", ret);
7818 atomic_dec(&data->ioctl_count);
7819 break;
7820 }
7821 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7822 atomic_inc(&data->ioctl_count);
7823 ret = qseecom_free_ce_info(data, argp);
7824 if (ret)
7825			pr_err("failed free ce pipe info: %d\n", ret);
7826 atomic_dec(&data->ioctl_count);
7827 break;
7828 }
7829 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7830 atomic_inc(&data->ioctl_count);
7831 ret = qseecom_query_ce_info(data, argp);
7832 if (ret)
7833			pr_err("failed query ce pipe info: %d\n", ret);
7834 atomic_dec(&data->ioctl_count);
7835 break;
7836 }
7837 default:
7838 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7839 return -EINVAL;
7840 }
7841 return ret;
7842}
7843
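/* Allocate and initialize a per-fd qseecom_dev_handle; the handle starts as
 * QSEECOM_GENERIC and is specialized by the first ioctl issued on it.
 */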
7844static int qseecom_open(struct inode *inode, struct file *file)
7845{
7846 int ret = 0;
7847 struct qseecom_dev_handle *data;
7848
7849 data = kzalloc(sizeof(*data), GFP_KERNEL);
7850 if (!data)
7851 return -ENOMEM;
7852 file->private_data = data;
7853 data->abort = 0;
7854 data->type = QSEECOM_GENERIC;
7855 data->released = false;
7856 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7857 data->mode = INACTIVE;
7858 init_waitqueue_head(&data->abort_wq);
7859 atomic_set(&data->ioctl_count, 0);
7860 return ret;
7861}
7862
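/*
 * qseecom_release() - undo whatever the handle was used for: unregister a
 * listener, unload a client app, or unmap ION memory, then drop any
 * outstanding bus-scaling or clock votes. For listener handles
 * free_private_data is cleared so the handle structure is not freed here.
 */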
7863static int qseecom_release(struct inode *inode, struct file *file)
7864{
7865 struct qseecom_dev_handle *data = file->private_data;
7866 int ret = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08007867 bool free_private_data = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007868
7869 if (data->released == false) {
7870 pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
7871 data->type, data->mode, data);
7872 switch (data->type) {
7873 case QSEECOM_LISTENER_SERVICE:
Zhen Kongbcdeda22018-11-16 13:50:51 -08007874 pr_debug("release lsnr svc %d\n", data->listener.id);
7875 free_private_data = false;
7876 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007877 ret = qseecom_unregister_listener(data);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08007878 data->listener.release_called = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08007879 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007880 break;
7881 case QSEECOM_CLIENT_APP:
7882 mutex_lock(&app_access_lock);
7883 ret = qseecom_unload_app(data, true);
7884 mutex_unlock(&app_access_lock);
7885 break;
7886 case QSEECOM_SECURE_SERVICE:
7887 case QSEECOM_GENERIC:
7888 ret = qseecom_unmap_ion_allocated_memory(data);
7889 if (ret)
7890 pr_err("Ion Unmap failed\n");
7891 break;
7892 case QSEECOM_UNAVAILABLE_CLIENT_APP:
7893 break;
7894 default:
7895			pr_err("Unsupported clnt_handle_type %d\n",
7896 data->type);
7897 break;
7898 }
7899 }
7900
7901 if (qseecom.support_bus_scaling) {
7902 mutex_lock(&qsee_bw_mutex);
7903 if (data->mode != INACTIVE) {
7904 qseecom_unregister_bus_bandwidth_needs(data);
7905 if (qseecom.cumulative_mode == INACTIVE) {
7906 ret = __qseecom_set_msm_bus_request(INACTIVE);
7907 if (ret)
7908 pr_err("Fail to scale down bus\n");
7909 }
7910 }
7911 mutex_unlock(&qsee_bw_mutex);
7912 } else {
7913 if (data->fast_load_enabled == true)
7914 qsee_disable_clock_vote(data, CLK_SFPB);
7915 if (data->perf_enabled == true)
7916 qsee_disable_clock_vote(data, CLK_DFAB);
7917 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007918
Zhen Kongbcdeda22018-11-16 13:50:51 -08007919 if (free_private_data)
7920 kfree(data);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007921 return ret;
7922}
7923
7924#ifdef CONFIG_COMPAT
7925#include "compat_qseecom.c"
7926#else
7927#define compat_qseecom_ioctl NULL
7928#endif
7929
7930static const struct file_operations qseecom_fops = {
7931 .owner = THIS_MODULE,
7932 .unlocked_ioctl = qseecom_ioctl,
7933 .compat_ioctl = compat_qseecom_ioctl,
7934 .open = qseecom_open,
7935 .release = qseecom_release
7936};
7937
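/*
 * __qseecom_init_clk() - look up and rate-set the crypto engine clocks
 * (core src, core, iface, bus) for either the QSEE or the CE driver
 * instance. When the target handles crypto clocks outside HLOS
 * (qseecom.no_clock_support) all clock handles are left NULL.
 */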
7938static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7939{
7940 int rc = 0;
7941 struct device *pdev;
7942 struct qseecom_clk *qclk;
7943 char *core_clk_src = NULL;
7944 char *core_clk = NULL;
7945 char *iface_clk = NULL;
7946 char *bus_clk = NULL;
7947
7948 switch (ce) {
7949 case CLK_QSEE: {
7950 core_clk_src = "core_clk_src";
7951 core_clk = "core_clk";
7952 iface_clk = "iface_clk";
7953 bus_clk = "bus_clk";
7954 qclk = &qseecom.qsee;
7955 qclk->instance = CLK_QSEE;
7956 break;
7957	}
7958 case CLK_CE_DRV: {
7959 core_clk_src = "ce_drv_core_clk_src";
7960 core_clk = "ce_drv_core_clk";
7961 iface_clk = "ce_drv_iface_clk";
7962 bus_clk = "ce_drv_bus_clk";
7963 qclk = &qseecom.ce_drv;
7964 qclk->instance = CLK_CE_DRV;
7965 break;
7966	}
7967 default:
7968 pr_err("Invalid ce hw instance: %d!\n", ce);
7969 return -EIO;
7970 }
7971
7972 if (qseecom.no_clock_support) {
7973 qclk->ce_core_clk = NULL;
7974 qclk->ce_clk = NULL;
7975 qclk->ce_bus_clk = NULL;
7976 qclk->ce_core_src_clk = NULL;
7977 return 0;
7978 }
7979
7980 pdev = qseecom.pdev;
7981
7982 /* Get CE3 src core clk. */
7983 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7984 if (!IS_ERR(qclk->ce_core_src_clk)) {
7985 rc = clk_set_rate(qclk->ce_core_src_clk,
7986 qseecom.ce_opp_freq_hz);
7987 if (rc) {
7988 clk_put(qclk->ce_core_src_clk);
7989 qclk->ce_core_src_clk = NULL;
7990			pr_err("Unable to set the core src clk @%uMHz.\n",
7991 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7992 return -EIO;
7993 }
7994 } else {
7995 pr_warn("Unable to get CE core src clk, set to NULL\n");
7996 qclk->ce_core_src_clk = NULL;
7997 }
7998
7999 /* Get CE core clk */
8000 qclk->ce_core_clk = clk_get(pdev, core_clk);
8001 if (IS_ERR(qclk->ce_core_clk)) {
8002 rc = PTR_ERR(qclk->ce_core_clk);
8003 pr_err("Unable to get CE core clk\n");
8004 if (qclk->ce_core_src_clk != NULL)
8005 clk_put(qclk->ce_core_src_clk);
8006 return -EIO;
8007 }
8008
8009 /* Get CE Interface clk */
8010 qclk->ce_clk = clk_get(pdev, iface_clk);
8011 if (IS_ERR(qclk->ce_clk)) {
8012 rc = PTR_ERR(qclk->ce_clk);
8013 pr_err("Unable to get CE interface clk\n");
8014 if (qclk->ce_core_src_clk != NULL)
8015 clk_put(qclk->ce_core_src_clk);
8016 clk_put(qclk->ce_core_clk);
8017 return -EIO;
8018 }
8019
8020 /* Get CE AXI clk */
8021 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
8022 if (IS_ERR(qclk->ce_bus_clk)) {
8023 rc = PTR_ERR(qclk->ce_bus_clk);
8024 pr_err("Unable to get CE BUS interface clk\n");
8025 if (qclk->ce_core_src_clk != NULL)
8026 clk_put(qclk->ce_core_src_clk);
8027 clk_put(qclk->ce_core_clk);
8028 clk_put(qclk->ce_clk);
8029 return -EIO;
8030 }
8031
8032 return rc;
8033}
8034
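/* Release the clock handles obtained in __qseecom_init_clk(). */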
8035static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
8036{
8037 struct qseecom_clk *qclk;
8038
8039 if (ce == CLK_QSEE)
8040 qclk = &qseecom.qsee;
8041 else
8042 qclk = &qseecom.ce_drv;
8043
8044 if (qclk->ce_clk != NULL) {
8045 clk_put(qclk->ce_clk);
8046 qclk->ce_clk = NULL;
8047 }
8048 if (qclk->ce_core_clk != NULL) {
8049 clk_put(qclk->ce_core_clk);
8050 qclk->ce_core_clk = NULL;
8051 }
8052 if (qclk->ce_bus_clk != NULL) {
8053 clk_put(qclk->ce_bus_clk);
8054 qclk->ce_bus_clk = NULL;
8055 }
8056 if (qclk->ce_core_src_clk != NULL) {
8057 clk_put(qclk->ce_core_src_clk);
8058 qclk->ce_core_src_clk = NULL;
8059 }
8060 qclk->instance = CLK_INVALID;
8061}
8062
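/*
 * qseecom_retrieve_ce_data() - parse crypto engine (CE) configuration from
 * device tree. Newer DTs provide qcom,full-disk-encrypt-info and
 * qcom,per-file-encrypt-info tables that are grouped into per-unit
 * qseecom_ce_info_use entries; otherwise the legacy properties
 * (qcom,disk-encrypt-pipe-pair, qcom,file-encrypt-pipe-pair and the HLOS CE
 * hw instance list) are used to build a single default unit each for FDE
 * and PFE.
 */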
8063static int qseecom_retrieve_ce_data(struct platform_device *pdev)
8064{
8065 int rc = 0;
8066 uint32_t hlos_num_ce_hw_instances;
8067 uint32_t disk_encrypt_pipe;
8068 uint32_t file_encrypt_pipe;
Zhen Kongffec45c2017-10-18 14:05:53 -07008069 uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008070 int i;
8071 const int *tbl;
8072 int size;
8073 int entry;
8074 struct qseecom_crypto_info *pfde_tbl = NULL;
8075 struct qseecom_crypto_info *p;
8076 int tbl_size;
8077 int j;
8078 bool old_db = true;
8079 struct qseecom_ce_info_use *pce_info_use;
8080 uint32_t *unit_tbl = NULL;
8081 int total_units = 0;
8082 struct qseecom_ce_pipe_entry *pce_entry;
8083
8084 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
8085 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
8086
8087 if (of_property_read_u32((&pdev->dev)->of_node,
8088 "qcom,qsee-ce-hw-instance",
8089 &qseecom.ce_info.qsee_ce_hw_instance)) {
8090 pr_err("Fail to get qsee ce hw instance information.\n");
8091 rc = -EINVAL;
8092 goto out;
8093 } else {
8094 pr_debug("qsee-ce-hw-instance=0x%x\n",
8095 qseecom.ce_info.qsee_ce_hw_instance);
8096 }
8097
8098 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
8099 "qcom,support-fde");
8100 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
8101 "qcom,support-pfe");
8102
8103 if (!qseecom.support_pfe && !qseecom.support_fde) {
8104		pr_warn("Device does not support PFE/FDE\n");
8105 goto out;
8106 }
8107
8108 if (qseecom.support_fde)
8109 tbl = of_get_property((&pdev->dev)->of_node,
8110 "qcom,full-disk-encrypt-info", &size);
8111 else
8112 tbl = NULL;
8113 if (tbl) {
8114 old_db = false;
8115 if (size % sizeof(struct qseecom_crypto_info)) {
8116			pr_err("invalid full-disk-encrypt-info tbl size (%d)\n",
8117 size);
8118 rc = -EINVAL;
8119 goto out;
8120 }
8121 tbl_size = size / sizeof
8122 (struct qseecom_crypto_info);
8123
8124 pfde_tbl = kzalloc(size, GFP_KERNEL);
8125 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8126 total_units = 0;
8127
8128 if (!pfde_tbl || !unit_tbl) {
8129 pr_err("failed to alloc memory\n");
8130 rc = -ENOMEM;
8131 goto out;
8132 }
8133 if (of_property_read_u32_array((&pdev->dev)->of_node,
8134 "qcom,full-disk-encrypt-info",
8135 (u32 *)pfde_tbl, size/sizeof(u32))) {
8136 pr_err("failed to read full-disk-encrypt-info tbl\n");
8137 rc = -EINVAL;
8138 goto out;
8139 }
8140
8141 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8142 for (j = 0; j < total_units; j++) {
8143 if (p->unit_num == *(unit_tbl + j))
8144 break;
8145 }
8146 if (j == total_units) {
8147 *(unit_tbl + total_units) = p->unit_num;
8148 total_units++;
8149 }
8150 }
8151
8152 qseecom.ce_info.num_fde = total_units;
8153 pce_info_use = qseecom.ce_info.fde = kcalloc(
8154 total_units, sizeof(struct qseecom_ce_info_use),
8155 GFP_KERNEL);
8156 if (!pce_info_use) {
8157 pr_err("failed to alloc memory\n");
8158 rc = -ENOMEM;
8159 goto out;
8160 }
8161
8162 for (j = 0; j < total_units; j++, pce_info_use++) {
8163 pce_info_use->unit_num = *(unit_tbl + j);
8164 pce_info_use->alloc = false;
8165 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8166 pce_info_use->num_ce_pipe_entries = 0;
8167 pce_info_use->ce_pipe_entry = NULL;
8168 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8169 if (p->unit_num == pce_info_use->unit_num)
8170 pce_info_use->num_ce_pipe_entries++;
8171 }
8172
8173 entry = pce_info_use->num_ce_pipe_entries;
8174 pce_entry = pce_info_use->ce_pipe_entry =
8175 kcalloc(entry,
8176 sizeof(struct qseecom_ce_pipe_entry),
8177 GFP_KERNEL);
8178 if (pce_entry == NULL) {
8179 pr_err("failed to alloc memory\n");
8180 rc = -ENOMEM;
8181 goto out;
8182 }
8183
8184 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8185 if (p->unit_num == pce_info_use->unit_num) {
8186 pce_entry->ce_num = p->ce;
8187 pce_entry->ce_pipe_pair =
8188 p->pipe_pair;
8189 pce_entry->valid = true;
8190 pce_entry++;
8191 }
8192 }
8193 }
8194 kfree(unit_tbl);
8195 unit_tbl = NULL;
8196 kfree(pfde_tbl);
8197 pfde_tbl = NULL;
8198 }
8199
8200 if (qseecom.support_pfe)
8201 tbl = of_get_property((&pdev->dev)->of_node,
8202 "qcom,per-file-encrypt-info", &size);
8203 else
8204 tbl = NULL;
8205 if (tbl) {
8206 old_db = false;
8207 if (size % sizeof(struct qseecom_crypto_info)) {
8208			pr_err("invalid per-file-encrypt-info tbl size (%d)\n",
8209 size);
8210 rc = -EINVAL;
8211 goto out;
8212 }
8213 tbl_size = size / sizeof
8214 (struct qseecom_crypto_info);
8215
8216 pfde_tbl = kzalloc(size, GFP_KERNEL);
8217 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8218 total_units = 0;
8219 if (!pfde_tbl || !unit_tbl) {
8220 pr_err("failed to alloc memory\n");
8221 rc = -ENOMEM;
8222 goto out;
8223 }
8224 if (of_property_read_u32_array((&pdev->dev)->of_node,
8225 "qcom,per-file-encrypt-info",
8226 (u32 *)pfde_tbl, size/sizeof(u32))) {
8227 pr_err("failed to read per-file-encrypt-info tbl\n");
8228 rc = -EINVAL;
8229 goto out;
8230 }
8231
8232 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8233 for (j = 0; j < total_units; j++) {
8234 if (p->unit_num == *(unit_tbl + j))
8235 break;
8236 }
8237 if (j == total_units) {
8238 *(unit_tbl + total_units) = p->unit_num;
8239 total_units++;
8240 }
8241 }
8242
8243 qseecom.ce_info.num_pfe = total_units;
8244 pce_info_use = qseecom.ce_info.pfe = kcalloc(
8245 total_units, sizeof(struct qseecom_ce_info_use),
8246 GFP_KERNEL);
8247 if (!pce_info_use) {
8248 pr_err("failed to alloc memory\n");
8249 rc = -ENOMEM;
8250 goto out;
8251 }
8252
8253 for (j = 0; j < total_units; j++, pce_info_use++) {
8254 pce_info_use->unit_num = *(unit_tbl + j);
8255 pce_info_use->alloc = false;
8256 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8257 pce_info_use->num_ce_pipe_entries = 0;
8258 pce_info_use->ce_pipe_entry = NULL;
8259 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8260 if (p->unit_num == pce_info_use->unit_num)
8261 pce_info_use->num_ce_pipe_entries++;
8262 }
8263
8264 entry = pce_info_use->num_ce_pipe_entries;
8265 pce_entry = pce_info_use->ce_pipe_entry =
8266 kcalloc(entry,
8267 sizeof(struct qseecom_ce_pipe_entry),
8268 GFP_KERNEL);
8269 if (pce_entry == NULL) {
8270 pr_err("failed to alloc memory\n");
8271 rc = -ENOMEM;
8272 goto out;
8273 }
8274
8275 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8276 if (p->unit_num == pce_info_use->unit_num) {
8277 pce_entry->ce_num = p->ce;
8278 pce_entry->ce_pipe_pair =
8279 p->pipe_pair;
8280 pce_entry->valid = true;
8281 pce_entry++;
8282 }
8283 }
8284 }
8285 kfree(unit_tbl);
8286 unit_tbl = NULL;
8287 kfree(pfde_tbl);
8288 pfde_tbl = NULL;
8289 }
8290
8291 if (!old_db)
8292 goto out1;
8293
8294 if (of_property_read_bool((&pdev->dev)->of_node,
8295 "qcom,support-multiple-ce-hw-instance")) {
8296 if (of_property_read_u32((&pdev->dev)->of_node,
8297 "qcom,hlos-num-ce-hw-instances",
8298 &hlos_num_ce_hw_instances)) {
8299 pr_err("Fail: get hlos number of ce hw instance\n");
8300 rc = -EINVAL;
8301 goto out;
8302 }
8303 } else {
8304 hlos_num_ce_hw_instances = 1;
8305 }
8306
8307 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8308 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8309 MAX_CE_PIPE_PAIR_PER_UNIT);
8310 rc = -EINVAL;
8311 goto out;
8312 }
8313
8314 if (of_property_read_u32_array((&pdev->dev)->of_node,
8315 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8316 hlos_num_ce_hw_instances)) {
8317 pr_err("Fail: get hlos ce hw instance info\n");
8318 rc = -EINVAL;
8319 goto out;
8320 }
8321
8322 if (qseecom.support_fde) {
8323 pce_info_use = qseecom.ce_info.fde =
8324 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8325 if (!pce_info_use) {
8326 pr_err("failed to alloc memory\n");
8327 rc = -ENOMEM;
8328 goto out;
8329 }
8330 /* by default for old db */
8331 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8332 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8333 pce_info_use->alloc = false;
8334 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8335 pce_info_use->ce_pipe_entry = NULL;
8336 if (of_property_read_u32((&pdev->dev)->of_node,
8337 "qcom,disk-encrypt-pipe-pair",
8338 &disk_encrypt_pipe)) {
8339 pr_err("Fail to get FDE pipe information.\n");
8340 rc = -EINVAL;
8341 goto out;
8342 } else {
8343			pr_debug("disk-encrypt-pipe-pair=0x%x\n",
8344 disk_encrypt_pipe);
8345 }
8346 entry = pce_info_use->num_ce_pipe_entries =
8347 hlos_num_ce_hw_instances;
8348 pce_entry = pce_info_use->ce_pipe_entry =
8349 kcalloc(entry,
8350 sizeof(struct qseecom_ce_pipe_entry),
8351 GFP_KERNEL);
8352 if (pce_entry == NULL) {
8353 pr_err("failed to alloc memory\n");
8354 rc = -ENOMEM;
8355 goto out;
8356 }
8357 for (i = 0; i < entry; i++) {
8358 pce_entry->ce_num = hlos_ce_hw_instance[i];
8359 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8360 pce_entry->valid = 1;
8361 pce_entry++;
8362 }
8363 } else {
8364		pr_warn("Device does not support FDE\n");
8365 disk_encrypt_pipe = 0xff;
8366 }
8367 if (qseecom.support_pfe) {
8368 pce_info_use = qseecom.ce_info.pfe =
8369 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8370 if (!pce_info_use) {
8371 pr_err("failed to alloc memory\n");
8372 rc = -ENOMEM;
8373 goto out;
8374 }
8375 /* by default for old db */
8376 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8377 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8378 pce_info_use->alloc = false;
8379 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8380 pce_info_use->ce_pipe_entry = NULL;
8381
8382 if (of_property_read_u32((&pdev->dev)->of_node,
8383 "qcom,file-encrypt-pipe-pair",
8384 &file_encrypt_pipe)) {
8385 pr_err("Fail to get PFE pipe information.\n");
8386 rc = -EINVAL;
8387 goto out;
8388 } else {
8389			pr_debug("file-encrypt-pipe-pair=0x%x\n",
8390 file_encrypt_pipe);
8391 }
8392 entry = pce_info_use->num_ce_pipe_entries =
8393 hlos_num_ce_hw_instances;
8394 pce_entry = pce_info_use->ce_pipe_entry =
8395 kcalloc(entry,
8396 sizeof(struct qseecom_ce_pipe_entry),
8397 GFP_KERNEL);
8398 if (pce_entry == NULL) {
8399 pr_err("failed to alloc memory\n");
8400 rc = -ENOMEM;
8401 goto out;
8402 }
8403 for (i = 0; i < entry; i++) {
8404 pce_entry->ce_num = hlos_ce_hw_instance[i];
8405 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8406 pce_entry->valid = 1;
8407 pce_entry++;
8408 }
8409 } else {
8410		pr_warn("Device does not support PFE\n");
8411 file_encrypt_pipe = 0xff;
8412 }
8413
8414out1:
8415 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8416 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8417out:
8418 if (rc) {
8419 if (qseecom.ce_info.fde) {
8420 pce_info_use = qseecom.ce_info.fde;
8421 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8422 pce_entry = pce_info_use->ce_pipe_entry;
8423 kfree(pce_entry);
8424 pce_info_use++;
8425 }
8426 }
8427 kfree(qseecom.ce_info.fde);
8428 qseecom.ce_info.fde = NULL;
8429 if (qseecom.ce_info.pfe) {
8430 pce_info_use = qseecom.ce_info.pfe;
8431 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8432 pce_entry = pce_info_use->ce_pipe_entry;
8433 kfree(pce_entry);
8434 pce_info_use++;
8435 }
8436 }
8437 kfree(qseecom.ce_info.pfe);
8438 qseecom.ce_info.pfe = NULL;
8439 }
8440 kfree(unit_tbl);
8441 kfree(pfde_tbl);
8442 return rc;
8443}
8444
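/*
 * QSEECOM_IOCTL_GET_CE_PIPE_INFO handler: find (or allocate) the CE info
 * unit matching the caller's usage and handle, mark it allocated, and copy
 * its pipe-pair entries back to user space.
 */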
8445static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8446 void __user *argp)
8447{
8448 struct qseecom_ce_info_req req;
8449 struct qseecom_ce_info_req *pinfo = &req;
8450 int ret = 0;
8451 int i;
8452 unsigned int entries;
8453 struct qseecom_ce_info_use *pce_info_use, *p;
8454 int total = 0;
8455 bool found = false;
8456 struct qseecom_ce_pipe_entry *pce_entry;
8457
8458 ret = copy_from_user(pinfo, argp,
8459 sizeof(struct qseecom_ce_info_req));
8460 if (ret) {
8461 pr_err("copy_from_user failed\n");
8462 return ret;
8463 }
8464
8465 switch (pinfo->usage) {
8466 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8467 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8468 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8469 if (qseecom.support_fde) {
8470 p = qseecom.ce_info.fde;
8471 total = qseecom.ce_info.num_fde;
8472 } else {
8473 pr_err("system does not support fde\n");
8474 return -EINVAL;
8475 }
8476 break;
8477 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8478 if (qseecom.support_pfe) {
8479 p = qseecom.ce_info.pfe;
8480 total = qseecom.ce_info.num_pfe;
8481 } else {
8482 pr_err("system does not support pfe\n");
8483 return -EINVAL;
8484 }
8485 break;
8486 default:
8487 pr_err("unsupported usage %d\n", pinfo->usage);
8488 return -EINVAL;
8489 }
8490
8491 pce_info_use = NULL;
8492 for (i = 0; i < total; i++) {
8493 if (!p->alloc)
8494 pce_info_use = p;
8495 else if (!memcmp(p->handle, pinfo->handle,
8496 MAX_CE_INFO_HANDLE_SIZE)) {
8497 pce_info_use = p;
8498 found = true;
8499 break;
8500 }
8501 p++;
8502 }
8503
8504 if (pce_info_use == NULL)
8505 return -EBUSY;
8506
8507 pinfo->unit_num = pce_info_use->unit_num;
8508 if (!pce_info_use->alloc) {
8509 pce_info_use->alloc = true;
8510 memcpy(pce_info_use->handle,
8511 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8512 }
8513 if (pce_info_use->num_ce_pipe_entries >
8514 MAX_CE_PIPE_PAIR_PER_UNIT)
8515 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8516 else
8517 entries = pce_info_use->num_ce_pipe_entries;
8518 pinfo->num_ce_pipe_entries = entries;
8519 pce_entry = pce_info_use->ce_pipe_entry;
8520 for (i = 0; i < entries; i++, pce_entry++)
8521 pinfo->ce_pipe_entry[i] = *pce_entry;
8522 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8523 pinfo->ce_pipe_entry[i].valid = 0;
8524
8525 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8526 pr_err("copy_to_user failed\n");
8527 ret = -EFAULT;
8528 }
8529 return ret;
8530}
8531
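/*
 * qseecom_free_ce_info() - release a crypto-engine info unit.
 *
 * Looks up the allocated FDE/PFE unit whose handle matches the request from
 * user space, clears the stored handle and marks the unit free again. As the
 * lookup loop below shows, a request for an unknown handle is not treated as
 * an error.
 */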
8532static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8533 void __user *argp)
8534{
8535 struct qseecom_ce_info_req req;
8536 struct qseecom_ce_info_req *pinfo = &req;
8537 int ret = 0;
8538 struct qseecom_ce_info_use *p;
8539 int total = 0;
8540 int i;
8541 bool found = false;
8542
8543 ret = copy_from_user(pinfo, argp,
8544 sizeof(struct qseecom_ce_info_req));
8545 if (ret)
8546		return -EFAULT;
8547
8548 switch (pinfo->usage) {
8549 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8550 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8551 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8552 if (qseecom.support_fde) {
8553 p = qseecom.ce_info.fde;
8554 total = qseecom.ce_info.num_fde;
8555 } else {
8556 pr_err("system does not support fde\n");
8557 return -EINVAL;
8558 }
8559 break;
8560 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8561 if (qseecom.support_pfe) {
8562 p = qseecom.ce_info.pfe;
8563 total = qseecom.ce_info.num_pfe;
8564 } else {
8565 pr_err("system does not support pfe\n");
8566 return -EINVAL;
8567 }
8568 break;
8569 default:
8570 pr_err("unsupported usage %d\n", pinfo->usage);
8571 return -EINVAL;
8572 }
8573
8574 for (i = 0; i < total; i++) {
8575 if (p->alloc &&
8576 !memcmp(p->handle, pinfo->handle,
8577 MAX_CE_INFO_HANDLE_SIZE)) {
8578 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8579 p->alloc = false;
8580 found = true;
8581 break;
8582 }
8583 p++;
8584 }
8585 return ret;
8586}
8587
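/*
 * qseecom_query_ce_info() - report a crypto-engine info unit without
 * allocating it.
 *
 * Read-only counterpart of qseecom_get_ce_info(): if the handle from user
 * space matches an allocated unit, its unit number and pipe entries are
 * returned; otherwise the reply carries INVALID_CE_INFO_UNIT_NUM and zero
 * pipe entries rather than an error code.
 */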
8588static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8589 void __user *argp)
8590{
8591 struct qseecom_ce_info_req req;
8592 struct qseecom_ce_info_req *pinfo = &req;
8593 int ret = 0;
8594 int i;
8595 unsigned int entries;
8596 struct qseecom_ce_info_use *pce_info_use, *p;
8597 int total = 0;
8598 bool found = false;
8599 struct qseecom_ce_pipe_entry *pce_entry;
8600
8601 ret = copy_from_user(pinfo, argp,
8602 sizeof(struct qseecom_ce_info_req));
8603 if (ret)
8604		return -EFAULT;
8605
8606 switch (pinfo->usage) {
8607 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8608 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8609 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8610 if (qseecom.support_fde) {
8611 p = qseecom.ce_info.fde;
8612 total = qseecom.ce_info.num_fde;
8613 } else {
8614 pr_err("system does not support fde\n");
8615 return -EINVAL;
8616 }
8617 break;
8618 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8619 if (qseecom.support_pfe) {
8620 p = qseecom.ce_info.pfe;
8621 total = qseecom.ce_info.num_pfe;
8622 } else {
8623 pr_err("system does not support pfe\n");
8624 return -EINVAL;
8625 }
8626 break;
8627 default:
8628 pr_err("unsupported usage %d\n", pinfo->usage);
8629 return -EINVAL;
8630 }
8631
8632 pce_info_use = NULL;
8633 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8634 pinfo->num_ce_pipe_entries = 0;
8635 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8636 pinfo->ce_pipe_entry[i].valid = 0;
8637
8638 for (i = 0; i < total; i++) {
8639
8640 if (p->alloc && !memcmp(p->handle,
8641 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8642 pce_info_use = p;
8643 found = true;
8644 break;
8645 }
8646 p++;
8647 }
8648 if (!pce_info_use)
8649 goto out;
8650 pinfo->unit_num = pce_info_use->unit_num;
8651 if (pce_info_use->num_ce_pipe_entries >
8652 MAX_CE_PIPE_PAIR_PER_UNIT)
8653 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8654 else
8655 entries = pce_info_use->num_ce_pipe_entries;
8656 pinfo->num_ce_pipe_entries = entries;
8657 pce_entry = pce_info_use->ce_pipe_entry;
8658 for (i = 0; i < entries; i++, pce_entry++)
8659 pinfo->ce_pipe_entry[i] = *pce_entry;
8660 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8661 pinfo->ce_pipe_entry[i].valid = 0;
8662out:
8663 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8664 pr_err("copy_to_user failed\n");
8665 ret = -EFAULT;
8666 }
8667 return ret;
8668}
8669
8670/*
8671 * Check the TZ whitelist feature version; the whitelist feature is
8672 * only supported when the version is 1.0.0 or higher.
8673 */
8674static int qseecom_check_whitelist_feature(void)
8675{
8676 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8677
8678 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8679}
8680
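/*
 * qseecom_probe() - bring up the qseecom character device.
 *
 * Creates the device node and class, queries the QSEE version from TZ,
 * parses the device-tree configuration (CE pipe info, clocks, bus scaling),
 * notifies TZ of the secure app region when required, registers the bus
 * bandwidth client and starts the listener-unregister kthread before
 * marking the driver QSEECOM_STATE_READY.
 */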
8681static int qseecom_probe(struct platform_device *pdev)
8682{
8683 int rc;
8684 int i;
8685 uint32_t feature = 10;
8686 struct device *class_dev;
8687 struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
8688 struct qseecom_command_scm_resp resp;
8689 struct qseecom_ce_info_use *pce_info_use = NULL;
8690
8691 qseecom.qsee_bw_count = 0;
8692 qseecom.qsee_perf_client = 0;
8693 qseecom.qsee_sfpb_bw_count = 0;
8694
8695 qseecom.qsee.ce_core_clk = NULL;
8696 qseecom.qsee.ce_clk = NULL;
8697 qseecom.qsee.ce_core_src_clk = NULL;
8698 qseecom.qsee.ce_bus_clk = NULL;
8699
8700 qseecom.cumulative_mode = 0;
8701 qseecom.current_mode = INACTIVE;
8702 qseecom.support_bus_scaling = false;
8703 qseecom.support_fde = false;
8704 qseecom.support_pfe = false;
8705
8706 qseecom.ce_drv.ce_core_clk = NULL;
8707 qseecom.ce_drv.ce_clk = NULL;
8708 qseecom.ce_drv.ce_core_src_clk = NULL;
8709 qseecom.ce_drv.ce_bus_clk = NULL;
8710 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8711
8712 qseecom.app_block_ref_cnt = 0;
8713 init_waitqueue_head(&qseecom.app_block_wq);
8714 qseecom.whitelist_support = true;
8715
8716 rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
8717 if (rc < 0) {
8718 pr_err("alloc_chrdev_region failed %d\n", rc);
8719 return rc;
8720 }
8721
8722 driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
8723 if (IS_ERR(driver_class)) {
8724 rc = -ENOMEM;
8725 pr_err("class_create failed %d\n", rc);
8726 goto exit_unreg_chrdev_region;
8727 }
8728
8729 class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
8730 QSEECOM_DEV);
8731 if (IS_ERR(class_dev)) {
8732		rc = -ENOMEM;
8733		pr_err("class_device_create failed %d\n", rc);
8734 goto exit_destroy_class;
8735 }
8736
8737 cdev_init(&qseecom.cdev, &qseecom_fops);
8738 qseecom.cdev.owner = THIS_MODULE;
8739
8740 rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
8741 if (rc < 0) {
8742 pr_err("cdev_add failed %d\n", rc);
8743 goto exit_destroy_device;
8744 }
8745
8746 INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
8747	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
8748	spin_lock_init(&qseecom.registered_app_list_lock);
8749	INIT_LIST_HEAD(&qseecom.unregister_lsnr_pending_list_head);
8750	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
8751	spin_lock_init(&qseecom.registered_kclient_list_lock);
8752	init_waitqueue_head(&qseecom.send_resp_wq);
8753	init_waitqueue_head(&qseecom.register_lsnr_pending_wq);
8754	init_waitqueue_head(&qseecom.unregister_lsnr_kthread_wq);
8755	qseecom.send_resp_flag = 0;
8756
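	/*
	 * Query the QSEE software version from TZ; app_access_lock
	 * serializes this SCM call with other TZ requests.
	 */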
8757 qseecom.qsee_version = QSEEE_VERSION_00;
8758	mutex_lock(&app_access_lock);
8759	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
8760		&resp, sizeof(resp));
8761	mutex_unlock(&app_access_lock);
8762	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
8763 if (rc) {
8764 pr_err("Failed to get QSEE version info %d\n", rc);
8765 goto exit_del_cdev;
8766 }
8767 qseecom.qsee_version = resp.result;
8768 qseecom.qseos_version = QSEOS_VERSION_14;
8769 qseecom.commonlib_loaded = false;
8770 qseecom.commonlib64_loaded = false;
8771 qseecom.pdev = class_dev;
8772 /* Create ION msm client */
8773 qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
8774 if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
8775 pr_err("Ion client cannot be created\n");
8776 rc = -ENOMEM;
8777 goto exit_del_cdev;
8778 }
8779
8780	/* Parse device-tree configuration (CE info, clocks, bus scaling) */
8781 if (pdev->dev.of_node) {
8782 qseecom.pdev->of_node = pdev->dev.of_node;
8783 qseecom.support_bus_scaling =
8784 of_property_read_bool((&pdev->dev)->of_node,
8785 "qcom,support-bus-scaling");
8786 rc = qseecom_retrieve_ce_data(pdev);
8787 if (rc)
8788 goto exit_destroy_ion_client;
8789 qseecom.appsbl_qseecom_support =
8790 of_property_read_bool((&pdev->dev)->of_node,
8791 "qcom,appsbl-qseecom-support");
8792 pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
8793 qseecom.appsbl_qseecom_support);
8794
8795 qseecom.commonlib64_loaded =
8796 of_property_read_bool((&pdev->dev)->of_node,
8797 "qcom,commonlib64-loaded-by-uefi");
8798 pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
8799 qseecom.commonlib64_loaded);
8800 qseecom.fde_key_size =
8801 of_property_read_bool((&pdev->dev)->of_node,
8802 "qcom,fde-key-size");
8803 qseecom.no_clock_support =
8804 of_property_read_bool((&pdev->dev)->of_node,
8805 "qcom,no-clock-support");
8806		if (qseecom.no_clock_support) {
8807 pr_info("qseecom clocks handled by other subsystem\n");
8808 } else {
8809 pr_info("no-clock-support=0x%x",
8810 qseecom.no_clock_support);
8811 }
8812
8813 if (of_property_read_u32((&pdev->dev)->of_node,
8814 "qcom,qsee-reentrancy-support",
8815 &qseecom.qsee_reentrancy_support)) {
8816 pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
8817 qseecom.qsee_reentrancy_support = 0;
8818 } else {
8819 pr_warn("qseecom.qsee_reentrancy_support = %d\n",
8820 qseecom.qsee_reentrancy_support);
8821 }
8822
8823		qseecom.enable_key_wrap_in_ks =
8824 of_property_read_bool((&pdev->dev)->of_node,
8825 "qcom,enable-key-wrap-in-ks");
8826 if (qseecom.enable_key_wrap_in_ks) {
8827 pr_warn("qseecom.enable_key_wrap_in_ks = %d\n",
8828 qseecom.enable_key_wrap_in_ks);
8829 }
8830
8831		/*
8832 * The qseecom bus scaling flag can not be enabled when
8833 * crypto clock is not handled by HLOS.
8834 */
8835 if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
8836 pr_err("support_bus_scaling flag can not be enabled.\n");
8837 rc = -EINVAL;
8838 goto exit_destroy_ion_client;
8839 }
8840
8841 if (of_property_read_u32((&pdev->dev)->of_node,
8842 "qcom,ce-opp-freq",
8843 &qseecom.ce_opp_freq_hz)) {
8844 pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
8845 qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
8846 }
8847 rc = __qseecom_init_clk(CLK_QSEE);
8848 if (rc)
8849 goto exit_destroy_ion_client;
8850
8851 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8852 (qseecom.support_pfe || qseecom.support_fde)) {
8853 rc = __qseecom_init_clk(CLK_CE_DRV);
8854 if (rc) {
8855 __qseecom_deinit_clk(CLK_QSEE);
8856 goto exit_destroy_ion_client;
8857 }
8858 } else {
8859 struct qseecom_clk *qclk;
8860
8861 qclk = &qseecom.qsee;
8862 qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
8863 qseecom.ce_drv.ce_clk = qclk->ce_clk;
8864 qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
8865 qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
8866 }
8867
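	/*
	 * Fetch the bus-scaling platform data from DT. Then, on QSEE >= 2.0,
	 * when the apps region is neither protected nor handled by appsbl,
	 * look up the "secapp-region" memory resource and notify TZ of its
	 * address and size (32-bit or 64-bit request depending on the QSEE
	 * version), with the QSEE clock enabled around the SCM call.
	 */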
8868 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8869 msm_bus_cl_get_pdata(pdev);
8870 if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
8871 (!qseecom.is_apps_region_protected &&
8872 !qseecom.appsbl_qseecom_support)) {
8873 struct resource *resource = NULL;
8874 struct qsee_apps_region_info_ireq req;
8875 struct qsee_apps_region_info_64bit_ireq req_64bit;
8876 struct qseecom_command_scm_resp resp;
8877 void *cmd_buf = NULL;
8878 size_t cmd_len;
8879
8880 resource = platform_get_resource_byname(pdev,
8881 IORESOURCE_MEM, "secapp-region");
8882 if (resource) {
8883 if (qseecom.qsee_version < QSEE_VERSION_40) {
8884 req.qsee_cmd_id =
8885 QSEOS_APP_REGION_NOTIFICATION;
8886 req.addr = (uint32_t)resource->start;
8887 req.size = resource_size(resource);
8888 cmd_buf = (void *)&req;
8889 cmd_len = sizeof(struct
8890 qsee_apps_region_info_ireq);
8891 pr_warn("secure app region addr=0x%x size=0x%x",
8892 req.addr, req.size);
8893 } else {
8894 req_64bit.qsee_cmd_id =
8895 QSEOS_APP_REGION_NOTIFICATION;
8896 req_64bit.addr = resource->start;
8897 req_64bit.size = resource_size(
8898 resource);
8899 cmd_buf = (void *)&req_64bit;
8900 cmd_len = sizeof(struct
8901 qsee_apps_region_info_64bit_ireq);
8902 pr_warn("secure app region addr=0x%llx size=0x%x",
8903 req_64bit.addr, req_64bit.size);
8904 }
8905 } else {
8906 pr_err("Fail to get secure app region info\n");
8907 rc = -EINVAL;
8908 goto exit_deinit_clock;
8909 }
8910 rc = __qseecom_enable_clk(CLK_QSEE);
8911 if (rc) {
8912 pr_err("CLK_QSEE enabling failed (%d)\n", rc);
8913 rc = -EIO;
8914 goto exit_deinit_clock;
8915 }
8916	mutex_lock(&app_access_lock);
8917	rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
8918		cmd_buf, cmd_len,
8919		&resp, sizeof(resp));
8920	mutex_unlock(&app_access_lock);
8921	__qseecom_disable_clk(CLK_QSEE);
8922 if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
8923 pr_err("send secapp reg fail %d resp.res %d\n",
8924 rc, resp.result);
8925 rc = -EINVAL;
8926 goto exit_deinit_clock;
8927 }
8928 }
8929 /*
8930	 * By default, appsbl only loads cmnlib. If an OEM changes appsbl to
8931	 * also load cmnlib64 while the cmnlib64 image is not present in
8932	 * non_hlos.bin, then also set "qseecom.commonlib64_loaded = true" here.
8933 */
8934 if (qseecom.is_apps_region_protected ||
8935 qseecom.appsbl_qseecom_support)
8936 qseecom.commonlib_loaded = true;
8937 } else {
8938 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8939 pdev->dev.platform_data;
8940 }
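	/*
	 * With bus scaling enabled, set up the inactivity work and the
	 * scale-down timer used to drop the bus-bandwidth vote once crypto
	 * traffic goes idle.
	 */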
8941 if (qseecom.support_bus_scaling) {
8942 init_timer(&(qseecom.bw_scale_down_timer));
8943 INIT_WORK(&qseecom.bw_inactive_req_ws,
8944 qseecom_bw_inactive_req_work);
8945 qseecom.bw_scale_down_timer.function =
8946 qseecom_scale_bus_bandwidth_timer_callback;
8947 }
8948 qseecom.timer_running = false;
8949 qseecom.qsee_perf_client = msm_bus_scale_register_client(
8950 qseecom_platform_support);
8951
8952 qseecom.whitelist_support = qseecom_check_whitelist_feature();
8953 pr_warn("qseecom.whitelist_support = %d\n",
8954 qseecom.whitelist_support);
8955
8956 if (!qseecom.qsee_perf_client)
8957 pr_err("Unable to register bus client\n");
8958
8959	/* Create a kthread to process pending listener unregister tasks */
8960 qseecom.unregister_lsnr_kthread_task = kthread_run(
8961 __qseecom_unregister_listener_kthread_func,
8962 NULL, "qseecom-unreg-lsnr");
8963 if (IS_ERR(qseecom.unregister_lsnr_kthread_task)) {
8964 pr_err("failed to create kthread to unregister listener\n");
8965 rc = -EINVAL;
8966 goto exit_deinit_clock;
8967 }
8968 atomic_set(&qseecom.unregister_lsnr_kthread_state,
8969 LSNR_UNREG_KT_SLEEP);
8970	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
8971 return 0;
8972
8973exit_deinit_clock:
8974 __qseecom_deinit_clk(CLK_QSEE);
8975 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8976 (qseecom.support_pfe || qseecom.support_fde))
8977 __qseecom_deinit_clk(CLK_CE_DRV);
8978exit_destroy_ion_client:
8979 if (qseecom.ce_info.fde) {
8980 pce_info_use = qseecom.ce_info.fde;
8981 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8982 kzfree(pce_info_use->ce_pipe_entry);
8983 pce_info_use++;
8984 }
8985 kfree(qseecom.ce_info.fde);
8986 }
8987 if (qseecom.ce_info.pfe) {
8988 pce_info_use = qseecom.ce_info.pfe;
8989 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8990 kzfree(pce_info_use->ce_pipe_entry);
8991 pce_info_use++;
8992 }
8993 kfree(qseecom.ce_info.pfe);
8994 }
8995 ion_client_destroy(qseecom.ion_clnt);
8996exit_del_cdev:
8997 cdev_del(&qseecom.cdev);
8998exit_destroy_device:
8999 device_destroy(driver_class, qseecom_device_no);
9000exit_destroy_class:
9001 class_destroy(driver_class);
9002exit_unreg_chrdev_region:
9003 unregister_chrdev_region(qseecom_device_no, 1);
9004 return rc;
9005}
9006
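/*
 * qseecom_remove() - tear down the driver, roughly reversing probe:
 * unload registered kernel clients and the common library, release the
 * bus-bandwidth vote, free the CE info tables, de-initialize the crypto
 * clocks, destroy the ION client, stop the listener-unregister kthread
 * and remove the character device.
 */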
9007static int qseecom_remove(struct platform_device *pdev)
9008{
9009 struct qseecom_registered_kclient_list *kclient = NULL;
9010	struct qseecom_registered_kclient_list *kclient_tmp = NULL;
9011	unsigned long flags = 0;
9012 int ret = 0;
9013 int i;
9014 struct qseecom_ce_pipe_entry *pce_entry;
9015 struct qseecom_ce_info_use *pce_info_use;
9016
9017 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
9018 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
9019
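	/*
	 * Unload and free every registered kernel client; an entry with a
	 * NULL handle terminates the walk.
	 */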
9020	list_for_each_entry_safe(kclient, kclient_tmp,
9021		&qseecom.registered_kclient_list_head, list) {
9022
9023	/* Break the loop if client handle is NULL */
9024	if (!kclient->handle) {
9025	list_del(&kclient->list);
9026	kzfree(kclient);
9027	break;
9028	}
9029
9030 list_del(&kclient->list);
9031 mutex_lock(&app_access_lock);
9032 ret = qseecom_unload_app(kclient->handle->dev, false);
9033 mutex_unlock(&app_access_lock);
9034 if (!ret) {
9035 kzfree(kclient->handle->dev);
9036 kzfree(kclient->handle);
9037 kzfree(kclient);
9038 }
9039 }
9040
9041	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
9042
9043 if (qseecom.qseos_version > QSEEE_VERSION_00)
9044 qseecom_unload_commonlib_image();
9045
9046 if (qseecom.qsee_perf_client)
9047 msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
9048 0);
9049 if (pdev->dev.platform_data != NULL)
9050 msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
9051
9052 if (qseecom.support_bus_scaling) {
9053 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9054 del_timer_sync(&qseecom.bw_scale_down_timer);
9055 }
9056
9057 if (qseecom.ce_info.fde) {
9058 pce_info_use = qseecom.ce_info.fde;
9059 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
9060 pce_entry = pce_info_use->ce_pipe_entry;
9061 kfree(pce_entry);
9062 pce_info_use++;
9063 }
9064 }
9065 kfree(qseecom.ce_info.fde);
9066 if (qseecom.ce_info.pfe) {
9067 pce_info_use = qseecom.ce_info.pfe;
9068 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
9069 pce_entry = pce_info_use->ce_pipe_entry;
9070 kfree(pce_entry);
9071 pce_info_use++;
9072 }
9073 }
9074 kfree(qseecom.ce_info.pfe);
9075
9076	/* de-initialize the crypto clocks acquired in probe */
9077 if (pdev->dev.of_node) {
9078 __qseecom_deinit_clk(CLK_QSEE);
9079 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
9080 (qseecom.support_pfe || qseecom.support_fde))
9081 __qseecom_deinit_clk(CLK_CE_DRV);
9082 }
9083
9084 ion_client_destroy(qseecom.ion_clnt);
9085
9086	kthread_stop(qseecom.unregister_lsnr_kthread_task);
9087
9088	cdev_del(&qseecom.cdev);
9089
9090 device_destroy(driver_class, qseecom_device_no);
9091
9092 class_destroy(driver_class);
9093
9094 unregister_chrdev_region(qseecom_device_no, 1);
9095
9096 return ret;
9097}
9098
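/*
 * qseecom_suspend() - mark the driver suspended and, unless the crypto
 * clocks are owned by another subsystem, drop the bus-bandwidth vote to
 * INACTIVE, gate any CE clocks still held and stop the bandwidth
 * scale-down timer and work.
 */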
9099static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
9100{
9101 int ret = 0;
9102 struct qseecom_clk *qclk;
9103
9104 qclk = &qseecom.qsee;
9105 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
9106 if (qseecom.no_clock_support)
9107 return 0;
9108
9109 mutex_lock(&qsee_bw_mutex);
9110 mutex_lock(&clk_access_lock);
9111
9112 if (qseecom.current_mode != INACTIVE) {
9113 ret = msm_bus_scale_client_update_request(
9114 qseecom.qsee_perf_client, INACTIVE);
9115 if (ret)
9116 pr_err("Fail to scale down bus\n");
9117 else
9118 qseecom.current_mode = INACTIVE;
9119 }
9120
9121 if (qclk->clk_access_cnt) {
9122 if (qclk->ce_clk != NULL)
9123 clk_disable_unprepare(qclk->ce_clk);
9124 if (qclk->ce_core_clk != NULL)
9125 clk_disable_unprepare(qclk->ce_core_clk);
9126 if (qclk->ce_bus_clk != NULL)
9127 clk_disable_unprepare(qclk->ce_bus_clk);
9128 }
9129
9130 del_timer_sync(&(qseecom.bw_scale_down_timer));
9131 qseecom.timer_running = false;
9132
9133 mutex_unlock(&clk_access_lock);
9134 mutex_unlock(&qsee_bw_mutex);
9135 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9136
9137 return 0;
9138}
9139
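/*
 * qseecom_resume() - restore the accumulated bus-bandwidth vote, re-enable
 * any CE clocks that were held across suspend and re-arm the scale-down
 * timer, then mark the driver ready again. Note that the state is set to
 * QSEECOM_STATE_READY even when clock re-enabling fails and -EIO is
 * returned.
 */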
9140static int qseecom_resume(struct platform_device *pdev)
9141{
9142 int mode = 0;
9143 int ret = 0;
9144 struct qseecom_clk *qclk;
9145
9146 qclk = &qseecom.qsee;
9147 if (qseecom.no_clock_support)
9148 goto exit;
9149
9150 mutex_lock(&qsee_bw_mutex);
9151 mutex_lock(&clk_access_lock);
9152 if (qseecom.cumulative_mode >= HIGH)
9153 mode = HIGH;
9154 else
9155 mode = qseecom.cumulative_mode;
9156
9157 if (qseecom.cumulative_mode != INACTIVE) {
9158 ret = msm_bus_scale_client_update_request(
9159 qseecom.qsee_perf_client, mode);
9160 if (ret)
9161 pr_err("Fail to scale up bus to %d\n", mode);
9162 else
9163 qseecom.current_mode = mode;
9164 }
9165
9166 if (qclk->clk_access_cnt) {
9167 if (qclk->ce_core_clk != NULL) {
9168 ret = clk_prepare_enable(qclk->ce_core_clk);
9169 if (ret) {
9170 pr_err("Unable to enable/prep CE core clk\n");
9171 qclk->clk_access_cnt = 0;
9172 goto err;
9173 }
9174 }
9175 if (qclk->ce_clk != NULL) {
9176 ret = clk_prepare_enable(qclk->ce_clk);
9177 if (ret) {
9178 pr_err("Unable to enable/prep CE iface clk\n");
9179 qclk->clk_access_cnt = 0;
9180 goto ce_clk_err;
9181 }
9182 }
9183 if (qclk->ce_bus_clk != NULL) {
9184 ret = clk_prepare_enable(qclk->ce_bus_clk);
9185 if (ret) {
9186 pr_err("Unable to enable/prep CE bus clk\n");
9187 qclk->clk_access_cnt = 0;
9188 goto ce_bus_clk_err;
9189 }
9190 }
9191 }
9192
9193 if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
9194 qseecom.bw_scale_down_timer.expires = jiffies +
9195 msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
9196 mod_timer(&(qseecom.bw_scale_down_timer),
9197 qseecom.bw_scale_down_timer.expires);
9198 qseecom.timer_running = true;
9199 }
9200
9201 mutex_unlock(&clk_access_lock);
9202 mutex_unlock(&qsee_bw_mutex);
9203 goto exit;
9204
9205ce_bus_clk_err:
9206 if (qclk->ce_clk)
9207 clk_disable_unprepare(qclk->ce_clk);
9208ce_clk_err:
9209 if (qclk->ce_core_clk)
9210 clk_disable_unprepare(qclk->ce_core_clk);
9211err:
9212 mutex_unlock(&clk_access_lock);
9213 mutex_unlock(&qsee_bw_mutex);
9214 ret = -EIO;
9215exit:
9216 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
9217 return ret;
9218}
9219
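/*
 * Matching is done on the "qcom,qseecom" compatible string. An illustrative
 * (not board-accurate) device-tree node, using only property names read by
 * this driver, might look like:
 *
 *	qseecom {
 *		compatible = "qcom,qseecom";
 *		qcom,ce-opp-freq = <100000000>;
 *		qcom,support-bus-scaling;
 *		qcom,file-encrypt-pipe-pair = <0x2>;
 *		qcom,qsee-reentrancy-support = <2>;
 *	};
 *
 * Actual values and the full property set are platform specific.
 */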
9220static const struct of_device_id qseecom_match[] = {
9221 {
9222 .compatible = "qcom,qseecom",
9223 },
9224 {}
9225};
9226
9227static struct platform_driver qseecom_plat_driver = {
9228 .probe = qseecom_probe,
9229 .remove = qseecom_remove,
9230 .suspend = qseecom_suspend,
9231 .resume = qseecom_resume,
9232 .driver = {
9233 .name = "qseecom",
9234 .owner = THIS_MODULE,
9235 .of_match_table = qseecom_match,
9236 },
9237};
9238
9239static int qseecom_init(void)
9240{
9241 return platform_driver_register(&qseecom_plat_driver);
9242}
9243
9244static void qseecom_exit(void)
9245{
9246 platform_driver_unregister(&qseecom_plat_driver);
9247}
9248
9249MODULE_LICENSE("GPL v2");
9250MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
9251
9252module_init(qseecom_init);
9253module_exit(qseecom_exit);