blob: 208d18e76130af3487fcfddf7c0e3c1370c4c315 [file] [log] [blame]
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
Zhen Kong87dcf0e2019-01-04 12:34:50 -08004 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
Zhen Kongc4c162a2019-01-23 12:07:12 -080053#include <linux/kthread.h>
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -070054
55#define QSEECOM_DEV "qseecom"
56#define QSEOS_VERSION_14 0x14
57#define QSEEE_VERSION_00 0x400000
58#define QSEE_VERSION_01 0x401000
59#define QSEE_VERSION_02 0x402000
60#define QSEE_VERSION_03 0x403000
61#define QSEE_VERSION_04 0x404000
62#define QSEE_VERSION_05 0x405000
63#define QSEE_VERSION_20 0x800000
64#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
65
66#define QSEE_CE_CLK_100MHZ 100000000
67#define CE_CLK_DIV 1000000
68
Mohamed Sunfeer105a07b2018-08-29 13:52:40 +053069#define QSEECOM_MAX_SG_ENTRY 4096
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -070070#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
71 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
72
73#define QSEECOM_INVALID_KEY_ID 0xff
74
75/* Save partition image hash for authentication check */
76#define SCM_SAVE_PARTITION_HASH_ID 0x01
77
78/* Check if enterprise security is activate */
79#define SCM_IS_ACTIVATED_ID 0x02
80
81/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
82#define SCM_MDTP_CIPHER_DIP 0x01
83
84/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
85#define MAX_DIP 0x20000
86
87#define RPMB_SERVICE 0x2000
88#define SSD_SERVICE 0x3000
89
90#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
91#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
92#define TWO 2
93#define QSEECOM_UFS_ICE_CE_NUM 10
94#define QSEECOM_SDCC_ICE_CE_NUM 20
95#define QSEECOM_ICE_FDE_KEY_INDEX 0
96
97#define PHY_ADDR_4G (1ULL<<32)
98
99#define QSEECOM_STATE_NOT_READY 0
100#define QSEECOM_STATE_SUSPEND 1
101#define QSEECOM_STATE_READY 2
102#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
103
104/*
105 * default ce info unit to 0 for
106 * services which
107 * support only single instance.
108 * Most of services are in this category.
109 */
110#define DEFAULT_CE_INFO_UNIT 0
111#define DEFAULT_NUM_CE_INFO_UNIT 1
112
Jiten Patela7bb1d52018-05-11 12:34:26 +0530113#define FDE_FLAG_POS 4
114#define ENABLE_KEY_WRAP_IN_KS (1 << FDE_FLAG_POS)
115
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700116enum qseecom_clk_definitions {
117 CLK_DFAB = 0,
118 CLK_SFPB,
119};
120
121enum qseecom_ice_key_size_type {
122 QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
123 (0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
124 QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
125 (1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
126 QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
127 (0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
128};
129
130enum qseecom_client_handle_type {
131 QSEECOM_CLIENT_APP = 1,
132 QSEECOM_LISTENER_SERVICE,
133 QSEECOM_SECURE_SERVICE,
134 QSEECOM_GENERIC,
135 QSEECOM_UNAVAILABLE_CLIENT_APP,
136};
137
138enum qseecom_ce_hw_instance {
139 CLK_QSEE = 0,
140 CLK_CE_DRV,
141 CLK_INVALID,
142};
143
Zhen Kongc4c162a2019-01-23 12:07:12 -0800144enum qseecom_listener_unregister_kthread_state {
145 LSNR_UNREG_KT_SLEEP = 0,
146 LSNR_UNREG_KT_WAKEUP,
147};
148
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700149static struct class *driver_class;
150static dev_t qseecom_device_no;
151
152static DEFINE_MUTEX(qsee_bw_mutex);
153static DEFINE_MUTEX(app_access_lock);
154static DEFINE_MUTEX(clk_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -0800155static DEFINE_MUTEX(listener_access_lock);
156
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700157
158struct sglist_info {
159 uint32_t indexAndFlags;
160 uint32_t sizeOrCount;
161};
162
163/*
164 * The 31th bit indicates only one or multiple physical address inside
165 * the request buffer. If it is set, the index locates a single physical addr
166 * inside the request buffer, and `sizeOrCount` is the size of the memory being
167 * shared at that physical address.
168 * Otherwise, the index locates an array of {start, len} pairs (a
169 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
170 * that array.
171 *
172 * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
173 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
174 *
175 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
176 */
177#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
178 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
179
180#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
181
182#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
183
184#define MAKE_WHITELIST_VERSION(major, minor, patch) \
185 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
186
187struct qseecom_registered_listener_list {
188 struct list_head list;
189 struct qseecom_register_listener_req svc;
190 void *user_virt_sb_base;
191 u8 *sb_virt;
192 phys_addr_t sb_phys;
193 size_t sb_length;
194 struct ion_handle *ihandle; /* Retrieve phy addr */
195 wait_queue_head_t rcv_req_wq;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800196 /* rcv_req_flag: 0: ready and empty; 1: received req */
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700197 int rcv_req_flag;
198 int send_resp_flag;
199 bool listener_in_use;
200 /* wq for thread blocked on this listener*/
201 wait_queue_head_t listener_block_app_wq;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800202 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
203 uint32_t sglist_cnt;
204 int abort;
205 bool unregister_pending;
206};
207
208struct qseecom_unregister_pending_list {
209 struct list_head list;
210 struct qseecom_dev_handle *data;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700211};
212
213struct qseecom_registered_app_list {
214 struct list_head list;
215 u32 app_id;
216 u32 ref_cnt;
217 char app_name[MAX_APP_NAME_SIZE];
218 u32 app_arch;
219 bool app_blocked;
Zhen Kongdea10592018-07-30 17:50:10 -0700220 u32 check_block;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700221 u32 blocked_on_listener_id;
222};
223
224struct qseecom_registered_kclient_list {
225 struct list_head list;
226 struct qseecom_handle *handle;
227};
228
229struct qseecom_ce_info_use {
230 unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
231 unsigned int unit_num;
232 unsigned int num_ce_pipe_entries;
233 struct qseecom_ce_pipe_entry *ce_pipe_entry;
234 bool alloc;
235 uint32_t type;
236};
237
238struct ce_hw_usage_info {
239 uint32_t qsee_ce_hw_instance;
240 uint32_t num_fde;
241 struct qseecom_ce_info_use *fde;
242 uint32_t num_pfe;
243 struct qseecom_ce_info_use *pfe;
244};
245
246struct qseecom_clk {
247 enum qseecom_ce_hw_instance instance;
248 struct clk *ce_core_clk;
249 struct clk *ce_clk;
250 struct clk *ce_core_src_clk;
251 struct clk *ce_bus_clk;
252 uint32_t clk_access_cnt;
253};
254
255struct qseecom_control {
256 struct ion_client *ion_clnt; /* Ion client */
257 struct list_head registered_listener_list_head;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700258
259 struct list_head registered_app_list_head;
260 spinlock_t registered_app_list_lock;
261
262 struct list_head registered_kclient_list_head;
263 spinlock_t registered_kclient_list_lock;
264
265 wait_queue_head_t send_resp_wq;
266 int send_resp_flag;
267
268 uint32_t qseos_version;
269 uint32_t qsee_version;
270 struct device *pdev;
271 bool whitelist_support;
272 bool commonlib_loaded;
273 bool commonlib64_loaded;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700274 struct ce_hw_usage_info ce_info;
275
276 int qsee_bw_count;
277 int qsee_sfpb_bw_count;
278
279 uint32_t qsee_perf_client;
280 struct qseecom_clk qsee;
281 struct qseecom_clk ce_drv;
282
283 bool support_bus_scaling;
284 bool support_fde;
285 bool support_pfe;
286 bool fde_key_size;
287 uint32_t cumulative_mode;
288 enum qseecom_bandwidth_request_mode current_mode;
289 struct timer_list bw_scale_down_timer;
290 struct work_struct bw_inactive_req_ws;
291 struct cdev cdev;
292 bool timer_running;
293 bool no_clock_support;
294 unsigned int ce_opp_freq_hz;
295 bool appsbl_qseecom_support;
296 uint32_t qsee_reentrancy_support;
Jiten Patela7bb1d52018-05-11 12:34:26 +0530297 bool enable_key_wrap_in_ks;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700298
299 uint32_t app_block_ref_cnt;
300 wait_queue_head_t app_block_wq;
301 atomic_t qseecom_state;
302 int is_apps_region_protected;
Zhen Kong2f60f492017-06-29 15:22:14 -0700303 bool smcinvoke_support;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800304
305 struct list_head unregister_lsnr_pending_list_head;
306 wait_queue_head_t register_lsnr_pending_wq;
Zhen Kongc4c162a2019-01-23 12:07:12 -0800307 struct task_struct *unregister_lsnr_kthread_task;
308 wait_queue_head_t unregister_lsnr_kthread_wq;
309 atomic_t unregister_lsnr_kthread_state;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700310};
311
312struct qseecom_sec_buf_fd_info {
313 bool is_sec_buf_fd;
314 size_t size;
315 void *vbase;
316 dma_addr_t pbase;
317};
318
319struct qseecom_param_memref {
320 uint32_t buffer;
321 uint32_t size;
322};
323
324struct qseecom_client_handle {
325 u32 app_id;
326 u8 *sb_virt;
327 phys_addr_t sb_phys;
328 unsigned long user_virt_sb_base;
329 size_t sb_length;
330 struct ion_handle *ihandle; /* Retrieve phy addr */
331 char app_name[MAX_APP_NAME_SIZE];
332 u32 app_arch;
333 struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
Zhen Kong0ea975d2019-03-12 14:40:24 -0700334 bool from_smcinvoke;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700335};
336
337struct qseecom_listener_handle {
338 u32 id;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800339 bool unregister_pending;
Zhen Kong87dcf0e2019-01-04 12:34:50 -0800340 bool release_called;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700341};
342
343static struct qseecom_control qseecom;
344
345struct qseecom_dev_handle {
346 enum qseecom_client_handle_type type;
347 union {
348 struct qseecom_client_handle client;
349 struct qseecom_listener_handle listener;
350 };
351 bool released;
352 int abort;
353 wait_queue_head_t abort_wq;
354 atomic_t ioctl_count;
355 bool perf_enabled;
356 bool fast_load_enabled;
357 enum qseecom_bandwidth_request_mode mode;
358 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
359 uint32_t sglist_cnt;
360 bool use_legacy_cmd;
361};
362
363struct qseecom_key_id_usage_desc {
364 uint8_t desc[QSEECOM_KEY_ID_SIZE];
365};
366
367struct qseecom_crypto_info {
368 unsigned int unit_num;
369 unsigned int ce;
370 unsigned int pipe_pair;
371};
372
373static struct qseecom_key_id_usage_desc key_id_array[] = {
374 {
375 .desc = "Undefined Usage Index",
376 },
377
378 {
379 .desc = "Full Disk Encryption",
380 },
381
382 {
383 .desc = "Per File Encryption",
384 },
385
386 {
387 .desc = "UFS ICE Full Disk Encryption",
388 },
389
390 {
391 .desc = "SDCC ICE Full Disk Encryption",
392 },
393};
394
395/* Function proto types */
396static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
397static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
398static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
399static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
400static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
401static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
402 char *cmnlib_name);
403static int qseecom_enable_ice_setup(int usage);
404static int qseecom_disable_ice_setup(int usage);
405static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
406static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
407 void __user *argp);
408static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
409 void __user *argp);
410static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
411 void __user *argp);
412
413static int get_qseecom_keymaster_status(char *str)
414{
415 get_option(&str, &qseecom.is_apps_region_protected);
416 return 1;
417}
418__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
419
Zhen Kong03f220d2019-02-01 17:12:34 -0800420
421#define QSEECOM_SCM_EBUSY_WAIT_MS 30
422#define QSEECOM_SCM_EBUSY_MAX_RETRY 67
423
424static int __qseecom_scm_call2_locked(uint32_t smc_id, struct scm_desc *desc)
425{
426 int ret = 0;
427 int retry_count = 0;
428
429 do {
430 ret = scm_call2_noretry(smc_id, desc);
431 if (ret == -EBUSY) {
432 mutex_unlock(&app_access_lock);
433 msleep(QSEECOM_SCM_EBUSY_WAIT_MS);
434 mutex_lock(&app_access_lock);
435 }
436 if (retry_count == 33)
437 pr_warn("secure world has been busy for 1 second!\n");
438 } while (ret == -EBUSY &&
439 (retry_count++ < QSEECOM_SCM_EBUSY_MAX_RETRY));
440 return ret;
441}
442
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700443static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
444 const void *req_buf, void *resp_buf)
445{
446 int ret = 0;
447 uint32_t smc_id = 0;
448 uint32_t qseos_cmd_id = 0;
449 struct scm_desc desc = {0};
450 struct qseecom_command_scm_resp *scm_resp = NULL;
451
452 if (!req_buf || !resp_buf) {
453 pr_err("Invalid buffer pointer\n");
454 return -EINVAL;
455 }
456 qseos_cmd_id = *(uint32_t *)req_buf;
457 scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
458
459 switch (svc_id) {
460 case 6: {
461 if (tz_cmd_id == 3) {
462 smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
463 desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
464 desc.args[0] = *(uint32_t *)req_buf;
465 } else {
466 pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
467 svc_id, tz_cmd_id);
468 return -EINVAL;
469 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800470 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700471 break;
472 }
473 case SCM_SVC_ES: {
474 switch (tz_cmd_id) {
475 case SCM_SAVE_PARTITION_HASH_ID: {
476 u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
477 struct qseecom_save_partition_hash_req *p_hash_req =
478 (struct qseecom_save_partition_hash_req *)
479 req_buf;
480 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
481
482 if (!tzbuf)
483 return -ENOMEM;
484 memset(tzbuf, 0, tzbuflen);
485 memcpy(tzbuf, p_hash_req->digest,
486 SHA256_DIGEST_LENGTH);
487 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
488 smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
489 desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
490 desc.args[0] = p_hash_req->partition_id;
491 desc.args[1] = virt_to_phys(tzbuf);
492 desc.args[2] = SHA256_DIGEST_LENGTH;
Zhen Kong03f220d2019-02-01 17:12:34 -0800493 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700494 kzfree(tzbuf);
495 break;
496 }
497 default: {
498 pr_err("tz_cmd_id %d is not supported by scm_call2\n",
499 tz_cmd_id);
500 ret = -EINVAL;
501 break;
502 }
503 } /* end of switch (tz_cmd_id) */
504 break;
505 } /* end of case SCM_SVC_ES */
506 case SCM_SVC_TZSCHEDULER: {
507 switch (qseos_cmd_id) {
508 case QSEOS_APP_START_COMMAND: {
509 struct qseecom_load_app_ireq *req;
510 struct qseecom_load_app_64bit_ireq *req_64bit;
511
512 smc_id = TZ_OS_APP_START_ID;
513 desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
514 if (qseecom.qsee_version < QSEE_VERSION_40) {
515 req = (struct qseecom_load_app_ireq *)req_buf;
516 desc.args[0] = req->mdt_len;
517 desc.args[1] = req->img_len;
518 desc.args[2] = req->phy_addr;
519 } else {
520 req_64bit =
521 (struct qseecom_load_app_64bit_ireq *)
522 req_buf;
523 desc.args[0] = req_64bit->mdt_len;
524 desc.args[1] = req_64bit->img_len;
525 desc.args[2] = req_64bit->phy_addr;
526 }
527 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800528 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700529 break;
530 }
531 case QSEOS_APP_SHUTDOWN_COMMAND: {
532 struct qseecom_unload_app_ireq *req;
533
534 req = (struct qseecom_unload_app_ireq *)req_buf;
535 smc_id = TZ_OS_APP_SHUTDOWN_ID;
536 desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
537 desc.args[0] = req->app_id;
Zhen Kong03f220d2019-02-01 17:12:34 -0800538 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700539 break;
540 }
541 case QSEOS_APP_LOOKUP_COMMAND: {
542 struct qseecom_check_app_ireq *req;
543 u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
544 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
545
546 if (!tzbuf)
547 return -ENOMEM;
548 req = (struct qseecom_check_app_ireq *)req_buf;
549 pr_debug("Lookup app_name = %s\n", req->app_name);
550 strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
551 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
552 smc_id = TZ_OS_APP_LOOKUP_ID;
553 desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
554 desc.args[0] = virt_to_phys(tzbuf);
555 desc.args[1] = strlen(req->app_name);
556 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800557 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700558 kzfree(tzbuf);
559 break;
560 }
561 case QSEOS_APP_REGION_NOTIFICATION: {
562 struct qsee_apps_region_info_ireq *req;
563 struct qsee_apps_region_info_64bit_ireq *req_64bit;
564
565 smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
566 desc.arginfo =
567 TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
568 if (qseecom.qsee_version < QSEE_VERSION_40) {
569 req = (struct qsee_apps_region_info_ireq *)
570 req_buf;
571 desc.args[0] = req->addr;
572 desc.args[1] = req->size;
573 } else {
574 req_64bit =
575 (struct qsee_apps_region_info_64bit_ireq *)
576 req_buf;
577 desc.args[0] = req_64bit->addr;
578 desc.args[1] = req_64bit->size;
579 }
580 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800581 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700582 break;
583 }
584 case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
585 struct qseecom_load_lib_image_ireq *req;
586 struct qseecom_load_lib_image_64bit_ireq *req_64bit;
587
588 smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
589 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
590 if (qseecom.qsee_version < QSEE_VERSION_40) {
591 req = (struct qseecom_load_lib_image_ireq *)
592 req_buf;
593 desc.args[0] = req->mdt_len;
594 desc.args[1] = req->img_len;
595 desc.args[2] = req->phy_addr;
596 } else {
597 req_64bit =
598 (struct qseecom_load_lib_image_64bit_ireq *)
599 req_buf;
600 desc.args[0] = req_64bit->mdt_len;
601 desc.args[1] = req_64bit->img_len;
602 desc.args[2] = req_64bit->phy_addr;
603 }
604 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800605 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700606 break;
607 }
608 case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
609 smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
610 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
611 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800612 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700613 break;
614 }
615 case QSEOS_REGISTER_LISTENER: {
616 struct qseecom_register_listener_ireq *req;
617 struct qseecom_register_listener_64bit_ireq *req_64bit;
618
619 desc.arginfo =
620 TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
621 if (qseecom.qsee_version < QSEE_VERSION_40) {
622 req = (struct qseecom_register_listener_ireq *)
623 req_buf;
624 desc.args[0] = req->listener_id;
625 desc.args[1] = req->sb_ptr;
626 desc.args[2] = req->sb_len;
627 } else {
628 req_64bit =
629 (struct qseecom_register_listener_64bit_ireq *)
630 req_buf;
631 desc.args[0] = req_64bit->listener_id;
632 desc.args[1] = req_64bit->sb_ptr;
633 desc.args[2] = req_64bit->sb_len;
634 }
Zhen Kong2f60f492017-06-29 15:22:14 -0700635 qseecom.smcinvoke_support = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700636 smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
Zhen Kong03f220d2019-02-01 17:12:34 -0800637 ret = __qseecom_scm_call2_locked(smc_id, &desc);
Zhen Kong50a15202019-01-29 14:16:00 -0800638 if (ret == -EIO) {
639 /* smcinvoke is not supported */
Zhen Kong2f60f492017-06-29 15:22:14 -0700640 qseecom.smcinvoke_support = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700641 smc_id = TZ_OS_REGISTER_LISTENER_ID;
Zhen Kong03f220d2019-02-01 17:12:34 -0800642 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700643 }
644 break;
645 }
646 case QSEOS_DEREGISTER_LISTENER: {
647 struct qseecom_unregister_listener_ireq *req;
648
649 req = (struct qseecom_unregister_listener_ireq *)
650 req_buf;
651 smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
652 desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
653 desc.args[0] = req->listener_id;
Zhen Kong03f220d2019-02-01 17:12:34 -0800654 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700655 break;
656 }
657 case QSEOS_LISTENER_DATA_RSP_COMMAND: {
658 struct qseecom_client_listener_data_irsp *req;
659
660 req = (struct qseecom_client_listener_data_irsp *)
661 req_buf;
662 smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
663 desc.arginfo =
664 TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
665 desc.args[0] = req->listener_id;
666 desc.args[1] = req->status;
Zhen Kong03f220d2019-02-01 17:12:34 -0800667 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700668 break;
669 }
670 case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
671 struct qseecom_client_listener_data_irsp *req;
672 struct qseecom_client_listener_data_64bit_irsp *req_64;
673
674 smc_id =
675 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
676 desc.arginfo =
677 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
678 if (qseecom.qsee_version < QSEE_VERSION_40) {
679 req =
680 (struct qseecom_client_listener_data_irsp *)
681 req_buf;
682 desc.args[0] = req->listener_id;
683 desc.args[1] = req->status;
684 desc.args[2] = req->sglistinfo_ptr;
685 desc.args[3] = req->sglistinfo_len;
686 } else {
687 req_64 =
688 (struct qseecom_client_listener_data_64bit_irsp *)
689 req_buf;
690 desc.args[0] = req_64->listener_id;
691 desc.args[1] = req_64->status;
692 desc.args[2] = req_64->sglistinfo_ptr;
693 desc.args[3] = req_64->sglistinfo_len;
694 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800695 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700696 break;
697 }
698 case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
699 struct qseecom_load_app_ireq *req;
700 struct qseecom_load_app_64bit_ireq *req_64bit;
701
702 smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
703 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
704 if (qseecom.qsee_version < QSEE_VERSION_40) {
705 req = (struct qseecom_load_app_ireq *)req_buf;
706 desc.args[0] = req->mdt_len;
707 desc.args[1] = req->img_len;
708 desc.args[2] = req->phy_addr;
709 } else {
710 req_64bit =
711 (struct qseecom_load_app_64bit_ireq *)req_buf;
712 desc.args[0] = req_64bit->mdt_len;
713 desc.args[1] = req_64bit->img_len;
714 desc.args[2] = req_64bit->phy_addr;
715 }
716 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800717 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700718 break;
719 }
720 case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
721 smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
722 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
723 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800724 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700725 break;
726 }
727
728 case QSEOS_CLIENT_SEND_DATA_COMMAND: {
729 struct qseecom_client_send_data_ireq *req;
730 struct qseecom_client_send_data_64bit_ireq *req_64bit;
731
732 smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
733 desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
734 if (qseecom.qsee_version < QSEE_VERSION_40) {
735 req = (struct qseecom_client_send_data_ireq *)
736 req_buf;
737 desc.args[0] = req->app_id;
738 desc.args[1] = req->req_ptr;
739 desc.args[2] = req->req_len;
740 desc.args[3] = req->rsp_ptr;
741 desc.args[4] = req->rsp_len;
742 } else {
743 req_64bit =
744 (struct qseecom_client_send_data_64bit_ireq *)
745 req_buf;
746 desc.args[0] = req_64bit->app_id;
747 desc.args[1] = req_64bit->req_ptr;
748 desc.args[2] = req_64bit->req_len;
749 desc.args[3] = req_64bit->rsp_ptr;
750 desc.args[4] = req_64bit->rsp_len;
751 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800752 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700753 break;
754 }
755 case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
756 struct qseecom_client_send_data_ireq *req;
757 struct qseecom_client_send_data_64bit_ireq *req_64bit;
758
759 smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
760 desc.arginfo =
761 TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
762 if (qseecom.qsee_version < QSEE_VERSION_40) {
763 req = (struct qseecom_client_send_data_ireq *)
764 req_buf;
765 desc.args[0] = req->app_id;
766 desc.args[1] = req->req_ptr;
767 desc.args[2] = req->req_len;
768 desc.args[3] = req->rsp_ptr;
769 desc.args[4] = req->rsp_len;
770 desc.args[5] = req->sglistinfo_ptr;
771 desc.args[6] = req->sglistinfo_len;
772 } else {
773 req_64bit =
774 (struct qseecom_client_send_data_64bit_ireq *)
775 req_buf;
776 desc.args[0] = req_64bit->app_id;
777 desc.args[1] = req_64bit->req_ptr;
778 desc.args[2] = req_64bit->req_len;
779 desc.args[3] = req_64bit->rsp_ptr;
780 desc.args[4] = req_64bit->rsp_len;
781 desc.args[5] = req_64bit->sglistinfo_ptr;
782 desc.args[6] = req_64bit->sglistinfo_len;
783 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800784 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700785 break;
786 }
787 case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
788 struct qseecom_client_send_service_ireq *req;
789
790 req = (struct qseecom_client_send_service_ireq *)
791 req_buf;
792 smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
793 desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
794 desc.args[0] = req->key_type;
795 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800796 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700797 break;
798 }
799 case QSEOS_RPMB_ERASE_COMMAND: {
800 smc_id = TZ_OS_RPMB_ERASE_ID;
801 desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
802 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800803 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700804 break;
805 }
806 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
807 smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
808 desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
809 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800810 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700811 break;
812 }
813 case QSEOS_GENERATE_KEY: {
814 u32 tzbuflen = PAGE_ALIGN(sizeof
815 (struct qseecom_key_generate_ireq) -
816 sizeof(uint32_t));
817 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
818
819 if (!tzbuf)
820 return -ENOMEM;
821 memset(tzbuf, 0, tzbuflen);
822 memcpy(tzbuf, req_buf + sizeof(uint32_t),
823 (sizeof(struct qseecom_key_generate_ireq) -
824 sizeof(uint32_t)));
825 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
826 smc_id = TZ_OS_KS_GEN_KEY_ID;
827 desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
828 desc.args[0] = virt_to_phys(tzbuf);
829 desc.args[1] = tzbuflen;
830 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800831 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700832 kzfree(tzbuf);
833 break;
834 }
835 case QSEOS_DELETE_KEY: {
836 u32 tzbuflen = PAGE_ALIGN(sizeof
837 (struct qseecom_key_delete_ireq) -
838 sizeof(uint32_t));
839 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
840
841 if (!tzbuf)
842 return -ENOMEM;
843 memset(tzbuf, 0, tzbuflen);
844 memcpy(tzbuf, req_buf + sizeof(uint32_t),
845 (sizeof(struct qseecom_key_delete_ireq) -
846 sizeof(uint32_t)));
847 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
848 smc_id = TZ_OS_KS_DEL_KEY_ID;
849 desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
850 desc.args[0] = virt_to_phys(tzbuf);
851 desc.args[1] = tzbuflen;
852 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800853 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700854 kzfree(tzbuf);
855 break;
856 }
857 case QSEOS_SET_KEY: {
858 u32 tzbuflen = PAGE_ALIGN(sizeof
859 (struct qseecom_key_select_ireq) -
860 sizeof(uint32_t));
861 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
862
863 if (!tzbuf)
864 return -ENOMEM;
865 memset(tzbuf, 0, tzbuflen);
866 memcpy(tzbuf, req_buf + sizeof(uint32_t),
867 (sizeof(struct qseecom_key_select_ireq) -
868 sizeof(uint32_t)));
869 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
870 smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
871 desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
872 desc.args[0] = virt_to_phys(tzbuf);
873 desc.args[1] = tzbuflen;
874 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800875 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700876 kzfree(tzbuf);
877 break;
878 }
879 case QSEOS_UPDATE_KEY_USERINFO: {
880 u32 tzbuflen = PAGE_ALIGN(sizeof
881 (struct qseecom_key_userinfo_update_ireq) -
882 sizeof(uint32_t));
883 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
884
885 if (!tzbuf)
886 return -ENOMEM;
887 memset(tzbuf, 0, tzbuflen);
888 memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
889 (struct qseecom_key_userinfo_update_ireq) -
890 sizeof(uint32_t)));
891 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
892 smc_id = TZ_OS_KS_UPDATE_KEY_ID;
893 desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
894 desc.args[0] = virt_to_phys(tzbuf);
895 desc.args[1] = tzbuflen;
896 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800897 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700898 kzfree(tzbuf);
899 break;
900 }
901 case QSEOS_TEE_OPEN_SESSION: {
902 struct qseecom_qteec_ireq *req;
903 struct qseecom_qteec_64bit_ireq *req_64bit;
904
905 smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
906 desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
907 if (qseecom.qsee_version < QSEE_VERSION_40) {
908 req = (struct qseecom_qteec_ireq *)req_buf;
909 desc.args[0] = req->app_id;
910 desc.args[1] = req->req_ptr;
911 desc.args[2] = req->req_len;
912 desc.args[3] = req->resp_ptr;
913 desc.args[4] = req->resp_len;
914 } else {
915 req_64bit = (struct qseecom_qteec_64bit_ireq *)
916 req_buf;
917 desc.args[0] = req_64bit->app_id;
918 desc.args[1] = req_64bit->req_ptr;
919 desc.args[2] = req_64bit->req_len;
920 desc.args[3] = req_64bit->resp_ptr;
921 desc.args[4] = req_64bit->resp_len;
922 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800923 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700924 break;
925 }
926 case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
927 struct qseecom_qteec_ireq *req;
928 struct qseecom_qteec_64bit_ireq *req_64bit;
929
930 smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
931 desc.arginfo =
932 TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
933 if (qseecom.qsee_version < QSEE_VERSION_40) {
934 req = (struct qseecom_qteec_ireq *)req_buf;
935 desc.args[0] = req->app_id;
936 desc.args[1] = req->req_ptr;
937 desc.args[2] = req->req_len;
938 desc.args[3] = req->resp_ptr;
939 desc.args[4] = req->resp_len;
940 desc.args[5] = req->sglistinfo_ptr;
941 desc.args[6] = req->sglistinfo_len;
942 } else {
943 req_64bit = (struct qseecom_qteec_64bit_ireq *)
944 req_buf;
945 desc.args[0] = req_64bit->app_id;
946 desc.args[1] = req_64bit->req_ptr;
947 desc.args[2] = req_64bit->req_len;
948 desc.args[3] = req_64bit->resp_ptr;
949 desc.args[4] = req_64bit->resp_len;
950 desc.args[5] = req_64bit->sglistinfo_ptr;
951 desc.args[6] = req_64bit->sglistinfo_len;
952 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800953 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700954 break;
955 }
956 case QSEOS_TEE_INVOKE_COMMAND: {
957 struct qseecom_qteec_ireq *req;
958 struct qseecom_qteec_64bit_ireq *req_64bit;
959
960 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
961 desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
962 if (qseecom.qsee_version < QSEE_VERSION_40) {
963 req = (struct qseecom_qteec_ireq *)req_buf;
964 desc.args[0] = req->app_id;
965 desc.args[1] = req->req_ptr;
966 desc.args[2] = req->req_len;
967 desc.args[3] = req->resp_ptr;
968 desc.args[4] = req->resp_len;
969 } else {
970 req_64bit = (struct qseecom_qteec_64bit_ireq *)
971 req_buf;
972 desc.args[0] = req_64bit->app_id;
973 desc.args[1] = req_64bit->req_ptr;
974 desc.args[2] = req_64bit->req_len;
975 desc.args[3] = req_64bit->resp_ptr;
976 desc.args[4] = req_64bit->resp_len;
977 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800978 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700979 break;
980 }
981 case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
982 struct qseecom_qteec_ireq *req;
983 struct qseecom_qteec_64bit_ireq *req_64bit;
984
985 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
986 desc.arginfo =
987 TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
988 if (qseecom.qsee_version < QSEE_VERSION_40) {
989 req = (struct qseecom_qteec_ireq *)req_buf;
990 desc.args[0] = req->app_id;
991 desc.args[1] = req->req_ptr;
992 desc.args[2] = req->req_len;
993 desc.args[3] = req->resp_ptr;
994 desc.args[4] = req->resp_len;
995 desc.args[5] = req->sglistinfo_ptr;
996 desc.args[6] = req->sglistinfo_len;
997 } else {
998 req_64bit = (struct qseecom_qteec_64bit_ireq *)
999 req_buf;
1000 desc.args[0] = req_64bit->app_id;
1001 desc.args[1] = req_64bit->req_ptr;
1002 desc.args[2] = req_64bit->req_len;
1003 desc.args[3] = req_64bit->resp_ptr;
1004 desc.args[4] = req_64bit->resp_len;
1005 desc.args[5] = req_64bit->sglistinfo_ptr;
1006 desc.args[6] = req_64bit->sglistinfo_len;
1007 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001008 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001009 break;
1010 }
1011 case QSEOS_TEE_CLOSE_SESSION: {
1012 struct qseecom_qteec_ireq *req;
1013 struct qseecom_qteec_64bit_ireq *req_64bit;
1014
1015 smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
1016 desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
1017 if (qseecom.qsee_version < QSEE_VERSION_40) {
1018 req = (struct qseecom_qteec_ireq *)req_buf;
1019 desc.args[0] = req->app_id;
1020 desc.args[1] = req->req_ptr;
1021 desc.args[2] = req->req_len;
1022 desc.args[3] = req->resp_ptr;
1023 desc.args[4] = req->resp_len;
1024 } else {
1025 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1026 req_buf;
1027 desc.args[0] = req_64bit->app_id;
1028 desc.args[1] = req_64bit->req_ptr;
1029 desc.args[2] = req_64bit->req_len;
1030 desc.args[3] = req_64bit->resp_ptr;
1031 desc.args[4] = req_64bit->resp_len;
1032 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001033 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001034 break;
1035 }
1036 case QSEOS_TEE_REQUEST_CANCELLATION: {
1037 struct qseecom_qteec_ireq *req;
1038 struct qseecom_qteec_64bit_ireq *req_64bit;
1039
1040 smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
1041 desc.arginfo =
1042 TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
1043 if (qseecom.qsee_version < QSEE_VERSION_40) {
1044 req = (struct qseecom_qteec_ireq *)req_buf;
1045 desc.args[0] = req->app_id;
1046 desc.args[1] = req->req_ptr;
1047 desc.args[2] = req->req_len;
1048 desc.args[3] = req->resp_ptr;
1049 desc.args[4] = req->resp_len;
1050 } else {
1051 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1052 req_buf;
1053 desc.args[0] = req_64bit->app_id;
1054 desc.args[1] = req_64bit->req_ptr;
1055 desc.args[2] = req_64bit->req_len;
1056 desc.args[3] = req_64bit->resp_ptr;
1057 desc.args[4] = req_64bit->resp_len;
1058 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001059 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001060 break;
1061 }
1062 case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
1063 struct qseecom_continue_blocked_request_ireq *req =
1064 (struct qseecom_continue_blocked_request_ireq *)
1065 req_buf;
Zhen Kong2f60f492017-06-29 15:22:14 -07001066 if (qseecom.smcinvoke_support)
1067 smc_id =
1068 TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
1069 else
1070 smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001071 desc.arginfo =
1072 TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
Zhen Kong2f60f492017-06-29 15:22:14 -07001073 desc.args[0] = req->app_or_session_id;
Zhen Kong03f220d2019-02-01 17:12:34 -08001074 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001075 break;
1076 }
1077 default: {
1078 pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
1079 qseos_cmd_id);
1080 ret = -EINVAL;
1081 break;
1082 }
1083 } /*end of switch (qsee_cmd_id) */
1084 break;
1085 } /*end of case SCM_SVC_TZSCHEDULER*/
1086 default: {
1087 pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
1088 svc_id);
1089 ret = -EINVAL;
1090 break;
1091 }
1092 } /*end of switch svc_id */
1093 scm_resp->result = desc.ret[0];
1094 scm_resp->resp_type = desc.ret[1];
1095 scm_resp->data = desc.ret[2];
1096 pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
1097 svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
1098 pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
1099 scm_resp->result, scm_resp->resp_type, scm_resp->data);
1100 return ret;
1101}
1102
1103
1104static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1105 size_t cmd_len, void *resp_buf, size_t resp_len)
1106{
1107 if (!is_scm_armv8())
1108 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1109 resp_buf, resp_len);
1110 else
1111 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1112}
1113
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001114static struct qseecom_registered_listener_list *__qseecom_find_svc(
1115 int32_t listener_id)
1116{
1117 struct qseecom_registered_listener_list *entry = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001118
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001119 list_for_each_entry(entry,
1120 &qseecom.registered_listener_list_head, list) {
1121 if (entry->svc.listener_id == listener_id)
1122 break;
1123 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001124 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08001125 pr_debug("Service id: %u is not found\n", listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001126 return NULL;
1127 }
1128
1129 return entry;
1130}
1131
1132static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1133 struct qseecom_dev_handle *handle,
1134 struct qseecom_register_listener_req *listener)
1135{
1136 int ret = 0;
1137 struct qseecom_register_listener_ireq req;
1138 struct qseecom_register_listener_64bit_ireq req_64bit;
1139 struct qseecom_command_scm_resp resp;
1140 ion_phys_addr_t pa;
1141 void *cmd_buf = NULL;
1142 size_t cmd_len;
1143
1144 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001145 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001146 listener->ifd_data_fd);
1147 if (IS_ERR_OR_NULL(svc->ihandle)) {
1148 pr_err("Ion client could not retrieve the handle\n");
1149 return -ENOMEM;
1150 }
1151
1152 /* Get the physical address of the ION BUF */
1153 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1154 if (ret) {
1155 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1156 ret);
1157 return ret;
1158 }
1159 /* Populate the structure for sending scm call to load image */
1160 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1161 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1162 pr_err("ION memory mapping for listener shared buffer failed\n");
1163 return -ENOMEM;
1164 }
1165 svc->sb_phys = (phys_addr_t)pa;
1166
1167 if (qseecom.qsee_version < QSEE_VERSION_40) {
1168 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1169 req.listener_id = svc->svc.listener_id;
1170 req.sb_len = svc->sb_length;
1171 req.sb_ptr = (uint32_t)svc->sb_phys;
1172 cmd_buf = (void *)&req;
1173 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1174 } else {
1175 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1176 req_64bit.listener_id = svc->svc.listener_id;
1177 req_64bit.sb_len = svc->sb_length;
1178 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1179 cmd_buf = (void *)&req_64bit;
1180 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1181 }
1182
1183 resp.result = QSEOS_RESULT_INCOMPLETE;
1184
Zhen Kongc4c162a2019-01-23 12:07:12 -08001185 mutex_unlock(&listener_access_lock);
1186 mutex_lock(&app_access_lock);
1187 __qseecom_reentrancy_check_if_no_app_blocked(
1188 TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001189 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1190 &resp, sizeof(resp));
Zhen Kongc4c162a2019-01-23 12:07:12 -08001191 mutex_unlock(&app_access_lock);
1192 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001193 if (ret) {
1194 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1195 return -EINVAL;
1196 }
1197
1198 if (resp.result != QSEOS_RESULT_SUCCESS) {
1199 pr_err("Error SB registration req: resp.result = %d\n",
1200 resp.result);
1201 return -EPERM;
1202 }
1203 return 0;
1204}
1205
1206static int qseecom_register_listener(struct qseecom_dev_handle *data,
1207 void __user *argp)
1208{
1209 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001210 struct qseecom_register_listener_req rcvd_lstnr;
1211 struct qseecom_registered_listener_list *new_entry;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001212 struct qseecom_registered_listener_list *ptr_svc;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001213
1214 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1215 if (ret) {
1216 pr_err("copy_from_user failed\n");
1217 return ret;
1218 }
1219 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1220 rcvd_lstnr.sb_size))
1221 return -EFAULT;
1222
Zhen Kong3c674612018-09-06 22:51:27 -07001223 data->listener.id = rcvd_lstnr.listener_id;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001224
1225 ptr_svc = __qseecom_find_svc(rcvd_lstnr.listener_id);
1226 if (ptr_svc) {
1227 if (ptr_svc->unregister_pending == false) {
1228 pr_err("Service %d is not unique\n",
Zhen Kong3c674612018-09-06 22:51:27 -07001229 rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001230 data->released = true;
1231 return -EBUSY;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001232 } else {
1233 /*wait until listener is unregistered*/
1234 pr_debug("register %d has to wait\n",
1235 rcvd_lstnr.listener_id);
1236 mutex_unlock(&listener_access_lock);
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301237 ret = wait_event_interruptible(
Zhen Kongbcdeda22018-11-16 13:50:51 -08001238 qseecom.register_lsnr_pending_wq,
1239 list_empty(
1240 &qseecom.unregister_lsnr_pending_list_head));
1241 if (ret) {
1242 pr_err("interrupted register_pending_wq %d\n",
1243 rcvd_lstnr.listener_id);
1244 mutex_lock(&listener_access_lock);
1245 return -ERESTARTSYS;
1246 }
1247 mutex_lock(&listener_access_lock);
1248 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001249 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001250 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1251 if (!new_entry)
1252 return -ENOMEM;
1253 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
Zhen Kongbcdeda22018-11-16 13:50:51 -08001254 new_entry->rcv_req_flag = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001255
1256 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1257 new_entry->sb_length = rcvd_lstnr.sb_size;
1258 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1259 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
Zhen Kong3c674612018-09-06 22:51:27 -07001260 pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
1261 rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001262 kzfree(new_entry);
1263 return -ENOMEM;
1264 }
1265
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001266 init_waitqueue_head(&new_entry->rcv_req_wq);
1267 init_waitqueue_head(&new_entry->listener_block_app_wq);
1268 new_entry->send_resp_flag = 0;
1269 new_entry->listener_in_use = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001270 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001271
Zhen Kong3c674612018-09-06 22:51:27 -07001272 pr_warn("Service %d is registered\n", rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001273 return ret;
1274}
1275
Zhen Kongbcdeda22018-11-16 13:50:51 -08001276static int __qseecom_unregister_listener(struct qseecom_dev_handle *data,
1277 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001278{
1279 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001280 struct qseecom_register_listener_ireq req;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001281 struct qseecom_command_scm_resp resp;
1282 struct ion_handle *ihandle = NULL; /* Retrieve phy addr */
1283
1284 req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
1285 req.listener_id = data->listener.id;
1286 resp.result = QSEOS_RESULT_INCOMPLETE;
1287
Zhen Kongc4c162a2019-01-23 12:07:12 -08001288 mutex_unlock(&listener_access_lock);
1289 mutex_lock(&app_access_lock);
1290 __qseecom_reentrancy_check_if_no_app_blocked(
1291 TZ_OS_DEREGISTER_LISTENER_ID);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001292 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
1293 sizeof(req), &resp, sizeof(resp));
Zhen Kongc4c162a2019-01-23 12:07:12 -08001294 mutex_unlock(&app_access_lock);
1295 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001296 if (ret) {
1297 pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
1298 ret, data->listener.id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001299 if (ret == -EBUSY)
1300 return ret;
Zhen Kong3c674612018-09-06 22:51:27 -07001301 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001302 }
1303
1304 if (resp.result != QSEOS_RESULT_SUCCESS) {
1305 pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
1306 resp.result, data->listener.id);
Zhen Kong3c674612018-09-06 22:51:27 -07001307 ret = -EPERM;
1308 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001309 }
1310
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001311 while (atomic_read(&data->ioctl_count) > 1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301312 if (wait_event_interruptible(data->abort_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001313 atomic_read(&data->ioctl_count) <= 1)) {
1314 pr_err("Interrupted from abort\n");
1315 ret = -ERESTARTSYS;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001316 }
1317 }
1318
Zhen Kong3c674612018-09-06 22:51:27 -07001319exit:
1320 if (ptr_svc->sb_virt) {
1321 ihandle = ptr_svc->ihandle;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001322 if (!IS_ERR_OR_NULL(ihandle)) {
1323 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
1324 ion_free(qseecom.ion_clnt, ihandle);
1325 }
1326 }
Zhen Kong3c674612018-09-06 22:51:27 -07001327 list_del(&ptr_svc->list);
1328 kzfree(ptr_svc);
1329
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001330 data->released = true;
Zhen Kong3c674612018-09-06 22:51:27 -07001331 pr_warn("Service %d is unregistered\n", data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001332 return ret;
1333}
1334
Zhen Kongbcdeda22018-11-16 13:50:51 -08001335static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
1336{
1337 struct qseecom_registered_listener_list *ptr_svc = NULL;
1338 struct qseecom_unregister_pending_list *entry = NULL;
1339
1340 ptr_svc = __qseecom_find_svc(data->listener.id);
1341 if (!ptr_svc) {
1342 pr_err("Unregiser invalid listener ID %d\n", data->listener.id);
1343 return -ENODATA;
1344 }
1345 /* stop CA thread waiting for listener response */
1346 ptr_svc->abort = 1;
1347 wake_up_interruptible_all(&qseecom.send_resp_wq);
1348
Zhen Kongc4c162a2019-01-23 12:07:12 -08001349 /* stop listener thread waiting for listener request */
1350 data->abort = 1;
1351 wake_up_all(&ptr_svc->rcv_req_wq);
1352
Zhen Kongbcdeda22018-11-16 13:50:51 -08001353 /* return directly if pending*/
1354 if (ptr_svc->unregister_pending)
1355 return 0;
1356
1357 /*add unregistration into pending list*/
1358 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1359 if (!entry)
1360 return -ENOMEM;
1361 entry->data = data;
1362 list_add_tail(&entry->list,
1363 &qseecom.unregister_lsnr_pending_list_head);
1364 ptr_svc->unregister_pending = true;
1365 pr_debug("unregister %d pending\n", data->listener.id);
1366 return 0;
1367}
1368
1369static void __qseecom_processing_pending_lsnr_unregister(void)
1370{
1371 struct qseecom_unregister_pending_list *entry = NULL;
1372 struct qseecom_registered_listener_list *ptr_svc = NULL;
1373 struct list_head *pos;
1374 int ret = 0;
1375
1376 mutex_lock(&listener_access_lock);
1377 while (!list_empty(&qseecom.unregister_lsnr_pending_list_head)) {
1378 pos = qseecom.unregister_lsnr_pending_list_head.next;
1379 entry = list_entry(pos,
1380 struct qseecom_unregister_pending_list, list);
1381 if (entry && entry->data) {
1382 pr_debug("process pending unregister %d\n",
1383 entry->data->listener.id);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08001384 /* don't process if qseecom_release is not called*/
1385 if (!entry->data->listener.release_called)
1386 break;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001387 ptr_svc = __qseecom_find_svc(
1388 entry->data->listener.id);
1389 if (ptr_svc) {
1390 ret = __qseecom_unregister_listener(
1391 entry->data, ptr_svc);
1392 if (ret == -EBUSY) {
1393 pr_debug("unregister %d pending again\n",
1394 entry->data->listener.id);
1395 mutex_unlock(&listener_access_lock);
1396 return;
1397 }
1398 } else
1399 pr_err("invalid listener %d\n",
1400 entry->data->listener.id);
1401 kzfree(entry->data);
1402 }
1403 list_del(pos);
1404 kzfree(entry);
1405 }
1406 mutex_unlock(&listener_access_lock);
1407 wake_up_interruptible(&qseecom.register_lsnr_pending_wq);
1408}
1409
Zhen Kongc4c162a2019-01-23 12:07:12 -08001410static void __wakeup_unregister_listener_kthread(void)
1411{
1412 atomic_set(&qseecom.unregister_lsnr_kthread_state,
1413 LSNR_UNREG_KT_WAKEUP);
1414 wake_up_interruptible(&qseecom.unregister_lsnr_kthread_wq);
1415}
1416
1417static int __qseecom_unregister_listener_kthread_func(void *data)
1418{
1419 while (!kthread_should_stop()) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301420 wait_event_interruptible(
Zhen Kongc4c162a2019-01-23 12:07:12 -08001421 qseecom.unregister_lsnr_kthread_wq,
1422 atomic_read(&qseecom.unregister_lsnr_kthread_state)
1423 == LSNR_UNREG_KT_WAKEUP);
1424 pr_debug("kthread to unregister listener is called %d\n",
1425 atomic_read(&qseecom.unregister_lsnr_kthread_state));
1426 __qseecom_processing_pending_lsnr_unregister();
1427 atomic_set(&qseecom.unregister_lsnr_kthread_state,
1428 LSNR_UNREG_KT_SLEEP);
1429 }
1430 pr_warn("kthread to unregister listener stopped\n");
1431 return 0;
1432}
1433
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001434static int __qseecom_set_msm_bus_request(uint32_t mode)
1435{
1436 int ret = 0;
1437 struct qseecom_clk *qclk;
1438
1439 qclk = &qseecom.qsee;
1440 if (qclk->ce_core_src_clk != NULL) {
1441 if (mode == INACTIVE) {
1442 __qseecom_disable_clk(CLK_QSEE);
1443 } else {
1444 ret = __qseecom_enable_clk(CLK_QSEE);
1445 if (ret)
1446 pr_err("CLK enabling failed (%d) MODE (%d)\n",
1447 ret, mode);
1448 }
1449 }
1450
1451 if ((!ret) && (qseecom.current_mode != mode)) {
1452 ret = msm_bus_scale_client_update_request(
1453 qseecom.qsee_perf_client, mode);
1454 if (ret) {
1455 pr_err("Bandwidth req failed(%d) MODE (%d)\n",
1456 ret, mode);
1457 if (qclk->ce_core_src_clk != NULL) {
1458 if (mode == INACTIVE) {
1459 ret = __qseecom_enable_clk(CLK_QSEE);
1460 if (ret)
1461 pr_err("CLK enable failed\n");
1462 } else
1463 __qseecom_disable_clk(CLK_QSEE);
1464 }
1465 }
1466 qseecom.current_mode = mode;
1467 }
1468 return ret;
1469}
1470
1471static void qseecom_bw_inactive_req_work(struct work_struct *work)
1472{
1473 mutex_lock(&app_access_lock);
1474 mutex_lock(&qsee_bw_mutex);
1475 if (qseecom.timer_running)
1476 __qseecom_set_msm_bus_request(INACTIVE);
1477 pr_debug("current_mode = %d, cumulative_mode = %d\n",
1478 qseecom.current_mode, qseecom.cumulative_mode);
1479 qseecom.timer_running = false;
1480 mutex_unlock(&qsee_bw_mutex);
1481 mutex_unlock(&app_access_lock);
1482}
1483
1484static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
1485{
1486 schedule_work(&qseecom.bw_inactive_req_ws);
1487}
1488
1489static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1490{
1491 struct qseecom_clk *qclk;
1492 int ret = 0;
1493
1494 mutex_lock(&clk_access_lock);
1495 if (ce == CLK_QSEE)
1496 qclk = &qseecom.qsee;
1497 else
1498 qclk = &qseecom.ce_drv;
1499
1500 if (qclk->clk_access_cnt > 2) {
1501 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1502 ret = -EINVAL;
1503 goto err_dec_ref_cnt;
1504 }
1505 if (qclk->clk_access_cnt == 2)
1506 qclk->clk_access_cnt--;
1507
1508err_dec_ref_cnt:
1509 mutex_unlock(&clk_access_lock);
1510 return ret;
1511}
1512
1513
1514static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1515{
1516 int32_t ret = 0;
1517 int32_t request_mode = INACTIVE;
1518
1519 mutex_lock(&qsee_bw_mutex);
1520 if (mode == 0) {
1521 if (qseecom.cumulative_mode > MEDIUM)
1522 request_mode = HIGH;
1523 else
1524 request_mode = qseecom.cumulative_mode;
1525 } else {
1526 request_mode = mode;
1527 }
1528
1529 ret = __qseecom_set_msm_bus_request(request_mode);
1530 if (ret) {
1531 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1532 ret, request_mode);
1533 goto err_scale_timer;
1534 }
1535
1536 if (qseecom.timer_running) {
1537 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1538 if (ret) {
1539 pr_err("Failed to decrease clk ref count.\n");
1540 goto err_scale_timer;
1541 }
1542 del_timer_sync(&(qseecom.bw_scale_down_timer));
1543 qseecom.timer_running = false;
1544 }
1545err_scale_timer:
1546 mutex_unlock(&qsee_bw_mutex);
1547 return ret;
1548}
1549
1550
1551static int qseecom_unregister_bus_bandwidth_needs(
1552 struct qseecom_dev_handle *data)
1553{
1554 int32_t ret = 0;
1555
1556 qseecom.cumulative_mode -= data->mode;
1557 data->mode = INACTIVE;
1558
1559 return ret;
1560}
1561
1562static int __qseecom_register_bus_bandwidth_needs(
1563 struct qseecom_dev_handle *data, uint32_t request_mode)
1564{
1565 int32_t ret = 0;
1566
1567 if (data->mode == INACTIVE) {
1568 qseecom.cumulative_mode += request_mode;
1569 data->mode = request_mode;
1570 } else {
1571 if (data->mode != request_mode) {
1572 qseecom.cumulative_mode -= data->mode;
1573 qseecom.cumulative_mode += request_mode;
1574 data->mode = request_mode;
1575 }
1576 }
1577 return ret;
1578}
1579
1580static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1581{
1582 int ret = 0;
1583
1584 ret = qsee_vote_for_clock(data, CLK_DFAB);
1585 if (ret) {
1586 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1587 goto perf_enable_exit;
1588 }
1589 ret = qsee_vote_for_clock(data, CLK_SFPB);
1590 if (ret) {
1591 qsee_disable_clock_vote(data, CLK_DFAB);
1592 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1593 goto perf_enable_exit;
1594 }
1595
1596perf_enable_exit:
1597 return ret;
1598}
1599
1600static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1601 void __user *argp)
1602{
1603 int32_t ret = 0;
1604 int32_t req_mode;
1605
1606 if (qseecom.no_clock_support)
1607 return 0;
1608
1609 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1610 if (ret) {
1611 pr_err("copy_from_user failed\n");
1612 return ret;
1613 }
1614 if (req_mode > HIGH) {
1615 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1616 return -EINVAL;
1617 }
1618
1619 /*
1620 * Register bus bandwidth needs if bus scaling feature is enabled;
1621 * otherwise, qseecom enable/disable clocks for the client directly.
1622 */
1623 if (qseecom.support_bus_scaling) {
1624 mutex_lock(&qsee_bw_mutex);
1625 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1626 mutex_unlock(&qsee_bw_mutex);
1627 } else {
1628 pr_debug("Bus scaling feature is NOT enabled\n");
1629 pr_debug("request bandwidth mode %d for the client\n",
1630 req_mode);
1631 if (req_mode != INACTIVE) {
1632 ret = qseecom_perf_enable(data);
1633 if (ret)
1634 pr_err("Failed to vote for clock with err %d\n",
1635 ret);
1636 } else {
1637 qsee_disable_clock_vote(data, CLK_DFAB);
1638 qsee_disable_clock_vote(data, CLK_SFPB);
1639 }
1640 }
1641 return ret;
1642}
1643
1644static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1645{
1646 if (qseecom.no_clock_support)
1647 return;
1648
1649 mutex_lock(&qsee_bw_mutex);
1650 qseecom.bw_scale_down_timer.expires = jiffies +
1651 msecs_to_jiffies(duration);
1652 mod_timer(&(qseecom.bw_scale_down_timer),
1653 qseecom.bw_scale_down_timer.expires);
1654 qseecom.timer_running = true;
1655 mutex_unlock(&qsee_bw_mutex);
1656}
1657
1658static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1659{
1660 if (!qseecom.support_bus_scaling)
1661 qsee_disable_clock_vote(data, CLK_SFPB);
1662 else
1663 __qseecom_add_bw_scale_down_timer(
1664 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1665}
1666
1667static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1668{
1669 int ret = 0;
1670
1671 if (qseecom.support_bus_scaling) {
1672 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1673 if (ret)
1674 pr_err("Failed to set bw MEDIUM.\n");
1675 } else {
1676 ret = qsee_vote_for_clock(data, CLK_SFPB);
1677 if (ret)
1678 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1679 }
1680 return ret;
1681}
1682
1683static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1684 void __user *argp)
1685{
1686 ion_phys_addr_t pa;
1687 int32_t ret;
1688 struct qseecom_set_sb_mem_param_req req;
1689 size_t len;
1690
1691 /* Copy the relevant information needed for loading the image */
1692 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1693 return -EFAULT;
1694
1695 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1696 (req.sb_len == 0)) {
1697 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1698 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1699 return -EFAULT;
1700 }
1701 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1702 req.sb_len))
1703 return -EFAULT;
1704
1705 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001706 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001707 req.ifd_data_fd);
1708 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1709 pr_err("Ion client could not retrieve the handle\n");
1710 return -ENOMEM;
1711 }
1712 /* Get the physical address of the ION BUF */
1713 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1714 if (ret) {
1715
1716 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1717 ret);
1718 return ret;
1719 }
1720
1721 if (len < req.sb_len) {
1722 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1723 req.sb_len, len);
1724 return -EINVAL;
1725 }
1726 /* Populate the structure for sending scm call to load image */
1727 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1728 data->client.ihandle);
1729 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1730 pr_err("ION memory mapping for client shared buf failed\n");
1731 return -ENOMEM;
1732 }
1733 data->client.sb_phys = (phys_addr_t)pa;
1734 data->client.sb_length = req.sb_len;
1735 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1736 return 0;
1737}
1738
Zhen Kong26e62742018-05-04 17:19:06 -07001739static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
1740 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001741{
1742 int ret;
1743
1744 ret = (qseecom.send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001745 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001746}
1747
1748static int __qseecom_reentrancy_listener_has_sent_rsp(
1749 struct qseecom_dev_handle *data,
1750 struct qseecom_registered_listener_list *ptr_svc)
1751{
1752 int ret;
1753
1754 ret = (ptr_svc->send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001755 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001756}
1757
1758static void __qseecom_clean_listener_sglistinfo(
1759 struct qseecom_registered_listener_list *ptr_svc)
1760{
1761 if (ptr_svc->sglist_cnt) {
1762 memset(ptr_svc->sglistinfo_ptr, 0,
1763 SGLISTINFO_TABLE_SIZE);
1764 ptr_svc->sglist_cnt = 0;
1765 }
1766}
1767
1768static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
1769 struct qseecom_command_scm_resp *resp)
1770{
1771 int ret = 0;
1772 int rc = 0;
1773 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07001774 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
1775 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
1776 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001777 struct qseecom_registered_listener_list *ptr_svc = NULL;
1778 sigset_t new_sigset;
1779 sigset_t old_sigset;
1780 uint32_t status;
1781 void *cmd_buf = NULL;
1782 size_t cmd_len;
1783 struct sglist_info *table = NULL;
1784
Zhen Kongbcdeda22018-11-16 13:50:51 -08001785 qseecom.app_block_ref_cnt++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001786 while (resp->result == QSEOS_RESULT_INCOMPLETE) {
1787 lstnr = resp->data;
1788 /*
1789 * Wake up blocking lsitener service with the lstnr id
1790 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08001791 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001792 list_for_each_entry(ptr_svc,
1793 &qseecom.registered_listener_list_head, list) {
1794 if (ptr_svc->svc.listener_id == lstnr) {
1795 ptr_svc->listener_in_use = true;
1796 ptr_svc->rcv_req_flag = 1;
1797 wake_up_interruptible(&ptr_svc->rcv_req_wq);
1798 break;
1799 }
1800 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001801
1802 if (ptr_svc == NULL) {
1803 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07001804 rc = -EINVAL;
1805 status = QSEOS_RESULT_FAILURE;
1806 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001807 }
1808
1809 if (!ptr_svc->ihandle) {
1810 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07001811 rc = -EINVAL;
1812 status = QSEOS_RESULT_FAILURE;
1813 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001814 }
1815
1816 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07001817 pr_err("Service %d does not exist\n",
1818 lstnr);
1819 rc = -ERESTARTSYS;
1820 ptr_svc = NULL;
1821 status = QSEOS_RESULT_FAILURE;
1822 goto err_resp;
1823 }
1824
1825 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08001826 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07001827 lstnr, ptr_svc->abort);
1828 rc = -ENODEV;
1829 status = QSEOS_RESULT_FAILURE;
1830 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001831 }
Zhen Kong25731112018-09-20 13:10:03 -07001832
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001833 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
1834
1835 /* initialize the new signal mask with all signals*/
1836 sigfillset(&new_sigset);
1837 /* block all signals */
1838 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
1839
Zhen Kongbcdeda22018-11-16 13:50:51 -08001840 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001841 do {
1842 /*
1843 * When reentrancy is not supported, check global
1844 * send_resp_flag; otherwise, check this listener's
1845 * send_resp_flag.
1846 */
1847 if (!qseecom.qsee_reentrancy_support &&
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301848 !wait_event_interruptible(qseecom.send_resp_wq,
Zhen Kong26e62742018-05-04 17:19:06 -07001849 __qseecom_listener_has_sent_rsp(
1850 data, ptr_svc))) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001851 break;
1852 }
1853
1854 if (qseecom.qsee_reentrancy_support &&
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301855 !wait_event_interruptible(qseecom.send_resp_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001856 __qseecom_reentrancy_listener_has_sent_rsp(
1857 data, ptr_svc))) {
1858 break;
1859 }
1860 } while (1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001861 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001862 /* restore signal mask */
1863 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07001864 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001865 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
1866 data->client.app_id, lstnr, ret);
1867 rc = -ENODEV;
1868 status = QSEOS_RESULT_FAILURE;
1869 } else {
1870 status = QSEOS_RESULT_SUCCESS;
1871 }
Zhen Kong26e62742018-05-04 17:19:06 -07001872err_resp:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001873 qseecom.send_resp_flag = 0;
Zhen Kong7d500032018-08-06 16:58:31 -07001874 if (ptr_svc) {
1875 ptr_svc->send_resp_flag = 0;
1876 table = ptr_svc->sglistinfo_ptr;
1877 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001878 if (qseecom.qsee_version < QSEE_VERSION_40) {
1879 send_data_rsp.listener_id = lstnr;
1880 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001881 if (table) {
1882 send_data_rsp.sglistinfo_ptr =
1883 (uint32_t)virt_to_phys(table);
1884 send_data_rsp.sglistinfo_len =
1885 SGLISTINFO_TABLE_SIZE;
1886 dmac_flush_range((void *)table,
1887 (void *)table + SGLISTINFO_TABLE_SIZE);
1888 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001889 cmd_buf = (void *)&send_data_rsp;
1890 cmd_len = sizeof(send_data_rsp);
1891 } else {
1892 send_data_rsp_64bit.listener_id = lstnr;
1893 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001894 if (table) {
1895 send_data_rsp_64bit.sglistinfo_ptr =
1896 virt_to_phys(table);
1897 send_data_rsp_64bit.sglistinfo_len =
1898 SGLISTINFO_TABLE_SIZE;
1899 dmac_flush_range((void *)table,
1900 (void *)table + SGLISTINFO_TABLE_SIZE);
1901 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001902 cmd_buf = (void *)&send_data_rsp_64bit;
1903 cmd_len = sizeof(send_data_rsp_64bit);
1904 }
Zhen Kong7d500032018-08-06 16:58:31 -07001905 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001906 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
1907 else
1908 *(uint32_t *)cmd_buf =
1909 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07001910 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001911 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
1912 ptr_svc->ihandle,
1913 ptr_svc->sb_virt, ptr_svc->sb_length,
1914 ION_IOC_CLEAN_INV_CACHES);
1915 if (ret) {
1916 pr_err("cache operation failed %d\n", ret);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001917 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001918 }
1919 }
1920
1921 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
1922 ret = __qseecom_enable_clk(CLK_QSEE);
1923 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08001924 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001925 }
1926
1927 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1928 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07001929 if (ptr_svc) {
1930 ptr_svc->listener_in_use = false;
1931 __qseecom_clean_listener_sglistinfo(ptr_svc);
1932 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001933 if (ret) {
1934 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1935 ret, data->client.app_id);
1936 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1937 __qseecom_disable_clk(CLK_QSEE);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001938 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001939 }
Zhen Kong26e62742018-05-04 17:19:06 -07001940 pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
1941 status, resp->result, data->client.app_id, lstnr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001942 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1943 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1944 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1945 resp->result, data->client.app_id, lstnr);
1946 ret = -EINVAL;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001947 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001948 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001949exit:
1950 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001951 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1952 __qseecom_disable_clk(CLK_QSEE);
1953
1954 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001955 qseecom.app_block_ref_cnt--;
Zhen Kongcc580932019-04-23 22:16:56 -07001956 wake_up_interruptible_all(&qseecom.app_block_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001957 if (rc)
1958 return rc;
1959
1960 return ret;
1961}
1962
Zhen Konga91aaf02018-02-02 17:21:04 -08001963static int __qseecom_process_reentrancy_blocked_on_listener(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001964 struct qseecom_command_scm_resp *resp,
1965 struct qseecom_registered_app_list *ptr_app,
1966 struct qseecom_dev_handle *data)
1967{
1968 struct qseecom_registered_listener_list *list_ptr;
1969 int ret = 0;
1970 struct qseecom_continue_blocked_request_ireq ireq;
1971 struct qseecom_command_scm_resp continue_resp;
Zhen Konga91aaf02018-02-02 17:21:04 -08001972 unsigned int session_id;
Zhen Kong3d1d92f2018-02-02 17:21:04 -08001973 sigset_t new_sigset;
1974 sigset_t old_sigset;
Zhen Konga91aaf02018-02-02 17:21:04 -08001975 unsigned long flags;
1976 bool found_app = false;
Zhen Kong0ea975d2019-03-12 14:40:24 -07001977 struct qseecom_registered_app_list dummy_app_entry = { {NULL} };
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001978
1979 if (!resp || !data) {
1980 pr_err("invalid resp or data pointer\n");
1981 ret = -EINVAL;
1982 goto exit;
1983 }
1984
1985 /* find app_id & img_name from list */
Zhen Kong0ea975d2019-03-12 14:40:24 -07001986 if (!ptr_app) {
1987 if (data->client.from_smcinvoke) {
1988 pr_debug("This request is from smcinvoke\n");
1989 ptr_app = &dummy_app_entry;
1990 ptr_app->app_id = data->client.app_id;
1991 } else {
1992 spin_lock_irqsave(&qseecom.registered_app_list_lock,
1993 flags);
1994 list_for_each_entry(ptr_app,
1995 &qseecom.registered_app_list_head, list) {
1996 if ((ptr_app->app_id == data->client.app_id) &&
1997 (!strcmp(ptr_app->app_name,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001998 data->client.app_name))) {
Zhen Kong0ea975d2019-03-12 14:40:24 -07001999 found_app = true;
2000 break;
2001 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002002 }
Zhen Kong0ea975d2019-03-12 14:40:24 -07002003 spin_unlock_irqrestore(
2004 &qseecom.registered_app_list_lock, flags);
2005 if (!found_app) {
2006 pr_err("app_id %d (%s) is not found\n",
2007 data->client.app_id,
2008 (char *)data->client.app_name);
2009 ret = -ENOENT;
2010 goto exit;
2011 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002012 }
2013 }
2014
Zhen Kongd8cc0052017-11-13 15:13:31 -08002015 do {
Zhen Konga91aaf02018-02-02 17:21:04 -08002016 session_id = resp->resp_type;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002017 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002018 list_ptr = __qseecom_find_svc(resp->data);
2019 if (!list_ptr) {
2020 pr_err("Invalid listener ID %d\n", resp->data);
2021 ret = -ENODATA;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002022 mutex_unlock(&listener_access_lock);
Zhen Konge7f525f2017-12-01 18:26:25 -08002023 goto exit;
2024 }
Zhen Konga91aaf02018-02-02 17:21:04 -08002025 ptr_app->blocked_on_listener_id = resp->data;
2026
2027 pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
2028 resp->data, list_ptr->listener_in_use,
2029 session_id, data->client.app_id);
2030
2031 /* sleep until listener is available */
2032 sigfillset(&new_sigset);
2033 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2034
2035 do {
2036 qseecom.app_block_ref_cnt++;
2037 ptr_app->app_blocked = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002038 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002039 mutex_unlock(&app_access_lock);
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302040 wait_event_interruptible(
Zhen Konga91aaf02018-02-02 17:21:04 -08002041 list_ptr->listener_block_app_wq,
2042 !list_ptr->listener_in_use);
2043 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002044 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002045 ptr_app->app_blocked = false;
2046 qseecom.app_block_ref_cnt--;
2047 } while (list_ptr->listener_in_use);
2048
2049 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2050
2051 ptr_app->blocked_on_listener_id = 0;
2052 pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
2053 resp->data, session_id, data->client.app_id);
2054
2055 /* notify TZ that listener is available */
2056 ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
2057
2058 if (qseecom.smcinvoke_support)
2059 ireq.app_or_session_id = session_id;
2060 else
2061 ireq.app_or_session_id = data->client.app_id;
2062
2063 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2064 &ireq, sizeof(ireq),
2065 &continue_resp, sizeof(continue_resp));
2066 if (ret && qseecom.smcinvoke_support) {
2067 /* retry with legacy cmd */
2068 qseecom.smcinvoke_support = false;
2069 ireq.app_or_session_id = data->client.app_id;
2070 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2071 &ireq, sizeof(ireq),
2072 &continue_resp, sizeof(continue_resp));
2073 qseecom.smcinvoke_support = true;
2074 if (ret) {
2075 pr_err("unblock app %d or session %d fail\n",
2076 data->client.app_id, session_id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002077 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002078 goto exit;
2079 }
2080 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08002081 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002082 resp->result = continue_resp.result;
2083 resp->resp_type = continue_resp.resp_type;
2084 resp->data = continue_resp.data;
2085 pr_debug("unblock resp = %d\n", resp->result);
2086 } while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);
2087
2088 if (resp->result != QSEOS_RESULT_INCOMPLETE) {
2089 pr_err("Unexpected unblock resp %d\n", resp->result);
2090 ret = -EINVAL;
Zhen Kong2f60f492017-06-29 15:22:14 -07002091 }
Zhen Kong2f60f492017-06-29 15:22:14 -07002092exit:
2093 return ret;
2094}
2095
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002096static int __qseecom_reentrancy_process_incomplete_cmd(
2097 struct qseecom_dev_handle *data,
2098 struct qseecom_command_scm_resp *resp)
2099{
2100 int ret = 0;
2101 int rc = 0;
2102 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07002103 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
2104 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
2105 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002106 struct qseecom_registered_listener_list *ptr_svc = NULL;
2107 sigset_t new_sigset;
2108 sigset_t old_sigset;
2109 uint32_t status;
2110 void *cmd_buf = NULL;
2111 size_t cmd_len;
2112 struct sglist_info *table = NULL;
2113
Zhen Kong26e62742018-05-04 17:19:06 -07002114 while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002115 lstnr = resp->data;
2116 /*
2117 * Wake up blocking lsitener service with the lstnr id
2118 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002119 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002120 list_for_each_entry(ptr_svc,
2121 &qseecom.registered_listener_list_head, list) {
2122 if (ptr_svc->svc.listener_id == lstnr) {
2123 ptr_svc->listener_in_use = true;
2124 ptr_svc->rcv_req_flag = 1;
2125 wake_up_interruptible(&ptr_svc->rcv_req_wq);
2126 break;
2127 }
2128 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002129
2130 if (ptr_svc == NULL) {
2131 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07002132 rc = -EINVAL;
2133 status = QSEOS_RESULT_FAILURE;
2134 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002135 }
2136
2137 if (!ptr_svc->ihandle) {
2138 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07002139 rc = -EINVAL;
2140 status = QSEOS_RESULT_FAILURE;
2141 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002142 }
2143
2144 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07002145 pr_err("Service %d does not exist\n",
2146 lstnr);
2147 rc = -ERESTARTSYS;
2148 ptr_svc = NULL;
2149 status = QSEOS_RESULT_FAILURE;
2150 goto err_resp;
2151 }
2152
2153 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08002154 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07002155 lstnr, ptr_svc->abort);
2156 rc = -ENODEV;
2157 status = QSEOS_RESULT_FAILURE;
2158 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002159 }
Zhen Kong25731112018-09-20 13:10:03 -07002160
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002161 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
2162
2163 /* initialize the new signal mask with all signals*/
2164 sigfillset(&new_sigset);
2165
2166 /* block all signals */
2167 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2168
2169 /* unlock mutex btw waking listener and sleep-wait */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002170 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002171 mutex_unlock(&app_access_lock);
2172 do {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302173 if (!wait_event_interruptible(qseecom.send_resp_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002174 __qseecom_reentrancy_listener_has_sent_rsp(
2175 data, ptr_svc))) {
2176 break;
2177 }
2178 } while (1);
2179 /* lock mutex again after resp sent */
2180 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002181 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002182 ptr_svc->send_resp_flag = 0;
2183 qseecom.send_resp_flag = 0;
2184
2185 /* restore signal mask */
2186 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07002187 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002188 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
2189 data->client.app_id, lstnr, ret);
2190 rc = -ENODEV;
2191 status = QSEOS_RESULT_FAILURE;
2192 } else {
2193 status = QSEOS_RESULT_SUCCESS;
2194 }
Zhen Kong26e62742018-05-04 17:19:06 -07002195err_resp:
Zhen Kong7d500032018-08-06 16:58:31 -07002196 if (ptr_svc)
2197 table = ptr_svc->sglistinfo_ptr;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002198 if (qseecom.qsee_version < QSEE_VERSION_40) {
2199 send_data_rsp.listener_id = lstnr;
2200 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002201 if (table) {
2202 send_data_rsp.sglistinfo_ptr =
2203 (uint32_t)virt_to_phys(table);
2204 send_data_rsp.sglistinfo_len =
2205 SGLISTINFO_TABLE_SIZE;
2206 dmac_flush_range((void *)table,
2207 (void *)table + SGLISTINFO_TABLE_SIZE);
2208 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002209 cmd_buf = (void *)&send_data_rsp;
2210 cmd_len = sizeof(send_data_rsp);
2211 } else {
2212 send_data_rsp_64bit.listener_id = lstnr;
2213 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002214 if (table) {
2215 send_data_rsp_64bit.sglistinfo_ptr =
2216 virt_to_phys(table);
2217 send_data_rsp_64bit.sglistinfo_len =
2218 SGLISTINFO_TABLE_SIZE;
2219 dmac_flush_range((void *)table,
2220 (void *)table + SGLISTINFO_TABLE_SIZE);
2221 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002222 cmd_buf = (void *)&send_data_rsp_64bit;
2223 cmd_len = sizeof(send_data_rsp_64bit);
2224 }
Zhen Kong7d500032018-08-06 16:58:31 -07002225 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002226 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
2227 else
2228 *(uint32_t *)cmd_buf =
2229 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07002230 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002231 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
2232 ptr_svc->ihandle,
2233 ptr_svc->sb_virt, ptr_svc->sb_length,
2234 ION_IOC_CLEAN_INV_CACHES);
2235 if (ret) {
2236 pr_err("cache operation failed %d\n", ret);
2237 return ret;
2238 }
2239 }
2240 if (lstnr == RPMB_SERVICE) {
2241 ret = __qseecom_enable_clk(CLK_QSEE);
2242 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08002243 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002244 }
2245
2246 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2247 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07002248 if (ptr_svc) {
2249 ptr_svc->listener_in_use = false;
2250 __qseecom_clean_listener_sglistinfo(ptr_svc);
2251 wake_up_interruptible(&ptr_svc->listener_block_app_wq);
2252 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002253
2254 if (ret) {
2255 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
2256 ret, data->client.app_id);
2257 goto exit;
2258 }
2259
2260 switch (resp->result) {
2261 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
2262 pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
2263 lstnr, data->client.app_id, resp->data);
2264 if (lstnr == resp->data) {
2265 pr_err("lstnr %d should not be blocked!\n",
2266 lstnr);
2267 ret = -EINVAL;
2268 goto exit;
2269 }
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002270 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002271 ret = __qseecom_process_reentrancy_blocked_on_listener(
2272 resp, NULL, data);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002273 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002274 if (ret) {
2275 pr_err("failed to process App(%d) %s blocked on listener %d\n",
2276 data->client.app_id,
2277 data->client.app_name, resp->data);
2278 goto exit;
2279 }
2280 case QSEOS_RESULT_SUCCESS:
2281 case QSEOS_RESULT_INCOMPLETE:
2282 break;
2283 default:
2284 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
2285 resp->result, data->client.app_id, lstnr);
2286 ret = -EINVAL;
2287 goto exit;
2288 }
2289exit:
Zhen Kongbcdeda22018-11-16 13:50:51 -08002290 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002291 if (lstnr == RPMB_SERVICE)
2292 __qseecom_disable_clk(CLK_QSEE);
2293
2294 }
2295 if (rc)
2296 return rc;
2297
2298 return ret;
2299}
2300
2301/*
2302 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2303 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2304 * So, needs to first check if no app blocked before sending OS level scm call,
2305 * then wait until all apps are unblocked.
2306 */
2307static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2308{
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002309 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2310 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2311 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2312 /* thread sleep until this app unblocked */
2313 while (qseecom.app_block_ref_cnt > 0) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002314 mutex_unlock(&app_access_lock);
Zhen Kongcc580932019-04-23 22:16:56 -07002315 wait_event_interruptible(qseecom.app_block_wq,
2316 (!qseecom.app_block_ref_cnt));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002317 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002318 }
2319 }
2320}
2321
2322/*
2323 * scm_call of send data will fail if this TA is blocked or there are more
2324 * than one TA requesting listener services; So, first check to see if need
2325 * to wait.
2326 */
2327static void __qseecom_reentrancy_check_if_this_app_blocked(
2328 struct qseecom_registered_app_list *ptr_app)
2329{
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002330 if (qseecom.qsee_reentrancy_support) {
Zhen Kongdea10592018-07-30 17:50:10 -07002331 ptr_app->check_block++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002332 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2333 /* thread sleep until this app unblocked */
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002334 mutex_unlock(&app_access_lock);
Zhen Kongcc580932019-04-23 22:16:56 -07002335 wait_event_interruptible(qseecom.app_block_wq,
2336 (!ptr_app->app_blocked &&
2337 qseecom.app_block_ref_cnt <= 1));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002338 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002339 }
Zhen Kongdea10592018-07-30 17:50:10 -07002340 ptr_app->check_block--;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002341 }
2342}
2343
2344static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2345 uint32_t *app_id)
2346{
2347 int32_t ret;
2348 struct qseecom_command_scm_resp resp;
2349 bool found_app = false;
2350 struct qseecom_registered_app_list *entry = NULL;
2351 unsigned long flags = 0;
2352
2353 if (!app_id) {
2354 pr_err("Null pointer to app_id\n");
2355 return -EINVAL;
2356 }
2357 *app_id = 0;
2358
2359 /* check if app exists and has been registered locally */
2360 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2361 list_for_each_entry(entry,
2362 &qseecom.registered_app_list_head, list) {
2363 if (!strcmp(entry->app_name, req.app_name)) {
2364 found_app = true;
2365 break;
2366 }
2367 }
2368 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2369 if (found_app) {
2370 pr_debug("Found app with id %d\n", entry->app_id);
2371 *app_id = entry->app_id;
2372 return 0;
2373 }
2374
2375 memset((void *)&resp, 0, sizeof(resp));
2376
2377 /* SCM_CALL to check if app_id for the mentioned app exists */
2378 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2379 sizeof(struct qseecom_check_app_ireq),
2380 &resp, sizeof(resp));
2381 if (ret) {
2382 pr_err("scm_call to check if app is already loaded failed\n");
2383 return -EINVAL;
2384 }
2385
2386 if (resp.result == QSEOS_RESULT_FAILURE)
2387 return 0;
2388
2389 switch (resp.resp_type) {
2390 /*qsee returned listener type response */
2391 case QSEOS_LISTENER_ID:
2392 pr_err("resp type is of listener type instead of app");
2393 return -EINVAL;
2394 case QSEOS_APP_ID:
2395 *app_id = resp.data;
2396 return 0;
2397 default:
2398 pr_err("invalid resp type (%d) from qsee",
2399 resp.resp_type);
2400 return -ENODEV;
2401 }
2402}
2403
2404static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2405{
2406 struct qseecom_registered_app_list *entry = NULL;
2407 unsigned long flags = 0;
2408 u32 app_id = 0;
2409 struct ion_handle *ihandle; /* Ion handle */
2410 struct qseecom_load_img_req load_img_req;
2411 int32_t ret = 0;
2412 ion_phys_addr_t pa = 0;
2413 size_t len;
2414 struct qseecom_command_scm_resp resp;
2415 struct qseecom_check_app_ireq req;
2416 struct qseecom_load_app_ireq load_req;
2417 struct qseecom_load_app_64bit_ireq load_req_64bit;
2418 void *cmd_buf = NULL;
2419 size_t cmd_len;
2420 bool first_time = false;
2421
2422 /* Copy the relevant information needed for loading the image */
2423 if (copy_from_user(&load_img_req,
2424 (void __user *)argp,
2425 sizeof(struct qseecom_load_img_req))) {
2426 pr_err("copy_from_user failed\n");
2427 return -EFAULT;
2428 }
2429
2430 /* Check and load cmnlib */
2431 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2432 if (!qseecom.commonlib_loaded &&
2433 load_img_req.app_arch == ELFCLASS32) {
2434 ret = qseecom_load_commonlib_image(data, "cmnlib");
2435 if (ret) {
2436 pr_err("failed to load cmnlib\n");
2437 return -EIO;
2438 }
2439 qseecom.commonlib_loaded = true;
2440 pr_debug("cmnlib is loaded\n");
2441 }
2442
2443 if (!qseecom.commonlib64_loaded &&
2444 load_img_req.app_arch == ELFCLASS64) {
2445 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2446 if (ret) {
2447 pr_err("failed to load cmnlib64\n");
2448 return -EIO;
2449 }
2450 qseecom.commonlib64_loaded = true;
2451 pr_debug("cmnlib64 is loaded\n");
2452 }
2453 }
2454
2455 if (qseecom.support_bus_scaling) {
2456 mutex_lock(&qsee_bw_mutex);
2457 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2458 mutex_unlock(&qsee_bw_mutex);
2459 if (ret)
2460 return ret;
2461 }
2462
2463 /* Vote for the SFPB clock */
2464 ret = __qseecom_enable_clk_scale_up(data);
2465 if (ret)
2466 goto enable_clk_err;
2467
2468 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2469 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2470 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2471
2472 ret = __qseecom_check_app_exists(req, &app_id);
2473 if (ret < 0)
2474 goto loadapp_err;
2475
2476 if (app_id) {
2477 pr_debug("App id %d (%s) already exists\n", app_id,
2478 (char *)(req.app_name));
2479 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2480 list_for_each_entry(entry,
2481 &qseecom.registered_app_list_head, list){
2482 if (entry->app_id == app_id) {
2483 entry->ref_cnt++;
2484 break;
2485 }
2486 }
2487 spin_unlock_irqrestore(
2488 &qseecom.registered_app_list_lock, flags);
2489 ret = 0;
2490 } else {
2491 first_time = true;
2492 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2493 (char *)(load_img_req.img_name));
2494 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002495 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002496 load_img_req.ifd_data_fd);
2497 if (IS_ERR_OR_NULL(ihandle)) {
2498 pr_err("Ion client could not retrieve the handle\n");
2499 ret = -ENOMEM;
2500 goto loadapp_err;
2501 }
2502
2503 /* Get the physical address of the ION BUF */
2504 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2505 if (ret) {
2506 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2507 ret);
2508 goto loadapp_err;
2509 }
2510 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2511 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2512 len, load_img_req.mdt_len,
2513 load_img_req.img_len);
2514 ret = -EINVAL;
2515 goto loadapp_err;
2516 }
2517 /* Populate the structure for sending scm call to load image */
2518 if (qseecom.qsee_version < QSEE_VERSION_40) {
2519 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2520 load_req.mdt_len = load_img_req.mdt_len;
2521 load_req.img_len = load_img_req.img_len;
2522 strlcpy(load_req.app_name, load_img_req.img_name,
2523 MAX_APP_NAME_SIZE);
2524 load_req.phy_addr = (uint32_t)pa;
2525 cmd_buf = (void *)&load_req;
2526 cmd_len = sizeof(struct qseecom_load_app_ireq);
2527 } else {
2528 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2529 load_req_64bit.mdt_len = load_img_req.mdt_len;
2530 load_req_64bit.img_len = load_img_req.img_len;
2531 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2532 MAX_APP_NAME_SIZE);
2533 load_req_64bit.phy_addr = (uint64_t)pa;
2534 cmd_buf = (void *)&load_req_64bit;
2535 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2536 }
2537
2538 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2539 ION_IOC_CLEAN_INV_CACHES);
2540 if (ret) {
2541 pr_err("cache operation failed %d\n", ret);
2542 goto loadapp_err;
2543 }
2544
2545 /* SCM_CALL to load the app and get the app_id back */
2546 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2547 cmd_len, &resp, sizeof(resp));
2548 if (ret) {
2549 pr_err("scm_call to load app failed\n");
2550 if (!IS_ERR_OR_NULL(ihandle))
2551 ion_free(qseecom.ion_clnt, ihandle);
2552 ret = -EINVAL;
2553 goto loadapp_err;
2554 }
2555
2556 if (resp.result == QSEOS_RESULT_FAILURE) {
2557 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2558 if (!IS_ERR_OR_NULL(ihandle))
2559 ion_free(qseecom.ion_clnt, ihandle);
2560 ret = -EFAULT;
2561 goto loadapp_err;
2562 }
2563
2564 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2565 ret = __qseecom_process_incomplete_cmd(data, &resp);
2566 if (ret) {
2567 pr_err("process_incomplete_cmd failed err: %d\n",
2568 ret);
2569 if (!IS_ERR_OR_NULL(ihandle))
2570 ion_free(qseecom.ion_clnt, ihandle);
2571 ret = -EFAULT;
2572 goto loadapp_err;
2573 }
2574 }
2575
2576 if (resp.result != QSEOS_RESULT_SUCCESS) {
2577 pr_err("scm_call failed resp.result unknown, %d\n",
2578 resp.result);
2579 if (!IS_ERR_OR_NULL(ihandle))
2580 ion_free(qseecom.ion_clnt, ihandle);
2581 ret = -EFAULT;
2582 goto loadapp_err;
2583 }
2584
2585 app_id = resp.data;
2586
2587 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2588 if (!entry) {
2589 ret = -ENOMEM;
2590 goto loadapp_err;
2591 }
2592 entry->app_id = app_id;
2593 entry->ref_cnt = 1;
2594 entry->app_arch = load_img_req.app_arch;
2595 /*
2596 * keymaster app may be first loaded as "keymaste" by qseecomd,
2597 * and then used as "keymaster" on some targets. To avoid app
2598 * name checking error, register "keymaster" into app_list and
2599 * thread private data.
2600 */
2601 if (!strcmp(load_img_req.img_name, "keymaste"))
2602 strlcpy(entry->app_name, "keymaster",
2603 MAX_APP_NAME_SIZE);
2604 else
2605 strlcpy(entry->app_name, load_img_req.img_name,
2606 MAX_APP_NAME_SIZE);
2607 entry->app_blocked = false;
2608 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07002609 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002610
2611 /* Deallocate the handle */
2612 if (!IS_ERR_OR_NULL(ihandle))
2613 ion_free(qseecom.ion_clnt, ihandle);
2614
2615 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2616 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2617 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2618 flags);
2619
2620 pr_warn("App with id %u (%s) now loaded\n", app_id,
2621 (char *)(load_img_req.img_name));
2622 }
2623 data->client.app_id = app_id;
2624 data->client.app_arch = load_img_req.app_arch;
2625 if (!strcmp(load_img_req.img_name, "keymaste"))
2626 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2627 else
2628 strlcpy(data->client.app_name, load_img_req.img_name,
2629 MAX_APP_NAME_SIZE);
2630 load_img_req.app_id = app_id;
2631 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2632 pr_err("copy_to_user failed\n");
2633 ret = -EFAULT;
2634 if (first_time == true) {
2635 spin_lock_irqsave(
2636 &qseecom.registered_app_list_lock, flags);
2637 list_del(&entry->list);
2638 spin_unlock_irqrestore(
2639 &qseecom.registered_app_list_lock, flags);
2640 kzfree(entry);
2641 }
2642 }
2643
2644loadapp_err:
2645 __qseecom_disable_clk_scale_down(data);
2646enable_clk_err:
2647 if (qseecom.support_bus_scaling) {
2648 mutex_lock(&qsee_bw_mutex);
2649 qseecom_unregister_bus_bandwidth_needs(data);
2650 mutex_unlock(&qsee_bw_mutex);
2651 }
2652 return ret;
2653}
2654
2655static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2656{
2657 int ret = 1; /* Set unload app */
2658
2659 wake_up_all(&qseecom.send_resp_wq);
2660 if (qseecom.qsee_reentrancy_support)
2661 mutex_unlock(&app_access_lock);
2662 while (atomic_read(&data->ioctl_count) > 1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302663 if (wait_event_interruptible(data->abort_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002664 atomic_read(&data->ioctl_count) <= 1)) {
2665 pr_err("Interrupted from abort\n");
2666 ret = -ERESTARTSYS;
2667 break;
2668 }
2669 }
2670 if (qseecom.qsee_reentrancy_support)
2671 mutex_lock(&app_access_lock);
2672 return ret;
2673}
2674
2675static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2676{
2677 int ret = 0;
2678
2679 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2680 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2681 ion_free(qseecom.ion_clnt, data->client.ihandle);
2682 data->client.ihandle = NULL;
2683 }
2684 return ret;
2685}
2686
2687static int qseecom_unload_app(struct qseecom_dev_handle *data,
2688 bool app_crash)
2689{
2690 unsigned long flags;
2691 unsigned long flags1;
2692 int ret = 0;
2693 struct qseecom_command_scm_resp resp;
2694 struct qseecom_registered_app_list *ptr_app = NULL;
2695 bool unload = false;
2696 bool found_app = false;
2697 bool found_dead_app = false;
Zhen Kongf818f152019-03-13 12:31:32 -07002698 bool scm_called = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002699
2700 if (!data) {
2701 pr_err("Invalid/uninitialized device handle\n");
2702 return -EINVAL;
2703 }
2704
2705 if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
2706 pr_debug("Do not unload keymaster app from tz\n");
2707 goto unload_exit;
2708 }
2709
2710 __qseecom_cleanup_app(data);
2711 __qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
2712
2713 if (data->client.app_id > 0) {
2714 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2715 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
2716 list) {
2717 if (ptr_app->app_id == data->client.app_id) {
2718 if (!strcmp((void *)ptr_app->app_name,
2719 (void *)data->client.app_name)) {
2720 found_app = true;
Zhen Kong024798b2018-07-13 18:14:26 -07002721 if (ptr_app->app_blocked ||
2722 ptr_app->check_block)
Zhen Kongaf93d7a2017-10-13 14:01:48 -07002723 app_crash = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002724 if (app_crash || ptr_app->ref_cnt == 1)
2725 unload = true;
2726 break;
2727 }
2728 found_dead_app = true;
2729 break;
2730 }
2731 }
2732 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2733 flags);
2734 if (found_app == false && found_dead_app == false) {
2735 pr_err("Cannot find app with id = %d (%s)\n",
2736 data->client.app_id,
2737 (char *)data->client.app_name);
2738 ret = -EINVAL;
2739 goto unload_exit;
2740 }
2741 }
2742
2743 if (found_dead_app)
2744 pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
2745 (char *)data->client.app_name);
2746
2747 if (unload) {
2748 struct qseecom_unload_app_ireq req;
2749 /* Populate the structure for sending scm call to load image */
2750 req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
2751 req.app_id = data->client.app_id;
2752
2753 /* SCM_CALL to unload the app */
2754 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2755 sizeof(struct qseecom_unload_app_ireq),
2756 &resp, sizeof(resp));
Zhen Kongf818f152019-03-13 12:31:32 -07002757 scm_called = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002758 if (ret) {
2759 pr_err("scm_call to unload app (id = %d) failed\n",
2760 req.app_id);
2761 ret = -EFAULT;
Zhen Kongf818f152019-03-13 12:31:32 -07002762 goto scm_exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002763 } else {
2764 pr_warn("App id %d now unloaded\n", req.app_id);
2765 }
2766 if (resp.result == QSEOS_RESULT_FAILURE) {
2767 pr_err("app (%d) unload_failed!!\n",
2768 data->client.app_id);
2769 ret = -EFAULT;
Zhen Kongf818f152019-03-13 12:31:32 -07002770 goto scm_exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002771 }
2772 if (resp.result == QSEOS_RESULT_SUCCESS)
2773 pr_debug("App (%d) is unloaded!!\n",
2774 data->client.app_id);
2775 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2776 ret = __qseecom_process_incomplete_cmd(data, &resp);
2777 if (ret) {
2778 pr_err("process_incomplete_cmd fail err: %d\n",
2779 ret);
Zhen Kongf818f152019-03-13 12:31:32 -07002780 goto scm_exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002781 }
2782 }
2783 }
2784
Zhen Kongf818f152019-03-13 12:31:32 -07002785scm_exit:
2786 if (scm_called) {
2787 /* double check if this app_entry still exists */
2788 bool doublecheck = false;
2789
2790 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2791 list_for_each_entry(ptr_app,
2792 &qseecom.registered_app_list_head, list) {
2793 if ((ptr_app->app_id == data->client.app_id) &&
2794 (!strcmp((void *)ptr_app->app_name,
2795 (void *)data->client.app_name))) {
2796 doublecheck = true;
2797 break;
2798 }
2799 }
2800 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2801 flags1);
2802 if (!doublecheck) {
2803 pr_warn("app %d(%s) entry is already removed\n",
2804 data->client.app_id,
2805 (char *)data->client.app_name);
2806 found_app = false;
2807 }
2808 }
Zhen Kong7d500032018-08-06 16:58:31 -07002809unload_exit:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002810 if (found_app) {
2811 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2812 if (app_crash) {
2813 ptr_app->ref_cnt = 0;
2814 pr_debug("app_crash: ref_count = 0\n");
2815 } else {
2816 if (ptr_app->ref_cnt == 1) {
2817 ptr_app->ref_cnt = 0;
2818 pr_debug("ref_count set to 0\n");
2819 } else {
2820 ptr_app->ref_cnt--;
2821 pr_debug("Can't unload app(%d) inuse\n",
2822 ptr_app->app_id);
2823 }
2824 }
2825 if (unload) {
2826 list_del(&ptr_app->list);
2827 kzfree(ptr_app);
2828 }
2829 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2830 flags1);
2831 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002832 qseecom_unmap_ion_allocated_memory(data);
2833 data->released = true;
2834 return ret;
2835}
2836
2837static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2838 unsigned long virt)
2839{
2840 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2841}
2842
2843static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2844 unsigned long virt)
2845{
2846 return (uintptr_t)data->client.sb_virt +
2847 (virt - data->client.user_virt_sb_base);
2848}
2849
2850int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2851 struct qseecom_send_svc_cmd_req *req_ptr,
2852 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2853{
2854 int ret = 0;
2855 void *req_buf = NULL;
2856
2857 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2858 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2859 req_ptr, send_svc_ireq_ptr);
2860 return -EINVAL;
2861 }
2862
2863 /* Clients need to ensure req_buf is at base offset of shared buffer */
2864 if ((uintptr_t)req_ptr->cmd_req_buf !=
2865 data_ptr->client.user_virt_sb_base) {
2866 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2867 return -EINVAL;
2868 }
2869
2870 if (data_ptr->client.sb_length <
2871 sizeof(struct qseecom_rpmb_provision_key)) {
2872 pr_err("shared buffer is too small to hold key type\n");
2873 return -EINVAL;
2874 }
2875 req_buf = data_ptr->client.sb_virt;
2876
2877 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2878 send_svc_ireq_ptr->key_type =
2879 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2880 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2881 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2882 data_ptr, (uintptr_t)req_ptr->resp_buf));
2883 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2884
2885 return ret;
2886}
2887
2888int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2889 struct qseecom_send_svc_cmd_req *req_ptr,
2890 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2891{
2892 int ret = 0;
2893 uint32_t reqd_len_sb_in = 0;
2894
2895 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2896 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2897 req_ptr, send_svc_ireq_ptr);
2898 return -EINVAL;
2899 }
2900
2901 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2902 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2903 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2904 pr_err("Required: %u, Available: %zu\n",
2905 reqd_len_sb_in, data_ptr->client.sb_length);
2906 return -ENOMEM;
2907 }
2908
2909 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2910 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2911 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2912 data_ptr, (uintptr_t)req_ptr->resp_buf));
2913 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2914
2915 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2916 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2917
2918
2919 return ret;
2920}
2921
2922static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
2923 struct qseecom_send_svc_cmd_req *req)
2924{
2925 if (!req || !req->resp_buf || !req->cmd_req_buf) {
2926 pr_err("req or cmd buffer or response buffer is null\n");
2927 return -EINVAL;
2928 }
2929
2930 if (!data || !data->client.ihandle) {
2931 pr_err("Client or client handle is not initialized\n");
2932 return -EINVAL;
2933 }
2934
2935 if (data->client.sb_virt == NULL) {
2936 pr_err("sb_virt null\n");
2937 return -EINVAL;
2938 }
2939
2940 if (data->client.user_virt_sb_base == 0) {
2941 pr_err("user_virt_sb_base is null\n");
2942 return -EINVAL;
2943 }
2944
2945 if (data->client.sb_length == 0) {
2946 pr_err("sb_length is 0\n");
2947 return -EINVAL;
2948 }
2949
2950 if (((uintptr_t)req->cmd_req_buf <
2951 data->client.user_virt_sb_base) ||
2952 ((uintptr_t)req->cmd_req_buf >=
2953 (data->client.user_virt_sb_base + data->client.sb_length))) {
2954 pr_err("cmd buffer address not within shared bufffer\n");
2955 return -EINVAL;
2956 }
2957 if (((uintptr_t)req->resp_buf <
2958 data->client.user_virt_sb_base) ||
2959 ((uintptr_t)req->resp_buf >=
2960 (data->client.user_virt_sb_base + data->client.sb_length))) {
2961 pr_err("response buffer address not within shared bufffer\n");
2962 return -EINVAL;
2963 }
2964 if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
2965 (req->cmd_req_len > data->client.sb_length) ||
2966 (req->resp_len > data->client.sb_length)) {
2967 pr_err("cmd buf length or response buf length not valid\n");
2968 return -EINVAL;
2969 }
2970 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
2971 pr_err("Integer overflow detected in req_len & rsp_len\n");
2972 return -EINVAL;
2973 }
2974
2975 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
2976 pr_debug("Not enough memory to fit cmd_buf.\n");
2977 pr_debug("resp_buf. Required: %u, Available: %zu\n",
2978 (req->cmd_req_len + req->resp_len),
2979 data->client.sb_length);
2980 return -ENOMEM;
2981 }
2982 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
2983 pr_err("Integer overflow in req_len & cmd_req_buf\n");
2984 return -EINVAL;
2985 }
2986 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
2987 pr_err("Integer overflow in resp_len & resp_buf\n");
2988 return -EINVAL;
2989 }
2990 if (data->client.user_virt_sb_base >
2991 (ULONG_MAX - data->client.sb_length)) {
2992 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
2993 return -EINVAL;
2994 }
2995 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
2996 ((uintptr_t)data->client.user_virt_sb_base +
2997 data->client.sb_length)) ||
2998 (((uintptr_t)req->resp_buf + req->resp_len) >
2999 ((uintptr_t)data->client.user_virt_sb_base +
3000 data->client.sb_length))) {
3001 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3002 return -EINVAL;
3003 }
3004 return 0;
3005}
3006
3007static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
3008 void __user *argp)
3009{
3010 int ret = 0;
3011 struct qseecom_client_send_service_ireq send_svc_ireq;
3012 struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
3013 struct qseecom_command_scm_resp resp;
3014 struct qseecom_send_svc_cmd_req req;
3015 void *send_req_ptr;
3016 size_t req_buf_size;
3017
3018 /*struct qseecom_command_scm_resp resp;*/
3019
3020 if (copy_from_user(&req,
3021 (void __user *)argp,
3022 sizeof(req))) {
3023 pr_err("copy_from_user failed\n");
3024 return -EFAULT;
3025 }
3026
3027 if (__validate_send_service_cmd_inputs(data, &req))
3028 return -EINVAL;
3029
3030 data->type = QSEECOM_SECURE_SERVICE;
3031
3032 switch (req.cmd_id) {
3033 case QSEOS_RPMB_PROVISION_KEY_COMMAND:
3034 case QSEOS_RPMB_ERASE_COMMAND:
3035 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
3036 send_req_ptr = &send_svc_ireq;
3037 req_buf_size = sizeof(send_svc_ireq);
3038 if (__qseecom_process_rpmb_svc_cmd(data, &req,
3039 send_req_ptr))
3040 return -EINVAL;
3041 break;
3042 case QSEOS_FSM_LTEOTA_REQ_CMD:
3043 case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
3044 case QSEOS_FSM_IKE_REQ_CMD:
3045 case QSEOS_FSM_IKE_REQ_RSP_CMD:
3046 case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
3047 case QSEOS_FSM_OEM_FUSE_READ_ROW:
3048 case QSEOS_FSM_ENCFS_REQ_CMD:
3049 case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
3050 send_req_ptr = &send_fsm_key_svc_ireq;
3051 req_buf_size = sizeof(send_fsm_key_svc_ireq);
3052 if (__qseecom_process_fsm_key_svc_cmd(data, &req,
3053 send_req_ptr))
3054 return -EINVAL;
3055 break;
3056 default:
3057 pr_err("Unsupported cmd_id %d\n", req.cmd_id);
3058 return -EINVAL;
3059 }
3060
3061 if (qseecom.support_bus_scaling) {
3062 ret = qseecom_scale_bus_bandwidth_timer(HIGH);
3063 if (ret) {
3064 pr_err("Fail to set bw HIGH\n");
3065 return ret;
3066 }
3067 } else {
3068 ret = qseecom_perf_enable(data);
3069 if (ret) {
3070 pr_err("Failed to vote for clocks with err %d\n", ret);
3071 goto exit;
3072 }
3073 }
3074
3075 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3076 data->client.sb_virt, data->client.sb_length,
3077 ION_IOC_CLEAN_INV_CACHES);
3078 if (ret) {
3079 pr_err("cache operation failed %d\n", ret);
3080 goto exit;
3081 }
3082 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3083 (const void *)send_req_ptr,
3084 req_buf_size, &resp, sizeof(resp));
3085 if (ret) {
3086 pr_err("qseecom_scm_call failed with err: %d\n", ret);
3087 if (!qseecom.support_bus_scaling) {
3088 qsee_disable_clock_vote(data, CLK_DFAB);
3089 qsee_disable_clock_vote(data, CLK_SFPB);
3090 } else {
3091 __qseecom_add_bw_scale_down_timer(
3092 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3093 }
3094 goto exit;
3095 }
3096 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3097 data->client.sb_virt, data->client.sb_length,
3098 ION_IOC_INV_CACHES);
3099 if (ret) {
3100 pr_err("cache operation failed %d\n", ret);
3101 goto exit;
3102 }
3103 switch (resp.result) {
3104 case QSEOS_RESULT_SUCCESS:
3105 break;
3106 case QSEOS_RESULT_INCOMPLETE:
3107 pr_debug("qseos_result_incomplete\n");
3108 ret = __qseecom_process_incomplete_cmd(data, &resp);
3109 if (ret) {
3110 pr_err("process_incomplete_cmd fail with result: %d\n",
3111 resp.result);
3112 }
3113 if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
3114 pr_warn("RPMB key status is 0x%x\n", resp.result);
Brahmaji Kb33e26e2017-06-01 17:20:10 +05303115 if (put_user(resp.result,
3116 (uint32_t __user *)req.resp_buf)) {
3117 ret = -EINVAL;
3118 goto exit;
3119 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003120 ret = 0;
3121 }
3122 break;
3123 case QSEOS_RESULT_FAILURE:
3124 pr_err("scm call failed with resp.result: %d\n", resp.result);
3125 ret = -EINVAL;
3126 break;
3127 default:
3128 pr_err("Response result %d not supported\n",
3129 resp.result);
3130 ret = -EINVAL;
3131 break;
3132 }
3133 if (!qseecom.support_bus_scaling) {
3134 qsee_disable_clock_vote(data, CLK_DFAB);
3135 qsee_disable_clock_vote(data, CLK_SFPB);
3136 } else {
3137 __qseecom_add_bw_scale_down_timer(
3138 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3139 }
3140
3141exit:
3142 return ret;
3143}
3144
3145static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
3146 struct qseecom_send_cmd_req *req)
3147
3148{
3149 if (!data || !data->client.ihandle) {
3150 pr_err("Client or client handle is not initialized\n");
3151 return -EINVAL;
3152 }
3153 if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
3154 (req->cmd_req_buf == NULL)) {
3155 pr_err("cmd buffer or response buffer is null\n");
3156 return -EINVAL;
3157 }
3158 if (((uintptr_t)req->cmd_req_buf <
3159 data->client.user_virt_sb_base) ||
3160 ((uintptr_t)req->cmd_req_buf >=
3161 (data->client.user_virt_sb_base + data->client.sb_length))) {
3162 pr_err("cmd buffer address not within shared bufffer\n");
3163 return -EINVAL;
3164 }
3165 if (((uintptr_t)req->resp_buf <
3166 data->client.user_virt_sb_base) ||
3167 ((uintptr_t)req->resp_buf >=
3168 (data->client.user_virt_sb_base + data->client.sb_length))) {
3169 pr_err("response buffer address not within shared bufffer\n");
3170 return -EINVAL;
3171 }
3172 if ((req->cmd_req_len == 0) ||
3173 (req->cmd_req_len > data->client.sb_length) ||
3174 (req->resp_len > data->client.sb_length)) {
3175 pr_err("cmd buf length or response buf length not valid\n");
3176 return -EINVAL;
3177 }
3178 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
3179 pr_err("Integer overflow detected in req_len & rsp_len\n");
3180 return -EINVAL;
3181 }
3182
3183 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
3184 pr_debug("Not enough memory to fit cmd_buf.\n");
3185 pr_debug("resp_buf. Required: %u, Available: %zu\n",
3186 (req->cmd_req_len + req->resp_len),
3187 data->client.sb_length);
3188 return -ENOMEM;
3189 }
3190 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
3191 pr_err("Integer overflow in req_len & cmd_req_buf\n");
3192 return -EINVAL;
3193 }
3194 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
3195 pr_err("Integer overflow in resp_len & resp_buf\n");
3196 return -EINVAL;
3197 }
3198 if (data->client.user_virt_sb_base >
3199 (ULONG_MAX - data->client.sb_length)) {
3200 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
3201 return -EINVAL;
3202 }
3203 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3204 ((uintptr_t)data->client.user_virt_sb_base +
3205 data->client.sb_length)) ||
3206 (((uintptr_t)req->resp_buf + req->resp_len) >
3207 ((uintptr_t)data->client.user_virt_sb_base +
3208 data->client.sb_length))) {
3209 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3210 return -EINVAL;
3211 }
3212 return 0;
3213}
3214
3215int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
3216 struct qseecom_registered_app_list *ptr_app,
3217 struct qseecom_dev_handle *data)
3218{
3219 int ret = 0;
3220
3221 switch (resp->result) {
3222 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
3223 pr_warn("App(%d) %s is blocked on listener %d\n",
3224 data->client.app_id, data->client.app_name,
3225 resp->data);
3226 ret = __qseecom_process_reentrancy_blocked_on_listener(
3227 resp, ptr_app, data);
3228 if (ret) {
3229 pr_err("failed to process App(%d) %s is blocked on listener %d\n",
3230 data->client.app_id, data->client.app_name, resp->data);
3231 return ret;
3232 }
3233
3234 case QSEOS_RESULT_INCOMPLETE:
3235 qseecom.app_block_ref_cnt++;
3236 ptr_app->app_blocked = true;
3237 ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
3238 ptr_app->app_blocked = false;
3239 qseecom.app_block_ref_cnt--;
Zhen Kongcc580932019-04-23 22:16:56 -07003240 wake_up_interruptible_all(&qseecom.app_block_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003241 if (ret)
3242 pr_err("process_incomplete_cmd failed err: %d\n",
3243 ret);
3244 return ret;
3245 case QSEOS_RESULT_SUCCESS:
3246 return ret;
3247 default:
3248 pr_err("Response result %d not supported\n",
3249 resp->result);
3250 return -EINVAL;
3251 }
3252}
3253
3254static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
3255 struct qseecom_send_cmd_req *req)
3256{
3257 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07003258 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003259 u32 reqd_len_sb_in = 0;
3260 struct qseecom_client_send_data_ireq send_data_req = {0};
3261 struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
3262 struct qseecom_command_scm_resp resp;
3263 unsigned long flags;
3264 struct qseecom_registered_app_list *ptr_app;
3265 bool found_app = false;
3266 void *cmd_buf = NULL;
3267 size_t cmd_len;
3268 struct sglist_info *table = data->sglistinfo_ptr;
3269
3270 reqd_len_sb_in = req->cmd_req_len + req->resp_len;
3271 /* find app_id & img_name from list */
3272 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
3273 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
3274 list) {
3275 if ((ptr_app->app_id == data->client.app_id) &&
3276 (!strcmp(ptr_app->app_name, data->client.app_name))) {
3277 found_app = true;
3278 break;
3279 }
3280 }
3281 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
3282
3283 if (!found_app) {
3284 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
3285 (char *)data->client.app_name);
3286 return -ENOENT;
3287 }
3288
3289 if (qseecom.qsee_version < QSEE_VERSION_40) {
3290 send_data_req.app_id = data->client.app_id;
3291 send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3292 data, (uintptr_t)req->cmd_req_buf));
3293 send_data_req.req_len = req->cmd_req_len;
3294 send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3295 data, (uintptr_t)req->resp_buf));
3296 send_data_req.rsp_len = req->resp_len;
3297 send_data_req.sglistinfo_ptr =
3298 (uint32_t)virt_to_phys(table);
3299 send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3300 dmac_flush_range((void *)table,
3301 (void *)table + SGLISTINFO_TABLE_SIZE);
3302 cmd_buf = (void *)&send_data_req;
3303 cmd_len = sizeof(struct qseecom_client_send_data_ireq);
3304 } else {
3305 send_data_req_64bit.app_id = data->client.app_id;
3306 send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
3307 (uintptr_t)req->cmd_req_buf);
3308 send_data_req_64bit.req_len = req->cmd_req_len;
3309 send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
3310 (uintptr_t)req->resp_buf);
3311 send_data_req_64bit.rsp_len = req->resp_len;
3312 /* check if 32bit app's phys_addr region is under 4GB.*/
3313 if ((data->client.app_arch == ELFCLASS32) &&
3314 ((send_data_req_64bit.req_ptr >=
3315 PHY_ADDR_4G - send_data_req_64bit.req_len) ||
3316 (send_data_req_64bit.rsp_ptr >=
3317 PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
3318 pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
3319 data->client.app_name,
3320 send_data_req_64bit.req_ptr,
3321 send_data_req_64bit.req_len,
3322 send_data_req_64bit.rsp_ptr,
3323 send_data_req_64bit.rsp_len);
3324 return -EFAULT;
3325 }
3326 send_data_req_64bit.sglistinfo_ptr =
3327 (uint64_t)virt_to_phys(table);
3328 send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3329 dmac_flush_range((void *)table,
3330 (void *)table + SGLISTINFO_TABLE_SIZE);
3331 cmd_buf = (void *)&send_data_req_64bit;
3332 cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
3333 }
3334
3335 if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
3336 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
3337 else
3338 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
3339
3340 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3341 data->client.sb_virt,
3342 reqd_len_sb_in,
3343 ION_IOC_CLEAN_INV_CACHES);
3344 if (ret) {
3345 pr_err("cache operation failed %d\n", ret);
3346 return ret;
3347 }
3348
3349 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
3350
3351 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3352 cmd_buf, cmd_len,
3353 &resp, sizeof(resp));
3354 if (ret) {
3355 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
3356 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07003357 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003358 }
3359
3360 if (qseecom.qsee_reentrancy_support) {
3361 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07003362 if (ret)
3363 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003364 } else {
3365 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
3366 ret = __qseecom_process_incomplete_cmd(data, &resp);
3367 if (ret) {
3368 pr_err("process_incomplete_cmd failed err: %d\n",
3369 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07003370 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003371 }
3372 } else {
3373 if (resp.result != QSEOS_RESULT_SUCCESS) {
3374 pr_err("Response result %d not supported\n",
3375 resp.result);
3376 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07003377 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003378 }
3379 }
3380 }
Zhen Kong4af480e2017-09-19 14:34:16 -07003381exit:
3382 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003383 data->client.sb_virt, data->client.sb_length,
3384 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07003385 if (ret2) {
3386 pr_err("cache operation failed %d\n", ret2);
3387 return ret2;
3388 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003389 return ret;
3390}
3391
3392static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3393{
3394 int ret = 0;
3395 struct qseecom_send_cmd_req req;
3396
3397 ret = copy_from_user(&req, argp, sizeof(req));
3398 if (ret) {
3399 pr_err("copy_from_user failed\n");
3400 return ret;
3401 }
3402
3403 if (__validate_send_cmd_inputs(data, &req))
3404 return -EINVAL;
3405
3406 ret = __qseecom_send_cmd(data, &req);
3407
3408 if (ret)
3409 return ret;
3410
3411 return ret;
3412}
3413
3414int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3415 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3416 struct qseecom_dev_handle *data, int i) {
3417
3418 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3419 (req->ifd_data[i].fd > 0)) {
3420 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3421 (req->ifd_data[i].cmd_buf_offset >
3422 req->cmd_req_len - sizeof(uint32_t))) {
3423 pr_err("Invalid offset (req len) 0x%x\n",
3424 req->ifd_data[i].cmd_buf_offset);
3425 return -EINVAL;
3426 }
3427 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3428 (lstnr_resp->ifd_data[i].fd > 0)) {
3429 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3430 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3431 lstnr_resp->resp_len - sizeof(uint32_t))) {
3432 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3433 lstnr_resp->ifd_data[i].cmd_buf_offset);
3434 return -EINVAL;
3435 }
3436 }
3437 return 0;
3438}
3439
3440static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
3441 struct qseecom_dev_handle *data)
3442{
3443 struct ion_handle *ihandle;
3444 char *field;
3445 int ret = 0;
3446 int i = 0;
3447 uint32_t len = 0;
3448 struct scatterlist *sg;
3449 struct qseecom_send_modfd_cmd_req *req = NULL;
3450 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3451 struct qseecom_registered_listener_list *this_lstnr = NULL;
3452 uint32_t offset;
3453 struct sg_table *sg_ptr;
3454
3455 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3456 (data->type != QSEECOM_CLIENT_APP))
3457 return -EFAULT;
3458
3459 if (msg == NULL) {
3460 pr_err("Invalid address\n");
3461 return -EINVAL;
3462 }
3463 if (data->type == QSEECOM_LISTENER_SERVICE) {
3464 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3465 this_lstnr = __qseecom_find_svc(data->listener.id);
3466 if (IS_ERR_OR_NULL(this_lstnr)) {
3467 pr_err("Invalid listener ID\n");
3468 return -ENOMEM;
3469 }
3470 } else {
3471 req = (struct qseecom_send_modfd_cmd_req *)msg;
3472 }
3473
3474 for (i = 0; i < MAX_ION_FD; i++) {
3475 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3476 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003477 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003478 req->ifd_data[i].fd);
3479 if (IS_ERR_OR_NULL(ihandle)) {
3480 pr_err("Ion client can't retrieve the handle\n");
3481 return -ENOMEM;
3482 }
3483 field = (char *) req->cmd_req_buf +
3484 req->ifd_data[i].cmd_buf_offset;
3485 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3486 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003487 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003488 lstnr_resp->ifd_data[i].fd);
3489 if (IS_ERR_OR_NULL(ihandle)) {
3490 pr_err("Ion client can't retrieve the handle\n");
3491 return -ENOMEM;
3492 }
3493 field = lstnr_resp->resp_buf_ptr +
3494 lstnr_resp->ifd_data[i].cmd_buf_offset;
3495 } else {
3496 continue;
3497 }
3498 /* Populate the cmd data structure with the phys_addr */
3499 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3500 if (IS_ERR_OR_NULL(sg_ptr)) {
3501 pr_err("IOn client could not retrieve sg table\n");
3502 goto err;
3503 }
3504 if (sg_ptr->nents == 0) {
3505 pr_err("Num of scattered entries is 0\n");
3506 goto err;
3507 }
3508 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3509 pr_err("Num of scattered entries");
3510 pr_err(" (%d) is greater than max supported %d\n",
3511 sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3512 goto err;
3513 }
3514 sg = sg_ptr->sgl;
3515 if (sg_ptr->nents == 1) {
3516 uint32_t *update;
3517
3518 if (__boundary_checks_offset(req, lstnr_resp, data, i))
3519 goto err;
3520 if ((data->type == QSEECOM_CLIENT_APP &&
3521 (data->client.app_arch == ELFCLASS32 ||
3522 data->client.app_arch == ELFCLASS64)) ||
3523 (data->type == QSEECOM_LISTENER_SERVICE)) {
3524 /*
3525 * Check if sg list phy add region is under 4GB
3526 */
3527 if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
3528 (!cleanup) &&
3529 ((uint64_t)sg_dma_address(sg_ptr->sgl)
3530 >= PHY_ADDR_4G - sg->length)) {
3531 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3532 data->client.app_name,
3533 &(sg_dma_address(sg_ptr->sgl)),
3534 sg->length);
3535 goto err;
3536 }
3537 update = (uint32_t *) field;
3538 *update = cleanup ? 0 :
3539 (uint32_t)sg_dma_address(sg_ptr->sgl);
3540 } else {
3541 pr_err("QSEE app arch %u is not supported\n",
3542 data->client.app_arch);
3543 goto err;
3544 }
3545 len += (uint32_t)sg->length;
3546 } else {
3547 struct qseecom_sg_entry *update;
3548 int j = 0;
3549
3550 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3551 (req->ifd_data[i].fd > 0)) {
3552
3553 if ((req->cmd_req_len <
3554 SG_ENTRY_SZ * sg_ptr->nents) ||
3555 (req->ifd_data[i].cmd_buf_offset >
3556 (req->cmd_req_len -
3557 SG_ENTRY_SZ * sg_ptr->nents))) {
3558 pr_err("Invalid offset = 0x%x\n",
3559 req->ifd_data[i].cmd_buf_offset);
3560 goto err;
3561 }
3562
3563 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3564 (lstnr_resp->ifd_data[i].fd > 0)) {
3565
3566 if ((lstnr_resp->resp_len <
3567 SG_ENTRY_SZ * sg_ptr->nents) ||
3568 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3569 (lstnr_resp->resp_len -
3570 SG_ENTRY_SZ * sg_ptr->nents))) {
3571 goto err;
3572 }
3573 }
3574 if ((data->type == QSEECOM_CLIENT_APP &&
3575 (data->client.app_arch == ELFCLASS32 ||
3576 data->client.app_arch == ELFCLASS64)) ||
3577 (data->type == QSEECOM_LISTENER_SERVICE)) {
3578 update = (struct qseecom_sg_entry *)field;
3579 for (j = 0; j < sg_ptr->nents; j++) {
3580 /*
3581 * Check if sg list PA is under 4GB
3582 */
3583 if ((qseecom.qsee_version >=
3584 QSEE_VERSION_40) &&
3585 (!cleanup) &&
3586 ((uint64_t)(sg_dma_address(sg))
3587 >= PHY_ADDR_4G - sg->length)) {
3588 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3589 data->client.app_name,
3590 &(sg_dma_address(sg)),
3591 sg->length);
3592 goto err;
3593 }
3594 update->phys_addr = cleanup ? 0 :
3595 (uint32_t)sg_dma_address(sg);
3596 update->len = cleanup ? 0 : sg->length;
3597 update++;
3598 len += sg->length;
3599 sg = sg_next(sg);
3600 }
3601 } else {
3602 pr_err("QSEE app arch %u is not supported\n",
3603 data->client.app_arch);
3604 goto err;
3605 }
3606 }
3607
3608 if (cleanup) {
3609 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3610 ihandle, NULL, len,
3611 ION_IOC_INV_CACHES);
3612 if (ret) {
3613 pr_err("cache operation failed %d\n", ret);
3614 goto err;
3615 }
3616 } else {
3617 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3618 ihandle, NULL, len,
3619 ION_IOC_CLEAN_INV_CACHES);
3620 if (ret) {
3621 pr_err("cache operation failed %d\n", ret);
3622 goto err;
3623 }
3624 if (data->type == QSEECOM_CLIENT_APP) {
3625 offset = req->ifd_data[i].cmd_buf_offset;
3626 data->sglistinfo_ptr[i].indexAndFlags =
3627 SGLISTINFO_SET_INDEX_FLAG(
3628 (sg_ptr->nents == 1), 0, offset);
3629 data->sglistinfo_ptr[i].sizeOrCount =
3630 (sg_ptr->nents == 1) ?
3631 sg->length : sg_ptr->nents;
3632 data->sglist_cnt = i + 1;
3633 } else {
3634 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3635 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3636 (uintptr_t)this_lstnr->sb_virt);
3637 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3638 SGLISTINFO_SET_INDEX_FLAG(
3639 (sg_ptr->nents == 1), 0, offset);
3640 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3641 (sg_ptr->nents == 1) ?
3642 sg->length : sg_ptr->nents;
3643 this_lstnr->sglist_cnt = i + 1;
3644 }
3645 }
3646 /* Deallocate the handle */
3647 if (!IS_ERR_OR_NULL(ihandle))
3648 ion_free(qseecom.ion_clnt, ihandle);
3649 }
3650 return ret;
3651err:
3652 if (!IS_ERR_OR_NULL(ihandle))
3653 ion_free(qseecom.ion_clnt, ihandle);
3654 return -ENOMEM;
3655}
3656
3657static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3658 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3659{
3660 struct scatterlist *sg = sg_ptr->sgl;
3661 struct qseecom_sg_entry_64bit *sg_entry;
3662 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3663 void *buf;
3664 uint i;
3665 size_t size;
3666 dma_addr_t coh_pmem;
3667
3668 if (fd_idx >= MAX_ION_FD) {
3669 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3670 return -ENOMEM;
3671 }
3672 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3673 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3674 /* Allocate a contiguous kernel buffer */
3675 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3676 size = (size + PAGE_SIZE) & PAGE_MASK;
3677 buf = dma_alloc_coherent(qseecom.pdev,
3678 size, &coh_pmem, GFP_KERNEL);
3679 if (buf == NULL) {
3680 pr_err("failed to alloc memory for sg buf\n");
3681 return -ENOMEM;
3682 }
3683 /* update qseecom_sg_list_buf_hdr_64bit */
3684 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3685 buf_hdr->new_buf_phys_addr = coh_pmem;
3686 buf_hdr->nents_total = sg_ptr->nents;
3687 /* save the left sg entries into new allocated buf */
3688 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3689 for (i = 0; i < sg_ptr->nents; i++) {
3690 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3691 sg_entry->len = sg->length;
3692 sg_entry++;
3693 sg = sg_next(sg);
3694 }
3695
3696 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3697 data->client.sec_buf_fd[fd_idx].vbase = buf;
3698 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3699 data->client.sec_buf_fd[fd_idx].size = size;
3700
3701 return 0;
3702}
3703
3704static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
3705 struct qseecom_dev_handle *data)
3706{
3707 struct ion_handle *ihandle;
3708 char *field;
3709 int ret = 0;
3710 int i = 0;
3711 uint32_t len = 0;
3712 struct scatterlist *sg;
3713 struct qseecom_send_modfd_cmd_req *req = NULL;
3714 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3715 struct qseecom_registered_listener_list *this_lstnr = NULL;
3716 uint32_t offset;
3717 struct sg_table *sg_ptr;
3718
3719 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3720 (data->type != QSEECOM_CLIENT_APP))
3721 return -EFAULT;
3722
3723 if (msg == NULL) {
3724 pr_err("Invalid address\n");
3725 return -EINVAL;
3726 }
3727 if (data->type == QSEECOM_LISTENER_SERVICE) {
3728 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3729 this_lstnr = __qseecom_find_svc(data->listener.id);
3730 if (IS_ERR_OR_NULL(this_lstnr)) {
3731 pr_err("Invalid listener ID\n");
3732 return -ENOMEM;
3733 }
3734 } else {
3735 req = (struct qseecom_send_modfd_cmd_req *)msg;
3736 }
3737
3738 for (i = 0; i < MAX_ION_FD; i++) {
3739 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3740 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003741 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003742 req->ifd_data[i].fd);
3743 if (IS_ERR_OR_NULL(ihandle)) {
3744 pr_err("Ion client can't retrieve the handle\n");
3745 return -ENOMEM;
3746 }
3747 field = (char *) req->cmd_req_buf +
3748 req->ifd_data[i].cmd_buf_offset;
3749 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3750 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003751 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003752 lstnr_resp->ifd_data[i].fd);
3753 if (IS_ERR_OR_NULL(ihandle)) {
3754 pr_err("Ion client can't retrieve the handle\n");
3755 return -ENOMEM;
3756 }
3757 field = lstnr_resp->resp_buf_ptr +
3758 lstnr_resp->ifd_data[i].cmd_buf_offset;
3759 } else {
3760 continue;
3761 }
3762 /* Populate the cmd data structure with the phys_addr */
3763 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3764 if (IS_ERR_OR_NULL(sg_ptr)) {
3765 pr_err("IOn client could not retrieve sg table\n");
3766 goto err;
3767 }
3768 if (sg_ptr->nents == 0) {
3769 pr_err("Num of scattered entries is 0\n");
3770 goto err;
3771 }
3772 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3773 pr_warn("Num of scattered entries");
3774 pr_warn(" (%d) is greater than %d\n",
3775 sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3776 if (cleanup) {
3777 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3778 data->client.sec_buf_fd[i].vbase)
3779 dma_free_coherent(qseecom.pdev,
3780 data->client.sec_buf_fd[i].size,
3781 data->client.sec_buf_fd[i].vbase,
3782 data->client.sec_buf_fd[i].pbase);
3783 } else {
3784 ret = __qseecom_allocate_sg_list_buffer(data,
3785 field, i, sg_ptr);
3786 if (ret) {
3787 pr_err("Failed to allocate sg list buffer\n");
3788 goto err;
3789 }
3790 }
3791 len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
3792 sg = sg_ptr->sgl;
3793 goto cleanup;
3794 }
3795 sg = sg_ptr->sgl;
3796 if (sg_ptr->nents == 1) {
3797 uint64_t *update_64bit;
3798
3799 if (__boundary_checks_offset(req, lstnr_resp, data, i))
3800 goto err;
3801 /* 64bit app uses 64bit address */
3802 update_64bit = (uint64_t *) field;
3803 *update_64bit = cleanup ? 0 :
3804 (uint64_t)sg_dma_address(sg_ptr->sgl);
3805 len += (uint32_t)sg->length;
3806 } else {
3807 struct qseecom_sg_entry_64bit *update_64bit;
3808 int j = 0;
3809
3810 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3811 (req->ifd_data[i].fd > 0)) {
3812
3813 if ((req->cmd_req_len <
3814 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3815 (req->ifd_data[i].cmd_buf_offset >
3816 (req->cmd_req_len -
3817 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3818 pr_err("Invalid offset = 0x%x\n",
3819 req->ifd_data[i].cmd_buf_offset);
3820 goto err;
3821 }
3822
3823 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3824 (lstnr_resp->ifd_data[i].fd > 0)) {
3825
3826 if ((lstnr_resp->resp_len <
3827 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3828 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3829 (lstnr_resp->resp_len -
3830 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3831 goto err;
3832 }
3833 }
3834 /* 64bit app uses 64bit address */
3835 update_64bit = (struct qseecom_sg_entry_64bit *)field;
3836 for (j = 0; j < sg_ptr->nents; j++) {
3837 update_64bit->phys_addr = cleanup ? 0 :
3838 (uint64_t)sg_dma_address(sg);
3839 update_64bit->len = cleanup ? 0 :
3840 (uint32_t)sg->length;
3841 update_64bit++;
3842 len += sg->length;
3843 sg = sg_next(sg);
3844 }
3845 }
3846cleanup:
3847 if (cleanup) {
3848 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3849 ihandle, NULL, len,
3850 ION_IOC_INV_CACHES);
3851 if (ret) {
3852 pr_err("cache operation failed %d\n", ret);
3853 goto err;
3854 }
3855 } else {
3856 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3857 ihandle, NULL, len,
3858 ION_IOC_CLEAN_INV_CACHES);
3859 if (ret) {
3860 pr_err("cache operation failed %d\n", ret);
3861 goto err;
3862 }
3863 if (data->type == QSEECOM_CLIENT_APP) {
3864 offset = req->ifd_data[i].cmd_buf_offset;
3865 data->sglistinfo_ptr[i].indexAndFlags =
3866 SGLISTINFO_SET_INDEX_FLAG(
3867 (sg_ptr->nents == 1), 1, offset);
3868 data->sglistinfo_ptr[i].sizeOrCount =
3869 (sg_ptr->nents == 1) ?
3870 sg->length : sg_ptr->nents;
3871 data->sglist_cnt = i + 1;
3872 } else {
3873 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3874 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3875 (uintptr_t)this_lstnr->sb_virt);
3876 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3877 SGLISTINFO_SET_INDEX_FLAG(
3878 (sg_ptr->nents == 1), 1, offset);
3879 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3880 (sg_ptr->nents == 1) ?
3881 sg->length : sg_ptr->nents;
3882 this_lstnr->sglist_cnt = i + 1;
3883 }
3884 }
3885 /* Deallocate the handle */
3886 if (!IS_ERR_OR_NULL(ihandle))
3887 ion_free(qseecom.ion_clnt, ihandle);
3888 }
3889 return ret;
3890err:
3891 for (i = 0; i < MAX_ION_FD; i++)
3892 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3893 data->client.sec_buf_fd[i].vbase)
3894 dma_free_coherent(qseecom.pdev,
3895 data->client.sec_buf_fd[i].size,
3896 data->client.sec_buf_fd[i].vbase,
3897 data->client.sec_buf_fd[i].pbase);
3898 if (!IS_ERR_OR_NULL(ihandle))
3899 ion_free(qseecom.ion_clnt, ihandle);
3900 return -ENOMEM;
3901}
3902
3903static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3904 void __user *argp,
3905 bool is_64bit_addr)
3906{
3907 int ret = 0;
3908 int i;
3909 struct qseecom_send_modfd_cmd_req req;
3910 struct qseecom_send_cmd_req send_cmd_req;
3911
3912 ret = copy_from_user(&req, argp, sizeof(req));
3913 if (ret) {
3914 pr_err("copy_from_user failed\n");
3915 return ret;
3916 }
3917
3918 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3919 send_cmd_req.cmd_req_len = req.cmd_req_len;
3920 send_cmd_req.resp_buf = req.resp_buf;
3921 send_cmd_req.resp_len = req.resp_len;
3922
3923 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3924 return -EINVAL;
3925
3926 /* validate offsets */
3927 for (i = 0; i < MAX_ION_FD; i++) {
3928 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3929 pr_err("Invalid offset %d = 0x%x\n",
3930 i, req.ifd_data[i].cmd_buf_offset);
3931 return -EINVAL;
3932 }
3933 }
3934 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3935 (uintptr_t)req.cmd_req_buf);
3936 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3937 (uintptr_t)req.resp_buf);
3938
3939 if (!is_64bit_addr) {
3940 ret = __qseecom_update_cmd_buf(&req, false, data);
3941 if (ret)
3942 return ret;
3943 ret = __qseecom_send_cmd(data, &send_cmd_req);
3944 if (ret)
3945 return ret;
3946 ret = __qseecom_update_cmd_buf(&req, true, data);
3947 if (ret)
3948 return ret;
3949 } else {
3950 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3951 if (ret)
3952 return ret;
3953 ret = __qseecom_send_cmd(data, &send_cmd_req);
3954 if (ret)
3955 return ret;
3956 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3957 if (ret)
3958 return ret;
3959 }
3960
3961 return ret;
3962}
3963
3964static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3965 void __user *argp)
3966{
3967 return __qseecom_send_modfd_cmd(data, argp, false);
3968}
3969
3970static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
3971 void __user *argp)
3972{
3973 return __qseecom_send_modfd_cmd(data, argp, true);
3974}
3975
3976
3977
3978static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3979 struct qseecom_registered_listener_list *svc)
3980{
3981 int ret;
3982
Zhen Kongf5087172018-10-11 17:22:05 -07003983 ret = (svc->rcv_req_flag == 1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08003984 return ret || data->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003985}
3986
3987static int qseecom_receive_req(struct qseecom_dev_handle *data)
3988{
3989 int ret = 0;
3990 struct qseecom_registered_listener_list *this_lstnr;
3991
Zhen Kongbcdeda22018-11-16 13:50:51 -08003992 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003993 this_lstnr = __qseecom_find_svc(data->listener.id);
3994 if (!this_lstnr) {
3995 pr_err("Invalid listener ID\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08003996 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003997 return -ENODATA;
3998 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08003999 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004000
4001 while (1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05304002 if (wait_event_interruptible(this_lstnr->rcv_req_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004003 __qseecom_listener_has_rcvd_req(data,
4004 this_lstnr))) {
Zhen Kong25731112018-09-20 13:10:03 -07004005 pr_warn("Interrupted: exiting Listener Service = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004006 (uint32_t)data->listener.id);
4007 /* woken up for different reason */
4008 return -ERESTARTSYS;
4009 }
4010
Zhen Kongbcdeda22018-11-16 13:50:51 -08004011 if (data->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004012 pr_err("Aborting Listener Service = %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07004013 (uint32_t)data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004014 return -ENODEV;
4015 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08004016 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004017 this_lstnr->rcv_req_flag = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08004018 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004019 break;
4020 }
4021 return ret;
4022}
4023
4024static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
4025{
4026 unsigned char app_arch = 0;
4027 struct elf32_hdr *ehdr;
4028 struct elf64_hdr *ehdr64;
4029
4030 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4031
4032 switch (app_arch) {
4033 case ELFCLASS32: {
4034 ehdr = (struct elf32_hdr *)fw_entry->data;
4035 if (fw_entry->size < sizeof(*ehdr)) {
4036 pr_err("%s: Not big enough to be an elf32 header\n",
4037 qseecom.pdev->init_name);
4038 return false;
4039 }
4040 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
4041 pr_err("%s: Not an elf32 header\n",
4042 qseecom.pdev->init_name);
4043 return false;
4044 }
4045 if (ehdr->e_phnum == 0) {
4046 pr_err("%s: No loadable segments\n",
4047 qseecom.pdev->init_name);
4048 return false;
4049 }
4050 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
4051 sizeof(struct elf32_hdr) > fw_entry->size) {
4052 pr_err("%s: Program headers not within mdt\n",
4053 qseecom.pdev->init_name);
4054 return false;
4055 }
4056 break;
4057 }
4058 case ELFCLASS64: {
4059 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4060 if (fw_entry->size < sizeof(*ehdr64)) {
4061 pr_err("%s: Not big enough to be an elf64 header\n",
4062 qseecom.pdev->init_name);
4063 return false;
4064 }
4065 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
4066 pr_err("%s: Not an elf64 header\n",
4067 qseecom.pdev->init_name);
4068 return false;
4069 }
4070 if (ehdr64->e_phnum == 0) {
4071 pr_err("%s: No loadable segments\n",
4072 qseecom.pdev->init_name);
4073 return false;
4074 }
4075 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
4076 sizeof(struct elf64_hdr) > fw_entry->size) {
4077 pr_err("%s: Program headers not within mdt\n",
4078 qseecom.pdev->init_name);
4079 return false;
4080 }
4081 break;
4082 }
4083 default: {
4084 pr_err("QSEE app arch %u is not supported\n", app_arch);
4085 return false;
4086 }
4087 }
4088 return true;
4089}
4090
4091static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
4092 uint32_t *app_arch)
4093{
4094 int ret = -1;
4095 int i = 0, rc = 0;
4096 const struct firmware *fw_entry = NULL;
4097 char fw_name[MAX_APP_NAME_SIZE];
4098 struct elf32_hdr *ehdr;
4099 struct elf64_hdr *ehdr64;
4100 int num_images = 0;
4101
4102 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4103 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4104 if (rc) {
4105 pr_err("error with request_firmware\n");
4106 ret = -EIO;
4107 goto err;
4108 }
4109 if (!__qseecom_is_fw_image_valid(fw_entry)) {
4110 ret = -EIO;
4111 goto err;
4112 }
4113 *app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4114 *fw_size = fw_entry->size;
4115 if (*app_arch == ELFCLASS32) {
4116 ehdr = (struct elf32_hdr *)fw_entry->data;
4117 num_images = ehdr->e_phnum;
4118 } else if (*app_arch == ELFCLASS64) {
4119 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4120 num_images = ehdr64->e_phnum;
4121 } else {
4122 pr_err("QSEE %s app, arch %u is not supported\n",
4123 appname, *app_arch);
4124 ret = -EIO;
4125 goto err;
4126 }
4127 pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
4128 release_firmware(fw_entry);
4129 fw_entry = NULL;
4130 for (i = 0; i < num_images; i++) {
4131 memset(fw_name, 0, sizeof(fw_name));
4132 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4133 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4134 if (ret)
4135 goto err;
4136 if (*fw_size > U32_MAX - fw_entry->size) {
4137 pr_err("QSEE %s app file size overflow\n", appname);
4138 ret = -EINVAL;
4139 goto err;
4140 }
4141 *fw_size += fw_entry->size;
4142 release_firmware(fw_entry);
4143 fw_entry = NULL;
4144 }
4145
4146 return ret;
4147err:
4148 if (fw_entry)
4149 release_firmware(fw_entry);
4150 *fw_size = 0;
4151 return ret;
4152}
4153
4154static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
4155 uint32_t fw_size,
4156 struct qseecom_load_app_ireq *load_req)
4157{
4158 int ret = -1;
4159 int i = 0, rc = 0;
4160 const struct firmware *fw_entry = NULL;
4161 char fw_name[MAX_APP_NAME_SIZE];
4162 u8 *img_data_ptr = img_data;
4163 struct elf32_hdr *ehdr;
4164 struct elf64_hdr *ehdr64;
4165 int num_images = 0;
4166 unsigned char app_arch = 0;
4167
4168 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4169 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4170 if (rc) {
4171 ret = -EIO;
4172 goto err;
4173 }
4174
4175 load_req->img_len = fw_entry->size;
4176 if (load_req->img_len > fw_size) {
4177 pr_err("app %s size %zu is larger than buf size %u\n",
4178 appname, fw_entry->size, fw_size);
4179 ret = -EINVAL;
4180 goto err;
4181 }
4182 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4183 img_data_ptr = img_data_ptr + fw_entry->size;
4184 load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/
4185
4186 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4187 if (app_arch == ELFCLASS32) {
4188 ehdr = (struct elf32_hdr *)fw_entry->data;
4189 num_images = ehdr->e_phnum;
4190 } else if (app_arch == ELFCLASS64) {
4191 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4192 num_images = ehdr64->e_phnum;
4193 } else {
4194 pr_err("QSEE %s app, arch %u is not supported\n",
4195 appname, app_arch);
4196 ret = -EIO;
4197 goto err;
4198 }
4199 release_firmware(fw_entry);
4200 fw_entry = NULL;
4201 for (i = 0; i < num_images; i++) {
4202 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4203 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4204 if (ret) {
4205 pr_err("Failed to locate blob %s\n", fw_name);
4206 goto err;
4207 }
4208 if ((fw_entry->size > U32_MAX - load_req->img_len) ||
4209 (fw_entry->size + load_req->img_len > fw_size)) {
4210 pr_err("Invalid file size for %s\n", fw_name);
4211 ret = -EINVAL;
4212 goto err;
4213 }
4214 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4215 img_data_ptr = img_data_ptr + fw_entry->size;
4216 load_req->img_len += fw_entry->size;
4217 release_firmware(fw_entry);
4218 fw_entry = NULL;
4219 }
4220 return ret;
4221err:
4222 release_firmware(fw_entry);
4223 return ret;
4224}
4225
4226static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
4227 u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
4228{
4229 size_t len = 0;
4230 int ret = 0;
4231 ion_phys_addr_t pa;
4232 struct ion_handle *ihandle = NULL;
4233 u8 *img_data = NULL;
Zhen Kong3dd92792017-12-08 09:47:15 -08004234 int retry = 0;
Zhen Konge30e1342019-01-22 08:57:02 -08004235 int ion_flag = ION_FLAG_CACHED;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004236
Zhen Kong3dd92792017-12-08 09:47:15 -08004237 do {
Zhen Kong5d02be92018-05-29 16:17:29 -07004238 if (retry++) {
4239 mutex_unlock(&app_access_lock);
Zhen Kong3dd92792017-12-08 09:47:15 -08004240 msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
Zhen Kong5d02be92018-05-29 16:17:29 -07004241 mutex_lock(&app_access_lock);
4242 }
Zhen Kong3dd92792017-12-08 09:47:15 -08004243 ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
Zhen Konge30e1342019-01-22 08:57:02 -08004244 SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), ion_flag);
Zhen Kong3dd92792017-12-08 09:47:15 -08004245 } while (IS_ERR_OR_NULL(ihandle) &&
4246 (retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004247
4248 if (IS_ERR_OR_NULL(ihandle)) {
4249 pr_err("ION alloc failed\n");
4250 return -ENOMEM;
4251 }
4252 img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
4253 ihandle);
4254
4255 if (IS_ERR_OR_NULL(img_data)) {
4256 pr_err("ION memory mapping for image loading failed\n");
4257 ret = -ENOMEM;
4258 goto exit_ion_free;
4259 }
4260 /* Get the physical address of the ION BUF */
4261 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
4262 if (ret) {
4263 pr_err("physical memory retrieval failure\n");
4264 ret = -EIO;
4265 goto exit_ion_unmap_kernel;
4266 }
4267
4268 *pihandle = ihandle;
4269 *data = img_data;
4270 *paddr = pa;
4271 return ret;
4272
4273exit_ion_unmap_kernel:
4274 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
4275exit_ion_free:
4276 ion_free(qseecom.ion_clnt, ihandle);
4277 ihandle = NULL;
4278 return ret;
4279}
4280
4281static void __qseecom_free_img_data(struct ion_handle **ihandle)
4282{
4283 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4284 ion_free(qseecom.ion_clnt, *ihandle);
4285 *ihandle = NULL;
4286}
4287
4288static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4289 uint32_t *app_id)
4290{
4291 int ret = -1;
4292 uint32_t fw_size = 0;
4293 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4294 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4295 struct qseecom_command_scm_resp resp;
4296 u8 *img_data = NULL;
4297 ion_phys_addr_t pa = 0;
4298 struct ion_handle *ihandle = NULL;
4299 void *cmd_buf = NULL;
4300 size_t cmd_len;
4301 uint32_t app_arch = 0;
4302
4303 if (!data || !appname || !app_id) {
4304 pr_err("Null pointer to data or appname or appid\n");
4305 return -EINVAL;
4306 }
4307 *app_id = 0;
4308 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4309 return -EIO;
4310 data->client.app_arch = app_arch;
4311
4312 /* Check and load cmnlib */
4313 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4314 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4315 ret = qseecom_load_commonlib_image(data, "cmnlib");
4316 if (ret) {
4317 pr_err("failed to load cmnlib\n");
4318 return -EIO;
4319 }
4320 qseecom.commonlib_loaded = true;
4321 pr_debug("cmnlib is loaded\n");
4322 }
4323
4324 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4325 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4326 if (ret) {
4327 pr_err("failed to load cmnlib64\n");
4328 return -EIO;
4329 }
4330 qseecom.commonlib64_loaded = true;
4331 pr_debug("cmnlib64 is loaded\n");
4332 }
4333 }
4334
4335 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4336 if (ret)
4337 return ret;
4338
4339 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4340 if (ret) {
4341 ret = -EIO;
4342 goto exit_free_img_data;
4343 }
4344
4345 /* Populate the load_req parameters */
4346 if (qseecom.qsee_version < QSEE_VERSION_40) {
4347 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4348 load_req.mdt_len = load_req.mdt_len;
4349 load_req.img_len = load_req.img_len;
4350 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4351 load_req.phy_addr = (uint32_t)pa;
4352 cmd_buf = (void *)&load_req;
4353 cmd_len = sizeof(struct qseecom_load_app_ireq);
4354 } else {
4355 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4356 load_req_64bit.mdt_len = load_req.mdt_len;
4357 load_req_64bit.img_len = load_req.img_len;
4358 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4359 load_req_64bit.phy_addr = (uint64_t)pa;
4360 cmd_buf = (void *)&load_req_64bit;
4361 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4362 }
4363
4364 if (qseecom.support_bus_scaling) {
4365 mutex_lock(&qsee_bw_mutex);
4366 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4367 mutex_unlock(&qsee_bw_mutex);
4368 if (ret) {
4369 ret = -EIO;
4370 goto exit_free_img_data;
4371 }
4372 }
4373
4374 ret = __qseecom_enable_clk_scale_up(data);
4375 if (ret) {
4376 ret = -EIO;
4377 goto exit_unregister_bus_bw_need;
4378 }
4379
4380 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4381 img_data, fw_size,
4382 ION_IOC_CLEAN_INV_CACHES);
4383 if (ret) {
4384 pr_err("cache operation failed %d\n", ret);
4385 goto exit_disable_clk_vote;
4386 }
4387
4388 /* SCM_CALL to load the image */
4389 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4390 &resp, sizeof(resp));
4391 if (ret) {
Zhen Kong5d02be92018-05-29 16:17:29 -07004392 pr_err("scm_call to load failed : ret %d, result %x\n",
4393 ret, resp.result);
4394 if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
4395 ret = -EEXIST;
4396 else
4397 ret = -EIO;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004398 goto exit_disable_clk_vote;
4399 }
4400
4401 switch (resp.result) {
4402 case QSEOS_RESULT_SUCCESS:
4403 *app_id = resp.data;
4404 break;
4405 case QSEOS_RESULT_INCOMPLETE:
4406 ret = __qseecom_process_incomplete_cmd(data, &resp);
4407 if (ret)
4408 pr_err("process_incomplete_cmd FAILED\n");
4409 else
4410 *app_id = resp.data;
4411 break;
4412 case QSEOS_RESULT_FAILURE:
4413 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4414 break;
4415 default:
4416 pr_err("scm call return unknown response %d\n", resp.result);
4417 ret = -EINVAL;
4418 break;
4419 }
4420
4421exit_disable_clk_vote:
4422 __qseecom_disable_clk_scale_down(data);
4423
4424exit_unregister_bus_bw_need:
4425 if (qseecom.support_bus_scaling) {
4426 mutex_lock(&qsee_bw_mutex);
4427 qseecom_unregister_bus_bandwidth_needs(data);
4428 mutex_unlock(&qsee_bw_mutex);
4429 }
4430
4431exit_free_img_data:
4432 __qseecom_free_img_data(&ihandle);
4433 return ret;
4434}
4435
4436static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
4437 char *cmnlib_name)
4438{
4439 int ret = 0;
4440 uint32_t fw_size = 0;
4441 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4442 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4443 struct qseecom_command_scm_resp resp;
4444 u8 *img_data = NULL;
4445 ion_phys_addr_t pa = 0;
4446 void *cmd_buf = NULL;
4447 size_t cmd_len;
4448 uint32_t app_arch = 0;
Zhen Kong3bafb312017-10-18 10:27:20 -07004449 struct ion_handle *cmnlib_ion_handle = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004450
4451 if (!cmnlib_name) {
4452 pr_err("cmnlib_name is NULL\n");
4453 return -EINVAL;
4454 }
4455 if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
4456 pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
4457 cmnlib_name, strlen(cmnlib_name));
4458 return -EINVAL;
4459 }
4460
4461 if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
4462 return -EIO;
4463
Zhen Kong3bafb312017-10-18 10:27:20 -07004464 ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004465 &img_data, fw_size, &pa);
4466 if (ret)
4467 return -EIO;
4468
4469 ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
4470 if (ret) {
4471 ret = -EIO;
4472 goto exit_free_img_data;
4473 }
4474 if (qseecom.qsee_version < QSEE_VERSION_40) {
4475 load_req.phy_addr = (uint32_t)pa;
4476 load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4477 cmd_buf = (void *)&load_req;
4478 cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
4479 } else {
4480 load_req_64bit.phy_addr = (uint64_t)pa;
4481 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4482 load_req_64bit.img_len = load_req.img_len;
4483 load_req_64bit.mdt_len = load_req.mdt_len;
4484 cmd_buf = (void *)&load_req_64bit;
4485 cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
4486 }
4487
4488 if (qseecom.support_bus_scaling) {
4489 mutex_lock(&qsee_bw_mutex);
4490 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4491 mutex_unlock(&qsee_bw_mutex);
4492 if (ret) {
4493 ret = -EIO;
4494 goto exit_free_img_data;
4495 }
4496 }
4497
4498 /* Vote for the SFPB clock */
4499 ret = __qseecom_enable_clk_scale_up(data);
4500 if (ret) {
4501 ret = -EIO;
4502 goto exit_unregister_bus_bw_need;
4503 }
4504
Zhen Kong3bafb312017-10-18 10:27:20 -07004505 ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004506 img_data, fw_size,
4507 ION_IOC_CLEAN_INV_CACHES);
4508 if (ret) {
4509 pr_err("cache operation failed %d\n", ret);
4510 goto exit_disable_clk_vote;
4511 }
4512
4513 /* SCM_CALL to load the image */
4514 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4515 &resp, sizeof(resp));
4516 if (ret) {
4517 pr_err("scm_call to load failed : ret %d\n", ret);
4518 ret = -EIO;
4519 goto exit_disable_clk_vote;
4520 }
4521
4522 switch (resp.result) {
4523 case QSEOS_RESULT_SUCCESS:
4524 break;
4525 case QSEOS_RESULT_FAILURE:
4526 pr_err("scm call failed w/response result%d\n", resp.result);
4527 ret = -EINVAL;
4528 goto exit_disable_clk_vote;
4529 case QSEOS_RESULT_INCOMPLETE:
4530 ret = __qseecom_process_incomplete_cmd(data, &resp);
4531 if (ret) {
4532 pr_err("process_incomplete_cmd failed err: %d\n", ret);
4533 goto exit_disable_clk_vote;
4534 }
4535 break;
4536 default:
4537 pr_err("scm call return unknown response %d\n", resp.result);
4538 ret = -EINVAL;
4539 goto exit_disable_clk_vote;
4540 }
4541
4542exit_disable_clk_vote:
4543 __qseecom_disable_clk_scale_down(data);
4544
4545exit_unregister_bus_bw_need:
4546 if (qseecom.support_bus_scaling) {
4547 mutex_lock(&qsee_bw_mutex);
4548 qseecom_unregister_bus_bandwidth_needs(data);
4549 mutex_unlock(&qsee_bw_mutex);
4550 }
4551
4552exit_free_img_data:
Zhen Kong3bafb312017-10-18 10:27:20 -07004553 __qseecom_free_img_data(&cmnlib_ion_handle);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004554 return ret;
4555}
4556
4557static int qseecom_unload_commonlib_image(void)
4558{
4559 int ret = -EINVAL;
4560 struct qseecom_unload_lib_image_ireq unload_req = {0};
4561 struct qseecom_command_scm_resp resp;
4562
4563 /* Populate the remaining parameters */
4564 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4565
4566 /* SCM_CALL to load the image */
4567 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4568 sizeof(struct qseecom_unload_lib_image_ireq),
4569 &resp, sizeof(resp));
4570 if (ret) {
4571 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4572 ret = -EIO;
4573 } else {
4574 switch (resp.result) {
4575 case QSEOS_RESULT_SUCCESS:
4576 break;
4577 case QSEOS_RESULT_FAILURE:
4578 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4579 break;
4580 default:
4581 pr_err("scm call return unknown response %d\n",
4582 resp.result);
4583 ret = -EINVAL;
4584 break;
4585 }
4586 }
4587
4588 return ret;
4589}
4590
4591int qseecom_start_app(struct qseecom_handle **handle,
4592 char *app_name, uint32_t size)
4593{
4594 int32_t ret = 0;
4595 unsigned long flags = 0;
4596 struct qseecom_dev_handle *data = NULL;
4597 struct qseecom_check_app_ireq app_ireq;
4598 struct qseecom_registered_app_list *entry = NULL;
4599 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4600 bool found_app = false;
4601 size_t len;
4602 ion_phys_addr_t pa;
4603 uint32_t fw_size, app_arch;
4604 uint32_t app_id = 0;
4605
Zhen Kongc4c162a2019-01-23 12:07:12 -08004606 __wakeup_unregister_listener_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004607
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004608 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4609 pr_err("Not allowed to be called in %d state\n",
4610 atomic_read(&qseecom.qseecom_state));
4611 return -EPERM;
4612 }
4613 if (!app_name) {
4614 pr_err("failed to get the app name\n");
4615 return -EINVAL;
4616 }
4617
Zhen Kong64a6d7282017-06-16 11:55:07 -07004618 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004619 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004620 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004621 return -EINVAL;
4622 }
4623
4624 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4625 if (!(*handle))
4626 return -ENOMEM;
4627
4628 data = kzalloc(sizeof(*data), GFP_KERNEL);
4629 if (!data) {
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304630 ret = -ENOMEM;
4631 goto exit_handle_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004632 }
4633 data->abort = 0;
4634 data->type = QSEECOM_CLIENT_APP;
4635 data->released = false;
4636 data->client.sb_length = size;
4637 data->client.user_virt_sb_base = 0;
4638 data->client.ihandle = NULL;
4639
4640 init_waitqueue_head(&data->abort_wq);
4641
4642 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4643 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4644 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4645 pr_err("Ion client could not retrieve the handle\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304646 ret = -ENOMEM;
4647 goto exit_data_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004648 }
4649 mutex_lock(&app_access_lock);
4650
Zhen Kong5d02be92018-05-29 16:17:29 -07004651recheck:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004652 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4653 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4654 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4655 if (ret)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304656 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004657
4658 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4659 if (app_id) {
4660 pr_warn("App id %d for [%s] app exists\n", app_id,
4661 (char *)app_ireq.app_name);
4662 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4663 list_for_each_entry(entry,
4664 &qseecom.registered_app_list_head, list){
4665 if (entry->app_id == app_id) {
4666 entry->ref_cnt++;
4667 found_app = true;
4668 break;
4669 }
4670 }
4671 spin_unlock_irqrestore(
4672 &qseecom.registered_app_list_lock, flags);
4673 if (!found_app)
4674 pr_warn("App_id %d [%s] was loaded but not registered\n",
4675 ret, (char *)app_ireq.app_name);
4676 } else {
4677 /* load the app and get the app_id */
4678 pr_debug("%s: Loading app for the first time'\n",
4679 qseecom.pdev->init_name);
4680 ret = __qseecom_load_fw(data, app_name, &app_id);
Zhen Kong5d02be92018-05-29 16:17:29 -07004681 if (ret == -EEXIST) {
4682 pr_err("recheck if TA %s is loaded\n", app_name);
4683 goto recheck;
4684 } else if (ret < 0)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304685 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004686 }
4687 data->client.app_id = app_id;
4688 if (!found_app) {
4689 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4690 if (!entry) {
4691 pr_err("kmalloc for app entry failed\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304692 ret = -ENOMEM;
4693 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004694 }
4695 entry->app_id = app_id;
4696 entry->ref_cnt = 1;
4697 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4698 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4699 ret = -EIO;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304700 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004701 }
4702 entry->app_arch = app_arch;
4703 entry->app_blocked = false;
4704 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07004705 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004706 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4707 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4708 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4709 flags);
4710 }
4711
4712 /* Get the physical address of the ION BUF */
4713 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4714 if (ret) {
4715 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4716 ret);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304717 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004718 }
4719
4720 /* Populate the structure for sending scm call to load image */
4721 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4722 data->client.ihandle);
4723 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4724 pr_err("ION memory mapping for client shared buf failed\n");
4725 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304726 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004727 }
4728 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4729 data->client.sb_phys = (phys_addr_t)pa;
4730 (*handle)->dev = (void *)data;
4731 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4732 (*handle)->sbuf_len = data->client.sb_length;
4733
4734 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4735 if (!kclient_entry) {
4736 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304737 goto exit_ion_unmap_kernel;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004738 }
4739 kclient_entry->handle = *handle;
4740
4741 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4742 list_add_tail(&kclient_entry->list,
4743 &qseecom.registered_kclient_list_head);
4744 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4745
4746 mutex_unlock(&app_access_lock);
4747 return 0;
4748
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304749exit_ion_unmap_kernel:
4750 if (!IS_ERR_OR_NULL(data->client.ihandle))
4751 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
4752exit_entry_free:
4753 kfree(entry);
4754exit_ion_free:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004755 mutex_unlock(&app_access_lock);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304756 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
4757 ion_free(qseecom.ion_clnt, data->client.ihandle);
4758 data->client.ihandle = NULL;
4759 }
4760exit_data_free:
4761 kfree(data);
4762exit_handle_free:
4763 if (*handle) {
4764 kfree(*handle);
4765 *handle = NULL;
4766 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004767 return ret;
4768}
4769EXPORT_SYMBOL(qseecom_start_app);
4770
4771int qseecom_shutdown_app(struct qseecom_handle **handle)
4772{
4773 int ret = -EINVAL;
4774 struct qseecom_dev_handle *data;
4775
4776 struct qseecom_registered_kclient_list *kclient = NULL;
4777 unsigned long flags = 0;
4778 bool found_handle = false;
4779
Zhen Kongc4c162a2019-01-23 12:07:12 -08004780 __wakeup_unregister_listener_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004781
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004782 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4783 pr_err("Not allowed to be called in %d state\n",
4784 atomic_read(&qseecom.qseecom_state));
4785 return -EPERM;
4786 }
4787
4788 if ((handle == NULL) || (*handle == NULL)) {
4789 pr_err("Handle is not initialized\n");
4790 return -EINVAL;
4791 }
4792 data = (struct qseecom_dev_handle *) ((*handle)->dev);
4793 mutex_lock(&app_access_lock);
4794
4795 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4796 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
4797 list) {
4798 if (kclient->handle == (*handle)) {
4799 list_del(&kclient->list);
4800 found_handle = true;
4801 break;
4802 }
4803 }
4804 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4805 if (!found_handle)
4806 pr_err("Unable to find the handle, exiting\n");
4807 else
4808 ret = qseecom_unload_app(data, false);
4809
4810 mutex_unlock(&app_access_lock);
4811 if (ret == 0) {
4812 kzfree(data);
4813 kzfree(*handle);
4814 kzfree(kclient);
4815 *handle = NULL;
4816 }
4817
4818 return ret;
4819}
4820EXPORT_SYMBOL(qseecom_shutdown_app);
4821
4822int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
4823 uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
4824{
4825 int ret = 0;
4826 struct qseecom_send_cmd_req req = {0, 0, 0, 0};
4827 struct qseecom_dev_handle *data;
4828 bool perf_enabled = false;
4829
Zhen Kongc4c162a2019-01-23 12:07:12 -08004830 __wakeup_unregister_listener_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004831
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004832 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4833 pr_err("Not allowed to be called in %d state\n",
4834 atomic_read(&qseecom.qseecom_state));
4835 return -EPERM;
4836 }
4837
4838 if (handle == NULL) {
4839 pr_err("Handle is not initialized\n");
4840 return -EINVAL;
4841 }
4842 data = handle->dev;
4843
4844 req.cmd_req_len = sbuf_len;
4845 req.resp_len = rbuf_len;
4846 req.cmd_req_buf = send_buf;
4847 req.resp_buf = resp_buf;
4848
4849 if (__validate_send_cmd_inputs(data, &req))
4850 return -EINVAL;
4851
4852 mutex_lock(&app_access_lock);
4853 if (qseecom.support_bus_scaling) {
4854 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
4855 if (ret) {
4856 pr_err("Failed to set bw.\n");
4857 mutex_unlock(&app_access_lock);
4858 return ret;
4859 }
4860 }
4861 /*
4862 * On targets where crypto clock is handled by HLOS,
4863 * if clk_access_cnt is zero and perf_enabled is false,
4864 * then the crypto clock was not enabled before sending cmd
4865 * to tz, qseecom will enable the clock to avoid service failure.
4866 */
4867 if (!qseecom.no_clock_support &&
4868 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
4869 pr_debug("ce clock is not enabled!\n");
4870 ret = qseecom_perf_enable(data);
4871 if (ret) {
4872 pr_err("Failed to vote for clock with err %d\n",
4873 ret);
4874 mutex_unlock(&app_access_lock);
4875 return -EINVAL;
4876 }
4877 perf_enabled = true;
4878 }
4879 if (!strcmp(data->client.app_name, "securemm"))
4880 data->use_legacy_cmd = true;
4881
4882 ret = __qseecom_send_cmd(data, &req);
4883 data->use_legacy_cmd = false;
4884 if (qseecom.support_bus_scaling)
4885 __qseecom_add_bw_scale_down_timer(
4886 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
4887
4888 if (perf_enabled) {
4889 qsee_disable_clock_vote(data, CLK_DFAB);
4890 qsee_disable_clock_vote(data, CLK_SFPB);
4891 }
4892
4893 mutex_unlock(&app_access_lock);
4894
4895 if (ret)
4896 return ret;
4897
4898 pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
4899 req.resp_len, req.resp_buf);
4900 return ret;
4901}
4902EXPORT_SYMBOL(qseecom_send_command);
4903
4904int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4905{
4906 int ret = 0;
4907
4908 if ((handle == NULL) || (handle->dev == NULL)) {
4909 pr_err("No valid kernel client\n");
4910 return -EINVAL;
4911 }
4912 if (high) {
4913 if (qseecom.support_bus_scaling) {
4914 mutex_lock(&qsee_bw_mutex);
4915 __qseecom_register_bus_bandwidth_needs(handle->dev,
4916 HIGH);
4917 mutex_unlock(&qsee_bw_mutex);
4918 } else {
4919 ret = qseecom_perf_enable(handle->dev);
4920 if (ret)
4921 pr_err("Failed to vote for clock with err %d\n",
4922 ret);
4923 }
4924 } else {
4925 if (!qseecom.support_bus_scaling) {
4926 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4927 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4928 } else {
4929 mutex_lock(&qsee_bw_mutex);
4930 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4931 mutex_unlock(&qsee_bw_mutex);
4932 }
4933 }
4934 return ret;
4935}
4936EXPORT_SYMBOL(qseecom_set_bandwidth);
4937
4938int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
4939{
4940 struct qseecom_registered_app_list dummy_app_entry = { {0} };
4941 struct qseecom_dev_handle dummy_private_data = {0};
4942 struct qseecom_command_scm_resp resp;
4943 int ret = 0;
4944
4945 if (!desc) {
4946 pr_err("desc is NULL\n");
4947 return -EINVAL;
4948 }
4949
4950 resp.result = desc->ret[0]; /*req_cmd*/
Zhen Kong2f60f492017-06-29 15:22:14 -07004951 resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004952 resp.data = desc->ret[2]; /*listener_id*/
4953
Zhen Konge7f525f2017-12-01 18:26:25 -08004954 dummy_private_data.client.app_id = desc->ret[1];
Zhen Kong0ea975d2019-03-12 14:40:24 -07004955 dummy_private_data.client.from_smcinvoke = true;
Zhen Konge7f525f2017-12-01 18:26:25 -08004956 dummy_app_entry.app_id = desc->ret[1];
4957
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004958 mutex_lock(&app_access_lock);
Zhen Kong7458c2e2017-10-19 12:32:07 -07004959 if (qseecom.qsee_reentrancy_support)
4960 ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004961 &dummy_private_data);
Zhen Kong7458c2e2017-10-19 12:32:07 -07004962 else
4963 ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
4964 &resp);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004965 mutex_unlock(&app_access_lock);
4966 if (ret)
Zhen Kong2f60f492017-06-29 15:22:14 -07004967 pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004968 (int)desc->ret[0], (int)desc->ret[2],
4969 (int)desc->ret[1], ret);
4970 desc->ret[0] = resp.result;
4971 desc->ret[1] = resp.resp_type;
4972 desc->ret[2] = resp.data;
4973 return ret;
4974}
4975EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4976
4977static int qseecom_send_resp(void)
4978{
4979 qseecom.send_resp_flag = 1;
4980 wake_up_interruptible(&qseecom.send_resp_wq);
4981 return 0;
4982}
4983
4984static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4985{
4986 struct qseecom_registered_listener_list *this_lstnr = NULL;
4987
4988 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4989 this_lstnr = __qseecom_find_svc(data->listener.id);
4990 if (this_lstnr == NULL)
4991 return -EINVAL;
4992 qseecom.send_resp_flag = 1;
4993 this_lstnr->send_resp_flag = 1;
4994 wake_up_interruptible(&qseecom.send_resp_wq);
4995 return 0;
4996}
4997
4998static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
4999 struct qseecom_send_modfd_listener_resp *resp,
5000 struct qseecom_registered_listener_list *this_lstnr)
5001{
5002 int i;
5003
5004 if (!data || !resp || !this_lstnr) {
5005 pr_err("listener handle or resp msg is null\n");
5006 return -EINVAL;
5007 }
5008
5009 if (resp->resp_buf_ptr == NULL) {
5010 pr_err("resp buffer is null\n");
5011 return -EINVAL;
5012 }
5013 /* validate resp buf length */
5014 if ((resp->resp_len == 0) ||
5015 (resp->resp_len > this_lstnr->sb_length)) {
5016 pr_err("resp buf length %d not valid\n", resp->resp_len);
5017 return -EINVAL;
5018 }
5019
5020 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
5021 pr_err("Integer overflow in resp_len & resp_buf\n");
5022 return -EINVAL;
5023 }
5024 if ((uintptr_t)this_lstnr->user_virt_sb_base >
5025 (ULONG_MAX - this_lstnr->sb_length)) {
5026 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
5027 return -EINVAL;
5028 }
5029 /* validate resp buf */
5030 if (((uintptr_t)resp->resp_buf_ptr <
5031 (uintptr_t)this_lstnr->user_virt_sb_base) ||
5032 ((uintptr_t)resp->resp_buf_ptr >=
5033 ((uintptr_t)this_lstnr->user_virt_sb_base +
5034 this_lstnr->sb_length)) ||
5035 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
5036 ((uintptr_t)this_lstnr->user_virt_sb_base +
5037 this_lstnr->sb_length))) {
5038 pr_err("resp buf is out of shared buffer region\n");
5039 return -EINVAL;
5040 }
5041
5042 /* validate offsets */
5043 for (i = 0; i < MAX_ION_FD; i++) {
5044 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
5045 pr_err("Invalid offset %d = 0x%x\n",
5046 i, resp->ifd_data[i].cmd_buf_offset);
5047 return -EINVAL;
5048 }
5049 }
5050
5051 return 0;
5052}
5053
5054static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5055 void __user *argp, bool is_64bit_addr)
5056{
5057 struct qseecom_send_modfd_listener_resp resp;
5058 struct qseecom_registered_listener_list *this_lstnr = NULL;
5059
5060 if (copy_from_user(&resp, argp, sizeof(resp))) {
5061 pr_err("copy_from_user failed");
5062 return -EINVAL;
5063 }
5064
5065 this_lstnr = __qseecom_find_svc(data->listener.id);
5066 if (this_lstnr == NULL)
5067 return -EINVAL;
5068
5069 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
5070 return -EINVAL;
5071
5072 resp.resp_buf_ptr = this_lstnr->sb_virt +
5073 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
5074
5075 if (!is_64bit_addr)
5076 __qseecom_update_cmd_buf(&resp, false, data);
5077 else
5078 __qseecom_update_cmd_buf_64(&resp, false, data);
5079 qseecom.send_resp_flag = 1;
5080 this_lstnr->send_resp_flag = 1;
5081 wake_up_interruptible(&qseecom.send_resp_wq);
5082 return 0;
5083}
5084
5085static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5086 void __user *argp)
5087{
5088 return __qseecom_send_modfd_resp(data, argp, false);
5089}
5090
5091static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
5092 void __user *argp)
5093{
5094 return __qseecom_send_modfd_resp(data, argp, true);
5095}
5096
5097static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
5098 void __user *argp)
5099{
5100 struct qseecom_qseos_version_req req;
5101
5102 if (copy_from_user(&req, argp, sizeof(req))) {
5103 pr_err("copy_from_user failed");
5104 return -EINVAL;
5105 }
5106 req.qseos_version = qseecom.qseos_version;
5107 if (copy_to_user(argp, &req, sizeof(req))) {
5108 pr_err("copy_to_user failed");
5109 return -EINVAL;
5110 }
5111 return 0;
5112}
5113
5114static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
5115{
5116 int rc = 0;
5117 struct qseecom_clk *qclk = NULL;
5118
5119 if (qseecom.no_clock_support)
5120 return 0;
5121
5122 if (ce == CLK_QSEE)
5123 qclk = &qseecom.qsee;
5124 if (ce == CLK_CE_DRV)
5125 qclk = &qseecom.ce_drv;
5126
5127 if (qclk == NULL) {
5128 pr_err("CLK type not supported\n");
5129 return -EINVAL;
5130 }
5131 mutex_lock(&clk_access_lock);
5132
5133 if (qclk->clk_access_cnt == ULONG_MAX) {
5134 pr_err("clk_access_cnt beyond limitation\n");
5135 goto err;
5136 }
5137 if (qclk->clk_access_cnt > 0) {
5138 qclk->clk_access_cnt++;
5139 mutex_unlock(&clk_access_lock);
5140 return rc;
5141 }
5142
5143 /* Enable CE core clk */
5144 if (qclk->ce_core_clk != NULL) {
5145 rc = clk_prepare_enable(qclk->ce_core_clk);
5146 if (rc) {
5147 pr_err("Unable to enable/prepare CE core clk\n");
5148 goto err;
5149 }
5150 }
5151 /* Enable CE clk */
5152 if (qclk->ce_clk != NULL) {
5153 rc = clk_prepare_enable(qclk->ce_clk);
5154 if (rc) {
5155 pr_err("Unable to enable/prepare CE iface clk\n");
5156 goto ce_clk_err;
5157 }
5158 }
5159 /* Enable AXI clk */
5160 if (qclk->ce_bus_clk != NULL) {
5161 rc = clk_prepare_enable(qclk->ce_bus_clk);
5162 if (rc) {
5163 pr_err("Unable to enable/prepare CE bus clk\n");
5164 goto ce_bus_clk_err;
5165 }
5166 }
5167 qclk->clk_access_cnt++;
5168 mutex_unlock(&clk_access_lock);
5169 return 0;
5170
5171ce_bus_clk_err:
5172 if (qclk->ce_clk != NULL)
5173 clk_disable_unprepare(qclk->ce_clk);
5174ce_clk_err:
5175 if (qclk->ce_core_clk != NULL)
5176 clk_disable_unprepare(qclk->ce_core_clk);
5177err:
5178 mutex_unlock(&clk_access_lock);
5179 return -EIO;
5180}
5181
5182static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5183{
5184 struct qseecom_clk *qclk;
5185
5186 if (qseecom.no_clock_support)
5187 return;
5188
5189 if (ce == CLK_QSEE)
5190 qclk = &qseecom.qsee;
5191 else
5192 qclk = &qseecom.ce_drv;
5193
5194 mutex_lock(&clk_access_lock);
5195
5196 if (qclk->clk_access_cnt == 0) {
5197 mutex_unlock(&clk_access_lock);
5198 return;
5199 }
5200
5201 if (qclk->clk_access_cnt == 1) {
5202 if (qclk->ce_clk != NULL)
5203 clk_disable_unprepare(qclk->ce_clk);
5204 if (qclk->ce_core_clk != NULL)
5205 clk_disable_unprepare(qclk->ce_core_clk);
5206 if (qclk->ce_bus_clk != NULL)
5207 clk_disable_unprepare(qclk->ce_bus_clk);
5208 }
5209 qclk->clk_access_cnt--;
5210 mutex_unlock(&clk_access_lock);
5211}
5212
5213static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
5214 int32_t clk_type)
5215{
5216 int ret = 0;
5217 struct qseecom_clk *qclk;
5218
5219 if (qseecom.no_clock_support)
5220 return 0;
5221
5222 qclk = &qseecom.qsee;
5223 if (!qseecom.qsee_perf_client)
5224 return ret;
5225
5226 switch (clk_type) {
5227 case CLK_DFAB:
5228 mutex_lock(&qsee_bw_mutex);
5229 if (!qseecom.qsee_bw_count) {
5230 if (qseecom.qsee_sfpb_bw_count > 0)
5231 ret = msm_bus_scale_client_update_request(
5232 qseecom.qsee_perf_client, 3);
5233 else {
5234 if (qclk->ce_core_src_clk != NULL)
5235 ret = __qseecom_enable_clk(CLK_QSEE);
5236 if (!ret) {
5237 ret =
5238 msm_bus_scale_client_update_request(
5239 qseecom.qsee_perf_client, 1);
5240 if ((ret) &&
5241 (qclk->ce_core_src_clk != NULL))
5242 __qseecom_disable_clk(CLK_QSEE);
5243 }
5244 }
5245 if (ret)
5246 pr_err("DFAB Bandwidth req failed (%d)\n",
5247 ret);
5248 else {
5249 qseecom.qsee_bw_count++;
5250 data->perf_enabled = true;
5251 }
5252 } else {
5253 qseecom.qsee_bw_count++;
5254 data->perf_enabled = true;
5255 }
5256 mutex_unlock(&qsee_bw_mutex);
5257 break;
5258 case CLK_SFPB:
5259 mutex_lock(&qsee_bw_mutex);
5260 if (!qseecom.qsee_sfpb_bw_count) {
5261 if (qseecom.qsee_bw_count > 0)
5262 ret = msm_bus_scale_client_update_request(
5263 qseecom.qsee_perf_client, 3);
5264 else {
5265 if (qclk->ce_core_src_clk != NULL)
5266 ret = __qseecom_enable_clk(CLK_QSEE);
5267 if (!ret) {
5268 ret =
5269 msm_bus_scale_client_update_request(
5270 qseecom.qsee_perf_client, 2);
5271 if ((ret) &&
5272 (qclk->ce_core_src_clk != NULL))
5273 __qseecom_disable_clk(CLK_QSEE);
5274 }
5275 }
5276
5277 if (ret)
5278 pr_err("SFPB Bandwidth req failed (%d)\n",
5279 ret);
5280 else {
5281 qseecom.qsee_sfpb_bw_count++;
5282 data->fast_load_enabled = true;
5283 }
5284 } else {
5285 qseecom.qsee_sfpb_bw_count++;
5286 data->fast_load_enabled = true;
5287 }
5288 mutex_unlock(&qsee_bw_mutex);
5289 break;
5290 default:
5291 pr_err("Clock type not defined\n");
5292 break;
5293 }
5294 return ret;
5295}
5296
5297static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5298 int32_t clk_type)
5299{
5300 int32_t ret = 0;
5301 struct qseecom_clk *qclk;
5302
5303 qclk = &qseecom.qsee;
5304
5305 if (qseecom.no_clock_support)
5306 return;
5307 if (!qseecom.qsee_perf_client)
5308 return;
5309
5310 switch (clk_type) {
5311 case CLK_DFAB:
5312 mutex_lock(&qsee_bw_mutex);
5313 if (qseecom.qsee_bw_count == 0) {
5314 pr_err("Client error.Extra call to disable DFAB clk\n");
5315 mutex_unlock(&qsee_bw_mutex);
5316 return;
5317 }
5318
5319 if (qseecom.qsee_bw_count == 1) {
5320 if (qseecom.qsee_sfpb_bw_count > 0)
5321 ret = msm_bus_scale_client_update_request(
5322 qseecom.qsee_perf_client, 2);
5323 else {
5324 ret = msm_bus_scale_client_update_request(
5325 qseecom.qsee_perf_client, 0);
5326 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5327 __qseecom_disable_clk(CLK_QSEE);
5328 }
5329 if (ret)
5330 pr_err("SFPB Bandwidth req fail (%d)\n",
5331 ret);
5332 else {
5333 qseecom.qsee_bw_count--;
5334 data->perf_enabled = false;
5335 }
5336 } else {
5337 qseecom.qsee_bw_count--;
5338 data->perf_enabled = false;
5339 }
5340 mutex_unlock(&qsee_bw_mutex);
5341 break;
5342 case CLK_SFPB:
5343 mutex_lock(&qsee_bw_mutex);
5344 if (qseecom.qsee_sfpb_bw_count == 0) {
5345 pr_err("Client error.Extra call to disable SFPB clk\n");
5346 mutex_unlock(&qsee_bw_mutex);
5347 return;
5348 }
5349 if (qseecom.qsee_sfpb_bw_count == 1) {
5350 if (qseecom.qsee_bw_count > 0)
5351 ret = msm_bus_scale_client_update_request(
5352 qseecom.qsee_perf_client, 1);
5353 else {
5354 ret = msm_bus_scale_client_update_request(
5355 qseecom.qsee_perf_client, 0);
5356 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5357 __qseecom_disable_clk(CLK_QSEE);
5358 }
5359 if (ret)
5360 pr_err("SFPB Bandwidth req fail (%d)\n",
5361 ret);
5362 else {
5363 qseecom.qsee_sfpb_bw_count--;
5364 data->fast_load_enabled = false;
5365 }
5366 } else {
5367 qseecom.qsee_sfpb_bw_count--;
5368 data->fast_load_enabled = false;
5369 }
5370 mutex_unlock(&qsee_bw_mutex);
5371 break;
5372 default:
5373 pr_err("Clock type not defined\n");
5374 break;
5375 }
5376
5377}
5378
5379static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5380 void __user *argp)
5381{
5382 struct ion_handle *ihandle; /* Ion handle */
5383 struct qseecom_load_img_req load_img_req;
5384 int uret = 0;
5385 int ret;
5386 ion_phys_addr_t pa = 0;
5387 size_t len;
5388 struct qseecom_load_app_ireq load_req;
5389 struct qseecom_load_app_64bit_ireq load_req_64bit;
5390 struct qseecom_command_scm_resp resp;
5391 void *cmd_buf = NULL;
5392 size_t cmd_len;
5393 /* Copy the relevant information needed for loading the image */
5394 if (copy_from_user(&load_img_req,
5395 (void __user *)argp,
5396 sizeof(struct qseecom_load_img_req))) {
5397 pr_err("copy_from_user failed\n");
5398 return -EFAULT;
5399 }
5400
5401 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005402 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005403 load_img_req.ifd_data_fd);
5404 if (IS_ERR_OR_NULL(ihandle)) {
5405 pr_err("Ion client could not retrieve the handle\n");
5406 return -ENOMEM;
5407 }
5408
5409 /* Get the physical address of the ION BUF */
5410 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5411 if (ret) {
5412 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5413 ret);
5414 return ret;
5415 }
5416 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5417 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5418 len, load_img_req.mdt_len,
5419 load_img_req.img_len);
5420 return ret;
5421 }
5422 /* Populate the structure for sending scm call to load image */
5423 if (qseecom.qsee_version < QSEE_VERSION_40) {
5424 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5425 load_req.mdt_len = load_img_req.mdt_len;
5426 load_req.img_len = load_img_req.img_len;
5427 load_req.phy_addr = (uint32_t)pa;
5428 cmd_buf = (void *)&load_req;
5429 cmd_len = sizeof(struct qseecom_load_app_ireq);
5430 } else {
5431 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5432 load_req_64bit.mdt_len = load_img_req.mdt_len;
5433 load_req_64bit.img_len = load_img_req.img_len;
5434 load_req_64bit.phy_addr = (uint64_t)pa;
5435 cmd_buf = (void *)&load_req_64bit;
5436 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5437 }
5438
5439 if (qseecom.support_bus_scaling) {
5440 mutex_lock(&qsee_bw_mutex);
5441 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5442 mutex_unlock(&qsee_bw_mutex);
5443 if (ret) {
5444 ret = -EIO;
5445 goto exit_cpu_restore;
5446 }
5447 }
5448
5449 /* Vote for the SFPB clock */
5450 ret = __qseecom_enable_clk_scale_up(data);
5451 if (ret) {
5452 ret = -EIO;
5453 goto exit_register_bus_bandwidth_needs;
5454 }
5455 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5456 ION_IOC_CLEAN_INV_CACHES);
5457 if (ret) {
5458 pr_err("cache operation failed %d\n", ret);
5459 goto exit_disable_clock;
5460 }
5461 /* SCM_CALL to load the external elf */
5462 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5463 &resp, sizeof(resp));
5464 if (ret) {
5465 pr_err("scm_call to load failed : ret %d\n",
5466 ret);
5467 ret = -EFAULT;
5468 goto exit_disable_clock;
5469 }
5470
5471 switch (resp.result) {
5472 case QSEOS_RESULT_SUCCESS:
5473 break;
5474 case QSEOS_RESULT_INCOMPLETE:
5475 pr_err("%s: qseos result incomplete\n", __func__);
5476 ret = __qseecom_process_incomplete_cmd(data, &resp);
5477 if (ret)
5478 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5479 break;
5480 case QSEOS_RESULT_FAILURE:
5481 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5482 ret = -EFAULT;
5483 break;
5484 default:
5485 pr_err("scm_call response result %d not supported\n",
5486 resp.result);
5487 ret = -EFAULT;
5488 break;
5489 }
5490
5491exit_disable_clock:
5492 __qseecom_disable_clk_scale_down(data);
5493
5494exit_register_bus_bandwidth_needs:
5495 if (qseecom.support_bus_scaling) {
5496 mutex_lock(&qsee_bw_mutex);
5497 uret = qseecom_unregister_bus_bandwidth_needs(data);
5498 mutex_unlock(&qsee_bw_mutex);
5499 if (uret)
5500 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5501 uret, ret);
5502 }
5503
5504exit_cpu_restore:
5505 /* Deallocate the handle */
5506 if (!IS_ERR_OR_NULL(ihandle))
5507 ion_free(qseecom.ion_clnt, ihandle);
5508 return ret;
5509}
5510
5511static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5512{
5513 int ret = 0;
5514 struct qseecom_command_scm_resp resp;
5515 struct qseecom_unload_app_ireq req;
5516
5517 /* unavailable client app */
5518 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5519
5520 /* Populate the structure for sending scm call to unload image */
5521 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5522
5523 /* SCM_CALL to unload the external elf */
5524 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5525 sizeof(struct qseecom_unload_app_ireq),
5526 &resp, sizeof(resp));
5527 if (ret) {
5528 pr_err("scm_call to unload failed : ret %d\n",
5529 ret);
5530 ret = -EFAULT;
5531 goto qseecom_unload_external_elf_scm_err;
5532 }
5533 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5534 ret = __qseecom_process_incomplete_cmd(data, &resp);
5535 if (ret)
5536 pr_err("process_incomplete_cmd fail err: %d\n",
5537 ret);
5538 } else {
5539 if (resp.result != QSEOS_RESULT_SUCCESS) {
5540 pr_err("scm_call to unload image failed resp.result =%d\n",
5541 resp.result);
5542 ret = -EFAULT;
5543 }
5544 }
5545
5546qseecom_unload_external_elf_scm_err:
5547
5548 return ret;
5549}
5550
5551static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5552 void __user *argp)
5553{
5554
5555 int32_t ret;
5556 struct qseecom_qseos_app_load_query query_req;
5557 struct qseecom_check_app_ireq req;
5558 struct qseecom_registered_app_list *entry = NULL;
5559 unsigned long flags = 0;
5560 uint32_t app_arch = 0, app_id = 0;
5561 bool found_app = false;
5562
5563 /* Copy the relevant information needed for loading the image */
5564 if (copy_from_user(&query_req,
5565 (void __user *)argp,
5566 sizeof(struct qseecom_qseos_app_load_query))) {
5567 pr_err("copy_from_user failed\n");
5568 return -EFAULT;
5569 }
5570
5571 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5572 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5573 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5574
5575 ret = __qseecom_check_app_exists(req, &app_id);
5576 if (ret) {
5577 pr_err(" scm call to check if app is loaded failed");
5578 return ret; /* scm call failed */
5579 }
5580 if (app_id) {
5581 pr_debug("App id %d (%s) already exists\n", app_id,
5582 (char *)(req.app_name));
5583 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5584 list_for_each_entry(entry,
5585 &qseecom.registered_app_list_head, list){
5586 if (entry->app_id == app_id) {
5587 app_arch = entry->app_arch;
5588 entry->ref_cnt++;
5589 found_app = true;
5590 break;
5591 }
5592 }
5593 spin_unlock_irqrestore(
5594 &qseecom.registered_app_list_lock, flags);
5595 data->client.app_id = app_id;
5596 query_req.app_id = app_id;
5597 if (app_arch) {
5598 data->client.app_arch = app_arch;
5599 query_req.app_arch = app_arch;
5600 } else {
5601 data->client.app_arch = 0;
5602 query_req.app_arch = 0;
5603 }
5604 strlcpy(data->client.app_name, query_req.app_name,
5605 MAX_APP_NAME_SIZE);
5606 /*
5607 * If app was loaded by appsbl before and was not registered,
5608 * regiser this app now.
5609 */
5610 if (!found_app) {
5611 pr_debug("Register app %d [%s] which was loaded before\n",
5612 ret, (char *)query_req.app_name);
5613 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5614 if (!entry) {
5615 pr_err("kmalloc for app entry failed\n");
5616 return -ENOMEM;
5617 }
5618 entry->app_id = app_id;
5619 entry->ref_cnt = 1;
5620 entry->app_arch = data->client.app_arch;
5621 strlcpy(entry->app_name, data->client.app_name,
5622 MAX_APP_NAME_SIZE);
5623 entry->app_blocked = false;
5624 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07005625 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005626 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5627 flags);
5628 list_add_tail(&entry->list,
5629 &qseecom.registered_app_list_head);
5630 spin_unlock_irqrestore(
5631 &qseecom.registered_app_list_lock, flags);
5632 }
5633 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5634 pr_err("copy_to_user failed\n");
5635 return -EFAULT;
5636 }
5637 return -EEXIST; /* app already loaded */
5638 } else {
5639 return 0; /* app not loaded */
5640 }
5641}
5642
5643static int __qseecom_get_ce_pipe_info(
5644 enum qseecom_key_management_usage_type usage,
5645 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5646{
5647 int ret = -EINVAL;
5648 int i, j;
5649 struct qseecom_ce_info_use *p = NULL;
5650 int total = 0;
5651 struct qseecom_ce_pipe_entry *pcepipe;
5652
5653 switch (usage) {
5654 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5655 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5656 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5657 if (qseecom.support_fde) {
5658 p = qseecom.ce_info.fde;
5659 total = qseecom.ce_info.num_fde;
5660 } else {
5661 pr_err("system does not support fde\n");
5662 return -EINVAL;
5663 }
5664 break;
5665 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5666 if (qseecom.support_pfe) {
5667 p = qseecom.ce_info.pfe;
5668 total = qseecom.ce_info.num_pfe;
5669 } else {
5670 pr_err("system does not support pfe\n");
5671 return -EINVAL;
5672 }
5673 break;
5674 default:
5675 pr_err("unsupported usage %d\n", usage);
5676 return -EINVAL;
5677 }
5678
5679 for (j = 0; j < total; j++) {
5680 if (p->unit_num == unit) {
5681 pcepipe = p->ce_pipe_entry;
5682 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5683 (*ce_hw)[i] = pcepipe->ce_num;
5684 *pipe = pcepipe->ce_pipe_pair;
5685 pcepipe++;
5686 }
5687 ret = 0;
5688 break;
5689 }
5690 p++;
5691 }
5692 return ret;
5693}
5694
5695static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
5696 enum qseecom_key_management_usage_type usage,
5697 struct qseecom_key_generate_ireq *ireq)
5698{
5699 struct qseecom_command_scm_resp resp;
5700 int ret;
5701
5702 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5703 usage >= QSEOS_KM_USAGE_MAX) {
5704 pr_err("Error:: unsupported usage %d\n", usage);
5705 return -EFAULT;
5706 }
5707 ret = __qseecom_enable_clk(CLK_QSEE);
5708 if (ret)
5709 return ret;
5710
5711 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5712 ireq, sizeof(struct qseecom_key_generate_ireq),
5713 &resp, sizeof(resp));
5714 if (ret) {
5715 if (ret == -EINVAL &&
5716 resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5717 pr_debug("Key ID exists.\n");
5718 ret = 0;
5719 } else {
5720 pr_err("scm call to generate key failed : %d\n", ret);
5721 ret = -EFAULT;
5722 }
5723 goto generate_key_exit;
5724 }
5725
5726 switch (resp.result) {
5727 case QSEOS_RESULT_SUCCESS:
5728 break;
5729 case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
5730 pr_debug("Key ID exists.\n");
5731 break;
5732 case QSEOS_RESULT_INCOMPLETE:
5733 ret = __qseecom_process_incomplete_cmd(data, &resp);
5734 if (ret) {
5735 if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5736 pr_debug("Key ID exists.\n");
5737 ret = 0;
5738 } else {
5739 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5740 resp.result);
5741 }
5742 }
5743 break;
5744 case QSEOS_RESULT_FAILURE:
5745 default:
5746 pr_err("gen key scm call failed resp.result %d\n", resp.result);
5747 ret = -EINVAL;
5748 break;
5749 }
5750generate_key_exit:
5751 __qseecom_disable_clk(CLK_QSEE);
5752 return ret;
5753}
5754
5755static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
5756 enum qseecom_key_management_usage_type usage,
5757 struct qseecom_key_delete_ireq *ireq)
5758{
5759 struct qseecom_command_scm_resp resp;
5760 int ret;
5761
5762 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5763 usage >= QSEOS_KM_USAGE_MAX) {
5764 pr_err("Error:: unsupported usage %d\n", usage);
5765 return -EFAULT;
5766 }
5767 ret = __qseecom_enable_clk(CLK_QSEE);
5768 if (ret)
5769 return ret;
5770
5771 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5772 ireq, sizeof(struct qseecom_key_delete_ireq),
5773 &resp, sizeof(struct qseecom_command_scm_resp));
5774 if (ret) {
5775 if (ret == -EINVAL &&
5776 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5777 pr_debug("Max attempts to input password reached.\n");
5778 ret = -ERANGE;
5779 } else {
5780 pr_err("scm call to delete key failed : %d\n", ret);
5781 ret = -EFAULT;
5782 }
5783 goto del_key_exit;
5784 }
5785
5786 switch (resp.result) {
5787 case QSEOS_RESULT_SUCCESS:
5788 break;
5789 case QSEOS_RESULT_INCOMPLETE:
5790 ret = __qseecom_process_incomplete_cmd(data, &resp);
5791 if (ret) {
5792 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5793 resp.result);
5794 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5795 pr_debug("Max attempts to input password reached.\n");
5796 ret = -ERANGE;
5797 }
5798 }
5799 break;
5800 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5801 pr_debug("Max attempts to input password reached.\n");
5802 ret = -ERANGE;
5803 break;
5804 case QSEOS_RESULT_FAILURE:
5805 default:
5806 pr_err("Delete key scm call failed resp.result %d\n",
5807 resp.result);
5808 ret = -EINVAL;
5809 break;
5810 }
5811del_key_exit:
5812 __qseecom_disable_clk(CLK_QSEE);
5813 return ret;
5814}
5815
5816static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5817 enum qseecom_key_management_usage_type usage,
5818 struct qseecom_key_select_ireq *ireq)
5819{
5820 struct qseecom_command_scm_resp resp;
5821 int ret;
5822
5823 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5824 usage >= QSEOS_KM_USAGE_MAX) {
5825 pr_err("Error:: unsupported usage %d\n", usage);
5826 return -EFAULT;
5827 }
5828 ret = __qseecom_enable_clk(CLK_QSEE);
5829 if (ret)
5830 return ret;
5831
5832 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5833 ret = __qseecom_enable_clk(CLK_CE_DRV);
5834 if (ret)
5835 return ret;
5836 }
5837
5838 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5839 ireq, sizeof(struct qseecom_key_select_ireq),
5840 &resp, sizeof(struct qseecom_command_scm_resp));
5841 if (ret) {
5842 if (ret == -EINVAL &&
5843 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5844 pr_debug("Max attempts to input password reached.\n");
5845 ret = -ERANGE;
5846 } else if (ret == -EINVAL &&
5847 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5848 pr_debug("Set Key operation under processing...\n");
5849 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5850 } else {
5851 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5852 ret);
5853 ret = -EFAULT;
5854 }
5855 goto set_key_exit;
5856 }
5857
5858 switch (resp.result) {
5859 case QSEOS_RESULT_SUCCESS:
5860 break;
5861 case QSEOS_RESULT_INCOMPLETE:
5862 ret = __qseecom_process_incomplete_cmd(data, &resp);
5863 if (ret) {
5864 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5865 resp.result);
5866 if (resp.result ==
5867 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5868 pr_debug("Set Key operation under processing...\n");
5869 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5870 }
5871 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5872 pr_debug("Max attempts to input password reached.\n");
5873 ret = -ERANGE;
5874 }
5875 }
5876 break;
5877 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5878 pr_debug("Max attempts to input password reached.\n");
5879 ret = -ERANGE;
5880 break;
5881 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5882 pr_debug("Set Key operation under processing...\n");
5883 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5884 break;
5885 case QSEOS_RESULT_FAILURE:
5886 default:
5887 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5888 ret = -EINVAL;
5889 break;
5890 }
5891set_key_exit:
5892 __qseecom_disable_clk(CLK_QSEE);
5893 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5894 __qseecom_disable_clk(CLK_CE_DRV);
5895 return ret;
5896}
5897
5898static int __qseecom_update_current_key_user_info(
5899 struct qseecom_dev_handle *data,
5900 enum qseecom_key_management_usage_type usage,
5901 struct qseecom_key_userinfo_update_ireq *ireq)
5902{
5903 struct qseecom_command_scm_resp resp;
5904 int ret;
5905
5906 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5907 usage >= QSEOS_KM_USAGE_MAX) {
5908 pr_err("Error:: unsupported usage %d\n", usage);
5909 return -EFAULT;
5910 }
5911 ret = __qseecom_enable_clk(CLK_QSEE);
5912 if (ret)
5913 return ret;
5914
5915 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5916 ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
5917 &resp, sizeof(struct qseecom_command_scm_resp));
5918 if (ret) {
5919 if (ret == -EINVAL &&
5920 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5921 pr_debug("Set Key operation under processing...\n");
5922 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5923 } else {
5924 pr_err("scm call to update key userinfo failed: %d\n",
5925 ret);
5926 __qseecom_disable_clk(CLK_QSEE);
5927 return -EFAULT;
5928 }
5929 }
5930
5931 switch (resp.result) {
5932 case QSEOS_RESULT_SUCCESS:
5933 break;
5934 case QSEOS_RESULT_INCOMPLETE:
5935 ret = __qseecom_process_incomplete_cmd(data, &resp);
5936 if (resp.result ==
5937 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5938 pr_debug("Set Key operation under processing...\n");
5939 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5940 }
5941 if (ret)
5942 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5943 resp.result);
5944 break;
5945 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5946 pr_debug("Update Key operation under processing...\n");
5947 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5948 break;
5949 case QSEOS_RESULT_FAILURE:
5950 default:
5951 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5952 ret = -EINVAL;
5953 break;
5954 }
5955
5956 __qseecom_disable_clk(CLK_QSEE);
5957 return ret;
5958}
5959
5960
5961static int qseecom_enable_ice_setup(int usage)
5962{
5963 int ret = 0;
5964
5965 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5966 ret = qcom_ice_setup_ice_hw("ufs", true);
5967 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5968 ret = qcom_ice_setup_ice_hw("sdcc", true);
5969
5970 return ret;
5971}
5972
5973static int qseecom_disable_ice_setup(int usage)
5974{
5975 int ret = 0;
5976
5977 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5978 ret = qcom_ice_setup_ice_hw("ufs", false);
5979 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5980 ret = qcom_ice_setup_ice_hw("sdcc", false);
5981
5982 return ret;
5983}
5984
5985static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5986{
5987 struct qseecom_ce_info_use *pce_info_use, *p;
5988 int total = 0;
5989 int i;
5990
5991 switch (usage) {
5992 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5993 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5994 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5995 p = qseecom.ce_info.fde;
5996 total = qseecom.ce_info.num_fde;
5997 break;
5998 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5999 p = qseecom.ce_info.pfe;
6000 total = qseecom.ce_info.num_pfe;
6001 break;
6002 default:
6003 pr_err("unsupported usage %d\n", usage);
6004 return -EINVAL;
6005 }
6006
6007 pce_info_use = NULL;
6008
6009 for (i = 0; i < total; i++) {
6010 if (p->unit_num == unit) {
6011 pce_info_use = p;
6012 break;
6013 }
6014 p++;
6015 }
6016 if (!pce_info_use) {
6017 pr_err("can not find %d\n", unit);
6018 return -EINVAL;
6019 }
6020 return pce_info_use->num_ce_pipe_entries;
6021}
6022
6023static int qseecom_create_key(struct qseecom_dev_handle *data,
6024 void __user *argp)
6025{
6026 int i;
6027 uint32_t *ce_hw = NULL;
6028 uint32_t pipe = 0;
6029 int ret = 0;
6030 uint32_t flags = 0;
6031 struct qseecom_create_key_req create_key_req;
6032 struct qseecom_key_generate_ireq generate_key_ireq;
6033 struct qseecom_key_select_ireq set_key_ireq;
6034 uint32_t entries = 0;
6035
6036 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
6037 if (ret) {
6038 pr_err("copy_from_user failed\n");
6039 return ret;
6040 }
6041
6042 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6043 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6044 pr_err("unsupported usage %d\n", create_key_req.usage);
6045 ret = -EFAULT;
6046 return ret;
6047 }
6048 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6049 create_key_req.usage);
6050 if (entries <= 0) {
6051 pr_err("no ce instance for usage %d instance %d\n",
6052 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
6053 ret = -EINVAL;
6054 return ret;
6055 }
6056
6057 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6058 if (!ce_hw) {
6059 ret = -ENOMEM;
6060 return ret;
6061 }
6062 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
6063 DEFAULT_CE_INFO_UNIT);
6064 if (ret) {
6065 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6066 ret = -EINVAL;
6067 goto free_buf;
6068 }
6069
6070 if (qseecom.fde_key_size)
6071 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6072 else
6073 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6074
Jiten Patela7bb1d52018-05-11 12:34:26 +05306075 if (qseecom.enable_key_wrap_in_ks == true)
6076 flags |= ENABLE_KEY_WRAP_IN_KS;
6077
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006078 generate_key_ireq.flags = flags;
6079 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
6080 memset((void *)generate_key_ireq.key_id,
6081 0, QSEECOM_KEY_ID_SIZE);
6082 memset((void *)generate_key_ireq.hash32,
6083 0, QSEECOM_HASH_SIZE);
6084 memcpy((void *)generate_key_ireq.key_id,
6085 (void *)key_id_array[create_key_req.usage].desc,
6086 QSEECOM_KEY_ID_SIZE);
6087 memcpy((void *)generate_key_ireq.hash32,
6088 (void *)create_key_req.hash32,
6089 QSEECOM_HASH_SIZE);
6090
6091 ret = __qseecom_generate_and_save_key(data,
6092 create_key_req.usage, &generate_key_ireq);
6093 if (ret) {
6094 pr_err("Failed to generate key on storage: %d\n", ret);
6095 goto free_buf;
6096 }
6097
6098 for (i = 0; i < entries; i++) {
6099 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6100 if (create_key_req.usage ==
6101 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6102 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6103 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6104
6105 } else if (create_key_req.usage ==
6106 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6107 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6108 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6109
6110 } else {
6111 set_key_ireq.ce = ce_hw[i];
6112 set_key_ireq.pipe = pipe;
6113 }
6114 set_key_ireq.flags = flags;
6115
6116 /* set both PIPE_ENC and PIPE_ENC_XTS*/
6117 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6118 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6119 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6120 memcpy((void *)set_key_ireq.key_id,
6121 (void *)key_id_array[create_key_req.usage].desc,
6122 QSEECOM_KEY_ID_SIZE);
6123 memcpy((void *)set_key_ireq.hash32,
6124 (void *)create_key_req.hash32,
6125 QSEECOM_HASH_SIZE);
6126 /*
6127 * It will return false if it is GPCE based crypto instance or
6128 * ICE is setup properly
6129 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006130 ret = qseecom_enable_ice_setup(create_key_req.usage);
6131 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006132 goto free_buf;
6133
6134 do {
6135 ret = __qseecom_set_clear_ce_key(data,
6136 create_key_req.usage,
6137 &set_key_ireq);
6138 /*
6139 * wait a little before calling scm again to let other
6140 * processes run
6141 */
6142 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6143 msleep(50);
6144
6145 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6146
6147 qseecom_disable_ice_setup(create_key_req.usage);
6148
6149 if (ret) {
6150 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
6151 pipe, ce_hw[i], ret);
6152 goto free_buf;
6153 } else {
6154 pr_err("Set the key successfully\n");
6155 if ((create_key_req.usage ==
6156 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
6157 (create_key_req.usage ==
6158 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
6159 goto free_buf;
6160 }
6161 }
6162
6163free_buf:
6164 kzfree(ce_hw);
6165 return ret;
6166}
6167
6168static int qseecom_wipe_key(struct qseecom_dev_handle *data,
6169 void __user *argp)
6170{
6171 uint32_t *ce_hw = NULL;
6172 uint32_t pipe = 0;
6173 int ret = 0;
6174 uint32_t flags = 0;
6175 int i, j;
6176 struct qseecom_wipe_key_req wipe_key_req;
6177 struct qseecom_key_delete_ireq delete_key_ireq;
6178 struct qseecom_key_select_ireq clear_key_ireq;
6179 uint32_t entries = 0;
6180
6181 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6182 if (ret) {
6183 pr_err("copy_from_user failed\n");
6184 return ret;
6185 }
6186
6187 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6188 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6189 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6190 ret = -EFAULT;
6191 return ret;
6192 }
6193
6194 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6195 wipe_key_req.usage);
6196 if (entries <= 0) {
6197 pr_err("no ce instance for usage %d instance %d\n",
6198 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6199 ret = -EINVAL;
6200 return ret;
6201 }
6202
6203 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6204 if (!ce_hw) {
6205 ret = -ENOMEM;
6206 return ret;
6207 }
6208
6209 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6210 DEFAULT_CE_INFO_UNIT);
6211 if (ret) {
6212 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6213 ret = -EINVAL;
6214 goto free_buf;
6215 }
6216
6217 if (wipe_key_req.wipe_key_flag) {
6218 delete_key_ireq.flags = flags;
6219 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6220 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6221 memcpy((void *)delete_key_ireq.key_id,
6222 (void *)key_id_array[wipe_key_req.usage].desc,
6223 QSEECOM_KEY_ID_SIZE);
6224 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6225
6226 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6227 &delete_key_ireq);
6228 if (ret) {
6229 pr_err("Failed to delete key from ssd storage: %d\n",
6230 ret);
6231 ret = -EFAULT;
6232 goto free_buf;
6233 }
6234 }
6235
6236 for (j = 0; j < entries; j++) {
6237 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6238 if (wipe_key_req.usage ==
6239 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6240 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6241 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6242 } else if (wipe_key_req.usage ==
6243 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6244 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6245 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6246 } else {
6247 clear_key_ireq.ce = ce_hw[j];
6248 clear_key_ireq.pipe = pipe;
6249 }
6250 clear_key_ireq.flags = flags;
6251 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6252 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6253 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6254 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6255
6256 /*
6257 * It will return false if it is GPCE based crypto instance or
6258 * ICE is setup properly
6259 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006260 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6261 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006262 goto free_buf;
6263
6264 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6265 &clear_key_ireq);
6266
6267 qseecom_disable_ice_setup(wipe_key_req.usage);
6268
6269 if (ret) {
6270 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6271 pipe, ce_hw[j], ret);
6272 ret = -EFAULT;
6273 goto free_buf;
6274 }
6275 }
6276
6277free_buf:
6278 kzfree(ce_hw);
6279 return ret;
6280}
6281
6282static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6283 void __user *argp)
6284{
6285 int ret = 0;
6286 uint32_t flags = 0;
6287 struct qseecom_update_key_userinfo_req update_key_req;
6288 struct qseecom_key_userinfo_update_ireq ireq;
6289
6290 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6291 if (ret) {
6292 pr_err("copy_from_user failed\n");
6293 return ret;
6294 }
6295
6296 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6297 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6298 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6299 return -EFAULT;
6300 }
6301
6302 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6303
6304 if (qseecom.fde_key_size)
6305 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6306 else
6307 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6308
6309 ireq.flags = flags;
6310 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6311 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6312 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6313 memcpy((void *)ireq.key_id,
6314 (void *)key_id_array[update_key_req.usage].desc,
6315 QSEECOM_KEY_ID_SIZE);
6316 memcpy((void *)ireq.current_hash32,
6317 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6318 memcpy((void *)ireq.new_hash32,
6319 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6320
6321 do {
6322 ret = __qseecom_update_current_key_user_info(data,
6323 update_key_req.usage,
6324 &ireq);
6325 /*
6326 * wait a little before calling scm again to let other
6327 * processes run
6328 */
6329 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6330 msleep(50);
6331
6332 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6333 if (ret) {
6334 pr_err("Failed to update key info: %d\n", ret);
6335 return ret;
6336 }
6337 return ret;
6338
6339}
6340static int qseecom_is_es_activated(void __user *argp)
6341{
Zhen Kong26e62742018-05-04 17:19:06 -07006342 struct qseecom_is_es_activated_req req = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006343 struct qseecom_command_scm_resp resp;
6344 int ret;
6345
6346 if (qseecom.qsee_version < QSEE_VERSION_04) {
6347 pr_err("invalid qsee version\n");
6348 return -ENODEV;
6349 }
6350
6351 if (argp == NULL) {
6352 pr_err("arg is null\n");
6353 return -EINVAL;
6354 }
6355
6356 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6357 &req, sizeof(req), &resp, sizeof(resp));
6358 if (ret) {
6359 pr_err("scm_call failed\n");
6360 return ret;
6361 }
6362
6363 req.is_activated = resp.result;
6364 ret = copy_to_user(argp, &req, sizeof(req));
6365 if (ret) {
6366 pr_err("copy_to_user failed\n");
6367 return ret;
6368 }
6369
6370 return 0;
6371}
6372
6373static int qseecom_save_partition_hash(void __user *argp)
6374{
6375 struct qseecom_save_partition_hash_req req;
6376 struct qseecom_command_scm_resp resp;
6377 int ret;
6378
6379 memset(&resp, 0x00, sizeof(resp));
6380
6381 if (qseecom.qsee_version < QSEE_VERSION_04) {
6382 pr_err("invalid qsee version\n");
6383 return -ENODEV;
6384 }
6385
6386 if (argp == NULL) {
6387 pr_err("arg is null\n");
6388 return -EINVAL;
6389 }
6390
6391 ret = copy_from_user(&req, argp, sizeof(req));
6392 if (ret) {
6393 pr_err("copy_from_user failed\n");
6394 return ret;
6395 }
6396
6397 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6398 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6399 if (ret) {
6400 pr_err("qseecom_scm_call failed\n");
6401 return ret;
6402 }
6403
6404 return 0;
6405}
6406
6407static int qseecom_mdtp_cipher_dip(void __user *argp)
6408{
6409 struct qseecom_mdtp_cipher_dip_req req;
6410 u32 tzbuflenin, tzbuflenout;
6411 char *tzbufin = NULL, *tzbufout = NULL;
6412 struct scm_desc desc = {0};
6413 int ret;
6414
6415 do {
6416 /* Copy the parameters from userspace */
6417 if (argp == NULL) {
6418 pr_err("arg is null\n");
6419 ret = -EINVAL;
6420 break;
6421 }
6422
6423 ret = copy_from_user(&req, argp, sizeof(req));
6424 if (ret) {
6425 pr_err("copy_from_user failed, ret= %d\n", ret);
6426 break;
6427 }
6428
6429 if (req.in_buf == NULL || req.out_buf == NULL ||
6430 req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
6431 req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
6432 req.direction > 1) {
6433 pr_err("invalid parameters\n");
6434 ret = -EINVAL;
6435 break;
6436 }
6437
6438 /* Copy the input buffer from userspace to kernel space */
6439 tzbuflenin = PAGE_ALIGN(req.in_buf_size);
6440 tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
6441 if (!tzbufin) {
6442 pr_err("error allocating in buffer\n");
6443 ret = -ENOMEM;
6444 break;
6445 }
6446
6447 ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
6448 if (ret) {
6449 pr_err("copy_from_user failed, ret=%d\n", ret);
6450 break;
6451 }
6452
6453 dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
6454
6455 /* Prepare the output buffer in kernel space */
6456 tzbuflenout = PAGE_ALIGN(req.out_buf_size);
6457 tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
6458 if (!tzbufout) {
6459 pr_err("error allocating out buffer\n");
6460 ret = -ENOMEM;
6461 break;
6462 }
6463
6464 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6465
6466 /* Send the command to TZ */
6467 desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
6468 desc.args[0] = virt_to_phys(tzbufin);
6469 desc.args[1] = req.in_buf_size;
6470 desc.args[2] = virt_to_phys(tzbufout);
6471 desc.args[3] = req.out_buf_size;
6472 desc.args[4] = req.direction;
6473
6474 ret = __qseecom_enable_clk(CLK_QSEE);
6475 if (ret)
6476 break;
6477
Zhen Kong03f220d2019-02-01 17:12:34 -08006478 ret = __qseecom_scm_call2_locked(TZ_MDTP_CIPHER_DIP_ID, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006479
6480 __qseecom_disable_clk(CLK_QSEE);
6481
6482 if (ret) {
6483 pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
6484 ret);
6485 break;
6486 }
6487
6488 /* Copy the output buffer from kernel space to userspace */
6489 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6490 ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
6491 if (ret) {
6492 pr_err("copy_to_user failed, ret=%d\n", ret);
6493 break;
6494 }
6495 } while (0);
6496
6497 kzfree(tzbufin);
6498 kzfree(tzbufout);
6499
6500 return ret;
6501}
6502
6503static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
6504 struct qseecom_qteec_req *req)
6505{
6506 if (!data || !data->client.ihandle) {
6507 pr_err("Client or client handle is not initialized\n");
6508 return -EINVAL;
6509 }
6510
6511 if (data->type != QSEECOM_CLIENT_APP)
6512 return -EFAULT;
6513
6514 if (req->req_len > UINT_MAX - req->resp_len) {
6515 pr_err("Integer overflow detected in req_len & rsp_len\n");
6516 return -EINVAL;
6517 }
6518
6519 if (req->req_len + req->resp_len > data->client.sb_length) {
6520 pr_debug("Not enough memory to fit cmd_buf.\n");
6521 pr_debug("resp_buf. Required: %u, Available: %zu\n",
6522 (req->req_len + req->resp_len), data->client.sb_length);
6523 return -ENOMEM;
6524 }
6525
6526 if (req->req_ptr == NULL || req->resp_ptr == NULL) {
6527 pr_err("cmd buffer or response buffer is null\n");
6528 return -EINVAL;
6529 }
6530 if (((uintptr_t)req->req_ptr <
6531 data->client.user_virt_sb_base) ||
6532 ((uintptr_t)req->req_ptr >=
6533 (data->client.user_virt_sb_base + data->client.sb_length))) {
6534 pr_err("cmd buffer address not within shared bufffer\n");
6535 return -EINVAL;
6536 }
6537
6538 if (((uintptr_t)req->resp_ptr <
6539 data->client.user_virt_sb_base) ||
6540 ((uintptr_t)req->resp_ptr >=
6541 (data->client.user_virt_sb_base + data->client.sb_length))) {
6542 pr_err("response buffer address not within shared bufffer\n");
6543 return -EINVAL;
6544 }
6545
6546 if ((req->req_len == 0) || (req->resp_len == 0)) {
6547 pr_err("cmd buf lengtgh/response buf length not valid\n");
6548 return -EINVAL;
6549 }
6550
6551 if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
6552 pr_err("Integer overflow in req_len & req_ptr\n");
6553 return -EINVAL;
6554 }
6555
6556 if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
6557 pr_err("Integer overflow in resp_len & resp_ptr\n");
6558 return -EINVAL;
6559 }
6560
6561 if (data->client.user_virt_sb_base >
6562 (ULONG_MAX - data->client.sb_length)) {
6563 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
6564 return -EINVAL;
6565 }
6566 if ((((uintptr_t)req->req_ptr + req->req_len) >
6567 ((uintptr_t)data->client.user_virt_sb_base +
6568 data->client.sb_length)) ||
6569 (((uintptr_t)req->resp_ptr + req->resp_len) >
6570 ((uintptr_t)data->client.user_virt_sb_base +
6571 data->client.sb_length))) {
6572 pr_err("cmd buf or resp buf is out of shared buffer region\n");
6573 return -EINVAL;
6574 }
6575 return 0;
6576}
6577
6578static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6579 uint32_t fd_idx, struct sg_table *sg_ptr)
6580{
6581 struct scatterlist *sg = sg_ptr->sgl;
6582 struct qseecom_sg_entry *sg_entry;
6583 void *buf;
6584 uint i;
6585 size_t size;
6586 dma_addr_t coh_pmem;
6587
6588 if (fd_idx >= MAX_ION_FD) {
6589 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6590 return -ENOMEM;
6591 }
6592 /*
6593 * Allocate a buffer, populate it with number of entry plus
6594 * each sg entry's phy addr and length; then return the
6595 * phy_addr of the buffer.
6596 */
6597 size = sizeof(uint32_t) +
6598 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6599 size = (size + PAGE_SIZE) & PAGE_MASK;
6600 buf = dma_alloc_coherent(qseecom.pdev,
6601 size, &coh_pmem, GFP_KERNEL);
6602 if (buf == NULL) {
6603 pr_err("failed to alloc memory for sg buf\n");
6604 return -ENOMEM;
6605 }
6606 *(uint32_t *)buf = sg_ptr->nents;
6607 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6608 for (i = 0; i < sg_ptr->nents; i++) {
6609 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6610 sg_entry->len = sg->length;
6611 sg_entry++;
6612 sg = sg_next(sg);
6613 }
6614 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6615 data->client.sec_buf_fd[fd_idx].vbase = buf;
6616 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6617 data->client.sec_buf_fd[fd_idx].size = size;
6618 return 0;
6619}
6620
6621static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6622 struct qseecom_dev_handle *data, bool cleanup)
6623{
6624 struct ion_handle *ihandle;
6625 int ret = 0;
6626 int i = 0;
6627 uint32_t *update;
6628 struct sg_table *sg_ptr = NULL;
6629 struct scatterlist *sg;
6630 struct qseecom_param_memref *memref;
6631
6632 if (req == NULL) {
6633 pr_err("Invalid address\n");
6634 return -EINVAL;
6635 }
6636 for (i = 0; i < MAX_ION_FD; i++) {
6637 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006638 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006639 req->ifd_data[i].fd);
6640 if (IS_ERR_OR_NULL(ihandle)) {
6641 pr_err("Ion client can't retrieve the handle\n");
6642 return -ENOMEM;
6643 }
6644 if ((req->req_len < sizeof(uint32_t)) ||
6645 (req->ifd_data[i].cmd_buf_offset >
6646 req->req_len - sizeof(uint32_t))) {
6647 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6648 req->req_len,
6649 req->ifd_data[i].cmd_buf_offset);
6650 return -EINVAL;
6651 }
6652 update = (uint32_t *)((char *) req->req_ptr +
6653 req->ifd_data[i].cmd_buf_offset);
6654 if (!update) {
6655 pr_err("update pointer is NULL\n");
6656 return -EINVAL;
6657 }
6658 } else {
6659 continue;
6660 }
6661 /* Populate the cmd data structure with the phys_addr */
6662 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6663 if (IS_ERR_OR_NULL(sg_ptr)) {
6664 pr_err("IOn client could not retrieve sg table\n");
6665 goto err;
6666 }
6667 sg = sg_ptr->sgl;
6668 if (sg == NULL) {
6669 pr_err("sg is NULL\n");
6670 goto err;
6671 }
6672 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6673 pr_err("Num of scat entr (%d)or length(%d) invalid\n",
6674 sg_ptr->nents, sg->length);
6675 goto err;
6676 }
6677 /* clean up buf for pre-allocated fd */
6678 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6679 (*update)) {
6680 if (data->client.sec_buf_fd[i].vbase)
6681 dma_free_coherent(qseecom.pdev,
6682 data->client.sec_buf_fd[i].size,
6683 data->client.sec_buf_fd[i].vbase,
6684 data->client.sec_buf_fd[i].pbase);
6685 memset((void *)update, 0,
6686 sizeof(struct qseecom_param_memref));
6687 memset(&(data->client.sec_buf_fd[i]), 0,
6688 sizeof(struct qseecom_sec_buf_fd_info));
6689 goto clean;
6690 }
6691
6692 if (*update == 0) {
6693 /* update buf for pre-allocated fd from secure heap*/
6694 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6695 sg_ptr);
6696 if (ret) {
6697 pr_err("Failed to handle buf for fd[%d]\n", i);
6698 goto err;
6699 }
6700 memref = (struct qseecom_param_memref *)update;
6701 memref->buffer =
6702 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6703 memref->size =
6704 (uint32_t)(data->client.sec_buf_fd[i].size);
6705 } else {
6706 /* update buf for fd from non-secure qseecom heap */
6707 if (sg_ptr->nents != 1) {
6708 pr_err("Num of scat entr (%d) invalid\n",
6709 sg_ptr->nents);
6710 goto err;
6711 }
6712 if (cleanup)
6713 *update = 0;
6714 else
6715 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6716 }
6717clean:
6718 if (cleanup) {
6719 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6720 ihandle, NULL, sg->length,
6721 ION_IOC_INV_CACHES);
6722 if (ret) {
6723 pr_err("cache operation failed %d\n", ret);
6724 goto err;
6725 }
6726 } else {
6727 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6728 ihandle, NULL, sg->length,
6729 ION_IOC_CLEAN_INV_CACHES);
6730 if (ret) {
6731 pr_err("cache operation failed %d\n", ret);
6732 goto err;
6733 }
6734 data->sglistinfo_ptr[i].indexAndFlags =
6735 SGLISTINFO_SET_INDEX_FLAG(
6736 (sg_ptr->nents == 1), 0,
6737 req->ifd_data[i].cmd_buf_offset);
6738 data->sglistinfo_ptr[i].sizeOrCount =
6739 (sg_ptr->nents == 1) ?
6740 sg->length : sg_ptr->nents;
6741 data->sglist_cnt = i + 1;
6742 }
6743 /* Deallocate the handle */
6744 if (!IS_ERR_OR_NULL(ihandle))
6745 ion_free(qseecom.ion_clnt, ihandle);
6746 }
6747 return ret;
6748err:
6749 if (!IS_ERR_OR_NULL(ihandle))
6750 ion_free(qseecom.ion_clnt, ihandle);
6751 return -ENOMEM;
6752}
6753
6754static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
6755 struct qseecom_qteec_req *req, uint32_t cmd_id)
6756{
6757 struct qseecom_command_scm_resp resp;
6758 struct qseecom_qteec_ireq ireq;
6759 struct qseecom_qteec_64bit_ireq ireq_64bit;
6760 struct qseecom_registered_app_list *ptr_app;
6761 bool found_app = false;
6762 unsigned long flags;
6763 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07006764 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006765 uint32_t reqd_len_sb_in = 0;
6766 void *cmd_buf = NULL;
6767 size_t cmd_len;
6768 struct sglist_info *table = data->sglistinfo_ptr;
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306769 void *req_ptr = NULL;
6770 void *resp_ptr = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006771
6772 ret = __qseecom_qteec_validate_msg(data, req);
6773 if (ret)
6774 return ret;
6775
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306776 req_ptr = req->req_ptr;
6777 resp_ptr = req->resp_ptr;
6778
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006779 /* find app_id & img_name from list */
6780 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6781 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6782 list) {
6783 if ((ptr_app->app_id == data->client.app_id) &&
6784 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6785 found_app = true;
6786 break;
6787 }
6788 }
6789 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6790 if (!found_app) {
6791 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6792 (char *)data->client.app_name);
6793 return -ENOENT;
6794 }
6795
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306796 req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6797 (uintptr_t)req->req_ptr);
6798 req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6799 (uintptr_t)req->resp_ptr);
6800
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006801 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6802 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
6803 ret = __qseecom_update_qteec_req_buf(
6804 (struct qseecom_qteec_modfd_req *)req, data, false);
6805 if (ret)
6806 return ret;
6807 }
6808
6809 if (qseecom.qsee_version < QSEE_VERSION_40) {
6810 ireq.app_id = data->client.app_id;
6811 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306812 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006813 ireq.req_len = req->req_len;
6814 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306815 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006816 ireq.resp_len = req->resp_len;
6817 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6818 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6819 dmac_flush_range((void *)table,
6820 (void *)table + SGLISTINFO_TABLE_SIZE);
6821 cmd_buf = (void *)&ireq;
6822 cmd_len = sizeof(struct qseecom_qteec_ireq);
6823 } else {
6824 ireq_64bit.app_id = data->client.app_id;
6825 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306826 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006827 ireq_64bit.req_len = req->req_len;
6828 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306829 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006830 ireq_64bit.resp_len = req->resp_len;
6831 if ((data->client.app_arch == ELFCLASS32) &&
6832 ((ireq_64bit.req_ptr >=
6833 PHY_ADDR_4G - ireq_64bit.req_len) ||
6834 (ireq_64bit.resp_ptr >=
6835 PHY_ADDR_4G - ireq_64bit.resp_len))){
6836 pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
6837 data->client.app_name, data->client.app_id);
6838 pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
6839 ireq_64bit.req_ptr, ireq_64bit.req_len,
6840 ireq_64bit.resp_ptr, ireq_64bit.resp_len);
6841 return -EFAULT;
6842 }
6843 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6844 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6845 dmac_flush_range((void *)table,
6846 (void *)table + SGLISTINFO_TABLE_SIZE);
6847 cmd_buf = (void *)&ireq_64bit;
6848 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6849 }
6850 if (qseecom.whitelist_support == true
6851 && cmd_id == QSEOS_TEE_OPEN_SESSION)
6852 *(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
6853 else
6854 *(uint32_t *)cmd_buf = cmd_id;
6855
6856 reqd_len_sb_in = req->req_len + req->resp_len;
6857 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6858 data->client.sb_virt,
6859 reqd_len_sb_in,
6860 ION_IOC_CLEAN_INV_CACHES);
6861 if (ret) {
6862 pr_err("cache operation failed %d\n", ret);
6863 return ret;
6864 }
6865
6866 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6867
6868 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6869 cmd_buf, cmd_len,
6870 &resp, sizeof(resp));
6871 if (ret) {
6872 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6873 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07006874 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006875 }
6876
6877 if (qseecom.qsee_reentrancy_support) {
6878 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07006879 if (ret)
6880 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006881 } else {
6882 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6883 ret = __qseecom_process_incomplete_cmd(data, &resp);
6884 if (ret) {
6885 pr_err("process_incomplete_cmd failed err: %d\n",
6886 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07006887 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006888 }
6889 } else {
6890 if (resp.result != QSEOS_RESULT_SUCCESS) {
6891 pr_err("Response result %d not supported\n",
6892 resp.result);
6893 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07006894 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006895 }
6896 }
6897 }
Zhen Kong4af480e2017-09-19 14:34:16 -07006898exit:
6899 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006900 data->client.sb_virt, data->client.sb_length,
6901 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07006902 if (ret2) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006903 pr_err("cache operation failed %d\n", ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07006904 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006905 }
6906
6907 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6908 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
Zhen Kong4af480e2017-09-19 14:34:16 -07006909 ret2 = __qseecom_update_qteec_req_buf(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006910 (struct qseecom_qteec_modfd_req *)req, data, true);
Zhen Kong4af480e2017-09-19 14:34:16 -07006911 if (ret2)
6912 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006913 }
Zhen Kong4af480e2017-09-19 14:34:16 -07006914 return ret;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006915}
6916
6917static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6918 void __user *argp)
6919{
6920 struct qseecom_qteec_modfd_req req;
6921 int ret = 0;
6922
6923 ret = copy_from_user(&req, argp,
6924 sizeof(struct qseecom_qteec_modfd_req));
6925 if (ret) {
6926 pr_err("copy_from_user failed\n");
6927 return ret;
6928 }
6929 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6930 QSEOS_TEE_OPEN_SESSION);
6931
6932 return ret;
6933}
6934
6935static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6936 void __user *argp)
6937{
6938 struct qseecom_qteec_req req;
6939 int ret = 0;
6940
6941 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6942 if (ret) {
6943 pr_err("copy_from_user failed\n");
6944 return ret;
6945 }
6946 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6947 return ret;
6948}
6949
6950static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
6951 void __user *argp)
6952{
6953 struct qseecom_qteec_modfd_req req;
6954 struct qseecom_command_scm_resp resp;
6955 struct qseecom_qteec_ireq ireq;
6956 struct qseecom_qteec_64bit_ireq ireq_64bit;
6957 struct qseecom_registered_app_list *ptr_app;
6958 bool found_app = false;
6959 unsigned long flags;
6960 int ret = 0;
6961 int i = 0;
6962 uint32_t reqd_len_sb_in = 0;
6963 void *cmd_buf = NULL;
6964 size_t cmd_len;
6965 struct sglist_info *table = data->sglistinfo_ptr;
6966 void *req_ptr = NULL;
6967 void *resp_ptr = NULL;
6968
6969 ret = copy_from_user(&req, argp,
6970 sizeof(struct qseecom_qteec_modfd_req));
6971 if (ret) {
6972 pr_err("copy_from_user failed\n");
6973 return ret;
6974 }
6975 ret = __qseecom_qteec_validate_msg(data,
6976 (struct qseecom_qteec_req *)(&req));
6977 if (ret)
6978 return ret;
6979 req_ptr = req.req_ptr;
6980 resp_ptr = req.resp_ptr;
6981
6982 /* find app_id & img_name from list */
6983 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6984 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6985 list) {
6986 if ((ptr_app->app_id == data->client.app_id) &&
6987 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6988 found_app = true;
6989 break;
6990 }
6991 }
6992 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6993 if (!found_app) {
6994 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6995 (char *)data->client.app_name);
6996 return -ENOENT;
6997 }
6998
6999 /* validate offsets */
7000 for (i = 0; i < MAX_ION_FD; i++) {
7001 if (req.ifd_data[i].fd) {
7002 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
7003 return -EINVAL;
7004 }
7005 }
7006 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
7007 (uintptr_t)req.req_ptr);
7008 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
7009 (uintptr_t)req.resp_ptr);
7010 ret = __qseecom_update_qteec_req_buf(&req, data, false);
7011 if (ret)
7012 return ret;
7013
7014 if (qseecom.qsee_version < QSEE_VERSION_40) {
7015 ireq.app_id = data->client.app_id;
7016 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7017 (uintptr_t)req_ptr);
7018 ireq.req_len = req.req_len;
7019 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7020 (uintptr_t)resp_ptr);
7021 ireq.resp_len = req.resp_len;
7022 cmd_buf = (void *)&ireq;
7023 cmd_len = sizeof(struct qseecom_qteec_ireq);
7024 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
7025 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7026 dmac_flush_range((void *)table,
7027 (void *)table + SGLISTINFO_TABLE_SIZE);
7028 } else {
7029 ireq_64bit.app_id = data->client.app_id;
7030 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7031 (uintptr_t)req_ptr);
7032 ireq_64bit.req_len = req.req_len;
7033 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7034 (uintptr_t)resp_ptr);
7035 ireq_64bit.resp_len = req.resp_len;
7036 cmd_buf = (void *)&ireq_64bit;
7037 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
7038 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
7039 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7040 dmac_flush_range((void *)table,
7041 (void *)table + SGLISTINFO_TABLE_SIZE);
7042 }
7043 reqd_len_sb_in = req.req_len + req.resp_len;
7044 if (qseecom.whitelist_support == true)
7045 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
7046 else
7047 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
7048
7049 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7050 data->client.sb_virt,
7051 reqd_len_sb_in,
7052 ION_IOC_CLEAN_INV_CACHES);
7053 if (ret) {
7054 pr_err("cache operation failed %d\n", ret);
7055 return ret;
7056 }
7057
7058 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
7059
7060 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
7061 cmd_buf, cmd_len,
7062 &resp, sizeof(resp));
7063 if (ret) {
7064 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
7065 ret, data->client.app_id);
7066 return ret;
7067 }
7068
7069 if (qseecom.qsee_reentrancy_support) {
7070 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
7071 } else {
7072 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
7073 ret = __qseecom_process_incomplete_cmd(data, &resp);
7074 if (ret) {
7075 pr_err("process_incomplete_cmd failed err: %d\n",
7076 ret);
7077 return ret;
7078 }
7079 } else {
7080 if (resp.result != QSEOS_RESULT_SUCCESS) {
7081 pr_err("Response result %d not supported\n",
7082 resp.result);
7083 ret = -EINVAL;
7084 }
7085 }
7086 }
7087 ret = __qseecom_update_qteec_req_buf(&req, data, true);
7088 if (ret)
7089 return ret;
7090
7091 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7092 data->client.sb_virt, data->client.sb_length,
7093 ION_IOC_INV_CACHES);
7094 if (ret) {
7095 pr_err("cache operation failed %d\n", ret);
7096 return ret;
7097 }
7098 return 0;
7099}
7100
7101static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
7102 void __user *argp)
7103{
7104 struct qseecom_qteec_modfd_req req;
7105 int ret = 0;
7106
7107 ret = copy_from_user(&req, argp,
7108 sizeof(struct qseecom_qteec_modfd_req));
7109 if (ret) {
7110 pr_err("copy_from_user failed\n");
7111 return ret;
7112 }
7113 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
7114 QSEOS_TEE_REQUEST_CANCELLATION);
7115
7116 return ret;
7117}
7118
7119static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
7120{
7121 if (data->sglist_cnt) {
7122 memset(data->sglistinfo_ptr, 0,
7123 SGLISTINFO_TABLE_SIZE);
7124 data->sglist_cnt = 0;
7125 }
7126}
7127
AnilKumar Chimataa312d342019-01-25 12:43:23 +05307128static long qseecom_ioctl(struct file *file,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007129 unsigned int cmd, unsigned long arg)
7130{
7131 int ret = 0;
7132 struct qseecom_dev_handle *data = file->private_data;
7133 void __user *argp = (void __user *) arg;
7134 bool perf_enabled = false;
7135
7136 if (!data) {
7137 pr_err("Invalid/uninitialized device handle\n");
7138 return -EINVAL;
7139 }
7140
7141 if (data->abort) {
7142 pr_err("Aborting qseecom driver\n");
7143 return -ENODEV;
7144 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007145 if (cmd != QSEECOM_IOCTL_RECEIVE_REQ &&
7146 cmd != QSEECOM_IOCTL_SEND_RESP_REQ &&
7147 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP &&
7148 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64)
Zhen Kongc4c162a2019-01-23 12:07:12 -08007149 __wakeup_unregister_listener_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007150
7151 switch (cmd) {
7152 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
7153 if (data->type != QSEECOM_GENERIC) {
7154 pr_err("reg lstnr req: invalid handle (%d)\n",
7155 data->type);
7156 ret = -EINVAL;
7157 break;
7158 }
7159 pr_debug("ioctl register_listener_req()\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08007160 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007161 atomic_inc(&data->ioctl_count);
7162 data->type = QSEECOM_LISTENER_SERVICE;
7163 ret = qseecom_register_listener(data, argp);
7164 atomic_dec(&data->ioctl_count);
7165 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007166 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007167 if (ret)
7168 pr_err("failed qseecom_register_listener: %d\n", ret);
7169 break;
7170 }
Neeraj Sonib30ac1f2018-04-17 14:48:42 +05307171 case QSEECOM_IOCTL_SET_ICE_INFO: {
7172 struct qseecom_ice_data_t ice_data;
7173
7174 ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
7175 if (ret) {
7176 pr_err("copy_from_user failed\n");
7177 return -EFAULT;
7178 }
7179 qcom_ice_set_fde_flag(ice_data.flag);
7180 break;
7181 }
7182
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007183 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
7184 if ((data->listener.id == 0) ||
7185 (data->type != QSEECOM_LISTENER_SERVICE)) {
7186 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
7187 data->type, data->listener.id);
7188 ret = -EINVAL;
7189 break;
7190 }
7191 pr_debug("ioctl unregister_listener_req()\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08007192 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007193 atomic_inc(&data->ioctl_count);
7194 ret = qseecom_unregister_listener(data);
7195 atomic_dec(&data->ioctl_count);
7196 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007197 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007198 if (ret)
7199 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7200 break;
7201 }
7202 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7203 if ((data->client.app_id == 0) ||
7204 (data->type != QSEECOM_CLIENT_APP)) {
7205 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7206 data->type, data->client.app_id);
7207 ret = -EINVAL;
7208 break;
7209 }
7210 /* Only one client allowed here at a time */
7211 mutex_lock(&app_access_lock);
7212 if (qseecom.support_bus_scaling) {
7213 /* register bus bw in case the client doesn't do it */
7214 if (!data->mode) {
7215 mutex_lock(&qsee_bw_mutex);
7216 __qseecom_register_bus_bandwidth_needs(
7217 data, HIGH);
7218 mutex_unlock(&qsee_bw_mutex);
7219 }
7220 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7221 if (ret) {
7222 pr_err("Failed to set bw.\n");
7223 ret = -EINVAL;
7224 mutex_unlock(&app_access_lock);
7225 break;
7226 }
7227 }
7228 /*
7229 * On targets where crypto clock is handled by HLOS,
7230 * if clk_access_cnt is zero and perf_enabled is false,
7231 * then the crypto clock was not enabled before sending cmd to
7232 * tz, qseecom will enable the clock to avoid service failure.
7233 */
7234 if (!qseecom.no_clock_support &&
7235 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7236 pr_debug("ce clock is not enabled!\n");
7237 ret = qseecom_perf_enable(data);
7238 if (ret) {
7239 pr_err("Failed to vote for clock with err %d\n",
7240 ret);
7241 mutex_unlock(&app_access_lock);
7242 ret = -EINVAL;
7243 break;
7244 }
7245 perf_enabled = true;
7246 }
7247 atomic_inc(&data->ioctl_count);
7248 ret = qseecom_send_cmd(data, argp);
7249 if (qseecom.support_bus_scaling)
7250 __qseecom_add_bw_scale_down_timer(
7251 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7252 if (perf_enabled) {
7253 qsee_disable_clock_vote(data, CLK_DFAB);
7254 qsee_disable_clock_vote(data, CLK_SFPB);
7255 }
7256 atomic_dec(&data->ioctl_count);
7257 wake_up_all(&data->abort_wq);
7258 mutex_unlock(&app_access_lock);
7259 if (ret)
7260 pr_err("failed qseecom_send_cmd: %d\n", ret);
7261 break;
7262 }
7263 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7264 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7265 if ((data->client.app_id == 0) ||
7266 (data->type != QSEECOM_CLIENT_APP)) {
7267 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7268 data->type, data->client.app_id);
7269 ret = -EINVAL;
7270 break;
7271 }
7272 /* Only one client allowed here at a time */
7273 mutex_lock(&app_access_lock);
7274 if (qseecom.support_bus_scaling) {
7275 if (!data->mode) {
7276 mutex_lock(&qsee_bw_mutex);
7277 __qseecom_register_bus_bandwidth_needs(
7278 data, HIGH);
7279 mutex_unlock(&qsee_bw_mutex);
7280 }
7281 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7282 if (ret) {
7283 pr_err("Failed to set bw.\n");
7284 mutex_unlock(&app_access_lock);
7285 ret = -EINVAL;
7286 break;
7287 }
7288 }
7289 /*
7290 * On targets where crypto clock is handled by HLOS,
7291 * if clk_access_cnt is zero and perf_enabled is false,
7292 * then the crypto clock was not enabled before sending cmd to
7293 * tz, qseecom will enable the clock to avoid service failure.
7294 */
7295 if (!qseecom.no_clock_support &&
7296 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7297 pr_debug("ce clock is not enabled!\n");
7298 ret = qseecom_perf_enable(data);
7299 if (ret) {
7300 pr_err("Failed to vote for clock with err %d\n",
7301 ret);
7302 mutex_unlock(&app_access_lock);
7303 ret = -EINVAL;
7304 break;
7305 }
7306 perf_enabled = true;
7307 }
7308 atomic_inc(&data->ioctl_count);
7309 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7310 ret = qseecom_send_modfd_cmd(data, argp);
7311 else
7312 ret = qseecom_send_modfd_cmd_64(data, argp);
7313 if (qseecom.support_bus_scaling)
7314 __qseecom_add_bw_scale_down_timer(
7315 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7316 if (perf_enabled) {
7317 qsee_disable_clock_vote(data, CLK_DFAB);
7318 qsee_disable_clock_vote(data, CLK_SFPB);
7319 }
7320 atomic_dec(&data->ioctl_count);
7321 wake_up_all(&data->abort_wq);
7322 mutex_unlock(&app_access_lock);
7323 if (ret)
7324 pr_err("failed qseecom_send_cmd: %d\n", ret);
7325 __qseecom_clean_data_sglistinfo(data);
7326 break;
7327 }
7328 case QSEECOM_IOCTL_RECEIVE_REQ: {
7329 if ((data->listener.id == 0) ||
7330 (data->type != QSEECOM_LISTENER_SERVICE)) {
7331 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7332 data->type, data->listener.id);
7333 ret = -EINVAL;
7334 break;
7335 }
7336 atomic_inc(&data->ioctl_count);
7337 ret = qseecom_receive_req(data);
7338 atomic_dec(&data->ioctl_count);
7339 wake_up_all(&data->abort_wq);
7340 if (ret && (ret != -ERESTARTSYS))
7341 pr_err("failed qseecom_receive_req: %d\n", ret);
7342 break;
7343 }
7344 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7345 if ((data->listener.id == 0) ||
7346 (data->type != QSEECOM_LISTENER_SERVICE)) {
7347 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7348 data->type, data->listener.id);
7349 ret = -EINVAL;
7350 break;
7351 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007352 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007353 atomic_inc(&data->ioctl_count);
7354 if (!qseecom.qsee_reentrancy_support)
7355 ret = qseecom_send_resp();
7356 else
7357 ret = qseecom_reentrancy_send_resp(data);
7358 atomic_dec(&data->ioctl_count);
7359 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007360 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007361 if (ret)
7362 pr_err("failed qseecom_send_resp: %d\n", ret);
7363 break;
7364 }
7365 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7366 if ((data->type != QSEECOM_CLIENT_APP) &&
7367 (data->type != QSEECOM_GENERIC) &&
7368 (data->type != QSEECOM_SECURE_SERVICE)) {
7369 pr_err("set mem param req: invalid handle (%d)\n",
7370 data->type);
7371 ret = -EINVAL;
7372 break;
7373 }
7374 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7375 mutex_lock(&app_access_lock);
7376 atomic_inc(&data->ioctl_count);
7377 ret = qseecom_set_client_mem_param(data, argp);
7378 atomic_dec(&data->ioctl_count);
7379 mutex_unlock(&app_access_lock);
7380 if (ret)
7381 pr_err("failed Qqseecom_set_mem_param request: %d\n",
7382 ret);
7383 break;
7384 }
7385 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7386 if ((data->type != QSEECOM_GENERIC) &&
7387 (data->type != QSEECOM_CLIENT_APP)) {
7388 pr_err("load app req: invalid handle (%d)\n",
7389 data->type);
7390 ret = -EINVAL;
7391 break;
7392 }
7393 data->type = QSEECOM_CLIENT_APP;
7394 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7395 mutex_lock(&app_access_lock);
7396 atomic_inc(&data->ioctl_count);
7397 ret = qseecom_load_app(data, argp);
7398 atomic_dec(&data->ioctl_count);
7399 mutex_unlock(&app_access_lock);
7400 if (ret)
7401 pr_err("failed load_app request: %d\n", ret);
7402 break;
7403 }
7404 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7405 if ((data->client.app_id == 0) ||
7406 (data->type != QSEECOM_CLIENT_APP)) {
7407 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7408 data->type, data->client.app_id);
7409 ret = -EINVAL;
7410 break;
7411 }
7412 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7413 mutex_lock(&app_access_lock);
7414 atomic_inc(&data->ioctl_count);
7415 ret = qseecom_unload_app(data, false);
7416 atomic_dec(&data->ioctl_count);
7417 mutex_unlock(&app_access_lock);
7418 if (ret)
7419 pr_err("failed unload_app request: %d\n", ret);
7420 break;
7421 }
7422 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7423 atomic_inc(&data->ioctl_count);
7424 ret = qseecom_get_qseos_version(data, argp);
7425 if (ret)
7426 pr_err("qseecom_get_qseos_version: %d\n", ret);
7427 atomic_dec(&data->ioctl_count);
7428 break;
7429 }
7430 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7431 if ((data->type != QSEECOM_GENERIC) &&
7432 (data->type != QSEECOM_CLIENT_APP)) {
7433 pr_err("perf enable req: invalid handle (%d)\n",
7434 data->type);
7435 ret = -EINVAL;
7436 break;
7437 }
7438 if ((data->type == QSEECOM_CLIENT_APP) &&
7439 (data->client.app_id == 0)) {
7440 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7441 data->type, data->client.app_id);
7442 ret = -EINVAL;
7443 break;
7444 }
7445 atomic_inc(&data->ioctl_count);
7446 if (qseecom.support_bus_scaling) {
7447 mutex_lock(&qsee_bw_mutex);
7448 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7449 mutex_unlock(&qsee_bw_mutex);
7450 } else {
7451 ret = qseecom_perf_enable(data);
7452 if (ret)
7453 pr_err("Fail to vote for clocks %d\n", ret);
7454 }
7455 atomic_dec(&data->ioctl_count);
7456 break;
7457 }
7458 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7459 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7460 (data->type != QSEECOM_CLIENT_APP)) {
7461 pr_err("perf disable req: invalid handle (%d)\n",
7462 data->type);
7463 ret = -EINVAL;
7464 break;
7465 }
7466 if ((data->type == QSEECOM_CLIENT_APP) &&
7467 (data->client.app_id == 0)) {
7468 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7469 data->type, data->client.app_id);
7470 ret = -EINVAL;
7471 break;
7472 }
7473 atomic_inc(&data->ioctl_count);
7474 if (!qseecom.support_bus_scaling) {
7475 qsee_disable_clock_vote(data, CLK_DFAB);
7476 qsee_disable_clock_vote(data, CLK_SFPB);
7477 } else {
7478 mutex_lock(&qsee_bw_mutex);
7479 qseecom_unregister_bus_bandwidth_needs(data);
7480 mutex_unlock(&qsee_bw_mutex);
7481 }
7482 atomic_dec(&data->ioctl_count);
7483 break;
7484 }
7485
7486 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7487 /* If crypto clock is not handled by HLOS, return directly. */
7488 if (qseecom.no_clock_support) {
7489 pr_debug("crypto clock is not handled by HLOS\n");
7490 break;
7491 }
7492 if ((data->client.app_id == 0) ||
7493 (data->type != QSEECOM_CLIENT_APP)) {
7494 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7495 data->type, data->client.app_id);
7496 ret = -EINVAL;
7497 break;
7498 }
7499 atomic_inc(&data->ioctl_count);
7500 ret = qseecom_scale_bus_bandwidth(data, argp);
7501 atomic_dec(&data->ioctl_count);
7502 break;
7503 }
7504 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7505 if (data->type != QSEECOM_GENERIC) {
7506 pr_err("load ext elf req: invalid client handle (%d)\n",
7507 data->type);
7508 ret = -EINVAL;
7509 break;
7510 }
7511 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7512 data->released = true;
7513 mutex_lock(&app_access_lock);
7514 atomic_inc(&data->ioctl_count);
7515 ret = qseecom_load_external_elf(data, argp);
7516 atomic_dec(&data->ioctl_count);
7517 mutex_unlock(&app_access_lock);
7518 if (ret)
7519 pr_err("failed load_external_elf request: %d\n", ret);
7520 break;
7521 }
7522 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7523 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7524 pr_err("unload ext elf req: invalid handle (%d)\n",
7525 data->type);
7526 ret = -EINVAL;
7527 break;
7528 }
7529 data->released = true;
7530 mutex_lock(&app_access_lock);
7531 atomic_inc(&data->ioctl_count);
7532 ret = qseecom_unload_external_elf(data);
7533 atomic_dec(&data->ioctl_count);
7534 mutex_unlock(&app_access_lock);
7535 if (ret)
7536 pr_err("failed unload_app request: %d\n", ret);
7537 break;
7538 }
7539 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7540 data->type = QSEECOM_CLIENT_APP;
7541 mutex_lock(&app_access_lock);
7542 atomic_inc(&data->ioctl_count);
7543 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7544 ret = qseecom_query_app_loaded(data, argp);
7545 atomic_dec(&data->ioctl_count);
7546 mutex_unlock(&app_access_lock);
7547 break;
7548 }
7549 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7550 if (data->type != QSEECOM_GENERIC) {
7551 pr_err("send cmd svc req: invalid handle (%d)\n",
7552 data->type);
7553 ret = -EINVAL;
7554 break;
7555 }
7556 data->type = QSEECOM_SECURE_SERVICE;
7557 if (qseecom.qsee_version < QSEE_VERSION_03) {
7558 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7559 qseecom.qsee_version);
7560 return -EINVAL;
7561 }
7562 mutex_lock(&app_access_lock);
7563 atomic_inc(&data->ioctl_count);
7564 ret = qseecom_send_service_cmd(data, argp);
7565 atomic_dec(&data->ioctl_count);
7566 mutex_unlock(&app_access_lock);
7567 break;
7568 }
7569 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7570 if (!(qseecom.support_pfe || qseecom.support_fde))
7571 pr_err("Features requiring key init not supported\n");
7572 if (data->type != QSEECOM_GENERIC) {
7573 pr_err("create key req: invalid handle (%d)\n",
7574 data->type);
7575 ret = -EINVAL;
7576 break;
7577 }
7578 if (qseecom.qsee_version < QSEE_VERSION_05) {
7579 pr_err("Create Key feature unsupported: qsee ver %u\n",
7580 qseecom.qsee_version);
7581 return -EINVAL;
7582 }
7583 data->released = true;
7584 mutex_lock(&app_access_lock);
7585 atomic_inc(&data->ioctl_count);
7586 ret = qseecom_create_key(data, argp);
7587 if (ret)
7588 pr_err("failed to create encryption key: %d\n", ret);
7589
7590 atomic_dec(&data->ioctl_count);
7591 mutex_unlock(&app_access_lock);
7592 break;
7593 }
7594 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7595 if (!(qseecom.support_pfe || qseecom.support_fde))
7596 pr_err("Features requiring key init not supported\n");
7597 if (data->type != QSEECOM_GENERIC) {
7598 pr_err("wipe key req: invalid handle (%d)\n",
7599 data->type);
7600 ret = -EINVAL;
7601 break;
7602 }
7603 if (qseecom.qsee_version < QSEE_VERSION_05) {
7604 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7605 qseecom.qsee_version);
7606 return -EINVAL;
7607 }
7608 data->released = true;
7609 mutex_lock(&app_access_lock);
7610 atomic_inc(&data->ioctl_count);
7611 ret = qseecom_wipe_key(data, argp);
7612 if (ret)
7613 pr_err("failed to wipe encryption key: %d\n", ret);
7614 atomic_dec(&data->ioctl_count);
7615 mutex_unlock(&app_access_lock);
7616 break;
7617 }
7618 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7619 if (!(qseecom.support_pfe || qseecom.support_fde))
7620 pr_err("Features requiring key init not supported\n");
7621 if (data->type != QSEECOM_GENERIC) {
7622 pr_err("update key req: invalid handle (%d)\n",
7623 data->type);
7624 ret = -EINVAL;
7625 break;
7626 }
7627 if (qseecom.qsee_version < QSEE_VERSION_05) {
7628 pr_err("Update Key feature unsupported in qsee ver %u\n",
7629 qseecom.qsee_version);
7630 return -EINVAL;
7631 }
7632 data->released = true;
7633 mutex_lock(&app_access_lock);
7634 atomic_inc(&data->ioctl_count);
7635 ret = qseecom_update_key_user_info(data, argp);
7636 if (ret)
7637 pr_err("failed to update key user info: %d\n", ret);
7638 atomic_dec(&data->ioctl_count);
7639 mutex_unlock(&app_access_lock);
7640 break;
7641 }
7642 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7643 if (data->type != QSEECOM_GENERIC) {
7644 pr_err("save part hash req: invalid handle (%d)\n",
7645 data->type);
7646 ret = -EINVAL;
7647 break;
7648 }
7649 data->released = true;
7650 mutex_lock(&app_access_lock);
7651 atomic_inc(&data->ioctl_count);
7652 ret = qseecom_save_partition_hash(argp);
7653 atomic_dec(&data->ioctl_count);
7654 mutex_unlock(&app_access_lock);
7655 break;
7656 }
7657 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7658 if (data->type != QSEECOM_GENERIC) {
7659 pr_err("ES activated req: invalid handle (%d)\n",
7660 data->type);
7661 ret = -EINVAL;
7662 break;
7663 }
7664 data->released = true;
7665 mutex_lock(&app_access_lock);
7666 atomic_inc(&data->ioctl_count);
7667 ret = qseecom_is_es_activated(argp);
7668 atomic_dec(&data->ioctl_count);
7669 mutex_unlock(&app_access_lock);
7670 break;
7671 }
7672 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7673 if (data->type != QSEECOM_GENERIC) {
7674 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7675 data->type);
7676 ret = -EINVAL;
7677 break;
7678 }
7679 data->released = true;
7680 mutex_lock(&app_access_lock);
7681 atomic_inc(&data->ioctl_count);
7682 ret = qseecom_mdtp_cipher_dip(argp);
7683 atomic_dec(&data->ioctl_count);
7684 mutex_unlock(&app_access_lock);
7685 break;
7686 }
7687 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7688 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7689 if ((data->listener.id == 0) ||
7690 (data->type != QSEECOM_LISTENER_SERVICE)) {
7691 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7692 data->type, data->listener.id);
7693 ret = -EINVAL;
7694 break;
7695 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007696 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007697 atomic_inc(&data->ioctl_count);
7698 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7699 ret = qseecom_send_modfd_resp(data, argp);
7700 else
7701 ret = qseecom_send_modfd_resp_64(data, argp);
7702 atomic_dec(&data->ioctl_count);
7703 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007704 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007705 if (ret)
7706 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7707 __qseecom_clean_data_sglistinfo(data);
7708 break;
7709 }
7710 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7711 if ((data->client.app_id == 0) ||
7712 (data->type != QSEECOM_CLIENT_APP)) {
7713 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7714 data->type, data->client.app_id);
7715 ret = -EINVAL;
7716 break;
7717 }
7718 if (qseecom.qsee_version < QSEE_VERSION_40) {
7719 pr_err("GP feature unsupported: qsee ver %u\n",
7720 qseecom.qsee_version);
7721 return -EINVAL;
7722 }
7723 /* Only one client allowed here at a time */
7724 mutex_lock(&app_access_lock);
7725 atomic_inc(&data->ioctl_count);
7726 ret = qseecom_qteec_open_session(data, argp);
7727 atomic_dec(&data->ioctl_count);
7728 wake_up_all(&data->abort_wq);
7729 mutex_unlock(&app_access_lock);
7730 if (ret)
7731 pr_err("failed open_session_cmd: %d\n", ret);
7732 __qseecom_clean_data_sglistinfo(data);
7733 break;
7734 }
7735 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7736 if ((data->client.app_id == 0) ||
7737 (data->type != QSEECOM_CLIENT_APP)) {
7738 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7739 data->type, data->client.app_id);
7740 ret = -EINVAL;
7741 break;
7742 }
7743 if (qseecom.qsee_version < QSEE_VERSION_40) {
7744 pr_err("GP feature unsupported: qsee ver %u\n",
7745 qseecom.qsee_version);
7746 return -EINVAL;
7747 }
7748 /* Only one client allowed here at a time */
7749 mutex_lock(&app_access_lock);
7750 atomic_inc(&data->ioctl_count);
7751 ret = qseecom_qteec_close_session(data, argp);
7752 atomic_dec(&data->ioctl_count);
7753 wake_up_all(&data->abort_wq);
7754 mutex_unlock(&app_access_lock);
7755 if (ret)
7756 pr_err("failed close_session_cmd: %d\n", ret);
7757 break;
7758 }
7759 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7760 if ((data->client.app_id == 0) ||
7761 (data->type != QSEECOM_CLIENT_APP)) {
7762 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7763 data->type, data->client.app_id);
7764 ret = -EINVAL;
7765 break;
7766 }
7767 if (qseecom.qsee_version < QSEE_VERSION_40) {
7768 pr_err("GP feature unsupported: qsee ver %u\n",
7769 qseecom.qsee_version);
7770 return -EINVAL;
7771 }
7772 /* Only one client allowed here at a time */
7773 mutex_lock(&app_access_lock);
7774 atomic_inc(&data->ioctl_count);
7775 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7776 atomic_dec(&data->ioctl_count);
7777 wake_up_all(&data->abort_wq);
7778 mutex_unlock(&app_access_lock);
7779 if (ret)
7780 pr_err("failed Invoke cmd: %d\n", ret);
7781 __qseecom_clean_data_sglistinfo(data);
7782 break;
7783 }
7784 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7785 if ((data->client.app_id == 0) ||
7786 (data->type != QSEECOM_CLIENT_APP)) {
7787 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7788 data->type, data->client.app_id);
7789 ret = -EINVAL;
7790 break;
7791 }
7792 if (qseecom.qsee_version < QSEE_VERSION_40) {
7793 pr_err("GP feature unsupported: qsee ver %u\n",
7794 qseecom.qsee_version);
7795 return -EINVAL;
7796 }
7797 /* Only one client allowed here at a time */
7798 mutex_lock(&app_access_lock);
7799 atomic_inc(&data->ioctl_count);
7800 ret = qseecom_qteec_request_cancellation(data, argp);
7801 atomic_dec(&data->ioctl_count);
7802 wake_up_all(&data->abort_wq);
7803 mutex_unlock(&app_access_lock);
7804 if (ret)
7805 pr_err("failed request_cancellation: %d\n", ret);
7806 break;
7807 }
7808 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7809 atomic_inc(&data->ioctl_count);
7810 ret = qseecom_get_ce_info(data, argp);
7811 if (ret)
7812 pr_err("failed get fde ce pipe info: %d\n", ret);
7813 atomic_dec(&data->ioctl_count);
7814 break;
7815 }
7816 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7817 atomic_inc(&data->ioctl_count);
7818 ret = qseecom_free_ce_info(data, argp);
7819 if (ret)
7820 pr_err("failed get fde ce pipe info: %d\n", ret);
7821 atomic_dec(&data->ioctl_count);
7822 break;
7823 }
7824 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7825 atomic_inc(&data->ioctl_count);
7826 ret = qseecom_query_ce_info(data, argp);
7827 if (ret)
7828 pr_err("failed get fde ce pipe info: %d\n", ret);
7829 atomic_dec(&data->ioctl_count);
7830 break;
7831 }
7832 default:
7833 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7834 return -EINVAL;
7835 }
7836 return ret;
7837}
7838
7839static int qseecom_open(struct inode *inode, struct file *file)
7840{
7841 int ret = 0;
7842 struct qseecom_dev_handle *data;
7843
7844 data = kzalloc(sizeof(*data), GFP_KERNEL);
7845 if (!data)
7846 return -ENOMEM;
7847 file->private_data = data;
7848 data->abort = 0;
7849 data->type = QSEECOM_GENERIC;
7850 data->released = false;
7851 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7852 data->mode = INACTIVE;
7853 init_waitqueue_head(&data->abort_wq);
7854 atomic_set(&data->ioctl_count, 0);
7855 return ret;
7856}
7857
7858static int qseecom_release(struct inode *inode, struct file *file)
7859{
7860 struct qseecom_dev_handle *data = file->private_data;
7861 int ret = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08007862 bool free_private_data = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007863
7864 if (data->released == false) {
7865 pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
7866 data->type, data->mode, data);
7867 switch (data->type) {
7868 case QSEECOM_LISTENER_SERVICE:
Zhen Kongbcdeda22018-11-16 13:50:51 -08007869 pr_debug("release lsnr svc %d\n", data->listener.id);
7870 free_private_data = false;
7871 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007872 ret = qseecom_unregister_listener(data);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08007873 data->listener.release_called = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08007874 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007875 break;
7876 case QSEECOM_CLIENT_APP:
7877 mutex_lock(&app_access_lock);
7878 ret = qseecom_unload_app(data, true);
7879 mutex_unlock(&app_access_lock);
7880 break;
7881 case QSEECOM_SECURE_SERVICE:
7882 case QSEECOM_GENERIC:
7883 ret = qseecom_unmap_ion_allocated_memory(data);
7884 if (ret)
7885 pr_err("Ion Unmap failed\n");
7886 break;
7887 case QSEECOM_UNAVAILABLE_CLIENT_APP:
7888 break;
7889 default:
7890 pr_err("Unsupported clnt_handle_type %d",
7891 data->type);
7892 break;
7893 }
7894 }
7895
7896 if (qseecom.support_bus_scaling) {
7897 mutex_lock(&qsee_bw_mutex);
7898 if (data->mode != INACTIVE) {
7899 qseecom_unregister_bus_bandwidth_needs(data);
7900 if (qseecom.cumulative_mode == INACTIVE) {
7901 ret = __qseecom_set_msm_bus_request(INACTIVE);
7902 if (ret)
7903 pr_err("Fail to scale down bus\n");
7904 }
7905 }
7906 mutex_unlock(&qsee_bw_mutex);
7907 } else {
7908 if (data->fast_load_enabled == true)
7909 qsee_disable_clock_vote(data, CLK_SFPB);
7910 if (data->perf_enabled == true)
7911 qsee_disable_clock_vote(data, CLK_DFAB);
7912 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007913
Zhen Kongbcdeda22018-11-16 13:50:51 -08007914 if (free_private_data)
7915 kfree(data);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007916 return ret;
7917}
7918
7919#ifdef CONFIG_COMPAT
7920#include "compat_qseecom.c"
7921#else
7922#define compat_qseecom_ioctl NULL
7923#endif
7924
7925static const struct file_operations qseecom_fops = {
7926 .owner = THIS_MODULE,
7927 .unlocked_ioctl = qseecom_ioctl,
7928 .compat_ioctl = compat_qseecom_ioctl,
7929 .open = qseecom_open,
7930 .release = qseecom_release
7931};
7932
7933static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7934{
7935 int rc = 0;
7936 struct device *pdev;
7937 struct qseecom_clk *qclk;
7938 char *core_clk_src = NULL;
7939 char *core_clk = NULL;
7940 char *iface_clk = NULL;
7941 char *bus_clk = NULL;
7942
7943 switch (ce) {
7944 case CLK_QSEE: {
7945 core_clk_src = "core_clk_src";
7946 core_clk = "core_clk";
7947 iface_clk = "iface_clk";
7948 bus_clk = "bus_clk";
7949 qclk = &qseecom.qsee;
7950 qclk->instance = CLK_QSEE;
7951 break;
7952 };
7953 case CLK_CE_DRV: {
7954 core_clk_src = "ce_drv_core_clk_src";
7955 core_clk = "ce_drv_core_clk";
7956 iface_clk = "ce_drv_iface_clk";
7957 bus_clk = "ce_drv_bus_clk";
7958 qclk = &qseecom.ce_drv;
7959 qclk->instance = CLK_CE_DRV;
7960 break;
7961 };
7962 default:
7963 pr_err("Invalid ce hw instance: %d!\n", ce);
7964 return -EIO;
7965 }
7966
7967 if (qseecom.no_clock_support) {
7968 qclk->ce_core_clk = NULL;
7969 qclk->ce_clk = NULL;
7970 qclk->ce_bus_clk = NULL;
7971 qclk->ce_core_src_clk = NULL;
7972 return 0;
7973 }
7974
7975 pdev = qseecom.pdev;
7976
7977 /* Get CE3 src core clk. */
7978 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7979 if (!IS_ERR(qclk->ce_core_src_clk)) {
7980 rc = clk_set_rate(qclk->ce_core_src_clk,
7981 qseecom.ce_opp_freq_hz);
7982 if (rc) {
7983 clk_put(qclk->ce_core_src_clk);
7984 qclk->ce_core_src_clk = NULL;
7985 pr_err("Unable to set the core src clk @%uMhz.\n",
7986 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7987 return -EIO;
7988 }
7989 } else {
7990 pr_warn("Unable to get CE core src clk, set to NULL\n");
7991 qclk->ce_core_src_clk = NULL;
7992 }
7993
7994 /* Get CE core clk */
7995 qclk->ce_core_clk = clk_get(pdev, core_clk);
7996 if (IS_ERR(qclk->ce_core_clk)) {
7997 rc = PTR_ERR(qclk->ce_core_clk);
7998 pr_err("Unable to get CE core clk\n");
7999 if (qclk->ce_core_src_clk != NULL)
8000 clk_put(qclk->ce_core_src_clk);
8001 return -EIO;
8002 }
8003
8004 /* Get CE Interface clk */
8005 qclk->ce_clk = clk_get(pdev, iface_clk);
8006 if (IS_ERR(qclk->ce_clk)) {
8007 rc = PTR_ERR(qclk->ce_clk);
8008 pr_err("Unable to get CE interface clk\n");
8009 if (qclk->ce_core_src_clk != NULL)
8010 clk_put(qclk->ce_core_src_clk);
8011 clk_put(qclk->ce_core_clk);
8012 return -EIO;
8013 }
8014
8015 /* Get CE AXI clk */
8016 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
8017 if (IS_ERR(qclk->ce_bus_clk)) {
8018 rc = PTR_ERR(qclk->ce_bus_clk);
8019 pr_err("Unable to get CE BUS interface clk\n");
8020 if (qclk->ce_core_src_clk != NULL)
8021 clk_put(qclk->ce_core_src_clk);
8022 clk_put(qclk->ce_core_clk);
8023 clk_put(qclk->ce_clk);
8024 return -EIO;
8025 }
8026
8027 return rc;
8028}
8029
8030static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
8031{
8032 struct qseecom_clk *qclk;
8033
8034 if (ce == CLK_QSEE)
8035 qclk = &qseecom.qsee;
8036 else
8037 qclk = &qseecom.ce_drv;
8038
8039 if (qclk->ce_clk != NULL) {
8040 clk_put(qclk->ce_clk);
8041 qclk->ce_clk = NULL;
8042 }
8043 if (qclk->ce_core_clk != NULL) {
8044 clk_put(qclk->ce_core_clk);
8045 qclk->ce_core_clk = NULL;
8046 }
8047 if (qclk->ce_bus_clk != NULL) {
8048 clk_put(qclk->ce_bus_clk);
8049 qclk->ce_bus_clk = NULL;
8050 }
8051 if (qclk->ce_core_src_clk != NULL) {
8052 clk_put(qclk->ce_core_src_clk);
8053 qclk->ce_core_src_clk = NULL;
8054 }
8055 qclk->instance = CLK_INVALID;
8056}
8057
8058static int qseecom_retrieve_ce_data(struct platform_device *pdev)
8059{
8060 int rc = 0;
8061 uint32_t hlos_num_ce_hw_instances;
8062 uint32_t disk_encrypt_pipe;
8063 uint32_t file_encrypt_pipe;
Zhen Kongffec45c2017-10-18 14:05:53 -07008064 uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008065 int i;
8066 const int *tbl;
8067 int size;
8068 int entry;
8069 struct qseecom_crypto_info *pfde_tbl = NULL;
8070 struct qseecom_crypto_info *p;
8071 int tbl_size;
8072 int j;
8073 bool old_db = true;
8074 struct qseecom_ce_info_use *pce_info_use;
8075 uint32_t *unit_tbl = NULL;
8076 int total_units = 0;
8077 struct qseecom_ce_pipe_entry *pce_entry;
8078
8079 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
8080 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
8081
8082 if (of_property_read_u32((&pdev->dev)->of_node,
8083 "qcom,qsee-ce-hw-instance",
8084 &qseecom.ce_info.qsee_ce_hw_instance)) {
8085 pr_err("Fail to get qsee ce hw instance information.\n");
8086 rc = -EINVAL;
8087 goto out;
8088 } else {
8089 pr_debug("qsee-ce-hw-instance=0x%x\n",
8090 qseecom.ce_info.qsee_ce_hw_instance);
8091 }
8092
8093 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
8094 "qcom,support-fde");
8095 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
8096 "qcom,support-pfe");
8097
8098 if (!qseecom.support_pfe && !qseecom.support_fde) {
8099 pr_warn("Device does not support PFE/FDE");
8100 goto out;
8101 }
8102
8103 if (qseecom.support_fde)
8104 tbl = of_get_property((&pdev->dev)->of_node,
8105 "qcom,full-disk-encrypt-info", &size);
8106 else
8107 tbl = NULL;
8108 if (tbl) {
8109 old_db = false;
8110 if (size % sizeof(struct qseecom_crypto_info)) {
8111 pr_err("full-disk-encrypt-info tbl size(%d)\n",
8112 size);
8113 rc = -EINVAL;
8114 goto out;
8115 }
8116 tbl_size = size / sizeof
8117 (struct qseecom_crypto_info);
8118
8119 pfde_tbl = kzalloc(size, GFP_KERNEL);
8120 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8121 total_units = 0;
8122
8123 if (!pfde_tbl || !unit_tbl) {
8124 pr_err("failed to alloc memory\n");
8125 rc = -ENOMEM;
8126 goto out;
8127 }
8128 if (of_property_read_u32_array((&pdev->dev)->of_node,
8129 "qcom,full-disk-encrypt-info",
8130 (u32 *)pfde_tbl, size/sizeof(u32))) {
8131 pr_err("failed to read full-disk-encrypt-info tbl\n");
8132 rc = -EINVAL;
8133 goto out;
8134 }
8135
8136 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8137 for (j = 0; j < total_units; j++) {
8138 if (p->unit_num == *(unit_tbl + j))
8139 break;
8140 }
8141 if (j == total_units) {
8142 *(unit_tbl + total_units) = p->unit_num;
8143 total_units++;
8144 }
8145 }
8146
8147 qseecom.ce_info.num_fde = total_units;
8148 pce_info_use = qseecom.ce_info.fde = kcalloc(
8149 total_units, sizeof(struct qseecom_ce_info_use),
8150 GFP_KERNEL);
8151 if (!pce_info_use) {
8152 pr_err("failed to alloc memory\n");
8153 rc = -ENOMEM;
8154 goto out;
8155 }
8156
8157 for (j = 0; j < total_units; j++, pce_info_use++) {
8158 pce_info_use->unit_num = *(unit_tbl + j);
8159 pce_info_use->alloc = false;
8160 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8161 pce_info_use->num_ce_pipe_entries = 0;
8162 pce_info_use->ce_pipe_entry = NULL;
8163 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8164 if (p->unit_num == pce_info_use->unit_num)
8165 pce_info_use->num_ce_pipe_entries++;
8166 }
8167
8168 entry = pce_info_use->num_ce_pipe_entries;
8169 pce_entry = pce_info_use->ce_pipe_entry =
8170 kcalloc(entry,
8171 sizeof(struct qseecom_ce_pipe_entry),
8172 GFP_KERNEL);
8173 if (pce_entry == NULL) {
8174 pr_err("failed to alloc memory\n");
8175 rc = -ENOMEM;
8176 goto out;
8177 }
8178
8179 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8180 if (p->unit_num == pce_info_use->unit_num) {
8181 pce_entry->ce_num = p->ce;
8182 pce_entry->ce_pipe_pair =
8183 p->pipe_pair;
8184 pce_entry->valid = true;
8185 pce_entry++;
8186 }
8187 }
8188 }
8189 kfree(unit_tbl);
8190 unit_tbl = NULL;
8191 kfree(pfde_tbl);
8192 pfde_tbl = NULL;
8193 }
8194
8195 if (qseecom.support_pfe)
8196 tbl = of_get_property((&pdev->dev)->of_node,
8197 "qcom,per-file-encrypt-info", &size);
8198 else
8199 tbl = NULL;
8200 if (tbl) {
8201 old_db = false;
8202 if (size % sizeof(struct qseecom_crypto_info)) {
8203 pr_err("per-file-encrypt-info tbl size(%d)\n",
8204 size);
8205 rc = -EINVAL;
8206 goto out;
8207 }
8208 tbl_size = size / sizeof
8209 (struct qseecom_crypto_info);
8210
8211 pfde_tbl = kzalloc(size, GFP_KERNEL);
8212 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8213 total_units = 0;
8214 if (!pfde_tbl || !unit_tbl) {
8215 pr_err("failed to alloc memory\n");
8216 rc = -ENOMEM;
8217 goto out;
8218 }
8219 if (of_property_read_u32_array((&pdev->dev)->of_node,
8220 "qcom,per-file-encrypt-info",
8221 (u32 *)pfde_tbl, size/sizeof(u32))) {
8222 pr_err("failed to read per-file-encrypt-info tbl\n");
8223 rc = -EINVAL;
8224 goto out;
8225 }
8226
8227 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8228 for (j = 0; j < total_units; j++) {
8229 if (p->unit_num == *(unit_tbl + j))
8230 break;
8231 }
8232 if (j == total_units) {
8233 *(unit_tbl + total_units) = p->unit_num;
8234 total_units++;
8235 }
8236 }
8237
8238 qseecom.ce_info.num_pfe = total_units;
8239 pce_info_use = qseecom.ce_info.pfe = kcalloc(
8240 total_units, sizeof(struct qseecom_ce_info_use),
8241 GFP_KERNEL);
8242 if (!pce_info_use) {
8243 pr_err("failed to alloc memory\n");
8244 rc = -ENOMEM;
8245 goto out;
8246 }
8247
8248 for (j = 0; j < total_units; j++, pce_info_use++) {
8249 pce_info_use->unit_num = *(unit_tbl + j);
8250 pce_info_use->alloc = false;
8251 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8252 pce_info_use->num_ce_pipe_entries = 0;
8253 pce_info_use->ce_pipe_entry = NULL;
8254 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8255 if (p->unit_num == pce_info_use->unit_num)
8256 pce_info_use->num_ce_pipe_entries++;
8257 }
8258
8259 entry = pce_info_use->num_ce_pipe_entries;
8260 pce_entry = pce_info_use->ce_pipe_entry =
8261 kcalloc(entry,
8262 sizeof(struct qseecom_ce_pipe_entry),
8263 GFP_KERNEL);
8264 if (pce_entry == NULL) {
8265 pr_err("failed to alloc memory\n");
8266 rc = -ENOMEM;
8267 goto out;
8268 }
8269
8270 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8271 if (p->unit_num == pce_info_use->unit_num) {
8272 pce_entry->ce_num = p->ce;
8273 pce_entry->ce_pipe_pair =
8274 p->pipe_pair;
8275 pce_entry->valid = true;
8276 pce_entry++;
8277 }
8278 }
8279 }
8280 kfree(unit_tbl);
8281 unit_tbl = NULL;
8282 kfree(pfde_tbl);
8283 pfde_tbl = NULL;
8284 }
8285
8286 if (!old_db)
8287 goto out1;
8288
8289 if (of_property_read_bool((&pdev->dev)->of_node,
8290 "qcom,support-multiple-ce-hw-instance")) {
8291 if (of_property_read_u32((&pdev->dev)->of_node,
8292 "qcom,hlos-num-ce-hw-instances",
8293 &hlos_num_ce_hw_instances)) {
8294 pr_err("Fail: get hlos number of ce hw instance\n");
8295 rc = -EINVAL;
8296 goto out;
8297 }
8298 } else {
8299 hlos_num_ce_hw_instances = 1;
8300 }
8301
8302 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8303 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8304 MAX_CE_PIPE_PAIR_PER_UNIT);
8305 rc = -EINVAL;
8306 goto out;
8307 }
8308
8309 if (of_property_read_u32_array((&pdev->dev)->of_node,
8310 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8311 hlos_num_ce_hw_instances)) {
8312 pr_err("Fail: get hlos ce hw instance info\n");
8313 rc = -EINVAL;
8314 goto out;
8315 }
8316
8317 if (qseecom.support_fde) {
8318 pce_info_use = qseecom.ce_info.fde =
8319 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8320 if (!pce_info_use) {
8321 pr_err("failed to alloc memory\n");
8322 rc = -ENOMEM;
8323 goto out;
8324 }
8325 /* by default for old db */
8326 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8327 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8328 pce_info_use->alloc = false;
8329 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8330 pce_info_use->ce_pipe_entry = NULL;
8331 if (of_property_read_u32((&pdev->dev)->of_node,
8332 "qcom,disk-encrypt-pipe-pair",
8333 &disk_encrypt_pipe)) {
8334 pr_err("Fail to get FDE pipe information.\n");
8335 rc = -EINVAL;
8336 goto out;
8337 } else {
8338 pr_debug("disk-encrypt-pipe-pair=0x%x",
8339 disk_encrypt_pipe);
8340 }
8341 entry = pce_info_use->num_ce_pipe_entries =
8342 hlos_num_ce_hw_instances;
8343 pce_entry = pce_info_use->ce_pipe_entry =
8344 kcalloc(entry,
8345 sizeof(struct qseecom_ce_pipe_entry),
8346 GFP_KERNEL);
8347 if (pce_entry == NULL) {
8348 pr_err("failed to alloc memory\n");
8349 rc = -ENOMEM;
8350 goto out;
8351 }
8352 for (i = 0; i < entry; i++) {
8353 pce_entry->ce_num = hlos_ce_hw_instance[i];
8354 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8355 pce_entry->valid = 1;
8356 pce_entry++;
8357 }
8358 } else {
8359 pr_warn("Device does not support FDE");
8360 disk_encrypt_pipe = 0xff;
8361 }
8362 if (qseecom.support_pfe) {
8363 pce_info_use = qseecom.ce_info.pfe =
8364 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8365 if (!pce_info_use) {
8366 pr_err("failed to alloc memory\n");
8367 rc = -ENOMEM;
8368 goto out;
8369 }
8370 /* by default for old db */
8371 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8372 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8373 pce_info_use->alloc = false;
8374 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8375 pce_info_use->ce_pipe_entry = NULL;
8376
8377 if (of_property_read_u32((&pdev->dev)->of_node,
8378 "qcom,file-encrypt-pipe-pair",
8379 &file_encrypt_pipe)) {
8380 pr_err("Fail to get PFE pipe information.\n");
8381 rc = -EINVAL;
8382 goto out;
8383 } else {
8384 pr_debug("file-encrypt-pipe-pair=0x%x",
8385 file_encrypt_pipe);
8386 }
8387 entry = pce_info_use->num_ce_pipe_entries =
8388 hlos_num_ce_hw_instances;
8389 pce_entry = pce_info_use->ce_pipe_entry =
8390 kcalloc(entry,
8391 sizeof(struct qseecom_ce_pipe_entry),
8392 GFP_KERNEL);
8393 if (pce_entry == NULL) {
8394 pr_err("failed to alloc memory\n");
8395 rc = -ENOMEM;
8396 goto out;
8397 }
8398 for (i = 0; i < entry; i++) {
8399 pce_entry->ce_num = hlos_ce_hw_instance[i];
8400 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8401 pce_entry->valid = 1;
8402 pce_entry++;
8403 }
8404 } else {
8405 pr_warn("Device does not support PFE");
8406 file_encrypt_pipe = 0xff;
8407 }
8408
8409out1:
8410 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8411 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8412out:
8413 if (rc) {
8414 if (qseecom.ce_info.fde) {
8415 pce_info_use = qseecom.ce_info.fde;
8416 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8417 pce_entry = pce_info_use->ce_pipe_entry;
8418 kfree(pce_entry);
8419 pce_info_use++;
8420 }
8421 }
8422 kfree(qseecom.ce_info.fde);
8423 qseecom.ce_info.fde = NULL;
8424 if (qseecom.ce_info.pfe) {
8425 pce_info_use = qseecom.ce_info.pfe;
8426 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8427 pce_entry = pce_info_use->ce_pipe_entry;
8428 kfree(pce_entry);
8429 pce_info_use++;
8430 }
8431 }
8432 kfree(qseecom.ce_info.pfe);
8433 qseecom.ce_info.pfe = NULL;
8434 }
8435 kfree(unit_tbl);
8436 kfree(pfde_tbl);
8437 return rc;
8438}
8439
8440static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8441 void __user *argp)
8442{
8443 struct qseecom_ce_info_req req;
8444 struct qseecom_ce_info_req *pinfo = &req;
8445 int ret = 0;
8446 int i;
8447 unsigned int entries;
8448 struct qseecom_ce_info_use *pce_info_use, *p;
8449 int total = 0;
8450 bool found = false;
8451 struct qseecom_ce_pipe_entry *pce_entry;
8452
8453 ret = copy_from_user(pinfo, argp,
8454 sizeof(struct qseecom_ce_info_req));
8455 if (ret) {
8456 pr_err("copy_from_user failed\n");
8457 return ret;
8458 }
8459
8460 switch (pinfo->usage) {
8461 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8462 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8463 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8464 if (qseecom.support_fde) {
8465 p = qseecom.ce_info.fde;
8466 total = qseecom.ce_info.num_fde;
8467 } else {
8468 pr_err("system does not support fde\n");
8469 return -EINVAL;
8470 }
8471 break;
8472 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8473 if (qseecom.support_pfe) {
8474 p = qseecom.ce_info.pfe;
8475 total = qseecom.ce_info.num_pfe;
8476 } else {
8477 pr_err("system does not support pfe\n");
8478 return -EINVAL;
8479 }
8480 break;
8481 default:
8482 pr_err("unsupported usage %d\n", pinfo->usage);
8483 return -EINVAL;
8484 }
8485
8486 pce_info_use = NULL;
8487 for (i = 0; i < total; i++) {
8488 if (!p->alloc)
8489 pce_info_use = p;
8490 else if (!memcmp(p->handle, pinfo->handle,
8491 MAX_CE_INFO_HANDLE_SIZE)) {
8492 pce_info_use = p;
8493 found = true;
8494 break;
8495 }
8496 p++;
8497 }
8498
8499 if (pce_info_use == NULL)
8500 return -EBUSY;
8501
8502 pinfo->unit_num = pce_info_use->unit_num;
8503 if (!pce_info_use->alloc) {
8504 pce_info_use->alloc = true;
8505 memcpy(pce_info_use->handle,
8506 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8507 }
8508 if (pce_info_use->num_ce_pipe_entries >
8509 MAX_CE_PIPE_PAIR_PER_UNIT)
8510 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8511 else
8512 entries = pce_info_use->num_ce_pipe_entries;
8513 pinfo->num_ce_pipe_entries = entries;
8514 pce_entry = pce_info_use->ce_pipe_entry;
8515 for (i = 0; i < entries; i++, pce_entry++)
8516 pinfo->ce_pipe_entry[i] = *pce_entry;
8517 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8518 pinfo->ce_pipe_entry[i].valid = 0;
8519
8520 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8521 pr_err("copy_to_user failed\n");
8522 ret = -EFAULT;
8523 }
8524 return ret;
8525}
8526
8527static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8528 void __user *argp)
8529{
8530 struct qseecom_ce_info_req req;
8531 struct qseecom_ce_info_req *pinfo = &req;
8532 int ret = 0;
8533 struct qseecom_ce_info_use *p;
8534 int total = 0;
8535 int i;
8536 bool found = false;
8537
8538 ret = copy_from_user(pinfo, argp,
8539 sizeof(struct qseecom_ce_info_req));
8540 if (ret)
8541 return ret;
8542
8543 switch (pinfo->usage) {
8544 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8545 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8546 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8547 if (qseecom.support_fde) {
8548 p = qseecom.ce_info.fde;
8549 total = qseecom.ce_info.num_fde;
8550 } else {
8551 pr_err("system does not support fde\n");
8552 return -EINVAL;
8553 }
8554 break;
8555 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8556 if (qseecom.support_pfe) {
8557 p = qseecom.ce_info.pfe;
8558 total = qseecom.ce_info.num_pfe;
8559 } else {
8560 pr_err("system does not support pfe\n");
8561 return -EINVAL;
8562 }
8563 break;
8564 default:
8565 pr_err("unsupported usage %d\n", pinfo->usage);
8566 return -EINVAL;
8567 }
8568
8569 for (i = 0; i < total; i++) {
8570 if (p->alloc &&
8571 !memcmp(p->handle, pinfo->handle,
8572 MAX_CE_INFO_HANDLE_SIZE)) {
8573 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8574 p->alloc = false;
8575 found = true;
8576 break;
8577 }
8578 p++;
8579 }
8580 return ret;
8581}
8582
8583static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8584 void __user *argp)
8585{
8586 struct qseecom_ce_info_req req;
8587 struct qseecom_ce_info_req *pinfo = &req;
8588 int ret = 0;
8589 int i;
8590 unsigned int entries;
8591 struct qseecom_ce_info_use *pce_info_use, *p;
8592 int total = 0;
8593 bool found = false;
8594 struct qseecom_ce_pipe_entry *pce_entry;
8595
8596 ret = copy_from_user(pinfo, argp,
8597 sizeof(struct qseecom_ce_info_req));
8598 if (ret)
8599 return ret;
8600
8601 switch (pinfo->usage) {
8602 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8603 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8604 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8605 if (qseecom.support_fde) {
8606 p = qseecom.ce_info.fde;
8607 total = qseecom.ce_info.num_fde;
8608 } else {
8609 pr_err("system does not support fde\n");
8610 return -EINVAL;
8611 }
8612 break;
8613 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8614 if (qseecom.support_pfe) {
8615 p = qseecom.ce_info.pfe;
8616 total = qseecom.ce_info.num_pfe;
8617 } else {
8618 pr_err("system does not support pfe\n");
8619 return -EINVAL;
8620 }
8621 break;
8622 default:
8623 pr_err("unsupported usage %d\n", pinfo->usage);
8624 return -EINVAL;
8625 }
8626
8627 pce_info_use = NULL;
8628 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8629 pinfo->num_ce_pipe_entries = 0;
8630 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8631 pinfo->ce_pipe_entry[i].valid = 0;
8632
8633 for (i = 0; i < total; i++) {
8634
8635 if (p->alloc && !memcmp(p->handle,
8636 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8637 pce_info_use = p;
8638 found = true;
8639 break;
8640 }
8641 p++;
8642 }
8643 if (!pce_info_use)
8644 goto out;
8645 pinfo->unit_num = pce_info_use->unit_num;
8646 if (pce_info_use->num_ce_pipe_entries >
8647 MAX_CE_PIPE_PAIR_PER_UNIT)
8648 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8649 else
8650 entries = pce_info_use->num_ce_pipe_entries;
8651 pinfo->num_ce_pipe_entries = entries;
8652 pce_entry = pce_info_use->ce_pipe_entry;
8653 for (i = 0; i < entries; i++, pce_entry++)
8654 pinfo->ce_pipe_entry[i] = *pce_entry;
8655 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8656 pinfo->ce_pipe_entry[i].valid = 0;
8657out:
8658 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8659 pr_err("copy_to_user failed\n");
8660 ret = -EFAULT;
8661 }
8662 return ret;
8663}
8664
8665/*
8666 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8667 * then whitelist feature is not supported.
8668 */
8669static int qseecom_check_whitelist_feature(void)
8670{
8671 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8672
8673 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8674}
8675
8676static int qseecom_probe(struct platform_device *pdev)
8677{
8678 int rc;
8679 int i;
8680 uint32_t feature = 10;
8681 struct device *class_dev;
8682 struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
8683 struct qseecom_command_scm_resp resp;
8684 struct qseecom_ce_info_use *pce_info_use = NULL;
8685
8686 qseecom.qsee_bw_count = 0;
8687 qseecom.qsee_perf_client = 0;
8688 qseecom.qsee_sfpb_bw_count = 0;
8689
8690 qseecom.qsee.ce_core_clk = NULL;
8691 qseecom.qsee.ce_clk = NULL;
8692 qseecom.qsee.ce_core_src_clk = NULL;
8693 qseecom.qsee.ce_bus_clk = NULL;
8694
8695 qseecom.cumulative_mode = 0;
8696 qseecom.current_mode = INACTIVE;
8697 qseecom.support_bus_scaling = false;
8698 qseecom.support_fde = false;
8699 qseecom.support_pfe = false;
8700
8701 qseecom.ce_drv.ce_core_clk = NULL;
8702 qseecom.ce_drv.ce_clk = NULL;
8703 qseecom.ce_drv.ce_core_src_clk = NULL;
8704 qseecom.ce_drv.ce_bus_clk = NULL;
8705 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8706
8707 qseecom.app_block_ref_cnt = 0;
8708 init_waitqueue_head(&qseecom.app_block_wq);
8709 qseecom.whitelist_support = true;
8710
8711 rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
8712 if (rc < 0) {
8713 pr_err("alloc_chrdev_region failed %d\n", rc);
8714 return rc;
8715 }
8716
8717 driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
8718 if (IS_ERR(driver_class)) {
8719 rc = -ENOMEM;
8720 pr_err("class_create failed %d\n", rc);
8721 goto exit_unreg_chrdev_region;
8722 }
8723
8724 class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
8725 QSEECOM_DEV);
8726 if (IS_ERR(class_dev)) {
8727 pr_err("class_device_create failed %d\n", rc);
8728 rc = -ENOMEM;
8729 goto exit_destroy_class;
8730 }
8731
8732 cdev_init(&qseecom.cdev, &qseecom_fops);
8733 qseecom.cdev.owner = THIS_MODULE;
8734
8735 rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
8736 if (rc < 0) {
8737 pr_err("cdev_add failed %d\n", rc);
8738 goto exit_destroy_device;
8739 }
8740
8741 INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008742 INIT_LIST_HEAD(&qseecom.registered_app_list_head);
8743 spin_lock_init(&qseecom.registered_app_list_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08008744 INIT_LIST_HEAD(&qseecom.unregister_lsnr_pending_list_head);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008745 INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
8746 spin_lock_init(&qseecom.registered_kclient_list_lock);
8747 init_waitqueue_head(&qseecom.send_resp_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08008748 init_waitqueue_head(&qseecom.register_lsnr_pending_wq);
Zhen Kongc4c162a2019-01-23 12:07:12 -08008749 init_waitqueue_head(&qseecom.unregister_lsnr_kthread_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008750 qseecom.send_resp_flag = 0;
8751
8752 qseecom.qsee_version = QSEEE_VERSION_00;
Zhen Kong03f220d2019-02-01 17:12:34 -08008753 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008754 rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
8755 &resp, sizeof(resp));
Zhen Kong03f220d2019-02-01 17:12:34 -08008756 mutex_unlock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008757 pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
8758 if (rc) {
8759 pr_err("Failed to get QSEE version info %d\n", rc);
8760 goto exit_del_cdev;
8761 }
8762 qseecom.qsee_version = resp.result;
8763 qseecom.qseos_version = QSEOS_VERSION_14;
8764 qseecom.commonlib_loaded = false;
8765 qseecom.commonlib64_loaded = false;
8766 qseecom.pdev = class_dev;
8767 /* Create ION msm client */
8768 qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
8769 if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
8770 pr_err("Ion client cannot be created\n");
8771 rc = -ENOMEM;
8772 goto exit_del_cdev;
8773 }
8774
8775 /* register client for bus scaling */
8776 if (pdev->dev.of_node) {
8777 qseecom.pdev->of_node = pdev->dev.of_node;
8778 qseecom.support_bus_scaling =
8779 of_property_read_bool((&pdev->dev)->of_node,
8780 "qcom,support-bus-scaling");
8781 rc = qseecom_retrieve_ce_data(pdev);
8782 if (rc)
8783 goto exit_destroy_ion_client;
8784 qseecom.appsbl_qseecom_support =
8785 of_property_read_bool((&pdev->dev)->of_node,
8786 "qcom,appsbl-qseecom-support");
8787 pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
8788 qseecom.appsbl_qseecom_support);
8789
8790 qseecom.commonlib64_loaded =
8791 of_property_read_bool((&pdev->dev)->of_node,
8792 "qcom,commonlib64-loaded-by-uefi");
8793 pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
8794 qseecom.commonlib64_loaded);
8795 qseecom.fde_key_size =
8796 of_property_read_bool((&pdev->dev)->of_node,
8797 "qcom,fde-key-size");
8798 qseecom.no_clock_support =
8799 of_property_read_bool((&pdev->dev)->of_node,
8800 "qcom,no-clock-support");
8801 if (!qseecom.no_clock_support) {
8802 pr_info("qseecom clocks handled by other subsystem\n");
8803 } else {
8804 pr_info("no-clock-support=0x%x",
8805 qseecom.no_clock_support);
8806 }
8807
8808 if (of_property_read_u32((&pdev->dev)->of_node,
8809 "qcom,qsee-reentrancy-support",
8810 &qseecom.qsee_reentrancy_support)) {
8811 pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
8812 qseecom.qsee_reentrancy_support = 0;
8813 } else {
8814 pr_warn("qseecom.qsee_reentrancy_support = %d\n",
8815 qseecom.qsee_reentrancy_support);
8816 }
8817
Jiten Patela7bb1d52018-05-11 12:34:26 +05308818 qseecom.enable_key_wrap_in_ks =
8819 of_property_read_bool((&pdev->dev)->of_node,
8820 "qcom,enable-key-wrap-in-ks");
8821 if (qseecom.enable_key_wrap_in_ks) {
8822 pr_warn("qseecom.enable_key_wrap_in_ks = %d\n",
8823 qseecom.enable_key_wrap_in_ks);
8824 }
8825
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008826 /*
8827 * The qseecom bus scaling flag can not be enabled when
8828 * crypto clock is not handled by HLOS.
8829 */
8830 if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
8831 pr_err("support_bus_scaling flag can not be enabled.\n");
8832 rc = -EINVAL;
8833 goto exit_destroy_ion_client;
8834 }
8835
8836 if (of_property_read_u32((&pdev->dev)->of_node,
8837 "qcom,ce-opp-freq",
8838 &qseecom.ce_opp_freq_hz)) {
8839 pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
8840 qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
8841 }
8842 rc = __qseecom_init_clk(CLK_QSEE);
8843 if (rc)
8844 goto exit_destroy_ion_client;
8845
8846 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8847 (qseecom.support_pfe || qseecom.support_fde)) {
8848 rc = __qseecom_init_clk(CLK_CE_DRV);
8849 if (rc) {
8850 __qseecom_deinit_clk(CLK_QSEE);
8851 goto exit_destroy_ion_client;
8852 }
8853 } else {
8854 struct qseecom_clk *qclk;
8855
8856 qclk = &qseecom.qsee;
8857 qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
8858 qseecom.ce_drv.ce_clk = qclk->ce_clk;
8859 qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
8860 qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
8861 }
8862
8863 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8864 msm_bus_cl_get_pdata(pdev);
8865 if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
8866 (!qseecom.is_apps_region_protected &&
8867 !qseecom.appsbl_qseecom_support)) {
8868 struct resource *resource = NULL;
8869 struct qsee_apps_region_info_ireq req;
8870 struct qsee_apps_region_info_64bit_ireq req_64bit;
8871 struct qseecom_command_scm_resp resp;
8872 void *cmd_buf = NULL;
8873 size_t cmd_len;
8874
8875 resource = platform_get_resource_byname(pdev,
8876 IORESOURCE_MEM, "secapp-region");
8877 if (resource) {
8878 if (qseecom.qsee_version < QSEE_VERSION_40) {
8879 req.qsee_cmd_id =
8880 QSEOS_APP_REGION_NOTIFICATION;
8881 req.addr = (uint32_t)resource->start;
8882 req.size = resource_size(resource);
8883 cmd_buf = (void *)&req;
8884 cmd_len = sizeof(struct
8885 qsee_apps_region_info_ireq);
8886 pr_warn("secure app region addr=0x%x size=0x%x",
8887 req.addr, req.size);
8888 } else {
8889 req_64bit.qsee_cmd_id =
8890 QSEOS_APP_REGION_NOTIFICATION;
8891 req_64bit.addr = resource->start;
8892 req_64bit.size = resource_size(
8893 resource);
8894 cmd_buf = (void *)&req_64bit;
8895 cmd_len = sizeof(struct
8896 qsee_apps_region_info_64bit_ireq);
8897 pr_warn("secure app region addr=0x%llx size=0x%x",
8898 req_64bit.addr, req_64bit.size);
8899 }
8900 } else {
8901 pr_err("Fail to get secure app region info\n");
8902 rc = -EINVAL;
8903 goto exit_deinit_clock;
8904 }
8905 rc = __qseecom_enable_clk(CLK_QSEE);
8906 if (rc) {
8907 pr_err("CLK_QSEE enabling failed (%d)\n", rc);
8908 rc = -EIO;
8909 goto exit_deinit_clock;
8910 }
Zhen Kong03f220d2019-02-01 17:12:34 -08008911 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008912 rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
8913 cmd_buf, cmd_len,
8914 &resp, sizeof(resp));
Zhen Kong03f220d2019-02-01 17:12:34 -08008915 mutex_unlock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008916 __qseecom_disable_clk(CLK_QSEE);
8917 if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
8918 pr_err("send secapp reg fail %d resp.res %d\n",
8919 rc, resp.result);
8920 rc = -EINVAL;
8921 goto exit_deinit_clock;
8922 }
8923 }
8924 /*
8925 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
8926 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
8927 * Pls add "qseecom.commonlib64_loaded = true" here too.
8928 */
8929 if (qseecom.is_apps_region_protected ||
8930 qseecom.appsbl_qseecom_support)
8931 qseecom.commonlib_loaded = true;
8932 } else {
8933 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8934 pdev->dev.platform_data;
8935 }
8936 if (qseecom.support_bus_scaling) {
8937 init_timer(&(qseecom.bw_scale_down_timer));
8938 INIT_WORK(&qseecom.bw_inactive_req_ws,
8939 qseecom_bw_inactive_req_work);
8940 qseecom.bw_scale_down_timer.function =
8941 qseecom_scale_bus_bandwidth_timer_callback;
8942 }
8943 qseecom.timer_running = false;
8944 qseecom.qsee_perf_client = msm_bus_scale_register_client(
8945 qseecom_platform_support);
8946
8947 qseecom.whitelist_support = qseecom_check_whitelist_feature();
8948 pr_warn("qseecom.whitelist_support = %d\n",
8949 qseecom.whitelist_support);
8950
8951 if (!qseecom.qsee_perf_client)
8952 pr_err("Unable to register bus client\n");
8953
Zhen Kongc4c162a2019-01-23 12:07:12 -08008954 /*create a kthread to process pending listener unregister task */
8955 qseecom.unregister_lsnr_kthread_task = kthread_run(
8956 __qseecom_unregister_listener_kthread_func,
8957 NULL, "qseecom-unreg-lsnr");
8958 if (IS_ERR(qseecom.unregister_lsnr_kthread_task)) {
8959 pr_err("failed to create kthread to unregister listener\n");
8960 rc = -EINVAL;
8961 goto exit_deinit_clock;
8962 }
8963 atomic_set(&qseecom.unregister_lsnr_kthread_state,
8964 LSNR_UNREG_KT_SLEEP);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008965 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
8966 return 0;
8967
8968exit_deinit_clock:
8969 __qseecom_deinit_clk(CLK_QSEE);
8970 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8971 (qseecom.support_pfe || qseecom.support_fde))
8972 __qseecom_deinit_clk(CLK_CE_DRV);
8973exit_destroy_ion_client:
8974 if (qseecom.ce_info.fde) {
8975 pce_info_use = qseecom.ce_info.fde;
8976 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8977 kzfree(pce_info_use->ce_pipe_entry);
8978 pce_info_use++;
8979 }
8980 kfree(qseecom.ce_info.fde);
8981 }
8982 if (qseecom.ce_info.pfe) {
8983 pce_info_use = qseecom.ce_info.pfe;
8984 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8985 kzfree(pce_info_use->ce_pipe_entry);
8986 pce_info_use++;
8987 }
8988 kfree(qseecom.ce_info.pfe);
8989 }
8990 ion_client_destroy(qseecom.ion_clnt);
8991exit_del_cdev:
8992 cdev_del(&qseecom.cdev);
8993exit_destroy_device:
8994 device_destroy(driver_class, qseecom_device_no);
8995exit_destroy_class:
8996 class_destroy(driver_class);
8997exit_unreg_chrdev_region:
8998 unregister_chrdev_region(qseecom_device_no, 1);
8999 return rc;
9000}
9001
9002static int qseecom_remove(struct platform_device *pdev)
9003{
9004 struct qseecom_registered_kclient_list *kclient = NULL;
Monika Singhe711b162018-04-24 09:54:50 +05309005 struct qseecom_registered_kclient_list *kclient_tmp = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009006 unsigned long flags = 0;
9007 int ret = 0;
9008 int i;
9009 struct qseecom_ce_pipe_entry *pce_entry;
9010 struct qseecom_ce_info_use *pce_info_use;
9011
9012 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
9013 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
9014
Monika Singhe711b162018-04-24 09:54:50 +05309015 list_for_each_entry_safe(kclient, kclient_tmp,
9016 &qseecom.registered_kclient_list_head, list) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009017
9018 /* Break the loop if client handle is NULL */
Zhen Kong9131def2018-07-13 12:02:32 -07009019 if (!kclient->handle) {
9020 list_del(&kclient->list);
9021 kzfree(kclient);
9022 break;
9023 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009024
9025 list_del(&kclient->list);
9026 mutex_lock(&app_access_lock);
9027 ret = qseecom_unload_app(kclient->handle->dev, false);
9028 mutex_unlock(&app_access_lock);
9029 if (!ret) {
9030 kzfree(kclient->handle->dev);
9031 kzfree(kclient->handle);
9032 kzfree(kclient);
9033 }
9034 }
9035
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009036 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
9037
9038 if (qseecom.qseos_version > QSEEE_VERSION_00)
9039 qseecom_unload_commonlib_image();
9040
9041 if (qseecom.qsee_perf_client)
9042 msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
9043 0);
9044 if (pdev->dev.platform_data != NULL)
9045 msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
9046
9047 if (qseecom.support_bus_scaling) {
9048 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9049 del_timer_sync(&qseecom.bw_scale_down_timer);
9050 }
9051
9052 if (qseecom.ce_info.fde) {
9053 pce_info_use = qseecom.ce_info.fde;
9054 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
9055 pce_entry = pce_info_use->ce_pipe_entry;
9056 kfree(pce_entry);
9057 pce_info_use++;
9058 }
9059 }
9060 kfree(qseecom.ce_info.fde);
9061 if (qseecom.ce_info.pfe) {
9062 pce_info_use = qseecom.ce_info.pfe;
9063 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
9064 pce_entry = pce_info_use->ce_pipe_entry;
9065 kfree(pce_entry);
9066 pce_info_use++;
9067 }
9068 }
9069 kfree(qseecom.ce_info.pfe);
9070
9071 /* register client for bus scaling */
9072 if (pdev->dev.of_node) {
9073 __qseecom_deinit_clk(CLK_QSEE);
9074 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
9075 (qseecom.support_pfe || qseecom.support_fde))
9076 __qseecom_deinit_clk(CLK_CE_DRV);
9077 }
9078
9079 ion_client_destroy(qseecom.ion_clnt);
9080
Zhen Kongc4c162a2019-01-23 12:07:12 -08009081 kthread_stop(qseecom.unregister_lsnr_kthread_task);
9082
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009083 cdev_del(&qseecom.cdev);
9084
9085 device_destroy(driver_class, qseecom_device_no);
9086
9087 class_destroy(driver_class);
9088
9089 unregister_chrdev_region(qseecom_device_no, 1);
9090
9091 return ret;
9092}
9093
9094static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
9095{
9096 int ret = 0;
9097 struct qseecom_clk *qclk;
9098
9099 qclk = &qseecom.qsee;
9100 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
9101 if (qseecom.no_clock_support)
9102 return 0;
9103
9104 mutex_lock(&qsee_bw_mutex);
9105 mutex_lock(&clk_access_lock);
9106
9107 if (qseecom.current_mode != INACTIVE) {
9108 ret = msm_bus_scale_client_update_request(
9109 qseecom.qsee_perf_client, INACTIVE);
9110 if (ret)
9111 pr_err("Fail to scale down bus\n");
9112 else
9113 qseecom.current_mode = INACTIVE;
9114 }
9115
9116 if (qclk->clk_access_cnt) {
9117 if (qclk->ce_clk != NULL)
9118 clk_disable_unprepare(qclk->ce_clk);
9119 if (qclk->ce_core_clk != NULL)
9120 clk_disable_unprepare(qclk->ce_core_clk);
9121 if (qclk->ce_bus_clk != NULL)
9122 clk_disable_unprepare(qclk->ce_bus_clk);
9123 }
9124
9125 del_timer_sync(&(qseecom.bw_scale_down_timer));
9126 qseecom.timer_running = false;
9127
9128 mutex_unlock(&clk_access_lock);
9129 mutex_unlock(&qsee_bw_mutex);
9130 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9131
9132 return 0;
9133}
9134
9135static int qseecom_resume(struct platform_device *pdev)
9136{
9137 int mode = 0;
9138 int ret = 0;
9139 struct qseecom_clk *qclk;
9140
9141 qclk = &qseecom.qsee;
9142 if (qseecom.no_clock_support)
9143 goto exit;
9144
9145 mutex_lock(&qsee_bw_mutex);
9146 mutex_lock(&clk_access_lock);
9147 if (qseecom.cumulative_mode >= HIGH)
9148 mode = HIGH;
9149 else
9150 mode = qseecom.cumulative_mode;
9151
9152 if (qseecom.cumulative_mode != INACTIVE) {
9153 ret = msm_bus_scale_client_update_request(
9154 qseecom.qsee_perf_client, mode);
9155 if (ret)
9156 pr_err("Fail to scale up bus to %d\n", mode);
9157 else
9158 qseecom.current_mode = mode;
9159 }
9160
9161 if (qclk->clk_access_cnt) {
9162 if (qclk->ce_core_clk != NULL) {
9163 ret = clk_prepare_enable(qclk->ce_core_clk);
9164 if (ret) {
9165 pr_err("Unable to enable/prep CE core clk\n");
9166 qclk->clk_access_cnt = 0;
9167 goto err;
9168 }
9169 }
9170 if (qclk->ce_clk != NULL) {
9171 ret = clk_prepare_enable(qclk->ce_clk);
9172 if (ret) {
9173 pr_err("Unable to enable/prep CE iface clk\n");
9174 qclk->clk_access_cnt = 0;
9175 goto ce_clk_err;
9176 }
9177 }
9178 if (qclk->ce_bus_clk != NULL) {
9179 ret = clk_prepare_enable(qclk->ce_bus_clk);
9180 if (ret) {
9181 pr_err("Unable to enable/prep CE bus clk\n");
9182 qclk->clk_access_cnt = 0;
9183 goto ce_bus_clk_err;
9184 }
9185 }
9186 }
9187
9188 if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
9189 qseecom.bw_scale_down_timer.expires = jiffies +
9190 msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
9191 mod_timer(&(qseecom.bw_scale_down_timer),
9192 qseecom.bw_scale_down_timer.expires);
9193 qseecom.timer_running = true;
9194 }
9195
9196 mutex_unlock(&clk_access_lock);
9197 mutex_unlock(&qsee_bw_mutex);
9198 goto exit;
9199
9200ce_bus_clk_err:
9201 if (qclk->ce_clk)
9202 clk_disable_unprepare(qclk->ce_clk);
9203ce_clk_err:
9204 if (qclk->ce_core_clk)
9205 clk_disable_unprepare(qclk->ce_core_clk);
9206err:
9207 mutex_unlock(&clk_access_lock);
9208 mutex_unlock(&qsee_bw_mutex);
9209 ret = -EIO;
9210exit:
9211 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
9212 return ret;
9213}
9214
9215static const struct of_device_id qseecom_match[] = {
9216 {
9217 .compatible = "qcom,qseecom",
9218 },
9219 {}
9220};
9221
9222static struct platform_driver qseecom_plat_driver = {
9223 .probe = qseecom_probe,
9224 .remove = qseecom_remove,
9225 .suspend = qseecom_suspend,
9226 .resume = qseecom_resume,
9227 .driver = {
9228 .name = "qseecom",
9229 .owner = THIS_MODULE,
9230 .of_match_table = qseecom_match,
9231 },
9232};
9233
9234static int qseecom_init(void)
9235{
9236 return platform_driver_register(&qseecom_plat_driver);
9237}
9238
9239static void qseecom_exit(void)
9240{
9241 platform_driver_unregister(&qseecom_plat_driver);
9242}
9243
9244MODULE_LICENSE("GPL v2");
9245MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
9246
9247module_init(qseecom_init);
9248module_exit(qseecom_exit);