blob: 6611b22220029ebca537f928ab92272d6e816376 [file] [log] [blame]
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
Zhen Kong87dcf0e2019-01-04 12:34:50 -08004 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
Zhen Kongc4c162a2019-01-23 12:07:12 -080053#include <linux/kthread.h>
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -070054
55#define QSEECOM_DEV "qseecom"
56#define QSEOS_VERSION_14 0x14
57#define QSEEE_VERSION_00 0x400000
58#define QSEE_VERSION_01 0x401000
59#define QSEE_VERSION_02 0x402000
60#define QSEE_VERSION_03 0x403000
61#define QSEE_VERSION_04 0x404000
62#define QSEE_VERSION_05 0x405000
63#define QSEE_VERSION_20 0x800000
64#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
65
66#define QSEE_CE_CLK_100MHZ 100000000
67#define CE_CLK_DIV 1000000
68
Mohamed Sunfeer105a07b2018-08-29 13:52:40 +053069#define QSEECOM_MAX_SG_ENTRY 4096
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -070070#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
71 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
72
73#define QSEECOM_INVALID_KEY_ID 0xff
74
75/* Save partition image hash for authentication check */
76#define SCM_SAVE_PARTITION_HASH_ID 0x01
77
78/* Check if enterprise security is activate */
79#define SCM_IS_ACTIVATED_ID 0x02
80
81/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
82#define SCM_MDTP_CIPHER_DIP 0x01
83
84/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
85#define MAX_DIP 0x20000
86
87#define RPMB_SERVICE 0x2000
88#define SSD_SERVICE 0x3000
89
90#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
91#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
92#define TWO 2
93#define QSEECOM_UFS_ICE_CE_NUM 10
94#define QSEECOM_SDCC_ICE_CE_NUM 20
95#define QSEECOM_ICE_FDE_KEY_INDEX 0
96
97#define PHY_ADDR_4G (1ULL<<32)
98
99#define QSEECOM_STATE_NOT_READY 0
100#define QSEECOM_STATE_SUSPEND 1
101#define QSEECOM_STATE_READY 2
102#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
103
104/*
105 * default ce info unit to 0 for
106 * services which
107 * support only single instance.
108 * Most of services are in this category.
109 */
110#define DEFAULT_CE_INFO_UNIT 0
111#define DEFAULT_NUM_CE_INFO_UNIT 1
112
Jiten Patela7bb1d52018-05-11 12:34:26 +0530113#define FDE_FLAG_POS 4
114#define ENABLE_KEY_WRAP_IN_KS (1 << FDE_FLAG_POS)
115
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700116enum qseecom_clk_definitions {
117 CLK_DFAB = 0,
118 CLK_SFPB,
119};
120
121enum qseecom_ice_key_size_type {
122 QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
123 (0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
124 QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
125 (1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
126 QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
127 (0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
128};
129
130enum qseecom_client_handle_type {
131 QSEECOM_CLIENT_APP = 1,
132 QSEECOM_LISTENER_SERVICE,
133 QSEECOM_SECURE_SERVICE,
134 QSEECOM_GENERIC,
135 QSEECOM_UNAVAILABLE_CLIENT_APP,
136};
137
138enum qseecom_ce_hw_instance {
139 CLK_QSEE = 0,
140 CLK_CE_DRV,
141 CLK_INVALID,
142};
143
Zhen Kongc4c162a2019-01-23 12:07:12 -0800144enum qseecom_listener_unregister_kthread_state {
145 LSNR_UNREG_KT_SLEEP = 0,
146 LSNR_UNREG_KT_WAKEUP,
147};
148
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700149static struct class *driver_class;
150static dev_t qseecom_device_no;
151
152static DEFINE_MUTEX(qsee_bw_mutex);
153static DEFINE_MUTEX(app_access_lock);
154static DEFINE_MUTEX(clk_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -0800155static DEFINE_MUTEX(listener_access_lock);
156
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700157
158struct sglist_info {
159 uint32_t indexAndFlags;
160 uint32_t sizeOrCount;
161};
162
163/*
164 * The 31th bit indicates only one or multiple physical address inside
165 * the request buffer. If it is set, the index locates a single physical addr
166 * inside the request buffer, and `sizeOrCount` is the size of the memory being
167 * shared at that physical address.
168 * Otherwise, the index locates an array of {start, len} pairs (a
169 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
170 * that array.
171 *
172 * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
173 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
174 *
175 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
176 */
177#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
178 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
179
180#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
181
182#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
183
184#define MAKE_WHITELIST_VERSION(major, minor, patch) \
185 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
186
187struct qseecom_registered_listener_list {
188 struct list_head list;
189 struct qseecom_register_listener_req svc;
190 void *user_virt_sb_base;
191 u8 *sb_virt;
192 phys_addr_t sb_phys;
193 size_t sb_length;
194 struct ion_handle *ihandle; /* Retrieve phy addr */
195 wait_queue_head_t rcv_req_wq;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800196 /* rcv_req_flag: 0: ready and empty; 1: received req */
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700197 int rcv_req_flag;
198 int send_resp_flag;
199 bool listener_in_use;
200 /* wq for thread blocked on this listener*/
201 wait_queue_head_t listener_block_app_wq;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800202 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
203 uint32_t sglist_cnt;
204 int abort;
205 bool unregister_pending;
206};
207
208struct qseecom_unregister_pending_list {
209 struct list_head list;
210 struct qseecom_dev_handle *data;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700211};
212
213struct qseecom_registered_app_list {
214 struct list_head list;
215 u32 app_id;
216 u32 ref_cnt;
217 char app_name[MAX_APP_NAME_SIZE];
218 u32 app_arch;
219 bool app_blocked;
Zhen Kongdea10592018-07-30 17:50:10 -0700220 u32 check_block;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700221 u32 blocked_on_listener_id;
222};
223
224struct qseecom_registered_kclient_list {
225 struct list_head list;
226 struct qseecom_handle *handle;
227};
228
229struct qseecom_ce_info_use {
230 unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
231 unsigned int unit_num;
232 unsigned int num_ce_pipe_entries;
233 struct qseecom_ce_pipe_entry *ce_pipe_entry;
234 bool alloc;
235 uint32_t type;
236};
237
238struct ce_hw_usage_info {
239 uint32_t qsee_ce_hw_instance;
240 uint32_t num_fde;
241 struct qseecom_ce_info_use *fde;
242 uint32_t num_pfe;
243 struct qseecom_ce_info_use *pfe;
244};
245
246struct qseecom_clk {
247 enum qseecom_ce_hw_instance instance;
248 struct clk *ce_core_clk;
249 struct clk *ce_clk;
250 struct clk *ce_core_src_clk;
251 struct clk *ce_bus_clk;
252 uint32_t clk_access_cnt;
253};
254
255struct qseecom_control {
256 struct ion_client *ion_clnt; /* Ion client */
257 struct list_head registered_listener_list_head;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700258
259 struct list_head registered_app_list_head;
260 spinlock_t registered_app_list_lock;
261
262 struct list_head registered_kclient_list_head;
263 spinlock_t registered_kclient_list_lock;
264
265 wait_queue_head_t send_resp_wq;
266 int send_resp_flag;
267
268 uint32_t qseos_version;
269 uint32_t qsee_version;
270 struct device *pdev;
271 bool whitelist_support;
272 bool commonlib_loaded;
273 bool commonlib64_loaded;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700274 struct ce_hw_usage_info ce_info;
275
276 int qsee_bw_count;
277 int qsee_sfpb_bw_count;
278
279 uint32_t qsee_perf_client;
280 struct qseecom_clk qsee;
281 struct qseecom_clk ce_drv;
282
283 bool support_bus_scaling;
284 bool support_fde;
285 bool support_pfe;
286 bool fde_key_size;
287 uint32_t cumulative_mode;
288 enum qseecom_bandwidth_request_mode current_mode;
289 struct timer_list bw_scale_down_timer;
290 struct work_struct bw_inactive_req_ws;
291 struct cdev cdev;
292 bool timer_running;
293 bool no_clock_support;
294 unsigned int ce_opp_freq_hz;
295 bool appsbl_qseecom_support;
296 uint32_t qsee_reentrancy_support;
Jiten Patela7bb1d52018-05-11 12:34:26 +0530297 bool enable_key_wrap_in_ks;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700298
299 uint32_t app_block_ref_cnt;
300 wait_queue_head_t app_block_wq;
301 atomic_t qseecom_state;
302 int is_apps_region_protected;
Zhen Kong2f60f492017-06-29 15:22:14 -0700303 bool smcinvoke_support;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800304
305 struct list_head unregister_lsnr_pending_list_head;
306 wait_queue_head_t register_lsnr_pending_wq;
Zhen Kongc4c162a2019-01-23 12:07:12 -0800307 struct task_struct *unregister_lsnr_kthread_task;
308 wait_queue_head_t unregister_lsnr_kthread_wq;
309 atomic_t unregister_lsnr_kthread_state;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700310};
311
312struct qseecom_sec_buf_fd_info {
313 bool is_sec_buf_fd;
314 size_t size;
315 void *vbase;
316 dma_addr_t pbase;
317};
318
319struct qseecom_param_memref {
320 uint32_t buffer;
321 uint32_t size;
322};
323
324struct qseecom_client_handle {
325 u32 app_id;
326 u8 *sb_virt;
327 phys_addr_t sb_phys;
328 unsigned long user_virt_sb_base;
329 size_t sb_length;
330 struct ion_handle *ihandle; /* Retrieve phy addr */
331 char app_name[MAX_APP_NAME_SIZE];
332 u32 app_arch;
333 struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
334};
335
336struct qseecom_listener_handle {
337 u32 id;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800338 bool unregister_pending;
Zhen Kong87dcf0e2019-01-04 12:34:50 -0800339 bool release_called;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700340};
341
342static struct qseecom_control qseecom;
343
344struct qseecom_dev_handle {
345 enum qseecom_client_handle_type type;
346 union {
347 struct qseecom_client_handle client;
348 struct qseecom_listener_handle listener;
349 };
350 bool released;
351 int abort;
352 wait_queue_head_t abort_wq;
353 atomic_t ioctl_count;
354 bool perf_enabled;
355 bool fast_load_enabled;
356 enum qseecom_bandwidth_request_mode mode;
357 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
358 uint32_t sglist_cnt;
359 bool use_legacy_cmd;
360};
361
362struct qseecom_key_id_usage_desc {
363 uint8_t desc[QSEECOM_KEY_ID_SIZE];
364};
365
366struct qseecom_crypto_info {
367 unsigned int unit_num;
368 unsigned int ce;
369 unsigned int pipe_pair;
370};
371
372static struct qseecom_key_id_usage_desc key_id_array[] = {
373 {
374 .desc = "Undefined Usage Index",
375 },
376
377 {
378 .desc = "Full Disk Encryption",
379 },
380
381 {
382 .desc = "Per File Encryption",
383 },
384
385 {
386 .desc = "UFS ICE Full Disk Encryption",
387 },
388
389 {
390 .desc = "SDCC ICE Full Disk Encryption",
391 },
392};
393
394/* Function proto types */
395static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
396static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
397static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
398static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
399static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
400static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
401 char *cmnlib_name);
402static int qseecom_enable_ice_setup(int usage);
403static int qseecom_disable_ice_setup(int usage);
404static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
405static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
406 void __user *argp);
407static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
408 void __user *argp);
409static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
410 void __user *argp);
411
412static int get_qseecom_keymaster_status(char *str)
413{
414 get_option(&str, &qseecom.is_apps_region_protected);
415 return 1;
416}
417__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
418
Zhen Kong03f220d2019-02-01 17:12:34 -0800419
420#define QSEECOM_SCM_EBUSY_WAIT_MS 30
421#define QSEECOM_SCM_EBUSY_MAX_RETRY 67
422
423static int __qseecom_scm_call2_locked(uint32_t smc_id, struct scm_desc *desc)
424{
425 int ret = 0;
426 int retry_count = 0;
427
428 do {
429 ret = scm_call2_noretry(smc_id, desc);
430 if (ret == -EBUSY) {
431 mutex_unlock(&app_access_lock);
432 msleep(QSEECOM_SCM_EBUSY_WAIT_MS);
433 mutex_lock(&app_access_lock);
434 }
435 if (retry_count == 33)
436 pr_warn("secure world has been busy for 1 second!\n");
437 } while (ret == -EBUSY &&
438 (retry_count++ < QSEECOM_SCM_EBUSY_MAX_RETRY));
439 return ret;
440}
441
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700442static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
443 const void *req_buf, void *resp_buf)
444{
445 int ret = 0;
446 uint32_t smc_id = 0;
447 uint32_t qseos_cmd_id = 0;
448 struct scm_desc desc = {0};
449 struct qseecom_command_scm_resp *scm_resp = NULL;
450
451 if (!req_buf || !resp_buf) {
452 pr_err("Invalid buffer pointer\n");
453 return -EINVAL;
454 }
455 qseos_cmd_id = *(uint32_t *)req_buf;
456 scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
457
458 switch (svc_id) {
459 case 6: {
460 if (tz_cmd_id == 3) {
461 smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
462 desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
463 desc.args[0] = *(uint32_t *)req_buf;
464 } else {
465 pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
466 svc_id, tz_cmd_id);
467 return -EINVAL;
468 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800469 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700470 break;
471 }
472 case SCM_SVC_ES: {
473 switch (tz_cmd_id) {
474 case SCM_SAVE_PARTITION_HASH_ID: {
475 u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
476 struct qseecom_save_partition_hash_req *p_hash_req =
477 (struct qseecom_save_partition_hash_req *)
478 req_buf;
479 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
480
481 if (!tzbuf)
482 return -ENOMEM;
483 memset(tzbuf, 0, tzbuflen);
484 memcpy(tzbuf, p_hash_req->digest,
485 SHA256_DIGEST_LENGTH);
486 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
487 smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
488 desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
489 desc.args[0] = p_hash_req->partition_id;
490 desc.args[1] = virt_to_phys(tzbuf);
491 desc.args[2] = SHA256_DIGEST_LENGTH;
Zhen Kong03f220d2019-02-01 17:12:34 -0800492 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700493 kzfree(tzbuf);
494 break;
495 }
496 default: {
497 pr_err("tz_cmd_id %d is not supported by scm_call2\n",
498 tz_cmd_id);
499 ret = -EINVAL;
500 break;
501 }
502 } /* end of switch (tz_cmd_id) */
503 break;
504 } /* end of case SCM_SVC_ES */
505 case SCM_SVC_TZSCHEDULER: {
506 switch (qseos_cmd_id) {
507 case QSEOS_APP_START_COMMAND: {
508 struct qseecom_load_app_ireq *req;
509 struct qseecom_load_app_64bit_ireq *req_64bit;
510
511 smc_id = TZ_OS_APP_START_ID;
512 desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
513 if (qseecom.qsee_version < QSEE_VERSION_40) {
514 req = (struct qseecom_load_app_ireq *)req_buf;
515 desc.args[0] = req->mdt_len;
516 desc.args[1] = req->img_len;
517 desc.args[2] = req->phy_addr;
518 } else {
519 req_64bit =
520 (struct qseecom_load_app_64bit_ireq *)
521 req_buf;
522 desc.args[0] = req_64bit->mdt_len;
523 desc.args[1] = req_64bit->img_len;
524 desc.args[2] = req_64bit->phy_addr;
525 }
526 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800527 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700528 break;
529 }
530 case QSEOS_APP_SHUTDOWN_COMMAND: {
531 struct qseecom_unload_app_ireq *req;
532
533 req = (struct qseecom_unload_app_ireq *)req_buf;
534 smc_id = TZ_OS_APP_SHUTDOWN_ID;
535 desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
536 desc.args[0] = req->app_id;
Zhen Kongaf127672019-06-10 13:06:41 -0700537 ret = scm_call2(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700538 break;
539 }
540 case QSEOS_APP_LOOKUP_COMMAND: {
541 struct qseecom_check_app_ireq *req;
542 u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
543 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
544
545 if (!tzbuf)
546 return -ENOMEM;
547 req = (struct qseecom_check_app_ireq *)req_buf;
548 pr_debug("Lookup app_name = %s\n", req->app_name);
549 strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
550 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
551 smc_id = TZ_OS_APP_LOOKUP_ID;
552 desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
553 desc.args[0] = virt_to_phys(tzbuf);
554 desc.args[1] = strlen(req->app_name);
555 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800556 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700557 kzfree(tzbuf);
558 break;
559 }
560 case QSEOS_APP_REGION_NOTIFICATION: {
561 struct qsee_apps_region_info_ireq *req;
562 struct qsee_apps_region_info_64bit_ireq *req_64bit;
563
564 smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
565 desc.arginfo =
566 TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
567 if (qseecom.qsee_version < QSEE_VERSION_40) {
568 req = (struct qsee_apps_region_info_ireq *)
569 req_buf;
570 desc.args[0] = req->addr;
571 desc.args[1] = req->size;
572 } else {
573 req_64bit =
574 (struct qsee_apps_region_info_64bit_ireq *)
575 req_buf;
576 desc.args[0] = req_64bit->addr;
577 desc.args[1] = req_64bit->size;
578 }
579 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800580 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700581 break;
582 }
583 case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
584 struct qseecom_load_lib_image_ireq *req;
585 struct qseecom_load_lib_image_64bit_ireq *req_64bit;
586
587 smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
588 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
589 if (qseecom.qsee_version < QSEE_VERSION_40) {
590 req = (struct qseecom_load_lib_image_ireq *)
591 req_buf;
592 desc.args[0] = req->mdt_len;
593 desc.args[1] = req->img_len;
594 desc.args[2] = req->phy_addr;
595 } else {
596 req_64bit =
597 (struct qseecom_load_lib_image_64bit_ireq *)
598 req_buf;
599 desc.args[0] = req_64bit->mdt_len;
600 desc.args[1] = req_64bit->img_len;
601 desc.args[2] = req_64bit->phy_addr;
602 }
603 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800604 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700605 break;
606 }
607 case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
608 smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
609 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
610 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800611 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700612 break;
613 }
614 case QSEOS_REGISTER_LISTENER: {
615 struct qseecom_register_listener_ireq *req;
616 struct qseecom_register_listener_64bit_ireq *req_64bit;
617
618 desc.arginfo =
619 TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
620 if (qseecom.qsee_version < QSEE_VERSION_40) {
621 req = (struct qseecom_register_listener_ireq *)
622 req_buf;
623 desc.args[0] = req->listener_id;
624 desc.args[1] = req->sb_ptr;
625 desc.args[2] = req->sb_len;
626 } else {
627 req_64bit =
628 (struct qseecom_register_listener_64bit_ireq *)
629 req_buf;
630 desc.args[0] = req_64bit->listener_id;
631 desc.args[1] = req_64bit->sb_ptr;
632 desc.args[2] = req_64bit->sb_len;
633 }
Zhen Kong2f60f492017-06-29 15:22:14 -0700634 qseecom.smcinvoke_support = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700635 smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
Zhen Kong03f220d2019-02-01 17:12:34 -0800636 ret = __qseecom_scm_call2_locked(smc_id, &desc);
Zhen Kong50a15202019-01-29 14:16:00 -0800637 if (ret == -EIO) {
638 /* smcinvoke is not supported */
Zhen Kong2f60f492017-06-29 15:22:14 -0700639 qseecom.smcinvoke_support = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700640 smc_id = TZ_OS_REGISTER_LISTENER_ID;
Zhen Kong03f220d2019-02-01 17:12:34 -0800641 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700642 }
643 break;
644 }
645 case QSEOS_DEREGISTER_LISTENER: {
646 struct qseecom_unregister_listener_ireq *req;
647
648 req = (struct qseecom_unregister_listener_ireq *)
649 req_buf;
650 smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
651 desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
652 desc.args[0] = req->listener_id;
Zhen Kong03f220d2019-02-01 17:12:34 -0800653 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700654 break;
655 }
656 case QSEOS_LISTENER_DATA_RSP_COMMAND: {
657 struct qseecom_client_listener_data_irsp *req;
658
659 req = (struct qseecom_client_listener_data_irsp *)
660 req_buf;
661 smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
662 desc.arginfo =
663 TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
664 desc.args[0] = req->listener_id;
665 desc.args[1] = req->status;
Zhen Kong03f220d2019-02-01 17:12:34 -0800666 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700667 break;
668 }
669 case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
670 struct qseecom_client_listener_data_irsp *req;
671 struct qseecom_client_listener_data_64bit_irsp *req_64;
672
673 smc_id =
674 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
675 desc.arginfo =
676 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
677 if (qseecom.qsee_version < QSEE_VERSION_40) {
678 req =
679 (struct qseecom_client_listener_data_irsp *)
680 req_buf;
681 desc.args[0] = req->listener_id;
682 desc.args[1] = req->status;
683 desc.args[2] = req->sglistinfo_ptr;
684 desc.args[3] = req->sglistinfo_len;
685 } else {
686 req_64 =
687 (struct qseecom_client_listener_data_64bit_irsp *)
688 req_buf;
689 desc.args[0] = req_64->listener_id;
690 desc.args[1] = req_64->status;
691 desc.args[2] = req_64->sglistinfo_ptr;
692 desc.args[3] = req_64->sglistinfo_len;
693 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800694 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700695 break;
696 }
697 case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
698 struct qseecom_load_app_ireq *req;
699 struct qseecom_load_app_64bit_ireq *req_64bit;
700
701 smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
702 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
703 if (qseecom.qsee_version < QSEE_VERSION_40) {
704 req = (struct qseecom_load_app_ireq *)req_buf;
705 desc.args[0] = req->mdt_len;
706 desc.args[1] = req->img_len;
707 desc.args[2] = req->phy_addr;
708 } else {
709 req_64bit =
710 (struct qseecom_load_app_64bit_ireq *)req_buf;
711 desc.args[0] = req_64bit->mdt_len;
712 desc.args[1] = req_64bit->img_len;
713 desc.args[2] = req_64bit->phy_addr;
714 }
715 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800716 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700717 break;
718 }
719 case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
720 smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
721 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
722 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800723 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700724 break;
725 }
726
727 case QSEOS_CLIENT_SEND_DATA_COMMAND: {
728 struct qseecom_client_send_data_ireq *req;
729 struct qseecom_client_send_data_64bit_ireq *req_64bit;
730
731 smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
732 desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
733 if (qseecom.qsee_version < QSEE_VERSION_40) {
734 req = (struct qseecom_client_send_data_ireq *)
735 req_buf;
736 desc.args[0] = req->app_id;
737 desc.args[1] = req->req_ptr;
738 desc.args[2] = req->req_len;
739 desc.args[3] = req->rsp_ptr;
740 desc.args[4] = req->rsp_len;
741 } else {
742 req_64bit =
743 (struct qseecom_client_send_data_64bit_ireq *)
744 req_buf;
745 desc.args[0] = req_64bit->app_id;
746 desc.args[1] = req_64bit->req_ptr;
747 desc.args[2] = req_64bit->req_len;
748 desc.args[3] = req_64bit->rsp_ptr;
749 desc.args[4] = req_64bit->rsp_len;
750 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800751 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700752 break;
753 }
754 case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
755 struct qseecom_client_send_data_ireq *req;
756 struct qseecom_client_send_data_64bit_ireq *req_64bit;
757
758 smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
759 desc.arginfo =
760 TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
761 if (qseecom.qsee_version < QSEE_VERSION_40) {
762 req = (struct qseecom_client_send_data_ireq *)
763 req_buf;
764 desc.args[0] = req->app_id;
765 desc.args[1] = req->req_ptr;
766 desc.args[2] = req->req_len;
767 desc.args[3] = req->rsp_ptr;
768 desc.args[4] = req->rsp_len;
769 desc.args[5] = req->sglistinfo_ptr;
770 desc.args[6] = req->sglistinfo_len;
771 } else {
772 req_64bit =
773 (struct qseecom_client_send_data_64bit_ireq *)
774 req_buf;
775 desc.args[0] = req_64bit->app_id;
776 desc.args[1] = req_64bit->req_ptr;
777 desc.args[2] = req_64bit->req_len;
778 desc.args[3] = req_64bit->rsp_ptr;
779 desc.args[4] = req_64bit->rsp_len;
780 desc.args[5] = req_64bit->sglistinfo_ptr;
781 desc.args[6] = req_64bit->sglistinfo_len;
782 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800783 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700784 break;
785 }
786 case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
787 struct qseecom_client_send_service_ireq *req;
788
789 req = (struct qseecom_client_send_service_ireq *)
790 req_buf;
791 smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
792 desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
793 desc.args[0] = req->key_type;
794 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800795 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700796 break;
797 }
798 case QSEOS_RPMB_ERASE_COMMAND: {
799 smc_id = TZ_OS_RPMB_ERASE_ID;
800 desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
801 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800802 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700803 break;
804 }
805 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
806 smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
807 desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
808 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800809 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700810 break;
811 }
812 case QSEOS_GENERATE_KEY: {
813 u32 tzbuflen = PAGE_ALIGN(sizeof
814 (struct qseecom_key_generate_ireq) -
815 sizeof(uint32_t));
816 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
817
818 if (!tzbuf)
819 return -ENOMEM;
820 memset(tzbuf, 0, tzbuflen);
821 memcpy(tzbuf, req_buf + sizeof(uint32_t),
822 (sizeof(struct qseecom_key_generate_ireq) -
823 sizeof(uint32_t)));
824 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
825 smc_id = TZ_OS_KS_GEN_KEY_ID;
826 desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
827 desc.args[0] = virt_to_phys(tzbuf);
828 desc.args[1] = tzbuflen;
829 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800830 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700831 kzfree(tzbuf);
832 break;
833 }
834 case QSEOS_DELETE_KEY: {
835 u32 tzbuflen = PAGE_ALIGN(sizeof
836 (struct qseecom_key_delete_ireq) -
837 sizeof(uint32_t));
838 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
839
840 if (!tzbuf)
841 return -ENOMEM;
842 memset(tzbuf, 0, tzbuflen);
843 memcpy(tzbuf, req_buf + sizeof(uint32_t),
844 (sizeof(struct qseecom_key_delete_ireq) -
845 sizeof(uint32_t)));
846 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
847 smc_id = TZ_OS_KS_DEL_KEY_ID;
848 desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
849 desc.args[0] = virt_to_phys(tzbuf);
850 desc.args[1] = tzbuflen;
851 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800852 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700853 kzfree(tzbuf);
854 break;
855 }
856 case QSEOS_SET_KEY: {
857 u32 tzbuflen = PAGE_ALIGN(sizeof
858 (struct qseecom_key_select_ireq) -
859 sizeof(uint32_t));
860 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
861
862 if (!tzbuf)
863 return -ENOMEM;
864 memset(tzbuf, 0, tzbuflen);
865 memcpy(tzbuf, req_buf + sizeof(uint32_t),
866 (sizeof(struct qseecom_key_select_ireq) -
867 sizeof(uint32_t)));
868 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
869 smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
870 desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
871 desc.args[0] = virt_to_phys(tzbuf);
872 desc.args[1] = tzbuflen;
873 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800874 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700875 kzfree(tzbuf);
876 break;
877 }
878 case QSEOS_UPDATE_KEY_USERINFO: {
879 u32 tzbuflen = PAGE_ALIGN(sizeof
880 (struct qseecom_key_userinfo_update_ireq) -
881 sizeof(uint32_t));
882 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
883
884 if (!tzbuf)
885 return -ENOMEM;
886 memset(tzbuf, 0, tzbuflen);
887 memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
888 (struct qseecom_key_userinfo_update_ireq) -
889 sizeof(uint32_t)));
890 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
891 smc_id = TZ_OS_KS_UPDATE_KEY_ID;
892 desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
893 desc.args[0] = virt_to_phys(tzbuf);
894 desc.args[1] = tzbuflen;
895 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800896 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700897 kzfree(tzbuf);
898 break;
899 }
900 case QSEOS_TEE_OPEN_SESSION: {
901 struct qseecom_qteec_ireq *req;
902 struct qseecom_qteec_64bit_ireq *req_64bit;
903
904 smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
905 desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
906 if (qseecom.qsee_version < QSEE_VERSION_40) {
907 req = (struct qseecom_qteec_ireq *)req_buf;
908 desc.args[0] = req->app_id;
909 desc.args[1] = req->req_ptr;
910 desc.args[2] = req->req_len;
911 desc.args[3] = req->resp_ptr;
912 desc.args[4] = req->resp_len;
913 } else {
914 req_64bit = (struct qseecom_qteec_64bit_ireq *)
915 req_buf;
916 desc.args[0] = req_64bit->app_id;
917 desc.args[1] = req_64bit->req_ptr;
918 desc.args[2] = req_64bit->req_len;
919 desc.args[3] = req_64bit->resp_ptr;
920 desc.args[4] = req_64bit->resp_len;
921 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800922 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700923 break;
924 }
925 case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
926 struct qseecom_qteec_ireq *req;
927 struct qseecom_qteec_64bit_ireq *req_64bit;
928
929 smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
930 desc.arginfo =
931 TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
932 if (qseecom.qsee_version < QSEE_VERSION_40) {
933 req = (struct qseecom_qteec_ireq *)req_buf;
934 desc.args[0] = req->app_id;
935 desc.args[1] = req->req_ptr;
936 desc.args[2] = req->req_len;
937 desc.args[3] = req->resp_ptr;
938 desc.args[4] = req->resp_len;
939 desc.args[5] = req->sglistinfo_ptr;
940 desc.args[6] = req->sglistinfo_len;
941 } else {
942 req_64bit = (struct qseecom_qteec_64bit_ireq *)
943 req_buf;
944 desc.args[0] = req_64bit->app_id;
945 desc.args[1] = req_64bit->req_ptr;
946 desc.args[2] = req_64bit->req_len;
947 desc.args[3] = req_64bit->resp_ptr;
948 desc.args[4] = req_64bit->resp_len;
949 desc.args[5] = req_64bit->sglistinfo_ptr;
950 desc.args[6] = req_64bit->sglistinfo_len;
951 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800952 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700953 break;
954 }
955 case QSEOS_TEE_INVOKE_COMMAND: {
956 struct qseecom_qteec_ireq *req;
957 struct qseecom_qteec_64bit_ireq *req_64bit;
958
959 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
960 desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
961 if (qseecom.qsee_version < QSEE_VERSION_40) {
962 req = (struct qseecom_qteec_ireq *)req_buf;
963 desc.args[0] = req->app_id;
964 desc.args[1] = req->req_ptr;
965 desc.args[2] = req->req_len;
966 desc.args[3] = req->resp_ptr;
967 desc.args[4] = req->resp_len;
968 } else {
969 req_64bit = (struct qseecom_qteec_64bit_ireq *)
970 req_buf;
971 desc.args[0] = req_64bit->app_id;
972 desc.args[1] = req_64bit->req_ptr;
973 desc.args[2] = req_64bit->req_len;
974 desc.args[3] = req_64bit->resp_ptr;
975 desc.args[4] = req_64bit->resp_len;
976 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800977 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700978 break;
979 }
980 case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
981 struct qseecom_qteec_ireq *req;
982 struct qseecom_qteec_64bit_ireq *req_64bit;
983
984 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
985 desc.arginfo =
986 TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
987 if (qseecom.qsee_version < QSEE_VERSION_40) {
988 req = (struct qseecom_qteec_ireq *)req_buf;
989 desc.args[0] = req->app_id;
990 desc.args[1] = req->req_ptr;
991 desc.args[2] = req->req_len;
992 desc.args[3] = req->resp_ptr;
993 desc.args[4] = req->resp_len;
994 desc.args[5] = req->sglistinfo_ptr;
995 desc.args[6] = req->sglistinfo_len;
996 } else {
997 req_64bit = (struct qseecom_qteec_64bit_ireq *)
998 req_buf;
999 desc.args[0] = req_64bit->app_id;
1000 desc.args[1] = req_64bit->req_ptr;
1001 desc.args[2] = req_64bit->req_len;
1002 desc.args[3] = req_64bit->resp_ptr;
1003 desc.args[4] = req_64bit->resp_len;
1004 desc.args[5] = req_64bit->sglistinfo_ptr;
1005 desc.args[6] = req_64bit->sglistinfo_len;
1006 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001007 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001008 break;
1009 }
1010 case QSEOS_TEE_CLOSE_SESSION: {
1011 struct qseecom_qteec_ireq *req;
1012 struct qseecom_qteec_64bit_ireq *req_64bit;
1013
1014 smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
1015 desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
1016 if (qseecom.qsee_version < QSEE_VERSION_40) {
1017 req = (struct qseecom_qteec_ireq *)req_buf;
1018 desc.args[0] = req->app_id;
1019 desc.args[1] = req->req_ptr;
1020 desc.args[2] = req->req_len;
1021 desc.args[3] = req->resp_ptr;
1022 desc.args[4] = req->resp_len;
1023 } else {
1024 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1025 req_buf;
1026 desc.args[0] = req_64bit->app_id;
1027 desc.args[1] = req_64bit->req_ptr;
1028 desc.args[2] = req_64bit->req_len;
1029 desc.args[3] = req_64bit->resp_ptr;
1030 desc.args[4] = req_64bit->resp_len;
1031 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001032 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001033 break;
1034 }
1035 case QSEOS_TEE_REQUEST_CANCELLATION: {
1036 struct qseecom_qteec_ireq *req;
1037 struct qseecom_qteec_64bit_ireq *req_64bit;
1038
1039 smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
1040 desc.arginfo =
1041 TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
1042 if (qseecom.qsee_version < QSEE_VERSION_40) {
1043 req = (struct qseecom_qteec_ireq *)req_buf;
1044 desc.args[0] = req->app_id;
1045 desc.args[1] = req->req_ptr;
1046 desc.args[2] = req->req_len;
1047 desc.args[3] = req->resp_ptr;
1048 desc.args[4] = req->resp_len;
1049 } else {
1050 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1051 req_buf;
1052 desc.args[0] = req_64bit->app_id;
1053 desc.args[1] = req_64bit->req_ptr;
1054 desc.args[2] = req_64bit->req_len;
1055 desc.args[3] = req_64bit->resp_ptr;
1056 desc.args[4] = req_64bit->resp_len;
1057 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001058 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001059 break;
1060 }
1061 case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
1062 struct qseecom_continue_blocked_request_ireq *req =
1063 (struct qseecom_continue_blocked_request_ireq *)
1064 req_buf;
Zhen Kong2f60f492017-06-29 15:22:14 -07001065 if (qseecom.smcinvoke_support)
1066 smc_id =
1067 TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
1068 else
1069 smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001070 desc.arginfo =
1071 TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
Zhen Kong2f60f492017-06-29 15:22:14 -07001072 desc.args[0] = req->app_or_session_id;
Zhen Kong03f220d2019-02-01 17:12:34 -08001073 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001074 break;
1075 }
1076 default: {
1077 pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
1078 qseos_cmd_id);
1079 ret = -EINVAL;
1080 break;
1081 }
1082 } /*end of switch (qsee_cmd_id) */
1083 break;
1084 } /*end of case SCM_SVC_TZSCHEDULER*/
1085 default: {
1086 pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
1087 svc_id);
1088 ret = -EINVAL;
1089 break;
1090 }
1091 } /*end of switch svc_id */
1092 scm_resp->result = desc.ret[0];
1093 scm_resp->resp_type = desc.ret[1];
1094 scm_resp->data = desc.ret[2];
1095 pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
1096 svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
1097 pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
1098 scm_resp->result, scm_resp->resp_type, scm_resp->data);
1099 return ret;
1100}
1101
1102
1103static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1104 size_t cmd_len, void *resp_buf, size_t resp_len)
1105{
1106 if (!is_scm_armv8())
1107 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1108 resp_buf, resp_len);
1109 else
1110 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1111}
1112
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001113static struct qseecom_registered_listener_list *__qseecom_find_svc(
1114 int32_t listener_id)
1115{
1116 struct qseecom_registered_listener_list *entry = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001117
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001118 list_for_each_entry(entry,
1119 &qseecom.registered_listener_list_head, list) {
1120 if (entry->svc.listener_id == listener_id)
1121 break;
1122 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001123 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08001124 pr_debug("Service id: %u is not found\n", listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001125 return NULL;
1126 }
1127
1128 return entry;
1129}
1130
1131static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1132 struct qseecom_dev_handle *handle,
1133 struct qseecom_register_listener_req *listener)
1134{
1135 int ret = 0;
1136 struct qseecom_register_listener_ireq req;
1137 struct qseecom_register_listener_64bit_ireq req_64bit;
1138 struct qseecom_command_scm_resp resp;
1139 ion_phys_addr_t pa;
1140 void *cmd_buf = NULL;
1141 size_t cmd_len;
1142
1143 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001144 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001145 listener->ifd_data_fd);
1146 if (IS_ERR_OR_NULL(svc->ihandle)) {
1147 pr_err("Ion client could not retrieve the handle\n");
1148 return -ENOMEM;
1149 }
1150
1151 /* Get the physical address of the ION BUF */
1152 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1153 if (ret) {
1154 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1155 ret);
1156 return ret;
1157 }
1158 /* Populate the structure for sending scm call to load image */
1159 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1160 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1161 pr_err("ION memory mapping for listener shared buffer failed\n");
1162 return -ENOMEM;
1163 }
1164 svc->sb_phys = (phys_addr_t)pa;
1165
1166 if (qseecom.qsee_version < QSEE_VERSION_40) {
1167 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1168 req.listener_id = svc->svc.listener_id;
1169 req.sb_len = svc->sb_length;
1170 req.sb_ptr = (uint32_t)svc->sb_phys;
1171 cmd_buf = (void *)&req;
1172 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1173 } else {
1174 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1175 req_64bit.listener_id = svc->svc.listener_id;
1176 req_64bit.sb_len = svc->sb_length;
1177 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1178 cmd_buf = (void *)&req_64bit;
1179 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1180 }
1181
1182 resp.result = QSEOS_RESULT_INCOMPLETE;
1183
Zhen Kongc4c162a2019-01-23 12:07:12 -08001184 mutex_unlock(&listener_access_lock);
1185 mutex_lock(&app_access_lock);
1186 __qseecom_reentrancy_check_if_no_app_blocked(
1187 TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001188 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1189 &resp, sizeof(resp));
Zhen Kongc4c162a2019-01-23 12:07:12 -08001190 mutex_unlock(&app_access_lock);
1191 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001192 if (ret) {
1193 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1194 return -EINVAL;
1195 }
1196
1197 if (resp.result != QSEOS_RESULT_SUCCESS) {
1198 pr_err("Error SB registration req: resp.result = %d\n",
1199 resp.result);
1200 return -EPERM;
1201 }
1202 return 0;
1203}
1204
1205static int qseecom_register_listener(struct qseecom_dev_handle *data,
1206 void __user *argp)
1207{
1208 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001209 struct qseecom_register_listener_req rcvd_lstnr;
1210 struct qseecom_registered_listener_list *new_entry;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001211 struct qseecom_registered_listener_list *ptr_svc;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001212
1213 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1214 if (ret) {
1215 pr_err("copy_from_user failed\n");
1216 return ret;
1217 }
1218 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1219 rcvd_lstnr.sb_size))
1220 return -EFAULT;
1221
Zhen Kong3c674612018-09-06 22:51:27 -07001222 data->listener.id = rcvd_lstnr.listener_id;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001223
1224 ptr_svc = __qseecom_find_svc(rcvd_lstnr.listener_id);
1225 if (ptr_svc) {
1226 if (ptr_svc->unregister_pending == false) {
1227 pr_err("Service %d is not unique\n",
Zhen Kong3c674612018-09-06 22:51:27 -07001228 rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001229 data->released = true;
1230 return -EBUSY;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001231 } else {
1232 /*wait until listener is unregistered*/
1233 pr_debug("register %d has to wait\n",
1234 rcvd_lstnr.listener_id);
1235 mutex_unlock(&listener_access_lock);
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301236 ret = wait_event_interruptible(
Zhen Kongbcdeda22018-11-16 13:50:51 -08001237 qseecom.register_lsnr_pending_wq,
1238 list_empty(
1239 &qseecom.unregister_lsnr_pending_list_head));
1240 if (ret) {
1241 pr_err("interrupted register_pending_wq %d\n",
1242 rcvd_lstnr.listener_id);
1243 mutex_lock(&listener_access_lock);
1244 return -ERESTARTSYS;
1245 }
1246 mutex_lock(&listener_access_lock);
1247 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001248 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001249 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1250 if (!new_entry)
1251 return -ENOMEM;
1252 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
Zhen Kongbcdeda22018-11-16 13:50:51 -08001253 new_entry->rcv_req_flag = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001254
1255 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1256 new_entry->sb_length = rcvd_lstnr.sb_size;
1257 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1258 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
Zhen Kong3c674612018-09-06 22:51:27 -07001259 pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
1260 rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001261 kzfree(new_entry);
1262 return -ENOMEM;
1263 }
1264
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001265 init_waitqueue_head(&new_entry->rcv_req_wq);
1266 init_waitqueue_head(&new_entry->listener_block_app_wq);
1267 new_entry->send_resp_flag = 0;
1268 new_entry->listener_in_use = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001269 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001270
Zhen Kong3c674612018-09-06 22:51:27 -07001271 pr_warn("Service %d is registered\n", rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001272 return ret;
1273}
1274
Zhen Kongbcdeda22018-11-16 13:50:51 -08001275static int __qseecom_unregister_listener(struct qseecom_dev_handle *data,
1276 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001277{
1278 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001279 struct qseecom_register_listener_ireq req;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001280 struct qseecom_command_scm_resp resp;
1281 struct ion_handle *ihandle = NULL; /* Retrieve phy addr */
1282
1283 req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
1284 req.listener_id = data->listener.id;
1285 resp.result = QSEOS_RESULT_INCOMPLETE;
1286
Zhen Kongc4c162a2019-01-23 12:07:12 -08001287 mutex_unlock(&listener_access_lock);
1288 mutex_lock(&app_access_lock);
1289 __qseecom_reentrancy_check_if_no_app_blocked(
1290 TZ_OS_DEREGISTER_LISTENER_ID);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001291 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
1292 sizeof(req), &resp, sizeof(resp));
Zhen Kongc4c162a2019-01-23 12:07:12 -08001293 mutex_unlock(&app_access_lock);
1294 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001295 if (ret) {
1296 pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
1297 ret, data->listener.id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001298 if (ret == -EBUSY)
1299 return ret;
Zhen Kong3c674612018-09-06 22:51:27 -07001300 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001301 }
1302
1303 if (resp.result != QSEOS_RESULT_SUCCESS) {
1304 pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
1305 resp.result, data->listener.id);
Zhen Kong3c674612018-09-06 22:51:27 -07001306 ret = -EPERM;
1307 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001308 }
1309
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001310 while (atomic_read(&data->ioctl_count) > 1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301311 if (wait_event_interruptible(data->abort_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001312 atomic_read(&data->ioctl_count) <= 1)) {
1313 pr_err("Interrupted from abort\n");
1314 ret = -ERESTARTSYS;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001315 }
1316 }
1317
Zhen Kong3c674612018-09-06 22:51:27 -07001318exit:
1319 if (ptr_svc->sb_virt) {
1320 ihandle = ptr_svc->ihandle;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001321 if (!IS_ERR_OR_NULL(ihandle)) {
1322 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
1323 ion_free(qseecom.ion_clnt, ihandle);
1324 }
1325 }
Zhen Kong3c674612018-09-06 22:51:27 -07001326 list_del(&ptr_svc->list);
1327 kzfree(ptr_svc);
1328
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001329 data->released = true;
Zhen Kong3c674612018-09-06 22:51:27 -07001330 pr_warn("Service %d is unregistered\n", data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001331 return ret;
1332}
1333
Zhen Kongbcdeda22018-11-16 13:50:51 -08001334static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
1335{
1336 struct qseecom_registered_listener_list *ptr_svc = NULL;
1337 struct qseecom_unregister_pending_list *entry = NULL;
1338
1339 ptr_svc = __qseecom_find_svc(data->listener.id);
1340 if (!ptr_svc) {
1341 pr_err("Unregiser invalid listener ID %d\n", data->listener.id);
1342 return -ENODATA;
1343 }
1344 /* stop CA thread waiting for listener response */
1345 ptr_svc->abort = 1;
1346 wake_up_interruptible_all(&qseecom.send_resp_wq);
1347
Zhen Kongc4c162a2019-01-23 12:07:12 -08001348 /* stop listener thread waiting for listener request */
1349 data->abort = 1;
1350 wake_up_all(&ptr_svc->rcv_req_wq);
1351
Zhen Kongbcdeda22018-11-16 13:50:51 -08001352 /* return directly if pending*/
1353 if (ptr_svc->unregister_pending)
1354 return 0;
1355
1356 /*add unregistration into pending list*/
1357 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1358 if (!entry)
1359 return -ENOMEM;
1360 entry->data = data;
1361 list_add_tail(&entry->list,
1362 &qseecom.unregister_lsnr_pending_list_head);
1363 ptr_svc->unregister_pending = true;
1364 pr_debug("unregister %d pending\n", data->listener.id);
1365 return 0;
1366}
1367
1368static void __qseecom_processing_pending_lsnr_unregister(void)
1369{
1370 struct qseecom_unregister_pending_list *entry = NULL;
1371 struct qseecom_registered_listener_list *ptr_svc = NULL;
1372 struct list_head *pos;
1373 int ret = 0;
1374
1375 mutex_lock(&listener_access_lock);
1376 while (!list_empty(&qseecom.unregister_lsnr_pending_list_head)) {
1377 pos = qseecom.unregister_lsnr_pending_list_head.next;
1378 entry = list_entry(pos,
1379 struct qseecom_unregister_pending_list, list);
1380 if (entry && entry->data) {
1381 pr_debug("process pending unregister %d\n",
1382 entry->data->listener.id);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08001383 /* don't process if qseecom_release is not called*/
1384 if (!entry->data->listener.release_called)
1385 break;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001386 ptr_svc = __qseecom_find_svc(
1387 entry->data->listener.id);
1388 if (ptr_svc) {
1389 ret = __qseecom_unregister_listener(
1390 entry->data, ptr_svc);
1391 if (ret == -EBUSY) {
1392 pr_debug("unregister %d pending again\n",
1393 entry->data->listener.id);
1394 mutex_unlock(&listener_access_lock);
1395 return;
1396 }
1397 } else
1398 pr_err("invalid listener %d\n",
1399 entry->data->listener.id);
1400 kzfree(entry->data);
1401 }
1402 list_del(pos);
1403 kzfree(entry);
1404 }
1405 mutex_unlock(&listener_access_lock);
1406 wake_up_interruptible(&qseecom.register_lsnr_pending_wq);
1407}
1408
Zhen Kongc4c162a2019-01-23 12:07:12 -08001409static void __wakeup_unregister_listener_kthread(void)
1410{
1411 atomic_set(&qseecom.unregister_lsnr_kthread_state,
1412 LSNR_UNREG_KT_WAKEUP);
1413 wake_up_interruptible(&qseecom.unregister_lsnr_kthread_wq);
1414}
1415
1416static int __qseecom_unregister_listener_kthread_func(void *data)
1417{
1418 while (!kthread_should_stop()) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301419 wait_event_interruptible(
Zhen Kongc4c162a2019-01-23 12:07:12 -08001420 qseecom.unregister_lsnr_kthread_wq,
1421 atomic_read(&qseecom.unregister_lsnr_kthread_state)
1422 == LSNR_UNREG_KT_WAKEUP);
1423 pr_debug("kthread to unregister listener is called %d\n",
1424 atomic_read(&qseecom.unregister_lsnr_kthread_state));
1425 __qseecom_processing_pending_lsnr_unregister();
1426 atomic_set(&qseecom.unregister_lsnr_kthread_state,
1427 LSNR_UNREG_KT_SLEEP);
1428 }
1429 pr_warn("kthread to unregister listener stopped\n");
1430 return 0;
1431}
1432
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001433static int __qseecom_set_msm_bus_request(uint32_t mode)
1434{
1435 int ret = 0;
1436 struct qseecom_clk *qclk;
1437
1438 qclk = &qseecom.qsee;
1439 if (qclk->ce_core_src_clk != NULL) {
1440 if (mode == INACTIVE) {
1441 __qseecom_disable_clk(CLK_QSEE);
1442 } else {
1443 ret = __qseecom_enable_clk(CLK_QSEE);
1444 if (ret)
1445 pr_err("CLK enabling failed (%d) MODE (%d)\n",
1446 ret, mode);
1447 }
1448 }
1449
1450 if ((!ret) && (qseecom.current_mode != mode)) {
1451 ret = msm_bus_scale_client_update_request(
1452 qseecom.qsee_perf_client, mode);
1453 if (ret) {
1454 pr_err("Bandwidth req failed(%d) MODE (%d)\n",
1455 ret, mode);
1456 if (qclk->ce_core_src_clk != NULL) {
1457 if (mode == INACTIVE) {
1458 ret = __qseecom_enable_clk(CLK_QSEE);
1459 if (ret)
1460 pr_err("CLK enable failed\n");
1461 } else
1462 __qseecom_disable_clk(CLK_QSEE);
1463 }
1464 }
1465 qseecom.current_mode = mode;
1466 }
1467 return ret;
1468}
1469
1470static void qseecom_bw_inactive_req_work(struct work_struct *work)
1471{
1472 mutex_lock(&app_access_lock);
1473 mutex_lock(&qsee_bw_mutex);
1474 if (qseecom.timer_running)
1475 __qseecom_set_msm_bus_request(INACTIVE);
1476 pr_debug("current_mode = %d, cumulative_mode = %d\n",
1477 qseecom.current_mode, qseecom.cumulative_mode);
1478 qseecom.timer_running = false;
1479 mutex_unlock(&qsee_bw_mutex);
1480 mutex_unlock(&app_access_lock);
1481}
1482
1483static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
1484{
1485 schedule_work(&qseecom.bw_inactive_req_ws);
1486}
1487
1488static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1489{
1490 struct qseecom_clk *qclk;
1491 int ret = 0;
1492
1493 mutex_lock(&clk_access_lock);
1494 if (ce == CLK_QSEE)
1495 qclk = &qseecom.qsee;
1496 else
1497 qclk = &qseecom.ce_drv;
1498
1499 if (qclk->clk_access_cnt > 2) {
1500 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1501 ret = -EINVAL;
1502 goto err_dec_ref_cnt;
1503 }
1504 if (qclk->clk_access_cnt == 2)
1505 qclk->clk_access_cnt--;
1506
1507err_dec_ref_cnt:
1508 mutex_unlock(&clk_access_lock);
1509 return ret;
1510}
1511
1512
1513static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1514{
1515 int32_t ret = 0;
1516 int32_t request_mode = INACTIVE;
1517
1518 mutex_lock(&qsee_bw_mutex);
1519 if (mode == 0) {
1520 if (qseecom.cumulative_mode > MEDIUM)
1521 request_mode = HIGH;
1522 else
1523 request_mode = qseecom.cumulative_mode;
1524 } else {
1525 request_mode = mode;
1526 }
1527
1528 ret = __qseecom_set_msm_bus_request(request_mode);
1529 if (ret) {
1530 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1531 ret, request_mode);
1532 goto err_scale_timer;
1533 }
1534
1535 if (qseecom.timer_running) {
1536 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1537 if (ret) {
1538 pr_err("Failed to decrease clk ref count.\n");
1539 goto err_scale_timer;
1540 }
1541 del_timer_sync(&(qseecom.bw_scale_down_timer));
1542 qseecom.timer_running = false;
1543 }
1544err_scale_timer:
1545 mutex_unlock(&qsee_bw_mutex);
1546 return ret;
1547}
1548
1549
1550static int qseecom_unregister_bus_bandwidth_needs(
1551 struct qseecom_dev_handle *data)
1552{
1553 int32_t ret = 0;
1554
1555 qseecom.cumulative_mode -= data->mode;
1556 data->mode = INACTIVE;
1557
1558 return ret;
1559}
1560
1561static int __qseecom_register_bus_bandwidth_needs(
1562 struct qseecom_dev_handle *data, uint32_t request_mode)
1563{
1564 int32_t ret = 0;
1565
1566 if (data->mode == INACTIVE) {
1567 qseecom.cumulative_mode += request_mode;
1568 data->mode = request_mode;
1569 } else {
1570 if (data->mode != request_mode) {
1571 qseecom.cumulative_mode -= data->mode;
1572 qseecom.cumulative_mode += request_mode;
1573 data->mode = request_mode;
1574 }
1575 }
1576 return ret;
1577}
1578
1579static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1580{
1581 int ret = 0;
1582
1583 ret = qsee_vote_for_clock(data, CLK_DFAB);
1584 if (ret) {
1585 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1586 goto perf_enable_exit;
1587 }
1588 ret = qsee_vote_for_clock(data, CLK_SFPB);
1589 if (ret) {
1590 qsee_disable_clock_vote(data, CLK_DFAB);
1591 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1592 goto perf_enable_exit;
1593 }
1594
1595perf_enable_exit:
1596 return ret;
1597}
1598
1599static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1600 void __user *argp)
1601{
1602 int32_t ret = 0;
1603 int32_t req_mode;
1604
1605 if (qseecom.no_clock_support)
1606 return 0;
1607
1608 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1609 if (ret) {
1610 pr_err("copy_from_user failed\n");
1611 return ret;
1612 }
1613 if (req_mode > HIGH) {
1614 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1615 return -EINVAL;
1616 }
1617
1618 /*
1619 * Register bus bandwidth needs if bus scaling feature is enabled;
1620 * otherwise, qseecom enable/disable clocks for the client directly.
1621 */
1622 if (qseecom.support_bus_scaling) {
1623 mutex_lock(&qsee_bw_mutex);
1624 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1625 mutex_unlock(&qsee_bw_mutex);
1626 } else {
1627 pr_debug("Bus scaling feature is NOT enabled\n");
1628 pr_debug("request bandwidth mode %d for the client\n",
1629 req_mode);
1630 if (req_mode != INACTIVE) {
1631 ret = qseecom_perf_enable(data);
1632 if (ret)
1633 pr_err("Failed to vote for clock with err %d\n",
1634 ret);
1635 } else {
1636 qsee_disable_clock_vote(data, CLK_DFAB);
1637 qsee_disable_clock_vote(data, CLK_SFPB);
1638 }
1639 }
1640 return ret;
1641}
1642
1643static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1644{
1645 if (qseecom.no_clock_support)
1646 return;
1647
1648 mutex_lock(&qsee_bw_mutex);
1649 qseecom.bw_scale_down_timer.expires = jiffies +
1650 msecs_to_jiffies(duration);
1651 mod_timer(&(qseecom.bw_scale_down_timer),
1652 qseecom.bw_scale_down_timer.expires);
1653 qseecom.timer_running = true;
1654 mutex_unlock(&qsee_bw_mutex);
1655}
1656
1657static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1658{
1659 if (!qseecom.support_bus_scaling)
1660 qsee_disable_clock_vote(data, CLK_SFPB);
1661 else
1662 __qseecom_add_bw_scale_down_timer(
1663 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1664}
1665
1666static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1667{
1668 int ret = 0;
1669
1670 if (qseecom.support_bus_scaling) {
1671 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1672 if (ret)
1673 pr_err("Failed to set bw MEDIUM.\n");
1674 } else {
1675 ret = qsee_vote_for_clock(data, CLK_SFPB);
1676 if (ret)
1677 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1678 }
1679 return ret;
1680}
1681
1682static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1683 void __user *argp)
1684{
1685 ion_phys_addr_t pa;
1686 int32_t ret;
1687 struct qseecom_set_sb_mem_param_req req;
1688 size_t len;
1689
1690 /* Copy the relevant information needed for loading the image */
1691 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1692 return -EFAULT;
1693
1694 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1695 (req.sb_len == 0)) {
1696 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1697 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1698 return -EFAULT;
1699 }
1700 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1701 req.sb_len))
1702 return -EFAULT;
1703
1704 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001705 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001706 req.ifd_data_fd);
1707 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1708 pr_err("Ion client could not retrieve the handle\n");
1709 return -ENOMEM;
1710 }
1711 /* Get the physical address of the ION BUF */
1712 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1713 if (ret) {
1714
1715 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1716 ret);
1717 return ret;
1718 }
1719
1720 if (len < req.sb_len) {
1721 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1722 req.sb_len, len);
1723 return -EINVAL;
1724 }
1725 /* Populate the structure for sending scm call to load image */
1726 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1727 data->client.ihandle);
1728 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1729 pr_err("ION memory mapping for client shared buf failed\n");
1730 return -ENOMEM;
1731 }
1732 data->client.sb_phys = (phys_addr_t)pa;
1733 data->client.sb_length = req.sb_len;
1734 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1735 return 0;
1736}
1737
Zhen Kong26e62742018-05-04 17:19:06 -07001738static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
1739 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001740{
1741 int ret;
1742
1743 ret = (qseecom.send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001744 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001745}
1746
1747static int __qseecom_reentrancy_listener_has_sent_rsp(
1748 struct qseecom_dev_handle *data,
1749 struct qseecom_registered_listener_list *ptr_svc)
1750{
1751 int ret;
1752
1753 ret = (ptr_svc->send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001754 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001755}
1756
1757static void __qseecom_clean_listener_sglistinfo(
1758 struct qseecom_registered_listener_list *ptr_svc)
1759{
1760 if (ptr_svc->sglist_cnt) {
1761 memset(ptr_svc->sglistinfo_ptr, 0,
1762 SGLISTINFO_TABLE_SIZE);
1763 ptr_svc->sglist_cnt = 0;
1764 }
1765}
1766
1767static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
1768 struct qseecom_command_scm_resp *resp)
1769{
1770 int ret = 0;
1771 int rc = 0;
1772 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07001773 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
1774 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
1775 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001776 struct qseecom_registered_listener_list *ptr_svc = NULL;
1777 sigset_t new_sigset;
1778 sigset_t old_sigset;
1779 uint32_t status;
1780 void *cmd_buf = NULL;
1781 size_t cmd_len;
1782 struct sglist_info *table = NULL;
1783
Zhen Kongbcdeda22018-11-16 13:50:51 -08001784 qseecom.app_block_ref_cnt++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001785 while (resp->result == QSEOS_RESULT_INCOMPLETE) {
1786 lstnr = resp->data;
1787 /*
1788 * Wake up blocking lsitener service with the lstnr id
1789 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08001790 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001791 list_for_each_entry(ptr_svc,
1792 &qseecom.registered_listener_list_head, list) {
1793 if (ptr_svc->svc.listener_id == lstnr) {
1794 ptr_svc->listener_in_use = true;
1795 ptr_svc->rcv_req_flag = 1;
1796 wake_up_interruptible(&ptr_svc->rcv_req_wq);
1797 break;
1798 }
1799 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001800
1801 if (ptr_svc == NULL) {
1802 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07001803 rc = -EINVAL;
1804 status = QSEOS_RESULT_FAILURE;
1805 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001806 }
1807
1808 if (!ptr_svc->ihandle) {
1809 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07001810 rc = -EINVAL;
1811 status = QSEOS_RESULT_FAILURE;
1812 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001813 }
1814
1815 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07001816 pr_err("Service %d does not exist\n",
1817 lstnr);
1818 rc = -ERESTARTSYS;
1819 ptr_svc = NULL;
1820 status = QSEOS_RESULT_FAILURE;
1821 goto err_resp;
1822 }
1823
1824 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08001825 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07001826 lstnr, ptr_svc->abort);
1827 rc = -ENODEV;
1828 status = QSEOS_RESULT_FAILURE;
1829 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001830 }
Zhen Kong25731112018-09-20 13:10:03 -07001831
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001832 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
1833
1834 /* initialize the new signal mask with all signals*/
1835 sigfillset(&new_sigset);
1836 /* block all signals */
1837 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
1838
Zhen Kongbcdeda22018-11-16 13:50:51 -08001839 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001840 do {
1841 /*
1842 * When reentrancy is not supported, check global
1843 * send_resp_flag; otherwise, check this listener's
1844 * send_resp_flag.
1845 */
1846 if (!qseecom.qsee_reentrancy_support &&
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301847 !wait_event_interruptible(qseecom.send_resp_wq,
Zhen Kong26e62742018-05-04 17:19:06 -07001848 __qseecom_listener_has_sent_rsp(
1849 data, ptr_svc))) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001850 break;
1851 }
1852
1853 if (qseecom.qsee_reentrancy_support &&
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301854 !wait_event_interruptible(qseecom.send_resp_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001855 __qseecom_reentrancy_listener_has_sent_rsp(
1856 data, ptr_svc))) {
1857 break;
1858 }
1859 } while (1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001860 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001861 /* restore signal mask */
1862 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07001863 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001864 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
1865 data->client.app_id, lstnr, ret);
1866 rc = -ENODEV;
1867 status = QSEOS_RESULT_FAILURE;
1868 } else {
1869 status = QSEOS_RESULT_SUCCESS;
1870 }
Zhen Kong26e62742018-05-04 17:19:06 -07001871err_resp:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001872 qseecom.send_resp_flag = 0;
Zhen Kong7d500032018-08-06 16:58:31 -07001873 if (ptr_svc) {
1874 ptr_svc->send_resp_flag = 0;
1875 table = ptr_svc->sglistinfo_ptr;
1876 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001877 if (qseecom.qsee_version < QSEE_VERSION_40) {
1878 send_data_rsp.listener_id = lstnr;
1879 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001880 if (table) {
1881 send_data_rsp.sglistinfo_ptr =
1882 (uint32_t)virt_to_phys(table);
1883 send_data_rsp.sglistinfo_len =
1884 SGLISTINFO_TABLE_SIZE;
1885 dmac_flush_range((void *)table,
1886 (void *)table + SGLISTINFO_TABLE_SIZE);
1887 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001888 cmd_buf = (void *)&send_data_rsp;
1889 cmd_len = sizeof(send_data_rsp);
1890 } else {
1891 send_data_rsp_64bit.listener_id = lstnr;
1892 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001893 if (table) {
1894 send_data_rsp_64bit.sglistinfo_ptr =
1895 virt_to_phys(table);
1896 send_data_rsp_64bit.sglistinfo_len =
1897 SGLISTINFO_TABLE_SIZE;
1898 dmac_flush_range((void *)table,
1899 (void *)table + SGLISTINFO_TABLE_SIZE);
1900 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001901 cmd_buf = (void *)&send_data_rsp_64bit;
1902 cmd_len = sizeof(send_data_rsp_64bit);
1903 }
Zhen Kong7d500032018-08-06 16:58:31 -07001904 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001905 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
1906 else
1907 *(uint32_t *)cmd_buf =
1908 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07001909 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001910 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
1911 ptr_svc->ihandle,
1912 ptr_svc->sb_virt, ptr_svc->sb_length,
1913 ION_IOC_CLEAN_INV_CACHES);
1914 if (ret) {
1915 pr_err("cache operation failed %d\n", ret);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001916 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001917 }
1918 }
1919
1920 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
1921 ret = __qseecom_enable_clk(CLK_QSEE);
1922 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08001923 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001924 }
1925
1926 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1927 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07001928 if (ptr_svc) {
1929 ptr_svc->listener_in_use = false;
1930 __qseecom_clean_listener_sglistinfo(ptr_svc);
1931 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001932 if (ret) {
1933 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1934 ret, data->client.app_id);
1935 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1936 __qseecom_disable_clk(CLK_QSEE);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001937 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001938 }
Zhen Kong26e62742018-05-04 17:19:06 -07001939 pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
1940 status, resp->result, data->client.app_id, lstnr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001941 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1942 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1943 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1944 resp->result, data->client.app_id, lstnr);
1945 ret = -EINVAL;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001946 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001947 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001948exit:
1949 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001950 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1951 __qseecom_disable_clk(CLK_QSEE);
1952
1953 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001954 qseecom.app_block_ref_cnt--;
Zhen Kongcc580932019-04-23 22:16:56 -07001955 wake_up_interruptible_all(&qseecom.app_block_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001956 if (rc)
1957 return rc;
1958
1959 return ret;
1960}
1961
Zhen Konga91aaf02018-02-02 17:21:04 -08001962static int __qseecom_process_reentrancy_blocked_on_listener(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001963 struct qseecom_command_scm_resp *resp,
1964 struct qseecom_registered_app_list *ptr_app,
1965 struct qseecom_dev_handle *data)
1966{
1967 struct qseecom_registered_listener_list *list_ptr;
1968 int ret = 0;
1969 struct qseecom_continue_blocked_request_ireq ireq;
1970 struct qseecom_command_scm_resp continue_resp;
Zhen Konga91aaf02018-02-02 17:21:04 -08001971 unsigned int session_id;
Zhen Kong3d1d92f2018-02-02 17:21:04 -08001972 sigset_t new_sigset;
1973 sigset_t old_sigset;
Zhen Konga91aaf02018-02-02 17:21:04 -08001974 unsigned long flags;
1975 bool found_app = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001976
1977 if (!resp || !data) {
1978 pr_err("invalid resp or data pointer\n");
1979 ret = -EINVAL;
1980 goto exit;
1981 }
1982
1983 /* find app_id & img_name from list */
Zhen Konge4804722019-02-27 21:13:18 -08001984 if (!ptr_app && data->client.app_arch != ELFCLASSNONE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001985 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
1986 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
1987 list) {
1988 if ((ptr_app->app_id == data->client.app_id) &&
1989 (!strcmp(ptr_app->app_name,
1990 data->client.app_name))) {
1991 found_app = true;
1992 break;
1993 }
1994 }
1995 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
1996 flags);
1997 if (!found_app) {
1998 pr_err("app_id %d (%s) is not found\n",
1999 data->client.app_id,
2000 (char *)data->client.app_name);
2001 ret = -ENOENT;
2002 goto exit;
2003 }
2004 }
2005
Zhen Kongd8cc0052017-11-13 15:13:31 -08002006 do {
Zhen Konga91aaf02018-02-02 17:21:04 -08002007 session_id = resp->resp_type;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002008 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002009 list_ptr = __qseecom_find_svc(resp->data);
2010 if (!list_ptr) {
2011 pr_err("Invalid listener ID %d\n", resp->data);
2012 ret = -ENODATA;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002013 mutex_unlock(&listener_access_lock);
Zhen Konge7f525f2017-12-01 18:26:25 -08002014 goto exit;
2015 }
Zhen Konga91aaf02018-02-02 17:21:04 -08002016 ptr_app->blocked_on_listener_id = resp->data;
2017
2018 pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
2019 resp->data, list_ptr->listener_in_use,
2020 session_id, data->client.app_id);
2021
2022 /* sleep until listener is available */
2023 sigfillset(&new_sigset);
2024 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2025
2026 do {
2027 qseecom.app_block_ref_cnt++;
2028 ptr_app->app_blocked = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002029 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002030 mutex_unlock(&app_access_lock);
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302031 wait_event_interruptible(
Zhen Konga91aaf02018-02-02 17:21:04 -08002032 list_ptr->listener_block_app_wq,
2033 !list_ptr->listener_in_use);
2034 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002035 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002036 ptr_app->app_blocked = false;
2037 qseecom.app_block_ref_cnt--;
2038 } while (list_ptr->listener_in_use);
2039
2040 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2041
2042 ptr_app->blocked_on_listener_id = 0;
2043 pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
2044 resp->data, session_id, data->client.app_id);
2045
2046 /* notify TZ that listener is available */
2047 ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
2048
2049 if (qseecom.smcinvoke_support)
2050 ireq.app_or_session_id = session_id;
2051 else
2052 ireq.app_or_session_id = data->client.app_id;
2053
2054 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2055 &ireq, sizeof(ireq),
2056 &continue_resp, sizeof(continue_resp));
2057 if (ret && qseecom.smcinvoke_support) {
2058 /* retry with legacy cmd */
2059 qseecom.smcinvoke_support = false;
2060 ireq.app_or_session_id = data->client.app_id;
2061 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2062 &ireq, sizeof(ireq),
2063 &continue_resp, sizeof(continue_resp));
2064 qseecom.smcinvoke_support = true;
2065 if (ret) {
2066 pr_err("unblock app %d or session %d fail\n",
2067 data->client.app_id, session_id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002068 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002069 goto exit;
2070 }
2071 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08002072 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002073 resp->result = continue_resp.result;
2074 resp->resp_type = continue_resp.resp_type;
2075 resp->data = continue_resp.data;
2076 pr_debug("unblock resp = %d\n", resp->result);
2077 } while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);
2078
2079 if (resp->result != QSEOS_RESULT_INCOMPLETE) {
2080 pr_err("Unexpected unblock resp %d\n", resp->result);
2081 ret = -EINVAL;
Zhen Kong2f60f492017-06-29 15:22:14 -07002082 }
Zhen Kong2f60f492017-06-29 15:22:14 -07002083exit:
2084 return ret;
2085}
2086
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002087static int __qseecom_reentrancy_process_incomplete_cmd(
2088 struct qseecom_dev_handle *data,
2089 struct qseecom_command_scm_resp *resp)
2090{
2091 int ret = 0;
2092 int rc = 0;
2093 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07002094 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
2095 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
2096 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002097 struct qseecom_registered_listener_list *ptr_svc = NULL;
2098 sigset_t new_sigset;
2099 sigset_t old_sigset;
2100 uint32_t status;
2101 void *cmd_buf = NULL;
2102 size_t cmd_len;
2103 struct sglist_info *table = NULL;
2104
Zhen Kong26e62742018-05-04 17:19:06 -07002105 while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002106 lstnr = resp->data;
2107 /*
2108 * Wake up blocking lsitener service with the lstnr id
2109 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002110 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002111 list_for_each_entry(ptr_svc,
2112 &qseecom.registered_listener_list_head, list) {
2113 if (ptr_svc->svc.listener_id == lstnr) {
2114 ptr_svc->listener_in_use = true;
2115 ptr_svc->rcv_req_flag = 1;
2116 wake_up_interruptible(&ptr_svc->rcv_req_wq);
2117 break;
2118 }
2119 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002120
2121 if (ptr_svc == NULL) {
2122 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07002123 rc = -EINVAL;
2124 status = QSEOS_RESULT_FAILURE;
2125 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002126 }
2127
2128 if (!ptr_svc->ihandle) {
2129 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07002130 rc = -EINVAL;
2131 status = QSEOS_RESULT_FAILURE;
2132 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002133 }
2134
2135 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07002136 pr_err("Service %d does not exist\n",
2137 lstnr);
2138 rc = -ERESTARTSYS;
2139 ptr_svc = NULL;
2140 status = QSEOS_RESULT_FAILURE;
2141 goto err_resp;
2142 }
2143
2144 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08002145 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07002146 lstnr, ptr_svc->abort);
2147 rc = -ENODEV;
2148 status = QSEOS_RESULT_FAILURE;
2149 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002150 }
Zhen Kong25731112018-09-20 13:10:03 -07002151
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002152 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
2153
2154 /* initialize the new signal mask with all signals*/
2155 sigfillset(&new_sigset);
2156
2157 /* block all signals */
2158 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2159
2160 /* unlock mutex btw waking listener and sleep-wait */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002161 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002162 mutex_unlock(&app_access_lock);
2163 do {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302164 if (!wait_event_interruptible(qseecom.send_resp_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002165 __qseecom_reentrancy_listener_has_sent_rsp(
2166 data, ptr_svc))) {
2167 break;
2168 }
2169 } while (1);
2170 /* lock mutex again after resp sent */
2171 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002172 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002173 ptr_svc->send_resp_flag = 0;
2174 qseecom.send_resp_flag = 0;
2175
2176 /* restore signal mask */
2177 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07002178 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002179 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
2180 data->client.app_id, lstnr, ret);
2181 rc = -ENODEV;
2182 status = QSEOS_RESULT_FAILURE;
2183 } else {
2184 status = QSEOS_RESULT_SUCCESS;
2185 }
Zhen Kong26e62742018-05-04 17:19:06 -07002186err_resp:
Zhen Kong7d500032018-08-06 16:58:31 -07002187 if (ptr_svc)
2188 table = ptr_svc->sglistinfo_ptr;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002189 if (qseecom.qsee_version < QSEE_VERSION_40) {
2190 send_data_rsp.listener_id = lstnr;
2191 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002192 if (table) {
2193 send_data_rsp.sglistinfo_ptr =
2194 (uint32_t)virt_to_phys(table);
2195 send_data_rsp.sglistinfo_len =
2196 SGLISTINFO_TABLE_SIZE;
2197 dmac_flush_range((void *)table,
2198 (void *)table + SGLISTINFO_TABLE_SIZE);
2199 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002200 cmd_buf = (void *)&send_data_rsp;
2201 cmd_len = sizeof(send_data_rsp);
2202 } else {
2203 send_data_rsp_64bit.listener_id = lstnr;
2204 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002205 if (table) {
2206 send_data_rsp_64bit.sglistinfo_ptr =
2207 virt_to_phys(table);
2208 send_data_rsp_64bit.sglistinfo_len =
2209 SGLISTINFO_TABLE_SIZE;
2210 dmac_flush_range((void *)table,
2211 (void *)table + SGLISTINFO_TABLE_SIZE);
2212 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002213 cmd_buf = (void *)&send_data_rsp_64bit;
2214 cmd_len = sizeof(send_data_rsp_64bit);
2215 }
Zhen Kong7d500032018-08-06 16:58:31 -07002216 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002217 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
2218 else
2219 *(uint32_t *)cmd_buf =
2220 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07002221 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002222 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
2223 ptr_svc->ihandle,
2224 ptr_svc->sb_virt, ptr_svc->sb_length,
2225 ION_IOC_CLEAN_INV_CACHES);
2226 if (ret) {
2227 pr_err("cache operation failed %d\n", ret);
2228 return ret;
2229 }
2230 }
2231 if (lstnr == RPMB_SERVICE) {
2232 ret = __qseecom_enable_clk(CLK_QSEE);
2233 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08002234 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002235 }
2236
2237 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2238 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07002239 if (ptr_svc) {
2240 ptr_svc->listener_in_use = false;
2241 __qseecom_clean_listener_sglistinfo(ptr_svc);
2242 wake_up_interruptible(&ptr_svc->listener_block_app_wq);
2243 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002244
2245 if (ret) {
2246 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
2247 ret, data->client.app_id);
2248 goto exit;
2249 }
2250
2251 switch (resp->result) {
2252 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
2253 pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
2254 lstnr, data->client.app_id, resp->data);
2255 if (lstnr == resp->data) {
2256 pr_err("lstnr %d should not be blocked!\n",
2257 lstnr);
2258 ret = -EINVAL;
2259 goto exit;
2260 }
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002261 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002262 ret = __qseecom_process_reentrancy_blocked_on_listener(
2263 resp, NULL, data);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002264 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002265 if (ret) {
2266 pr_err("failed to process App(%d) %s blocked on listener %d\n",
2267 data->client.app_id,
2268 data->client.app_name, resp->data);
2269 goto exit;
2270 }
2271 case QSEOS_RESULT_SUCCESS:
2272 case QSEOS_RESULT_INCOMPLETE:
2273 break;
2274 default:
2275 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
2276 resp->result, data->client.app_id, lstnr);
2277 ret = -EINVAL;
2278 goto exit;
2279 }
2280exit:
Zhen Kongbcdeda22018-11-16 13:50:51 -08002281 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002282 if (lstnr == RPMB_SERVICE)
2283 __qseecom_disable_clk(CLK_QSEE);
2284
2285 }
2286 if (rc)
2287 return rc;
2288
2289 return ret;
2290}
2291
2292/*
2293 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2294 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2295 * So, needs to first check if no app blocked before sending OS level scm call,
2296 * then wait until all apps are unblocked.
2297 */
2298static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2299{
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002300 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2301 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2302 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2303 /* thread sleep until this app unblocked */
2304 while (qseecom.app_block_ref_cnt > 0) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002305 mutex_unlock(&app_access_lock);
Zhen Kongcc580932019-04-23 22:16:56 -07002306 wait_event_interruptible(qseecom.app_block_wq,
2307 (!qseecom.app_block_ref_cnt));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002308 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002309 }
2310 }
2311}
2312
2313/*
2314 * scm_call of send data will fail if this TA is blocked or there are more
2315 * than one TA requesting listener services; So, first check to see if need
2316 * to wait.
2317 */
2318static void __qseecom_reentrancy_check_if_this_app_blocked(
2319 struct qseecom_registered_app_list *ptr_app)
2320{
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002321 if (qseecom.qsee_reentrancy_support) {
Zhen Kongdea10592018-07-30 17:50:10 -07002322 ptr_app->check_block++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002323 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2324 /* thread sleep until this app unblocked */
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002325 mutex_unlock(&app_access_lock);
Zhen Kongcc580932019-04-23 22:16:56 -07002326 wait_event_interruptible(qseecom.app_block_wq,
2327 (!ptr_app->app_blocked &&
2328 qseecom.app_block_ref_cnt <= 1));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002329 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002330 }
Zhen Kongdea10592018-07-30 17:50:10 -07002331 ptr_app->check_block--;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002332 }
2333}
2334
2335static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2336 uint32_t *app_id)
2337{
2338 int32_t ret;
2339 struct qseecom_command_scm_resp resp;
2340 bool found_app = false;
2341 struct qseecom_registered_app_list *entry = NULL;
2342 unsigned long flags = 0;
2343
2344 if (!app_id) {
2345 pr_err("Null pointer to app_id\n");
2346 return -EINVAL;
2347 }
2348 *app_id = 0;
2349
2350 /* check if app exists and has been registered locally */
2351 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2352 list_for_each_entry(entry,
2353 &qseecom.registered_app_list_head, list) {
2354 if (!strcmp(entry->app_name, req.app_name)) {
2355 found_app = true;
2356 break;
2357 }
2358 }
2359 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2360 if (found_app) {
2361 pr_debug("Found app with id %d\n", entry->app_id);
2362 *app_id = entry->app_id;
2363 return 0;
2364 }
2365
2366 memset((void *)&resp, 0, sizeof(resp));
2367
2368 /* SCM_CALL to check if app_id for the mentioned app exists */
2369 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2370 sizeof(struct qseecom_check_app_ireq),
2371 &resp, sizeof(resp));
2372 if (ret) {
2373 pr_err("scm_call to check if app is already loaded failed\n");
2374 return -EINVAL;
2375 }
2376
2377 if (resp.result == QSEOS_RESULT_FAILURE)
2378 return 0;
2379
2380 switch (resp.resp_type) {
2381 /*qsee returned listener type response */
2382 case QSEOS_LISTENER_ID:
2383 pr_err("resp type is of listener type instead of app");
2384 return -EINVAL;
2385 case QSEOS_APP_ID:
2386 *app_id = resp.data;
2387 return 0;
2388 default:
2389 pr_err("invalid resp type (%d) from qsee",
2390 resp.resp_type);
2391 return -ENODEV;
2392 }
2393}
2394
2395static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2396{
2397 struct qseecom_registered_app_list *entry = NULL;
2398 unsigned long flags = 0;
2399 u32 app_id = 0;
2400 struct ion_handle *ihandle; /* Ion handle */
2401 struct qseecom_load_img_req load_img_req;
2402 int32_t ret = 0;
2403 ion_phys_addr_t pa = 0;
2404 size_t len;
2405 struct qseecom_command_scm_resp resp;
2406 struct qseecom_check_app_ireq req;
2407 struct qseecom_load_app_ireq load_req;
2408 struct qseecom_load_app_64bit_ireq load_req_64bit;
2409 void *cmd_buf = NULL;
2410 size_t cmd_len;
2411 bool first_time = false;
2412
2413 /* Copy the relevant information needed for loading the image */
2414 if (copy_from_user(&load_img_req,
2415 (void __user *)argp,
2416 sizeof(struct qseecom_load_img_req))) {
2417 pr_err("copy_from_user failed\n");
2418 return -EFAULT;
2419 }
2420
2421 /* Check and load cmnlib */
2422 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2423 if (!qseecom.commonlib_loaded &&
2424 load_img_req.app_arch == ELFCLASS32) {
2425 ret = qseecom_load_commonlib_image(data, "cmnlib");
2426 if (ret) {
2427 pr_err("failed to load cmnlib\n");
2428 return -EIO;
2429 }
2430 qseecom.commonlib_loaded = true;
2431 pr_debug("cmnlib is loaded\n");
2432 }
2433
2434 if (!qseecom.commonlib64_loaded &&
2435 load_img_req.app_arch == ELFCLASS64) {
2436 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2437 if (ret) {
2438 pr_err("failed to load cmnlib64\n");
2439 return -EIO;
2440 }
2441 qseecom.commonlib64_loaded = true;
2442 pr_debug("cmnlib64 is loaded\n");
2443 }
2444 }
2445
2446 if (qseecom.support_bus_scaling) {
2447 mutex_lock(&qsee_bw_mutex);
2448 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2449 mutex_unlock(&qsee_bw_mutex);
2450 if (ret)
2451 return ret;
2452 }
2453
2454 /* Vote for the SFPB clock */
2455 ret = __qseecom_enable_clk_scale_up(data);
2456 if (ret)
2457 goto enable_clk_err;
2458
2459 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2460 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2461 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2462
2463 ret = __qseecom_check_app_exists(req, &app_id);
2464 if (ret < 0)
2465 goto loadapp_err;
2466
2467 if (app_id) {
2468 pr_debug("App id %d (%s) already exists\n", app_id,
2469 (char *)(req.app_name));
2470 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2471 list_for_each_entry(entry,
2472 &qseecom.registered_app_list_head, list){
2473 if (entry->app_id == app_id) {
2474 entry->ref_cnt++;
2475 break;
2476 }
2477 }
2478 spin_unlock_irqrestore(
2479 &qseecom.registered_app_list_lock, flags);
2480 ret = 0;
2481 } else {
2482 first_time = true;
2483 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2484 (char *)(load_img_req.img_name));
2485 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002486 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002487 load_img_req.ifd_data_fd);
2488 if (IS_ERR_OR_NULL(ihandle)) {
2489 pr_err("Ion client could not retrieve the handle\n");
2490 ret = -ENOMEM;
2491 goto loadapp_err;
2492 }
2493
2494 /* Get the physical address of the ION BUF */
2495 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2496 if (ret) {
2497 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2498 ret);
2499 goto loadapp_err;
2500 }
2501 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2502 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2503 len, load_img_req.mdt_len,
2504 load_img_req.img_len);
2505 ret = -EINVAL;
2506 goto loadapp_err;
2507 }
2508 /* Populate the structure for sending scm call to load image */
2509 if (qseecom.qsee_version < QSEE_VERSION_40) {
2510 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2511 load_req.mdt_len = load_img_req.mdt_len;
2512 load_req.img_len = load_img_req.img_len;
2513 strlcpy(load_req.app_name, load_img_req.img_name,
2514 MAX_APP_NAME_SIZE);
2515 load_req.phy_addr = (uint32_t)pa;
2516 cmd_buf = (void *)&load_req;
2517 cmd_len = sizeof(struct qseecom_load_app_ireq);
2518 } else {
2519 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2520 load_req_64bit.mdt_len = load_img_req.mdt_len;
2521 load_req_64bit.img_len = load_img_req.img_len;
2522 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2523 MAX_APP_NAME_SIZE);
2524 load_req_64bit.phy_addr = (uint64_t)pa;
2525 cmd_buf = (void *)&load_req_64bit;
2526 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2527 }
2528
2529 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2530 ION_IOC_CLEAN_INV_CACHES);
2531 if (ret) {
2532 pr_err("cache operation failed %d\n", ret);
2533 goto loadapp_err;
2534 }
2535
2536 /* SCM_CALL to load the app and get the app_id back */
2537 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2538 cmd_len, &resp, sizeof(resp));
2539 if (ret) {
2540 pr_err("scm_call to load app failed\n");
2541 if (!IS_ERR_OR_NULL(ihandle))
2542 ion_free(qseecom.ion_clnt, ihandle);
2543 ret = -EINVAL;
2544 goto loadapp_err;
2545 }
2546
2547 if (resp.result == QSEOS_RESULT_FAILURE) {
2548 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2549 if (!IS_ERR_OR_NULL(ihandle))
2550 ion_free(qseecom.ion_clnt, ihandle);
2551 ret = -EFAULT;
2552 goto loadapp_err;
2553 }
2554
2555 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2556 ret = __qseecom_process_incomplete_cmd(data, &resp);
2557 if (ret) {
2558 pr_err("process_incomplete_cmd failed err: %d\n",
2559 ret);
2560 if (!IS_ERR_OR_NULL(ihandle))
2561 ion_free(qseecom.ion_clnt, ihandle);
2562 ret = -EFAULT;
2563 goto loadapp_err;
2564 }
2565 }
2566
2567 if (resp.result != QSEOS_RESULT_SUCCESS) {
2568 pr_err("scm_call failed resp.result unknown, %d\n",
2569 resp.result);
2570 if (!IS_ERR_OR_NULL(ihandle))
2571 ion_free(qseecom.ion_clnt, ihandle);
2572 ret = -EFAULT;
2573 goto loadapp_err;
2574 }
2575
2576 app_id = resp.data;
2577
2578 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2579 if (!entry) {
2580 ret = -ENOMEM;
2581 goto loadapp_err;
2582 }
2583 entry->app_id = app_id;
2584 entry->ref_cnt = 1;
2585 entry->app_arch = load_img_req.app_arch;
2586 /*
2587 * keymaster app may be first loaded as "keymaste" by qseecomd,
2588 * and then used as "keymaster" on some targets. To avoid app
2589 * name checking error, register "keymaster" into app_list and
2590 * thread private data.
2591 */
2592 if (!strcmp(load_img_req.img_name, "keymaste"))
2593 strlcpy(entry->app_name, "keymaster",
2594 MAX_APP_NAME_SIZE);
2595 else
2596 strlcpy(entry->app_name, load_img_req.img_name,
2597 MAX_APP_NAME_SIZE);
2598 entry->app_blocked = false;
2599 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07002600 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002601
2602 /* Deallocate the handle */
2603 if (!IS_ERR_OR_NULL(ihandle))
2604 ion_free(qseecom.ion_clnt, ihandle);
2605
2606 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2607 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2608 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2609 flags);
2610
2611 pr_warn("App with id %u (%s) now loaded\n", app_id,
2612 (char *)(load_img_req.img_name));
2613 }
2614 data->client.app_id = app_id;
2615 data->client.app_arch = load_img_req.app_arch;
2616 if (!strcmp(load_img_req.img_name, "keymaste"))
2617 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2618 else
2619 strlcpy(data->client.app_name, load_img_req.img_name,
2620 MAX_APP_NAME_SIZE);
2621 load_img_req.app_id = app_id;
2622 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2623 pr_err("copy_to_user failed\n");
2624 ret = -EFAULT;
2625 if (first_time == true) {
2626 spin_lock_irqsave(
2627 &qseecom.registered_app_list_lock, flags);
2628 list_del(&entry->list);
2629 spin_unlock_irqrestore(
2630 &qseecom.registered_app_list_lock, flags);
2631 kzfree(entry);
2632 }
2633 }
2634
2635loadapp_err:
2636 __qseecom_disable_clk_scale_down(data);
2637enable_clk_err:
2638 if (qseecom.support_bus_scaling) {
2639 mutex_lock(&qsee_bw_mutex);
2640 qseecom_unregister_bus_bandwidth_needs(data);
2641 mutex_unlock(&qsee_bw_mutex);
2642 }
2643 return ret;
2644}
2645
2646static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2647{
2648 int ret = 1; /* Set unload app */
2649
2650 wake_up_all(&qseecom.send_resp_wq);
2651 if (qseecom.qsee_reentrancy_support)
2652 mutex_unlock(&app_access_lock);
2653 while (atomic_read(&data->ioctl_count) > 1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302654 if (wait_event_interruptible(data->abort_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002655 atomic_read(&data->ioctl_count) <= 1)) {
2656 pr_err("Interrupted from abort\n");
2657 ret = -ERESTARTSYS;
2658 break;
2659 }
2660 }
2661 if (qseecom.qsee_reentrancy_support)
2662 mutex_lock(&app_access_lock);
2663 return ret;
2664}
2665
2666static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2667{
2668 int ret = 0;
2669
2670 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2671 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2672 ion_free(qseecom.ion_clnt, data->client.ihandle);
2673 data->client.ihandle = NULL;
2674 }
2675 return ret;
2676}
2677
2678static int qseecom_unload_app(struct qseecom_dev_handle *data,
2679 bool app_crash)
2680{
2681 unsigned long flags;
2682 unsigned long flags1;
2683 int ret = 0;
2684 struct qseecom_command_scm_resp resp;
2685 struct qseecom_registered_app_list *ptr_app = NULL;
2686 bool unload = false;
2687 bool found_app = false;
2688 bool found_dead_app = false;
Zhen Kongf818f152019-03-13 12:31:32 -07002689 bool scm_called = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002690
2691 if (!data) {
2692 pr_err("Invalid/uninitialized device handle\n");
2693 return -EINVAL;
2694 }
2695
2696 if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
2697 pr_debug("Do not unload keymaster app from tz\n");
2698 goto unload_exit;
2699 }
2700
2701 __qseecom_cleanup_app(data);
2702 __qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
2703
2704 if (data->client.app_id > 0) {
2705 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2706 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
2707 list) {
2708 if (ptr_app->app_id == data->client.app_id) {
2709 if (!strcmp((void *)ptr_app->app_name,
2710 (void *)data->client.app_name)) {
2711 found_app = true;
Zhen Kong024798b2018-07-13 18:14:26 -07002712 if (ptr_app->app_blocked ||
2713 ptr_app->check_block)
Zhen Kongaf93d7a2017-10-13 14:01:48 -07002714 app_crash = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002715 if (app_crash || ptr_app->ref_cnt == 1)
2716 unload = true;
2717 break;
2718 }
2719 found_dead_app = true;
2720 break;
2721 }
2722 }
2723 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2724 flags);
2725 if (found_app == false && found_dead_app == false) {
2726 pr_err("Cannot find app with id = %d (%s)\n",
2727 data->client.app_id,
2728 (char *)data->client.app_name);
2729 ret = -EINVAL;
2730 goto unload_exit;
2731 }
2732 }
2733
2734 if (found_dead_app)
2735 pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
2736 (char *)data->client.app_name);
2737
2738 if (unload) {
2739 struct qseecom_unload_app_ireq req;
2740 /* Populate the structure for sending scm call to load image */
2741 req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
2742 req.app_id = data->client.app_id;
2743
2744 /* SCM_CALL to unload the app */
2745 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2746 sizeof(struct qseecom_unload_app_ireq),
2747 &resp, sizeof(resp));
Zhen Kongf818f152019-03-13 12:31:32 -07002748 scm_called = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002749 if (ret) {
2750 pr_err("scm_call to unload app (id = %d) failed\n",
2751 req.app_id);
2752 ret = -EFAULT;
Zhen Kongf818f152019-03-13 12:31:32 -07002753 goto scm_exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002754 } else {
2755 pr_warn("App id %d now unloaded\n", req.app_id);
2756 }
2757 if (resp.result == QSEOS_RESULT_FAILURE) {
2758 pr_err("app (%d) unload_failed!!\n",
2759 data->client.app_id);
2760 ret = -EFAULT;
Zhen Kongf818f152019-03-13 12:31:32 -07002761 goto scm_exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002762 }
2763 if (resp.result == QSEOS_RESULT_SUCCESS)
2764 pr_debug("App (%d) is unloaded!!\n",
2765 data->client.app_id);
2766 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2767 ret = __qseecom_process_incomplete_cmd(data, &resp);
2768 if (ret) {
2769 pr_err("process_incomplete_cmd fail err: %d\n",
2770 ret);
Zhen Kongf818f152019-03-13 12:31:32 -07002771 goto scm_exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002772 }
2773 }
2774 }
2775
Zhen Kongf818f152019-03-13 12:31:32 -07002776scm_exit:
2777 if (scm_called) {
2778 /* double check if this app_entry still exists */
2779 bool doublecheck = false;
2780
2781 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2782 list_for_each_entry(ptr_app,
2783 &qseecom.registered_app_list_head, list) {
2784 if ((ptr_app->app_id == data->client.app_id) &&
2785 (!strcmp((void *)ptr_app->app_name,
2786 (void *)data->client.app_name))) {
2787 doublecheck = true;
2788 break;
2789 }
2790 }
2791 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2792 flags1);
2793 if (!doublecheck) {
2794 pr_warn("app %d(%s) entry is already removed\n",
2795 data->client.app_id,
2796 (char *)data->client.app_name);
2797 found_app = false;
2798 }
2799 }
Zhen Kong7d500032018-08-06 16:58:31 -07002800unload_exit:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002801 if (found_app) {
2802 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2803 if (app_crash) {
2804 ptr_app->ref_cnt = 0;
2805 pr_debug("app_crash: ref_count = 0\n");
2806 } else {
2807 if (ptr_app->ref_cnt == 1) {
2808 ptr_app->ref_cnt = 0;
2809 pr_debug("ref_count set to 0\n");
2810 } else {
2811 ptr_app->ref_cnt--;
2812 pr_debug("Can't unload app(%d) inuse\n",
2813 ptr_app->app_id);
2814 }
2815 }
2816 if (unload) {
2817 list_del(&ptr_app->list);
2818 kzfree(ptr_app);
2819 }
2820 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2821 flags1);
2822 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002823 qseecom_unmap_ion_allocated_memory(data);
2824 data->released = true;
2825 return ret;
2826}
2827
2828static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2829 unsigned long virt)
2830{
2831 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2832}
2833
2834static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2835 unsigned long virt)
2836{
2837 return (uintptr_t)data->client.sb_virt +
2838 (virt - data->client.user_virt_sb_base);
2839}
2840
2841int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2842 struct qseecom_send_svc_cmd_req *req_ptr,
2843 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2844{
2845 int ret = 0;
2846 void *req_buf = NULL;
2847
2848 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2849 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2850 req_ptr, send_svc_ireq_ptr);
2851 return -EINVAL;
2852 }
2853
2854 /* Clients need to ensure req_buf is at base offset of shared buffer */
2855 if ((uintptr_t)req_ptr->cmd_req_buf !=
2856 data_ptr->client.user_virt_sb_base) {
2857 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2858 return -EINVAL;
2859 }
2860
2861 if (data_ptr->client.sb_length <
2862 sizeof(struct qseecom_rpmb_provision_key)) {
2863 pr_err("shared buffer is too small to hold key type\n");
2864 return -EINVAL;
2865 }
2866 req_buf = data_ptr->client.sb_virt;
2867
2868 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2869 send_svc_ireq_ptr->key_type =
2870 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2871 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2872 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2873 data_ptr, (uintptr_t)req_ptr->resp_buf));
2874 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2875
2876 return ret;
2877}
2878
2879int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2880 struct qseecom_send_svc_cmd_req *req_ptr,
2881 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2882{
2883 int ret = 0;
2884 uint32_t reqd_len_sb_in = 0;
2885
2886 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2887 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2888 req_ptr, send_svc_ireq_ptr);
2889 return -EINVAL;
2890 }
2891
2892 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2893 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2894 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2895 pr_err("Required: %u, Available: %zu\n",
2896 reqd_len_sb_in, data_ptr->client.sb_length);
2897 return -ENOMEM;
2898 }
2899
2900 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2901 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2902 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2903 data_ptr, (uintptr_t)req_ptr->resp_buf));
2904 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2905
2906 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2907 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2908
2909
2910 return ret;
2911}
2912
2913static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
2914 struct qseecom_send_svc_cmd_req *req)
2915{
2916 if (!req || !req->resp_buf || !req->cmd_req_buf) {
2917 pr_err("req or cmd buffer or response buffer is null\n");
2918 return -EINVAL;
2919 }
2920
2921 if (!data || !data->client.ihandle) {
2922 pr_err("Client or client handle is not initialized\n");
2923 return -EINVAL;
2924 }
2925
2926 if (data->client.sb_virt == NULL) {
2927 pr_err("sb_virt null\n");
2928 return -EINVAL;
2929 }
2930
2931 if (data->client.user_virt_sb_base == 0) {
2932 pr_err("user_virt_sb_base is null\n");
2933 return -EINVAL;
2934 }
2935
2936 if (data->client.sb_length == 0) {
2937 pr_err("sb_length is 0\n");
2938 return -EINVAL;
2939 }
2940
2941 if (((uintptr_t)req->cmd_req_buf <
2942 data->client.user_virt_sb_base) ||
2943 ((uintptr_t)req->cmd_req_buf >=
2944 (data->client.user_virt_sb_base + data->client.sb_length))) {
2945 pr_err("cmd buffer address not within shared bufffer\n");
2946 return -EINVAL;
2947 }
2948 if (((uintptr_t)req->resp_buf <
2949 data->client.user_virt_sb_base) ||
2950 ((uintptr_t)req->resp_buf >=
2951 (data->client.user_virt_sb_base + data->client.sb_length))) {
2952 pr_err("response buffer address not within shared bufffer\n");
2953 return -EINVAL;
2954 }
2955 if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
2956 (req->cmd_req_len > data->client.sb_length) ||
2957 (req->resp_len > data->client.sb_length)) {
2958 pr_err("cmd buf length or response buf length not valid\n");
2959 return -EINVAL;
2960 }
2961 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
2962 pr_err("Integer overflow detected in req_len & rsp_len\n");
2963 return -EINVAL;
2964 }
2965
2966 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
2967 pr_debug("Not enough memory to fit cmd_buf.\n");
2968 pr_debug("resp_buf. Required: %u, Available: %zu\n",
2969 (req->cmd_req_len + req->resp_len),
2970 data->client.sb_length);
2971 return -ENOMEM;
2972 }
2973 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
2974 pr_err("Integer overflow in req_len & cmd_req_buf\n");
2975 return -EINVAL;
2976 }
2977 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
2978 pr_err("Integer overflow in resp_len & resp_buf\n");
2979 return -EINVAL;
2980 }
2981 if (data->client.user_virt_sb_base >
2982 (ULONG_MAX - data->client.sb_length)) {
2983 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
2984 return -EINVAL;
2985 }
2986 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
2987 ((uintptr_t)data->client.user_virt_sb_base +
2988 data->client.sb_length)) ||
2989 (((uintptr_t)req->resp_buf + req->resp_len) >
2990 ((uintptr_t)data->client.user_virt_sb_base +
2991 data->client.sb_length))) {
2992 pr_err("cmd buf or resp buf is out of shared buffer region\n");
2993 return -EINVAL;
2994 }
2995 return 0;
2996}
2997
2998static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
2999 void __user *argp)
3000{
3001 int ret = 0;
3002 struct qseecom_client_send_service_ireq send_svc_ireq;
3003 struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
3004 struct qseecom_command_scm_resp resp;
3005 struct qseecom_send_svc_cmd_req req;
3006 void *send_req_ptr;
3007 size_t req_buf_size;
3008
3009 /*struct qseecom_command_scm_resp resp;*/
3010
3011 if (copy_from_user(&req,
3012 (void __user *)argp,
3013 sizeof(req))) {
3014 pr_err("copy_from_user failed\n");
3015 return -EFAULT;
3016 }
3017
3018 if (__validate_send_service_cmd_inputs(data, &req))
3019 return -EINVAL;
3020
3021 data->type = QSEECOM_SECURE_SERVICE;
3022
3023 switch (req.cmd_id) {
3024 case QSEOS_RPMB_PROVISION_KEY_COMMAND:
3025 case QSEOS_RPMB_ERASE_COMMAND:
3026 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
3027 send_req_ptr = &send_svc_ireq;
3028 req_buf_size = sizeof(send_svc_ireq);
3029 if (__qseecom_process_rpmb_svc_cmd(data, &req,
3030 send_req_ptr))
3031 return -EINVAL;
3032 break;
3033 case QSEOS_FSM_LTEOTA_REQ_CMD:
3034 case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
3035 case QSEOS_FSM_IKE_REQ_CMD:
3036 case QSEOS_FSM_IKE_REQ_RSP_CMD:
3037 case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
3038 case QSEOS_FSM_OEM_FUSE_READ_ROW:
3039 case QSEOS_FSM_ENCFS_REQ_CMD:
3040 case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
3041 send_req_ptr = &send_fsm_key_svc_ireq;
3042 req_buf_size = sizeof(send_fsm_key_svc_ireq);
3043 if (__qseecom_process_fsm_key_svc_cmd(data, &req,
3044 send_req_ptr))
3045 return -EINVAL;
3046 break;
3047 default:
3048 pr_err("Unsupported cmd_id %d\n", req.cmd_id);
3049 return -EINVAL;
3050 }
3051
3052 if (qseecom.support_bus_scaling) {
3053 ret = qseecom_scale_bus_bandwidth_timer(HIGH);
3054 if (ret) {
3055 pr_err("Fail to set bw HIGH\n");
3056 return ret;
3057 }
3058 } else {
3059 ret = qseecom_perf_enable(data);
3060 if (ret) {
3061 pr_err("Failed to vote for clocks with err %d\n", ret);
3062 goto exit;
3063 }
3064 }
3065
3066 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3067 data->client.sb_virt, data->client.sb_length,
3068 ION_IOC_CLEAN_INV_CACHES);
3069 if (ret) {
3070 pr_err("cache operation failed %d\n", ret);
3071 goto exit;
3072 }
3073 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3074 (const void *)send_req_ptr,
3075 req_buf_size, &resp, sizeof(resp));
3076 if (ret) {
3077 pr_err("qseecom_scm_call failed with err: %d\n", ret);
3078 if (!qseecom.support_bus_scaling) {
3079 qsee_disable_clock_vote(data, CLK_DFAB);
3080 qsee_disable_clock_vote(data, CLK_SFPB);
3081 } else {
3082 __qseecom_add_bw_scale_down_timer(
3083 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3084 }
3085 goto exit;
3086 }
3087 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3088 data->client.sb_virt, data->client.sb_length,
3089 ION_IOC_INV_CACHES);
3090 if (ret) {
3091 pr_err("cache operation failed %d\n", ret);
3092 goto exit;
3093 }
3094 switch (resp.result) {
3095 case QSEOS_RESULT_SUCCESS:
3096 break;
3097 case QSEOS_RESULT_INCOMPLETE:
3098 pr_debug("qseos_result_incomplete\n");
3099 ret = __qseecom_process_incomplete_cmd(data, &resp);
3100 if (ret) {
3101 pr_err("process_incomplete_cmd fail with result: %d\n",
3102 resp.result);
3103 }
3104 if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
3105 pr_warn("RPMB key status is 0x%x\n", resp.result);
Brahmaji Kb33e26e2017-06-01 17:20:10 +05303106 if (put_user(resp.result,
3107 (uint32_t __user *)req.resp_buf)) {
3108 ret = -EINVAL;
3109 goto exit;
3110 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003111 ret = 0;
3112 }
3113 break;
3114 case QSEOS_RESULT_FAILURE:
3115 pr_err("scm call failed with resp.result: %d\n", resp.result);
3116 ret = -EINVAL;
3117 break;
3118 default:
3119 pr_err("Response result %d not supported\n",
3120 resp.result);
3121 ret = -EINVAL;
3122 break;
3123 }
3124 if (!qseecom.support_bus_scaling) {
3125 qsee_disable_clock_vote(data, CLK_DFAB);
3126 qsee_disable_clock_vote(data, CLK_SFPB);
3127 } else {
3128 __qseecom_add_bw_scale_down_timer(
3129 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3130 }
3131
3132exit:
3133 return ret;
3134}
3135
3136static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
3137 struct qseecom_send_cmd_req *req)
3138
3139{
3140 if (!data || !data->client.ihandle) {
3141 pr_err("Client or client handle is not initialized\n");
3142 return -EINVAL;
3143 }
3144 if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
3145 (req->cmd_req_buf == NULL)) {
3146 pr_err("cmd buffer or response buffer is null\n");
3147 return -EINVAL;
3148 }
3149 if (((uintptr_t)req->cmd_req_buf <
3150 data->client.user_virt_sb_base) ||
3151 ((uintptr_t)req->cmd_req_buf >=
3152 (data->client.user_virt_sb_base + data->client.sb_length))) {
3153 pr_err("cmd buffer address not within shared bufffer\n");
3154 return -EINVAL;
3155 }
3156 if (((uintptr_t)req->resp_buf <
3157 data->client.user_virt_sb_base) ||
3158 ((uintptr_t)req->resp_buf >=
3159 (data->client.user_virt_sb_base + data->client.sb_length))) {
3160 pr_err("response buffer address not within shared bufffer\n");
3161 return -EINVAL;
3162 }
3163 if ((req->cmd_req_len == 0) ||
3164 (req->cmd_req_len > data->client.sb_length) ||
3165 (req->resp_len > data->client.sb_length)) {
3166 pr_err("cmd buf length or response buf length not valid\n");
3167 return -EINVAL;
3168 }
3169 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
3170 pr_err("Integer overflow detected in req_len & rsp_len\n");
3171 return -EINVAL;
3172 }
3173
3174 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
3175 pr_debug("Not enough memory to fit cmd_buf.\n");
3176 pr_debug("resp_buf. Required: %u, Available: %zu\n",
3177 (req->cmd_req_len + req->resp_len),
3178 data->client.sb_length);
3179 return -ENOMEM;
3180 }
3181 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
3182 pr_err("Integer overflow in req_len & cmd_req_buf\n");
3183 return -EINVAL;
3184 }
3185 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
3186 pr_err("Integer overflow in resp_len & resp_buf\n");
3187 return -EINVAL;
3188 }
3189 if (data->client.user_virt_sb_base >
3190 (ULONG_MAX - data->client.sb_length)) {
3191 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
3192 return -EINVAL;
3193 }
3194 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3195 ((uintptr_t)data->client.user_virt_sb_base +
3196 data->client.sb_length)) ||
3197 (((uintptr_t)req->resp_buf + req->resp_len) >
3198 ((uintptr_t)data->client.user_virt_sb_base +
3199 data->client.sb_length))) {
3200 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3201 return -EINVAL;
3202 }
3203 return 0;
3204}
3205
3206int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
3207 struct qseecom_registered_app_list *ptr_app,
3208 struct qseecom_dev_handle *data)
3209{
3210 int ret = 0;
3211
3212 switch (resp->result) {
3213 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
3214 pr_warn("App(%d) %s is blocked on listener %d\n",
3215 data->client.app_id, data->client.app_name,
3216 resp->data);
3217 ret = __qseecom_process_reentrancy_blocked_on_listener(
3218 resp, ptr_app, data);
3219 if (ret) {
3220 pr_err("failed to process App(%d) %s is blocked on listener %d\n",
3221 data->client.app_id, data->client.app_name, resp->data);
3222 return ret;
3223 }
3224
3225 case QSEOS_RESULT_INCOMPLETE:
3226 qseecom.app_block_ref_cnt++;
3227 ptr_app->app_blocked = true;
3228 ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
3229 ptr_app->app_blocked = false;
3230 qseecom.app_block_ref_cnt--;
Zhen Kongcc580932019-04-23 22:16:56 -07003231 wake_up_interruptible_all(&qseecom.app_block_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003232 if (ret)
3233 pr_err("process_incomplete_cmd failed err: %d\n",
3234 ret);
3235 return ret;
3236 case QSEOS_RESULT_SUCCESS:
3237 return ret;
3238 default:
3239 pr_err("Response result %d not supported\n",
3240 resp->result);
3241 return -EINVAL;
3242 }
3243}
3244
3245static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
3246 struct qseecom_send_cmd_req *req)
3247{
3248 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07003249 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003250 u32 reqd_len_sb_in = 0;
3251 struct qseecom_client_send_data_ireq send_data_req = {0};
3252 struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
3253 struct qseecom_command_scm_resp resp;
3254 unsigned long flags;
3255 struct qseecom_registered_app_list *ptr_app;
3256 bool found_app = false;
3257 void *cmd_buf = NULL;
3258 size_t cmd_len;
3259 struct sglist_info *table = data->sglistinfo_ptr;
3260
3261 reqd_len_sb_in = req->cmd_req_len + req->resp_len;
3262 /* find app_id & img_name from list */
3263 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
3264 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
3265 list) {
3266 if ((ptr_app->app_id == data->client.app_id) &&
3267 (!strcmp(ptr_app->app_name, data->client.app_name))) {
3268 found_app = true;
3269 break;
3270 }
3271 }
3272 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
3273
3274 if (!found_app) {
3275 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
3276 (char *)data->client.app_name);
3277 return -ENOENT;
3278 }
3279
3280 if (qseecom.qsee_version < QSEE_VERSION_40) {
3281 send_data_req.app_id = data->client.app_id;
3282 send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3283 data, (uintptr_t)req->cmd_req_buf));
3284 send_data_req.req_len = req->cmd_req_len;
3285 send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3286 data, (uintptr_t)req->resp_buf));
3287 send_data_req.rsp_len = req->resp_len;
3288 send_data_req.sglistinfo_ptr =
3289 (uint32_t)virt_to_phys(table);
3290 send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3291 dmac_flush_range((void *)table,
3292 (void *)table + SGLISTINFO_TABLE_SIZE);
3293 cmd_buf = (void *)&send_data_req;
3294 cmd_len = sizeof(struct qseecom_client_send_data_ireq);
3295 } else {
3296 send_data_req_64bit.app_id = data->client.app_id;
3297 send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
3298 (uintptr_t)req->cmd_req_buf);
3299 send_data_req_64bit.req_len = req->cmd_req_len;
3300 send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
3301 (uintptr_t)req->resp_buf);
3302 send_data_req_64bit.rsp_len = req->resp_len;
3303 /* check if 32bit app's phys_addr region is under 4GB.*/
3304 if ((data->client.app_arch == ELFCLASS32) &&
3305 ((send_data_req_64bit.req_ptr >=
3306 PHY_ADDR_4G - send_data_req_64bit.req_len) ||
3307 (send_data_req_64bit.rsp_ptr >=
3308 PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
3309 pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
3310 data->client.app_name,
3311 send_data_req_64bit.req_ptr,
3312 send_data_req_64bit.req_len,
3313 send_data_req_64bit.rsp_ptr,
3314 send_data_req_64bit.rsp_len);
3315 return -EFAULT;
3316 }
3317 send_data_req_64bit.sglistinfo_ptr =
3318 (uint64_t)virt_to_phys(table);
3319 send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3320 dmac_flush_range((void *)table,
3321 (void *)table + SGLISTINFO_TABLE_SIZE);
3322 cmd_buf = (void *)&send_data_req_64bit;
3323 cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
3324 }
3325
3326 if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
3327 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
3328 else
3329 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
3330
3331 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3332 data->client.sb_virt,
3333 reqd_len_sb_in,
3334 ION_IOC_CLEAN_INV_CACHES);
3335 if (ret) {
3336 pr_err("cache operation failed %d\n", ret);
3337 return ret;
3338 }
3339
3340 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
3341
3342 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3343 cmd_buf, cmd_len,
3344 &resp, sizeof(resp));
3345 if (ret) {
3346 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
3347 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07003348 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003349 }
3350
3351 if (qseecom.qsee_reentrancy_support) {
3352 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07003353 if (ret)
3354 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003355 } else {
3356 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
3357 ret = __qseecom_process_incomplete_cmd(data, &resp);
3358 if (ret) {
3359 pr_err("process_incomplete_cmd failed err: %d\n",
3360 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07003361 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003362 }
3363 } else {
3364 if (resp.result != QSEOS_RESULT_SUCCESS) {
3365 pr_err("Response result %d not supported\n",
3366 resp.result);
3367 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07003368 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003369 }
3370 }
3371 }
Zhen Kong4af480e2017-09-19 14:34:16 -07003372exit:
3373 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003374 data->client.sb_virt, data->client.sb_length,
3375 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07003376 if (ret2) {
3377 pr_err("cache operation failed %d\n", ret2);
3378 return ret2;
3379 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003380 return ret;
3381}
3382
3383static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3384{
3385 int ret = 0;
3386 struct qseecom_send_cmd_req req;
3387
3388 ret = copy_from_user(&req, argp, sizeof(req));
3389 if (ret) {
3390 pr_err("copy_from_user failed\n");
3391 return ret;
3392 }
3393
3394 if (__validate_send_cmd_inputs(data, &req))
3395 return -EINVAL;
3396
3397 ret = __qseecom_send_cmd(data, &req);
3398
3399 if (ret)
3400 return ret;
3401
3402 return ret;
3403}
3404
3405int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3406 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3407 struct qseecom_dev_handle *data, int i) {
3408
3409 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3410 (req->ifd_data[i].fd > 0)) {
3411 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3412 (req->ifd_data[i].cmd_buf_offset >
3413 req->cmd_req_len - sizeof(uint32_t))) {
3414 pr_err("Invalid offset (req len) 0x%x\n",
3415 req->ifd_data[i].cmd_buf_offset);
3416 return -EINVAL;
3417 }
3418 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3419 (lstnr_resp->ifd_data[i].fd > 0)) {
3420 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3421 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3422 lstnr_resp->resp_len - sizeof(uint32_t))) {
3423 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3424 lstnr_resp->ifd_data[i].cmd_buf_offset);
3425 return -EINVAL;
3426 }
3427 }
3428 return 0;
3429}
3430
3431static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
3432 struct qseecom_dev_handle *data)
3433{
3434 struct ion_handle *ihandle;
3435 char *field;
3436 int ret = 0;
3437 int i = 0;
3438 uint32_t len = 0;
3439 struct scatterlist *sg;
3440 struct qseecom_send_modfd_cmd_req *req = NULL;
3441 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3442 struct qseecom_registered_listener_list *this_lstnr = NULL;
3443 uint32_t offset;
3444 struct sg_table *sg_ptr;
3445
3446 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3447 (data->type != QSEECOM_CLIENT_APP))
3448 return -EFAULT;
3449
3450 if (msg == NULL) {
3451 pr_err("Invalid address\n");
3452 return -EINVAL;
3453 }
3454 if (data->type == QSEECOM_LISTENER_SERVICE) {
3455 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3456 this_lstnr = __qseecom_find_svc(data->listener.id);
3457 if (IS_ERR_OR_NULL(this_lstnr)) {
3458 pr_err("Invalid listener ID\n");
3459 return -ENOMEM;
3460 }
3461 } else {
3462 req = (struct qseecom_send_modfd_cmd_req *)msg;
3463 }
3464
3465 for (i = 0; i < MAX_ION_FD; i++) {
3466 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3467 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003468 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003469 req->ifd_data[i].fd);
3470 if (IS_ERR_OR_NULL(ihandle)) {
3471 pr_err("Ion client can't retrieve the handle\n");
3472 return -ENOMEM;
3473 }
3474 field = (char *) req->cmd_req_buf +
3475 req->ifd_data[i].cmd_buf_offset;
3476 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3477 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003478 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003479 lstnr_resp->ifd_data[i].fd);
3480 if (IS_ERR_OR_NULL(ihandle)) {
3481 pr_err("Ion client can't retrieve the handle\n");
3482 return -ENOMEM;
3483 }
3484 field = lstnr_resp->resp_buf_ptr +
3485 lstnr_resp->ifd_data[i].cmd_buf_offset;
3486 } else {
3487 continue;
3488 }
3489 /* Populate the cmd data structure with the phys_addr */
3490 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3491 if (IS_ERR_OR_NULL(sg_ptr)) {
3492 pr_err("IOn client could not retrieve sg table\n");
3493 goto err;
3494 }
3495 if (sg_ptr->nents == 0) {
3496 pr_err("Num of scattered entries is 0\n");
3497 goto err;
3498 }
3499 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3500 pr_err("Num of scattered entries");
3501 pr_err(" (%d) is greater than max supported %d\n",
3502 sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3503 goto err;
3504 }
3505 sg = sg_ptr->sgl;
3506 if (sg_ptr->nents == 1) {
3507 uint32_t *update;
3508
3509 if (__boundary_checks_offset(req, lstnr_resp, data, i))
3510 goto err;
3511 if ((data->type == QSEECOM_CLIENT_APP &&
3512 (data->client.app_arch == ELFCLASS32 ||
3513 data->client.app_arch == ELFCLASS64)) ||
3514 (data->type == QSEECOM_LISTENER_SERVICE)) {
3515 /*
3516 * Check if sg list phy add region is under 4GB
3517 */
3518 if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
3519 (!cleanup) &&
3520 ((uint64_t)sg_dma_address(sg_ptr->sgl)
3521 >= PHY_ADDR_4G - sg->length)) {
3522 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3523 data->client.app_name,
3524 &(sg_dma_address(sg_ptr->sgl)),
3525 sg->length);
3526 goto err;
3527 }
3528 update = (uint32_t *) field;
3529 *update = cleanup ? 0 :
3530 (uint32_t)sg_dma_address(sg_ptr->sgl);
3531 } else {
3532 pr_err("QSEE app arch %u is not supported\n",
3533 data->client.app_arch);
3534 goto err;
3535 }
3536 len += (uint32_t)sg->length;
3537 } else {
3538 struct qseecom_sg_entry *update;
3539 int j = 0;
3540
3541 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3542 (req->ifd_data[i].fd > 0)) {
3543
3544 if ((req->cmd_req_len <
3545 SG_ENTRY_SZ * sg_ptr->nents) ||
3546 (req->ifd_data[i].cmd_buf_offset >
3547 (req->cmd_req_len -
3548 SG_ENTRY_SZ * sg_ptr->nents))) {
3549 pr_err("Invalid offset = 0x%x\n",
3550 req->ifd_data[i].cmd_buf_offset);
3551 goto err;
3552 }
3553
3554 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3555 (lstnr_resp->ifd_data[i].fd > 0)) {
3556
3557 if ((lstnr_resp->resp_len <
3558 SG_ENTRY_SZ * sg_ptr->nents) ||
3559 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3560 (lstnr_resp->resp_len -
3561 SG_ENTRY_SZ * sg_ptr->nents))) {
3562 goto err;
3563 }
3564 }
3565 if ((data->type == QSEECOM_CLIENT_APP &&
3566 (data->client.app_arch == ELFCLASS32 ||
3567 data->client.app_arch == ELFCLASS64)) ||
3568 (data->type == QSEECOM_LISTENER_SERVICE)) {
3569 update = (struct qseecom_sg_entry *)field;
3570 for (j = 0; j < sg_ptr->nents; j++) {
3571 /*
3572 * Check if sg list PA is under 4GB
3573 */
3574 if ((qseecom.qsee_version >=
3575 QSEE_VERSION_40) &&
3576 (!cleanup) &&
3577 ((uint64_t)(sg_dma_address(sg))
3578 >= PHY_ADDR_4G - sg->length)) {
3579 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3580 data->client.app_name,
3581 &(sg_dma_address(sg)),
3582 sg->length);
3583 goto err;
3584 }
3585 update->phys_addr = cleanup ? 0 :
3586 (uint32_t)sg_dma_address(sg);
3587 update->len = cleanup ? 0 : sg->length;
3588 update++;
3589 len += sg->length;
3590 sg = sg_next(sg);
3591 }
3592 } else {
3593 pr_err("QSEE app arch %u is not supported\n",
3594 data->client.app_arch);
3595 goto err;
3596 }
3597 }
3598
3599 if (cleanup) {
3600 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3601 ihandle, NULL, len,
3602 ION_IOC_INV_CACHES);
3603 if (ret) {
3604 pr_err("cache operation failed %d\n", ret);
3605 goto err;
3606 }
3607 } else {
3608 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3609 ihandle, NULL, len,
3610 ION_IOC_CLEAN_INV_CACHES);
3611 if (ret) {
3612 pr_err("cache operation failed %d\n", ret);
3613 goto err;
3614 }
3615 if (data->type == QSEECOM_CLIENT_APP) {
3616 offset = req->ifd_data[i].cmd_buf_offset;
3617 data->sglistinfo_ptr[i].indexAndFlags =
3618 SGLISTINFO_SET_INDEX_FLAG(
3619 (sg_ptr->nents == 1), 0, offset);
3620 data->sglistinfo_ptr[i].sizeOrCount =
3621 (sg_ptr->nents == 1) ?
3622 sg->length : sg_ptr->nents;
3623 data->sglist_cnt = i + 1;
3624 } else {
3625 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3626 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3627 (uintptr_t)this_lstnr->sb_virt);
3628 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3629 SGLISTINFO_SET_INDEX_FLAG(
3630 (sg_ptr->nents == 1), 0, offset);
3631 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3632 (sg_ptr->nents == 1) ?
3633 sg->length : sg_ptr->nents;
3634 this_lstnr->sglist_cnt = i + 1;
3635 }
3636 }
3637 /* Deallocate the handle */
3638 if (!IS_ERR_OR_NULL(ihandle))
3639 ion_free(qseecom.ion_clnt, ihandle);
3640 }
3641 return ret;
3642err:
3643 if (!IS_ERR_OR_NULL(ihandle))
3644 ion_free(qseecom.ion_clnt, ihandle);
3645 return -ENOMEM;
3646}
3647
3648static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3649 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3650{
3651 struct scatterlist *sg = sg_ptr->sgl;
3652 struct qseecom_sg_entry_64bit *sg_entry;
3653 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3654 void *buf;
3655 uint i;
3656 size_t size;
3657 dma_addr_t coh_pmem;
3658
3659 if (fd_idx >= MAX_ION_FD) {
3660 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3661 return -ENOMEM;
3662 }
3663 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3664 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3665 /* Allocate a contiguous kernel buffer */
3666 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3667 size = (size + PAGE_SIZE) & PAGE_MASK;
3668 buf = dma_alloc_coherent(qseecom.pdev,
3669 size, &coh_pmem, GFP_KERNEL);
3670 if (buf == NULL) {
3671 pr_err("failed to alloc memory for sg buf\n");
3672 return -ENOMEM;
3673 }
3674 /* update qseecom_sg_list_buf_hdr_64bit */
3675 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3676 buf_hdr->new_buf_phys_addr = coh_pmem;
3677 buf_hdr->nents_total = sg_ptr->nents;
3678 /* save the left sg entries into new allocated buf */
3679 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3680 for (i = 0; i < sg_ptr->nents; i++) {
3681 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3682 sg_entry->len = sg->length;
3683 sg_entry++;
3684 sg = sg_next(sg);
3685 }
3686
3687 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3688 data->client.sec_buf_fd[fd_idx].vbase = buf;
3689 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3690 data->client.sec_buf_fd[fd_idx].size = size;
3691
3692 return 0;
3693}
3694
3695static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
3696 struct qseecom_dev_handle *data)
3697{
3698 struct ion_handle *ihandle;
3699 char *field;
3700 int ret = 0;
3701 int i = 0;
3702 uint32_t len = 0;
3703 struct scatterlist *sg;
3704 struct qseecom_send_modfd_cmd_req *req = NULL;
3705 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3706 struct qseecom_registered_listener_list *this_lstnr = NULL;
3707 uint32_t offset;
3708 struct sg_table *sg_ptr;
3709
3710 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3711 (data->type != QSEECOM_CLIENT_APP))
3712 return -EFAULT;
3713
3714 if (msg == NULL) {
3715 pr_err("Invalid address\n");
3716 return -EINVAL;
3717 }
3718 if (data->type == QSEECOM_LISTENER_SERVICE) {
3719 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3720 this_lstnr = __qseecom_find_svc(data->listener.id);
3721 if (IS_ERR_OR_NULL(this_lstnr)) {
3722 pr_err("Invalid listener ID\n");
3723 return -ENOMEM;
3724 }
3725 } else {
3726 req = (struct qseecom_send_modfd_cmd_req *)msg;
3727 }
3728
3729 for (i = 0; i < MAX_ION_FD; i++) {
3730 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3731 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003732 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003733 req->ifd_data[i].fd);
3734 if (IS_ERR_OR_NULL(ihandle)) {
3735 pr_err("Ion client can't retrieve the handle\n");
3736 return -ENOMEM;
3737 }
3738 field = (char *) req->cmd_req_buf +
3739 req->ifd_data[i].cmd_buf_offset;
3740 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3741 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003742 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003743 lstnr_resp->ifd_data[i].fd);
3744 if (IS_ERR_OR_NULL(ihandle)) {
3745 pr_err("Ion client can't retrieve the handle\n");
3746 return -ENOMEM;
3747 }
3748 field = lstnr_resp->resp_buf_ptr +
3749 lstnr_resp->ifd_data[i].cmd_buf_offset;
3750 } else {
3751 continue;
3752 }
3753 /* Populate the cmd data structure with the phys_addr */
3754 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3755 if (IS_ERR_OR_NULL(sg_ptr)) {
3756 pr_err("IOn client could not retrieve sg table\n");
3757 goto err;
3758 }
3759 if (sg_ptr->nents == 0) {
3760 pr_err("Num of scattered entries is 0\n");
3761 goto err;
3762 }
3763 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3764 pr_warn("Num of scattered entries");
3765 pr_warn(" (%d) is greater than %d\n",
3766 sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3767 if (cleanup) {
3768 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3769 data->client.sec_buf_fd[i].vbase)
3770 dma_free_coherent(qseecom.pdev,
3771 data->client.sec_buf_fd[i].size,
3772 data->client.sec_buf_fd[i].vbase,
3773 data->client.sec_buf_fd[i].pbase);
3774 } else {
3775 ret = __qseecom_allocate_sg_list_buffer(data,
3776 field, i, sg_ptr);
3777 if (ret) {
3778 pr_err("Failed to allocate sg list buffer\n");
3779 goto err;
3780 }
3781 }
3782 len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
3783 sg = sg_ptr->sgl;
3784 goto cleanup;
3785 }
3786 sg = sg_ptr->sgl;
3787 if (sg_ptr->nents == 1) {
3788 uint64_t *update_64bit;
3789
3790 if (__boundary_checks_offset(req, lstnr_resp, data, i))
3791 goto err;
3792 /* 64bit app uses 64bit address */
3793 update_64bit = (uint64_t *) field;
3794 *update_64bit = cleanup ? 0 :
3795 (uint64_t)sg_dma_address(sg_ptr->sgl);
3796 len += (uint32_t)sg->length;
3797 } else {
3798 struct qseecom_sg_entry_64bit *update_64bit;
3799 int j = 0;
3800
3801 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3802 (req->ifd_data[i].fd > 0)) {
3803
3804 if ((req->cmd_req_len <
3805 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3806 (req->ifd_data[i].cmd_buf_offset >
3807 (req->cmd_req_len -
3808 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3809 pr_err("Invalid offset = 0x%x\n",
3810 req->ifd_data[i].cmd_buf_offset);
3811 goto err;
3812 }
3813
3814 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3815 (lstnr_resp->ifd_data[i].fd > 0)) {
3816
3817 if ((lstnr_resp->resp_len <
3818 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3819 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3820 (lstnr_resp->resp_len -
3821 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3822 goto err;
3823 }
3824 }
3825 /* 64bit app uses 64bit address */
3826 update_64bit = (struct qseecom_sg_entry_64bit *)field;
3827 for (j = 0; j < sg_ptr->nents; j++) {
3828 update_64bit->phys_addr = cleanup ? 0 :
3829 (uint64_t)sg_dma_address(sg);
3830 update_64bit->len = cleanup ? 0 :
3831 (uint32_t)sg->length;
3832 update_64bit++;
3833 len += sg->length;
3834 sg = sg_next(sg);
3835 }
3836 }
3837cleanup:
3838 if (cleanup) {
3839 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3840 ihandle, NULL, len,
3841 ION_IOC_INV_CACHES);
3842 if (ret) {
3843 pr_err("cache operation failed %d\n", ret);
3844 goto err;
3845 }
3846 } else {
3847 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3848 ihandle, NULL, len,
3849 ION_IOC_CLEAN_INV_CACHES);
3850 if (ret) {
3851 pr_err("cache operation failed %d\n", ret);
3852 goto err;
3853 }
3854 if (data->type == QSEECOM_CLIENT_APP) {
3855 offset = req->ifd_data[i].cmd_buf_offset;
3856 data->sglistinfo_ptr[i].indexAndFlags =
3857 SGLISTINFO_SET_INDEX_FLAG(
3858 (sg_ptr->nents == 1), 1, offset);
3859 data->sglistinfo_ptr[i].sizeOrCount =
3860 (sg_ptr->nents == 1) ?
3861 sg->length : sg_ptr->nents;
3862 data->sglist_cnt = i + 1;
3863 } else {
3864 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3865 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3866 (uintptr_t)this_lstnr->sb_virt);
3867 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3868 SGLISTINFO_SET_INDEX_FLAG(
3869 (sg_ptr->nents == 1), 1, offset);
3870 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3871 (sg_ptr->nents == 1) ?
3872 sg->length : sg_ptr->nents;
3873 this_lstnr->sglist_cnt = i + 1;
3874 }
3875 }
3876 /* Deallocate the handle */
3877 if (!IS_ERR_OR_NULL(ihandle))
3878 ion_free(qseecom.ion_clnt, ihandle);
3879 }
3880 return ret;
3881err:
3882 for (i = 0; i < MAX_ION_FD; i++)
3883 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3884 data->client.sec_buf_fd[i].vbase)
3885 dma_free_coherent(qseecom.pdev,
3886 data->client.sec_buf_fd[i].size,
3887 data->client.sec_buf_fd[i].vbase,
3888 data->client.sec_buf_fd[i].pbase);
3889 if (!IS_ERR_OR_NULL(ihandle))
3890 ion_free(qseecom.ion_clnt, ihandle);
3891 return -ENOMEM;
3892}
3893
3894static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3895 void __user *argp,
3896 bool is_64bit_addr)
3897{
3898 int ret = 0;
3899 int i;
3900 struct qseecom_send_modfd_cmd_req req;
3901 struct qseecom_send_cmd_req send_cmd_req;
3902
3903 ret = copy_from_user(&req, argp, sizeof(req));
3904 if (ret) {
3905 pr_err("copy_from_user failed\n");
3906 return ret;
3907 }
3908
3909 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3910 send_cmd_req.cmd_req_len = req.cmd_req_len;
3911 send_cmd_req.resp_buf = req.resp_buf;
3912 send_cmd_req.resp_len = req.resp_len;
3913
3914 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3915 return -EINVAL;
3916
3917 /* validate offsets */
3918 for (i = 0; i < MAX_ION_FD; i++) {
3919 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3920 pr_err("Invalid offset %d = 0x%x\n",
3921 i, req.ifd_data[i].cmd_buf_offset);
3922 return -EINVAL;
3923 }
3924 }
3925 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3926 (uintptr_t)req.cmd_req_buf);
3927 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3928 (uintptr_t)req.resp_buf);
3929
3930 if (!is_64bit_addr) {
3931 ret = __qseecom_update_cmd_buf(&req, false, data);
3932 if (ret)
3933 return ret;
3934 ret = __qseecom_send_cmd(data, &send_cmd_req);
3935 if (ret)
3936 return ret;
3937 ret = __qseecom_update_cmd_buf(&req, true, data);
3938 if (ret)
3939 return ret;
3940 } else {
3941 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3942 if (ret)
3943 return ret;
3944 ret = __qseecom_send_cmd(data, &send_cmd_req);
3945 if (ret)
3946 return ret;
3947 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3948 if (ret)
3949 return ret;
3950 }
3951
3952 return ret;
3953}
3954
3955static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3956 void __user *argp)
3957{
3958 return __qseecom_send_modfd_cmd(data, argp, false);
3959}
3960
3961static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
3962 void __user *argp)
3963{
3964 return __qseecom_send_modfd_cmd(data, argp, true);
3965}
3966
3967
3968
3969static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3970 struct qseecom_registered_listener_list *svc)
3971{
3972 int ret;
3973
Zhen Kongf5087172018-10-11 17:22:05 -07003974 ret = (svc->rcv_req_flag == 1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08003975 return ret || data->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003976}
3977
3978static int qseecom_receive_req(struct qseecom_dev_handle *data)
3979{
3980 int ret = 0;
3981 struct qseecom_registered_listener_list *this_lstnr;
3982
Zhen Kongbcdeda22018-11-16 13:50:51 -08003983 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003984 this_lstnr = __qseecom_find_svc(data->listener.id);
3985 if (!this_lstnr) {
3986 pr_err("Invalid listener ID\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08003987 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003988 return -ENODATA;
3989 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08003990 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003991
3992 while (1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05303993 if (wait_event_interruptible(this_lstnr->rcv_req_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003994 __qseecom_listener_has_rcvd_req(data,
3995 this_lstnr))) {
Zhen Kong25731112018-09-20 13:10:03 -07003996 pr_warn("Interrupted: exiting Listener Service = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003997 (uint32_t)data->listener.id);
3998 /* woken up for different reason */
3999 return -ERESTARTSYS;
4000 }
4001
Zhen Kongbcdeda22018-11-16 13:50:51 -08004002 if (data->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004003 pr_err("Aborting Listener Service = %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07004004 (uint32_t)data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004005 return -ENODEV;
4006 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08004007 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004008 this_lstnr->rcv_req_flag = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08004009 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004010 break;
4011 }
4012 return ret;
4013}
4014
4015static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
4016{
4017 unsigned char app_arch = 0;
4018 struct elf32_hdr *ehdr;
4019 struct elf64_hdr *ehdr64;
4020
4021 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4022
4023 switch (app_arch) {
4024 case ELFCLASS32: {
4025 ehdr = (struct elf32_hdr *)fw_entry->data;
4026 if (fw_entry->size < sizeof(*ehdr)) {
4027 pr_err("%s: Not big enough to be an elf32 header\n",
4028 qseecom.pdev->init_name);
4029 return false;
4030 }
4031 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
4032 pr_err("%s: Not an elf32 header\n",
4033 qseecom.pdev->init_name);
4034 return false;
4035 }
4036 if (ehdr->e_phnum == 0) {
4037 pr_err("%s: No loadable segments\n",
4038 qseecom.pdev->init_name);
4039 return false;
4040 }
4041 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
4042 sizeof(struct elf32_hdr) > fw_entry->size) {
4043 pr_err("%s: Program headers not within mdt\n",
4044 qseecom.pdev->init_name);
4045 return false;
4046 }
4047 break;
4048 }
4049 case ELFCLASS64: {
4050 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4051 if (fw_entry->size < sizeof(*ehdr64)) {
4052 pr_err("%s: Not big enough to be an elf64 header\n",
4053 qseecom.pdev->init_name);
4054 return false;
4055 }
4056 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
4057 pr_err("%s: Not an elf64 header\n",
4058 qseecom.pdev->init_name);
4059 return false;
4060 }
4061 if (ehdr64->e_phnum == 0) {
4062 pr_err("%s: No loadable segments\n",
4063 qseecom.pdev->init_name);
4064 return false;
4065 }
4066 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
4067 sizeof(struct elf64_hdr) > fw_entry->size) {
4068 pr_err("%s: Program headers not within mdt\n",
4069 qseecom.pdev->init_name);
4070 return false;
4071 }
4072 break;
4073 }
4074 default: {
4075 pr_err("QSEE app arch %u is not supported\n", app_arch);
4076 return false;
4077 }
4078 }
4079 return true;
4080}
4081
4082static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
4083 uint32_t *app_arch)
4084{
4085 int ret = -1;
4086 int i = 0, rc = 0;
4087 const struct firmware *fw_entry = NULL;
4088 char fw_name[MAX_APP_NAME_SIZE];
4089 struct elf32_hdr *ehdr;
4090 struct elf64_hdr *ehdr64;
4091 int num_images = 0;
4092
4093 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4094 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4095 if (rc) {
4096 pr_err("error with request_firmware\n");
4097 ret = -EIO;
4098 goto err;
4099 }
4100 if (!__qseecom_is_fw_image_valid(fw_entry)) {
4101 ret = -EIO;
4102 goto err;
4103 }
4104 *app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4105 *fw_size = fw_entry->size;
4106 if (*app_arch == ELFCLASS32) {
4107 ehdr = (struct elf32_hdr *)fw_entry->data;
4108 num_images = ehdr->e_phnum;
4109 } else if (*app_arch == ELFCLASS64) {
4110 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4111 num_images = ehdr64->e_phnum;
4112 } else {
4113 pr_err("QSEE %s app, arch %u is not supported\n",
4114 appname, *app_arch);
4115 ret = -EIO;
4116 goto err;
4117 }
4118 pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
4119 release_firmware(fw_entry);
4120 fw_entry = NULL;
4121 for (i = 0; i < num_images; i++) {
4122 memset(fw_name, 0, sizeof(fw_name));
4123 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4124 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4125 if (ret)
4126 goto err;
4127 if (*fw_size > U32_MAX - fw_entry->size) {
4128 pr_err("QSEE %s app file size overflow\n", appname);
4129 ret = -EINVAL;
4130 goto err;
4131 }
4132 *fw_size += fw_entry->size;
4133 release_firmware(fw_entry);
4134 fw_entry = NULL;
4135 }
4136
4137 return ret;
4138err:
4139 if (fw_entry)
4140 release_firmware(fw_entry);
4141 *fw_size = 0;
4142 return ret;
4143}
4144
4145static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
4146 uint32_t fw_size,
4147 struct qseecom_load_app_ireq *load_req)
4148{
4149 int ret = -1;
4150 int i = 0, rc = 0;
4151 const struct firmware *fw_entry = NULL;
4152 char fw_name[MAX_APP_NAME_SIZE];
4153 u8 *img_data_ptr = img_data;
4154 struct elf32_hdr *ehdr;
4155 struct elf64_hdr *ehdr64;
4156 int num_images = 0;
4157 unsigned char app_arch = 0;
4158
4159 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4160 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4161 if (rc) {
4162 ret = -EIO;
4163 goto err;
4164 }
4165
4166 load_req->img_len = fw_entry->size;
4167 if (load_req->img_len > fw_size) {
4168 pr_err("app %s size %zu is larger than buf size %u\n",
4169 appname, fw_entry->size, fw_size);
4170 ret = -EINVAL;
4171 goto err;
4172 }
4173 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4174 img_data_ptr = img_data_ptr + fw_entry->size;
4175 load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/
4176
4177 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4178 if (app_arch == ELFCLASS32) {
4179 ehdr = (struct elf32_hdr *)fw_entry->data;
4180 num_images = ehdr->e_phnum;
4181 } else if (app_arch == ELFCLASS64) {
4182 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4183 num_images = ehdr64->e_phnum;
4184 } else {
4185 pr_err("QSEE %s app, arch %u is not supported\n",
4186 appname, app_arch);
4187 ret = -EIO;
4188 goto err;
4189 }
4190 release_firmware(fw_entry);
4191 fw_entry = NULL;
4192 for (i = 0; i < num_images; i++) {
4193 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4194 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4195 if (ret) {
4196 pr_err("Failed to locate blob %s\n", fw_name);
4197 goto err;
4198 }
4199 if ((fw_entry->size > U32_MAX - load_req->img_len) ||
4200 (fw_entry->size + load_req->img_len > fw_size)) {
4201 pr_err("Invalid file size for %s\n", fw_name);
4202 ret = -EINVAL;
4203 goto err;
4204 }
4205 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4206 img_data_ptr = img_data_ptr + fw_entry->size;
4207 load_req->img_len += fw_entry->size;
4208 release_firmware(fw_entry);
4209 fw_entry = NULL;
4210 }
4211 return ret;
4212err:
4213 release_firmware(fw_entry);
4214 return ret;
4215}
4216
4217static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
4218 u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
4219{
4220 size_t len = 0;
4221 int ret = 0;
4222 ion_phys_addr_t pa;
4223 struct ion_handle *ihandle = NULL;
4224 u8 *img_data = NULL;
Zhen Kong3dd92792017-12-08 09:47:15 -08004225 int retry = 0;
Zhen Konge30e1342019-01-22 08:57:02 -08004226 int ion_flag = ION_FLAG_CACHED;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004227
Zhen Kong3dd92792017-12-08 09:47:15 -08004228 do {
Zhen Kong5d02be92018-05-29 16:17:29 -07004229 if (retry++) {
4230 mutex_unlock(&app_access_lock);
Zhen Kong3dd92792017-12-08 09:47:15 -08004231 msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
Zhen Kong5d02be92018-05-29 16:17:29 -07004232 mutex_lock(&app_access_lock);
4233 }
Zhen Kong3dd92792017-12-08 09:47:15 -08004234 ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
Zhen Konge30e1342019-01-22 08:57:02 -08004235 SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), ion_flag);
Zhen Kong3dd92792017-12-08 09:47:15 -08004236 } while (IS_ERR_OR_NULL(ihandle) &&
4237 (retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004238
4239 if (IS_ERR_OR_NULL(ihandle)) {
4240 pr_err("ION alloc failed\n");
4241 return -ENOMEM;
4242 }
4243 img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
4244 ihandle);
4245
4246 if (IS_ERR_OR_NULL(img_data)) {
4247 pr_err("ION memory mapping for image loading failed\n");
4248 ret = -ENOMEM;
4249 goto exit_ion_free;
4250 }
4251 /* Get the physical address of the ION BUF */
4252 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
4253 if (ret) {
4254 pr_err("physical memory retrieval failure\n");
4255 ret = -EIO;
4256 goto exit_ion_unmap_kernel;
4257 }
4258
4259 *pihandle = ihandle;
4260 *data = img_data;
4261 *paddr = pa;
4262 return ret;
4263
4264exit_ion_unmap_kernel:
4265 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
4266exit_ion_free:
4267 ion_free(qseecom.ion_clnt, ihandle);
4268 ihandle = NULL;
4269 return ret;
4270}
4271
4272static void __qseecom_free_img_data(struct ion_handle **ihandle)
4273{
4274 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4275 ion_free(qseecom.ion_clnt, *ihandle);
4276 *ihandle = NULL;
4277}
4278
4279static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4280 uint32_t *app_id)
4281{
4282 int ret = -1;
4283 uint32_t fw_size = 0;
4284 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4285 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4286 struct qseecom_command_scm_resp resp;
4287 u8 *img_data = NULL;
4288 ion_phys_addr_t pa = 0;
4289 struct ion_handle *ihandle = NULL;
4290 void *cmd_buf = NULL;
4291 size_t cmd_len;
4292 uint32_t app_arch = 0;
4293
4294 if (!data || !appname || !app_id) {
4295 pr_err("Null pointer to data or appname or appid\n");
4296 return -EINVAL;
4297 }
4298 *app_id = 0;
4299 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4300 return -EIO;
4301 data->client.app_arch = app_arch;
4302
4303 /* Check and load cmnlib */
4304 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4305 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4306 ret = qseecom_load_commonlib_image(data, "cmnlib");
4307 if (ret) {
4308 pr_err("failed to load cmnlib\n");
4309 return -EIO;
4310 }
4311 qseecom.commonlib_loaded = true;
4312 pr_debug("cmnlib is loaded\n");
4313 }
4314
4315 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4316 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4317 if (ret) {
4318 pr_err("failed to load cmnlib64\n");
4319 return -EIO;
4320 }
4321 qseecom.commonlib64_loaded = true;
4322 pr_debug("cmnlib64 is loaded\n");
4323 }
4324 }
4325
4326 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4327 if (ret)
4328 return ret;
4329
4330 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4331 if (ret) {
4332 ret = -EIO;
4333 goto exit_free_img_data;
4334 }
4335
4336 /* Populate the load_req parameters */
4337 if (qseecom.qsee_version < QSEE_VERSION_40) {
4338 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4339 load_req.mdt_len = load_req.mdt_len;
4340 load_req.img_len = load_req.img_len;
4341 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4342 load_req.phy_addr = (uint32_t)pa;
4343 cmd_buf = (void *)&load_req;
4344 cmd_len = sizeof(struct qseecom_load_app_ireq);
4345 } else {
4346 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4347 load_req_64bit.mdt_len = load_req.mdt_len;
4348 load_req_64bit.img_len = load_req.img_len;
4349 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4350 load_req_64bit.phy_addr = (uint64_t)pa;
4351 cmd_buf = (void *)&load_req_64bit;
4352 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4353 }
4354
4355 if (qseecom.support_bus_scaling) {
4356 mutex_lock(&qsee_bw_mutex);
4357 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4358 mutex_unlock(&qsee_bw_mutex);
4359 if (ret) {
4360 ret = -EIO;
4361 goto exit_free_img_data;
4362 }
4363 }
4364
4365 ret = __qseecom_enable_clk_scale_up(data);
4366 if (ret) {
4367 ret = -EIO;
4368 goto exit_unregister_bus_bw_need;
4369 }
4370
4371 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4372 img_data, fw_size,
4373 ION_IOC_CLEAN_INV_CACHES);
4374 if (ret) {
4375 pr_err("cache operation failed %d\n", ret);
4376 goto exit_disable_clk_vote;
4377 }
4378
4379 /* SCM_CALL to load the image */
4380 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4381 &resp, sizeof(resp));
4382 if (ret) {
Zhen Kong5d02be92018-05-29 16:17:29 -07004383 pr_err("scm_call to load failed : ret %d, result %x\n",
4384 ret, resp.result);
4385 if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
4386 ret = -EEXIST;
4387 else
4388 ret = -EIO;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004389 goto exit_disable_clk_vote;
4390 }
4391
4392 switch (resp.result) {
4393 case QSEOS_RESULT_SUCCESS:
4394 *app_id = resp.data;
4395 break;
4396 case QSEOS_RESULT_INCOMPLETE:
4397 ret = __qseecom_process_incomplete_cmd(data, &resp);
4398 if (ret)
4399 pr_err("process_incomplete_cmd FAILED\n");
4400 else
4401 *app_id = resp.data;
4402 break;
4403 case QSEOS_RESULT_FAILURE:
4404 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4405 break;
4406 default:
4407 pr_err("scm call return unknown response %d\n", resp.result);
4408 ret = -EINVAL;
4409 break;
4410 }
4411
4412exit_disable_clk_vote:
4413 __qseecom_disable_clk_scale_down(data);
4414
4415exit_unregister_bus_bw_need:
4416 if (qseecom.support_bus_scaling) {
4417 mutex_lock(&qsee_bw_mutex);
4418 qseecom_unregister_bus_bandwidth_needs(data);
4419 mutex_unlock(&qsee_bw_mutex);
4420 }
4421
4422exit_free_img_data:
4423 __qseecom_free_img_data(&ihandle);
4424 return ret;
4425}
4426
4427static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
4428 char *cmnlib_name)
4429{
4430 int ret = 0;
4431 uint32_t fw_size = 0;
4432 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4433 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4434 struct qseecom_command_scm_resp resp;
4435 u8 *img_data = NULL;
4436 ion_phys_addr_t pa = 0;
4437 void *cmd_buf = NULL;
4438 size_t cmd_len;
4439 uint32_t app_arch = 0;
Zhen Kong3bafb312017-10-18 10:27:20 -07004440 struct ion_handle *cmnlib_ion_handle = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004441
4442 if (!cmnlib_name) {
4443 pr_err("cmnlib_name is NULL\n");
4444 return -EINVAL;
4445 }
4446 if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
4447 pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
4448 cmnlib_name, strlen(cmnlib_name));
4449 return -EINVAL;
4450 }
4451
4452 if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
4453 return -EIO;
4454
Zhen Kong3bafb312017-10-18 10:27:20 -07004455 ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004456 &img_data, fw_size, &pa);
4457 if (ret)
4458 return -EIO;
4459
4460 ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
4461 if (ret) {
4462 ret = -EIO;
4463 goto exit_free_img_data;
4464 }
4465 if (qseecom.qsee_version < QSEE_VERSION_40) {
4466 load_req.phy_addr = (uint32_t)pa;
4467 load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4468 cmd_buf = (void *)&load_req;
4469 cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
4470 } else {
4471 load_req_64bit.phy_addr = (uint64_t)pa;
4472 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4473 load_req_64bit.img_len = load_req.img_len;
4474 load_req_64bit.mdt_len = load_req.mdt_len;
4475 cmd_buf = (void *)&load_req_64bit;
4476 cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
4477 }
4478
4479 if (qseecom.support_bus_scaling) {
4480 mutex_lock(&qsee_bw_mutex);
4481 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4482 mutex_unlock(&qsee_bw_mutex);
4483 if (ret) {
4484 ret = -EIO;
4485 goto exit_free_img_data;
4486 }
4487 }
4488
4489 /* Vote for the SFPB clock */
4490 ret = __qseecom_enable_clk_scale_up(data);
4491 if (ret) {
4492 ret = -EIO;
4493 goto exit_unregister_bus_bw_need;
4494 }
4495
Zhen Kong3bafb312017-10-18 10:27:20 -07004496 ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004497 img_data, fw_size,
4498 ION_IOC_CLEAN_INV_CACHES);
4499 if (ret) {
4500 pr_err("cache operation failed %d\n", ret);
4501 goto exit_disable_clk_vote;
4502 }
4503
4504 /* SCM_CALL to load the image */
4505 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4506 &resp, sizeof(resp));
4507 if (ret) {
4508 pr_err("scm_call to load failed : ret %d\n", ret);
4509 ret = -EIO;
4510 goto exit_disable_clk_vote;
4511 }
4512
4513 switch (resp.result) {
4514 case QSEOS_RESULT_SUCCESS:
4515 break;
4516 case QSEOS_RESULT_FAILURE:
4517 pr_err("scm call failed w/response result%d\n", resp.result);
4518 ret = -EINVAL;
4519 goto exit_disable_clk_vote;
4520 case QSEOS_RESULT_INCOMPLETE:
4521 ret = __qseecom_process_incomplete_cmd(data, &resp);
4522 if (ret) {
4523 pr_err("process_incomplete_cmd failed err: %d\n", ret);
4524 goto exit_disable_clk_vote;
4525 }
4526 break;
4527 default:
4528 pr_err("scm call return unknown response %d\n", resp.result);
4529 ret = -EINVAL;
4530 goto exit_disable_clk_vote;
4531 }
4532
4533exit_disable_clk_vote:
4534 __qseecom_disable_clk_scale_down(data);
4535
4536exit_unregister_bus_bw_need:
4537 if (qseecom.support_bus_scaling) {
4538 mutex_lock(&qsee_bw_mutex);
4539 qseecom_unregister_bus_bandwidth_needs(data);
4540 mutex_unlock(&qsee_bw_mutex);
4541 }
4542
4543exit_free_img_data:
Zhen Kong3bafb312017-10-18 10:27:20 -07004544 __qseecom_free_img_data(&cmnlib_ion_handle);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004545 return ret;
4546}
4547
4548static int qseecom_unload_commonlib_image(void)
4549{
4550 int ret = -EINVAL;
4551 struct qseecom_unload_lib_image_ireq unload_req = {0};
4552 struct qseecom_command_scm_resp resp;
4553
4554 /* Populate the remaining parameters */
4555 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4556
4557 /* SCM_CALL to load the image */
4558 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4559 sizeof(struct qseecom_unload_lib_image_ireq),
4560 &resp, sizeof(resp));
4561 if (ret) {
4562 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4563 ret = -EIO;
4564 } else {
4565 switch (resp.result) {
4566 case QSEOS_RESULT_SUCCESS:
4567 break;
4568 case QSEOS_RESULT_FAILURE:
4569 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4570 break;
4571 default:
4572 pr_err("scm call return unknown response %d\n",
4573 resp.result);
4574 ret = -EINVAL;
4575 break;
4576 }
4577 }
4578
4579 return ret;
4580}
4581
4582int qseecom_start_app(struct qseecom_handle **handle,
4583 char *app_name, uint32_t size)
4584{
4585 int32_t ret = 0;
4586 unsigned long flags = 0;
4587 struct qseecom_dev_handle *data = NULL;
4588 struct qseecom_check_app_ireq app_ireq;
4589 struct qseecom_registered_app_list *entry = NULL;
4590 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4591 bool found_app = false;
4592 size_t len;
4593 ion_phys_addr_t pa;
4594 uint32_t fw_size, app_arch;
4595 uint32_t app_id = 0;
4596
Zhen Kongc4c162a2019-01-23 12:07:12 -08004597 __wakeup_unregister_listener_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004598
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004599 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4600 pr_err("Not allowed to be called in %d state\n",
4601 atomic_read(&qseecom.qseecom_state));
4602 return -EPERM;
4603 }
4604 if (!app_name) {
4605 pr_err("failed to get the app name\n");
4606 return -EINVAL;
4607 }
4608
Zhen Kong64a6d7282017-06-16 11:55:07 -07004609 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004610 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004611 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004612 return -EINVAL;
4613 }
4614
4615 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4616 if (!(*handle))
4617 return -ENOMEM;
4618
4619 data = kzalloc(sizeof(*data), GFP_KERNEL);
4620 if (!data) {
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304621 ret = -ENOMEM;
4622 goto exit_handle_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004623 }
4624 data->abort = 0;
4625 data->type = QSEECOM_CLIENT_APP;
4626 data->released = false;
4627 data->client.sb_length = size;
4628 data->client.user_virt_sb_base = 0;
4629 data->client.ihandle = NULL;
4630
4631 init_waitqueue_head(&data->abort_wq);
4632
4633 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4634 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4635 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4636 pr_err("Ion client could not retrieve the handle\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304637 ret = -ENOMEM;
4638 goto exit_data_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004639 }
4640 mutex_lock(&app_access_lock);
4641
Zhen Kong5d02be92018-05-29 16:17:29 -07004642recheck:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004643 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4644 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4645 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4646 if (ret)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304647 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004648
4649 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4650 if (app_id) {
4651 pr_warn("App id %d for [%s] app exists\n", app_id,
4652 (char *)app_ireq.app_name);
4653 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4654 list_for_each_entry(entry,
4655 &qseecom.registered_app_list_head, list){
4656 if (entry->app_id == app_id) {
4657 entry->ref_cnt++;
4658 found_app = true;
4659 break;
4660 }
4661 }
4662 spin_unlock_irqrestore(
4663 &qseecom.registered_app_list_lock, flags);
4664 if (!found_app)
4665 pr_warn("App_id %d [%s] was loaded but not registered\n",
4666 ret, (char *)app_ireq.app_name);
4667 } else {
4668 /* load the app and get the app_id */
4669 pr_debug("%s: Loading app for the first time'\n",
4670 qseecom.pdev->init_name);
4671 ret = __qseecom_load_fw(data, app_name, &app_id);
Zhen Kong5d02be92018-05-29 16:17:29 -07004672 if (ret == -EEXIST) {
4673 pr_err("recheck if TA %s is loaded\n", app_name);
4674 goto recheck;
4675 } else if (ret < 0)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304676 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004677 }
4678 data->client.app_id = app_id;
4679 if (!found_app) {
4680 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4681 if (!entry) {
4682 pr_err("kmalloc for app entry failed\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304683 ret = -ENOMEM;
4684 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004685 }
4686 entry->app_id = app_id;
4687 entry->ref_cnt = 1;
4688 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4689 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4690 ret = -EIO;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304691 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004692 }
4693 entry->app_arch = app_arch;
4694 entry->app_blocked = false;
4695 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07004696 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004697 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4698 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4699 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4700 flags);
4701 }
4702
4703 /* Get the physical address of the ION BUF */
4704 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4705 if (ret) {
4706 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4707 ret);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304708 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004709 }
4710
4711 /* Populate the structure for sending scm call to load image */
4712 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4713 data->client.ihandle);
4714 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4715 pr_err("ION memory mapping for client shared buf failed\n");
4716 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304717 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004718 }
4719 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4720 data->client.sb_phys = (phys_addr_t)pa;
4721 (*handle)->dev = (void *)data;
4722 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4723 (*handle)->sbuf_len = data->client.sb_length;
4724
4725 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4726 if (!kclient_entry) {
4727 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304728 goto exit_ion_unmap_kernel;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004729 }
4730 kclient_entry->handle = *handle;
4731
4732 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4733 list_add_tail(&kclient_entry->list,
4734 &qseecom.registered_kclient_list_head);
4735 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4736
4737 mutex_unlock(&app_access_lock);
4738 return 0;
4739
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304740exit_ion_unmap_kernel:
4741 if (!IS_ERR_OR_NULL(data->client.ihandle))
4742 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
4743exit_entry_free:
4744 kfree(entry);
4745exit_ion_free:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004746 mutex_unlock(&app_access_lock);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304747 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
4748 ion_free(qseecom.ion_clnt, data->client.ihandle);
4749 data->client.ihandle = NULL;
4750 }
4751exit_data_free:
4752 kfree(data);
4753exit_handle_free:
4754 if (*handle) {
4755 kfree(*handle);
4756 *handle = NULL;
4757 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004758 return ret;
4759}
4760EXPORT_SYMBOL(qseecom_start_app);
4761
4762int qseecom_shutdown_app(struct qseecom_handle **handle)
4763{
4764 int ret = -EINVAL;
4765 struct qseecom_dev_handle *data;
4766
4767 struct qseecom_registered_kclient_list *kclient = NULL;
4768 unsigned long flags = 0;
4769 bool found_handle = false;
4770
Zhen Kongc4c162a2019-01-23 12:07:12 -08004771 __wakeup_unregister_listener_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004772
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004773 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4774 pr_err("Not allowed to be called in %d state\n",
4775 atomic_read(&qseecom.qseecom_state));
4776 return -EPERM;
4777 }
4778
4779 if ((handle == NULL) || (*handle == NULL)) {
4780 pr_err("Handle is not initialized\n");
4781 return -EINVAL;
4782 }
4783 data = (struct qseecom_dev_handle *) ((*handle)->dev);
4784 mutex_lock(&app_access_lock);
4785
4786 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4787 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
4788 list) {
4789 if (kclient->handle == (*handle)) {
4790 list_del(&kclient->list);
4791 found_handle = true;
4792 break;
4793 }
4794 }
4795 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4796 if (!found_handle)
4797 pr_err("Unable to find the handle, exiting\n");
4798 else
4799 ret = qseecom_unload_app(data, false);
4800
4801 mutex_unlock(&app_access_lock);
4802 if (ret == 0) {
4803 kzfree(data);
4804 kzfree(*handle);
4805 kzfree(kclient);
4806 *handle = NULL;
4807 }
4808
4809 return ret;
4810}
4811EXPORT_SYMBOL(qseecom_shutdown_app);
4812
4813int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
4814 uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
4815{
4816 int ret = 0;
4817 struct qseecom_send_cmd_req req = {0, 0, 0, 0};
4818 struct qseecom_dev_handle *data;
4819 bool perf_enabled = false;
4820
Zhen Kongc4c162a2019-01-23 12:07:12 -08004821 __wakeup_unregister_listener_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004822
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004823 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4824 pr_err("Not allowed to be called in %d state\n",
4825 atomic_read(&qseecom.qseecom_state));
4826 return -EPERM;
4827 }
4828
4829 if (handle == NULL) {
4830 pr_err("Handle is not initialized\n");
4831 return -EINVAL;
4832 }
4833 data = handle->dev;
4834
4835 req.cmd_req_len = sbuf_len;
4836 req.resp_len = rbuf_len;
4837 req.cmd_req_buf = send_buf;
4838 req.resp_buf = resp_buf;
4839
4840 if (__validate_send_cmd_inputs(data, &req))
4841 return -EINVAL;
4842
4843 mutex_lock(&app_access_lock);
4844 if (qseecom.support_bus_scaling) {
4845 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
4846 if (ret) {
4847 pr_err("Failed to set bw.\n");
4848 mutex_unlock(&app_access_lock);
4849 return ret;
4850 }
4851 }
4852 /*
4853 * On targets where crypto clock is handled by HLOS,
4854 * if clk_access_cnt is zero and perf_enabled is false,
4855 * then the crypto clock was not enabled before sending cmd
4856 * to tz, qseecom will enable the clock to avoid service failure.
4857 */
4858 if (!qseecom.no_clock_support &&
4859 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
4860 pr_debug("ce clock is not enabled!\n");
4861 ret = qseecom_perf_enable(data);
4862 if (ret) {
4863 pr_err("Failed to vote for clock with err %d\n",
4864 ret);
4865 mutex_unlock(&app_access_lock);
4866 return -EINVAL;
4867 }
4868 perf_enabled = true;
4869 }
4870 if (!strcmp(data->client.app_name, "securemm"))
4871 data->use_legacy_cmd = true;
4872
4873 ret = __qseecom_send_cmd(data, &req);
4874 data->use_legacy_cmd = false;
4875 if (qseecom.support_bus_scaling)
4876 __qseecom_add_bw_scale_down_timer(
4877 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
4878
4879 if (perf_enabled) {
4880 qsee_disable_clock_vote(data, CLK_DFAB);
4881 qsee_disable_clock_vote(data, CLK_SFPB);
4882 }
4883
4884 mutex_unlock(&app_access_lock);
4885
4886 if (ret)
4887 return ret;
4888
4889 pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
4890 req.resp_len, req.resp_buf);
4891 return ret;
4892}
4893EXPORT_SYMBOL(qseecom_send_command);
4894
4895int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4896{
4897 int ret = 0;
4898
4899 if ((handle == NULL) || (handle->dev == NULL)) {
4900 pr_err("No valid kernel client\n");
4901 return -EINVAL;
4902 }
4903 if (high) {
4904 if (qseecom.support_bus_scaling) {
4905 mutex_lock(&qsee_bw_mutex);
4906 __qseecom_register_bus_bandwidth_needs(handle->dev,
4907 HIGH);
4908 mutex_unlock(&qsee_bw_mutex);
4909 } else {
4910 ret = qseecom_perf_enable(handle->dev);
4911 if (ret)
4912 pr_err("Failed to vote for clock with err %d\n",
4913 ret);
4914 }
4915 } else {
4916 if (!qseecom.support_bus_scaling) {
4917 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4918 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4919 } else {
4920 mutex_lock(&qsee_bw_mutex);
4921 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4922 mutex_unlock(&qsee_bw_mutex);
4923 }
4924 }
4925 return ret;
4926}
4927EXPORT_SYMBOL(qseecom_set_bandwidth);
4928
4929int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
4930{
4931 struct qseecom_registered_app_list dummy_app_entry = { {0} };
4932 struct qseecom_dev_handle dummy_private_data = {0};
4933 struct qseecom_command_scm_resp resp;
4934 int ret = 0;
4935
4936 if (!desc) {
4937 pr_err("desc is NULL\n");
4938 return -EINVAL;
4939 }
4940
4941 resp.result = desc->ret[0]; /*req_cmd*/
Zhen Kong2f60f492017-06-29 15:22:14 -07004942 resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004943 resp.data = desc->ret[2]; /*listener_id*/
4944
Zhen Konge7f525f2017-12-01 18:26:25 -08004945 dummy_private_data.client.app_id = desc->ret[1];
4946 dummy_app_entry.app_id = desc->ret[1];
4947
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004948 mutex_lock(&app_access_lock);
Zhen Kong7458c2e2017-10-19 12:32:07 -07004949 if (qseecom.qsee_reentrancy_support)
4950 ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004951 &dummy_private_data);
Zhen Kong7458c2e2017-10-19 12:32:07 -07004952 else
4953 ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
4954 &resp);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004955 mutex_unlock(&app_access_lock);
4956 if (ret)
Zhen Kong2f60f492017-06-29 15:22:14 -07004957 pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004958 (int)desc->ret[0], (int)desc->ret[2],
4959 (int)desc->ret[1], ret);
4960 desc->ret[0] = resp.result;
4961 desc->ret[1] = resp.resp_type;
4962 desc->ret[2] = resp.data;
4963 return ret;
4964}
4965EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4966
4967static int qseecom_send_resp(void)
4968{
4969 qseecom.send_resp_flag = 1;
4970 wake_up_interruptible(&qseecom.send_resp_wq);
4971 return 0;
4972}
4973
4974static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4975{
4976 struct qseecom_registered_listener_list *this_lstnr = NULL;
4977
4978 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4979 this_lstnr = __qseecom_find_svc(data->listener.id);
4980 if (this_lstnr == NULL)
4981 return -EINVAL;
4982 qseecom.send_resp_flag = 1;
4983 this_lstnr->send_resp_flag = 1;
4984 wake_up_interruptible(&qseecom.send_resp_wq);
4985 return 0;
4986}
4987
4988static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
4989 struct qseecom_send_modfd_listener_resp *resp,
4990 struct qseecom_registered_listener_list *this_lstnr)
4991{
4992 int i;
4993
4994 if (!data || !resp || !this_lstnr) {
4995 pr_err("listener handle or resp msg is null\n");
4996 return -EINVAL;
4997 }
4998
4999 if (resp->resp_buf_ptr == NULL) {
5000 pr_err("resp buffer is null\n");
5001 return -EINVAL;
5002 }
5003 /* validate resp buf length */
5004 if ((resp->resp_len == 0) ||
5005 (resp->resp_len > this_lstnr->sb_length)) {
5006 pr_err("resp buf length %d not valid\n", resp->resp_len);
5007 return -EINVAL;
5008 }
5009
5010 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
5011 pr_err("Integer overflow in resp_len & resp_buf\n");
5012 return -EINVAL;
5013 }
5014 if ((uintptr_t)this_lstnr->user_virt_sb_base >
5015 (ULONG_MAX - this_lstnr->sb_length)) {
5016 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
5017 return -EINVAL;
5018 }
5019 /* validate resp buf */
5020 if (((uintptr_t)resp->resp_buf_ptr <
5021 (uintptr_t)this_lstnr->user_virt_sb_base) ||
5022 ((uintptr_t)resp->resp_buf_ptr >=
5023 ((uintptr_t)this_lstnr->user_virt_sb_base +
5024 this_lstnr->sb_length)) ||
5025 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
5026 ((uintptr_t)this_lstnr->user_virt_sb_base +
5027 this_lstnr->sb_length))) {
5028 pr_err("resp buf is out of shared buffer region\n");
5029 return -EINVAL;
5030 }
5031
5032 /* validate offsets */
5033 for (i = 0; i < MAX_ION_FD; i++) {
5034 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
5035 pr_err("Invalid offset %d = 0x%x\n",
5036 i, resp->ifd_data[i].cmd_buf_offset);
5037 return -EINVAL;
5038 }
5039 }
5040
5041 return 0;
5042}
5043
5044static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5045 void __user *argp, bool is_64bit_addr)
5046{
5047 struct qseecom_send_modfd_listener_resp resp;
5048 struct qseecom_registered_listener_list *this_lstnr = NULL;
5049
5050 if (copy_from_user(&resp, argp, sizeof(resp))) {
5051 pr_err("copy_from_user failed");
5052 return -EINVAL;
5053 }
5054
5055 this_lstnr = __qseecom_find_svc(data->listener.id);
5056 if (this_lstnr == NULL)
5057 return -EINVAL;
5058
5059 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
5060 return -EINVAL;
5061
5062 resp.resp_buf_ptr = this_lstnr->sb_virt +
5063 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
5064
5065 if (!is_64bit_addr)
5066 __qseecom_update_cmd_buf(&resp, false, data);
5067 else
5068 __qseecom_update_cmd_buf_64(&resp, false, data);
5069 qseecom.send_resp_flag = 1;
5070 this_lstnr->send_resp_flag = 1;
5071 wake_up_interruptible(&qseecom.send_resp_wq);
5072 return 0;
5073}
5074
5075static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5076 void __user *argp)
5077{
5078 return __qseecom_send_modfd_resp(data, argp, false);
5079}
5080
5081static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
5082 void __user *argp)
5083{
5084 return __qseecom_send_modfd_resp(data, argp, true);
5085}
5086
5087static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
5088 void __user *argp)
5089{
5090 struct qseecom_qseos_version_req req;
5091
5092 if (copy_from_user(&req, argp, sizeof(req))) {
5093 pr_err("copy_from_user failed");
5094 return -EINVAL;
5095 }
5096 req.qseos_version = qseecom.qseos_version;
5097 if (copy_to_user(argp, &req, sizeof(req))) {
5098 pr_err("copy_to_user failed");
5099 return -EINVAL;
5100 }
5101 return 0;
5102}
5103
5104static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
5105{
5106 int rc = 0;
5107 struct qseecom_clk *qclk = NULL;
5108
5109 if (qseecom.no_clock_support)
5110 return 0;
5111
5112 if (ce == CLK_QSEE)
5113 qclk = &qseecom.qsee;
5114 if (ce == CLK_CE_DRV)
5115 qclk = &qseecom.ce_drv;
5116
5117 if (qclk == NULL) {
5118 pr_err("CLK type not supported\n");
5119 return -EINVAL;
5120 }
5121 mutex_lock(&clk_access_lock);
5122
5123 if (qclk->clk_access_cnt == ULONG_MAX) {
5124 pr_err("clk_access_cnt beyond limitation\n");
5125 goto err;
5126 }
5127 if (qclk->clk_access_cnt > 0) {
5128 qclk->clk_access_cnt++;
5129 mutex_unlock(&clk_access_lock);
5130 return rc;
5131 }
5132
5133 /* Enable CE core clk */
5134 if (qclk->ce_core_clk != NULL) {
5135 rc = clk_prepare_enable(qclk->ce_core_clk);
5136 if (rc) {
5137 pr_err("Unable to enable/prepare CE core clk\n");
5138 goto err;
5139 }
5140 }
5141 /* Enable CE clk */
5142 if (qclk->ce_clk != NULL) {
5143 rc = clk_prepare_enable(qclk->ce_clk);
5144 if (rc) {
5145 pr_err("Unable to enable/prepare CE iface clk\n");
5146 goto ce_clk_err;
5147 }
5148 }
5149 /* Enable AXI clk */
5150 if (qclk->ce_bus_clk != NULL) {
5151 rc = clk_prepare_enable(qclk->ce_bus_clk);
5152 if (rc) {
5153 pr_err("Unable to enable/prepare CE bus clk\n");
5154 goto ce_bus_clk_err;
5155 }
5156 }
5157 qclk->clk_access_cnt++;
5158 mutex_unlock(&clk_access_lock);
5159 return 0;
5160
5161ce_bus_clk_err:
5162 if (qclk->ce_clk != NULL)
5163 clk_disable_unprepare(qclk->ce_clk);
5164ce_clk_err:
5165 if (qclk->ce_core_clk != NULL)
5166 clk_disable_unprepare(qclk->ce_core_clk);
5167err:
5168 mutex_unlock(&clk_access_lock);
5169 return -EIO;
5170}
5171
5172static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5173{
5174 struct qseecom_clk *qclk;
5175
5176 if (qseecom.no_clock_support)
5177 return;
5178
5179 if (ce == CLK_QSEE)
5180 qclk = &qseecom.qsee;
5181 else
5182 qclk = &qseecom.ce_drv;
5183
5184 mutex_lock(&clk_access_lock);
5185
5186 if (qclk->clk_access_cnt == 0) {
5187 mutex_unlock(&clk_access_lock);
5188 return;
5189 }
5190
5191 if (qclk->clk_access_cnt == 1) {
5192 if (qclk->ce_clk != NULL)
5193 clk_disable_unprepare(qclk->ce_clk);
5194 if (qclk->ce_core_clk != NULL)
5195 clk_disable_unprepare(qclk->ce_core_clk);
5196 if (qclk->ce_bus_clk != NULL)
5197 clk_disable_unprepare(qclk->ce_bus_clk);
5198 }
5199 qclk->clk_access_cnt--;
5200 mutex_unlock(&clk_access_lock);
5201}
5202
5203static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
5204 int32_t clk_type)
5205{
5206 int ret = 0;
5207 struct qseecom_clk *qclk;
5208
5209 if (qseecom.no_clock_support)
5210 return 0;
5211
5212 qclk = &qseecom.qsee;
5213 if (!qseecom.qsee_perf_client)
5214 return ret;
5215
5216 switch (clk_type) {
5217 case CLK_DFAB:
5218 mutex_lock(&qsee_bw_mutex);
5219 if (!qseecom.qsee_bw_count) {
5220 if (qseecom.qsee_sfpb_bw_count > 0)
5221 ret = msm_bus_scale_client_update_request(
5222 qseecom.qsee_perf_client, 3);
5223 else {
5224 if (qclk->ce_core_src_clk != NULL)
5225 ret = __qseecom_enable_clk(CLK_QSEE);
5226 if (!ret) {
5227 ret =
5228 msm_bus_scale_client_update_request(
5229 qseecom.qsee_perf_client, 1);
5230 if ((ret) &&
5231 (qclk->ce_core_src_clk != NULL))
5232 __qseecom_disable_clk(CLK_QSEE);
5233 }
5234 }
5235 if (ret)
5236 pr_err("DFAB Bandwidth req failed (%d)\n",
5237 ret);
5238 else {
5239 qseecom.qsee_bw_count++;
5240 data->perf_enabled = true;
5241 }
5242 } else {
5243 qseecom.qsee_bw_count++;
5244 data->perf_enabled = true;
5245 }
5246 mutex_unlock(&qsee_bw_mutex);
5247 break;
5248 case CLK_SFPB:
5249 mutex_lock(&qsee_bw_mutex);
5250 if (!qseecom.qsee_sfpb_bw_count) {
5251 if (qseecom.qsee_bw_count > 0)
5252 ret = msm_bus_scale_client_update_request(
5253 qseecom.qsee_perf_client, 3);
5254 else {
5255 if (qclk->ce_core_src_clk != NULL)
5256 ret = __qseecom_enable_clk(CLK_QSEE);
5257 if (!ret) {
5258 ret =
5259 msm_bus_scale_client_update_request(
5260 qseecom.qsee_perf_client, 2);
5261 if ((ret) &&
5262 (qclk->ce_core_src_clk != NULL))
5263 __qseecom_disable_clk(CLK_QSEE);
5264 }
5265 }
5266
5267 if (ret)
5268 pr_err("SFPB Bandwidth req failed (%d)\n",
5269 ret);
5270 else {
5271 qseecom.qsee_sfpb_bw_count++;
5272 data->fast_load_enabled = true;
5273 }
5274 } else {
5275 qseecom.qsee_sfpb_bw_count++;
5276 data->fast_load_enabled = true;
5277 }
5278 mutex_unlock(&qsee_bw_mutex);
5279 break;
5280 default:
5281 pr_err("Clock type not defined\n");
5282 break;
5283 }
5284 return ret;
5285}
5286
5287static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5288 int32_t clk_type)
5289{
5290 int32_t ret = 0;
5291 struct qseecom_clk *qclk;
5292
5293 qclk = &qseecom.qsee;
5294
5295 if (qseecom.no_clock_support)
5296 return;
5297 if (!qseecom.qsee_perf_client)
5298 return;
5299
5300 switch (clk_type) {
5301 case CLK_DFAB:
5302 mutex_lock(&qsee_bw_mutex);
5303 if (qseecom.qsee_bw_count == 0) {
5304 pr_err("Client error.Extra call to disable DFAB clk\n");
5305 mutex_unlock(&qsee_bw_mutex);
5306 return;
5307 }
5308
5309 if (qseecom.qsee_bw_count == 1) {
5310 if (qseecom.qsee_sfpb_bw_count > 0)
5311 ret = msm_bus_scale_client_update_request(
5312 qseecom.qsee_perf_client, 2);
5313 else {
5314 ret = msm_bus_scale_client_update_request(
5315 qseecom.qsee_perf_client, 0);
5316 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5317 __qseecom_disable_clk(CLK_QSEE);
5318 }
5319 if (ret)
5320 pr_err("SFPB Bandwidth req fail (%d)\n",
5321 ret);
5322 else {
5323 qseecom.qsee_bw_count--;
5324 data->perf_enabled = false;
5325 }
5326 } else {
5327 qseecom.qsee_bw_count--;
5328 data->perf_enabled = false;
5329 }
5330 mutex_unlock(&qsee_bw_mutex);
5331 break;
5332 case CLK_SFPB:
5333 mutex_lock(&qsee_bw_mutex);
5334 if (qseecom.qsee_sfpb_bw_count == 0) {
5335 pr_err("Client error.Extra call to disable SFPB clk\n");
5336 mutex_unlock(&qsee_bw_mutex);
5337 return;
5338 }
5339 if (qseecom.qsee_sfpb_bw_count == 1) {
5340 if (qseecom.qsee_bw_count > 0)
5341 ret = msm_bus_scale_client_update_request(
5342 qseecom.qsee_perf_client, 1);
5343 else {
5344 ret = msm_bus_scale_client_update_request(
5345 qseecom.qsee_perf_client, 0);
5346 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5347 __qseecom_disable_clk(CLK_QSEE);
5348 }
5349 if (ret)
5350 pr_err("SFPB Bandwidth req fail (%d)\n",
5351 ret);
5352 else {
5353 qseecom.qsee_sfpb_bw_count--;
5354 data->fast_load_enabled = false;
5355 }
5356 } else {
5357 qseecom.qsee_sfpb_bw_count--;
5358 data->fast_load_enabled = false;
5359 }
5360 mutex_unlock(&qsee_bw_mutex);
5361 break;
5362 default:
5363 pr_err("Clock type not defined\n");
5364 break;
5365 }
5366
5367}
5368
5369static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5370 void __user *argp)
5371{
5372 struct ion_handle *ihandle; /* Ion handle */
5373 struct qseecom_load_img_req load_img_req;
5374 int uret = 0;
5375 int ret;
5376 ion_phys_addr_t pa = 0;
5377 size_t len;
5378 struct qseecom_load_app_ireq load_req;
5379 struct qseecom_load_app_64bit_ireq load_req_64bit;
5380 struct qseecom_command_scm_resp resp;
5381 void *cmd_buf = NULL;
5382 size_t cmd_len;
5383 /* Copy the relevant information needed for loading the image */
5384 if (copy_from_user(&load_img_req,
5385 (void __user *)argp,
5386 sizeof(struct qseecom_load_img_req))) {
5387 pr_err("copy_from_user failed\n");
5388 return -EFAULT;
5389 }
5390
5391 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005392 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005393 load_img_req.ifd_data_fd);
5394 if (IS_ERR_OR_NULL(ihandle)) {
5395 pr_err("Ion client could not retrieve the handle\n");
5396 return -ENOMEM;
5397 }
5398
5399 /* Get the physical address of the ION BUF */
5400 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5401 if (ret) {
5402 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5403 ret);
5404 return ret;
5405 }
5406 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5407 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5408 len, load_img_req.mdt_len,
5409 load_img_req.img_len);
5410 return ret;
5411 }
5412 /* Populate the structure for sending scm call to load image */
5413 if (qseecom.qsee_version < QSEE_VERSION_40) {
5414 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5415 load_req.mdt_len = load_img_req.mdt_len;
5416 load_req.img_len = load_img_req.img_len;
5417 load_req.phy_addr = (uint32_t)pa;
5418 cmd_buf = (void *)&load_req;
5419 cmd_len = sizeof(struct qseecom_load_app_ireq);
5420 } else {
5421 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5422 load_req_64bit.mdt_len = load_img_req.mdt_len;
5423 load_req_64bit.img_len = load_img_req.img_len;
5424 load_req_64bit.phy_addr = (uint64_t)pa;
5425 cmd_buf = (void *)&load_req_64bit;
5426 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5427 }
5428
5429 if (qseecom.support_bus_scaling) {
5430 mutex_lock(&qsee_bw_mutex);
5431 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5432 mutex_unlock(&qsee_bw_mutex);
5433 if (ret) {
5434 ret = -EIO;
5435 goto exit_cpu_restore;
5436 }
5437 }
5438
5439 /* Vote for the SFPB clock */
5440 ret = __qseecom_enable_clk_scale_up(data);
5441 if (ret) {
5442 ret = -EIO;
5443 goto exit_register_bus_bandwidth_needs;
5444 }
5445 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5446 ION_IOC_CLEAN_INV_CACHES);
5447 if (ret) {
5448 pr_err("cache operation failed %d\n", ret);
5449 goto exit_disable_clock;
5450 }
5451 /* SCM_CALL to load the external elf */
5452 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5453 &resp, sizeof(resp));
5454 if (ret) {
5455 pr_err("scm_call to load failed : ret %d\n",
5456 ret);
5457 ret = -EFAULT;
5458 goto exit_disable_clock;
5459 }
5460
5461 switch (resp.result) {
5462 case QSEOS_RESULT_SUCCESS:
5463 break;
5464 case QSEOS_RESULT_INCOMPLETE:
5465 pr_err("%s: qseos result incomplete\n", __func__);
5466 ret = __qseecom_process_incomplete_cmd(data, &resp);
5467 if (ret)
5468 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5469 break;
5470 case QSEOS_RESULT_FAILURE:
5471 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5472 ret = -EFAULT;
5473 break;
5474 default:
5475 pr_err("scm_call response result %d not supported\n",
5476 resp.result);
5477 ret = -EFAULT;
5478 break;
5479 }
5480
5481exit_disable_clock:
5482 __qseecom_disable_clk_scale_down(data);
5483
5484exit_register_bus_bandwidth_needs:
5485 if (qseecom.support_bus_scaling) {
5486 mutex_lock(&qsee_bw_mutex);
5487 uret = qseecom_unregister_bus_bandwidth_needs(data);
5488 mutex_unlock(&qsee_bw_mutex);
5489 if (uret)
5490 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5491 uret, ret);
5492 }
5493
5494exit_cpu_restore:
5495 /* Deallocate the handle */
5496 if (!IS_ERR_OR_NULL(ihandle))
5497 ion_free(qseecom.ion_clnt, ihandle);
5498 return ret;
5499}
5500
5501static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5502{
5503 int ret = 0;
5504 struct qseecom_command_scm_resp resp;
5505 struct qseecom_unload_app_ireq req;
5506
5507 /* unavailable client app */
5508 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5509
5510 /* Populate the structure for sending scm call to unload image */
5511 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5512
5513 /* SCM_CALL to unload the external elf */
5514 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5515 sizeof(struct qseecom_unload_app_ireq),
5516 &resp, sizeof(resp));
5517 if (ret) {
5518 pr_err("scm_call to unload failed : ret %d\n",
5519 ret);
5520 ret = -EFAULT;
5521 goto qseecom_unload_external_elf_scm_err;
5522 }
5523 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5524 ret = __qseecom_process_incomplete_cmd(data, &resp);
5525 if (ret)
5526 pr_err("process_incomplete_cmd fail err: %d\n",
5527 ret);
5528 } else {
5529 if (resp.result != QSEOS_RESULT_SUCCESS) {
5530 pr_err("scm_call to unload image failed resp.result =%d\n",
5531 resp.result);
5532 ret = -EFAULT;
5533 }
5534 }
5535
5536qseecom_unload_external_elf_scm_err:
5537
5538 return ret;
5539}
5540
5541static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5542 void __user *argp)
5543{
5544
5545 int32_t ret;
5546 struct qseecom_qseos_app_load_query query_req;
5547 struct qseecom_check_app_ireq req;
5548 struct qseecom_registered_app_list *entry = NULL;
5549 unsigned long flags = 0;
5550 uint32_t app_arch = 0, app_id = 0;
5551 bool found_app = false;
5552
5553 /* Copy the relevant information needed for loading the image */
5554 if (copy_from_user(&query_req,
5555 (void __user *)argp,
5556 sizeof(struct qseecom_qseos_app_load_query))) {
5557 pr_err("copy_from_user failed\n");
5558 return -EFAULT;
5559 }
5560
5561 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5562 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5563 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5564
5565 ret = __qseecom_check_app_exists(req, &app_id);
5566 if (ret) {
5567 pr_err(" scm call to check if app is loaded failed");
5568 return ret; /* scm call failed */
5569 }
5570 if (app_id) {
5571 pr_debug("App id %d (%s) already exists\n", app_id,
5572 (char *)(req.app_name));
5573 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5574 list_for_each_entry(entry,
5575 &qseecom.registered_app_list_head, list){
5576 if (entry->app_id == app_id) {
5577 app_arch = entry->app_arch;
5578 entry->ref_cnt++;
5579 found_app = true;
5580 break;
5581 }
5582 }
5583 spin_unlock_irqrestore(
5584 &qseecom.registered_app_list_lock, flags);
5585 data->client.app_id = app_id;
5586 query_req.app_id = app_id;
5587 if (app_arch) {
5588 data->client.app_arch = app_arch;
5589 query_req.app_arch = app_arch;
5590 } else {
5591 data->client.app_arch = 0;
5592 query_req.app_arch = 0;
5593 }
5594 strlcpy(data->client.app_name, query_req.app_name,
5595 MAX_APP_NAME_SIZE);
5596 /*
5597 * If app was loaded by appsbl before and was not registered,
5598 * regiser this app now.
5599 */
5600 if (!found_app) {
5601 pr_debug("Register app %d [%s] which was loaded before\n",
5602 ret, (char *)query_req.app_name);
5603 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5604 if (!entry) {
5605 pr_err("kmalloc for app entry failed\n");
5606 return -ENOMEM;
5607 }
5608 entry->app_id = app_id;
5609 entry->ref_cnt = 1;
5610 entry->app_arch = data->client.app_arch;
5611 strlcpy(entry->app_name, data->client.app_name,
5612 MAX_APP_NAME_SIZE);
5613 entry->app_blocked = false;
5614 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07005615 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005616 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5617 flags);
5618 list_add_tail(&entry->list,
5619 &qseecom.registered_app_list_head);
5620 spin_unlock_irqrestore(
5621 &qseecom.registered_app_list_lock, flags);
5622 }
5623 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5624 pr_err("copy_to_user failed\n");
5625 return -EFAULT;
5626 }
5627 return -EEXIST; /* app already loaded */
5628 } else {
5629 return 0; /* app not loaded */
5630 }
5631}
5632
5633static int __qseecom_get_ce_pipe_info(
5634 enum qseecom_key_management_usage_type usage,
5635 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5636{
5637 int ret = -EINVAL;
5638 int i, j;
5639 struct qseecom_ce_info_use *p = NULL;
5640 int total = 0;
5641 struct qseecom_ce_pipe_entry *pcepipe;
5642
5643 switch (usage) {
5644 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5645 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5646 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5647 if (qseecom.support_fde) {
5648 p = qseecom.ce_info.fde;
5649 total = qseecom.ce_info.num_fde;
5650 } else {
5651 pr_err("system does not support fde\n");
5652 return -EINVAL;
5653 }
5654 break;
5655 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5656 if (qseecom.support_pfe) {
5657 p = qseecom.ce_info.pfe;
5658 total = qseecom.ce_info.num_pfe;
5659 } else {
5660 pr_err("system does not support pfe\n");
5661 return -EINVAL;
5662 }
5663 break;
5664 default:
5665 pr_err("unsupported usage %d\n", usage);
5666 return -EINVAL;
5667 }
5668
5669 for (j = 0; j < total; j++) {
5670 if (p->unit_num == unit) {
5671 pcepipe = p->ce_pipe_entry;
5672 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5673 (*ce_hw)[i] = pcepipe->ce_num;
5674 *pipe = pcepipe->ce_pipe_pair;
5675 pcepipe++;
5676 }
5677 ret = 0;
5678 break;
5679 }
5680 p++;
5681 }
5682 return ret;
5683}
5684
5685static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
5686 enum qseecom_key_management_usage_type usage,
5687 struct qseecom_key_generate_ireq *ireq)
5688{
5689 struct qseecom_command_scm_resp resp;
5690 int ret;
5691
5692 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5693 usage >= QSEOS_KM_USAGE_MAX) {
5694 pr_err("Error:: unsupported usage %d\n", usage);
5695 return -EFAULT;
5696 }
5697 ret = __qseecom_enable_clk(CLK_QSEE);
5698 if (ret)
5699 return ret;
5700
5701 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5702 ireq, sizeof(struct qseecom_key_generate_ireq),
5703 &resp, sizeof(resp));
5704 if (ret) {
5705 if (ret == -EINVAL &&
5706 resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5707 pr_debug("Key ID exists.\n");
5708 ret = 0;
5709 } else {
5710 pr_err("scm call to generate key failed : %d\n", ret);
5711 ret = -EFAULT;
5712 }
5713 goto generate_key_exit;
5714 }
5715
5716 switch (resp.result) {
5717 case QSEOS_RESULT_SUCCESS:
5718 break;
5719 case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
5720 pr_debug("Key ID exists.\n");
5721 break;
5722 case QSEOS_RESULT_INCOMPLETE:
5723 ret = __qseecom_process_incomplete_cmd(data, &resp);
5724 if (ret) {
5725 if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5726 pr_debug("Key ID exists.\n");
5727 ret = 0;
5728 } else {
5729 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5730 resp.result);
5731 }
5732 }
5733 break;
5734 case QSEOS_RESULT_FAILURE:
5735 default:
5736 pr_err("gen key scm call failed resp.result %d\n", resp.result);
5737 ret = -EINVAL;
5738 break;
5739 }
5740generate_key_exit:
5741 __qseecom_disable_clk(CLK_QSEE);
5742 return ret;
5743}
5744
5745static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
5746 enum qseecom_key_management_usage_type usage,
5747 struct qseecom_key_delete_ireq *ireq)
5748{
5749 struct qseecom_command_scm_resp resp;
5750 int ret;
5751
5752 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5753 usage >= QSEOS_KM_USAGE_MAX) {
5754 pr_err("Error:: unsupported usage %d\n", usage);
5755 return -EFAULT;
5756 }
5757 ret = __qseecom_enable_clk(CLK_QSEE);
5758 if (ret)
5759 return ret;
5760
5761 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5762 ireq, sizeof(struct qseecom_key_delete_ireq),
5763 &resp, sizeof(struct qseecom_command_scm_resp));
5764 if (ret) {
5765 if (ret == -EINVAL &&
5766 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5767 pr_debug("Max attempts to input password reached.\n");
5768 ret = -ERANGE;
5769 } else {
5770 pr_err("scm call to delete key failed : %d\n", ret);
5771 ret = -EFAULT;
5772 }
5773 goto del_key_exit;
5774 }
5775
5776 switch (resp.result) {
5777 case QSEOS_RESULT_SUCCESS:
5778 break;
5779 case QSEOS_RESULT_INCOMPLETE:
5780 ret = __qseecom_process_incomplete_cmd(data, &resp);
5781 if (ret) {
5782 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5783 resp.result);
5784 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5785 pr_debug("Max attempts to input password reached.\n");
5786 ret = -ERANGE;
5787 }
5788 }
5789 break;
5790 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5791 pr_debug("Max attempts to input password reached.\n");
5792 ret = -ERANGE;
5793 break;
5794 case QSEOS_RESULT_FAILURE:
5795 default:
5796 pr_err("Delete key scm call failed resp.result %d\n",
5797 resp.result);
5798 ret = -EINVAL;
5799 break;
5800 }
5801del_key_exit:
5802 __qseecom_disable_clk(CLK_QSEE);
5803 return ret;
5804}
5805
5806static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5807 enum qseecom_key_management_usage_type usage,
5808 struct qseecom_key_select_ireq *ireq)
5809{
5810 struct qseecom_command_scm_resp resp;
5811 int ret;
5812
5813 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5814 usage >= QSEOS_KM_USAGE_MAX) {
5815 pr_err("Error:: unsupported usage %d\n", usage);
5816 return -EFAULT;
5817 }
5818 ret = __qseecom_enable_clk(CLK_QSEE);
5819 if (ret)
5820 return ret;
5821
5822 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5823 ret = __qseecom_enable_clk(CLK_CE_DRV);
5824 if (ret)
5825 return ret;
5826 }
5827
5828 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5829 ireq, sizeof(struct qseecom_key_select_ireq),
5830 &resp, sizeof(struct qseecom_command_scm_resp));
5831 if (ret) {
5832 if (ret == -EINVAL &&
5833 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5834 pr_debug("Max attempts to input password reached.\n");
5835 ret = -ERANGE;
5836 } else if (ret == -EINVAL &&
5837 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5838 pr_debug("Set Key operation under processing...\n");
5839 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5840 } else {
5841 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5842 ret);
5843 ret = -EFAULT;
5844 }
5845 goto set_key_exit;
5846 }
5847
5848 switch (resp.result) {
5849 case QSEOS_RESULT_SUCCESS:
5850 break;
5851 case QSEOS_RESULT_INCOMPLETE:
5852 ret = __qseecom_process_incomplete_cmd(data, &resp);
5853 if (ret) {
5854 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5855 resp.result);
5856 if (resp.result ==
5857 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5858 pr_debug("Set Key operation under processing...\n");
5859 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5860 }
5861 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5862 pr_debug("Max attempts to input password reached.\n");
5863 ret = -ERANGE;
5864 }
5865 }
5866 break;
5867 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5868 pr_debug("Max attempts to input password reached.\n");
5869 ret = -ERANGE;
5870 break;
5871 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5872 pr_debug("Set Key operation under processing...\n");
5873 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5874 break;
5875 case QSEOS_RESULT_FAILURE:
5876 default:
5877 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5878 ret = -EINVAL;
5879 break;
5880 }
5881set_key_exit:
5882 __qseecom_disable_clk(CLK_QSEE);
5883 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5884 __qseecom_disable_clk(CLK_CE_DRV);
5885 return ret;
5886}
5887
5888static int __qseecom_update_current_key_user_info(
5889 struct qseecom_dev_handle *data,
5890 enum qseecom_key_management_usage_type usage,
5891 struct qseecom_key_userinfo_update_ireq *ireq)
5892{
5893 struct qseecom_command_scm_resp resp;
5894 int ret;
5895
5896 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5897 usage >= QSEOS_KM_USAGE_MAX) {
5898 pr_err("Error:: unsupported usage %d\n", usage);
5899 return -EFAULT;
5900 }
5901 ret = __qseecom_enable_clk(CLK_QSEE);
5902 if (ret)
5903 return ret;
5904
5905 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5906 ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
5907 &resp, sizeof(struct qseecom_command_scm_resp));
5908 if (ret) {
5909 if (ret == -EINVAL &&
5910 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5911 pr_debug("Set Key operation under processing...\n");
5912 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5913 } else {
5914 pr_err("scm call to update key userinfo failed: %d\n",
5915 ret);
5916 __qseecom_disable_clk(CLK_QSEE);
5917 return -EFAULT;
5918 }
5919 }
5920
5921 switch (resp.result) {
5922 case QSEOS_RESULT_SUCCESS:
5923 break;
5924 case QSEOS_RESULT_INCOMPLETE:
5925 ret = __qseecom_process_incomplete_cmd(data, &resp);
5926 if (resp.result ==
5927 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5928 pr_debug("Set Key operation under processing...\n");
5929 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5930 }
5931 if (ret)
5932 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5933 resp.result);
5934 break;
5935 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5936 pr_debug("Update Key operation under processing...\n");
5937 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5938 break;
5939 case QSEOS_RESULT_FAILURE:
5940 default:
5941 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5942 ret = -EINVAL;
5943 break;
5944 }
5945
5946 __qseecom_disable_clk(CLK_QSEE);
5947 return ret;
5948}
5949
5950
5951static int qseecom_enable_ice_setup(int usage)
5952{
5953 int ret = 0;
5954
5955 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5956 ret = qcom_ice_setup_ice_hw("ufs", true);
5957 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5958 ret = qcom_ice_setup_ice_hw("sdcc", true);
5959
5960 return ret;
5961}
5962
5963static int qseecom_disable_ice_setup(int usage)
5964{
5965 int ret = 0;
5966
5967 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5968 ret = qcom_ice_setup_ice_hw("ufs", false);
5969 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5970 ret = qcom_ice_setup_ice_hw("sdcc", false);
5971
5972 return ret;
5973}
5974
5975static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5976{
5977 struct qseecom_ce_info_use *pce_info_use, *p;
5978 int total = 0;
5979 int i;
5980
5981 switch (usage) {
5982 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5983 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5984 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5985 p = qseecom.ce_info.fde;
5986 total = qseecom.ce_info.num_fde;
5987 break;
5988 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5989 p = qseecom.ce_info.pfe;
5990 total = qseecom.ce_info.num_pfe;
5991 break;
5992 default:
5993 pr_err("unsupported usage %d\n", usage);
5994 return -EINVAL;
5995 }
5996
5997 pce_info_use = NULL;
5998
5999 for (i = 0; i < total; i++) {
6000 if (p->unit_num == unit) {
6001 pce_info_use = p;
6002 break;
6003 }
6004 p++;
6005 }
6006 if (!pce_info_use) {
6007 pr_err("can not find %d\n", unit);
6008 return -EINVAL;
6009 }
6010 return pce_info_use->num_ce_pipe_entries;
6011}
6012
6013static int qseecom_create_key(struct qseecom_dev_handle *data,
6014 void __user *argp)
6015{
6016 int i;
6017 uint32_t *ce_hw = NULL;
6018 uint32_t pipe = 0;
6019 int ret = 0;
6020 uint32_t flags = 0;
6021 struct qseecom_create_key_req create_key_req;
6022 struct qseecom_key_generate_ireq generate_key_ireq;
6023 struct qseecom_key_select_ireq set_key_ireq;
6024 uint32_t entries = 0;
6025
6026 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
6027 if (ret) {
6028 pr_err("copy_from_user failed\n");
6029 return ret;
6030 }
6031
6032 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6033 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6034 pr_err("unsupported usage %d\n", create_key_req.usage);
6035 ret = -EFAULT;
6036 return ret;
6037 }
6038 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6039 create_key_req.usage);
6040 if (entries <= 0) {
6041 pr_err("no ce instance for usage %d instance %d\n",
6042 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
6043 ret = -EINVAL;
6044 return ret;
6045 }
6046
6047 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6048 if (!ce_hw) {
6049 ret = -ENOMEM;
6050 return ret;
6051 }
6052 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
6053 DEFAULT_CE_INFO_UNIT);
6054 if (ret) {
6055 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6056 ret = -EINVAL;
6057 goto free_buf;
6058 }
6059
6060 if (qseecom.fde_key_size)
6061 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6062 else
6063 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6064
Jiten Patela7bb1d52018-05-11 12:34:26 +05306065 if (qseecom.enable_key_wrap_in_ks == true)
6066 flags |= ENABLE_KEY_WRAP_IN_KS;
6067
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006068 generate_key_ireq.flags = flags;
6069 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
6070 memset((void *)generate_key_ireq.key_id,
6071 0, QSEECOM_KEY_ID_SIZE);
6072 memset((void *)generate_key_ireq.hash32,
6073 0, QSEECOM_HASH_SIZE);
6074 memcpy((void *)generate_key_ireq.key_id,
6075 (void *)key_id_array[create_key_req.usage].desc,
6076 QSEECOM_KEY_ID_SIZE);
6077 memcpy((void *)generate_key_ireq.hash32,
6078 (void *)create_key_req.hash32,
6079 QSEECOM_HASH_SIZE);
6080
6081 ret = __qseecom_generate_and_save_key(data,
6082 create_key_req.usage, &generate_key_ireq);
6083 if (ret) {
6084 pr_err("Failed to generate key on storage: %d\n", ret);
6085 goto free_buf;
6086 }
6087
6088 for (i = 0; i < entries; i++) {
6089 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6090 if (create_key_req.usage ==
6091 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6092 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6093 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6094
6095 } else if (create_key_req.usage ==
6096 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6097 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6098 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6099
6100 } else {
6101 set_key_ireq.ce = ce_hw[i];
6102 set_key_ireq.pipe = pipe;
6103 }
6104 set_key_ireq.flags = flags;
6105
6106 /* set both PIPE_ENC and PIPE_ENC_XTS*/
6107 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6108 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6109 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6110 memcpy((void *)set_key_ireq.key_id,
6111 (void *)key_id_array[create_key_req.usage].desc,
6112 QSEECOM_KEY_ID_SIZE);
6113 memcpy((void *)set_key_ireq.hash32,
6114 (void *)create_key_req.hash32,
6115 QSEECOM_HASH_SIZE);
6116 /*
6117 * It will return false if it is GPCE based crypto instance or
6118 * ICE is setup properly
6119 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006120 ret = qseecom_enable_ice_setup(create_key_req.usage);
6121 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006122 goto free_buf;
6123
6124 do {
6125 ret = __qseecom_set_clear_ce_key(data,
6126 create_key_req.usage,
6127 &set_key_ireq);
6128 /*
6129 * wait a little before calling scm again to let other
6130 * processes run
6131 */
6132 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6133 msleep(50);
6134
6135 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6136
6137 qseecom_disable_ice_setup(create_key_req.usage);
6138
6139 if (ret) {
6140 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
6141 pipe, ce_hw[i], ret);
6142 goto free_buf;
6143 } else {
6144 pr_err("Set the key successfully\n");
6145 if ((create_key_req.usage ==
6146 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
6147 (create_key_req.usage ==
6148 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
6149 goto free_buf;
6150 }
6151 }
6152
6153free_buf:
6154 kzfree(ce_hw);
6155 return ret;
6156}
6157
6158static int qseecom_wipe_key(struct qseecom_dev_handle *data,
6159 void __user *argp)
6160{
6161 uint32_t *ce_hw = NULL;
6162 uint32_t pipe = 0;
6163 int ret = 0;
6164 uint32_t flags = 0;
6165 int i, j;
6166 struct qseecom_wipe_key_req wipe_key_req;
6167 struct qseecom_key_delete_ireq delete_key_ireq;
6168 struct qseecom_key_select_ireq clear_key_ireq;
6169 uint32_t entries = 0;
6170
6171 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6172 if (ret) {
6173 pr_err("copy_from_user failed\n");
6174 return ret;
6175 }
6176
6177 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6178 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6179 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6180 ret = -EFAULT;
6181 return ret;
6182 }
6183
6184 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6185 wipe_key_req.usage);
6186 if (entries <= 0) {
6187 pr_err("no ce instance for usage %d instance %d\n",
6188 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6189 ret = -EINVAL;
6190 return ret;
6191 }
6192
6193 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6194 if (!ce_hw) {
6195 ret = -ENOMEM;
6196 return ret;
6197 }
6198
6199 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6200 DEFAULT_CE_INFO_UNIT);
6201 if (ret) {
6202 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6203 ret = -EINVAL;
6204 goto free_buf;
6205 }
6206
6207 if (wipe_key_req.wipe_key_flag) {
6208 delete_key_ireq.flags = flags;
6209 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6210 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6211 memcpy((void *)delete_key_ireq.key_id,
6212 (void *)key_id_array[wipe_key_req.usage].desc,
6213 QSEECOM_KEY_ID_SIZE);
6214 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6215
6216 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6217 &delete_key_ireq);
6218 if (ret) {
6219 pr_err("Failed to delete key from ssd storage: %d\n",
6220 ret);
6221 ret = -EFAULT;
6222 goto free_buf;
6223 }
6224 }
6225
6226 for (j = 0; j < entries; j++) {
6227 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6228 if (wipe_key_req.usage ==
6229 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6230 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6231 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6232 } else if (wipe_key_req.usage ==
6233 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6234 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6235 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6236 } else {
6237 clear_key_ireq.ce = ce_hw[j];
6238 clear_key_ireq.pipe = pipe;
6239 }
6240 clear_key_ireq.flags = flags;
6241 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6242 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6243 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6244 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6245
6246 /*
6247 * It will return false if it is GPCE based crypto instance or
6248 * ICE is setup properly
6249 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006250 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6251 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006252 goto free_buf;
6253
6254 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6255 &clear_key_ireq);
6256
6257 qseecom_disable_ice_setup(wipe_key_req.usage);
6258
6259 if (ret) {
6260 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6261 pipe, ce_hw[j], ret);
6262 ret = -EFAULT;
6263 goto free_buf;
6264 }
6265 }
6266
6267free_buf:
6268 kzfree(ce_hw);
6269 return ret;
6270}
6271
6272static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6273 void __user *argp)
6274{
6275 int ret = 0;
6276 uint32_t flags = 0;
6277 struct qseecom_update_key_userinfo_req update_key_req;
6278 struct qseecom_key_userinfo_update_ireq ireq;
6279
6280 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6281 if (ret) {
6282 pr_err("copy_from_user failed\n");
6283 return ret;
6284 }
6285
6286 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6287 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6288 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6289 return -EFAULT;
6290 }
6291
6292 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6293
6294 if (qseecom.fde_key_size)
6295 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6296 else
6297 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6298
6299 ireq.flags = flags;
6300 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6301 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6302 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6303 memcpy((void *)ireq.key_id,
6304 (void *)key_id_array[update_key_req.usage].desc,
6305 QSEECOM_KEY_ID_SIZE);
6306 memcpy((void *)ireq.current_hash32,
6307 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6308 memcpy((void *)ireq.new_hash32,
6309 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6310
6311 do {
6312 ret = __qseecom_update_current_key_user_info(data,
6313 update_key_req.usage,
6314 &ireq);
6315 /*
6316 * wait a little before calling scm again to let other
6317 * processes run
6318 */
6319 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6320 msleep(50);
6321
6322 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6323 if (ret) {
6324 pr_err("Failed to update key info: %d\n", ret);
6325 return ret;
6326 }
6327 return ret;
6328
6329}
6330static int qseecom_is_es_activated(void __user *argp)
6331{
Zhen Kong26e62742018-05-04 17:19:06 -07006332 struct qseecom_is_es_activated_req req = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006333 struct qseecom_command_scm_resp resp;
6334 int ret;
6335
6336 if (qseecom.qsee_version < QSEE_VERSION_04) {
6337 pr_err("invalid qsee version\n");
6338 return -ENODEV;
6339 }
6340
6341 if (argp == NULL) {
6342 pr_err("arg is null\n");
6343 return -EINVAL;
6344 }
6345
6346 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6347 &req, sizeof(req), &resp, sizeof(resp));
6348 if (ret) {
6349 pr_err("scm_call failed\n");
6350 return ret;
6351 }
6352
6353 req.is_activated = resp.result;
6354 ret = copy_to_user(argp, &req, sizeof(req));
6355 if (ret) {
6356 pr_err("copy_to_user failed\n");
6357 return ret;
6358 }
6359
6360 return 0;
6361}
6362
6363static int qseecom_save_partition_hash(void __user *argp)
6364{
6365 struct qseecom_save_partition_hash_req req;
6366 struct qseecom_command_scm_resp resp;
6367 int ret;
6368
6369 memset(&resp, 0x00, sizeof(resp));
6370
6371 if (qseecom.qsee_version < QSEE_VERSION_04) {
6372 pr_err("invalid qsee version\n");
6373 return -ENODEV;
6374 }
6375
6376 if (argp == NULL) {
6377 pr_err("arg is null\n");
6378 return -EINVAL;
6379 }
6380
6381 ret = copy_from_user(&req, argp, sizeof(req));
6382 if (ret) {
6383 pr_err("copy_from_user failed\n");
6384 return ret;
6385 }
6386
6387 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6388 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6389 if (ret) {
6390 pr_err("qseecom_scm_call failed\n");
6391 return ret;
6392 }
6393
6394 return 0;
6395}
6396
6397static int qseecom_mdtp_cipher_dip(void __user *argp)
6398{
6399 struct qseecom_mdtp_cipher_dip_req req;
6400 u32 tzbuflenin, tzbuflenout;
6401 char *tzbufin = NULL, *tzbufout = NULL;
6402 struct scm_desc desc = {0};
6403 int ret;
6404
6405 do {
6406 /* Copy the parameters from userspace */
6407 if (argp == NULL) {
6408 pr_err("arg is null\n");
6409 ret = -EINVAL;
6410 break;
6411 }
6412
6413 ret = copy_from_user(&req, argp, sizeof(req));
6414 if (ret) {
6415 pr_err("copy_from_user failed, ret= %d\n", ret);
6416 break;
6417 }
6418
6419 if (req.in_buf == NULL || req.out_buf == NULL ||
6420 req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
6421 req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
6422 req.direction > 1) {
6423 pr_err("invalid parameters\n");
6424 ret = -EINVAL;
6425 break;
6426 }
6427
6428 /* Copy the input buffer from userspace to kernel space */
6429 tzbuflenin = PAGE_ALIGN(req.in_buf_size);
6430 tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
6431 if (!tzbufin) {
6432 pr_err("error allocating in buffer\n");
6433 ret = -ENOMEM;
6434 break;
6435 }
6436
6437 ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
6438 if (ret) {
6439 pr_err("copy_from_user failed, ret=%d\n", ret);
6440 break;
6441 }
6442
6443 dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
6444
6445 /* Prepare the output buffer in kernel space */
6446 tzbuflenout = PAGE_ALIGN(req.out_buf_size);
6447 tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
6448 if (!tzbufout) {
6449 pr_err("error allocating out buffer\n");
6450 ret = -ENOMEM;
6451 break;
6452 }
6453
6454 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6455
6456 /* Send the command to TZ */
6457 desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
6458 desc.args[0] = virt_to_phys(tzbufin);
6459 desc.args[1] = req.in_buf_size;
6460 desc.args[2] = virt_to_phys(tzbufout);
6461 desc.args[3] = req.out_buf_size;
6462 desc.args[4] = req.direction;
6463
6464 ret = __qseecom_enable_clk(CLK_QSEE);
6465 if (ret)
6466 break;
6467
Zhen Kong03f220d2019-02-01 17:12:34 -08006468 ret = __qseecom_scm_call2_locked(TZ_MDTP_CIPHER_DIP_ID, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006469
6470 __qseecom_disable_clk(CLK_QSEE);
6471
6472 if (ret) {
6473 pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
6474 ret);
6475 break;
6476 }
6477
6478 /* Copy the output buffer from kernel space to userspace */
6479 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6480 ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
6481 if (ret) {
6482 pr_err("copy_to_user failed, ret=%d\n", ret);
6483 break;
6484 }
6485 } while (0);
6486
6487 kzfree(tzbufin);
6488 kzfree(tzbufout);
6489
6490 return ret;
6491}
6492
6493static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
6494 struct qseecom_qteec_req *req)
6495{
6496 if (!data || !data->client.ihandle) {
6497 pr_err("Client or client handle is not initialized\n");
6498 return -EINVAL;
6499 }
6500
6501 if (data->type != QSEECOM_CLIENT_APP)
6502 return -EFAULT;
6503
6504 if (req->req_len > UINT_MAX - req->resp_len) {
6505 pr_err("Integer overflow detected in req_len & rsp_len\n");
6506 return -EINVAL;
6507 }
6508
6509 if (req->req_len + req->resp_len > data->client.sb_length) {
6510 pr_debug("Not enough memory to fit cmd_buf.\n");
6511 pr_debug("resp_buf. Required: %u, Available: %zu\n",
6512 (req->req_len + req->resp_len), data->client.sb_length);
6513 return -ENOMEM;
6514 }
6515
6516 if (req->req_ptr == NULL || req->resp_ptr == NULL) {
6517 pr_err("cmd buffer or response buffer is null\n");
6518 return -EINVAL;
6519 }
6520 if (((uintptr_t)req->req_ptr <
6521 data->client.user_virt_sb_base) ||
6522 ((uintptr_t)req->req_ptr >=
6523 (data->client.user_virt_sb_base + data->client.sb_length))) {
6524 pr_err("cmd buffer address not within shared bufffer\n");
6525 return -EINVAL;
6526 }
6527
6528 if (((uintptr_t)req->resp_ptr <
6529 data->client.user_virt_sb_base) ||
6530 ((uintptr_t)req->resp_ptr >=
6531 (data->client.user_virt_sb_base + data->client.sb_length))) {
6532 pr_err("response buffer address not within shared bufffer\n");
6533 return -EINVAL;
6534 }
6535
6536 if ((req->req_len == 0) || (req->resp_len == 0)) {
6537 pr_err("cmd buf lengtgh/response buf length not valid\n");
6538 return -EINVAL;
6539 }
6540
6541 if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
6542 pr_err("Integer overflow in req_len & req_ptr\n");
6543 return -EINVAL;
6544 }
6545
6546 if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
6547 pr_err("Integer overflow in resp_len & resp_ptr\n");
6548 return -EINVAL;
6549 }
6550
6551 if (data->client.user_virt_sb_base >
6552 (ULONG_MAX - data->client.sb_length)) {
6553 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
6554 return -EINVAL;
6555 }
6556 if ((((uintptr_t)req->req_ptr + req->req_len) >
6557 ((uintptr_t)data->client.user_virt_sb_base +
6558 data->client.sb_length)) ||
6559 (((uintptr_t)req->resp_ptr + req->resp_len) >
6560 ((uintptr_t)data->client.user_virt_sb_base +
6561 data->client.sb_length))) {
6562 pr_err("cmd buf or resp buf is out of shared buffer region\n");
6563 return -EINVAL;
6564 }
6565 return 0;
6566}
6567
6568static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6569 uint32_t fd_idx, struct sg_table *sg_ptr)
6570{
6571 struct scatterlist *sg = sg_ptr->sgl;
6572 struct qseecom_sg_entry *sg_entry;
6573 void *buf;
6574 uint i;
6575 size_t size;
6576 dma_addr_t coh_pmem;
6577
6578 if (fd_idx >= MAX_ION_FD) {
6579 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6580 return -ENOMEM;
6581 }
6582 /*
6583 * Allocate a buffer, populate it with number of entry plus
6584 * each sg entry's phy addr and length; then return the
6585 * phy_addr of the buffer.
6586 */
6587 size = sizeof(uint32_t) +
6588 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6589 size = (size + PAGE_SIZE) & PAGE_MASK;
6590 buf = dma_alloc_coherent(qseecom.pdev,
6591 size, &coh_pmem, GFP_KERNEL);
6592 if (buf == NULL) {
6593 pr_err("failed to alloc memory for sg buf\n");
6594 return -ENOMEM;
6595 }
6596 *(uint32_t *)buf = sg_ptr->nents;
6597 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6598 for (i = 0; i < sg_ptr->nents; i++) {
6599 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6600 sg_entry->len = sg->length;
6601 sg_entry++;
6602 sg = sg_next(sg);
6603 }
6604 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6605 data->client.sec_buf_fd[fd_idx].vbase = buf;
6606 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6607 data->client.sec_buf_fd[fd_idx].size = size;
6608 return 0;
6609}
6610
6611static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6612 struct qseecom_dev_handle *data, bool cleanup)
6613{
6614 struct ion_handle *ihandle;
6615 int ret = 0;
6616 int i = 0;
6617 uint32_t *update;
6618 struct sg_table *sg_ptr = NULL;
6619 struct scatterlist *sg;
6620 struct qseecom_param_memref *memref;
6621
6622 if (req == NULL) {
6623 pr_err("Invalid address\n");
6624 return -EINVAL;
6625 }
6626 for (i = 0; i < MAX_ION_FD; i++) {
6627 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006628 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006629 req->ifd_data[i].fd);
6630 if (IS_ERR_OR_NULL(ihandle)) {
6631 pr_err("Ion client can't retrieve the handle\n");
6632 return -ENOMEM;
6633 }
6634 if ((req->req_len < sizeof(uint32_t)) ||
6635 (req->ifd_data[i].cmd_buf_offset >
6636 req->req_len - sizeof(uint32_t))) {
6637 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6638 req->req_len,
6639 req->ifd_data[i].cmd_buf_offset);
6640 return -EINVAL;
6641 }
6642 update = (uint32_t *)((char *) req->req_ptr +
6643 req->ifd_data[i].cmd_buf_offset);
6644 if (!update) {
6645 pr_err("update pointer is NULL\n");
6646 return -EINVAL;
6647 }
6648 } else {
6649 continue;
6650 }
6651 /* Populate the cmd data structure with the phys_addr */
6652 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6653 if (IS_ERR_OR_NULL(sg_ptr)) {
6654 pr_err("IOn client could not retrieve sg table\n");
6655 goto err;
6656 }
6657 sg = sg_ptr->sgl;
6658 if (sg == NULL) {
6659 pr_err("sg is NULL\n");
6660 goto err;
6661 }
6662 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6663 pr_err("Num of scat entr (%d)or length(%d) invalid\n",
6664 sg_ptr->nents, sg->length);
6665 goto err;
6666 }
6667 /* clean up buf for pre-allocated fd */
6668 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6669 (*update)) {
6670 if (data->client.sec_buf_fd[i].vbase)
6671 dma_free_coherent(qseecom.pdev,
6672 data->client.sec_buf_fd[i].size,
6673 data->client.sec_buf_fd[i].vbase,
6674 data->client.sec_buf_fd[i].pbase);
6675 memset((void *)update, 0,
6676 sizeof(struct qseecom_param_memref));
6677 memset(&(data->client.sec_buf_fd[i]), 0,
6678 sizeof(struct qseecom_sec_buf_fd_info));
6679 goto clean;
6680 }
6681
6682 if (*update == 0) {
6683 /* update buf for pre-allocated fd from secure heap*/
6684 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6685 sg_ptr);
6686 if (ret) {
6687 pr_err("Failed to handle buf for fd[%d]\n", i);
6688 goto err;
6689 }
6690 memref = (struct qseecom_param_memref *)update;
6691 memref->buffer =
6692 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6693 memref->size =
6694 (uint32_t)(data->client.sec_buf_fd[i].size);
6695 } else {
6696 /* update buf for fd from non-secure qseecom heap */
6697 if (sg_ptr->nents != 1) {
6698 pr_err("Num of scat entr (%d) invalid\n",
6699 sg_ptr->nents);
6700 goto err;
6701 }
6702 if (cleanup)
6703 *update = 0;
6704 else
6705 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6706 }
6707clean:
6708 if (cleanup) {
6709 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6710 ihandle, NULL, sg->length,
6711 ION_IOC_INV_CACHES);
6712 if (ret) {
6713 pr_err("cache operation failed %d\n", ret);
6714 goto err;
6715 }
6716 } else {
6717 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6718 ihandle, NULL, sg->length,
6719 ION_IOC_CLEAN_INV_CACHES);
6720 if (ret) {
6721 pr_err("cache operation failed %d\n", ret);
6722 goto err;
6723 }
6724 data->sglistinfo_ptr[i].indexAndFlags =
6725 SGLISTINFO_SET_INDEX_FLAG(
6726 (sg_ptr->nents == 1), 0,
6727 req->ifd_data[i].cmd_buf_offset);
6728 data->sglistinfo_ptr[i].sizeOrCount =
6729 (sg_ptr->nents == 1) ?
6730 sg->length : sg_ptr->nents;
6731 data->sglist_cnt = i + 1;
6732 }
6733 /* Deallocate the handle */
6734 if (!IS_ERR_OR_NULL(ihandle))
6735 ion_free(qseecom.ion_clnt, ihandle);
6736 }
6737 return ret;
6738err:
6739 if (!IS_ERR_OR_NULL(ihandle))
6740 ion_free(qseecom.ion_clnt, ihandle);
6741 return -ENOMEM;
6742}
6743
6744static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
6745 struct qseecom_qteec_req *req, uint32_t cmd_id)
6746{
6747 struct qseecom_command_scm_resp resp;
6748 struct qseecom_qteec_ireq ireq;
6749 struct qseecom_qteec_64bit_ireq ireq_64bit;
6750 struct qseecom_registered_app_list *ptr_app;
6751 bool found_app = false;
6752 unsigned long flags;
6753 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07006754 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006755 uint32_t reqd_len_sb_in = 0;
6756 void *cmd_buf = NULL;
6757 size_t cmd_len;
6758 struct sglist_info *table = data->sglistinfo_ptr;
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306759 void *req_ptr = NULL;
6760 void *resp_ptr = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006761
6762 ret = __qseecom_qteec_validate_msg(data, req);
6763 if (ret)
6764 return ret;
6765
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306766 req_ptr = req->req_ptr;
6767 resp_ptr = req->resp_ptr;
6768
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006769 /* find app_id & img_name from list */
6770 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6771 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6772 list) {
6773 if ((ptr_app->app_id == data->client.app_id) &&
6774 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6775 found_app = true;
6776 break;
6777 }
6778 }
6779 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6780 if (!found_app) {
6781 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6782 (char *)data->client.app_name);
6783 return -ENOENT;
6784 }
6785
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306786 req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6787 (uintptr_t)req->req_ptr);
6788 req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6789 (uintptr_t)req->resp_ptr);
6790
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006791 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6792 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
6793 ret = __qseecom_update_qteec_req_buf(
6794 (struct qseecom_qteec_modfd_req *)req, data, false);
6795 if (ret)
6796 return ret;
6797 }
6798
6799 if (qseecom.qsee_version < QSEE_VERSION_40) {
6800 ireq.app_id = data->client.app_id;
6801 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306802 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006803 ireq.req_len = req->req_len;
6804 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306805 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006806 ireq.resp_len = req->resp_len;
6807 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6808 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6809 dmac_flush_range((void *)table,
6810 (void *)table + SGLISTINFO_TABLE_SIZE);
6811 cmd_buf = (void *)&ireq;
6812 cmd_len = sizeof(struct qseecom_qteec_ireq);
6813 } else {
6814 ireq_64bit.app_id = data->client.app_id;
6815 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306816 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006817 ireq_64bit.req_len = req->req_len;
6818 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306819 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006820 ireq_64bit.resp_len = req->resp_len;
6821 if ((data->client.app_arch == ELFCLASS32) &&
6822 ((ireq_64bit.req_ptr >=
6823 PHY_ADDR_4G - ireq_64bit.req_len) ||
6824 (ireq_64bit.resp_ptr >=
6825 PHY_ADDR_4G - ireq_64bit.resp_len))){
6826 pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
6827 data->client.app_name, data->client.app_id);
6828 pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
6829 ireq_64bit.req_ptr, ireq_64bit.req_len,
6830 ireq_64bit.resp_ptr, ireq_64bit.resp_len);
6831 return -EFAULT;
6832 }
6833 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6834 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6835 dmac_flush_range((void *)table,
6836 (void *)table + SGLISTINFO_TABLE_SIZE);
6837 cmd_buf = (void *)&ireq_64bit;
6838 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6839 }
6840 if (qseecom.whitelist_support == true
6841 && cmd_id == QSEOS_TEE_OPEN_SESSION)
6842 *(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
6843 else
6844 *(uint32_t *)cmd_buf = cmd_id;
6845
6846 reqd_len_sb_in = req->req_len + req->resp_len;
6847 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6848 data->client.sb_virt,
6849 reqd_len_sb_in,
6850 ION_IOC_CLEAN_INV_CACHES);
6851 if (ret) {
6852 pr_err("cache operation failed %d\n", ret);
6853 return ret;
6854 }
6855
6856 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6857
6858 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6859 cmd_buf, cmd_len,
6860 &resp, sizeof(resp));
6861 if (ret) {
6862 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6863 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07006864 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006865 }
6866
6867 if (qseecom.qsee_reentrancy_support) {
6868 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07006869 if (ret)
6870 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006871 } else {
6872 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6873 ret = __qseecom_process_incomplete_cmd(data, &resp);
6874 if (ret) {
6875 pr_err("process_incomplete_cmd failed err: %d\n",
6876 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07006877 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006878 }
6879 } else {
6880 if (resp.result != QSEOS_RESULT_SUCCESS) {
6881 pr_err("Response result %d not supported\n",
6882 resp.result);
6883 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07006884 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006885 }
6886 }
6887 }
Zhen Kong4af480e2017-09-19 14:34:16 -07006888exit:
6889 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006890 data->client.sb_virt, data->client.sb_length,
6891 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07006892 if (ret2) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006893 pr_err("cache operation failed %d\n", ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07006894 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006895 }
6896
6897 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6898 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
Zhen Kong4af480e2017-09-19 14:34:16 -07006899 ret2 = __qseecom_update_qteec_req_buf(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006900 (struct qseecom_qteec_modfd_req *)req, data, true);
Zhen Kong4af480e2017-09-19 14:34:16 -07006901 if (ret2)
6902 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006903 }
Zhen Kong4af480e2017-09-19 14:34:16 -07006904 return ret;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006905}
6906
6907static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6908 void __user *argp)
6909{
6910 struct qseecom_qteec_modfd_req req;
6911 int ret = 0;
6912
6913 ret = copy_from_user(&req, argp,
6914 sizeof(struct qseecom_qteec_modfd_req));
6915 if (ret) {
6916 pr_err("copy_from_user failed\n");
6917 return ret;
6918 }
6919 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6920 QSEOS_TEE_OPEN_SESSION);
6921
6922 return ret;
6923}
6924
6925static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6926 void __user *argp)
6927{
6928 struct qseecom_qteec_req req;
6929 int ret = 0;
6930
6931 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6932 if (ret) {
6933 pr_err("copy_from_user failed\n");
6934 return ret;
6935 }
6936 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6937 return ret;
6938}
6939
6940static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
6941 void __user *argp)
6942{
6943 struct qseecom_qteec_modfd_req req;
6944 struct qseecom_command_scm_resp resp;
6945 struct qseecom_qteec_ireq ireq;
6946 struct qseecom_qteec_64bit_ireq ireq_64bit;
6947 struct qseecom_registered_app_list *ptr_app;
6948 bool found_app = false;
6949 unsigned long flags;
6950 int ret = 0;
6951 int i = 0;
6952 uint32_t reqd_len_sb_in = 0;
6953 void *cmd_buf = NULL;
6954 size_t cmd_len;
6955 struct sglist_info *table = data->sglistinfo_ptr;
6956 void *req_ptr = NULL;
6957 void *resp_ptr = NULL;
6958
6959 ret = copy_from_user(&req, argp,
6960 sizeof(struct qseecom_qteec_modfd_req));
6961 if (ret) {
6962 pr_err("copy_from_user failed\n");
6963 return ret;
6964 }
6965 ret = __qseecom_qteec_validate_msg(data,
6966 (struct qseecom_qteec_req *)(&req));
6967 if (ret)
6968 return ret;
6969 req_ptr = req.req_ptr;
6970 resp_ptr = req.resp_ptr;
6971
6972 /* find app_id & img_name from list */
6973 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6974 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6975 list) {
6976 if ((ptr_app->app_id == data->client.app_id) &&
6977 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6978 found_app = true;
6979 break;
6980 }
6981 }
6982 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6983 if (!found_app) {
6984 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6985 (char *)data->client.app_name);
6986 return -ENOENT;
6987 }
6988
6989 /* validate offsets */
6990 for (i = 0; i < MAX_ION_FD; i++) {
6991 if (req.ifd_data[i].fd) {
6992 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
6993 return -EINVAL;
6994 }
6995 }
6996 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6997 (uintptr_t)req.req_ptr);
6998 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6999 (uintptr_t)req.resp_ptr);
7000 ret = __qseecom_update_qteec_req_buf(&req, data, false);
7001 if (ret)
7002 return ret;
7003
7004 if (qseecom.qsee_version < QSEE_VERSION_40) {
7005 ireq.app_id = data->client.app_id;
7006 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7007 (uintptr_t)req_ptr);
7008 ireq.req_len = req.req_len;
7009 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7010 (uintptr_t)resp_ptr);
7011 ireq.resp_len = req.resp_len;
7012 cmd_buf = (void *)&ireq;
7013 cmd_len = sizeof(struct qseecom_qteec_ireq);
7014 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
7015 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7016 dmac_flush_range((void *)table,
7017 (void *)table + SGLISTINFO_TABLE_SIZE);
7018 } else {
7019 ireq_64bit.app_id = data->client.app_id;
7020 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7021 (uintptr_t)req_ptr);
7022 ireq_64bit.req_len = req.req_len;
7023 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7024 (uintptr_t)resp_ptr);
7025 ireq_64bit.resp_len = req.resp_len;
7026 cmd_buf = (void *)&ireq_64bit;
7027 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
7028 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
7029 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7030 dmac_flush_range((void *)table,
7031 (void *)table + SGLISTINFO_TABLE_SIZE);
7032 }
7033 reqd_len_sb_in = req.req_len + req.resp_len;
7034 if (qseecom.whitelist_support == true)
7035 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
7036 else
7037 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
7038
7039 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7040 data->client.sb_virt,
7041 reqd_len_sb_in,
7042 ION_IOC_CLEAN_INV_CACHES);
7043 if (ret) {
7044 pr_err("cache operation failed %d\n", ret);
7045 return ret;
7046 }
7047
7048 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
7049
7050 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
7051 cmd_buf, cmd_len,
7052 &resp, sizeof(resp));
7053 if (ret) {
7054 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
7055 ret, data->client.app_id);
7056 return ret;
7057 }
7058
7059 if (qseecom.qsee_reentrancy_support) {
7060 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
7061 } else {
7062 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
7063 ret = __qseecom_process_incomplete_cmd(data, &resp);
7064 if (ret) {
7065 pr_err("process_incomplete_cmd failed err: %d\n",
7066 ret);
7067 return ret;
7068 }
7069 } else {
7070 if (resp.result != QSEOS_RESULT_SUCCESS) {
7071 pr_err("Response result %d not supported\n",
7072 resp.result);
7073 ret = -EINVAL;
7074 }
7075 }
7076 }
7077 ret = __qseecom_update_qteec_req_buf(&req, data, true);
7078 if (ret)
7079 return ret;
7080
7081 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7082 data->client.sb_virt, data->client.sb_length,
7083 ION_IOC_INV_CACHES);
7084 if (ret) {
7085 pr_err("cache operation failed %d\n", ret);
7086 return ret;
7087 }
7088 return 0;
7089}
7090
7091static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
7092 void __user *argp)
7093{
7094 struct qseecom_qteec_modfd_req req;
7095 int ret = 0;
7096
7097 ret = copy_from_user(&req, argp,
7098 sizeof(struct qseecom_qteec_modfd_req));
7099 if (ret) {
7100 pr_err("copy_from_user failed\n");
7101 return ret;
7102 }
7103 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
7104 QSEOS_TEE_REQUEST_CANCELLATION);
7105
7106 return ret;
7107}
7108
7109static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
7110{
7111 if (data->sglist_cnt) {
7112 memset(data->sglistinfo_ptr, 0,
7113 SGLISTINFO_TABLE_SIZE);
7114 data->sglist_cnt = 0;
7115 }
7116}
7117
AnilKumar Chimataa312d342019-01-25 12:43:23 +05307118static long qseecom_ioctl(struct file *file,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007119 unsigned int cmd, unsigned long arg)
7120{
7121 int ret = 0;
7122 struct qseecom_dev_handle *data = file->private_data;
7123 void __user *argp = (void __user *) arg;
7124 bool perf_enabled = false;
7125
7126 if (!data) {
7127 pr_err("Invalid/uninitialized device handle\n");
7128 return -EINVAL;
7129 }
7130
7131 if (data->abort) {
7132 pr_err("Aborting qseecom driver\n");
7133 return -ENODEV;
7134 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007135 if (cmd != QSEECOM_IOCTL_RECEIVE_REQ &&
7136 cmd != QSEECOM_IOCTL_SEND_RESP_REQ &&
7137 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP &&
7138 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64)
Zhen Kongc4c162a2019-01-23 12:07:12 -08007139 __wakeup_unregister_listener_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007140
7141 switch (cmd) {
7142 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
7143 if (data->type != QSEECOM_GENERIC) {
7144 pr_err("reg lstnr req: invalid handle (%d)\n",
7145 data->type);
7146 ret = -EINVAL;
7147 break;
7148 }
7149 pr_debug("ioctl register_listener_req()\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08007150 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007151 atomic_inc(&data->ioctl_count);
7152 data->type = QSEECOM_LISTENER_SERVICE;
7153 ret = qseecom_register_listener(data, argp);
7154 atomic_dec(&data->ioctl_count);
7155 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007156 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007157 if (ret)
7158 pr_err("failed qseecom_register_listener: %d\n", ret);
7159 break;
7160 }
Neeraj Sonib30ac1f2018-04-17 14:48:42 +05307161 case QSEECOM_IOCTL_SET_ICE_INFO: {
7162 struct qseecom_ice_data_t ice_data;
7163
7164 ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
7165 if (ret) {
7166 pr_err("copy_from_user failed\n");
7167 return -EFAULT;
7168 }
7169 qcom_ice_set_fde_flag(ice_data.flag);
7170 break;
7171 }
7172
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007173 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
7174 if ((data->listener.id == 0) ||
7175 (data->type != QSEECOM_LISTENER_SERVICE)) {
7176 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
7177 data->type, data->listener.id);
7178 ret = -EINVAL;
7179 break;
7180 }
7181 pr_debug("ioctl unregister_listener_req()\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08007182 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007183 atomic_inc(&data->ioctl_count);
7184 ret = qseecom_unregister_listener(data);
7185 atomic_dec(&data->ioctl_count);
7186 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007187 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007188 if (ret)
7189 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7190 break;
7191 }
7192 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7193 if ((data->client.app_id == 0) ||
7194 (data->type != QSEECOM_CLIENT_APP)) {
7195 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7196 data->type, data->client.app_id);
7197 ret = -EINVAL;
7198 break;
7199 }
7200 /* Only one client allowed here at a time */
7201 mutex_lock(&app_access_lock);
7202 if (qseecom.support_bus_scaling) {
7203 /* register bus bw in case the client doesn't do it */
7204 if (!data->mode) {
7205 mutex_lock(&qsee_bw_mutex);
7206 __qseecom_register_bus_bandwidth_needs(
7207 data, HIGH);
7208 mutex_unlock(&qsee_bw_mutex);
7209 }
7210 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7211 if (ret) {
7212 pr_err("Failed to set bw.\n");
7213 ret = -EINVAL;
7214 mutex_unlock(&app_access_lock);
7215 break;
7216 }
7217 }
7218 /*
7219 * On targets where crypto clock is handled by HLOS,
7220 * if clk_access_cnt is zero and perf_enabled is false,
7221 * then the crypto clock was not enabled before sending cmd to
7222 * tz, qseecom will enable the clock to avoid service failure.
7223 */
7224 if (!qseecom.no_clock_support &&
7225 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7226 pr_debug("ce clock is not enabled!\n");
7227 ret = qseecom_perf_enable(data);
7228 if (ret) {
7229 pr_err("Failed to vote for clock with err %d\n",
7230 ret);
7231 mutex_unlock(&app_access_lock);
7232 ret = -EINVAL;
7233 break;
7234 }
7235 perf_enabled = true;
7236 }
7237 atomic_inc(&data->ioctl_count);
7238 ret = qseecom_send_cmd(data, argp);
7239 if (qseecom.support_bus_scaling)
7240 __qseecom_add_bw_scale_down_timer(
7241 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7242 if (perf_enabled) {
7243 qsee_disable_clock_vote(data, CLK_DFAB);
7244 qsee_disable_clock_vote(data, CLK_SFPB);
7245 }
7246 atomic_dec(&data->ioctl_count);
7247 wake_up_all(&data->abort_wq);
7248 mutex_unlock(&app_access_lock);
7249 if (ret)
7250 pr_err("failed qseecom_send_cmd: %d\n", ret);
7251 break;
7252 }
7253 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7254 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7255 if ((data->client.app_id == 0) ||
7256 (data->type != QSEECOM_CLIENT_APP)) {
7257 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7258 data->type, data->client.app_id);
7259 ret = -EINVAL;
7260 break;
7261 }
7262 /* Only one client allowed here at a time */
7263 mutex_lock(&app_access_lock);
7264 if (qseecom.support_bus_scaling) {
7265 if (!data->mode) {
7266 mutex_lock(&qsee_bw_mutex);
7267 __qseecom_register_bus_bandwidth_needs(
7268 data, HIGH);
7269 mutex_unlock(&qsee_bw_mutex);
7270 }
7271 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7272 if (ret) {
7273 pr_err("Failed to set bw.\n");
7274 mutex_unlock(&app_access_lock);
7275 ret = -EINVAL;
7276 break;
7277 }
7278 }
7279 /*
7280 * On targets where crypto clock is handled by HLOS,
7281 * if clk_access_cnt is zero and perf_enabled is false,
7282 * then the crypto clock was not enabled before sending cmd to
7283 * tz, qseecom will enable the clock to avoid service failure.
7284 */
7285 if (!qseecom.no_clock_support &&
7286 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7287 pr_debug("ce clock is not enabled!\n");
7288 ret = qseecom_perf_enable(data);
7289 if (ret) {
7290 pr_err("Failed to vote for clock with err %d\n",
7291 ret);
7292 mutex_unlock(&app_access_lock);
7293 ret = -EINVAL;
7294 break;
7295 }
7296 perf_enabled = true;
7297 }
7298 atomic_inc(&data->ioctl_count);
7299 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7300 ret = qseecom_send_modfd_cmd(data, argp);
7301 else
7302 ret = qseecom_send_modfd_cmd_64(data, argp);
7303 if (qseecom.support_bus_scaling)
7304 __qseecom_add_bw_scale_down_timer(
7305 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7306 if (perf_enabled) {
7307 qsee_disable_clock_vote(data, CLK_DFAB);
7308 qsee_disable_clock_vote(data, CLK_SFPB);
7309 }
7310 atomic_dec(&data->ioctl_count);
7311 wake_up_all(&data->abort_wq);
7312 mutex_unlock(&app_access_lock);
7313 if (ret)
7314 pr_err("failed qseecom_send_cmd: %d\n", ret);
7315 __qseecom_clean_data_sglistinfo(data);
7316 break;
7317 }
7318 case QSEECOM_IOCTL_RECEIVE_REQ: {
7319 if ((data->listener.id == 0) ||
7320 (data->type != QSEECOM_LISTENER_SERVICE)) {
7321 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7322 data->type, data->listener.id);
7323 ret = -EINVAL;
7324 break;
7325 }
7326 atomic_inc(&data->ioctl_count);
7327 ret = qseecom_receive_req(data);
7328 atomic_dec(&data->ioctl_count);
7329 wake_up_all(&data->abort_wq);
7330 if (ret && (ret != -ERESTARTSYS))
7331 pr_err("failed qseecom_receive_req: %d\n", ret);
7332 break;
7333 }
7334 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7335 if ((data->listener.id == 0) ||
7336 (data->type != QSEECOM_LISTENER_SERVICE)) {
7337 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7338 data->type, data->listener.id);
7339 ret = -EINVAL;
7340 break;
7341 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007342 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007343 atomic_inc(&data->ioctl_count);
7344 if (!qseecom.qsee_reentrancy_support)
7345 ret = qseecom_send_resp();
7346 else
7347 ret = qseecom_reentrancy_send_resp(data);
7348 atomic_dec(&data->ioctl_count);
7349 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007350 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007351 if (ret)
7352 pr_err("failed qseecom_send_resp: %d\n", ret);
7353 break;
7354 }
7355 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7356 if ((data->type != QSEECOM_CLIENT_APP) &&
7357 (data->type != QSEECOM_GENERIC) &&
7358 (data->type != QSEECOM_SECURE_SERVICE)) {
7359 pr_err("set mem param req: invalid handle (%d)\n",
7360 data->type);
7361 ret = -EINVAL;
7362 break;
7363 }
7364 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7365 mutex_lock(&app_access_lock);
7366 atomic_inc(&data->ioctl_count);
7367 ret = qseecom_set_client_mem_param(data, argp);
7368 atomic_dec(&data->ioctl_count);
7369 mutex_unlock(&app_access_lock);
7370 if (ret)
7371 pr_err("failed Qqseecom_set_mem_param request: %d\n",
7372 ret);
7373 break;
7374 }
7375 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7376 if ((data->type != QSEECOM_GENERIC) &&
7377 (data->type != QSEECOM_CLIENT_APP)) {
7378 pr_err("load app req: invalid handle (%d)\n",
7379 data->type);
7380 ret = -EINVAL;
7381 break;
7382 }
7383 data->type = QSEECOM_CLIENT_APP;
7384 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7385 mutex_lock(&app_access_lock);
7386 atomic_inc(&data->ioctl_count);
7387 ret = qseecom_load_app(data, argp);
7388 atomic_dec(&data->ioctl_count);
7389 mutex_unlock(&app_access_lock);
7390 if (ret)
7391 pr_err("failed load_app request: %d\n", ret);
7392 break;
7393 }
7394 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7395 if ((data->client.app_id == 0) ||
7396 (data->type != QSEECOM_CLIENT_APP)) {
7397 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7398 data->type, data->client.app_id);
7399 ret = -EINVAL;
7400 break;
7401 }
7402 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7403 mutex_lock(&app_access_lock);
7404 atomic_inc(&data->ioctl_count);
7405 ret = qseecom_unload_app(data, false);
7406 atomic_dec(&data->ioctl_count);
7407 mutex_unlock(&app_access_lock);
7408 if (ret)
7409 pr_err("failed unload_app request: %d\n", ret);
7410 break;
7411 }
7412 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7413 atomic_inc(&data->ioctl_count);
7414 ret = qseecom_get_qseos_version(data, argp);
7415 if (ret)
7416 pr_err("qseecom_get_qseos_version: %d\n", ret);
7417 atomic_dec(&data->ioctl_count);
7418 break;
7419 }
7420 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7421 if ((data->type != QSEECOM_GENERIC) &&
7422 (data->type != QSEECOM_CLIENT_APP)) {
7423 pr_err("perf enable req: invalid handle (%d)\n",
7424 data->type);
7425 ret = -EINVAL;
7426 break;
7427 }
7428 if ((data->type == QSEECOM_CLIENT_APP) &&
7429 (data->client.app_id == 0)) {
7430 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7431 data->type, data->client.app_id);
7432 ret = -EINVAL;
7433 break;
7434 }
7435 atomic_inc(&data->ioctl_count);
7436 if (qseecom.support_bus_scaling) {
7437 mutex_lock(&qsee_bw_mutex);
7438 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7439 mutex_unlock(&qsee_bw_mutex);
7440 } else {
7441 ret = qseecom_perf_enable(data);
7442 if (ret)
7443 pr_err("Fail to vote for clocks %d\n", ret);
7444 }
7445 atomic_dec(&data->ioctl_count);
7446 break;
7447 }
7448 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7449 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7450 (data->type != QSEECOM_CLIENT_APP)) {
7451 pr_err("perf disable req: invalid handle (%d)\n",
7452 data->type);
7453 ret = -EINVAL;
7454 break;
7455 }
7456 if ((data->type == QSEECOM_CLIENT_APP) &&
7457 (data->client.app_id == 0)) {
7458 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7459 data->type, data->client.app_id);
7460 ret = -EINVAL;
7461 break;
7462 }
7463 atomic_inc(&data->ioctl_count);
7464 if (!qseecom.support_bus_scaling) {
7465 qsee_disable_clock_vote(data, CLK_DFAB);
7466 qsee_disable_clock_vote(data, CLK_SFPB);
7467 } else {
7468 mutex_lock(&qsee_bw_mutex);
7469 qseecom_unregister_bus_bandwidth_needs(data);
7470 mutex_unlock(&qsee_bw_mutex);
7471 }
7472 atomic_dec(&data->ioctl_count);
7473 break;
7474 }
7475
7476 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7477 /* If crypto clock is not handled by HLOS, return directly. */
7478 if (qseecom.no_clock_support) {
7479 pr_debug("crypto clock is not handled by HLOS\n");
7480 break;
7481 }
7482 if ((data->client.app_id == 0) ||
7483 (data->type != QSEECOM_CLIENT_APP)) {
7484 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7485 data->type, data->client.app_id);
7486 ret = -EINVAL;
7487 break;
7488 }
7489 atomic_inc(&data->ioctl_count);
7490 ret = qseecom_scale_bus_bandwidth(data, argp);
7491 atomic_dec(&data->ioctl_count);
7492 break;
7493 }
7494 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7495 if (data->type != QSEECOM_GENERIC) {
7496 pr_err("load ext elf req: invalid client handle (%d)\n",
7497 data->type);
7498 ret = -EINVAL;
7499 break;
7500 }
7501 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7502 data->released = true;
7503 mutex_lock(&app_access_lock);
7504 atomic_inc(&data->ioctl_count);
7505 ret = qseecom_load_external_elf(data, argp);
7506 atomic_dec(&data->ioctl_count);
7507 mutex_unlock(&app_access_lock);
7508 if (ret)
7509 pr_err("failed load_external_elf request: %d\n", ret);
7510 break;
7511 }
7512 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7513 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7514 pr_err("unload ext elf req: invalid handle (%d)\n",
7515 data->type);
7516 ret = -EINVAL;
7517 break;
7518 }
7519 data->released = true;
7520 mutex_lock(&app_access_lock);
7521 atomic_inc(&data->ioctl_count);
7522 ret = qseecom_unload_external_elf(data);
7523 atomic_dec(&data->ioctl_count);
7524 mutex_unlock(&app_access_lock);
7525 if (ret)
7526 pr_err("failed unload_app request: %d\n", ret);
7527 break;
7528 }
7529 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7530 data->type = QSEECOM_CLIENT_APP;
7531 mutex_lock(&app_access_lock);
7532 atomic_inc(&data->ioctl_count);
7533 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7534 ret = qseecom_query_app_loaded(data, argp);
7535 atomic_dec(&data->ioctl_count);
7536 mutex_unlock(&app_access_lock);
7537 break;
7538 }
7539 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7540 if (data->type != QSEECOM_GENERIC) {
7541 pr_err("send cmd svc req: invalid handle (%d)\n",
7542 data->type);
7543 ret = -EINVAL;
7544 break;
7545 }
7546 data->type = QSEECOM_SECURE_SERVICE;
7547 if (qseecom.qsee_version < QSEE_VERSION_03) {
7548 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7549 qseecom.qsee_version);
7550 return -EINVAL;
7551 }
7552 mutex_lock(&app_access_lock);
7553 atomic_inc(&data->ioctl_count);
7554 ret = qseecom_send_service_cmd(data, argp);
7555 atomic_dec(&data->ioctl_count);
7556 mutex_unlock(&app_access_lock);
7557 break;
7558 }
7559 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7560 if (!(qseecom.support_pfe || qseecom.support_fde))
7561 pr_err("Features requiring key init not supported\n");
7562 if (data->type != QSEECOM_GENERIC) {
7563 pr_err("create key req: invalid handle (%d)\n",
7564 data->type);
7565 ret = -EINVAL;
7566 break;
7567 }
7568 if (qseecom.qsee_version < QSEE_VERSION_05) {
7569 pr_err("Create Key feature unsupported: qsee ver %u\n",
7570 qseecom.qsee_version);
7571 return -EINVAL;
7572 }
7573 data->released = true;
7574 mutex_lock(&app_access_lock);
7575 atomic_inc(&data->ioctl_count);
7576 ret = qseecom_create_key(data, argp);
7577 if (ret)
7578 pr_err("failed to create encryption key: %d\n", ret);
7579
7580 atomic_dec(&data->ioctl_count);
7581 mutex_unlock(&app_access_lock);
7582 break;
7583 }
7584 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7585 if (!(qseecom.support_pfe || qseecom.support_fde))
7586 pr_err("Features requiring key init not supported\n");
7587 if (data->type != QSEECOM_GENERIC) {
7588 pr_err("wipe key req: invalid handle (%d)\n",
7589 data->type);
7590 ret = -EINVAL;
7591 break;
7592 }
7593 if (qseecom.qsee_version < QSEE_VERSION_05) {
7594 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7595 qseecom.qsee_version);
7596 return -EINVAL;
7597 }
7598 data->released = true;
7599 mutex_lock(&app_access_lock);
7600 atomic_inc(&data->ioctl_count);
7601 ret = qseecom_wipe_key(data, argp);
7602 if (ret)
7603 pr_err("failed to wipe encryption key: %d\n", ret);
7604 atomic_dec(&data->ioctl_count);
7605 mutex_unlock(&app_access_lock);
7606 break;
7607 }
7608 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7609 if (!(qseecom.support_pfe || qseecom.support_fde))
7610 pr_err("Features requiring key init not supported\n");
7611 if (data->type != QSEECOM_GENERIC) {
7612 pr_err("update key req: invalid handle (%d)\n",
7613 data->type);
7614 ret = -EINVAL;
7615 break;
7616 }
7617 if (qseecom.qsee_version < QSEE_VERSION_05) {
7618 pr_err("Update Key feature unsupported in qsee ver %u\n",
7619 qseecom.qsee_version);
7620 return -EINVAL;
7621 }
7622 data->released = true;
7623 mutex_lock(&app_access_lock);
7624 atomic_inc(&data->ioctl_count);
7625 ret = qseecom_update_key_user_info(data, argp);
7626 if (ret)
7627 pr_err("failed to update key user info: %d\n", ret);
7628 atomic_dec(&data->ioctl_count);
7629 mutex_unlock(&app_access_lock);
7630 break;
7631 }
7632 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7633 if (data->type != QSEECOM_GENERIC) {
7634 pr_err("save part hash req: invalid handle (%d)\n",
7635 data->type);
7636 ret = -EINVAL;
7637 break;
7638 }
7639 data->released = true;
7640 mutex_lock(&app_access_lock);
7641 atomic_inc(&data->ioctl_count);
7642 ret = qseecom_save_partition_hash(argp);
7643 atomic_dec(&data->ioctl_count);
7644 mutex_unlock(&app_access_lock);
7645 break;
7646 }
7647 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7648 if (data->type != QSEECOM_GENERIC) {
7649 pr_err("ES activated req: invalid handle (%d)\n",
7650 data->type);
7651 ret = -EINVAL;
7652 break;
7653 }
7654 data->released = true;
7655 mutex_lock(&app_access_lock);
7656 atomic_inc(&data->ioctl_count);
7657 ret = qseecom_is_es_activated(argp);
7658 atomic_dec(&data->ioctl_count);
7659 mutex_unlock(&app_access_lock);
7660 break;
7661 }
7662 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7663 if (data->type != QSEECOM_GENERIC) {
7664 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7665 data->type);
7666 ret = -EINVAL;
7667 break;
7668 }
7669 data->released = true;
7670 mutex_lock(&app_access_lock);
7671 atomic_inc(&data->ioctl_count);
7672 ret = qseecom_mdtp_cipher_dip(argp);
7673 atomic_dec(&data->ioctl_count);
7674 mutex_unlock(&app_access_lock);
7675 break;
7676 }
7677 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7678 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7679 if ((data->listener.id == 0) ||
7680 (data->type != QSEECOM_LISTENER_SERVICE)) {
7681 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7682 data->type, data->listener.id);
7683 ret = -EINVAL;
7684 break;
7685 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007686 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007687 atomic_inc(&data->ioctl_count);
7688 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7689 ret = qseecom_send_modfd_resp(data, argp);
7690 else
7691 ret = qseecom_send_modfd_resp_64(data, argp);
7692 atomic_dec(&data->ioctl_count);
7693 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007694 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007695 if (ret)
7696 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7697 __qseecom_clean_data_sglistinfo(data);
7698 break;
7699 }
7700 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7701 if ((data->client.app_id == 0) ||
7702 (data->type != QSEECOM_CLIENT_APP)) {
7703 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7704 data->type, data->client.app_id);
7705 ret = -EINVAL;
7706 break;
7707 }
7708 if (qseecom.qsee_version < QSEE_VERSION_40) {
7709 pr_err("GP feature unsupported: qsee ver %u\n",
7710 qseecom.qsee_version);
7711 return -EINVAL;
7712 }
7713 /* Only one client allowed here at a time */
7714 mutex_lock(&app_access_lock);
7715 atomic_inc(&data->ioctl_count);
7716 ret = qseecom_qteec_open_session(data, argp);
7717 atomic_dec(&data->ioctl_count);
7718 wake_up_all(&data->abort_wq);
7719 mutex_unlock(&app_access_lock);
7720 if (ret)
7721 pr_err("failed open_session_cmd: %d\n", ret);
7722 __qseecom_clean_data_sglistinfo(data);
7723 break;
7724 }
7725 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7726 if ((data->client.app_id == 0) ||
7727 (data->type != QSEECOM_CLIENT_APP)) {
7728 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7729 data->type, data->client.app_id);
7730 ret = -EINVAL;
7731 break;
7732 }
7733 if (qseecom.qsee_version < QSEE_VERSION_40) {
7734 pr_err("GP feature unsupported: qsee ver %u\n",
7735 qseecom.qsee_version);
7736 return -EINVAL;
7737 }
7738 /* Only one client allowed here at a time */
7739 mutex_lock(&app_access_lock);
7740 atomic_inc(&data->ioctl_count);
7741 ret = qseecom_qteec_close_session(data, argp);
7742 atomic_dec(&data->ioctl_count);
7743 wake_up_all(&data->abort_wq);
7744 mutex_unlock(&app_access_lock);
7745 if (ret)
7746 pr_err("failed close_session_cmd: %d\n", ret);
7747 break;
7748 }
7749 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7750 if ((data->client.app_id == 0) ||
7751 (data->type != QSEECOM_CLIENT_APP)) {
7752 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7753 data->type, data->client.app_id);
7754 ret = -EINVAL;
7755 break;
7756 }
7757 if (qseecom.qsee_version < QSEE_VERSION_40) {
7758 pr_err("GP feature unsupported: qsee ver %u\n",
7759 qseecom.qsee_version);
7760 return -EINVAL;
7761 }
7762 /* Only one client allowed here at a time */
7763 mutex_lock(&app_access_lock);
7764 atomic_inc(&data->ioctl_count);
7765 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7766 atomic_dec(&data->ioctl_count);
7767 wake_up_all(&data->abort_wq);
7768 mutex_unlock(&app_access_lock);
7769 if (ret)
7770 pr_err("failed Invoke cmd: %d\n", ret);
7771 __qseecom_clean_data_sglistinfo(data);
7772 break;
7773 }
7774 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7775 if ((data->client.app_id == 0) ||
7776 (data->type != QSEECOM_CLIENT_APP)) {
7777 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7778 data->type, data->client.app_id);
7779 ret = -EINVAL;
7780 break;
7781 }
7782 if (qseecom.qsee_version < QSEE_VERSION_40) {
7783 pr_err("GP feature unsupported: qsee ver %u\n",
7784 qseecom.qsee_version);
7785 return -EINVAL;
7786 }
7787 /* Only one client allowed here at a time */
7788 mutex_lock(&app_access_lock);
7789 atomic_inc(&data->ioctl_count);
7790 ret = qseecom_qteec_request_cancellation(data, argp);
7791 atomic_dec(&data->ioctl_count);
7792 wake_up_all(&data->abort_wq);
7793 mutex_unlock(&app_access_lock);
7794 if (ret)
7795 pr_err("failed request_cancellation: %d\n", ret);
7796 break;
7797 }
7798 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7799 atomic_inc(&data->ioctl_count);
7800 ret = qseecom_get_ce_info(data, argp);
7801 if (ret)
7802 pr_err("failed get fde ce pipe info: %d\n", ret);
7803 atomic_dec(&data->ioctl_count);
7804 break;
7805 }
7806 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7807 atomic_inc(&data->ioctl_count);
7808 ret = qseecom_free_ce_info(data, argp);
7809 if (ret)
7810 pr_err("failed get fde ce pipe info: %d\n", ret);
7811 atomic_dec(&data->ioctl_count);
7812 break;
7813 }
7814 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7815 atomic_inc(&data->ioctl_count);
7816 ret = qseecom_query_ce_info(data, argp);
7817 if (ret)
7818 pr_err("failed get fde ce pipe info: %d\n", ret);
7819 atomic_dec(&data->ioctl_count);
7820 break;
7821 }
7822 default:
7823 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7824 return -EINVAL;
7825 }
7826 return ret;
7827}
7828
7829static int qseecom_open(struct inode *inode, struct file *file)
7830{
7831 int ret = 0;
7832 struct qseecom_dev_handle *data;
7833
7834 data = kzalloc(sizeof(*data), GFP_KERNEL);
7835 if (!data)
7836 return -ENOMEM;
7837 file->private_data = data;
7838 data->abort = 0;
7839 data->type = QSEECOM_GENERIC;
7840 data->released = false;
7841 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7842 data->mode = INACTIVE;
7843 init_waitqueue_head(&data->abort_wq);
7844 atomic_set(&data->ioctl_count, 0);
7845 return ret;
7846}
7847
7848static int qseecom_release(struct inode *inode, struct file *file)
7849{
7850 struct qseecom_dev_handle *data = file->private_data;
7851 int ret = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08007852 bool free_private_data = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007853
7854 if (data->released == false) {
7855 pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
7856 data->type, data->mode, data);
7857 switch (data->type) {
7858 case QSEECOM_LISTENER_SERVICE:
Zhen Kongbcdeda22018-11-16 13:50:51 -08007859 pr_debug("release lsnr svc %d\n", data->listener.id);
7860 free_private_data = false;
7861 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007862 ret = qseecom_unregister_listener(data);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08007863 data->listener.release_called = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08007864 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007865 break;
7866 case QSEECOM_CLIENT_APP:
7867 mutex_lock(&app_access_lock);
7868 ret = qseecom_unload_app(data, true);
7869 mutex_unlock(&app_access_lock);
7870 break;
7871 case QSEECOM_SECURE_SERVICE:
7872 case QSEECOM_GENERIC:
7873 ret = qseecom_unmap_ion_allocated_memory(data);
7874 if (ret)
7875 pr_err("Ion Unmap failed\n");
7876 break;
7877 case QSEECOM_UNAVAILABLE_CLIENT_APP:
7878 break;
7879 default:
7880 pr_err("Unsupported clnt_handle_type %d",
7881 data->type);
7882 break;
7883 }
7884 }
7885
7886 if (qseecom.support_bus_scaling) {
7887 mutex_lock(&qsee_bw_mutex);
7888 if (data->mode != INACTIVE) {
7889 qseecom_unregister_bus_bandwidth_needs(data);
7890 if (qseecom.cumulative_mode == INACTIVE) {
7891 ret = __qseecom_set_msm_bus_request(INACTIVE);
7892 if (ret)
7893 pr_err("Fail to scale down bus\n");
7894 }
7895 }
7896 mutex_unlock(&qsee_bw_mutex);
7897 } else {
7898 if (data->fast_load_enabled == true)
7899 qsee_disable_clock_vote(data, CLK_SFPB);
7900 if (data->perf_enabled == true)
7901 qsee_disable_clock_vote(data, CLK_DFAB);
7902 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007903
Zhen Kongbcdeda22018-11-16 13:50:51 -08007904 if (free_private_data)
7905 kfree(data);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007906 return ret;
7907}
7908
7909#ifdef CONFIG_COMPAT
7910#include "compat_qseecom.c"
7911#else
7912#define compat_qseecom_ioctl NULL
7913#endif
7914
7915static const struct file_operations qseecom_fops = {
7916 .owner = THIS_MODULE,
7917 .unlocked_ioctl = qseecom_ioctl,
7918 .compat_ioctl = compat_qseecom_ioctl,
7919 .open = qseecom_open,
7920 .release = qseecom_release
7921};
7922
7923static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7924{
7925 int rc = 0;
7926 struct device *pdev;
7927 struct qseecom_clk *qclk;
7928 char *core_clk_src = NULL;
7929 char *core_clk = NULL;
7930 char *iface_clk = NULL;
7931 char *bus_clk = NULL;
7932
7933 switch (ce) {
7934 case CLK_QSEE: {
7935 core_clk_src = "core_clk_src";
7936 core_clk = "core_clk";
7937 iface_clk = "iface_clk";
7938 bus_clk = "bus_clk";
7939 qclk = &qseecom.qsee;
7940 qclk->instance = CLK_QSEE;
7941 break;
7942 };
7943 case CLK_CE_DRV: {
7944 core_clk_src = "ce_drv_core_clk_src";
7945 core_clk = "ce_drv_core_clk";
7946 iface_clk = "ce_drv_iface_clk";
7947 bus_clk = "ce_drv_bus_clk";
7948 qclk = &qseecom.ce_drv;
7949 qclk->instance = CLK_CE_DRV;
7950 break;
7951 };
7952 default:
7953 pr_err("Invalid ce hw instance: %d!\n", ce);
7954 return -EIO;
7955 }
7956
7957 if (qseecom.no_clock_support) {
7958 qclk->ce_core_clk = NULL;
7959 qclk->ce_clk = NULL;
7960 qclk->ce_bus_clk = NULL;
7961 qclk->ce_core_src_clk = NULL;
7962 return 0;
7963 }
7964
7965 pdev = qseecom.pdev;
7966
7967 /* Get CE3 src core clk. */
7968 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7969 if (!IS_ERR(qclk->ce_core_src_clk)) {
7970 rc = clk_set_rate(qclk->ce_core_src_clk,
7971 qseecom.ce_opp_freq_hz);
7972 if (rc) {
7973 clk_put(qclk->ce_core_src_clk);
7974 qclk->ce_core_src_clk = NULL;
7975 pr_err("Unable to set the core src clk @%uMhz.\n",
7976 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7977 return -EIO;
7978 }
7979 } else {
7980 pr_warn("Unable to get CE core src clk, set to NULL\n");
7981 qclk->ce_core_src_clk = NULL;
7982 }
7983
7984 /* Get CE core clk */
7985 qclk->ce_core_clk = clk_get(pdev, core_clk);
7986 if (IS_ERR(qclk->ce_core_clk)) {
7987 rc = PTR_ERR(qclk->ce_core_clk);
7988 pr_err("Unable to get CE core clk\n");
7989 if (qclk->ce_core_src_clk != NULL)
7990 clk_put(qclk->ce_core_src_clk);
7991 return -EIO;
7992 }
7993
7994 /* Get CE Interface clk */
7995 qclk->ce_clk = clk_get(pdev, iface_clk);
7996 if (IS_ERR(qclk->ce_clk)) {
7997 rc = PTR_ERR(qclk->ce_clk);
7998 pr_err("Unable to get CE interface clk\n");
7999 if (qclk->ce_core_src_clk != NULL)
8000 clk_put(qclk->ce_core_src_clk);
8001 clk_put(qclk->ce_core_clk);
8002 return -EIO;
8003 }
8004
8005 /* Get CE AXI clk */
8006 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
8007 if (IS_ERR(qclk->ce_bus_clk)) {
8008 rc = PTR_ERR(qclk->ce_bus_clk);
8009 pr_err("Unable to get CE BUS interface clk\n");
8010 if (qclk->ce_core_src_clk != NULL)
8011 clk_put(qclk->ce_core_src_clk);
8012 clk_put(qclk->ce_core_clk);
8013 clk_put(qclk->ce_clk);
8014 return -EIO;
8015 }
8016
8017 return rc;
8018}
8019
8020static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
8021{
8022 struct qseecom_clk *qclk;
8023
8024 if (ce == CLK_QSEE)
8025 qclk = &qseecom.qsee;
8026 else
8027 qclk = &qseecom.ce_drv;
8028
8029 if (qclk->ce_clk != NULL) {
8030 clk_put(qclk->ce_clk);
8031 qclk->ce_clk = NULL;
8032 }
8033 if (qclk->ce_core_clk != NULL) {
8034 clk_put(qclk->ce_core_clk);
8035 qclk->ce_core_clk = NULL;
8036 }
8037 if (qclk->ce_bus_clk != NULL) {
8038 clk_put(qclk->ce_bus_clk);
8039 qclk->ce_bus_clk = NULL;
8040 }
8041 if (qclk->ce_core_src_clk != NULL) {
8042 clk_put(qclk->ce_core_src_clk);
8043 qclk->ce_core_src_clk = NULL;
8044 }
8045 qclk->instance = CLK_INVALID;
8046}
8047
8048static int qseecom_retrieve_ce_data(struct platform_device *pdev)
8049{
8050 int rc = 0;
8051 uint32_t hlos_num_ce_hw_instances;
8052 uint32_t disk_encrypt_pipe;
8053 uint32_t file_encrypt_pipe;
Zhen Kongffec45c2017-10-18 14:05:53 -07008054 uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008055 int i;
8056 const int *tbl;
8057 int size;
8058 int entry;
8059 struct qseecom_crypto_info *pfde_tbl = NULL;
8060 struct qseecom_crypto_info *p;
8061 int tbl_size;
8062 int j;
8063 bool old_db = true;
8064 struct qseecom_ce_info_use *pce_info_use;
8065 uint32_t *unit_tbl = NULL;
8066 int total_units = 0;
8067 struct qseecom_ce_pipe_entry *pce_entry;
8068
8069 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
8070 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
8071
8072 if (of_property_read_u32((&pdev->dev)->of_node,
8073 "qcom,qsee-ce-hw-instance",
8074 &qseecom.ce_info.qsee_ce_hw_instance)) {
8075 pr_err("Fail to get qsee ce hw instance information.\n");
8076 rc = -EINVAL;
8077 goto out;
8078 } else {
8079 pr_debug("qsee-ce-hw-instance=0x%x\n",
8080 qseecom.ce_info.qsee_ce_hw_instance);
8081 }
8082
8083 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
8084 "qcom,support-fde");
8085 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
8086 "qcom,support-pfe");
8087
8088 if (!qseecom.support_pfe && !qseecom.support_fde) {
8089 pr_warn("Device does not support PFE/FDE");
8090 goto out;
8091 }
8092
8093 if (qseecom.support_fde)
8094 tbl = of_get_property((&pdev->dev)->of_node,
8095 "qcom,full-disk-encrypt-info", &size);
8096 else
8097 tbl = NULL;
8098 if (tbl) {
8099 old_db = false;
8100 if (size % sizeof(struct qseecom_crypto_info)) {
8101 pr_err("full-disk-encrypt-info tbl size(%d)\n",
8102 size);
8103 rc = -EINVAL;
8104 goto out;
8105 }
8106 tbl_size = size / sizeof
8107 (struct qseecom_crypto_info);
8108
8109 pfde_tbl = kzalloc(size, GFP_KERNEL);
8110 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8111 total_units = 0;
8112
8113 if (!pfde_tbl || !unit_tbl) {
8114 pr_err("failed to alloc memory\n");
8115 rc = -ENOMEM;
8116 goto out;
8117 }
8118 if (of_property_read_u32_array((&pdev->dev)->of_node,
8119 "qcom,full-disk-encrypt-info",
8120 (u32 *)pfde_tbl, size/sizeof(u32))) {
8121 pr_err("failed to read full-disk-encrypt-info tbl\n");
8122 rc = -EINVAL;
8123 goto out;
8124 }
8125
8126 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8127 for (j = 0; j < total_units; j++) {
8128 if (p->unit_num == *(unit_tbl + j))
8129 break;
8130 }
8131 if (j == total_units) {
8132 *(unit_tbl + total_units) = p->unit_num;
8133 total_units++;
8134 }
8135 }
8136
8137 qseecom.ce_info.num_fde = total_units;
8138 pce_info_use = qseecom.ce_info.fde = kcalloc(
8139 total_units, sizeof(struct qseecom_ce_info_use),
8140 GFP_KERNEL);
8141 if (!pce_info_use) {
8142 pr_err("failed to alloc memory\n");
8143 rc = -ENOMEM;
8144 goto out;
8145 }
8146
8147 for (j = 0; j < total_units; j++, pce_info_use++) {
8148 pce_info_use->unit_num = *(unit_tbl + j);
8149 pce_info_use->alloc = false;
8150 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8151 pce_info_use->num_ce_pipe_entries = 0;
8152 pce_info_use->ce_pipe_entry = NULL;
8153 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8154 if (p->unit_num == pce_info_use->unit_num)
8155 pce_info_use->num_ce_pipe_entries++;
8156 }
8157
8158 entry = pce_info_use->num_ce_pipe_entries;
8159 pce_entry = pce_info_use->ce_pipe_entry =
8160 kcalloc(entry,
8161 sizeof(struct qseecom_ce_pipe_entry),
8162 GFP_KERNEL);
8163 if (pce_entry == NULL) {
8164 pr_err("failed to alloc memory\n");
8165 rc = -ENOMEM;
8166 goto out;
8167 }
8168
8169 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8170 if (p->unit_num == pce_info_use->unit_num) {
8171 pce_entry->ce_num = p->ce;
8172 pce_entry->ce_pipe_pair =
8173 p->pipe_pair;
8174 pce_entry->valid = true;
8175 pce_entry++;
8176 }
8177 }
8178 }
8179 kfree(unit_tbl);
8180 unit_tbl = NULL;
8181 kfree(pfde_tbl);
8182 pfde_tbl = NULL;
8183 }
8184
8185 if (qseecom.support_pfe)
8186 tbl = of_get_property((&pdev->dev)->of_node,
8187 "qcom,per-file-encrypt-info", &size);
8188 else
8189 tbl = NULL;
8190 if (tbl) {
8191 old_db = false;
8192 if (size % sizeof(struct qseecom_crypto_info)) {
8193 pr_err("per-file-encrypt-info tbl size(%d)\n",
8194 size);
8195 rc = -EINVAL;
8196 goto out;
8197 }
8198 tbl_size = size / sizeof
8199 (struct qseecom_crypto_info);
8200
8201 pfde_tbl = kzalloc(size, GFP_KERNEL);
8202 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8203 total_units = 0;
8204 if (!pfde_tbl || !unit_tbl) {
8205 pr_err("failed to alloc memory\n");
8206 rc = -ENOMEM;
8207 goto out;
8208 }
8209 if (of_property_read_u32_array((&pdev->dev)->of_node,
8210 "qcom,per-file-encrypt-info",
8211 (u32 *)pfde_tbl, size/sizeof(u32))) {
8212 pr_err("failed to read per-file-encrypt-info tbl\n");
8213 rc = -EINVAL;
8214 goto out;
8215 }
8216
8217 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8218 for (j = 0; j < total_units; j++) {
8219 if (p->unit_num == *(unit_tbl + j))
8220 break;
8221 }
8222 if (j == total_units) {
8223 *(unit_tbl + total_units) = p->unit_num;
8224 total_units++;
8225 }
8226 }
8227
8228 qseecom.ce_info.num_pfe = total_units;
8229 pce_info_use = qseecom.ce_info.pfe = kcalloc(
8230 total_units, sizeof(struct qseecom_ce_info_use),
8231 GFP_KERNEL);
8232 if (!pce_info_use) {
8233 pr_err("failed to alloc memory\n");
8234 rc = -ENOMEM;
8235 goto out;
8236 }
8237
8238 for (j = 0; j < total_units; j++, pce_info_use++) {
8239 pce_info_use->unit_num = *(unit_tbl + j);
8240 pce_info_use->alloc = false;
8241 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8242 pce_info_use->num_ce_pipe_entries = 0;
8243 pce_info_use->ce_pipe_entry = NULL;
8244 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8245 if (p->unit_num == pce_info_use->unit_num)
8246 pce_info_use->num_ce_pipe_entries++;
8247 }
8248
8249 entry = pce_info_use->num_ce_pipe_entries;
8250 pce_entry = pce_info_use->ce_pipe_entry =
8251 kcalloc(entry,
8252 sizeof(struct qseecom_ce_pipe_entry),
8253 GFP_KERNEL);
8254 if (pce_entry == NULL) {
8255 pr_err("failed to alloc memory\n");
8256 rc = -ENOMEM;
8257 goto out;
8258 }
8259
8260 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8261 if (p->unit_num == pce_info_use->unit_num) {
8262 pce_entry->ce_num = p->ce;
8263 pce_entry->ce_pipe_pair =
8264 p->pipe_pair;
8265 pce_entry->valid = true;
8266 pce_entry++;
8267 }
8268 }
8269 }
8270 kfree(unit_tbl);
8271 unit_tbl = NULL;
8272 kfree(pfde_tbl);
8273 pfde_tbl = NULL;
8274 }
8275
8276 if (!old_db)
8277 goto out1;
8278
8279 if (of_property_read_bool((&pdev->dev)->of_node,
8280 "qcom,support-multiple-ce-hw-instance")) {
8281 if (of_property_read_u32((&pdev->dev)->of_node,
8282 "qcom,hlos-num-ce-hw-instances",
8283 &hlos_num_ce_hw_instances)) {
8284 pr_err("Fail: get hlos number of ce hw instance\n");
8285 rc = -EINVAL;
8286 goto out;
8287 }
8288 } else {
8289 hlos_num_ce_hw_instances = 1;
8290 }
8291
8292 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8293 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8294 MAX_CE_PIPE_PAIR_PER_UNIT);
8295 rc = -EINVAL;
8296 goto out;
8297 }
8298
8299 if (of_property_read_u32_array((&pdev->dev)->of_node,
8300 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8301 hlos_num_ce_hw_instances)) {
8302 pr_err("Fail: get hlos ce hw instance info\n");
8303 rc = -EINVAL;
8304 goto out;
8305 }
8306
8307 if (qseecom.support_fde) {
8308 pce_info_use = qseecom.ce_info.fde =
8309 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8310 if (!pce_info_use) {
8311 pr_err("failed to alloc memory\n");
8312 rc = -ENOMEM;
8313 goto out;
8314 }
8315 /* by default for old db */
8316 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8317 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8318 pce_info_use->alloc = false;
8319 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8320 pce_info_use->ce_pipe_entry = NULL;
8321 if (of_property_read_u32((&pdev->dev)->of_node,
8322 "qcom,disk-encrypt-pipe-pair",
8323 &disk_encrypt_pipe)) {
8324 pr_err("Fail to get FDE pipe information.\n");
8325 rc = -EINVAL;
8326 goto out;
8327 } else {
8328 pr_debug("disk-encrypt-pipe-pair=0x%x",
8329 disk_encrypt_pipe);
8330 }
8331 entry = pce_info_use->num_ce_pipe_entries =
8332 hlos_num_ce_hw_instances;
8333 pce_entry = pce_info_use->ce_pipe_entry =
8334 kcalloc(entry,
8335 sizeof(struct qseecom_ce_pipe_entry),
8336 GFP_KERNEL);
8337 if (pce_entry == NULL) {
8338 pr_err("failed to alloc memory\n");
8339 rc = -ENOMEM;
8340 goto out;
8341 }
8342 for (i = 0; i < entry; i++) {
8343 pce_entry->ce_num = hlos_ce_hw_instance[i];
8344 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8345 pce_entry->valid = 1;
8346 pce_entry++;
8347 }
8348 } else {
8349 pr_warn("Device does not support FDE");
8350 disk_encrypt_pipe = 0xff;
8351 }
8352 if (qseecom.support_pfe) {
8353 pce_info_use = qseecom.ce_info.pfe =
8354 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8355 if (!pce_info_use) {
8356 pr_err("failed to alloc memory\n");
8357 rc = -ENOMEM;
8358 goto out;
8359 }
8360 /* by default for old db */
8361 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8362 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8363 pce_info_use->alloc = false;
8364 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8365 pce_info_use->ce_pipe_entry = NULL;
8366
8367 if (of_property_read_u32((&pdev->dev)->of_node,
8368 "qcom,file-encrypt-pipe-pair",
8369 &file_encrypt_pipe)) {
8370 pr_err("Fail to get PFE pipe information.\n");
8371 rc = -EINVAL;
8372 goto out;
8373 } else {
8374 pr_debug("file-encrypt-pipe-pair=0x%x",
8375 file_encrypt_pipe);
8376 }
8377 entry = pce_info_use->num_ce_pipe_entries =
8378 hlos_num_ce_hw_instances;
8379 pce_entry = pce_info_use->ce_pipe_entry =
8380 kcalloc(entry,
8381 sizeof(struct qseecom_ce_pipe_entry),
8382 GFP_KERNEL);
8383 if (pce_entry == NULL) {
8384 pr_err("failed to alloc memory\n");
8385 rc = -ENOMEM;
8386 goto out;
8387 }
8388 for (i = 0; i < entry; i++) {
8389 pce_entry->ce_num = hlos_ce_hw_instance[i];
8390 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8391 pce_entry->valid = 1;
8392 pce_entry++;
8393 }
8394 } else {
8395 pr_warn("Device does not support PFE");
8396 file_encrypt_pipe = 0xff;
8397 }
8398
8399out1:
8400 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8401 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8402out:
8403 if (rc) {
8404 if (qseecom.ce_info.fde) {
8405 pce_info_use = qseecom.ce_info.fde;
8406 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8407 pce_entry = pce_info_use->ce_pipe_entry;
8408 kfree(pce_entry);
8409 pce_info_use++;
8410 }
8411 }
8412 kfree(qseecom.ce_info.fde);
8413 qseecom.ce_info.fde = NULL;
8414 if (qseecom.ce_info.pfe) {
8415 pce_info_use = qseecom.ce_info.pfe;
8416 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8417 pce_entry = pce_info_use->ce_pipe_entry;
8418 kfree(pce_entry);
8419 pce_info_use++;
8420 }
8421 }
8422 kfree(qseecom.ce_info.pfe);
8423 qseecom.ce_info.pfe = NULL;
8424 }
8425 kfree(unit_tbl);
8426 kfree(pfde_tbl);
8427 return rc;
8428}
8429
8430static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8431 void __user *argp)
8432{
8433 struct qseecom_ce_info_req req;
8434 struct qseecom_ce_info_req *pinfo = &req;
8435 int ret = 0;
8436 int i;
8437 unsigned int entries;
8438 struct qseecom_ce_info_use *pce_info_use, *p;
8439 int total = 0;
8440 bool found = false;
8441 struct qseecom_ce_pipe_entry *pce_entry;
8442
8443 ret = copy_from_user(pinfo, argp,
8444 sizeof(struct qseecom_ce_info_req));
8445 if (ret) {
8446 pr_err("copy_from_user failed\n");
8447 return ret;
8448 }
8449
8450 switch (pinfo->usage) {
8451 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8452 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8453 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8454 if (qseecom.support_fde) {
8455 p = qseecom.ce_info.fde;
8456 total = qseecom.ce_info.num_fde;
8457 } else {
8458 pr_err("system does not support fde\n");
8459 return -EINVAL;
8460 }
8461 break;
8462 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8463 if (qseecom.support_pfe) {
8464 p = qseecom.ce_info.pfe;
8465 total = qseecom.ce_info.num_pfe;
8466 } else {
8467 pr_err("system does not support pfe\n");
8468 return -EINVAL;
8469 }
8470 break;
8471 default:
8472 pr_err("unsupported usage %d\n", pinfo->usage);
8473 return -EINVAL;
8474 }
8475
8476 pce_info_use = NULL;
8477 for (i = 0; i < total; i++) {
8478 if (!p->alloc)
8479 pce_info_use = p;
8480 else if (!memcmp(p->handle, pinfo->handle,
8481 MAX_CE_INFO_HANDLE_SIZE)) {
8482 pce_info_use = p;
8483 found = true;
8484 break;
8485 }
8486 p++;
8487 }
8488
8489 if (pce_info_use == NULL)
8490 return -EBUSY;
8491
8492 pinfo->unit_num = pce_info_use->unit_num;
8493 if (!pce_info_use->alloc) {
8494 pce_info_use->alloc = true;
8495 memcpy(pce_info_use->handle,
8496 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8497 }
8498 if (pce_info_use->num_ce_pipe_entries >
8499 MAX_CE_PIPE_PAIR_PER_UNIT)
8500 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8501 else
8502 entries = pce_info_use->num_ce_pipe_entries;
8503 pinfo->num_ce_pipe_entries = entries;
8504 pce_entry = pce_info_use->ce_pipe_entry;
8505 for (i = 0; i < entries; i++, pce_entry++)
8506 pinfo->ce_pipe_entry[i] = *pce_entry;
8507 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8508 pinfo->ce_pipe_entry[i].valid = 0;
8509
8510 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8511 pr_err("copy_to_user failed\n");
8512 ret = -EFAULT;
8513 }
8514 return ret;
8515}
8516
8517static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8518 void __user *argp)
8519{
8520 struct qseecom_ce_info_req req;
8521 struct qseecom_ce_info_req *pinfo = &req;
8522 int ret = 0;
8523 struct qseecom_ce_info_use *p;
8524 int total = 0;
8525 int i;
8526 bool found = false;
8527
8528 ret = copy_from_user(pinfo, argp,
8529 sizeof(struct qseecom_ce_info_req));
8530 if (ret)
8531 return ret;
8532
8533 switch (pinfo->usage) {
8534 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8535 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8536 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8537 if (qseecom.support_fde) {
8538 p = qseecom.ce_info.fde;
8539 total = qseecom.ce_info.num_fde;
8540 } else {
8541 pr_err("system does not support fde\n");
8542 return -EINVAL;
8543 }
8544 break;
8545 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8546 if (qseecom.support_pfe) {
8547 p = qseecom.ce_info.pfe;
8548 total = qseecom.ce_info.num_pfe;
8549 } else {
8550 pr_err("system does not support pfe\n");
8551 return -EINVAL;
8552 }
8553 break;
8554 default:
8555 pr_err("unsupported usage %d\n", pinfo->usage);
8556 return -EINVAL;
8557 }
8558
8559 for (i = 0; i < total; i++) {
8560 if (p->alloc &&
8561 !memcmp(p->handle, pinfo->handle,
8562 MAX_CE_INFO_HANDLE_SIZE)) {
8563 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8564 p->alloc = false;
8565 found = true;
8566 break;
8567 }
8568 p++;
8569 }
8570 return ret;
8571}
8572
8573static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8574 void __user *argp)
8575{
8576 struct qseecom_ce_info_req req;
8577 struct qseecom_ce_info_req *pinfo = &req;
8578 int ret = 0;
8579 int i;
8580 unsigned int entries;
8581 struct qseecom_ce_info_use *pce_info_use, *p;
8582 int total = 0;
8583 bool found = false;
8584 struct qseecom_ce_pipe_entry *pce_entry;
8585
8586 ret = copy_from_user(pinfo, argp,
8587 sizeof(struct qseecom_ce_info_req));
8588 if (ret)
8589 return ret;
8590
8591 switch (pinfo->usage) {
8592 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8593 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8594 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8595 if (qseecom.support_fde) {
8596 p = qseecom.ce_info.fde;
8597 total = qseecom.ce_info.num_fde;
8598 } else {
8599 pr_err("system does not support fde\n");
8600 return -EINVAL;
8601 }
8602 break;
8603 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8604 if (qseecom.support_pfe) {
8605 p = qseecom.ce_info.pfe;
8606 total = qseecom.ce_info.num_pfe;
8607 } else {
8608 pr_err("system does not support pfe\n");
8609 return -EINVAL;
8610 }
8611 break;
8612 default:
8613 pr_err("unsupported usage %d\n", pinfo->usage);
8614 return -EINVAL;
8615 }
8616
8617 pce_info_use = NULL;
8618 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8619 pinfo->num_ce_pipe_entries = 0;
8620 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8621 pinfo->ce_pipe_entry[i].valid = 0;
8622
8623 for (i = 0; i < total; i++) {
8624
8625 if (p->alloc && !memcmp(p->handle,
8626 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8627 pce_info_use = p;
8628 found = true;
8629 break;
8630 }
8631 p++;
8632 }
8633 if (!pce_info_use)
8634 goto out;
8635 pinfo->unit_num = pce_info_use->unit_num;
8636 if (pce_info_use->num_ce_pipe_entries >
8637 MAX_CE_PIPE_PAIR_PER_UNIT)
8638 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8639 else
8640 entries = pce_info_use->num_ce_pipe_entries;
8641 pinfo->num_ce_pipe_entries = entries;
8642 pce_entry = pce_info_use->ce_pipe_entry;
8643 for (i = 0; i < entries; i++, pce_entry++)
8644 pinfo->ce_pipe_entry[i] = *pce_entry;
8645 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8646 pinfo->ce_pipe_entry[i].valid = 0;
8647out:
8648 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8649 pr_err("copy_to_user failed\n");
8650 ret = -EFAULT;
8651 }
8652 return ret;
8653}
8654
8655/*
8656 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8657 * then whitelist feature is not supported.
8658 */
8659static int qseecom_check_whitelist_feature(void)
8660{
8661 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8662
8663 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8664}
8665
8666static int qseecom_probe(struct platform_device *pdev)
8667{
8668 int rc;
8669 int i;
8670 uint32_t feature = 10;
8671 struct device *class_dev;
8672 struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
8673 struct qseecom_command_scm_resp resp;
8674 struct qseecom_ce_info_use *pce_info_use = NULL;
8675
8676 qseecom.qsee_bw_count = 0;
8677 qseecom.qsee_perf_client = 0;
8678 qseecom.qsee_sfpb_bw_count = 0;
8679
8680 qseecom.qsee.ce_core_clk = NULL;
8681 qseecom.qsee.ce_clk = NULL;
8682 qseecom.qsee.ce_core_src_clk = NULL;
8683 qseecom.qsee.ce_bus_clk = NULL;
8684
8685 qseecom.cumulative_mode = 0;
8686 qseecom.current_mode = INACTIVE;
8687 qseecom.support_bus_scaling = false;
8688 qseecom.support_fde = false;
8689 qseecom.support_pfe = false;
8690
8691 qseecom.ce_drv.ce_core_clk = NULL;
8692 qseecom.ce_drv.ce_clk = NULL;
8693 qseecom.ce_drv.ce_core_src_clk = NULL;
8694 qseecom.ce_drv.ce_bus_clk = NULL;
8695 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8696
8697 qseecom.app_block_ref_cnt = 0;
8698 init_waitqueue_head(&qseecom.app_block_wq);
8699 qseecom.whitelist_support = true;
8700
8701 rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
8702 if (rc < 0) {
8703 pr_err("alloc_chrdev_region failed %d\n", rc);
8704 return rc;
8705 }
8706
8707 driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
8708 if (IS_ERR(driver_class)) {
8709 rc = -ENOMEM;
8710 pr_err("class_create failed %d\n", rc);
8711 goto exit_unreg_chrdev_region;
8712 }
8713
8714 class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
8715 QSEECOM_DEV);
8716 if (IS_ERR(class_dev)) {
8717 pr_err("class_device_create failed %d\n", rc);
8718 rc = -ENOMEM;
8719 goto exit_destroy_class;
8720 }
8721
8722 cdev_init(&qseecom.cdev, &qseecom_fops);
8723 qseecom.cdev.owner = THIS_MODULE;
8724
8725 rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
8726 if (rc < 0) {
8727 pr_err("cdev_add failed %d\n", rc);
8728 goto exit_destroy_device;
8729 }
8730
8731 INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008732 INIT_LIST_HEAD(&qseecom.registered_app_list_head);
8733 spin_lock_init(&qseecom.registered_app_list_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08008734 INIT_LIST_HEAD(&qseecom.unregister_lsnr_pending_list_head);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008735 INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
8736 spin_lock_init(&qseecom.registered_kclient_list_lock);
8737 init_waitqueue_head(&qseecom.send_resp_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08008738 init_waitqueue_head(&qseecom.register_lsnr_pending_wq);
Zhen Kongc4c162a2019-01-23 12:07:12 -08008739 init_waitqueue_head(&qseecom.unregister_lsnr_kthread_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008740 qseecom.send_resp_flag = 0;
8741
8742 qseecom.qsee_version = QSEEE_VERSION_00;
Zhen Kong03f220d2019-02-01 17:12:34 -08008743 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008744 rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
8745 &resp, sizeof(resp));
Zhen Kong03f220d2019-02-01 17:12:34 -08008746 mutex_unlock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008747 pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
8748 if (rc) {
8749 pr_err("Failed to get QSEE version info %d\n", rc);
8750 goto exit_del_cdev;
8751 }
8752 qseecom.qsee_version = resp.result;
8753 qseecom.qseos_version = QSEOS_VERSION_14;
8754 qseecom.commonlib_loaded = false;
8755 qseecom.commonlib64_loaded = false;
8756 qseecom.pdev = class_dev;
8757 /* Create ION msm client */
8758 qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
8759 if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
8760 pr_err("Ion client cannot be created\n");
8761 rc = -ENOMEM;
8762 goto exit_del_cdev;
8763 }
8764
8765 /* register client for bus scaling */
8766 if (pdev->dev.of_node) {
8767 qseecom.pdev->of_node = pdev->dev.of_node;
8768 qseecom.support_bus_scaling =
8769 of_property_read_bool((&pdev->dev)->of_node,
8770 "qcom,support-bus-scaling");
8771 rc = qseecom_retrieve_ce_data(pdev);
8772 if (rc)
8773 goto exit_destroy_ion_client;
8774 qseecom.appsbl_qseecom_support =
8775 of_property_read_bool((&pdev->dev)->of_node,
8776 "qcom,appsbl-qseecom-support");
8777 pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
8778 qseecom.appsbl_qseecom_support);
8779
8780 qseecom.commonlib64_loaded =
8781 of_property_read_bool((&pdev->dev)->of_node,
8782 "qcom,commonlib64-loaded-by-uefi");
8783 pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
8784 qseecom.commonlib64_loaded);
8785 qseecom.fde_key_size =
8786 of_property_read_bool((&pdev->dev)->of_node,
8787 "qcom,fde-key-size");
8788 qseecom.no_clock_support =
8789 of_property_read_bool((&pdev->dev)->of_node,
8790 "qcom,no-clock-support");
8791 if (!qseecom.no_clock_support) {
8792 pr_info("qseecom clocks handled by other subsystem\n");
8793 } else {
8794 pr_info("no-clock-support=0x%x",
8795 qseecom.no_clock_support);
8796 }
8797
8798 if (of_property_read_u32((&pdev->dev)->of_node,
8799 "qcom,qsee-reentrancy-support",
8800 &qseecom.qsee_reentrancy_support)) {
8801 pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
8802 qseecom.qsee_reentrancy_support = 0;
8803 } else {
8804 pr_warn("qseecom.qsee_reentrancy_support = %d\n",
8805 qseecom.qsee_reentrancy_support);
8806 }
8807
Jiten Patela7bb1d52018-05-11 12:34:26 +05308808 qseecom.enable_key_wrap_in_ks =
8809 of_property_read_bool((&pdev->dev)->of_node,
8810 "qcom,enable-key-wrap-in-ks");
8811 if (qseecom.enable_key_wrap_in_ks) {
8812 pr_warn("qseecom.enable_key_wrap_in_ks = %d\n",
8813 qseecom.enable_key_wrap_in_ks);
8814 }
8815
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008816 /*
8817 * The qseecom bus scaling flag can not be enabled when
8818 * crypto clock is not handled by HLOS.
8819 */
8820 if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
8821 pr_err("support_bus_scaling flag can not be enabled.\n");
8822 rc = -EINVAL;
8823 goto exit_destroy_ion_client;
8824 }
8825
8826 if (of_property_read_u32((&pdev->dev)->of_node,
8827 "qcom,ce-opp-freq",
8828 &qseecom.ce_opp_freq_hz)) {
8829 pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
8830 qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
8831 }
8832 rc = __qseecom_init_clk(CLK_QSEE);
8833 if (rc)
8834 goto exit_destroy_ion_client;
8835
8836 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8837 (qseecom.support_pfe || qseecom.support_fde)) {
8838 rc = __qseecom_init_clk(CLK_CE_DRV);
8839 if (rc) {
8840 __qseecom_deinit_clk(CLK_QSEE);
8841 goto exit_destroy_ion_client;
8842 }
8843 } else {
8844 struct qseecom_clk *qclk;
8845
8846 qclk = &qseecom.qsee;
8847 qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
8848 qseecom.ce_drv.ce_clk = qclk->ce_clk;
8849 qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
8850 qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
8851 }
8852
8853 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8854 msm_bus_cl_get_pdata(pdev);
8855 if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
8856 (!qseecom.is_apps_region_protected &&
8857 !qseecom.appsbl_qseecom_support)) {
8858 struct resource *resource = NULL;
8859 struct qsee_apps_region_info_ireq req;
8860 struct qsee_apps_region_info_64bit_ireq req_64bit;
8861 struct qseecom_command_scm_resp resp;
8862 void *cmd_buf = NULL;
8863 size_t cmd_len;
8864
8865 resource = platform_get_resource_byname(pdev,
8866 IORESOURCE_MEM, "secapp-region");
8867 if (resource) {
8868 if (qseecom.qsee_version < QSEE_VERSION_40) {
8869 req.qsee_cmd_id =
8870 QSEOS_APP_REGION_NOTIFICATION;
8871 req.addr = (uint32_t)resource->start;
8872 req.size = resource_size(resource);
8873 cmd_buf = (void *)&req;
8874 cmd_len = sizeof(struct
8875 qsee_apps_region_info_ireq);
8876 pr_warn("secure app region addr=0x%x size=0x%x",
8877 req.addr, req.size);
8878 } else {
8879 req_64bit.qsee_cmd_id =
8880 QSEOS_APP_REGION_NOTIFICATION;
8881 req_64bit.addr = resource->start;
8882 req_64bit.size = resource_size(
8883 resource);
8884 cmd_buf = (void *)&req_64bit;
8885 cmd_len = sizeof(struct
8886 qsee_apps_region_info_64bit_ireq);
8887 pr_warn("secure app region addr=0x%llx size=0x%x",
8888 req_64bit.addr, req_64bit.size);
8889 }
8890 } else {
8891 pr_err("Fail to get secure app region info\n");
8892 rc = -EINVAL;
8893 goto exit_deinit_clock;
8894 }
8895 rc = __qseecom_enable_clk(CLK_QSEE);
8896 if (rc) {
8897 pr_err("CLK_QSEE enabling failed (%d)\n", rc);
8898 rc = -EIO;
8899 goto exit_deinit_clock;
8900 }
Zhen Kong03f220d2019-02-01 17:12:34 -08008901 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008902 rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
8903 cmd_buf, cmd_len,
8904 &resp, sizeof(resp));
Zhen Kong03f220d2019-02-01 17:12:34 -08008905 mutex_unlock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008906 __qseecom_disable_clk(CLK_QSEE);
8907 if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
8908 pr_err("send secapp reg fail %d resp.res %d\n",
8909 rc, resp.result);
8910 rc = -EINVAL;
8911 goto exit_deinit_clock;
8912 }
8913 }
8914 /*
8915 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
8916 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
8917 * Pls add "qseecom.commonlib64_loaded = true" here too.
8918 */
8919 if (qseecom.is_apps_region_protected ||
8920 qseecom.appsbl_qseecom_support)
8921 qseecom.commonlib_loaded = true;
8922 } else {
8923 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8924 pdev->dev.platform_data;
8925 }
8926 if (qseecom.support_bus_scaling) {
8927 init_timer(&(qseecom.bw_scale_down_timer));
8928 INIT_WORK(&qseecom.bw_inactive_req_ws,
8929 qseecom_bw_inactive_req_work);
8930 qseecom.bw_scale_down_timer.function =
8931 qseecom_scale_bus_bandwidth_timer_callback;
8932 }
8933 qseecom.timer_running = false;
8934 qseecom.qsee_perf_client = msm_bus_scale_register_client(
8935 qseecom_platform_support);
8936
8937 qseecom.whitelist_support = qseecom_check_whitelist_feature();
8938 pr_warn("qseecom.whitelist_support = %d\n",
8939 qseecom.whitelist_support);
8940
8941 if (!qseecom.qsee_perf_client)
8942 pr_err("Unable to register bus client\n");
8943
Zhen Kongc4c162a2019-01-23 12:07:12 -08008944 /*create a kthread to process pending listener unregister task */
8945 qseecom.unregister_lsnr_kthread_task = kthread_run(
8946 __qseecom_unregister_listener_kthread_func,
8947 NULL, "qseecom-unreg-lsnr");
8948 if (IS_ERR(qseecom.unregister_lsnr_kthread_task)) {
8949 pr_err("failed to create kthread to unregister listener\n");
8950 rc = -EINVAL;
8951 goto exit_deinit_clock;
8952 }
8953 atomic_set(&qseecom.unregister_lsnr_kthread_state,
8954 LSNR_UNREG_KT_SLEEP);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008955 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
8956 return 0;
8957
8958exit_deinit_clock:
8959 __qseecom_deinit_clk(CLK_QSEE);
8960 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8961 (qseecom.support_pfe || qseecom.support_fde))
8962 __qseecom_deinit_clk(CLK_CE_DRV);
8963exit_destroy_ion_client:
8964 if (qseecom.ce_info.fde) {
8965 pce_info_use = qseecom.ce_info.fde;
8966 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8967 kzfree(pce_info_use->ce_pipe_entry);
8968 pce_info_use++;
8969 }
8970 kfree(qseecom.ce_info.fde);
8971 }
8972 if (qseecom.ce_info.pfe) {
8973 pce_info_use = qseecom.ce_info.pfe;
8974 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8975 kzfree(pce_info_use->ce_pipe_entry);
8976 pce_info_use++;
8977 }
8978 kfree(qseecom.ce_info.pfe);
8979 }
8980 ion_client_destroy(qseecom.ion_clnt);
8981exit_del_cdev:
8982 cdev_del(&qseecom.cdev);
8983exit_destroy_device:
8984 device_destroy(driver_class, qseecom_device_no);
8985exit_destroy_class:
8986 class_destroy(driver_class);
8987exit_unreg_chrdev_region:
8988 unregister_chrdev_region(qseecom_device_no, 1);
8989 return rc;
8990}
8991
8992static int qseecom_remove(struct platform_device *pdev)
8993{
8994 struct qseecom_registered_kclient_list *kclient = NULL;
Monika Singhe711b162018-04-24 09:54:50 +05308995 struct qseecom_registered_kclient_list *kclient_tmp = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008996 unsigned long flags = 0;
8997 int ret = 0;
8998 int i;
8999 struct qseecom_ce_pipe_entry *pce_entry;
9000 struct qseecom_ce_info_use *pce_info_use;
9001
9002 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
9003 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
9004
Monika Singhe711b162018-04-24 09:54:50 +05309005 list_for_each_entry_safe(kclient, kclient_tmp,
9006 &qseecom.registered_kclient_list_head, list) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009007
9008 /* Break the loop if client handle is NULL */
Zhen Kong9131def2018-07-13 12:02:32 -07009009 if (!kclient->handle) {
9010 list_del(&kclient->list);
9011 kzfree(kclient);
9012 break;
9013 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009014
9015 list_del(&kclient->list);
9016 mutex_lock(&app_access_lock);
9017 ret = qseecom_unload_app(kclient->handle->dev, false);
9018 mutex_unlock(&app_access_lock);
9019 if (!ret) {
9020 kzfree(kclient->handle->dev);
9021 kzfree(kclient->handle);
9022 kzfree(kclient);
9023 }
9024 }
9025
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009026 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
9027
9028 if (qseecom.qseos_version > QSEEE_VERSION_00)
9029 qseecom_unload_commonlib_image();
9030
9031 if (qseecom.qsee_perf_client)
9032 msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
9033 0);
9034 if (pdev->dev.platform_data != NULL)
9035 msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
9036
9037 if (qseecom.support_bus_scaling) {
9038 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9039 del_timer_sync(&qseecom.bw_scale_down_timer);
9040 }
9041
9042 if (qseecom.ce_info.fde) {
9043 pce_info_use = qseecom.ce_info.fde;
9044 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
9045 pce_entry = pce_info_use->ce_pipe_entry;
9046 kfree(pce_entry);
9047 pce_info_use++;
9048 }
9049 }
9050 kfree(qseecom.ce_info.fde);
9051 if (qseecom.ce_info.pfe) {
9052 pce_info_use = qseecom.ce_info.pfe;
9053 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
9054 pce_entry = pce_info_use->ce_pipe_entry;
9055 kfree(pce_entry);
9056 pce_info_use++;
9057 }
9058 }
9059 kfree(qseecom.ce_info.pfe);
9060
9061 /* register client for bus scaling */
9062 if (pdev->dev.of_node) {
9063 __qseecom_deinit_clk(CLK_QSEE);
9064 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
9065 (qseecom.support_pfe || qseecom.support_fde))
9066 __qseecom_deinit_clk(CLK_CE_DRV);
9067 }
9068
9069 ion_client_destroy(qseecom.ion_clnt);
9070
Zhen Kongc4c162a2019-01-23 12:07:12 -08009071 kthread_stop(qseecom.unregister_lsnr_kthread_task);
9072
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009073 cdev_del(&qseecom.cdev);
9074
9075 device_destroy(driver_class, qseecom_device_no);
9076
9077 class_destroy(driver_class);
9078
9079 unregister_chrdev_region(qseecom_device_no, 1);
9080
9081 return ret;
9082}
9083
9084static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
9085{
9086 int ret = 0;
9087 struct qseecom_clk *qclk;
9088
9089 qclk = &qseecom.qsee;
9090 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
9091 if (qseecom.no_clock_support)
9092 return 0;
9093
9094 mutex_lock(&qsee_bw_mutex);
9095 mutex_lock(&clk_access_lock);
9096
9097 if (qseecom.current_mode != INACTIVE) {
9098 ret = msm_bus_scale_client_update_request(
9099 qseecom.qsee_perf_client, INACTIVE);
9100 if (ret)
9101 pr_err("Fail to scale down bus\n");
9102 else
9103 qseecom.current_mode = INACTIVE;
9104 }
9105
9106 if (qclk->clk_access_cnt) {
9107 if (qclk->ce_clk != NULL)
9108 clk_disable_unprepare(qclk->ce_clk);
9109 if (qclk->ce_core_clk != NULL)
9110 clk_disable_unprepare(qclk->ce_core_clk);
9111 if (qclk->ce_bus_clk != NULL)
9112 clk_disable_unprepare(qclk->ce_bus_clk);
9113 }
9114
9115 del_timer_sync(&(qseecom.bw_scale_down_timer));
9116 qseecom.timer_running = false;
9117
9118 mutex_unlock(&clk_access_lock);
9119 mutex_unlock(&qsee_bw_mutex);
9120 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9121
9122 return 0;
9123}
9124
9125static int qseecom_resume(struct platform_device *pdev)
9126{
9127 int mode = 0;
9128 int ret = 0;
9129 struct qseecom_clk *qclk;
9130
9131 qclk = &qseecom.qsee;
9132 if (qseecom.no_clock_support)
9133 goto exit;
9134
9135 mutex_lock(&qsee_bw_mutex);
9136 mutex_lock(&clk_access_lock);
9137 if (qseecom.cumulative_mode >= HIGH)
9138 mode = HIGH;
9139 else
9140 mode = qseecom.cumulative_mode;
9141
9142 if (qseecom.cumulative_mode != INACTIVE) {
9143 ret = msm_bus_scale_client_update_request(
9144 qseecom.qsee_perf_client, mode);
9145 if (ret)
9146 pr_err("Fail to scale up bus to %d\n", mode);
9147 else
9148 qseecom.current_mode = mode;
9149 }
9150
9151 if (qclk->clk_access_cnt) {
9152 if (qclk->ce_core_clk != NULL) {
9153 ret = clk_prepare_enable(qclk->ce_core_clk);
9154 if (ret) {
9155 pr_err("Unable to enable/prep CE core clk\n");
9156 qclk->clk_access_cnt = 0;
9157 goto err;
9158 }
9159 }
9160 if (qclk->ce_clk != NULL) {
9161 ret = clk_prepare_enable(qclk->ce_clk);
9162 if (ret) {
9163 pr_err("Unable to enable/prep CE iface clk\n");
9164 qclk->clk_access_cnt = 0;
9165 goto ce_clk_err;
9166 }
9167 }
9168 if (qclk->ce_bus_clk != NULL) {
9169 ret = clk_prepare_enable(qclk->ce_bus_clk);
9170 if (ret) {
9171 pr_err("Unable to enable/prep CE bus clk\n");
9172 qclk->clk_access_cnt = 0;
9173 goto ce_bus_clk_err;
9174 }
9175 }
9176 }
9177
9178 if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
9179 qseecom.bw_scale_down_timer.expires = jiffies +
9180 msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
9181 mod_timer(&(qseecom.bw_scale_down_timer),
9182 qseecom.bw_scale_down_timer.expires);
9183 qseecom.timer_running = true;
9184 }
9185
9186 mutex_unlock(&clk_access_lock);
9187 mutex_unlock(&qsee_bw_mutex);
9188 goto exit;
9189
9190ce_bus_clk_err:
9191 if (qclk->ce_clk)
9192 clk_disable_unprepare(qclk->ce_clk);
9193ce_clk_err:
9194 if (qclk->ce_core_clk)
9195 clk_disable_unprepare(qclk->ce_core_clk);
9196err:
9197 mutex_unlock(&clk_access_lock);
9198 mutex_unlock(&qsee_bw_mutex);
9199 ret = -EIO;
9200exit:
9201 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
9202 return ret;
9203}
9204
9205static const struct of_device_id qseecom_match[] = {
9206 {
9207 .compatible = "qcom,qseecom",
9208 },
9209 {}
9210};
9211
9212static struct platform_driver qseecom_plat_driver = {
9213 .probe = qseecom_probe,
9214 .remove = qseecom_remove,
9215 .suspend = qseecom_suspend,
9216 .resume = qseecom_resume,
9217 .driver = {
9218 .name = "qseecom",
9219 .owner = THIS_MODULE,
9220 .of_match_table = qseecom_match,
9221 },
9222};
9223
9224static int qseecom_init(void)
9225{
9226 return platform_driver_register(&qseecom_plat_driver);
9227}
9228
9229static void qseecom_exit(void)
9230{
9231 platform_driver_unregister(&qseecom_plat_driver);
9232}
9233
9234MODULE_LICENSE("GPL v2");
9235MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
9236
9237module_init(qseecom_init);
9238module_exit(qseecom_exit);