1/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
4 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
53#include <linux/kthread.h>
54
55#define QSEECOM_DEV "qseecom"
56#define QSEOS_VERSION_14 0x14
57#define QSEEE_VERSION_00 0x400000
58#define QSEE_VERSION_01 0x401000
59#define QSEE_VERSION_02 0x402000
60#define QSEE_VERSION_03 0x403000
61#define QSEE_VERSION_04 0x404000
62#define QSEE_VERSION_05 0x405000
63#define QSEE_VERSION_20 0x800000
64#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
65
66#define QSEE_CE_CLK_100MHZ 100000000
67#define CE_CLK_DIV 1000000
68
69#define QSEECOM_MAX_SG_ENTRY 4096
70#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
71 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
72
73#define QSEECOM_INVALID_KEY_ID 0xff
74
75/* Save partition image hash for authentication check */
76#define SCM_SAVE_PARTITION_HASH_ID 0x01
77
78/* Check if enterprise security is activated */
79#define SCM_IS_ACTIVATED_ID 0x02
80
81/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
82#define SCM_MDTP_CIPHER_DIP 0x01
83
84/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
85#define MAX_DIP 0x20000
86
87#define RPMB_SERVICE 0x2000
88#define SSD_SERVICE 0x3000
89
90#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
91#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
92#define TWO 2
93#define QSEECOM_UFS_ICE_CE_NUM 10
94#define QSEECOM_SDCC_ICE_CE_NUM 20
95#define QSEECOM_ICE_FDE_KEY_INDEX 0
96
97#define PHY_ADDR_4G (1ULL<<32)
98
99#define QSEECOM_STATE_NOT_READY 0
100#define QSEECOM_STATE_SUSPEND 1
101#define QSEECOM_STATE_READY 2
102#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
103
104/*
105 * Default the ce info unit to 0 for
106 * services which support
107 * only a single instance.
108 * Most services are in this category.
109 */
110#define DEFAULT_CE_INFO_UNIT 0
111#define DEFAULT_NUM_CE_INFO_UNIT 1
112
113#define FDE_FLAG_POS 4
114#define ENABLE_KEY_WRAP_IN_KS (1 << FDE_FLAG_POS)
115
116enum qseecom_clk_definitions {
117 CLK_DFAB = 0,
118 CLK_SFPB,
119};
120
121enum qseecom_ice_key_size_type {
122 QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
123 (0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
124 QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
125 (1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
126 QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
127 (0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
128};
129
130enum qseecom_client_handle_type {
131 QSEECOM_CLIENT_APP = 1,
132 QSEECOM_LISTENER_SERVICE,
133 QSEECOM_SECURE_SERVICE,
134 QSEECOM_GENERIC,
135 QSEECOM_UNAVAILABLE_CLIENT_APP,
136};
137
138enum qseecom_ce_hw_instance {
139 CLK_QSEE = 0,
140 CLK_CE_DRV,
141 CLK_INVALID,
142};
143
144enum qseecom_listener_unregister_kthread_state {
145 LSNR_UNREG_KT_SLEEP = 0,
146 LSNR_UNREG_KT_WAKEUP,
147};
148
149static struct class *driver_class;
150static dev_t qseecom_device_no;
151
152static DEFINE_MUTEX(qsee_bw_mutex);
153static DEFINE_MUTEX(app_access_lock);
154static DEFINE_MUTEX(clk_access_lock);
155static DEFINE_MUTEX(listener_access_lock);
156
157
158struct sglist_info {
159 uint32_t indexAndFlags;
160 uint32_t sizeOrCount;
161};
162
163/*
164 * The 31st bit indicates whether one or multiple physical addresses are inside
165 * the request buffer. If it is set, the index locates a single physical addr
166 * inside the request buffer, and `sizeOrCount` is the size of the memory being
167 * shared at that physical address.
168 * Otherwise, the index locates an array of {start, len} pairs (a
169 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
170 * that array.
171 *
172 * The 30th bit indicates a 64-bit or 32-bit address; when it is set, physical
173 * addr and scatter/gather entry sizes are 64-bit values. Otherwise, 32-bit values.
174 *
175 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
176 */
177#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
178 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
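/*
 * Illustrative example (added note, not part of the original source):
 * packing a single-address entry (c = 1), with 64-bit sizes (s = 1), at
 * request-buffer offset 0x10 gives
 *   SGLISTINFO_SET_INDEX_FLAG(1, 1, 0x10) == (1 << 31) | (1 << 30) | 0x10
 *                                         == 0xC0000010
 */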
179
180#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
181
182#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
183
184#define MAKE_WHITELIST_VERSION(major, minor, patch) \
185 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
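/*
 * Illustrative example (added note): MAKE_WHITELIST_VERSION(1, 0, 0) packs
 * major = 1, minor = 0, patch = 0 into (1 << 22) == 0x400000.
 */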
186
187struct qseecom_registered_listener_list {
188 struct list_head list;
189 struct qseecom_register_listener_req svc;
190 void *user_virt_sb_base;
191 u8 *sb_virt;
192 phys_addr_t sb_phys;
193 size_t sb_length;
194 struct ion_handle *ihandle; /* Retrieve phy addr */
195 wait_queue_head_t rcv_req_wq;
196 /* rcv_req_flag: 0: ready and empty; 1: received req */
197 int rcv_req_flag;
198 int send_resp_flag;
199 bool listener_in_use;
200 /* wq for thread blocked on this listener*/
201 wait_queue_head_t listener_block_app_wq;
202 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
203 uint32_t sglist_cnt;
204 int abort;
205 bool unregister_pending;
206};
207
208struct qseecom_unregister_pending_list {
209 struct list_head list;
210 struct qseecom_dev_handle *data;
211};
212
213struct qseecom_registered_app_list {
214 struct list_head list;
215 u32 app_id;
216 u32 ref_cnt;
217 char app_name[MAX_APP_NAME_SIZE];
218 u32 app_arch;
219 bool app_blocked;
220 u32 check_block;
221 u32 blocked_on_listener_id;
222};
223
224struct qseecom_registered_kclient_list {
225 struct list_head list;
226 struct qseecom_handle *handle;
227};
228
229struct qseecom_ce_info_use {
230 unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
231 unsigned int unit_num;
232 unsigned int num_ce_pipe_entries;
233 struct qseecom_ce_pipe_entry *ce_pipe_entry;
234 bool alloc;
235 uint32_t type;
236};
237
238struct ce_hw_usage_info {
239 uint32_t qsee_ce_hw_instance;
240 uint32_t num_fde;
241 struct qseecom_ce_info_use *fde;
242 uint32_t num_pfe;
243 struct qseecom_ce_info_use *pfe;
244};
245
246struct qseecom_clk {
247 enum qseecom_ce_hw_instance instance;
248 struct clk *ce_core_clk;
249 struct clk *ce_clk;
250 struct clk *ce_core_src_clk;
251 struct clk *ce_bus_clk;
252 uint32_t clk_access_cnt;
253};
254
255struct qseecom_control {
256 struct ion_client *ion_clnt; /* Ion client */
257 struct list_head registered_listener_list_head;
258
259 struct list_head registered_app_list_head;
260 spinlock_t registered_app_list_lock;
261
262 struct list_head registered_kclient_list_head;
263 spinlock_t registered_kclient_list_lock;
264
265 wait_queue_head_t send_resp_wq;
266 int send_resp_flag;
267
268 uint32_t qseos_version;
269 uint32_t qsee_version;
270 struct device *pdev;
271 bool whitelist_support;
272 bool commonlib_loaded;
273 bool commonlib64_loaded;
274 struct ce_hw_usage_info ce_info;
275
276 int qsee_bw_count;
277 int qsee_sfpb_bw_count;
278
279 uint32_t qsee_perf_client;
280 struct qseecom_clk qsee;
281 struct qseecom_clk ce_drv;
282
283 bool support_bus_scaling;
284 bool support_fde;
285 bool support_pfe;
286 bool fde_key_size;
287 uint32_t cumulative_mode;
288 enum qseecom_bandwidth_request_mode current_mode;
289 struct timer_list bw_scale_down_timer;
290 struct work_struct bw_inactive_req_ws;
291 struct cdev cdev;
292 bool timer_running;
293 bool no_clock_support;
294 unsigned int ce_opp_freq_hz;
295 bool appsbl_qseecom_support;
296 uint32_t qsee_reentrancy_support;
297 bool enable_key_wrap_in_ks;
298
299 uint32_t app_block_ref_cnt;
300 wait_queue_head_t app_block_wq;
301 atomic_t qseecom_state;
302 int is_apps_region_protected;
303 bool smcinvoke_support;
304
305 struct list_head unregister_lsnr_pending_list_head;
306 wait_queue_head_t register_lsnr_pending_wq;
307 struct task_struct *unregister_lsnr_kthread_task;
308 wait_queue_head_t unregister_lsnr_kthread_wq;
309 atomic_t unregister_lsnr_kthread_state;
310};
311
312struct qseecom_sec_buf_fd_info {
313 bool is_sec_buf_fd;
314 size_t size;
315 void *vbase;
316 dma_addr_t pbase;
317};
318
319struct qseecom_param_memref {
320 uint32_t buffer;
321 uint32_t size;
322};
323
324struct qseecom_client_handle {
325 u32 app_id;
326 u8 *sb_virt;
327 phys_addr_t sb_phys;
328 unsigned long user_virt_sb_base;
329 size_t sb_length;
330 struct ion_handle *ihandle; /* Retrieve phy addr */
331 char app_name[MAX_APP_NAME_SIZE];
332 u32 app_arch;
333 struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
334};
335
336struct qseecom_listener_handle {
337 u32 id;
338 bool unregister_pending;
339 bool release_called;
340};
341
342static struct qseecom_control qseecom;
343
344struct qseecom_dev_handle {
345 enum qseecom_client_handle_type type;
346 union {
347 struct qseecom_client_handle client;
348 struct qseecom_listener_handle listener;
349 };
350 bool released;
351 int abort;
352 wait_queue_head_t abort_wq;
353 atomic_t ioctl_count;
354 bool perf_enabled;
355 bool fast_load_enabled;
356 enum qseecom_bandwidth_request_mode mode;
357 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
358 uint32_t sglist_cnt;
359 bool use_legacy_cmd;
360};
361
362struct qseecom_key_id_usage_desc {
363 uint8_t desc[QSEECOM_KEY_ID_SIZE];
364};
365
366struct qseecom_crypto_info {
367 unsigned int unit_num;
368 unsigned int ce;
369 unsigned int pipe_pair;
370};
371
372static struct qseecom_key_id_usage_desc key_id_array[] = {
373 {
374 .desc = "Undefined Usage Index",
375 },
376
377 {
378 .desc = "Full Disk Encryption",
379 },
380
381 {
382 .desc = "Per File Encryption",
383 },
384
385 {
386 .desc = "UFS ICE Full Disk Encryption",
387 },
388
389 {
390 .desc = "SDCC ICE Full Disk Encryption",
391 },
392};
393
394/* Function proto types */
395static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
396static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
397static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
398static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
399static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
400static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
401 char *cmnlib_name);
402static int qseecom_enable_ice_setup(int usage);
403static int qseecom_disable_ice_setup(int usage);
404static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
405static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
406 void __user *argp);
407static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
408 void __user *argp);
409static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
410 void __user *argp);
411
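/*
 * Explanatory note (added): the __setup() hook below registers this parser
 * for the "androidboot.keymaster=<n>" kernel command line option; get_option()
 * stores the integer value in qseecom.is_apps_region_protected.
 */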
412static int get_qseecom_keymaster_status(char *str)
413{
414 get_option(&str, &qseecom.is_apps_region_protected);
415 return 1;
416}
417__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
418
419
420#define QSEECOM_SCM_EBUSY_WAIT_MS 30
421#define QSEECOM_SCM_EBUSY_MAX_RETRY 67
422
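/*
 * Explanatory note (added): with QSEECOM_SCM_EBUSY_WAIT_MS = 30 and
 * QSEECOM_SCM_EBUSY_MAX_RETRY = 67, a persistently busy secure world is
 * retried for roughly two seconds, and the warning below fires after about
 * one second (33 * 30 ms). app_access_lock is dropped around each sleep so
 * other callers are not blocked while this thread waits.
 */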
423static int __qseecom_scm_call2_locked(uint32_t smc_id, struct scm_desc *desc)
424{
425 int ret = 0;
426 int retry_count = 0;
427
428 do {
429 ret = scm_call2_noretry(smc_id, desc);
430 if (ret == -EBUSY) {
431 mutex_unlock(&app_access_lock);
432 msleep(QSEECOM_SCM_EBUSY_WAIT_MS);
433 mutex_lock(&app_access_lock);
434 }
435 if (retry_count == 33)
436 pr_warn("secure world has been busy for 1 second!\n");
437 } while (ret == -EBUSY &&
438 (retry_count++ < QSEECOM_SCM_EBUSY_MAX_RETRY));
439 return ret;
440}
441
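/*
 * Explanatory note (added): qseecom_scm_call2() translates legacy qseecom
 * request structures into ARMv8 SMC calls. It picks the SMC ID and parameter
 * layout from the service/command IDs, packs the request fields (32-bit or
 * 64-bit variants, depending on qseecom.qsee_version) into the scm_desc
 * arguments, and copies the TZ return values back into the caller's
 * qseecom_command_scm_resp.
 */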
442static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
443 const void *req_buf, void *resp_buf)
444{
445 int ret = 0;
446 uint32_t smc_id = 0;
447 uint32_t qseos_cmd_id = 0;
448 struct scm_desc desc = {0};
449 struct qseecom_command_scm_resp *scm_resp = NULL;
450
451 if (!req_buf || !resp_buf) {
452 pr_err("Invalid buffer pointer\n");
453 return -EINVAL;
454 }
455 qseos_cmd_id = *(uint32_t *)req_buf;
456 scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
457
458 switch (svc_id) {
459 case 6: {
460 if (tz_cmd_id == 3) {
461 smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
462 desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
463 desc.args[0] = *(uint32_t *)req_buf;
464 } else {
465 pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
466 svc_id, tz_cmd_id);
467 return -EINVAL;
468 }
469 ret = __qseecom_scm_call2_locked(smc_id, &desc);
470 break;
471 }
472 case SCM_SVC_ES: {
473 switch (tz_cmd_id) {
474 case SCM_SAVE_PARTITION_HASH_ID: {
475 u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
476 struct qseecom_save_partition_hash_req *p_hash_req =
477 (struct qseecom_save_partition_hash_req *)
478 req_buf;
479 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
480
481 if (!tzbuf)
482 return -ENOMEM;
483 memset(tzbuf, 0, tzbuflen);
484 memcpy(tzbuf, p_hash_req->digest,
485 SHA256_DIGEST_LENGTH);
486 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
487 smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
488 desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
489 desc.args[0] = p_hash_req->partition_id;
490 desc.args[1] = virt_to_phys(tzbuf);
491 desc.args[2] = SHA256_DIGEST_LENGTH;
492 ret = __qseecom_scm_call2_locked(smc_id, &desc);
493 kzfree(tzbuf);
494 break;
495 }
496 default: {
497 pr_err("tz_cmd_id %d is not supported by scm_call2\n",
498 tz_cmd_id);
499 ret = -EINVAL;
500 break;
501 }
502 } /* end of switch (tz_cmd_id) */
503 break;
504 } /* end of case SCM_SVC_ES */
505 case SCM_SVC_TZSCHEDULER: {
506 switch (qseos_cmd_id) {
507 case QSEOS_APP_START_COMMAND: {
508 struct qseecom_load_app_ireq *req;
509 struct qseecom_load_app_64bit_ireq *req_64bit;
510
511 smc_id = TZ_OS_APP_START_ID;
512 desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
513 if (qseecom.qsee_version < QSEE_VERSION_40) {
514 req = (struct qseecom_load_app_ireq *)req_buf;
515 desc.args[0] = req->mdt_len;
516 desc.args[1] = req->img_len;
517 desc.args[2] = req->phy_addr;
518 } else {
519 req_64bit =
520 (struct qseecom_load_app_64bit_ireq *)
521 req_buf;
522 desc.args[0] = req_64bit->mdt_len;
523 desc.args[1] = req_64bit->img_len;
524 desc.args[2] = req_64bit->phy_addr;
525 }
526 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
527 ret = __qseecom_scm_call2_locked(smc_id, &desc);
528 break;
529 }
530 case QSEOS_APP_SHUTDOWN_COMMAND: {
531 struct qseecom_unload_app_ireq *req;
532
533 req = (struct qseecom_unload_app_ireq *)req_buf;
534 smc_id = TZ_OS_APP_SHUTDOWN_ID;
535 desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
536 desc.args[0] = req->app_id;
537 ret = __qseecom_scm_call2_locked(smc_id, &desc);
538 break;
539 }
540 case QSEOS_APP_LOOKUP_COMMAND: {
541 struct qseecom_check_app_ireq *req;
542 u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
543 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
544
545 if (!tzbuf)
546 return -ENOMEM;
547 req = (struct qseecom_check_app_ireq *)req_buf;
548 pr_debug("Lookup app_name = %s\n", req->app_name);
549 strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
550 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
551 smc_id = TZ_OS_APP_LOOKUP_ID;
552 desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
553 desc.args[0] = virt_to_phys(tzbuf);
554 desc.args[1] = strlen(req->app_name);
555 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
556 ret = __qseecom_scm_call2_locked(smc_id, &desc);
557 kzfree(tzbuf);
558 break;
559 }
560 case QSEOS_APP_REGION_NOTIFICATION: {
561 struct qsee_apps_region_info_ireq *req;
562 struct qsee_apps_region_info_64bit_ireq *req_64bit;
563
564 smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
565 desc.arginfo =
566 TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
567 if (qseecom.qsee_version < QSEE_VERSION_40) {
568 req = (struct qsee_apps_region_info_ireq *)
569 req_buf;
570 desc.args[0] = req->addr;
571 desc.args[1] = req->size;
572 } else {
573 req_64bit =
574 (struct qsee_apps_region_info_64bit_ireq *)
575 req_buf;
576 desc.args[0] = req_64bit->addr;
577 desc.args[1] = req_64bit->size;
578 }
579 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
580 ret = __qseecom_scm_call2_locked(smc_id, &desc);
581 break;
582 }
583 case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
584 struct qseecom_load_lib_image_ireq *req;
585 struct qseecom_load_lib_image_64bit_ireq *req_64bit;
586
587 smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
588 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
589 if (qseecom.qsee_version < QSEE_VERSION_40) {
590 req = (struct qseecom_load_lib_image_ireq *)
591 req_buf;
592 desc.args[0] = req->mdt_len;
593 desc.args[1] = req->img_len;
594 desc.args[2] = req->phy_addr;
595 } else {
596 req_64bit =
597 (struct qseecom_load_lib_image_64bit_ireq *)
598 req_buf;
599 desc.args[0] = req_64bit->mdt_len;
600 desc.args[1] = req_64bit->img_len;
601 desc.args[2] = req_64bit->phy_addr;
602 }
603 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
604 ret = __qseecom_scm_call2_locked(smc_id, &desc);
605 break;
606 }
607 case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
608 smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
609 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
610 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
611 ret = __qseecom_scm_call2_locked(smc_id, &desc);
612 break;
613 }
614 case QSEOS_REGISTER_LISTENER: {
615 struct qseecom_register_listener_ireq *req;
616 struct qseecom_register_listener_64bit_ireq *req_64bit;
617
618 desc.arginfo =
619 TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
620 if (qseecom.qsee_version < QSEE_VERSION_40) {
621 req = (struct qseecom_register_listener_ireq *)
622 req_buf;
623 desc.args[0] = req->listener_id;
624 desc.args[1] = req->sb_ptr;
625 desc.args[2] = req->sb_len;
626 } else {
627 req_64bit =
628 (struct qseecom_register_listener_64bit_ireq *)
629 req_buf;
630 desc.args[0] = req_64bit->listener_id;
631 desc.args[1] = req_64bit->sb_ptr;
632 desc.args[2] = req_64bit->sb_len;
633 }
634 qseecom.smcinvoke_support = true;
635 smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
636 ret = __qseecom_scm_call2_locked(smc_id, &desc);
637 if (ret == -EIO) {
638 /* smcinvoke is not supported */
639 qseecom.smcinvoke_support = false;
640 smc_id = TZ_OS_REGISTER_LISTENER_ID;
641 ret = __qseecom_scm_call2_locked(smc_id, &desc);
642 }
643 break;
644 }
645 case QSEOS_DEREGISTER_LISTENER: {
646 struct qseecom_unregister_listener_ireq *req;
647
648 req = (struct qseecom_unregister_listener_ireq *)
649 req_buf;
650 smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
651 desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
652 desc.args[0] = req->listener_id;
653 ret = __qseecom_scm_call2_locked(smc_id, &desc);
654 break;
655 }
656 case QSEOS_LISTENER_DATA_RSP_COMMAND: {
657 struct qseecom_client_listener_data_irsp *req;
658
659 req = (struct qseecom_client_listener_data_irsp *)
660 req_buf;
661 smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
662 desc.arginfo =
663 TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
664 desc.args[0] = req->listener_id;
665 desc.args[1] = req->status;
666 ret = __qseecom_scm_call2_locked(smc_id, &desc);
667 break;
668 }
669 case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
670 struct qseecom_client_listener_data_irsp *req;
671 struct qseecom_client_listener_data_64bit_irsp *req_64;
672
673 smc_id =
674 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
675 desc.arginfo =
676 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
677 if (qseecom.qsee_version < QSEE_VERSION_40) {
678 req =
679 (struct qseecom_client_listener_data_irsp *)
680 req_buf;
681 desc.args[0] = req->listener_id;
682 desc.args[1] = req->status;
683 desc.args[2] = req->sglistinfo_ptr;
684 desc.args[3] = req->sglistinfo_len;
685 } else {
686 req_64 =
687 (struct qseecom_client_listener_data_64bit_irsp *)
688 req_buf;
689 desc.args[0] = req_64->listener_id;
690 desc.args[1] = req_64->status;
691 desc.args[2] = req_64->sglistinfo_ptr;
692 desc.args[3] = req_64->sglistinfo_len;
693 }
694 ret = __qseecom_scm_call2_locked(smc_id, &desc);
695 break;
696 }
697 case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
698 struct qseecom_load_app_ireq *req;
699 struct qseecom_load_app_64bit_ireq *req_64bit;
700
701 smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
702 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
703 if (qseecom.qsee_version < QSEE_VERSION_40) {
704 req = (struct qseecom_load_app_ireq *)req_buf;
705 desc.args[0] = req->mdt_len;
706 desc.args[1] = req->img_len;
707 desc.args[2] = req->phy_addr;
708 } else {
709 req_64bit =
710 (struct qseecom_load_app_64bit_ireq *)req_buf;
711 desc.args[0] = req_64bit->mdt_len;
712 desc.args[1] = req_64bit->img_len;
713 desc.args[2] = req_64bit->phy_addr;
714 }
715 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
716 ret = __qseecom_scm_call2_locked(smc_id, &desc);
717 break;
718 }
719 case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
720 smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
721 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
722 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
723 ret = __qseecom_scm_call2_locked(smc_id, &desc);
724 break;
725 }
726
727 case QSEOS_CLIENT_SEND_DATA_COMMAND: {
728 struct qseecom_client_send_data_ireq *req;
729 struct qseecom_client_send_data_64bit_ireq *req_64bit;
730
731 smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
732 desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
733 if (qseecom.qsee_version < QSEE_VERSION_40) {
734 req = (struct qseecom_client_send_data_ireq *)
735 req_buf;
736 desc.args[0] = req->app_id;
737 desc.args[1] = req->req_ptr;
738 desc.args[2] = req->req_len;
739 desc.args[3] = req->rsp_ptr;
740 desc.args[4] = req->rsp_len;
741 } else {
742 req_64bit =
743 (struct qseecom_client_send_data_64bit_ireq *)
744 req_buf;
745 desc.args[0] = req_64bit->app_id;
746 desc.args[1] = req_64bit->req_ptr;
747 desc.args[2] = req_64bit->req_len;
748 desc.args[3] = req_64bit->rsp_ptr;
749 desc.args[4] = req_64bit->rsp_len;
750 }
751 ret = __qseecom_scm_call2_locked(smc_id, &desc);
752 break;
753 }
754 case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
755 struct qseecom_client_send_data_ireq *req;
756 struct qseecom_client_send_data_64bit_ireq *req_64bit;
757
758 smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
759 desc.arginfo =
760 TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
761 if (qseecom.qsee_version < QSEE_VERSION_40) {
762 req = (struct qseecom_client_send_data_ireq *)
763 req_buf;
764 desc.args[0] = req->app_id;
765 desc.args[1] = req->req_ptr;
766 desc.args[2] = req->req_len;
767 desc.args[3] = req->rsp_ptr;
768 desc.args[4] = req->rsp_len;
769 desc.args[5] = req->sglistinfo_ptr;
770 desc.args[6] = req->sglistinfo_len;
771 } else {
772 req_64bit =
773 (struct qseecom_client_send_data_64bit_ireq *)
774 req_buf;
775 desc.args[0] = req_64bit->app_id;
776 desc.args[1] = req_64bit->req_ptr;
777 desc.args[2] = req_64bit->req_len;
778 desc.args[3] = req_64bit->rsp_ptr;
779 desc.args[4] = req_64bit->rsp_len;
780 desc.args[5] = req_64bit->sglistinfo_ptr;
781 desc.args[6] = req_64bit->sglistinfo_len;
782 }
783 ret = __qseecom_scm_call2_locked(smc_id, &desc);
784 break;
785 }
786 case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
787 struct qseecom_client_send_service_ireq *req;
788
789 req = (struct qseecom_client_send_service_ireq *)
790 req_buf;
791 smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
792 desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
793 desc.args[0] = req->key_type;
794 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
795 ret = __qseecom_scm_call2_locked(smc_id, &desc);
796 break;
797 }
798 case QSEOS_RPMB_ERASE_COMMAND: {
799 smc_id = TZ_OS_RPMB_ERASE_ID;
800 desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
801 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
802 ret = __qseecom_scm_call2_locked(smc_id, &desc);
803 break;
804 }
805 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
806 smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
807 desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
808 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
809 ret = __qseecom_scm_call2_locked(smc_id, &desc);
810 break;
811 }
812 case QSEOS_GENERATE_KEY: {
813 u32 tzbuflen = PAGE_ALIGN(sizeof
814 (struct qseecom_key_generate_ireq) -
815 sizeof(uint32_t));
816 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
817
818 if (!tzbuf)
819 return -ENOMEM;
820 memset(tzbuf, 0, tzbuflen);
821 memcpy(tzbuf, req_buf + sizeof(uint32_t),
822 (sizeof(struct qseecom_key_generate_ireq) -
823 sizeof(uint32_t)));
824 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
825 smc_id = TZ_OS_KS_GEN_KEY_ID;
826 desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
827 desc.args[0] = virt_to_phys(tzbuf);
828 desc.args[1] = tzbuflen;
829 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
830 ret = __qseecom_scm_call2_locked(smc_id, &desc);
831 kzfree(tzbuf);
832 break;
833 }
834 case QSEOS_DELETE_KEY: {
835 u32 tzbuflen = PAGE_ALIGN(sizeof
836 (struct qseecom_key_delete_ireq) -
837 sizeof(uint32_t));
838 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
839
840 if (!tzbuf)
841 return -ENOMEM;
842 memset(tzbuf, 0, tzbuflen);
843 memcpy(tzbuf, req_buf + sizeof(uint32_t),
844 (sizeof(struct qseecom_key_delete_ireq) -
845 sizeof(uint32_t)));
846 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
847 smc_id = TZ_OS_KS_DEL_KEY_ID;
848 desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
849 desc.args[0] = virt_to_phys(tzbuf);
850 desc.args[1] = tzbuflen;
851 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
852 ret = __qseecom_scm_call2_locked(smc_id, &desc);
853 kzfree(tzbuf);
854 break;
855 }
856 case QSEOS_SET_KEY: {
857 u32 tzbuflen = PAGE_ALIGN(sizeof
858 (struct qseecom_key_select_ireq) -
859 sizeof(uint32_t));
860 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
861
862 if (!tzbuf)
863 return -ENOMEM;
864 memset(tzbuf, 0, tzbuflen);
865 memcpy(tzbuf, req_buf + sizeof(uint32_t),
866 (sizeof(struct qseecom_key_select_ireq) -
867 sizeof(uint32_t)));
868 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
869 smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
870 desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
871 desc.args[0] = virt_to_phys(tzbuf);
872 desc.args[1] = tzbuflen;
873 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
874 ret = __qseecom_scm_call2_locked(smc_id, &desc);
875 kzfree(tzbuf);
876 break;
877 }
878 case QSEOS_UPDATE_KEY_USERINFO: {
879 u32 tzbuflen = PAGE_ALIGN(sizeof
880 (struct qseecom_key_userinfo_update_ireq) -
881 sizeof(uint32_t));
882 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
883
884 if (!tzbuf)
885 return -ENOMEM;
886 memset(tzbuf, 0, tzbuflen);
887 memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
888 (struct qseecom_key_userinfo_update_ireq) -
889 sizeof(uint32_t)));
890 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
891 smc_id = TZ_OS_KS_UPDATE_KEY_ID;
892 desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
893 desc.args[0] = virt_to_phys(tzbuf);
894 desc.args[1] = tzbuflen;
895 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
896 ret = __qseecom_scm_call2_locked(smc_id, &desc);
897 kzfree(tzbuf);
898 break;
899 }
900 case QSEOS_TEE_OPEN_SESSION: {
901 struct qseecom_qteec_ireq *req;
902 struct qseecom_qteec_64bit_ireq *req_64bit;
903
904 smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
905 desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
906 if (qseecom.qsee_version < QSEE_VERSION_40) {
907 req = (struct qseecom_qteec_ireq *)req_buf;
908 desc.args[0] = req->app_id;
909 desc.args[1] = req->req_ptr;
910 desc.args[2] = req->req_len;
911 desc.args[3] = req->resp_ptr;
912 desc.args[4] = req->resp_len;
913 } else {
914 req_64bit = (struct qseecom_qteec_64bit_ireq *)
915 req_buf;
916 desc.args[0] = req_64bit->app_id;
917 desc.args[1] = req_64bit->req_ptr;
918 desc.args[2] = req_64bit->req_len;
919 desc.args[3] = req_64bit->resp_ptr;
920 desc.args[4] = req_64bit->resp_len;
921 }
922 ret = __qseecom_scm_call2_locked(smc_id, &desc);
923 break;
924 }
925 case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
926 struct qseecom_qteec_ireq *req;
927 struct qseecom_qteec_64bit_ireq *req_64bit;
928
929 smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
930 desc.arginfo =
931 TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
932 if (qseecom.qsee_version < QSEE_VERSION_40) {
933 req = (struct qseecom_qteec_ireq *)req_buf;
934 desc.args[0] = req->app_id;
935 desc.args[1] = req->req_ptr;
936 desc.args[2] = req->req_len;
937 desc.args[3] = req->resp_ptr;
938 desc.args[4] = req->resp_len;
939 desc.args[5] = req->sglistinfo_ptr;
940 desc.args[6] = req->sglistinfo_len;
941 } else {
942 req_64bit = (struct qseecom_qteec_64bit_ireq *)
943 req_buf;
944 desc.args[0] = req_64bit->app_id;
945 desc.args[1] = req_64bit->req_ptr;
946 desc.args[2] = req_64bit->req_len;
947 desc.args[3] = req_64bit->resp_ptr;
948 desc.args[4] = req_64bit->resp_len;
949 desc.args[5] = req_64bit->sglistinfo_ptr;
950 desc.args[6] = req_64bit->sglistinfo_len;
951 }
952 ret = __qseecom_scm_call2_locked(smc_id, &desc);
953 break;
954 }
955 case QSEOS_TEE_INVOKE_COMMAND: {
956 struct qseecom_qteec_ireq *req;
957 struct qseecom_qteec_64bit_ireq *req_64bit;
958
959 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
960 desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
961 if (qseecom.qsee_version < QSEE_VERSION_40) {
962 req = (struct qseecom_qteec_ireq *)req_buf;
963 desc.args[0] = req->app_id;
964 desc.args[1] = req->req_ptr;
965 desc.args[2] = req->req_len;
966 desc.args[3] = req->resp_ptr;
967 desc.args[4] = req->resp_len;
968 } else {
969 req_64bit = (struct qseecom_qteec_64bit_ireq *)
970 req_buf;
971 desc.args[0] = req_64bit->app_id;
972 desc.args[1] = req_64bit->req_ptr;
973 desc.args[2] = req_64bit->req_len;
974 desc.args[3] = req_64bit->resp_ptr;
975 desc.args[4] = req_64bit->resp_len;
976 }
977 ret = __qseecom_scm_call2_locked(smc_id, &desc);
978 break;
979 }
980 case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
981 struct qseecom_qteec_ireq *req;
982 struct qseecom_qteec_64bit_ireq *req_64bit;
983
984 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
985 desc.arginfo =
986 TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
987 if (qseecom.qsee_version < QSEE_VERSION_40) {
988 req = (struct qseecom_qteec_ireq *)req_buf;
989 desc.args[0] = req->app_id;
990 desc.args[1] = req->req_ptr;
991 desc.args[2] = req->req_len;
992 desc.args[3] = req->resp_ptr;
993 desc.args[4] = req->resp_len;
994 desc.args[5] = req->sglistinfo_ptr;
995 desc.args[6] = req->sglistinfo_len;
996 } else {
997 req_64bit = (struct qseecom_qteec_64bit_ireq *)
998 req_buf;
999 desc.args[0] = req_64bit->app_id;
1000 desc.args[1] = req_64bit->req_ptr;
1001 desc.args[2] = req_64bit->req_len;
1002 desc.args[3] = req_64bit->resp_ptr;
1003 desc.args[4] = req_64bit->resp_len;
1004 desc.args[5] = req_64bit->sglistinfo_ptr;
1005 desc.args[6] = req_64bit->sglistinfo_len;
1006 }
1007 ret = __qseecom_scm_call2_locked(smc_id, &desc);
1008 break;
1009 }
1010 case QSEOS_TEE_CLOSE_SESSION: {
1011 struct qseecom_qteec_ireq *req;
1012 struct qseecom_qteec_64bit_ireq *req_64bit;
1013
1014 smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
1015 desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
1016 if (qseecom.qsee_version < QSEE_VERSION_40) {
1017 req = (struct qseecom_qteec_ireq *)req_buf;
1018 desc.args[0] = req->app_id;
1019 desc.args[1] = req->req_ptr;
1020 desc.args[2] = req->req_len;
1021 desc.args[3] = req->resp_ptr;
1022 desc.args[4] = req->resp_len;
1023 } else {
1024 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1025 req_buf;
1026 desc.args[0] = req_64bit->app_id;
1027 desc.args[1] = req_64bit->req_ptr;
1028 desc.args[2] = req_64bit->req_len;
1029 desc.args[3] = req_64bit->resp_ptr;
1030 desc.args[4] = req_64bit->resp_len;
1031 }
1032 ret = __qseecom_scm_call2_locked(smc_id, &desc);
1033 break;
1034 }
1035 case QSEOS_TEE_REQUEST_CANCELLATION: {
1036 struct qseecom_qteec_ireq *req;
1037 struct qseecom_qteec_64bit_ireq *req_64bit;
1038
1039 smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
1040 desc.arginfo =
1041 TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
1042 if (qseecom.qsee_version < QSEE_VERSION_40) {
1043 req = (struct qseecom_qteec_ireq *)req_buf;
1044 desc.args[0] = req->app_id;
1045 desc.args[1] = req->req_ptr;
1046 desc.args[2] = req->req_len;
1047 desc.args[3] = req->resp_ptr;
1048 desc.args[4] = req->resp_len;
1049 } else {
1050 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1051 req_buf;
1052 desc.args[0] = req_64bit->app_id;
1053 desc.args[1] = req_64bit->req_ptr;
1054 desc.args[2] = req_64bit->req_len;
1055 desc.args[3] = req_64bit->resp_ptr;
1056 desc.args[4] = req_64bit->resp_len;
1057 }
1058 ret = __qseecom_scm_call2_locked(smc_id, &desc);
1059 break;
1060 }
1061 case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
1062 struct qseecom_continue_blocked_request_ireq *req =
1063 (struct qseecom_continue_blocked_request_ireq *)
1064 req_buf;
1065 if (qseecom.smcinvoke_support)
1066 smc_id =
1067 TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
1068 else
1069 smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
1070 desc.arginfo =
1071 TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
1072 desc.args[0] = req->app_or_session_id;
1073 ret = __qseecom_scm_call2_locked(smc_id, &desc);
1074 break;
1075 }
1076 default: {
1077 pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
1078 qseos_cmd_id);
1079 ret = -EINVAL;
1080 break;
1081 }
1082 } /*end of switch (qsee_cmd_id) */
1083 break;
1084 } /*end of case SCM_SVC_TZSCHEDULER*/
1085 default: {
1086 pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
1087 svc_id);
1088 ret = -EINVAL;
1089 break;
1090 }
1091 } /*end of switch svc_id */
1092 scm_resp->result = desc.ret[0];
1093 scm_resp->resp_type = desc.ret[1];
1094 scm_resp->data = desc.ret[2];
1095 pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
1096 svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
1097 pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
1098 scm_resp->result, scm_resp->resp_type, scm_resp->data);
1099 return ret;
1100}
1101
1102
1103static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1104 size_t cmd_len, void *resp_buf, size_t resp_len)
1105{
1106 if (!is_scm_armv8())
1107 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1108 resp_buf, resp_len);
1109 else
1110 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1111}
1112
1113static struct qseecom_registered_listener_list *__qseecom_find_svc(
1114 int32_t listener_id)
1115{
1116 struct qseecom_registered_listener_list *entry = NULL;
1117
1118 list_for_each_entry(entry,
1119 &qseecom.registered_listener_list_head, list) {
1120 if (entry->svc.listener_id == listener_id)
1121 break;
1122 }
1123 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
1124 pr_debug("Service id: %u is not found\n", listener_id);
1125 return NULL;
1126 }
1127
1128 return entry;
1129}
1130
1131static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1132 struct qseecom_dev_handle *handle,
1133 struct qseecom_register_listener_req *listener)
1134{
1135 int ret = 0;
1136 struct qseecom_register_listener_ireq req;
1137 struct qseecom_register_listener_64bit_ireq req_64bit;
1138 struct qseecom_command_scm_resp resp;
1139 ion_phys_addr_t pa;
1140 void *cmd_buf = NULL;
1141 size_t cmd_len;
1142
1143 /* Get the handle of the shared fd */
1144 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
1145 listener->ifd_data_fd);
1146 if (IS_ERR_OR_NULL(svc->ihandle)) {
1147 pr_err("Ion client could not retrieve the handle\n");
1148 return -ENOMEM;
1149 }
1150
1151 /* Get the physical address of the ION BUF */
1152 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1153 if (ret) {
1154 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1155 ret);
1156 return ret;
1157 }
1158 /* Populate the structure for sending scm call to load image */
1159 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1160 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1161 pr_err("ION memory mapping for listener shared buffer failed\n");
1162 return -ENOMEM;
1163 }
1164 svc->sb_phys = (phys_addr_t)pa;
1165
1166 if (qseecom.qsee_version < QSEE_VERSION_40) {
1167 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1168 req.listener_id = svc->svc.listener_id;
1169 req.sb_len = svc->sb_length;
1170 req.sb_ptr = (uint32_t)svc->sb_phys;
1171 cmd_buf = (void *)&req;
1172 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1173 } else {
1174 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1175 req_64bit.listener_id = svc->svc.listener_id;
1176 req_64bit.sb_len = svc->sb_length;
1177 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1178 cmd_buf = (void *)&req_64bit;
1179 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1180 }
1181
1182 resp.result = QSEOS_RESULT_INCOMPLETE;
1183
1184 mutex_unlock(&listener_access_lock);
1185 mutex_lock(&app_access_lock);
1186 __qseecom_reentrancy_check_if_no_app_blocked(
1187 TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID);
1188 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1189 &resp, sizeof(resp));
1190 mutex_unlock(&app_access_lock);
1191 mutex_lock(&listener_access_lock);
1192 if (ret) {
1193 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1194 return -EINVAL;
1195 }
1196
1197 if (resp.result != QSEOS_RESULT_SUCCESS) {
1198 pr_err("Error SB registration req: resp.result = %d\n",
1199 resp.result);
1200 return -EPERM;
1201 }
1202 return 0;
1203}
1204
1205static int qseecom_register_listener(struct qseecom_dev_handle *data,
1206 void __user *argp)
1207{
1208 int ret = 0;
1209 struct qseecom_register_listener_req rcvd_lstnr;
1210 struct qseecom_registered_listener_list *new_entry;
1211 struct qseecom_registered_listener_list *ptr_svc;
1212
1213 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1214 if (ret) {
1215 pr_err("copy_from_user failed\n");
1216 return ret;
1217 }
1218 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1219 rcvd_lstnr.sb_size))
1220 return -EFAULT;
1221
1222 data->listener.id = rcvd_lstnr.listener_id;
1223
1224 ptr_svc = __qseecom_find_svc(rcvd_lstnr.listener_id);
1225 if (ptr_svc) {
1226 if (ptr_svc->unregister_pending == false) {
1227 pr_err("Service %d is not unique\n",
1228 rcvd_lstnr.listener_id);
1229 data->released = true;
1230 return -EBUSY;
1231 } else {
1232 /*wait until listener is unregistered*/
1233 pr_debug("register %d has to wait\n",
1234 rcvd_lstnr.listener_id);
1235 mutex_unlock(&listener_access_lock);
1236 ret = wait_event_freezable(
1237 qseecom.register_lsnr_pending_wq,
1238 list_empty(
1239 &qseecom.unregister_lsnr_pending_list_head));
1240 if (ret) {
1241 pr_err("interrupted register_pending_wq %d\n",
1242 rcvd_lstnr.listener_id);
1243 mutex_lock(&listener_access_lock);
1244 return -ERESTARTSYS;
1245 }
1246 mutex_lock(&listener_access_lock);
1247 }
1248 }
1249 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1250 if (!new_entry)
1251 return -ENOMEM;
1252 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
1253 new_entry->rcv_req_flag = 0;
1254
1255 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1256 new_entry->sb_length = rcvd_lstnr.sb_size;
1257 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1258 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
1259 pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
1260 rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
1261 kzfree(new_entry);
1262 return -ENOMEM;
1263 }
1264
1265 init_waitqueue_head(&new_entry->rcv_req_wq);
1266 init_waitqueue_head(&new_entry->listener_block_app_wq);
1267 new_entry->send_resp_flag = 0;
1268 new_entry->listener_in_use = false;
1269 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
1270
1271 pr_warn("Service %d is registered\n", rcvd_lstnr.listener_id);
1272 return ret;
1273}
1274
1275static int __qseecom_unregister_listener(struct qseecom_dev_handle *data,
1276 struct qseecom_registered_listener_list *ptr_svc)
1277{
1278 int ret = 0;
1279 struct qseecom_register_listener_ireq req;
1280 struct qseecom_command_scm_resp resp;
1281 struct ion_handle *ihandle = NULL; /* Retrieve phy addr */
1282
1283 req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
1284 req.listener_id = data->listener.id;
1285 resp.result = QSEOS_RESULT_INCOMPLETE;
1286
1287 mutex_unlock(&listener_access_lock);
1288 mutex_lock(&app_access_lock);
1289 __qseecom_reentrancy_check_if_no_app_blocked(
1290 TZ_OS_DEREGISTER_LISTENER_ID);
1291 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
1292 sizeof(req), &resp, sizeof(resp));
1293 mutex_unlock(&app_access_lock);
1294 mutex_lock(&listener_access_lock);
1295 if (ret) {
1296 pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
1297 ret, data->listener.id);
1298 if (ret == -EBUSY)
1299 return ret;
1300 goto exit;
1301 }
1302
1303 if (resp.result != QSEOS_RESULT_SUCCESS) {
1304 pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
1305 resp.result, data->listener.id);
1306 ret = -EPERM;
1307 goto exit;
1308 }
1309
1310 while (atomic_read(&data->ioctl_count) > 1) {
1311 if (wait_event_freezable(data->abort_wq,
1312 atomic_read(&data->ioctl_count) <= 1)) {
1313 pr_err("Interrupted from abort\n");
1314 ret = -ERESTARTSYS;
1315 }
1316 }
1317
1318exit:
1319 if (ptr_svc->sb_virt) {
1320 ihandle = ptr_svc->ihandle;
1321 if (!IS_ERR_OR_NULL(ihandle)) {
1322 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
1323 ion_free(qseecom.ion_clnt, ihandle);
1324 }
1325 }
1326 list_del(&ptr_svc->list);
1327 kzfree(ptr_svc);
1328
1329 data->released = true;
1330 pr_warn("Service %d is unregistered\n", data->listener.id);
1331 return ret;
1332}
1333
1334static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
1335{
1336 struct qseecom_registered_listener_list *ptr_svc = NULL;
1337 struct qseecom_unregister_pending_list *entry = NULL;
1338
1339 ptr_svc = __qseecom_find_svc(data->listener.id);
1340 if (!ptr_svc) {
1341 pr_err("Unregister invalid listener ID %d\n", data->listener.id);
1342 return -ENODATA;
1343 }
1344 /* stop CA thread waiting for listener response */
1345 ptr_svc->abort = 1;
1346 wake_up_interruptible_all(&qseecom.send_resp_wq);
1347
1348 /* stop listener thread waiting for listener request */
1349 data->abort = 1;
1350 wake_up_all(&ptr_svc->rcv_req_wq);
1351
1352 /* return directly if pending*/
1353 if (ptr_svc->unregister_pending)
1354 return 0;
1355
1356 /*add unregistration into pending list*/
1357 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1358 if (!entry)
1359 return -ENOMEM;
1360 entry->data = data;
1361 list_add_tail(&entry->list,
1362 &qseecom.unregister_lsnr_pending_list_head);
1363 ptr_svc->unregister_pending = true;
1364 pr_debug("unregister %d pending\n", data->listener.id);
1365 return 0;
1366}
1367
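/*
 * Explanatory note (added): listener unregistration is deferred. The request
 * is queued on unregister_lsnr_pending_list_head and handled here by the
 * unregister kthread once qseecom_release has been called for the listener;
 * a TZ call returning -EBUSY keeps the entry pending for a later retry, and
 * register_lsnr_pending_wq is woken so a re-registration of the same
 * listener id can proceed once the list drains.
 */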
1368static void __qseecom_processing_pending_lsnr_unregister(void)
1369{
1370 struct qseecom_unregister_pending_list *entry = NULL;
1371 struct qseecom_registered_listener_list *ptr_svc = NULL;
1372 struct list_head *pos;
1373 int ret = 0;
1374
1375 mutex_lock(&listener_access_lock);
1376 while (!list_empty(&qseecom.unregister_lsnr_pending_list_head)) {
1377 pos = qseecom.unregister_lsnr_pending_list_head.next;
1378 entry = list_entry(pos,
1379 struct qseecom_unregister_pending_list, list);
1380 if (entry && entry->data) {
1381 pr_debug("process pending unregister %d\n",
1382 entry->data->listener.id);
1383 /* don't process if qseecom_release is not called*/
1384 if (!entry->data->listener.release_called)
1385 break;
1386 ptr_svc = __qseecom_find_svc(
1387 entry->data->listener.id);
1388 if (ptr_svc) {
1389 ret = __qseecom_unregister_listener(
1390 entry->data, ptr_svc);
1391 if (ret == -EBUSY) {
1392 pr_debug("unregister %d pending again\n",
1393 entry->data->listener.id);
1394 mutex_unlock(&listener_access_lock);
1395 return;
1396 }
1397 } else
1398 pr_err("invalid listener %d\n",
1399 entry->data->listener.id);
1400 kzfree(entry->data);
1401 }
1402 list_del(pos);
1403 kzfree(entry);
1404 }
1405 mutex_unlock(&listener_access_lock);
1406 wake_up_interruptible(&qseecom.register_lsnr_pending_wq);
1407}
1408
1409static void __wakeup_unregister_listener_kthread(void)
1410{
1411 atomic_set(&qseecom.unregister_lsnr_kthread_state,
1412 LSNR_UNREG_KT_WAKEUP);
1413 wake_up_interruptible(&qseecom.unregister_lsnr_kthread_wq);
1414}
1415
1416static int __qseecom_unregister_listener_kthread_func(void *data)
1417{
1418 while (!kthread_should_stop()) {
1419 wait_event_freezable(
1420 qseecom.unregister_lsnr_kthread_wq,
1421 atomic_read(&qseecom.unregister_lsnr_kthread_state)
1422 == LSNR_UNREG_KT_WAKEUP);
1423 pr_debug("kthread to unregister listener is called %d\n",
1424 atomic_read(&qseecom.unregister_lsnr_kthread_state));
1425 __qseecom_processing_pending_lsnr_unregister();
1426 atomic_set(&qseecom.unregister_lsnr_kthread_state,
1427 LSNR_UNREG_KT_SLEEP);
1428 }
1429 pr_warn("kthread to unregister listener stopped\n");
1430 return 0;
1431}
1432
1433static int __qseecom_set_msm_bus_request(uint32_t mode)
1434{
1435 int ret = 0;
1436 struct qseecom_clk *qclk;
1437
1438 qclk = &qseecom.qsee;
1439 if (qclk->ce_core_src_clk != NULL) {
1440 if (mode == INACTIVE) {
1441 __qseecom_disable_clk(CLK_QSEE);
1442 } else {
1443 ret = __qseecom_enable_clk(CLK_QSEE);
1444 if (ret)
1445 pr_err("CLK enabling failed (%d) MODE (%d)\n",
1446 ret, mode);
1447 }
1448 }
1449
1450 if ((!ret) && (qseecom.current_mode != mode)) {
1451 ret = msm_bus_scale_client_update_request(
1452 qseecom.qsee_perf_client, mode);
1453 if (ret) {
1454 pr_err("Bandwidth req failed(%d) MODE (%d)\n",
1455 ret, mode);
1456 if (qclk->ce_core_src_clk != NULL) {
1457 if (mode == INACTIVE) {
1458 ret = __qseecom_enable_clk(CLK_QSEE);
1459 if (ret)
1460 pr_err("CLK enable failed\n");
1461 } else
1462 __qseecom_disable_clk(CLK_QSEE);
1463 }
1464 }
1465 qseecom.current_mode = mode;
1466 }
1467 return ret;
1468}
1469
1470static void qseecom_bw_inactive_req_work(struct work_struct *work)
1471{
1472 mutex_lock(&app_access_lock);
1473 mutex_lock(&qsee_bw_mutex);
1474 if (qseecom.timer_running)
1475 __qseecom_set_msm_bus_request(INACTIVE);
1476 pr_debug("current_mode = %d, cumulative_mode = %d\n",
1477 qseecom.current_mode, qseecom.cumulative_mode);
1478 qseecom.timer_running = false;
1479 mutex_unlock(&qsee_bw_mutex);
1480 mutex_unlock(&app_access_lock);
1481}
1482
1483static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
1484{
1485 schedule_work(&qseecom.bw_inactive_req_ws);
1486}
1487
1488static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1489{
1490 struct qseecom_clk *qclk;
1491 int ret = 0;
1492
1493 mutex_lock(&clk_access_lock);
1494 if (ce == CLK_QSEE)
1495 qclk = &qseecom.qsee;
1496 else
1497 qclk = &qseecom.ce_drv;
1498
1499 if (qclk->clk_access_cnt > 2) {
1500 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1501 ret = -EINVAL;
1502 goto err_dec_ref_cnt;
1503 }
1504 if (qclk->clk_access_cnt == 2)
1505 qclk->clk_access_cnt--;
1506
1507err_dec_ref_cnt:
1508 mutex_unlock(&clk_access_lock);
1509 return ret;
1510}
1511
1512
1513static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1514{
1515 int32_t ret = 0;
1516 int32_t request_mode = INACTIVE;
1517
1518 mutex_lock(&qsee_bw_mutex);
1519 if (mode == 0) {
1520 if (qseecom.cumulative_mode > MEDIUM)
1521 request_mode = HIGH;
1522 else
1523 request_mode = qseecom.cumulative_mode;
1524 } else {
1525 request_mode = mode;
1526 }
1527
1528 ret = __qseecom_set_msm_bus_request(request_mode);
1529 if (ret) {
1530 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1531 ret, request_mode);
1532 goto err_scale_timer;
1533 }
1534
1535 if (qseecom.timer_running) {
1536 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1537 if (ret) {
1538 pr_err("Failed to decrease clk ref count.\n");
1539 goto err_scale_timer;
1540 }
1541 del_timer_sync(&(qseecom.bw_scale_down_timer));
1542 qseecom.timer_running = false;
1543 }
1544err_scale_timer:
1545 mutex_unlock(&qsee_bw_mutex);
1546 return ret;
1547}
1548
1549
1550static int qseecom_unregister_bus_bandwidth_needs(
1551 struct qseecom_dev_handle *data)
1552{
1553 int32_t ret = 0;
1554
1555 qseecom.cumulative_mode -= data->mode;
1556 data->mode = INACTIVE;
1557
1558 return ret;
1559}
1560
1561static int __qseecom_register_bus_bandwidth_needs(
1562 struct qseecom_dev_handle *data, uint32_t request_mode)
1563{
1564 int32_t ret = 0;
1565
1566 if (data->mode == INACTIVE) {
1567 qseecom.cumulative_mode += request_mode;
1568 data->mode = request_mode;
1569 } else {
1570 if (data->mode != request_mode) {
1571 qseecom.cumulative_mode -= data->mode;
1572 qseecom.cumulative_mode += request_mode;
1573 data->mode = request_mode;
1574 }
1575 }
1576 return ret;
1577}
1578
1579static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1580{
1581 int ret = 0;
1582
1583 ret = qsee_vote_for_clock(data, CLK_DFAB);
1584 if (ret) {
1585 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1586 goto perf_enable_exit;
1587 }
1588 ret = qsee_vote_for_clock(data, CLK_SFPB);
1589 if (ret) {
1590 qsee_disable_clock_vote(data, CLK_DFAB);
1591 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1592 goto perf_enable_exit;
1593 }
1594
1595perf_enable_exit:
1596 return ret;
1597}
1598
1599static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1600 void __user *argp)
1601{
1602 int32_t ret = 0;
1603 int32_t req_mode;
1604
1605 if (qseecom.no_clock_support)
1606 return 0;
1607
1608 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1609 if (ret) {
1610 pr_err("copy_from_user failed\n");
1611 return ret;
1612 }
1613 if (req_mode > HIGH) {
1614 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1615 return -EINVAL;
1616 }
1617
1618 /*
1619 * Register bus bandwidth needs if bus scaling feature is enabled;
1620	 * otherwise, qseecom enables/disables clocks for the client directly.
1621 */
1622 if (qseecom.support_bus_scaling) {
1623 mutex_lock(&qsee_bw_mutex);
1624 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1625 mutex_unlock(&qsee_bw_mutex);
1626 } else {
1627 pr_debug("Bus scaling feature is NOT enabled\n");
1628 pr_debug("request bandwidth mode %d for the client\n",
1629 req_mode);
1630 if (req_mode != INACTIVE) {
1631 ret = qseecom_perf_enable(data);
1632 if (ret)
1633 pr_err("Failed to vote for clock with err %d\n",
1634 ret);
1635 } else {
1636 qsee_disable_clock_vote(data, CLK_DFAB);
1637 qsee_disable_clock_vote(data, CLK_SFPB);
1638 }
1639 }
1640 return ret;
1641}
1642
1643static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1644{
1645 if (qseecom.no_clock_support)
1646 return;
1647
1648 mutex_lock(&qsee_bw_mutex);
1649 qseecom.bw_scale_down_timer.expires = jiffies +
1650 msecs_to_jiffies(duration);
1651 mod_timer(&(qseecom.bw_scale_down_timer),
1652 qseecom.bw_scale_down_timer.expires);
1653 qseecom.timer_running = true;
1654 mutex_unlock(&qsee_bw_mutex);
1655}
1656
1657static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1658{
1659 if (!qseecom.support_bus_scaling)
1660 qsee_disable_clock_vote(data, CLK_SFPB);
1661 else
1662 __qseecom_add_bw_scale_down_timer(
1663 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1664}
1665
1666static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1667{
1668 int ret = 0;
1669
1670 if (qseecom.support_bus_scaling) {
1671 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1672 if (ret)
1673 pr_err("Failed to set bw MEDIUM.\n");
1674 } else {
1675 ret = qsee_vote_for_clock(data, CLK_SFPB);
1676 if (ret)
1677 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1678 }
1679 return ret;
1680}
1681
1682static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1683 void __user *argp)
1684{
1685 ion_phys_addr_t pa;
1686 int32_t ret;
1687 struct qseecom_set_sb_mem_param_req req;
1688 size_t len;
1689
1690 /* Copy the relevant information needed for loading the image */
1691 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1692 return -EFAULT;
1693
1694 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1695 (req.sb_len == 0)) {
1696		pr_err("Invalid input(s): ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1697 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1698 return -EFAULT;
1699 }
1700 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1701 req.sb_len))
1702 return -EFAULT;
1703
1704 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001705 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001706 req.ifd_data_fd);
1707 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1708 pr_err("Ion client could not retrieve the handle\n");
1709 return -ENOMEM;
1710 }
1711 /* Get the physical address of the ION BUF */
1712 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1713 if (ret) {
1714
1715 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1716 ret);
1717 return ret;
1718 }
1719
1720 if (len < req.sb_len) {
1721 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1722 req.sb_len, len);
1723 return -EINVAL;
1724 }
1725	/* Map the ION buffer and record the client shared buffer info */
1726 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1727 data->client.ihandle);
1728 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1729 pr_err("ION memory mapping for client shared buf failed\n");
1730 return -ENOMEM;
1731 }
1732 data->client.sb_phys = (phys_addr_t)pa;
1733 data->client.sb_length = req.sb_len;
1734 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1735 return 0;
1736}
1737
Zhen Kong26e62742018-05-04 17:19:06 -07001738static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
1739 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001740{
1741 int ret;
1742
1743 ret = (qseecom.send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001744 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001745}
1746
1747static int __qseecom_reentrancy_listener_has_sent_rsp(
1748 struct qseecom_dev_handle *data,
1749 struct qseecom_registered_listener_list *ptr_svc)
1750{
1751 int ret;
1752
1753 ret = (ptr_svc->send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001754 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001755}
1756
1757static void __qseecom_clean_listener_sglistinfo(
1758 struct qseecom_registered_listener_list *ptr_svc)
1759{
1760 if (ptr_svc->sglist_cnt) {
1761 memset(ptr_svc->sglistinfo_ptr, 0,
1762 SGLISTINFO_TABLE_SIZE);
1763 ptr_svc->sglist_cnt = 0;
1764 }
1765}
1766
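/*
 * Handle a QSEOS_RESULT_INCOMPLETE response from TZ: wake up the listener
 * service identified by resp->data, wait for it to post its response, and
 * send that response back to TZ with another scm_call. Repeat for as long
 * as TZ keeps returning QSEOS_RESULT_INCOMPLETE.
 */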
1767static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
1768 struct qseecom_command_scm_resp *resp)
1769{
1770 int ret = 0;
1771 int rc = 0;
1772 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07001773 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
1774 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
1775 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001776 struct qseecom_registered_listener_list *ptr_svc = NULL;
1777 sigset_t new_sigset;
1778 sigset_t old_sigset;
1779 uint32_t status;
1780 void *cmd_buf = NULL;
1781 size_t cmd_len;
1782 struct sglist_info *table = NULL;
1783
Zhen Kongbcdeda22018-11-16 13:50:51 -08001784 qseecom.app_block_ref_cnt++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001785 while (resp->result == QSEOS_RESULT_INCOMPLETE) {
1786 lstnr = resp->data;
1787 /*
1788		 * Wake up blocking listener service with the lstnr id
1789 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08001790 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001791 list_for_each_entry(ptr_svc,
1792 &qseecom.registered_listener_list_head, list) {
1793 if (ptr_svc->svc.listener_id == lstnr) {
1794 ptr_svc->listener_in_use = true;
1795 ptr_svc->rcv_req_flag = 1;
1796 wake_up_interruptible(&ptr_svc->rcv_req_wq);
1797 break;
1798 }
1799 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001800
1801 if (ptr_svc == NULL) {
1802 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07001803 rc = -EINVAL;
1804 status = QSEOS_RESULT_FAILURE;
1805 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001806 }
1807
1808 if (!ptr_svc->ihandle) {
1809 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07001810 rc = -EINVAL;
1811 status = QSEOS_RESULT_FAILURE;
1812 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001813 }
1814
1815 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07001816 pr_err("Service %d does not exist\n",
1817 lstnr);
1818 rc = -ERESTARTSYS;
1819 ptr_svc = NULL;
1820 status = QSEOS_RESULT_FAILURE;
1821 goto err_resp;
1822 }
1823
1824 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08001825 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07001826 lstnr, ptr_svc->abort);
1827 rc = -ENODEV;
1828 status = QSEOS_RESULT_FAILURE;
1829 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001830 }
Zhen Kong25731112018-09-20 13:10:03 -07001831
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001832 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
1833
1834 /* initialize the new signal mask with all signals*/
1835 sigfillset(&new_sigset);
1836 /* block all signals */
1837 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
1838
Zhen Kongbcdeda22018-11-16 13:50:51 -08001839 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001840 do {
1841 /*
1842 * When reentrancy is not supported, check global
1843 * send_resp_flag; otherwise, check this listener's
1844 * send_resp_flag.
1845 */
1846 if (!qseecom.qsee_reentrancy_support &&
1847 !wait_event_freezable(qseecom.send_resp_wq,
Zhen Kong26e62742018-05-04 17:19:06 -07001848 __qseecom_listener_has_sent_rsp(
1849 data, ptr_svc))) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001850 break;
1851 }
1852
1853 if (qseecom.qsee_reentrancy_support &&
1854 !wait_event_freezable(qseecom.send_resp_wq,
1855 __qseecom_reentrancy_listener_has_sent_rsp(
1856 data, ptr_svc))) {
1857 break;
1858 }
1859 } while (1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001860 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001861 /* restore signal mask */
1862 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07001863 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001864			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
1865 data->client.app_id, lstnr, ret);
1866 rc = -ENODEV;
1867 status = QSEOS_RESULT_FAILURE;
1868 } else {
1869 status = QSEOS_RESULT_SUCCESS;
1870 }
Zhen Kong26e62742018-05-04 17:19:06 -07001871err_resp:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001872 qseecom.send_resp_flag = 0;
Zhen Kong7d500032018-08-06 16:58:31 -07001873 if (ptr_svc) {
1874 ptr_svc->send_resp_flag = 0;
1875 table = ptr_svc->sglistinfo_ptr;
1876 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001877 if (qseecom.qsee_version < QSEE_VERSION_40) {
1878 send_data_rsp.listener_id = lstnr;
1879 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001880 if (table) {
1881 send_data_rsp.sglistinfo_ptr =
1882 (uint32_t)virt_to_phys(table);
1883 send_data_rsp.sglistinfo_len =
1884 SGLISTINFO_TABLE_SIZE;
1885 dmac_flush_range((void *)table,
1886 (void *)table + SGLISTINFO_TABLE_SIZE);
1887 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001888 cmd_buf = (void *)&send_data_rsp;
1889 cmd_len = sizeof(send_data_rsp);
1890 } else {
1891 send_data_rsp_64bit.listener_id = lstnr;
1892 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001893 if (table) {
1894 send_data_rsp_64bit.sglistinfo_ptr =
1895 virt_to_phys(table);
1896 send_data_rsp_64bit.sglistinfo_len =
1897 SGLISTINFO_TABLE_SIZE;
1898 dmac_flush_range((void *)table,
1899 (void *)table + SGLISTINFO_TABLE_SIZE);
1900 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001901 cmd_buf = (void *)&send_data_rsp_64bit;
1902 cmd_len = sizeof(send_data_rsp_64bit);
1903 }
Zhen Kong7d500032018-08-06 16:58:31 -07001904 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001905 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
1906 else
1907 *(uint32_t *)cmd_buf =
1908 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07001909 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001910 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
1911 ptr_svc->ihandle,
1912 ptr_svc->sb_virt, ptr_svc->sb_length,
1913 ION_IOC_CLEAN_INV_CACHES);
1914 if (ret) {
1915 pr_err("cache operation failed %d\n", ret);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001916 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001917 }
1918 }
1919
1920 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
1921 ret = __qseecom_enable_clk(CLK_QSEE);
1922 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08001923 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001924 }
1925
1926 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1927 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07001928 if (ptr_svc) {
1929 ptr_svc->listener_in_use = false;
1930 __qseecom_clean_listener_sglistinfo(ptr_svc);
1931 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001932 if (ret) {
1933 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1934 ret, data->client.app_id);
1935 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1936 __qseecom_disable_clk(CLK_QSEE);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001937 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001938 }
Zhen Kong26e62742018-05-04 17:19:06 -07001939 pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
1940 status, resp->result, data->client.app_id, lstnr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001941 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1942 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1943 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1944 resp->result, data->client.app_id, lstnr);
1945 ret = -EINVAL;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001946 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001947 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001948exit:
1949 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001950 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1951 __qseecom_disable_clk(CLK_QSEE);
1952
1953 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001954 qseecom.app_block_ref_cnt--;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001955 if (rc)
1956 return rc;
1957
1958 return ret;
1959}
1960
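/*
 * A TA session is blocked on a listener: sleep until that listener is free
 * again, then issue QSEOS_CONTINUE_BLOCKED_REQ_COMMAND so TZ resumes the
 * blocked app, repeating while TZ keeps reporting
 * QSEOS_RESULT_BLOCKED_ON_LISTENER.
 */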
Zhen Konga91aaf02018-02-02 17:21:04 -08001961static int __qseecom_process_reentrancy_blocked_on_listener(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001962 struct qseecom_command_scm_resp *resp,
1963 struct qseecom_registered_app_list *ptr_app,
1964 struct qseecom_dev_handle *data)
1965{
1966 struct qseecom_registered_listener_list *list_ptr;
1967 int ret = 0;
1968 struct qseecom_continue_blocked_request_ireq ireq;
1969 struct qseecom_command_scm_resp continue_resp;
Zhen Konga91aaf02018-02-02 17:21:04 -08001970 unsigned int session_id;
Zhen Kong3d1d92f2018-02-02 17:21:04 -08001971 sigset_t new_sigset;
1972 sigset_t old_sigset;
Zhen Konga91aaf02018-02-02 17:21:04 -08001973 unsigned long flags;
1974 bool found_app = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001975
1976 if (!resp || !data) {
1977 pr_err("invalid resp or data pointer\n");
1978 ret = -EINVAL;
1979 goto exit;
1980 }
1981
1982 /* find app_id & img_name from list */
Zhen Konge4804722019-02-27 21:13:18 -08001983 if (!ptr_app && data->client.app_arch != ELFCLASSNONE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001984 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
1985 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
1986 list) {
1987 if ((ptr_app->app_id == data->client.app_id) &&
1988 (!strcmp(ptr_app->app_name,
1989 data->client.app_name))) {
1990 found_app = true;
1991 break;
1992 }
1993 }
1994 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
1995 flags);
1996 if (!found_app) {
1997 pr_err("app_id %d (%s) is not found\n",
1998 data->client.app_id,
1999 (char *)data->client.app_name);
2000 ret = -ENOENT;
2001 goto exit;
2002 }
2003 }
2004
Zhen Kongd8cc0052017-11-13 15:13:31 -08002005 do {
Zhen Konga91aaf02018-02-02 17:21:04 -08002006 session_id = resp->resp_type;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002007 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002008 list_ptr = __qseecom_find_svc(resp->data);
2009 if (!list_ptr) {
2010 pr_err("Invalid listener ID %d\n", resp->data);
2011 ret = -ENODATA;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002012 mutex_unlock(&listener_access_lock);
Zhen Konge7f525f2017-12-01 18:26:25 -08002013 goto exit;
2014 }
Zhen Konga91aaf02018-02-02 17:21:04 -08002015 ptr_app->blocked_on_listener_id = resp->data;
2016
2017 pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
2018 resp->data, list_ptr->listener_in_use,
2019 session_id, data->client.app_id);
2020
2021 /* sleep until listener is available */
2022 sigfillset(&new_sigset);
2023 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2024
2025 do {
2026 qseecom.app_block_ref_cnt++;
2027 ptr_app->app_blocked = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002028 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002029 mutex_unlock(&app_access_lock);
2030 wait_event_freezable(
2031 list_ptr->listener_block_app_wq,
2032 !list_ptr->listener_in_use);
2033 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002034 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002035 ptr_app->app_blocked = false;
2036 qseecom.app_block_ref_cnt--;
2037 } while (list_ptr->listener_in_use);
2038
2039 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2040
2041 ptr_app->blocked_on_listener_id = 0;
2042 pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
2043 resp->data, session_id, data->client.app_id);
2044
2045 /* notify TZ that listener is available */
2046 ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
2047
2048 if (qseecom.smcinvoke_support)
2049 ireq.app_or_session_id = session_id;
2050 else
2051 ireq.app_or_session_id = data->client.app_id;
2052
2053 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2054 &ireq, sizeof(ireq),
2055 &continue_resp, sizeof(continue_resp));
2056 if (ret && qseecom.smcinvoke_support) {
2057 /* retry with legacy cmd */
2058 qseecom.smcinvoke_support = false;
2059 ireq.app_or_session_id = data->client.app_id;
2060 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2061 &ireq, sizeof(ireq),
2062 &continue_resp, sizeof(continue_resp));
2063 qseecom.smcinvoke_support = true;
2064 if (ret) {
2065 pr_err("unblock app %d or session %d fail\n",
2066 data->client.app_id, session_id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002067 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002068 goto exit;
2069 }
2070 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08002071 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002072 resp->result = continue_resp.result;
2073 resp->resp_type = continue_resp.resp_type;
2074 resp->data = continue_resp.data;
2075 pr_debug("unblock resp = %d\n", resp->result);
2076 } while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);
2077
2078 if (resp->result != QSEOS_RESULT_INCOMPLETE) {
2079 pr_err("Unexpected unblock resp %d\n", resp->result);
2080 ret = -EINVAL;
Zhen Kong2f60f492017-06-29 15:22:14 -07002081 }
Zhen Kong2f60f492017-06-29 15:22:14 -07002082exit:
2083 return ret;
2084}
2085
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002086static int __qseecom_reentrancy_process_incomplete_cmd(
2087 struct qseecom_dev_handle *data,
2088 struct qseecom_command_scm_resp *resp)
2089{
2090 int ret = 0;
2091 int rc = 0;
2092 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07002093 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
2094 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
2095 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002096 struct qseecom_registered_listener_list *ptr_svc = NULL;
2097 sigset_t new_sigset;
2098 sigset_t old_sigset;
2099 uint32_t status;
2100 void *cmd_buf = NULL;
2101 size_t cmd_len;
2102 struct sglist_info *table = NULL;
2103
Zhen Kong26e62742018-05-04 17:19:06 -07002104 while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002105 lstnr = resp->data;
2106 /*
2107		 * Wake up blocking listener service with the lstnr id
2108 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002109 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002110 list_for_each_entry(ptr_svc,
2111 &qseecom.registered_listener_list_head, list) {
2112 if (ptr_svc->svc.listener_id == lstnr) {
2113 ptr_svc->listener_in_use = true;
2114 ptr_svc->rcv_req_flag = 1;
2115 wake_up_interruptible(&ptr_svc->rcv_req_wq);
2116 break;
2117 }
2118 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002119
2120 if (ptr_svc == NULL) {
2121 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07002122 rc = -EINVAL;
2123 status = QSEOS_RESULT_FAILURE;
2124 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002125 }
2126
2127 if (!ptr_svc->ihandle) {
2128 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07002129 rc = -EINVAL;
2130 status = QSEOS_RESULT_FAILURE;
2131 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002132 }
2133
2134 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07002135 pr_err("Service %d does not exist\n",
2136 lstnr);
2137 rc = -ERESTARTSYS;
2138 ptr_svc = NULL;
2139 status = QSEOS_RESULT_FAILURE;
2140 goto err_resp;
2141 }
2142
2143 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08002144 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07002145 lstnr, ptr_svc->abort);
2146 rc = -ENODEV;
2147 status = QSEOS_RESULT_FAILURE;
2148 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002149 }
Zhen Kong25731112018-09-20 13:10:03 -07002150
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002151 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
2152
2153 /* initialize the new signal mask with all signals*/
2154 sigfillset(&new_sigset);
2155
2156 /* block all signals */
2157 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2158
2159		/* unlock mutexes between waking the listener and the sleep-wait */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002160 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002161 mutex_unlock(&app_access_lock);
2162 do {
2163 if (!wait_event_freezable(qseecom.send_resp_wq,
2164 __qseecom_reentrancy_listener_has_sent_rsp(
2165 data, ptr_svc))) {
2166 break;
2167 }
2168 } while (1);
2169 /* lock mutex again after resp sent */
2170 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002171 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002172 ptr_svc->send_resp_flag = 0;
2173 qseecom.send_resp_flag = 0;
2174
2175 /* restore signal mask */
2176 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07002177 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002178			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
2179 data->client.app_id, lstnr, ret);
2180 rc = -ENODEV;
2181 status = QSEOS_RESULT_FAILURE;
2182 } else {
2183 status = QSEOS_RESULT_SUCCESS;
2184 }
Zhen Kong26e62742018-05-04 17:19:06 -07002185err_resp:
Zhen Kong7d500032018-08-06 16:58:31 -07002186 if (ptr_svc)
2187 table = ptr_svc->sglistinfo_ptr;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002188 if (qseecom.qsee_version < QSEE_VERSION_40) {
2189 send_data_rsp.listener_id = lstnr;
2190 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002191 if (table) {
2192 send_data_rsp.sglistinfo_ptr =
2193 (uint32_t)virt_to_phys(table);
2194 send_data_rsp.sglistinfo_len =
2195 SGLISTINFO_TABLE_SIZE;
2196 dmac_flush_range((void *)table,
2197 (void *)table + SGLISTINFO_TABLE_SIZE);
2198 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002199 cmd_buf = (void *)&send_data_rsp;
2200 cmd_len = sizeof(send_data_rsp);
2201 } else {
2202 send_data_rsp_64bit.listener_id = lstnr;
2203 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002204 if (table) {
2205 send_data_rsp_64bit.sglistinfo_ptr =
2206 virt_to_phys(table);
2207 send_data_rsp_64bit.sglistinfo_len =
2208 SGLISTINFO_TABLE_SIZE;
2209 dmac_flush_range((void *)table,
2210 (void *)table + SGLISTINFO_TABLE_SIZE);
2211 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002212 cmd_buf = (void *)&send_data_rsp_64bit;
2213 cmd_len = sizeof(send_data_rsp_64bit);
2214 }
Zhen Kong7d500032018-08-06 16:58:31 -07002215 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002216 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
2217 else
2218 *(uint32_t *)cmd_buf =
2219 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07002220 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002221 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
2222 ptr_svc->ihandle,
2223 ptr_svc->sb_virt, ptr_svc->sb_length,
2224 ION_IOC_CLEAN_INV_CACHES);
2225 if (ret) {
2226 pr_err("cache operation failed %d\n", ret);
2227 return ret;
2228 }
2229 }
2230 if (lstnr == RPMB_SERVICE) {
2231 ret = __qseecom_enable_clk(CLK_QSEE);
2232 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08002233 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002234 }
2235
2236 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2237 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07002238 if (ptr_svc) {
2239 ptr_svc->listener_in_use = false;
2240 __qseecom_clean_listener_sglistinfo(ptr_svc);
2241 wake_up_interruptible(&ptr_svc->listener_block_app_wq);
2242 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002243
2244 if (ret) {
2245 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
2246 ret, data->client.app_id);
2247 goto exit;
2248 }
2249
2250 switch (resp->result) {
2251 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
2252 pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
2253 lstnr, data->client.app_id, resp->data);
2254 if (lstnr == resp->data) {
2255 pr_err("lstnr %d should not be blocked!\n",
2256 lstnr);
2257 ret = -EINVAL;
2258 goto exit;
2259 }
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002260 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002261 ret = __qseecom_process_reentrancy_blocked_on_listener(
2262 resp, NULL, data);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002263 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002264 if (ret) {
2265 pr_err("failed to process App(%d) %s blocked on listener %d\n",
2266 data->client.app_id,
2267 data->client.app_name, resp->data);
2268 goto exit;
2269 }
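			/* fall through - resp has been refreshed by the unblock */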
2270 case QSEOS_RESULT_SUCCESS:
2271 case QSEOS_RESULT_INCOMPLETE:
2272 break;
2273 default:
2274 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
2275 resp->result, data->client.app_id, lstnr);
2276 ret = -EINVAL;
2277 goto exit;
2278 }
2279exit:
Zhen Kongbcdeda22018-11-16 13:50:51 -08002280 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002281 if (lstnr == RPMB_SERVICE)
2282 __qseecom_disable_clk(CLK_QSEE);
2283
2284 }
2285 if (rc)
2286 return rc;
2287
2288 return ret;
2289}
2290
2291/*
2292 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2293 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2294 * So, before sending an OS level scm call, first check whether any app is
2295 * blocked and, if so, wait until all apps are unblocked.
2296 */
2297static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2298{
2299 sigset_t new_sigset, old_sigset;
2300
2301 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2302 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2303 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2304 /* thread sleep until this app unblocked */
2305 while (qseecom.app_block_ref_cnt > 0) {
2306 sigfillset(&new_sigset);
2307 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2308 mutex_unlock(&app_access_lock);
2309 do {
2310 if (!wait_event_freezable(qseecom.app_block_wq,
2311 (qseecom.app_block_ref_cnt == 0)))
2312 break;
2313 } while (1);
2314 mutex_lock(&app_access_lock);
2315 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2316 }
2317 }
2318}
2319
2320/*
2321 * scm_call of send data will fail if this TA is blocked or if more than
2322 * one TA is requesting listener services; so, first check whether we need
2323 * to wait.
2324 */
2325static void __qseecom_reentrancy_check_if_this_app_blocked(
2326 struct qseecom_registered_app_list *ptr_app)
2327{
2328 sigset_t new_sigset, old_sigset;
2329
2330 if (qseecom.qsee_reentrancy_support) {
Zhen Kongdea10592018-07-30 17:50:10 -07002331 ptr_app->check_block++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002332 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2333 /* thread sleep until this app unblocked */
2334 sigfillset(&new_sigset);
2335 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2336 mutex_unlock(&app_access_lock);
2337 do {
2338 if (!wait_event_freezable(qseecom.app_block_wq,
2339 (!ptr_app->app_blocked &&
2340 qseecom.app_block_ref_cnt <= 1)))
2341 break;
2342 } while (1);
2343 mutex_lock(&app_access_lock);
2344 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2345 }
Zhen Kongdea10592018-07-30 17:50:10 -07002346 ptr_app->check_block--;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002347 }
2348}
2349
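/*
 * Resolve an app name to its app_id: check the locally registered app list
 * first, then fall back to an SCM lookup call into QSEE. Returns 0 with
 * *app_id left at 0 when the app is not loaded.
 */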
2350static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2351 uint32_t *app_id)
2352{
2353 int32_t ret;
2354 struct qseecom_command_scm_resp resp;
2355 bool found_app = false;
2356 struct qseecom_registered_app_list *entry = NULL;
2357 unsigned long flags = 0;
2358
2359 if (!app_id) {
2360 pr_err("Null pointer to app_id\n");
2361 return -EINVAL;
2362 }
2363 *app_id = 0;
2364
2365 /* check if app exists and has been registered locally */
2366 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2367 list_for_each_entry(entry,
2368 &qseecom.registered_app_list_head, list) {
2369 if (!strcmp(entry->app_name, req.app_name)) {
2370 found_app = true;
2371 break;
2372 }
2373 }
2374 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2375 if (found_app) {
2376 pr_debug("Found app with id %d\n", entry->app_id);
2377 *app_id = entry->app_id;
2378 return 0;
2379 }
2380
2381 memset((void *)&resp, 0, sizeof(resp));
2382
2383 /* SCM_CALL to check if app_id for the mentioned app exists */
2384 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2385 sizeof(struct qseecom_check_app_ireq),
2386 &resp, sizeof(resp));
2387 if (ret) {
2388 pr_err("scm_call to check if app is already loaded failed\n");
2389 return -EINVAL;
2390 }
2391
2392 if (resp.result == QSEOS_RESULT_FAILURE)
2393 return 0;
2394
2395 switch (resp.resp_type) {
2396 /*qsee returned listener type response */
2397 case QSEOS_LISTENER_ID:
2398		pr_err("resp type is of listener type instead of app\n");
2399 return -EINVAL;
2400 case QSEOS_APP_ID:
2401 *app_id = resp.data;
2402 return 0;
2403 default:
2404		pr_err("invalid resp type (%d) from qsee\n",
2405 resp.resp_type);
2406 return -ENODEV;
2407 }
2408}
2409
2410static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2411{
2412 struct qseecom_registered_app_list *entry = NULL;
2413 unsigned long flags = 0;
2414 u32 app_id = 0;
2415 struct ion_handle *ihandle; /* Ion handle */
2416 struct qseecom_load_img_req load_img_req;
2417 int32_t ret = 0;
2418 ion_phys_addr_t pa = 0;
2419 size_t len;
2420 struct qseecom_command_scm_resp resp;
2421 struct qseecom_check_app_ireq req;
2422 struct qseecom_load_app_ireq load_req;
2423 struct qseecom_load_app_64bit_ireq load_req_64bit;
2424 void *cmd_buf = NULL;
2425 size_t cmd_len;
2426 bool first_time = false;
2427
2428 /* Copy the relevant information needed for loading the image */
2429 if (copy_from_user(&load_img_req,
2430 (void __user *)argp,
2431 sizeof(struct qseecom_load_img_req))) {
2432 pr_err("copy_from_user failed\n");
2433 return -EFAULT;
2434 }
2435
2436 /* Check and load cmnlib */
2437 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2438 if (!qseecom.commonlib_loaded &&
2439 load_img_req.app_arch == ELFCLASS32) {
2440 ret = qseecom_load_commonlib_image(data, "cmnlib");
2441 if (ret) {
2442 pr_err("failed to load cmnlib\n");
2443 return -EIO;
2444 }
2445 qseecom.commonlib_loaded = true;
2446 pr_debug("cmnlib is loaded\n");
2447 }
2448
2449 if (!qseecom.commonlib64_loaded &&
2450 load_img_req.app_arch == ELFCLASS64) {
2451 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2452 if (ret) {
2453 pr_err("failed to load cmnlib64\n");
2454 return -EIO;
2455 }
2456 qseecom.commonlib64_loaded = true;
2457 pr_debug("cmnlib64 is loaded\n");
2458 }
2459 }
2460
2461 if (qseecom.support_bus_scaling) {
2462 mutex_lock(&qsee_bw_mutex);
2463 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2464 mutex_unlock(&qsee_bw_mutex);
2465 if (ret)
2466 return ret;
2467 }
2468
2469 /* Vote for the SFPB clock */
2470 ret = __qseecom_enable_clk_scale_up(data);
2471 if (ret)
2472 goto enable_clk_err;
2473
2474 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2475 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2476 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2477
2478 ret = __qseecom_check_app_exists(req, &app_id);
2479 if (ret < 0)
2480 goto loadapp_err;
2481
2482 if (app_id) {
2483 pr_debug("App id %d (%s) already exists\n", app_id,
2484 (char *)(req.app_name));
2485 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2486 list_for_each_entry(entry,
2487 &qseecom.registered_app_list_head, list){
2488 if (entry->app_id == app_id) {
2489 entry->ref_cnt++;
2490 break;
2491 }
2492 }
2493 spin_unlock_irqrestore(
2494 &qseecom.registered_app_list_lock, flags);
2495 ret = 0;
2496 } else {
2497 first_time = true;
2498		pr_warn("App (%s) doesn't exist, loading app for the first time\n",
2499 (char *)(load_img_req.img_name));
2500 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002501 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002502 load_img_req.ifd_data_fd);
2503 if (IS_ERR_OR_NULL(ihandle)) {
2504 pr_err("Ion client could not retrieve the handle\n");
2505 ret = -ENOMEM;
2506 goto loadapp_err;
2507 }
2508
2509 /* Get the physical address of the ION BUF */
2510 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2511 if (ret) {
2512 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2513 ret);
2514 goto loadapp_err;
2515 }
2516 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2517 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2518 len, load_img_req.mdt_len,
2519 load_img_req.img_len);
2520 ret = -EINVAL;
2521 goto loadapp_err;
2522 }
2523 /* Populate the structure for sending scm call to load image */
2524 if (qseecom.qsee_version < QSEE_VERSION_40) {
2525 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2526 load_req.mdt_len = load_img_req.mdt_len;
2527 load_req.img_len = load_img_req.img_len;
2528 strlcpy(load_req.app_name, load_img_req.img_name,
2529 MAX_APP_NAME_SIZE);
2530 load_req.phy_addr = (uint32_t)pa;
2531 cmd_buf = (void *)&load_req;
2532 cmd_len = sizeof(struct qseecom_load_app_ireq);
2533 } else {
2534 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2535 load_req_64bit.mdt_len = load_img_req.mdt_len;
2536 load_req_64bit.img_len = load_img_req.img_len;
2537 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2538 MAX_APP_NAME_SIZE);
2539 load_req_64bit.phy_addr = (uint64_t)pa;
2540 cmd_buf = (void *)&load_req_64bit;
2541 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2542 }
2543
2544 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2545 ION_IOC_CLEAN_INV_CACHES);
2546 if (ret) {
2547 pr_err("cache operation failed %d\n", ret);
2548 goto loadapp_err;
2549 }
2550
2551 /* SCM_CALL to load the app and get the app_id back */
2552 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2553 cmd_len, &resp, sizeof(resp));
2554 if (ret) {
2555 pr_err("scm_call to load app failed\n");
2556 if (!IS_ERR_OR_NULL(ihandle))
2557 ion_free(qseecom.ion_clnt, ihandle);
2558 ret = -EINVAL;
2559 goto loadapp_err;
2560 }
2561
2562 if (resp.result == QSEOS_RESULT_FAILURE) {
2563 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2564 if (!IS_ERR_OR_NULL(ihandle))
2565 ion_free(qseecom.ion_clnt, ihandle);
2566 ret = -EFAULT;
2567 goto loadapp_err;
2568 }
2569
2570 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2571 ret = __qseecom_process_incomplete_cmd(data, &resp);
2572 if (ret) {
2573 pr_err("process_incomplete_cmd failed err: %d\n",
2574 ret);
2575 if (!IS_ERR_OR_NULL(ihandle))
2576 ion_free(qseecom.ion_clnt, ihandle);
2577 ret = -EFAULT;
2578 goto loadapp_err;
2579 }
2580 }
2581
2582 if (resp.result != QSEOS_RESULT_SUCCESS) {
2583 pr_err("scm_call failed resp.result unknown, %d\n",
2584 resp.result);
2585 if (!IS_ERR_OR_NULL(ihandle))
2586 ion_free(qseecom.ion_clnt, ihandle);
2587 ret = -EFAULT;
2588 goto loadapp_err;
2589 }
2590
2591 app_id = resp.data;
2592
2593 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2594 if (!entry) {
2595 ret = -ENOMEM;
2596 goto loadapp_err;
2597 }
2598 entry->app_id = app_id;
2599 entry->ref_cnt = 1;
2600 entry->app_arch = load_img_req.app_arch;
2601 /*
2602 * keymaster app may be first loaded as "keymaste" by qseecomd,
2603		 * and then used as "keymaster" on some targets. To avoid app
2604		 * name check failures, register it as "keymaster" in the app
2605		 * list and in the thread private data.
2606 */
2607 if (!strcmp(load_img_req.img_name, "keymaste"))
2608 strlcpy(entry->app_name, "keymaster",
2609 MAX_APP_NAME_SIZE);
2610 else
2611 strlcpy(entry->app_name, load_img_req.img_name,
2612 MAX_APP_NAME_SIZE);
2613 entry->app_blocked = false;
2614 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07002615 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002616
2617 /* Deallocate the handle */
2618 if (!IS_ERR_OR_NULL(ihandle))
2619 ion_free(qseecom.ion_clnt, ihandle);
2620
2621 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2622 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2623 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2624 flags);
2625
2626 pr_warn("App with id %u (%s) now loaded\n", app_id,
2627 (char *)(load_img_req.img_name));
2628 }
2629 data->client.app_id = app_id;
2630 data->client.app_arch = load_img_req.app_arch;
2631 if (!strcmp(load_img_req.img_name, "keymaste"))
2632 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2633 else
2634 strlcpy(data->client.app_name, load_img_req.img_name,
2635 MAX_APP_NAME_SIZE);
2636 load_img_req.app_id = app_id;
2637 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2638 pr_err("copy_to_user failed\n");
2639 ret = -EFAULT;
2640 if (first_time == true) {
2641 spin_lock_irqsave(
2642 &qseecom.registered_app_list_lock, flags);
2643 list_del(&entry->list);
2644 spin_unlock_irqrestore(
2645 &qseecom.registered_app_list_lock, flags);
2646 kzfree(entry);
2647 }
2648 }
2649
2650loadapp_err:
2651 __qseecom_disable_clk_scale_down(data);
2652enable_clk_err:
2653 if (qseecom.support_bus_scaling) {
2654 mutex_lock(&qsee_bw_mutex);
2655 qseecom_unregister_bus_bandwidth_needs(data);
2656 mutex_unlock(&qsee_bw_mutex);
2657 }
2658 return ret;
2659}
2660
2661static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2662{
2663 int ret = 1; /* Set unload app */
2664
2665 wake_up_all(&qseecom.send_resp_wq);
2666 if (qseecom.qsee_reentrancy_support)
2667 mutex_unlock(&app_access_lock);
2668 while (atomic_read(&data->ioctl_count) > 1) {
2669 if (wait_event_freezable(data->abort_wq,
2670 atomic_read(&data->ioctl_count) <= 1)) {
2671 pr_err("Interrupted from abort\n");
2672 ret = -ERESTARTSYS;
2673 break;
2674 }
2675 }
2676 if (qseecom.qsee_reentrancy_support)
2677 mutex_lock(&app_access_lock);
2678 return ret;
2679}
2680
2681static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2682{
2683 int ret = 0;
2684
2685 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2686 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2687 ion_free(qseecom.ion_clnt, data->client.ihandle);
2688 data->client.ihandle = NULL;
2689 }
2690 return ret;
2691}
2692
2693static int qseecom_unload_app(struct qseecom_dev_handle *data,
2694 bool app_crash)
2695{
2696 unsigned long flags;
2697 unsigned long flags1;
2698 int ret = 0;
2699 struct qseecom_command_scm_resp resp;
2700 struct qseecom_registered_app_list *ptr_app = NULL;
2701 bool unload = false;
2702 bool found_app = false;
2703 bool found_dead_app = false;
2704
2705 if (!data) {
2706 pr_err("Invalid/uninitialized device handle\n");
2707 return -EINVAL;
2708 }
2709
2710 if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
2711 pr_debug("Do not unload keymaster app from tz\n");
2712 goto unload_exit;
2713 }
2714
2715 __qseecom_cleanup_app(data);
2716 __qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
2717
2718 if (data->client.app_id > 0) {
2719 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2720 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
2721 list) {
2722 if (ptr_app->app_id == data->client.app_id) {
2723 if (!strcmp((void *)ptr_app->app_name,
2724 (void *)data->client.app_name)) {
2725 found_app = true;
Zhen Kong024798b2018-07-13 18:14:26 -07002726 if (ptr_app->app_blocked ||
2727 ptr_app->check_block)
Zhen Kongaf93d7a2017-10-13 14:01:48 -07002728 app_crash = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002729 if (app_crash || ptr_app->ref_cnt == 1)
2730 unload = true;
2731 break;
2732 }
2733 found_dead_app = true;
2734 break;
2735 }
2736 }
2737 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2738 flags);
2739 if (found_app == false && found_dead_app == false) {
2740 pr_err("Cannot find app with id = %d (%s)\n",
2741 data->client.app_id,
2742 (char *)data->client.app_name);
2743 ret = -EINVAL;
2744 goto unload_exit;
2745 }
2746 }
2747
2748 if (found_dead_app)
2749 pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
2750 (char *)data->client.app_name);
2751
2752 if (unload) {
2753 struct qseecom_unload_app_ireq req;
2754		/* Populate the structure for sending scm call to unload the app */
2755 req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
2756 req.app_id = data->client.app_id;
2757
2758 /* SCM_CALL to unload the app */
2759 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2760 sizeof(struct qseecom_unload_app_ireq),
2761 &resp, sizeof(resp));
2762 if (ret) {
2763 pr_err("scm_call to unload app (id = %d) failed\n",
2764 req.app_id);
2765 ret = -EFAULT;
2766 goto unload_exit;
2767 } else {
2768 pr_warn("App id %d now unloaded\n", req.app_id);
2769 }
2770 if (resp.result == QSEOS_RESULT_FAILURE) {
2771 pr_err("app (%d) unload_failed!!\n",
2772 data->client.app_id);
2773 ret = -EFAULT;
2774 goto unload_exit;
2775 }
2776 if (resp.result == QSEOS_RESULT_SUCCESS)
2777 pr_debug("App (%d) is unloaded!!\n",
2778 data->client.app_id);
2779 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2780 ret = __qseecom_process_incomplete_cmd(data, &resp);
2781 if (ret) {
2782 pr_err("process_incomplete_cmd fail err: %d\n",
2783 ret);
2784 goto unload_exit;
2785 }
2786 }
2787 }
2788
Zhen Kong7d500032018-08-06 16:58:31 -07002789unload_exit:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002790 if (found_app) {
2791 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2792 if (app_crash) {
2793 ptr_app->ref_cnt = 0;
2794 pr_debug("app_crash: ref_count = 0\n");
2795 } else {
2796 if (ptr_app->ref_cnt == 1) {
2797 ptr_app->ref_cnt = 0;
2798 pr_debug("ref_count set to 0\n");
2799 } else {
2800 ptr_app->ref_cnt--;
2801 pr_debug("Can't unload app(%d) inuse\n",
2802 ptr_app->app_id);
2803 }
2804 }
2805 if (unload) {
2806 list_del(&ptr_app->list);
2807 kzfree(ptr_app);
2808 }
2809 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2810 flags1);
2811 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002812 qseecom_unmap_ion_allocated_memory(data);
2813 data->released = true;
2814 return ret;
2815}
2816
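/*
 * Translate a user-space virtual address inside the client's registered
 * shared buffer into the corresponding physical or kernel virtual address.
 */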
2817static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2818 unsigned long virt)
2819{
2820 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2821}
2822
2823static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2824 unsigned long virt)
2825{
2826 return (uintptr_t)data->client.sb_virt +
2827 (virt - data->client.user_virt_sb_base);
2828}
2829
2830int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2831 struct qseecom_send_svc_cmd_req *req_ptr,
2832 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2833{
2834 int ret = 0;
2835 void *req_buf = NULL;
2836
2837 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2838 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2839 req_ptr, send_svc_ireq_ptr);
2840 return -EINVAL;
2841 }
2842
2843 /* Clients need to ensure req_buf is at base offset of shared buffer */
2844 if ((uintptr_t)req_ptr->cmd_req_buf !=
2845 data_ptr->client.user_virt_sb_base) {
2846 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2847 return -EINVAL;
2848 }
2849
2850 if (data_ptr->client.sb_length <
2851 sizeof(struct qseecom_rpmb_provision_key)) {
2852 pr_err("shared buffer is too small to hold key type\n");
2853 return -EINVAL;
2854 }
2855 req_buf = data_ptr->client.sb_virt;
2856
2857 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2858 send_svc_ireq_ptr->key_type =
2859 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2860 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2861 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2862 data_ptr, (uintptr_t)req_ptr->resp_buf));
2863 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2864
2865 return ret;
2866}
2867
2868int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2869 struct qseecom_send_svc_cmd_req *req_ptr,
2870 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2871{
2872 int ret = 0;
2873 uint32_t reqd_len_sb_in = 0;
2874
2875 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2876 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2877 req_ptr, send_svc_ireq_ptr);
2878 return -EINVAL;
2879 }
2880
2881 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2882 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2883		pr_err("Not enough memory to fit cmd_buf and resp_buf.\n");
2884 pr_err("Required: %u, Available: %zu\n",
2885 reqd_len_sb_in, data_ptr->client.sb_length);
2886 return -ENOMEM;
2887 }
2888
2889 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2890 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2891 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2892 data_ptr, (uintptr_t)req_ptr->resp_buf));
2893 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2894
2895 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2896 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2897
2898
2899 return ret;
2900}
2901
2902static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
2903 struct qseecom_send_svc_cmd_req *req)
2904{
2905 if (!req || !req->resp_buf || !req->cmd_req_buf) {
2906 pr_err("req or cmd buffer or response buffer is null\n");
2907 return -EINVAL;
2908 }
2909
2910 if (!data || !data->client.ihandle) {
2911 pr_err("Client or client handle is not initialized\n");
2912 return -EINVAL;
2913 }
2914
2915 if (data->client.sb_virt == NULL) {
2916 pr_err("sb_virt null\n");
2917 return -EINVAL;
2918 }
2919
2920 if (data->client.user_virt_sb_base == 0) {
2921 pr_err("user_virt_sb_base is null\n");
2922 return -EINVAL;
2923 }
2924
2925 if (data->client.sb_length == 0) {
2926 pr_err("sb_length is 0\n");
2927 return -EINVAL;
2928 }
2929
2930 if (((uintptr_t)req->cmd_req_buf <
2931 data->client.user_virt_sb_base) ||
2932 ((uintptr_t)req->cmd_req_buf >=
2933 (data->client.user_virt_sb_base + data->client.sb_length))) {
2934		pr_err("cmd buffer address not within shared buffer\n");
2935 return -EINVAL;
2936 }
2937 if (((uintptr_t)req->resp_buf <
2938 data->client.user_virt_sb_base) ||
2939 ((uintptr_t)req->resp_buf >=
2940 (data->client.user_virt_sb_base + data->client.sb_length))) {
2941		pr_err("response buffer address not within shared buffer\n");
2942 return -EINVAL;
2943 }
2944 if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
2945 (req->cmd_req_len > data->client.sb_length) ||
2946 (req->resp_len > data->client.sb_length)) {
2947 pr_err("cmd buf length or response buf length not valid\n");
2948 return -EINVAL;
2949 }
2950 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
2951 pr_err("Integer overflow detected in req_len & rsp_len\n");
2952 return -EINVAL;
2953 }
2954
2955 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
2956		pr_debug("Not enough memory to fit cmd_buf and resp_buf.\n");
2957		pr_debug("Required: %u, Available: %zu\n",
2958 (req->cmd_req_len + req->resp_len),
2959 data->client.sb_length);
2960 return -ENOMEM;
2961 }
2962 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
2963 pr_err("Integer overflow in req_len & cmd_req_buf\n");
2964 return -EINVAL;
2965 }
2966 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
2967 pr_err("Integer overflow in resp_len & resp_buf\n");
2968 return -EINVAL;
2969 }
2970 if (data->client.user_virt_sb_base >
2971 (ULONG_MAX - data->client.sb_length)) {
2972 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
2973 return -EINVAL;
2974 }
2975 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
2976 ((uintptr_t)data->client.user_virt_sb_base +
2977 data->client.sb_length)) ||
2978 (((uintptr_t)req->resp_buf + req->resp_len) >
2979 ((uintptr_t)data->client.user_virt_sb_base +
2980 data->client.sb_length))) {
2981 pr_err("cmd buf or resp buf is out of shared buffer region\n");
2982 return -EINVAL;
2983 }
2984 return 0;
2985}
2986
2987static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
2988 void __user *argp)
2989{
2990 int ret = 0;
2991 struct qseecom_client_send_service_ireq send_svc_ireq;
2992 struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
2993 struct qseecom_command_scm_resp resp;
2994 struct qseecom_send_svc_cmd_req req;
2995 void *send_req_ptr;
2996 size_t req_buf_size;
2997
2998 /*struct qseecom_command_scm_resp resp;*/
2999
3000 if (copy_from_user(&req,
3001 (void __user *)argp,
3002 sizeof(req))) {
3003 pr_err("copy_from_user failed\n");
3004 return -EFAULT;
3005 }
3006
3007 if (__validate_send_service_cmd_inputs(data, &req))
3008 return -EINVAL;
3009
3010 data->type = QSEECOM_SECURE_SERVICE;
3011
3012 switch (req.cmd_id) {
3013 case QSEOS_RPMB_PROVISION_KEY_COMMAND:
3014 case QSEOS_RPMB_ERASE_COMMAND:
3015 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
3016 send_req_ptr = &send_svc_ireq;
3017 req_buf_size = sizeof(send_svc_ireq);
3018 if (__qseecom_process_rpmb_svc_cmd(data, &req,
3019 send_req_ptr))
3020 return -EINVAL;
3021 break;
3022 case QSEOS_FSM_LTEOTA_REQ_CMD:
3023 case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
3024 case QSEOS_FSM_IKE_REQ_CMD:
3025 case QSEOS_FSM_IKE_REQ_RSP_CMD:
3026 case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
3027 case QSEOS_FSM_OEM_FUSE_READ_ROW:
3028 case QSEOS_FSM_ENCFS_REQ_CMD:
3029 case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
3030 send_req_ptr = &send_fsm_key_svc_ireq;
3031 req_buf_size = sizeof(send_fsm_key_svc_ireq);
3032 if (__qseecom_process_fsm_key_svc_cmd(data, &req,
3033 send_req_ptr))
3034 return -EINVAL;
3035 break;
3036 default:
3037 pr_err("Unsupported cmd_id %d\n", req.cmd_id);
3038 return -EINVAL;
3039 }
3040
3041 if (qseecom.support_bus_scaling) {
3042 ret = qseecom_scale_bus_bandwidth_timer(HIGH);
3043 if (ret) {
3044 pr_err("Fail to set bw HIGH\n");
3045 return ret;
3046 }
3047 } else {
3048 ret = qseecom_perf_enable(data);
3049 if (ret) {
3050 pr_err("Failed to vote for clocks with err %d\n", ret);
3051 goto exit;
3052 }
3053 }
3054
3055 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3056 data->client.sb_virt, data->client.sb_length,
3057 ION_IOC_CLEAN_INV_CACHES);
3058 if (ret) {
3059 pr_err("cache operation failed %d\n", ret);
3060 goto exit;
3061 }
3062 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3063 (const void *)send_req_ptr,
3064 req_buf_size, &resp, sizeof(resp));
3065 if (ret) {
3066 pr_err("qseecom_scm_call failed with err: %d\n", ret);
3067 if (!qseecom.support_bus_scaling) {
3068 qsee_disable_clock_vote(data, CLK_DFAB);
3069 qsee_disable_clock_vote(data, CLK_SFPB);
3070 } else {
3071 __qseecom_add_bw_scale_down_timer(
3072 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3073 }
3074 goto exit;
3075 }
3076 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3077 data->client.sb_virt, data->client.sb_length,
3078 ION_IOC_INV_CACHES);
3079 if (ret) {
3080 pr_err("cache operation failed %d\n", ret);
3081 goto exit;
3082 }
3083 switch (resp.result) {
3084 case QSEOS_RESULT_SUCCESS:
3085 break;
3086 case QSEOS_RESULT_INCOMPLETE:
3087 pr_debug("qseos_result_incomplete\n");
3088 ret = __qseecom_process_incomplete_cmd(data, &resp);
3089 if (ret) {
3090 pr_err("process_incomplete_cmd fail with result: %d\n",
3091 resp.result);
3092 }
3093 if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
3094 pr_warn("RPMB key status is 0x%x\n", resp.result);
Brahmaji Kb33e26e2017-06-01 17:20:10 +05303095 if (put_user(resp.result,
3096 (uint32_t __user *)req.resp_buf)) {
3097 ret = -EINVAL;
3098 goto exit;
3099 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003100 ret = 0;
3101 }
3102 break;
3103 case QSEOS_RESULT_FAILURE:
3104 pr_err("scm call failed with resp.result: %d\n", resp.result);
3105 ret = -EINVAL;
3106 break;
3107 default:
3108 pr_err("Response result %d not supported\n",
3109 resp.result);
3110 ret = -EINVAL;
3111 break;
3112 }
3113 if (!qseecom.support_bus_scaling) {
3114 qsee_disable_clock_vote(data, CLK_DFAB);
3115 qsee_disable_clock_vote(data, CLK_SFPB);
3116 } else {
3117 __qseecom_add_bw_scale_down_timer(
3118 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3119 }
3120
3121exit:
3122 return ret;
3123}
3124
3125static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
3126 struct qseecom_send_cmd_req *req)
3127
3128{
3129 if (!data || !data->client.ihandle) {
3130 pr_err("Client or client handle is not initialized\n");
3131 return -EINVAL;
3132 }
3133 if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
3134 (req->cmd_req_buf == NULL)) {
3135 pr_err("cmd buffer or response buffer is null\n");
3136 return -EINVAL;
3137 }
3138 if (((uintptr_t)req->cmd_req_buf <
3139 data->client.user_virt_sb_base) ||
3140 ((uintptr_t)req->cmd_req_buf >=
3141 (data->client.user_virt_sb_base + data->client.sb_length))) {
3142		pr_err("cmd buffer address not within shared buffer\n");
3143 return -EINVAL;
3144 }
3145 if (((uintptr_t)req->resp_buf <
3146 data->client.user_virt_sb_base) ||
3147 ((uintptr_t)req->resp_buf >=
3148 (data->client.user_virt_sb_base + data->client.sb_length))) {
3149		pr_err("response buffer address not within shared buffer\n");
3150 return -EINVAL;
3151 }
3152 if ((req->cmd_req_len == 0) ||
3153 (req->cmd_req_len > data->client.sb_length) ||
3154 (req->resp_len > data->client.sb_length)) {
3155 pr_err("cmd buf length or response buf length not valid\n");
3156 return -EINVAL;
3157 }
3158 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
3159 pr_err("Integer overflow detected in req_len & rsp_len\n");
3160 return -EINVAL;
3161 }
3162
3163 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
3164		pr_debug("Not enough memory to fit cmd_buf and resp_buf.\n");
3165		pr_debug("Required: %u, Available: %zu\n",
3166 (req->cmd_req_len + req->resp_len),
3167 data->client.sb_length);
3168 return -ENOMEM;
3169 }
3170 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
3171 pr_err("Integer overflow in req_len & cmd_req_buf\n");
3172 return -EINVAL;
3173 }
3174 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
3175 pr_err("Integer overflow in resp_len & resp_buf\n");
3176 return -EINVAL;
3177 }
3178 if (data->client.user_virt_sb_base >
3179 (ULONG_MAX - data->client.sb_length)) {
3180 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
3181 return -EINVAL;
3182 }
3183 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3184 ((uintptr_t)data->client.user_virt_sb_base +
3185 data->client.sb_length)) ||
3186 (((uintptr_t)req->resp_buf + req->resp_len) >
3187 ((uintptr_t)data->client.user_virt_sb_base +
3188 data->client.sb_length))) {
3189 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3190 return -EINVAL;
3191 }
3192 return 0;
3193}
3194
3195int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
3196 struct qseecom_registered_app_list *ptr_app,
3197 struct qseecom_dev_handle *data)
3198{
3199 int ret = 0;
3200
3201 switch (resp->result) {
3202 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
3203 pr_warn("App(%d) %s is blocked on listener %d\n",
3204 data->client.app_id, data->client.app_name,
3205 resp->data);
3206 ret = __qseecom_process_reentrancy_blocked_on_listener(
3207 resp, ptr_app, data);
3208 if (ret) {
3209				pr_err("failed to process App(%d) %s blocked on listener %d\n",
3210 data->client.app_id, data->client.app_name, resp->data);
3211 return ret;
3212 }
3213			/* fall through to process the incomplete request */
3214 case QSEOS_RESULT_INCOMPLETE:
3215 qseecom.app_block_ref_cnt++;
3216 ptr_app->app_blocked = true;
3217 ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
3218 ptr_app->app_blocked = false;
3219 qseecom.app_block_ref_cnt--;
3220 wake_up_interruptible(&qseecom.app_block_wq);
3221 if (ret)
3222 pr_err("process_incomplete_cmd failed err: %d\n",
3223 ret);
3224 return ret;
3225 case QSEOS_RESULT_SUCCESS:
3226 return ret;
3227 default:
3228 pr_err("Response result %d not supported\n",
3229 resp->result);
3230 return -EINVAL;
3231 }
3232}
3233
3234static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
3235 struct qseecom_send_cmd_req *req)
3236{
3237 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07003238 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003239 u32 reqd_len_sb_in = 0;
3240 struct qseecom_client_send_data_ireq send_data_req = {0};
3241 struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
3242 struct qseecom_command_scm_resp resp;
3243 unsigned long flags;
3244 struct qseecom_registered_app_list *ptr_app;
3245 bool found_app = false;
3246 void *cmd_buf = NULL;
3247 size_t cmd_len;
3248 struct sglist_info *table = data->sglistinfo_ptr;
3249
3250 reqd_len_sb_in = req->cmd_req_len + req->resp_len;
3251 /* find app_id & img_name from list */
3252 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
3253 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
3254 list) {
3255 if ((ptr_app->app_id == data->client.app_id) &&
3256 (!strcmp(ptr_app->app_name, data->client.app_name))) {
3257 found_app = true;
3258 break;
3259 }
3260 }
3261 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
3262
3263 if (!found_app) {
3264 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
3265 (char *)data->client.app_name);
3266 return -ENOENT;
3267 }
3268
3269 if (qseecom.qsee_version < QSEE_VERSION_40) {
3270 send_data_req.app_id = data->client.app_id;
3271 send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3272 data, (uintptr_t)req->cmd_req_buf));
3273 send_data_req.req_len = req->cmd_req_len;
3274 send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3275 data, (uintptr_t)req->resp_buf));
3276 send_data_req.rsp_len = req->resp_len;
3277 send_data_req.sglistinfo_ptr =
3278 (uint32_t)virt_to_phys(table);
3279 send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3280 dmac_flush_range((void *)table,
3281 (void *)table + SGLISTINFO_TABLE_SIZE);
3282 cmd_buf = (void *)&send_data_req;
3283 cmd_len = sizeof(struct qseecom_client_send_data_ireq);
3284 } else {
3285 send_data_req_64bit.app_id = data->client.app_id;
3286 send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
3287 (uintptr_t)req->cmd_req_buf);
3288 send_data_req_64bit.req_len = req->cmd_req_len;
3289 send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
3290 (uintptr_t)req->resp_buf);
3291 send_data_req_64bit.rsp_len = req->resp_len;
3292 /* check if 32bit app's phys_addr region is under 4GB.*/
3293 if ((data->client.app_arch == ELFCLASS32) &&
3294 ((send_data_req_64bit.req_ptr >=
3295 PHY_ADDR_4G - send_data_req_64bit.req_len) ||
3296 (send_data_req_64bit.rsp_ptr >=
3297 PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
3298 pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
3299 data->client.app_name,
3300 send_data_req_64bit.req_ptr,
3301 send_data_req_64bit.req_len,
3302 send_data_req_64bit.rsp_ptr,
3303 send_data_req_64bit.rsp_len);
3304 return -EFAULT;
3305 }
3306 send_data_req_64bit.sglistinfo_ptr =
3307 (uint64_t)virt_to_phys(table);
3308 send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3309 dmac_flush_range((void *)table,
3310 (void *)table + SGLISTINFO_TABLE_SIZE);
3311 cmd_buf = (void *)&send_data_req_64bit;
3312 cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
3313 }
3314
3315 if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
3316 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
3317 else
3318 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
3319
3320 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3321 data->client.sb_virt,
3322 reqd_len_sb_in,
3323 ION_IOC_CLEAN_INV_CACHES);
3324 if (ret) {
3325 pr_err("cache operation failed %d\n", ret);
3326 return ret;
3327 }
3328
3329 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
3330
3331 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3332 cmd_buf, cmd_len,
3333 &resp, sizeof(resp));
3334 if (ret) {
3335 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
3336 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07003337 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003338 }
3339
3340 if (qseecom.qsee_reentrancy_support) {
3341 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07003342 if (ret)
3343 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003344 } else {
3345 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
3346 ret = __qseecom_process_incomplete_cmd(data, &resp);
3347 if (ret) {
3348 pr_err("process_incomplete_cmd failed err: %d\n",
3349 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07003350 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003351 }
3352 } else {
3353 if (resp.result != QSEOS_RESULT_SUCCESS) {
3354 pr_err("Response result %d not supported\n",
3355 resp.result);
3356 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07003357 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003358 }
3359 }
3360 }
Zhen Kong4af480e2017-09-19 14:34:16 -07003361exit:
3362 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003363 data->client.sb_virt, data->client.sb_length,
3364 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07003365 if (ret2) {
3366 pr_err("cache operation failed %d\n", ret2);
3367 return ret2;
3368 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003369 return ret;
3370}
3371
3372static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3373{
3374 int ret = 0;
3375 struct qseecom_send_cmd_req req;
3376
3377 ret = copy_from_user(&req, argp, sizeof(req));
3378 if (ret) {
3379 pr_err("copy_from_user failed\n");
3380		return -EFAULT;
3381 }
3382
3383 if (__validate_send_cmd_inputs(data, &req))
3384 return -EINVAL;
3385
3386	ret = __qseecom_send_cmd(data, &req);
3387
3391	return ret;
3392}
3393
3394int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3395 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3396 struct qseecom_dev_handle *data, int i) {
3397
3398 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3399 (req->ifd_data[i].fd > 0)) {
3400 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3401 (req->ifd_data[i].cmd_buf_offset >
3402 req->cmd_req_len - sizeof(uint32_t))) {
3403 pr_err("Invalid offset (req len) 0x%x\n",
3404 req->ifd_data[i].cmd_buf_offset);
3405 return -EINVAL;
3406 }
3407 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3408 (lstnr_resp->ifd_data[i].fd > 0)) {
3409 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3410 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3411 lstnr_resp->resp_len - sizeof(uint32_t))) {
3412 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3413 lstnr_resp->ifd_data[i].cmd_buf_offset);
3414 return -EINVAL;
3415 }
3416 }
3417 return 0;
3418}
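/*
 * Worked example for the check above (hypothetical numbers): with
 * cmd_req_len = 64, an fd's cmd_buf_offset must be at most 60, so that a
 * full 32-bit physical address can still be written at that offset.
 */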
3419
3420static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
3421 struct qseecom_dev_handle *data)
3422{
3423 struct ion_handle *ihandle;
3424 char *field;
3425 int ret = 0;
3426 int i = 0;
3427 uint32_t len = 0;
3428 struct scatterlist *sg;
3429 struct qseecom_send_modfd_cmd_req *req = NULL;
3430 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3431 struct qseecom_registered_listener_list *this_lstnr = NULL;
3432 uint32_t offset;
3433 struct sg_table *sg_ptr;
3434
3435 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3436 (data->type != QSEECOM_CLIENT_APP))
3437 return -EFAULT;
3438
3439 if (msg == NULL) {
3440 pr_err("Invalid address\n");
3441 return -EINVAL;
3442 }
3443 if (data->type == QSEECOM_LISTENER_SERVICE) {
3444 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3445 this_lstnr = __qseecom_find_svc(data->listener.id);
3446 if (IS_ERR_OR_NULL(this_lstnr)) {
3447 pr_err("Invalid listener ID\n");
3448 return -ENOMEM;
3449 }
3450 } else {
3451 req = (struct qseecom_send_modfd_cmd_req *)msg;
3452 }
3453
3454 for (i = 0; i < MAX_ION_FD; i++) {
3455 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3456 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003457 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003458 req->ifd_data[i].fd);
3459 if (IS_ERR_OR_NULL(ihandle)) {
3460 pr_err("Ion client can't retrieve the handle\n");
3461 return -ENOMEM;
3462 }
3463 field = (char *) req->cmd_req_buf +
3464 req->ifd_data[i].cmd_buf_offset;
3465 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3466 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003467 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003468 lstnr_resp->ifd_data[i].fd);
3469 if (IS_ERR_OR_NULL(ihandle)) {
3470 pr_err("Ion client can't retrieve the handle\n");
3471 return -ENOMEM;
3472 }
3473 field = lstnr_resp->resp_buf_ptr +
3474 lstnr_resp->ifd_data[i].cmd_buf_offset;
3475 } else {
3476 continue;
3477 }
3478 /* Populate the cmd data structure with the phys_addr */
3479 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3480 if (IS_ERR_OR_NULL(sg_ptr)) {
3481			pr_err("Ion client could not retrieve sg table\n");
3482 goto err;
3483 }
3484 if (sg_ptr->nents == 0) {
3485 pr_err("Num of scattered entries is 0\n");
3486 goto err;
3487 }
3488 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3489			pr_err("Num of scattered entries (%d) is greater than max supported %d\n",
3491				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3492 goto err;
3493 }
3494 sg = sg_ptr->sgl;
3495 if (sg_ptr->nents == 1) {
3496 uint32_t *update;
3497
3498 if (__boundary_checks_offset(req, lstnr_resp, data, i))
3499 goto err;
3500 if ((data->type == QSEECOM_CLIENT_APP &&
3501 (data->client.app_arch == ELFCLASS32 ||
3502 data->client.app_arch == ELFCLASS64)) ||
3503 (data->type == QSEECOM_LISTENER_SERVICE)) {
3504 /*
3505 * Check if sg list phy add region is under 4GB
3506 */
3507 if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
3508 (!cleanup) &&
3509 ((uint64_t)sg_dma_address(sg_ptr->sgl)
3510 >= PHY_ADDR_4G - sg->length)) {
3511 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3512 data->client.app_name,
3513 &(sg_dma_address(sg_ptr->sgl)),
3514 sg->length);
3515 goto err;
3516 }
3517 update = (uint32_t *) field;
3518 *update = cleanup ? 0 :
3519 (uint32_t)sg_dma_address(sg_ptr->sgl);
3520 } else {
3521 pr_err("QSEE app arch %u is not supported\n",
3522 data->client.app_arch);
3523 goto err;
3524 }
3525 len += (uint32_t)sg->length;
3526 } else {
3527 struct qseecom_sg_entry *update;
3528 int j = 0;
3529
3530 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3531 (req->ifd_data[i].fd > 0)) {
3532
3533 if ((req->cmd_req_len <
3534 SG_ENTRY_SZ * sg_ptr->nents) ||
3535 (req->ifd_data[i].cmd_buf_offset >
3536 (req->cmd_req_len -
3537 SG_ENTRY_SZ * sg_ptr->nents))) {
3538 pr_err("Invalid offset = 0x%x\n",
3539 req->ifd_data[i].cmd_buf_offset);
3540 goto err;
3541 }
3542
3543 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3544 (lstnr_resp->ifd_data[i].fd > 0)) {
3545
3546 if ((lstnr_resp->resp_len <
3547 SG_ENTRY_SZ * sg_ptr->nents) ||
3548 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3549 (lstnr_resp->resp_len -
3550 SG_ENTRY_SZ * sg_ptr->nents))) {
3551 goto err;
3552 }
3553 }
3554 if ((data->type == QSEECOM_CLIENT_APP &&
3555 (data->client.app_arch == ELFCLASS32 ||
3556 data->client.app_arch == ELFCLASS64)) ||
3557 (data->type == QSEECOM_LISTENER_SERVICE)) {
3558 update = (struct qseecom_sg_entry *)field;
3559 for (j = 0; j < sg_ptr->nents; j++) {
3560 /*
3561 * Check if sg list PA is under 4GB
3562 */
3563 if ((qseecom.qsee_version >=
3564 QSEE_VERSION_40) &&
3565 (!cleanup) &&
3566 ((uint64_t)(sg_dma_address(sg))
3567 >= PHY_ADDR_4G - sg->length)) {
3568 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3569 data->client.app_name,
3570 &(sg_dma_address(sg)),
3571 sg->length);
3572 goto err;
3573 }
3574 update->phys_addr = cleanup ? 0 :
3575 (uint32_t)sg_dma_address(sg);
3576 update->len = cleanup ? 0 : sg->length;
3577 update++;
3578 len += sg->length;
3579 sg = sg_next(sg);
3580 }
3581 } else {
3582 pr_err("QSEE app arch %u is not supported\n",
3583 data->client.app_arch);
3584 goto err;
3585 }
3586 }
3587
3588 if (cleanup) {
3589 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3590 ihandle, NULL, len,
3591 ION_IOC_INV_CACHES);
3592 if (ret) {
3593 pr_err("cache operation failed %d\n", ret);
3594 goto err;
3595 }
3596 } else {
3597 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3598 ihandle, NULL, len,
3599 ION_IOC_CLEAN_INV_CACHES);
3600 if (ret) {
3601 pr_err("cache operation failed %d\n", ret);
3602 goto err;
3603 }
3604 if (data->type == QSEECOM_CLIENT_APP) {
3605 offset = req->ifd_data[i].cmd_buf_offset;
3606 data->sglistinfo_ptr[i].indexAndFlags =
3607 SGLISTINFO_SET_INDEX_FLAG(
3608 (sg_ptr->nents == 1), 0, offset);
3609 data->sglistinfo_ptr[i].sizeOrCount =
3610 (sg_ptr->nents == 1) ?
3611 sg->length : sg_ptr->nents;
3612 data->sglist_cnt = i + 1;
3613 } else {
3614 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3615 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3616 (uintptr_t)this_lstnr->sb_virt);
3617 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3618 SGLISTINFO_SET_INDEX_FLAG(
3619 (sg_ptr->nents == 1), 0, offset);
3620 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3621 (sg_ptr->nents == 1) ?
3622 sg->length : sg_ptr->nents;
3623 this_lstnr->sglist_cnt = i + 1;
3624 }
3625 }
3626 /* Deallocate the handle */
3627 if (!IS_ERR_OR_NULL(ihandle))
3628 ion_free(qseecom.ion_clnt, ihandle);
3629 }
3630 return ret;
3631err:
3632 if (!IS_ERR_OR_NULL(ihandle))
3633 ion_free(qseecom.ion_clnt, ihandle);
3634 return -ENOMEM;
3635}
3636
3637static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3638 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3639{
3640 struct scatterlist *sg = sg_ptr->sgl;
3641 struct qseecom_sg_entry_64bit *sg_entry;
3642 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3643 void *buf;
3644 uint i;
3645 size_t size;
3646 dma_addr_t coh_pmem;
3647
3648 if (fd_idx >= MAX_ION_FD) {
3649 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3650 return -ENOMEM;
3651 }
3652 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3653 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3654 /* Allocate a contiguous kernel buffer */
3655 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3656 size = (size + PAGE_SIZE) & PAGE_MASK;
3657 buf = dma_alloc_coherent(qseecom.pdev,
3658 size, &coh_pmem, GFP_KERNEL);
3659 if (buf == NULL) {
3660 pr_err("failed to alloc memory for sg buf\n");
3661 return -ENOMEM;
3662 }
3663 /* update qseecom_sg_list_buf_hdr_64bit */
3664 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3665 buf_hdr->new_buf_phys_addr = coh_pmem;
3666 buf_hdr->nents_total = sg_ptr->nents;
3667	/* copy all sg entries into the newly allocated buffer */
3668 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3669 for (i = 0; i < sg_ptr->nents; i++) {
3670 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3671 sg_entry->len = sg->length;
3672 sg_entry++;
3673 sg = sg_next(sg);
3674 }
3675
3676 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3677 data->client.sec_buf_fd[fd_idx].vbase = buf;
3678 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3679 data->client.sec_buf_fd[fd_idx].size = size;
3680
3681 return 0;
3682}
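/*
 * Resulting layout (descriptive sketch of the helper above): only a
 * qseecom_sg_list_buf_hdr_64bit is written at 'field' inside the command
 * buffer; its new_buf_phys_addr points to a separate dma_alloc_coherent()
 * buffer holding all nents_total qseecom_sg_entry_64bit records.  The
 * coherent buffer is recorded in sec_buf_fd[] and released on the cleanup
 * pass (or error path) of __qseecom_update_cmd_buf_64().
 */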
3683
3684static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
3685 struct qseecom_dev_handle *data)
3686{
3687 struct ion_handle *ihandle;
3688 char *field;
3689 int ret = 0;
3690 int i = 0;
3691 uint32_t len = 0;
3692 struct scatterlist *sg;
3693 struct qseecom_send_modfd_cmd_req *req = NULL;
3694 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3695 struct qseecom_registered_listener_list *this_lstnr = NULL;
3696 uint32_t offset;
3697 struct sg_table *sg_ptr;
3698
3699 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3700 (data->type != QSEECOM_CLIENT_APP))
3701 return -EFAULT;
3702
3703 if (msg == NULL) {
3704 pr_err("Invalid address\n");
3705 return -EINVAL;
3706 }
3707 if (data->type == QSEECOM_LISTENER_SERVICE) {
3708 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3709 this_lstnr = __qseecom_find_svc(data->listener.id);
3710 if (IS_ERR_OR_NULL(this_lstnr)) {
3711 pr_err("Invalid listener ID\n");
3712 return -ENOMEM;
3713 }
3714 } else {
3715 req = (struct qseecom_send_modfd_cmd_req *)msg;
3716 }
3717
3718 for (i = 0; i < MAX_ION_FD; i++) {
3719 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3720 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003721 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003722 req->ifd_data[i].fd);
3723 if (IS_ERR_OR_NULL(ihandle)) {
3724 pr_err("Ion client can't retrieve the handle\n");
3725 return -ENOMEM;
3726 }
3727 field = (char *) req->cmd_req_buf +
3728 req->ifd_data[i].cmd_buf_offset;
3729 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3730 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003731 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003732 lstnr_resp->ifd_data[i].fd);
3733 if (IS_ERR_OR_NULL(ihandle)) {
3734 pr_err("Ion client can't retrieve the handle\n");
3735 return -ENOMEM;
3736 }
3737 field = lstnr_resp->resp_buf_ptr +
3738 lstnr_resp->ifd_data[i].cmd_buf_offset;
3739 } else {
3740 continue;
3741 }
3742 /* Populate the cmd data structure with the phys_addr */
3743 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3744 if (IS_ERR_OR_NULL(sg_ptr)) {
3745			pr_err("Ion client could not retrieve sg table\n");
3746 goto err;
3747 }
3748 if (sg_ptr->nents == 0) {
3749 pr_err("Num of scattered entries is 0\n");
3750 goto err;
3751 }
3752 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3753			pr_warn("Num of scattered entries (%d) is greater than %d\n",
3755				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3756 if (cleanup) {
3757 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3758 data->client.sec_buf_fd[i].vbase)
3759 dma_free_coherent(qseecom.pdev,
3760 data->client.sec_buf_fd[i].size,
3761 data->client.sec_buf_fd[i].vbase,
3762 data->client.sec_buf_fd[i].pbase);
3763 } else {
3764 ret = __qseecom_allocate_sg_list_buffer(data,
3765 field, i, sg_ptr);
3766 if (ret) {
3767 pr_err("Failed to allocate sg list buffer\n");
3768 goto err;
3769 }
3770 }
3771 len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
3772 sg = sg_ptr->sgl;
3773 goto cleanup;
3774 }
3775 sg = sg_ptr->sgl;
3776 if (sg_ptr->nents == 1) {
3777 uint64_t *update_64bit;
3778
3779 if (__boundary_checks_offset(req, lstnr_resp, data, i))
3780 goto err;
3781 /* 64bit app uses 64bit address */
3782 update_64bit = (uint64_t *) field;
3783 *update_64bit = cleanup ? 0 :
3784 (uint64_t)sg_dma_address(sg_ptr->sgl);
3785 len += (uint32_t)sg->length;
3786 } else {
3787 struct qseecom_sg_entry_64bit *update_64bit;
3788 int j = 0;
3789
3790 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3791 (req->ifd_data[i].fd > 0)) {
3792
3793 if ((req->cmd_req_len <
3794 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3795 (req->ifd_data[i].cmd_buf_offset >
3796 (req->cmd_req_len -
3797 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3798 pr_err("Invalid offset = 0x%x\n",
3799 req->ifd_data[i].cmd_buf_offset);
3800 goto err;
3801 }
3802
3803 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3804 (lstnr_resp->ifd_data[i].fd > 0)) {
3805
3806 if ((lstnr_resp->resp_len <
3807 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3808 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3809 (lstnr_resp->resp_len -
3810 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3811 goto err;
3812 }
3813 }
3814 /* 64bit app uses 64bit address */
3815 update_64bit = (struct qseecom_sg_entry_64bit *)field;
3816 for (j = 0; j < sg_ptr->nents; j++) {
3817 update_64bit->phys_addr = cleanup ? 0 :
3818 (uint64_t)sg_dma_address(sg);
3819 update_64bit->len = cleanup ? 0 :
3820 (uint32_t)sg->length;
3821 update_64bit++;
3822 len += sg->length;
3823 sg = sg_next(sg);
3824 }
3825 }
3826cleanup:
3827 if (cleanup) {
3828 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3829 ihandle, NULL, len,
3830 ION_IOC_INV_CACHES);
3831 if (ret) {
3832 pr_err("cache operation failed %d\n", ret);
3833 goto err;
3834 }
3835 } else {
3836 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3837 ihandle, NULL, len,
3838 ION_IOC_CLEAN_INV_CACHES);
3839 if (ret) {
3840 pr_err("cache operation failed %d\n", ret);
3841 goto err;
3842 }
3843 if (data->type == QSEECOM_CLIENT_APP) {
3844 offset = req->ifd_data[i].cmd_buf_offset;
3845 data->sglistinfo_ptr[i].indexAndFlags =
3846 SGLISTINFO_SET_INDEX_FLAG(
3847 (sg_ptr->nents == 1), 1, offset);
3848 data->sglistinfo_ptr[i].sizeOrCount =
3849 (sg_ptr->nents == 1) ?
3850 sg->length : sg_ptr->nents;
3851 data->sglist_cnt = i + 1;
3852 } else {
3853 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3854 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3855 (uintptr_t)this_lstnr->sb_virt);
3856 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3857 SGLISTINFO_SET_INDEX_FLAG(
3858 (sg_ptr->nents == 1), 1, offset);
3859 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3860 (sg_ptr->nents == 1) ?
3861 sg->length : sg_ptr->nents;
3862 this_lstnr->sglist_cnt = i + 1;
3863 }
3864 }
3865 /* Deallocate the handle */
3866 if (!IS_ERR_OR_NULL(ihandle))
3867 ion_free(qseecom.ion_clnt, ihandle);
3868 }
3869 return ret;
3870err:
3871 for (i = 0; i < MAX_ION_FD; i++)
3872 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3873 data->client.sec_buf_fd[i].vbase)
3874 dma_free_coherent(qseecom.pdev,
3875 data->client.sec_buf_fd[i].size,
3876 data->client.sec_buf_fd[i].vbase,
3877 data->client.sec_buf_fd[i].pbase);
3878 if (!IS_ERR_OR_NULL(ihandle))
3879 ion_free(qseecom.ion_clnt, ihandle);
3880 return -ENOMEM;
3881}
3882
3883static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3884 void __user *argp,
3885 bool is_64bit_addr)
3886{
3887 int ret = 0;
3888 int i;
3889 struct qseecom_send_modfd_cmd_req req;
3890 struct qseecom_send_cmd_req send_cmd_req;
3891
3892 ret = copy_from_user(&req, argp, sizeof(req));
3893 if (ret) {
3894 pr_err("copy_from_user failed\n");
3895		return -EFAULT;
3896 }
3897
3898 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3899 send_cmd_req.cmd_req_len = req.cmd_req_len;
3900 send_cmd_req.resp_buf = req.resp_buf;
3901 send_cmd_req.resp_len = req.resp_len;
3902
3903 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3904 return -EINVAL;
3905
3906 /* validate offsets */
3907 for (i = 0; i < MAX_ION_FD; i++) {
3908 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3909 pr_err("Invalid offset %d = 0x%x\n",
3910 i, req.ifd_data[i].cmd_buf_offset);
3911 return -EINVAL;
3912 }
3913 }
3914 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3915 (uintptr_t)req.cmd_req_buf);
3916 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3917 (uintptr_t)req.resp_buf);
3918
3919 if (!is_64bit_addr) {
3920 ret = __qseecom_update_cmd_buf(&req, false, data);
3921 if (ret)
3922 return ret;
3923 ret = __qseecom_send_cmd(data, &send_cmd_req);
3924 if (ret)
3925 return ret;
3926 ret = __qseecom_update_cmd_buf(&req, true, data);
3927 if (ret)
3928 return ret;
3929 } else {
3930 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3931 if (ret)
3932 return ret;
3933 ret = __qseecom_send_cmd(data, &send_cmd_req);
3934 if (ret)
3935 return ret;
3936 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3937 if (ret)
3938 return ret;
3939 }
3940
3941 return ret;
3942}
3943
3944static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3945 void __user *argp)
3946{
3947 return __qseecom_send_modfd_cmd(data, argp, false);
3948}
3949
3950static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
3951 void __user *argp)
3952{
3953 return __qseecom_send_modfd_cmd(data, argp, true);
3954}
3955
3956
3958static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3959 struct qseecom_registered_listener_list *svc)
3960{
3961 int ret;
3962
Zhen Kongf5087172018-10-11 17:22:05 -07003963 ret = (svc->rcv_req_flag == 1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08003964 return ret || data->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003965}
3966
3967static int qseecom_receive_req(struct qseecom_dev_handle *data)
3968{
3969 int ret = 0;
3970 struct qseecom_registered_listener_list *this_lstnr;
3971
Zhen Kongbcdeda22018-11-16 13:50:51 -08003972 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003973 this_lstnr = __qseecom_find_svc(data->listener.id);
3974 if (!this_lstnr) {
3975 pr_err("Invalid listener ID\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08003976 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003977 return -ENODATA;
3978 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08003979 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003980
3981 while (1) {
3982 if (wait_event_freezable(this_lstnr->rcv_req_wq,
3983 __qseecom_listener_has_rcvd_req(data,
3984 this_lstnr))) {
Zhen Kong25731112018-09-20 13:10:03 -07003985 pr_warn("Interrupted: exiting Listener Service = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003986 (uint32_t)data->listener.id);
3987 /* woken up for different reason */
3988 return -ERESTARTSYS;
3989 }
3990
Zhen Kongbcdeda22018-11-16 13:50:51 -08003991 if (data->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003992 pr_err("Aborting Listener Service = %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07003993 (uint32_t)data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003994 return -ENODEV;
3995 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08003996 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003997 this_lstnr->rcv_req_flag = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08003998 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003999 break;
4000 }
4001 return ret;
4002}
4003
4004static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
4005{
4006 unsigned char app_arch = 0;
4007 struct elf32_hdr *ehdr;
4008 struct elf64_hdr *ehdr64;
4009
4010 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4011
4012 switch (app_arch) {
4013 case ELFCLASS32: {
4014 ehdr = (struct elf32_hdr *)fw_entry->data;
4015 if (fw_entry->size < sizeof(*ehdr)) {
4016 pr_err("%s: Not big enough to be an elf32 header\n",
4017 qseecom.pdev->init_name);
4018 return false;
4019 }
4020 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
4021 pr_err("%s: Not an elf32 header\n",
4022 qseecom.pdev->init_name);
4023 return false;
4024 }
4025 if (ehdr->e_phnum == 0) {
4026 pr_err("%s: No loadable segments\n",
4027 qseecom.pdev->init_name);
4028 return false;
4029 }
4030 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
4031 sizeof(struct elf32_hdr) > fw_entry->size) {
4032 pr_err("%s: Program headers not within mdt\n",
4033 qseecom.pdev->init_name);
4034 return false;
4035 }
4036 break;
4037 }
4038 case ELFCLASS64: {
4039 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4040 if (fw_entry->size < sizeof(*ehdr64)) {
4041 pr_err("%s: Not big enough to be an elf64 header\n",
4042 qseecom.pdev->init_name);
4043 return false;
4044 }
4045 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
4046 pr_err("%s: Not an elf64 header\n",
4047 qseecom.pdev->init_name);
4048 return false;
4049 }
4050 if (ehdr64->e_phnum == 0) {
4051 pr_err("%s: No loadable segments\n",
4052 qseecom.pdev->init_name);
4053 return false;
4054 }
4055 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
4056 sizeof(struct elf64_hdr) > fw_entry->size) {
4057 pr_err("%s: Program headers not within mdt\n",
4058 qseecom.pdev->init_name);
4059 return false;
4060 }
4061 break;
4062 }
4063 default: {
4064 pr_err("QSEE app arch %u is not supported\n", app_arch);
4065 return false;
4066 }
4067 }
4068 return true;
4069}
4070
4071static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
4072 uint32_t *app_arch)
4073{
4074 int ret = -1;
4075 int i = 0, rc = 0;
4076 const struct firmware *fw_entry = NULL;
4077 char fw_name[MAX_APP_NAME_SIZE];
4078 struct elf32_hdr *ehdr;
4079 struct elf64_hdr *ehdr64;
4080 int num_images = 0;
4081
4082 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4083 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4084 if (rc) {
4085 pr_err("error with request_firmware\n");
4086 ret = -EIO;
4087 goto err;
4088 }
4089 if (!__qseecom_is_fw_image_valid(fw_entry)) {
4090 ret = -EIO;
4091 goto err;
4092 }
4093 *app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4094 *fw_size = fw_entry->size;
4095 if (*app_arch == ELFCLASS32) {
4096 ehdr = (struct elf32_hdr *)fw_entry->data;
4097 num_images = ehdr->e_phnum;
4098 } else if (*app_arch == ELFCLASS64) {
4099 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4100 num_images = ehdr64->e_phnum;
4101 } else {
4102 pr_err("QSEE %s app, arch %u is not supported\n",
4103 appname, *app_arch);
4104 ret = -EIO;
4105 goto err;
4106 }
4107 pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
4108 release_firmware(fw_entry);
4109 fw_entry = NULL;
4110 for (i = 0; i < num_images; i++) {
4111 memset(fw_name, 0, sizeof(fw_name));
4112 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4113 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4114 if (ret)
4115 goto err;
4116 if (*fw_size > U32_MAX - fw_entry->size) {
4117 pr_err("QSEE %s app file size overflow\n", appname);
4118 ret = -EINVAL;
4119 goto err;
4120 }
4121 *fw_size += fw_entry->size;
4122 release_firmware(fw_entry);
4123 fw_entry = NULL;
4124 }
4125
4126 return ret;
4127err:
4128 if (fw_entry)
4129 release_firmware(fw_entry);
4130 *fw_size = 0;
4131 return ret;
4132}
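/*
 * Firmware naming assumed by the helpers above and below: a TA named
 * "foo" is requested as a split image "foo.mdt" (ELF and program headers)
 * plus one blob per program header entry, "foo.b00", "foo.b01", ...
 */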
4133
4134static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
4135 uint32_t fw_size,
4136 struct qseecom_load_app_ireq *load_req)
4137{
4138 int ret = -1;
4139 int i = 0, rc = 0;
4140 const struct firmware *fw_entry = NULL;
4141 char fw_name[MAX_APP_NAME_SIZE];
4142 u8 *img_data_ptr = img_data;
4143 struct elf32_hdr *ehdr;
4144 struct elf64_hdr *ehdr64;
4145 int num_images = 0;
4146 unsigned char app_arch = 0;
4147
4148 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4149 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4150 if (rc) {
4151 ret = -EIO;
4152 goto err;
4153 }
4154
4155 load_req->img_len = fw_entry->size;
4156 if (load_req->img_len > fw_size) {
4157 pr_err("app %s size %zu is larger than buf size %u\n",
4158 appname, fw_entry->size, fw_size);
4159 ret = -EINVAL;
4160 goto err;
4161 }
4162 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4163 img_data_ptr = img_data_ptr + fw_entry->size;
4164 load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/
4165
4166 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4167 if (app_arch == ELFCLASS32) {
4168 ehdr = (struct elf32_hdr *)fw_entry->data;
4169 num_images = ehdr->e_phnum;
4170 } else if (app_arch == ELFCLASS64) {
4171 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4172 num_images = ehdr64->e_phnum;
4173 } else {
4174 pr_err("QSEE %s app, arch %u is not supported\n",
4175 appname, app_arch);
4176 ret = -EIO;
4177 goto err;
4178 }
4179 release_firmware(fw_entry);
4180 fw_entry = NULL;
4181 for (i = 0; i < num_images; i++) {
4182 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4183 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4184 if (ret) {
4185 pr_err("Failed to locate blob %s\n", fw_name);
4186 goto err;
4187 }
4188 if ((fw_entry->size > U32_MAX - load_req->img_len) ||
4189 (fw_entry->size + load_req->img_len > fw_size)) {
4190 pr_err("Invalid file size for %s\n", fw_name);
4191 ret = -EINVAL;
4192 goto err;
4193 }
4194 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4195 img_data_ptr = img_data_ptr + fw_entry->size;
4196 load_req->img_len += fw_entry->size;
4197 release_firmware(fw_entry);
4198 fw_entry = NULL;
4199 }
4200 return ret;
4201err:
4202 release_firmware(fw_entry);
4203 return ret;
4204}
4205
4206static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
4207 u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
4208{
4209 size_t len = 0;
4210 int ret = 0;
4211 ion_phys_addr_t pa;
4212 struct ion_handle *ihandle = NULL;
4213 u8 *img_data = NULL;
Zhen Kong3dd92792017-12-08 09:47:15 -08004214 int retry = 0;
Zhen Konge30e1342019-01-22 08:57:02 -08004215 int ion_flag = ION_FLAG_CACHED;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004216
Zhen Kong3dd92792017-12-08 09:47:15 -08004217 do {
Zhen Kong5d02be92018-05-29 16:17:29 -07004218 if (retry++) {
4219 mutex_unlock(&app_access_lock);
Zhen Kong3dd92792017-12-08 09:47:15 -08004220 msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
Zhen Kong5d02be92018-05-29 16:17:29 -07004221 mutex_lock(&app_access_lock);
4222 }
Zhen Kong3dd92792017-12-08 09:47:15 -08004223 ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
Zhen Konge30e1342019-01-22 08:57:02 -08004224 SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), ion_flag);
Zhen Kong3dd92792017-12-08 09:47:15 -08004225 } while (IS_ERR_OR_NULL(ihandle) &&
4226 (retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004227
4228 if (IS_ERR_OR_NULL(ihandle)) {
4229 pr_err("ION alloc failed\n");
4230 return -ENOMEM;
4231 }
4232 img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
4233 ihandle);
4234
4235 if (IS_ERR_OR_NULL(img_data)) {
4236 pr_err("ION memory mapping for image loading failed\n");
4237 ret = -ENOMEM;
4238 goto exit_ion_free;
4239 }
4240 /* Get the physical address of the ION BUF */
4241 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
4242 if (ret) {
4243 pr_err("physical memory retrieval failure\n");
4244 ret = -EIO;
4245 goto exit_ion_unmap_kernel;
4246 }
4247
4248 *pihandle = ihandle;
4249 *data = img_data;
4250 *paddr = pa;
4251 return ret;
4252
4253exit_ion_unmap_kernel:
4254 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
4255exit_ion_free:
4256 ion_free(qseecom.ion_clnt, ihandle);
4257 ihandle = NULL;
4258 return ret;
4259}
4260
4261static void __qseecom_free_img_data(struct ion_handle **ihandle)
4262{
4263 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4264 ion_free(qseecom.ion_clnt, *ihandle);
4265 *ihandle = NULL;
4266}
4267
4268static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4269 uint32_t *app_id)
4270{
4271 int ret = -1;
4272 uint32_t fw_size = 0;
4273 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4274 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4275 struct qseecom_command_scm_resp resp;
4276 u8 *img_data = NULL;
4277 ion_phys_addr_t pa = 0;
4278 struct ion_handle *ihandle = NULL;
4279 void *cmd_buf = NULL;
4280 size_t cmd_len;
4281 uint32_t app_arch = 0;
4282
4283 if (!data || !appname || !app_id) {
4284 pr_err("Null pointer to data or appname or appid\n");
4285 return -EINVAL;
4286 }
4287 *app_id = 0;
4288 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4289 return -EIO;
4290 data->client.app_arch = app_arch;
4291
4292 /* Check and load cmnlib */
4293 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4294 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4295 ret = qseecom_load_commonlib_image(data, "cmnlib");
4296 if (ret) {
4297 pr_err("failed to load cmnlib\n");
4298 return -EIO;
4299 }
4300 qseecom.commonlib_loaded = true;
4301 pr_debug("cmnlib is loaded\n");
4302 }
4303
4304 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4305 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4306 if (ret) {
4307 pr_err("failed to load cmnlib64\n");
4308 return -EIO;
4309 }
4310 qseecom.commonlib64_loaded = true;
4311 pr_debug("cmnlib64 is loaded\n");
4312 }
4313 }
4314
4315 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4316 if (ret)
4317 return ret;
4318
4319 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4320 if (ret) {
4321 ret = -EIO;
4322 goto exit_free_img_data;
4323 }
4324
4325 /* Populate the load_req parameters */
4326 if (qseecom.qsee_version < QSEE_VERSION_40) {
4327 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4328		/* mdt_len and img_len were already populated by __qseecom_get_fw_data() */
4330 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4331 load_req.phy_addr = (uint32_t)pa;
4332 cmd_buf = (void *)&load_req;
4333 cmd_len = sizeof(struct qseecom_load_app_ireq);
4334 } else {
4335 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4336 load_req_64bit.mdt_len = load_req.mdt_len;
4337 load_req_64bit.img_len = load_req.img_len;
4338 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4339 load_req_64bit.phy_addr = (uint64_t)pa;
4340 cmd_buf = (void *)&load_req_64bit;
4341 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4342 }
4343
4344 if (qseecom.support_bus_scaling) {
4345 mutex_lock(&qsee_bw_mutex);
4346 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4347 mutex_unlock(&qsee_bw_mutex);
4348 if (ret) {
4349 ret = -EIO;
4350 goto exit_free_img_data;
4351 }
4352 }
4353
4354 ret = __qseecom_enable_clk_scale_up(data);
4355 if (ret) {
4356 ret = -EIO;
4357 goto exit_unregister_bus_bw_need;
4358 }
4359
4360 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4361 img_data, fw_size,
4362 ION_IOC_CLEAN_INV_CACHES);
4363 if (ret) {
4364 pr_err("cache operation failed %d\n", ret);
4365 goto exit_disable_clk_vote;
4366 }
4367
4368 /* SCM_CALL to load the image */
4369 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4370 &resp, sizeof(resp));
4371 if (ret) {
Zhen Kong5d02be92018-05-29 16:17:29 -07004372 pr_err("scm_call to load failed : ret %d, result %x\n",
4373 ret, resp.result);
4374 if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
4375 ret = -EEXIST;
4376 else
4377 ret = -EIO;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004378 goto exit_disable_clk_vote;
4379 }
4380
4381 switch (resp.result) {
4382 case QSEOS_RESULT_SUCCESS:
4383 *app_id = resp.data;
4384 break;
4385 case QSEOS_RESULT_INCOMPLETE:
4386 ret = __qseecom_process_incomplete_cmd(data, &resp);
4387 if (ret)
4388 pr_err("process_incomplete_cmd FAILED\n");
4389 else
4390 *app_id = resp.data;
4391 break;
4392 case QSEOS_RESULT_FAILURE:
4393		pr_err("scm call failed with response QSEOS_RESULT_FAILURE\n");
		ret = -EFAULT;
4394		break;
4395 default:
4396 pr_err("scm call return unknown response %d\n", resp.result);
4397 ret = -EINVAL;
4398 break;
4399 }
4400
4401exit_disable_clk_vote:
4402 __qseecom_disable_clk_scale_down(data);
4403
4404exit_unregister_bus_bw_need:
4405 if (qseecom.support_bus_scaling) {
4406 mutex_lock(&qsee_bw_mutex);
4407 qseecom_unregister_bus_bandwidth_needs(data);
4408 mutex_unlock(&qsee_bw_mutex);
4409 }
4410
4411exit_free_img_data:
4412 __qseecom_free_img_data(&ihandle);
4413 return ret;
4414}
4415
4416static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
4417 char *cmnlib_name)
4418{
4419 int ret = 0;
4420 uint32_t fw_size = 0;
4421 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4422 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4423 struct qseecom_command_scm_resp resp;
4424 u8 *img_data = NULL;
4425 ion_phys_addr_t pa = 0;
4426 void *cmd_buf = NULL;
4427 size_t cmd_len;
4428 uint32_t app_arch = 0;
Zhen Kong3bafb312017-10-18 10:27:20 -07004429 struct ion_handle *cmnlib_ion_handle = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004430
4431 if (!cmnlib_name) {
4432 pr_err("cmnlib_name is NULL\n");
4433 return -EINVAL;
4434 }
4435 if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
4436 pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
4437 cmnlib_name, strlen(cmnlib_name));
4438 return -EINVAL;
4439 }
4440
4441 if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
4442 return -EIO;
4443
Zhen Kong3bafb312017-10-18 10:27:20 -07004444 ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004445 &img_data, fw_size, &pa);
4446 if (ret)
4447 return -EIO;
4448
4449 ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
4450 if (ret) {
4451 ret = -EIO;
4452 goto exit_free_img_data;
4453 }
4454 if (qseecom.qsee_version < QSEE_VERSION_40) {
4455 load_req.phy_addr = (uint32_t)pa;
4456 load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4457 cmd_buf = (void *)&load_req;
4458 cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
4459 } else {
4460 load_req_64bit.phy_addr = (uint64_t)pa;
4461 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4462 load_req_64bit.img_len = load_req.img_len;
4463 load_req_64bit.mdt_len = load_req.mdt_len;
4464 cmd_buf = (void *)&load_req_64bit;
4465 cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
4466 }
4467
4468 if (qseecom.support_bus_scaling) {
4469 mutex_lock(&qsee_bw_mutex);
4470 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4471 mutex_unlock(&qsee_bw_mutex);
4472 if (ret) {
4473 ret = -EIO;
4474 goto exit_free_img_data;
4475 }
4476 }
4477
4478 /* Vote for the SFPB clock */
4479 ret = __qseecom_enable_clk_scale_up(data);
4480 if (ret) {
4481 ret = -EIO;
4482 goto exit_unregister_bus_bw_need;
4483 }
4484
Zhen Kong3bafb312017-10-18 10:27:20 -07004485 ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004486 img_data, fw_size,
4487 ION_IOC_CLEAN_INV_CACHES);
4488 if (ret) {
4489 pr_err("cache operation failed %d\n", ret);
4490 goto exit_disable_clk_vote;
4491 }
4492
4493 /* SCM_CALL to load the image */
4494 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4495 &resp, sizeof(resp));
4496 if (ret) {
4497 pr_err("scm_call to load failed : ret %d\n", ret);
4498 ret = -EIO;
4499 goto exit_disable_clk_vote;
4500 }
4501
4502 switch (resp.result) {
4503 case QSEOS_RESULT_SUCCESS:
4504 break;
4505 case QSEOS_RESULT_FAILURE:
4506 pr_err("scm call failed w/response result%d\n", resp.result);
4507 ret = -EINVAL;
4508 goto exit_disable_clk_vote;
4509 case QSEOS_RESULT_INCOMPLETE:
4510 ret = __qseecom_process_incomplete_cmd(data, &resp);
4511 if (ret) {
4512 pr_err("process_incomplete_cmd failed err: %d\n", ret);
4513 goto exit_disable_clk_vote;
4514 }
4515 break;
4516 default:
4517 pr_err("scm call return unknown response %d\n", resp.result);
4518 ret = -EINVAL;
4519 goto exit_disable_clk_vote;
4520 }
4521
4522exit_disable_clk_vote:
4523 __qseecom_disable_clk_scale_down(data);
4524
4525exit_unregister_bus_bw_need:
4526 if (qseecom.support_bus_scaling) {
4527 mutex_lock(&qsee_bw_mutex);
4528 qseecom_unregister_bus_bandwidth_needs(data);
4529 mutex_unlock(&qsee_bw_mutex);
4530 }
4531
4532exit_free_img_data:
Zhen Kong3bafb312017-10-18 10:27:20 -07004533 __qseecom_free_img_data(&cmnlib_ion_handle);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004534 return ret;
4535}
4536
4537static int qseecom_unload_commonlib_image(void)
4538{
4539 int ret = -EINVAL;
4540 struct qseecom_unload_lib_image_ireq unload_req = {0};
4541 struct qseecom_command_scm_resp resp;
4542
4543 /* Populate the remaining parameters */
4544 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4545
4546 /* SCM_CALL to load the image */
4547 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4548 sizeof(struct qseecom_unload_lib_image_ireq),
4549 &resp, sizeof(resp));
4550 if (ret) {
4551 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4552 ret = -EIO;
4553 } else {
4554 switch (resp.result) {
4555 case QSEOS_RESULT_SUCCESS:
4556 break;
4557 case QSEOS_RESULT_FAILURE:
4558			pr_err("scm call failed, resp.result QSEOS_RESULT_FAILURE\n");
4559 break;
4560 default:
4561 pr_err("scm call return unknown response %d\n",
4562 resp.result);
4563 ret = -EINVAL;
4564 break;
4565 }
4566 }
4567
4568 return ret;
4569}
4570
4571int qseecom_start_app(struct qseecom_handle **handle,
4572 char *app_name, uint32_t size)
4573{
4574 int32_t ret = 0;
4575 unsigned long flags = 0;
4576 struct qseecom_dev_handle *data = NULL;
4577 struct qseecom_check_app_ireq app_ireq;
4578 struct qseecom_registered_app_list *entry = NULL;
4579 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4580 bool found_app = false;
4581 size_t len;
4582 ion_phys_addr_t pa;
4583 uint32_t fw_size, app_arch;
4584 uint32_t app_id = 0;
4585
Zhen Kongc4c162a2019-01-23 12:07:12 -08004586 __wakeup_unregister_listener_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004587
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004588 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4589 pr_err("Not allowed to be called in %d state\n",
4590 atomic_read(&qseecom.qseecom_state));
4591 return -EPERM;
4592 }
4593 if (!app_name) {
4594 pr_err("failed to get the app name\n");
4595 return -EINVAL;
4596 }
4597
Zhen Kong64a6d7282017-06-16 11:55:07 -07004598 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004599 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004600 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004601 return -EINVAL;
4602 }
4603
4604 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4605 if (!(*handle))
4606 return -ENOMEM;
4607
4608 data = kzalloc(sizeof(*data), GFP_KERNEL);
4609 if (!data) {
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304610 ret = -ENOMEM;
4611 goto exit_handle_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004612 }
4613 data->abort = 0;
4614 data->type = QSEECOM_CLIENT_APP;
4615 data->released = false;
4616 data->client.sb_length = size;
4617 data->client.user_virt_sb_base = 0;
4618 data->client.ihandle = NULL;
4619
4620 init_waitqueue_head(&data->abort_wq);
4621
4622 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4623 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4624 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4625 pr_err("Ion client could not retrieve the handle\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304626 ret = -ENOMEM;
4627 goto exit_data_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004628 }
4629 mutex_lock(&app_access_lock);
4630
Zhen Kong5d02be92018-05-29 16:17:29 -07004631recheck:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004632 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4633 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4634 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4635 if (ret)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304636 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004637
4638 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4639 if (app_id) {
4640 pr_warn("App id %d for [%s] app exists\n", app_id,
4641 (char *)app_ireq.app_name);
4642 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4643 list_for_each_entry(entry,
4644 &qseecom.registered_app_list_head, list){
4645 if (entry->app_id == app_id) {
4646 entry->ref_cnt++;
4647 found_app = true;
4648 break;
4649 }
4650 }
4651 spin_unlock_irqrestore(
4652 &qseecom.registered_app_list_lock, flags);
4653 if (!found_app)
4654 pr_warn("App_id %d [%s] was loaded but not registered\n",
4655				app_id, (char *)app_ireq.app_name);
4656 } else {
4657 /* load the app and get the app_id */
4658		pr_debug("%s: Loading app for the first time\n",
4659 qseecom.pdev->init_name);
4660 ret = __qseecom_load_fw(data, app_name, &app_id);
Zhen Kong5d02be92018-05-29 16:17:29 -07004661 if (ret == -EEXIST) {
4662 pr_err("recheck if TA %s is loaded\n", app_name);
4663 goto recheck;
4664 } else if (ret < 0)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304665 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004666 }
4667 data->client.app_id = app_id;
4668 if (!found_app) {
4669 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4670 if (!entry) {
4671 pr_err("kmalloc for app entry failed\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304672 ret = -ENOMEM;
4673 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004674 }
4675 entry->app_id = app_id;
4676 entry->ref_cnt = 1;
4677 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4678 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4679 ret = -EIO;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304680 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004681 }
4682 entry->app_arch = app_arch;
4683 entry->app_blocked = false;
4684 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07004685 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004686 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4687 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4688 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4689 flags);
4690 }
4691
4692 /* Get the physical address of the ION BUF */
4693 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4694 if (ret) {
4695 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4696 ret);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304697 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004698 }
4699
4700 /* Populate the structure for sending scm call to load image */
4701 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4702 data->client.ihandle);
4703 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4704 pr_err("ION memory mapping for client shared buf failed\n");
4705 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304706 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004707 }
4708 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4709 data->client.sb_phys = (phys_addr_t)pa;
4710 (*handle)->dev = (void *)data;
4711 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4712 (*handle)->sbuf_len = data->client.sb_length;
4713
4714 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4715 if (!kclient_entry) {
4716 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304717 goto exit_ion_unmap_kernel;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004718 }
4719 kclient_entry->handle = *handle;
4720
4721 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4722 list_add_tail(&kclient_entry->list,
4723 &qseecom.registered_kclient_list_head);
4724 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4725
4726 mutex_unlock(&app_access_lock);
4727 return 0;
4728
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304729exit_ion_unmap_kernel:
4730 if (!IS_ERR_OR_NULL(data->client.ihandle))
4731 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
4732exit_entry_free:
4733 kfree(entry);
4734exit_ion_free:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004735 mutex_unlock(&app_access_lock);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304736 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
4737 ion_free(qseecom.ion_clnt, data->client.ihandle);
4738 data->client.ihandle = NULL;
4739 }
4740exit_data_free:
4741 kfree(data);
4742exit_handle_free:
4743 if (*handle) {
4744 kfree(*handle);
4745 *handle = NULL;
4746 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004747 return ret;
4748}
4749EXPORT_SYMBOL(qseecom_start_app);
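/*
 * Note for in-kernel clients (descriptive only): on success, *handle
 * exposes a 'size'-byte ION shared buffer via (*handle)->sbuf and
 * (*handle)->sbuf_len; request and response buffers passed to
 * qseecom_send_command() must be carved out of that buffer.  See the
 * usage sketch after qseecom_send_command() below.
 */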
4750
4751int qseecom_shutdown_app(struct qseecom_handle **handle)
4752{
4753 int ret = -EINVAL;
4754 struct qseecom_dev_handle *data;
4755
4756 struct qseecom_registered_kclient_list *kclient = NULL;
4757 unsigned long flags = 0;
4758 bool found_handle = false;
4759
Zhen Kongc4c162a2019-01-23 12:07:12 -08004760 __wakeup_unregister_listener_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004761
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004762 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4763 pr_err("Not allowed to be called in %d state\n",
4764 atomic_read(&qseecom.qseecom_state));
4765 return -EPERM;
4766 }
4767
4768 if ((handle == NULL) || (*handle == NULL)) {
4769 pr_err("Handle is not initialized\n");
4770 return -EINVAL;
4771 }
4772 data = (struct qseecom_dev_handle *) ((*handle)->dev);
4773 mutex_lock(&app_access_lock);
4774
4775 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4776 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
4777 list) {
4778 if (kclient->handle == (*handle)) {
4779 list_del(&kclient->list);
4780 found_handle = true;
4781 break;
4782 }
4783 }
4784 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4785 if (!found_handle)
4786 pr_err("Unable to find the handle, exiting\n");
4787 else
4788 ret = qseecom_unload_app(data, false);
4789
4790 mutex_unlock(&app_access_lock);
4791 if (ret == 0) {
4792 kzfree(data);
4793 kzfree(*handle);
4794 kzfree(kclient);
4795 *handle = NULL;
4796 }
4797
4798 return ret;
4799}
4800EXPORT_SYMBOL(qseecom_shutdown_app);
4801
4802int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
4803 uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
4804{
4805 int ret = 0;
4806 struct qseecom_send_cmd_req req = {0, 0, 0, 0};
4807 struct qseecom_dev_handle *data;
4808 bool perf_enabled = false;
4809
Zhen Kongc4c162a2019-01-23 12:07:12 -08004810 __wakeup_unregister_listener_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004811
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004812 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4813 pr_err("Not allowed to be called in %d state\n",
4814 atomic_read(&qseecom.qseecom_state));
4815 return -EPERM;
4816 }
4817
4818 if (handle == NULL) {
4819 pr_err("Handle is not initialized\n");
4820 return -EINVAL;
4821 }
4822 data = handle->dev;
4823
4824 req.cmd_req_len = sbuf_len;
4825 req.resp_len = rbuf_len;
4826 req.cmd_req_buf = send_buf;
4827 req.resp_buf = resp_buf;
4828
4829 if (__validate_send_cmd_inputs(data, &req))
4830 return -EINVAL;
4831
4832 mutex_lock(&app_access_lock);
4833 if (qseecom.support_bus_scaling) {
4834 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
4835 if (ret) {
4836 pr_err("Failed to set bw.\n");
4837 mutex_unlock(&app_access_lock);
4838 return ret;
4839 }
4840 }
4841 /*
4842 * On targets where crypto clock is handled by HLOS,
4843 * if clk_access_cnt is zero and perf_enabled is false,
4844 * then the crypto clock was not enabled before sending cmd
4845 * to tz, qseecom will enable the clock to avoid service failure.
4846 */
4847 if (!qseecom.no_clock_support &&
4848 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
4849 pr_debug("ce clock is not enabled!\n");
4850 ret = qseecom_perf_enable(data);
4851 if (ret) {
4852 pr_err("Failed to vote for clock with err %d\n",
4853 ret);
4854 mutex_unlock(&app_access_lock);
4855 return -EINVAL;
4856 }
4857 perf_enabled = true;
4858 }
4859 if (!strcmp(data->client.app_name, "securemm"))
4860 data->use_legacy_cmd = true;
4861
4862 ret = __qseecom_send_cmd(data, &req);
4863 data->use_legacy_cmd = false;
4864 if (qseecom.support_bus_scaling)
4865 __qseecom_add_bw_scale_down_timer(
4866 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
4867
4868 if (perf_enabled) {
4869 qsee_disable_clock_vote(data, CLK_DFAB);
4870 qsee_disable_clock_vote(data, CLK_SFPB);
4871 }
4872
4873 mutex_unlock(&app_access_lock);
4874
4875 if (ret)
4876 return ret;
4877
4878 pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
4879 req.resp_len, req.resp_buf);
4880 return ret;
4881}
4882EXPORT_SYMBOL(qseecom_send_command);
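/*
 * Minimal usage sketch for the kernel client API exported above.  It is
 * illustrative only and compiled out: the TA name "sampleapp" and the
 * 64-byte request/response split are hypothetical, and a real client
 * would marshal its own TA-specific structures into the shared buffer.
 */
#if 0
static int qseecom_client_example(void)
{
	struct qseecom_handle *handle = NULL;
	const uint32_t req_len = 64, rsp_len = 64;	/* hypothetical sizes */
	void *req, *rsp;
	int ret;

	/* Load (or attach to) the TA and get a PAGE_SIZE shared buffer */
	ret = qseecom_start_app(&handle, "sampleapp", PAGE_SIZE);
	if (ret)
		return ret;

	/* Both buffers must live inside handle->sbuf (see validation above) */
	req = handle->sbuf;
	rsp = handle->sbuf + req_len;
	memset(req, 0, req_len);	/* fill in the TA command here */

	ret = qseecom_send_command(handle, req, req_len, rsp, rsp_len);
	if (ret)
		pr_err("sampleapp command failed: %d\n", ret);

	qseecom_shutdown_app(&handle);
	return ret;
}
#endif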
4883
4884int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4885{
4886 int ret = 0;
4887
4888 if ((handle == NULL) || (handle->dev == NULL)) {
4889 pr_err("No valid kernel client\n");
4890 return -EINVAL;
4891 }
4892 if (high) {
4893 if (qseecom.support_bus_scaling) {
4894 mutex_lock(&qsee_bw_mutex);
4895 __qseecom_register_bus_bandwidth_needs(handle->dev,
4896 HIGH);
4897 mutex_unlock(&qsee_bw_mutex);
4898 } else {
4899 ret = qseecom_perf_enable(handle->dev);
4900 if (ret)
4901 pr_err("Failed to vote for clock with err %d\n",
4902 ret);
4903 }
4904 } else {
4905 if (!qseecom.support_bus_scaling) {
4906 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4907 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4908 } else {
4909 mutex_lock(&qsee_bw_mutex);
4910 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4911 mutex_unlock(&qsee_bw_mutex);
4912 }
4913 }
4914 return ret;
4915}
4916EXPORT_SYMBOL(qseecom_set_bandwidth);
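/*
 * Sketch only: a client about to issue a burst of crypto-heavy commands
 * may bracket the burst with bandwidth votes, e.g.
 *
 *	qseecom_set_bandwidth(handle, true);	- vote high before the burst
 *	... qseecom_send_command() calls ...
 *	qseecom_set_bandwidth(handle, false);	- drop the vote afterwards
 *
 * where "handle" is the qseecom_handle obtained from qseecom_start_app().
 */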
4917
4918int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
4919{
4920 struct qseecom_registered_app_list dummy_app_entry = { {0} };
4921 struct qseecom_dev_handle dummy_private_data = {0};
4922 struct qseecom_command_scm_resp resp;
4923 int ret = 0;
4924
4925 if (!desc) {
4926 pr_err("desc is NULL\n");
4927 return -EINVAL;
4928 }
4929
4930 resp.result = desc->ret[0]; /*req_cmd*/
Zhen Kong2f60f492017-06-29 15:22:14 -07004931 resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004932 resp.data = desc->ret[2]; /*listener_id*/
4933
Zhen Konge7f525f2017-12-01 18:26:25 -08004934 dummy_private_data.client.app_id = desc->ret[1];
4935 dummy_app_entry.app_id = desc->ret[1];
4936
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004937 mutex_lock(&app_access_lock);
Zhen Kong7458c2e2017-10-19 12:32:07 -07004938 if (qseecom.qsee_reentrancy_support)
4939 ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004940 &dummy_private_data);
Zhen Kong7458c2e2017-10-19 12:32:07 -07004941 else
4942 ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
4943 &resp);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004944 mutex_unlock(&app_access_lock);
4945 if (ret)
Zhen Kong2f60f492017-06-29 15:22:14 -07004946 pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004947 (int)desc->ret[0], (int)desc->ret[2],
4948 (int)desc->ret[1], ret);
4949 desc->ret[0] = resp.result;
4950 desc->ret[1] = resp.resp_type;
4951 desc->ret[2] = resp.data;
4952 return ret;
4953}
4954EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
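
/*
 * Note, derived from the code above: on entry desc->ret[0..2] carry the TZ
 * result, the blocked session/app id and the listener id passed in by the
 * smcinvoke driver; on return they are overwritten with resp.result,
 * resp.resp_type and resp.data so the caller can resume the blocked
 * invocation with the listener's response.
 */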
4955
4956static int qseecom_send_resp(void)
4957{
4958 qseecom.send_resp_flag = 1;
4959 wake_up_interruptible(&qseecom.send_resp_wq);
4960 return 0;
4961}
4962
4963static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4964{
4965 struct qseecom_registered_listener_list *this_lstnr = NULL;
4966
4967 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4968 this_lstnr = __qseecom_find_svc(data->listener.id);
4969 if (this_lstnr == NULL)
4970 return -EINVAL;
4971 qseecom.send_resp_flag = 1;
4972 this_lstnr->send_resp_flag = 1;
4973 wake_up_interruptible(&qseecom.send_resp_wq);
4974 return 0;
4975}
4976
4977static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
4978 struct qseecom_send_modfd_listener_resp *resp,
4979 struct qseecom_registered_listener_list *this_lstnr)
4980{
4981 int i;
4982
4983 if (!data || !resp || !this_lstnr) {
4984 pr_err("listener handle or resp msg is null\n");
4985 return -EINVAL;
4986 }
4987
4988 if (resp->resp_buf_ptr == NULL) {
4989 pr_err("resp buffer is null\n");
4990 return -EINVAL;
4991 }
4992 /* validate resp buf length */
4993 if ((resp->resp_len == 0) ||
4994 (resp->resp_len > this_lstnr->sb_length)) {
4995 pr_err("resp buf length %d not valid\n", resp->resp_len);
4996 return -EINVAL;
4997 }
4998
4999 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
5000 pr_err("Integer overflow in resp_len & resp_buf\n");
5001 return -EINVAL;
5002 }
5003 if ((uintptr_t)this_lstnr->user_virt_sb_base >
5004 (ULONG_MAX - this_lstnr->sb_length)) {
5005 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
5006 return -EINVAL;
5007 }
5008 /* validate resp buf */
5009 if (((uintptr_t)resp->resp_buf_ptr <
5010 (uintptr_t)this_lstnr->user_virt_sb_base) ||
5011 ((uintptr_t)resp->resp_buf_ptr >=
5012 ((uintptr_t)this_lstnr->user_virt_sb_base +
5013 this_lstnr->sb_length)) ||
5014 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
5015 ((uintptr_t)this_lstnr->user_virt_sb_base +
5016 this_lstnr->sb_length))) {
5017 pr_err("resp buf is out of shared buffer region\n");
5018 return -EINVAL;
5019 }
5020
5021 /* validate offsets */
5022 for (i = 0; i < MAX_ION_FD; i++) {
5023 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
5024 pr_err("Invalid offset %d = 0x%x\n",
5025 i, resp->ifd_data[i].cmd_buf_offset);
5026 return -EINVAL;
5027 }
5028 }
5029
5030 return 0;
5031}
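
/*
 * In short, the checks above require (illustration only):
 *
 *	user_virt_sb_base <= resp_buf_ptr  &&
 *	resp_buf_ptr + resp_len <= user_virt_sb_base + sb_length
 *
 * with both additions first proven free of integer overflow, and every
 * ifd_data[i].cmd_buf_offset strictly less than resp_len.
 */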
5032
5033static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5034 void __user *argp, bool is_64bit_addr)
5035{
5036 struct qseecom_send_modfd_listener_resp resp;
5037 struct qseecom_registered_listener_list *this_lstnr = NULL;
5038
5039 if (copy_from_user(&resp, argp, sizeof(resp))) {
5040		pr_err("copy_from_user failed\n");
5041 return -EINVAL;
5042 }
5043
5044 this_lstnr = __qseecom_find_svc(data->listener.id);
5045 if (this_lstnr == NULL)
5046 return -EINVAL;
5047
5048 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
5049 return -EINVAL;
5050
5051 resp.resp_buf_ptr = this_lstnr->sb_virt +
5052 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
5053
5054 if (!is_64bit_addr)
5055 __qseecom_update_cmd_buf(&resp, false, data);
5056 else
5057 __qseecom_update_cmd_buf_64(&resp, false, data);
5058 qseecom.send_resp_flag = 1;
5059 this_lstnr->send_resp_flag = 1;
5060 wake_up_interruptible(&qseecom.send_resp_wq);
5061 return 0;
5062}
5063
5064static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5065 void __user *argp)
5066{
5067 return __qseecom_send_modfd_resp(data, argp, false);
5068}
5069
5070static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
5071 void __user *argp)
5072{
5073 return __qseecom_send_modfd_resp(data, argp, true);
5074}
5075
5076static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
5077 void __user *argp)
5078{
5079 struct qseecom_qseos_version_req req;
5080
5081 if (copy_from_user(&req, argp, sizeof(req))) {
5082		pr_err("copy_from_user failed\n");
5083 return -EINVAL;
5084 }
5085 req.qseos_version = qseecom.qseos_version;
5086 if (copy_to_user(argp, &req, sizeof(req))) {
5087		pr_err("copy_to_user failed\n");
5088 return -EINVAL;
5089 }
5090 return 0;
5091}
5092
5093static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
5094{
5095 int rc = 0;
5096 struct qseecom_clk *qclk = NULL;
5097
5098 if (qseecom.no_clock_support)
5099 return 0;
5100
5101 if (ce == CLK_QSEE)
5102 qclk = &qseecom.qsee;
5103 if (ce == CLK_CE_DRV)
5104 qclk = &qseecom.ce_drv;
5105
5106 if (qclk == NULL) {
5107 pr_err("CLK type not supported\n");
5108 return -EINVAL;
5109 }
5110 mutex_lock(&clk_access_lock);
5111
5112 if (qclk->clk_access_cnt == ULONG_MAX) {
5113 pr_err("clk_access_cnt beyond limitation\n");
5114 goto err;
5115 }
5116 if (qclk->clk_access_cnt > 0) {
5117 qclk->clk_access_cnt++;
5118 mutex_unlock(&clk_access_lock);
5119 return rc;
5120 }
5121
5122 /* Enable CE core clk */
5123 if (qclk->ce_core_clk != NULL) {
5124 rc = clk_prepare_enable(qclk->ce_core_clk);
5125 if (rc) {
5126 pr_err("Unable to enable/prepare CE core clk\n");
5127 goto err;
5128 }
5129 }
5130 /* Enable CE clk */
5131 if (qclk->ce_clk != NULL) {
5132 rc = clk_prepare_enable(qclk->ce_clk);
5133 if (rc) {
5134 pr_err("Unable to enable/prepare CE iface clk\n");
5135 goto ce_clk_err;
5136 }
5137 }
5138 /* Enable AXI clk */
5139 if (qclk->ce_bus_clk != NULL) {
5140 rc = clk_prepare_enable(qclk->ce_bus_clk);
5141 if (rc) {
5142 pr_err("Unable to enable/prepare CE bus clk\n");
5143 goto ce_bus_clk_err;
5144 }
5145 }
5146 qclk->clk_access_cnt++;
5147 mutex_unlock(&clk_access_lock);
5148 return 0;
5149
5150ce_bus_clk_err:
5151 if (qclk->ce_clk != NULL)
5152 clk_disable_unprepare(qclk->ce_clk);
5153ce_clk_err:
5154 if (qclk->ce_core_clk != NULL)
5155 clk_disable_unprepare(qclk->ce_core_clk);
5156err:
5157 mutex_unlock(&clk_access_lock);
5158 return -EIO;
5159}
5160
5161static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5162{
5163 struct qseecom_clk *qclk;
5164
5165 if (qseecom.no_clock_support)
5166 return;
5167
5168 if (ce == CLK_QSEE)
5169 qclk = &qseecom.qsee;
5170 else
5171 qclk = &qseecom.ce_drv;
5172
5173 mutex_lock(&clk_access_lock);
5174
5175 if (qclk->clk_access_cnt == 0) {
5176 mutex_unlock(&clk_access_lock);
5177 return;
5178 }
5179
5180 if (qclk->clk_access_cnt == 1) {
5181 if (qclk->ce_clk != NULL)
5182 clk_disable_unprepare(qclk->ce_clk);
5183 if (qclk->ce_core_clk != NULL)
5184 clk_disable_unprepare(qclk->ce_core_clk);
5185 if (qclk->ce_bus_clk != NULL)
5186 clk_disable_unprepare(qclk->ce_bus_clk);
5187 }
5188 qclk->clk_access_cnt--;
5189 mutex_unlock(&clk_access_lock);
5190}
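
/*
 * Usage sketch (illustrative): the two helpers above form a refcounted
 * pair; nested users only bump/drop clk_access_cnt, and just the first
 * enable / last disable actually touches the CE core, iface and bus clocks:
 *
 *	ret = __qseecom_enable_clk(CLK_QSEE);
 *	if (ret)
 *		return ret;
 *	... issue the scm call(s) ...
 *	__qseecom_disable_clk(CLK_QSEE);
 */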
5191
5192static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
5193 int32_t clk_type)
5194{
5195 int ret = 0;
5196 struct qseecom_clk *qclk;
5197
5198 if (qseecom.no_clock_support)
5199 return 0;
5200
5201 qclk = &qseecom.qsee;
5202 if (!qseecom.qsee_perf_client)
5203 return ret;
5204
5205 switch (clk_type) {
5206 case CLK_DFAB:
5207 mutex_lock(&qsee_bw_mutex);
5208 if (!qseecom.qsee_bw_count) {
5209 if (qseecom.qsee_sfpb_bw_count > 0)
5210 ret = msm_bus_scale_client_update_request(
5211 qseecom.qsee_perf_client, 3);
5212 else {
5213 if (qclk->ce_core_src_clk != NULL)
5214 ret = __qseecom_enable_clk(CLK_QSEE);
5215 if (!ret) {
5216 ret =
5217 msm_bus_scale_client_update_request(
5218 qseecom.qsee_perf_client, 1);
5219 if ((ret) &&
5220 (qclk->ce_core_src_clk != NULL))
5221 __qseecom_disable_clk(CLK_QSEE);
5222 }
5223 }
5224 if (ret)
5225 pr_err("DFAB Bandwidth req failed (%d)\n",
5226 ret);
5227 else {
5228 qseecom.qsee_bw_count++;
5229 data->perf_enabled = true;
5230 }
5231 } else {
5232 qseecom.qsee_bw_count++;
5233 data->perf_enabled = true;
5234 }
5235 mutex_unlock(&qsee_bw_mutex);
5236 break;
5237 case CLK_SFPB:
5238 mutex_lock(&qsee_bw_mutex);
5239 if (!qseecom.qsee_sfpb_bw_count) {
5240 if (qseecom.qsee_bw_count > 0)
5241 ret = msm_bus_scale_client_update_request(
5242 qseecom.qsee_perf_client, 3);
5243 else {
5244 if (qclk->ce_core_src_clk != NULL)
5245 ret = __qseecom_enable_clk(CLK_QSEE);
5246 if (!ret) {
5247 ret =
5248 msm_bus_scale_client_update_request(
5249 qseecom.qsee_perf_client, 2);
5250 if ((ret) &&
5251 (qclk->ce_core_src_clk != NULL))
5252 __qseecom_disable_clk(CLK_QSEE);
5253 }
5254 }
5255
5256 if (ret)
5257 pr_err("SFPB Bandwidth req failed (%d)\n",
5258 ret);
5259 else {
5260 qseecom.qsee_sfpb_bw_count++;
5261 data->fast_load_enabled = true;
5262 }
5263 } else {
5264 qseecom.qsee_sfpb_bw_count++;
5265 data->fast_load_enabled = true;
5266 }
5267 mutex_unlock(&qsee_bw_mutex);
5268 break;
5269 default:
5270 pr_err("Clock type not defined\n");
5271 break;
5272 }
5273 return ret;
5274}
5275
5276static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5277 int32_t clk_type)
5278{
5279 int32_t ret = 0;
5280 struct qseecom_clk *qclk;
5281
5282 qclk = &qseecom.qsee;
5283
5284 if (qseecom.no_clock_support)
5285 return;
5286 if (!qseecom.qsee_perf_client)
5287 return;
5288
5289 switch (clk_type) {
5290 case CLK_DFAB:
5291 mutex_lock(&qsee_bw_mutex);
5292 if (qseecom.qsee_bw_count == 0) {
5293			pr_err("Client error. Extra call to disable DFAB clk\n");
5294 mutex_unlock(&qsee_bw_mutex);
5295 return;
5296 }
5297
5298 if (qseecom.qsee_bw_count == 1) {
5299 if (qseecom.qsee_sfpb_bw_count > 0)
5300 ret = msm_bus_scale_client_update_request(
5301 qseecom.qsee_perf_client, 2);
5302 else {
5303 ret = msm_bus_scale_client_update_request(
5304 qseecom.qsee_perf_client, 0);
5305 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5306 __qseecom_disable_clk(CLK_QSEE);
5307 }
5308 if (ret)
5309				pr_err("DFAB Bandwidth req fail (%d)\n",
5310					ret);
5311 else {
5312 qseecom.qsee_bw_count--;
5313 data->perf_enabled = false;
5314 }
5315 } else {
5316 qseecom.qsee_bw_count--;
5317 data->perf_enabled = false;
5318 }
5319 mutex_unlock(&qsee_bw_mutex);
5320 break;
5321 case CLK_SFPB:
5322 mutex_lock(&qsee_bw_mutex);
5323 if (qseecom.qsee_sfpb_bw_count == 0) {
5324			pr_err("Client error. Extra call to disable SFPB clk\n");
5325 mutex_unlock(&qsee_bw_mutex);
5326 return;
5327 }
5328 if (qseecom.qsee_sfpb_bw_count == 1) {
5329 if (qseecom.qsee_bw_count > 0)
5330 ret = msm_bus_scale_client_update_request(
5331 qseecom.qsee_perf_client, 1);
5332 else {
5333 ret = msm_bus_scale_client_update_request(
5334 qseecom.qsee_perf_client, 0);
5335 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5336 __qseecom_disable_clk(CLK_QSEE);
5337 }
5338 if (ret)
5339 pr_err("SFPB Bandwidth req fail (%d)\n",
5340 ret);
5341 else {
5342 qseecom.qsee_sfpb_bw_count--;
5343 data->fast_load_enabled = false;
5344 }
5345 } else {
5346 qseecom.qsee_sfpb_bw_count--;
5347 data->fast_load_enabled = false;
5348 }
5349 mutex_unlock(&qsee_bw_mutex);
5350 break;
5351 default:
5352 pr_err("Clock type not defined\n");
5353 break;
5354 }
5355
5356}
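
/*
 * For reference, derived from the msm_bus_scale_client_update_request()
 * calls above: bandwidth level 0 means no vote, 1 means DFAB only, 2 means
 * SFPB only and 3 means both, which is why the vote/unvote paths hop
 * between 1, 2 and 3 depending on which of qsee_bw_count and
 * qsee_sfpb_bw_count is non-zero.
 */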
5357
5358static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5359 void __user *argp)
5360{
5361 struct ion_handle *ihandle; /* Ion handle */
5362 struct qseecom_load_img_req load_img_req;
5363 int uret = 0;
5364 int ret;
5365 ion_phys_addr_t pa = 0;
5366 size_t len;
5367 struct qseecom_load_app_ireq load_req;
5368 struct qseecom_load_app_64bit_ireq load_req_64bit;
5369 struct qseecom_command_scm_resp resp;
5370 void *cmd_buf = NULL;
5371 size_t cmd_len;
5372 /* Copy the relevant information needed for loading the image */
5373 if (copy_from_user(&load_img_req,
5374 (void __user *)argp,
5375 sizeof(struct qseecom_load_img_req))) {
5376 pr_err("copy_from_user failed\n");
5377 return -EFAULT;
5378 }
5379
5380 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005381 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005382 load_img_req.ifd_data_fd);
5383 if (IS_ERR_OR_NULL(ihandle)) {
5384 pr_err("Ion client could not retrieve the handle\n");
5385 return -ENOMEM;
5386 }
5387
5388 /* Get the physical address of the ION BUF */
5389 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5390 if (ret) {
5391 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5392 ret);
5393 return ret;
5394 }
5395 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5396 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5397 len, load_img_req.mdt_len,
5398 load_img_req.img_len);
5399		return -EINVAL;
5400 }
5401 /* Populate the structure for sending scm call to load image */
5402 if (qseecom.qsee_version < QSEE_VERSION_40) {
5403 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5404 load_req.mdt_len = load_img_req.mdt_len;
5405 load_req.img_len = load_img_req.img_len;
5406 load_req.phy_addr = (uint32_t)pa;
5407 cmd_buf = (void *)&load_req;
5408 cmd_len = sizeof(struct qseecom_load_app_ireq);
5409 } else {
5410 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5411 load_req_64bit.mdt_len = load_img_req.mdt_len;
5412 load_req_64bit.img_len = load_img_req.img_len;
5413 load_req_64bit.phy_addr = (uint64_t)pa;
5414 cmd_buf = (void *)&load_req_64bit;
5415 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5416 }
5417
5418 if (qseecom.support_bus_scaling) {
5419 mutex_lock(&qsee_bw_mutex);
5420 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5421 mutex_unlock(&qsee_bw_mutex);
5422 if (ret) {
5423 ret = -EIO;
5424 goto exit_cpu_restore;
5425 }
5426 }
5427
5428 /* Vote for the SFPB clock */
5429 ret = __qseecom_enable_clk_scale_up(data);
5430 if (ret) {
5431 ret = -EIO;
5432 goto exit_register_bus_bandwidth_needs;
5433 }
5434 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5435 ION_IOC_CLEAN_INV_CACHES);
5436 if (ret) {
5437 pr_err("cache operation failed %d\n", ret);
5438 goto exit_disable_clock;
5439 }
5440 /* SCM_CALL to load the external elf */
5441 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5442 &resp, sizeof(resp));
5443 if (ret) {
5444 pr_err("scm_call to load failed : ret %d\n",
5445 ret);
5446 ret = -EFAULT;
5447 goto exit_disable_clock;
5448 }
5449
5450 switch (resp.result) {
5451 case QSEOS_RESULT_SUCCESS:
5452 break;
5453 case QSEOS_RESULT_INCOMPLETE:
5454 pr_err("%s: qseos result incomplete\n", __func__);
5455 ret = __qseecom_process_incomplete_cmd(data, &resp);
5456 if (ret)
5457 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5458 break;
5459 case QSEOS_RESULT_FAILURE:
5460 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5461 ret = -EFAULT;
5462 break;
5463 default:
5464 pr_err("scm_call response result %d not supported\n",
5465 resp.result);
5466 ret = -EFAULT;
5467 break;
5468 }
5469
5470exit_disable_clock:
5471 __qseecom_disable_clk_scale_down(data);
5472
5473exit_register_bus_bandwidth_needs:
5474 if (qseecom.support_bus_scaling) {
5475 mutex_lock(&qsee_bw_mutex);
5476 uret = qseecom_unregister_bus_bandwidth_needs(data);
5477 mutex_unlock(&qsee_bw_mutex);
5478 if (uret)
5479 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5480 uret, ret);
5481 }
5482
5483exit_cpu_restore:
5484 /* Deallocate the handle */
5485 if (!IS_ERR_OR_NULL(ihandle))
5486 ion_free(qseecom.ion_clnt, ihandle);
5487 return ret;
5488}
5489
5490static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5491{
5492 int ret = 0;
5493 struct qseecom_command_scm_resp resp;
5494 struct qseecom_unload_app_ireq req;
5495
5496 /* unavailable client app */
5497 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5498
5499 /* Populate the structure for sending scm call to unload image */
5500 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5501
5502 /* SCM_CALL to unload the external elf */
5503 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5504 sizeof(struct qseecom_unload_app_ireq),
5505 &resp, sizeof(resp));
5506 if (ret) {
5507 pr_err("scm_call to unload failed : ret %d\n",
5508 ret);
5509 ret = -EFAULT;
5510 goto qseecom_unload_external_elf_scm_err;
5511 }
5512 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5513 ret = __qseecom_process_incomplete_cmd(data, &resp);
5514 if (ret)
5515 pr_err("process_incomplete_cmd fail err: %d\n",
5516 ret);
5517 } else {
5518 if (resp.result != QSEOS_RESULT_SUCCESS) {
5519 pr_err("scm_call to unload image failed resp.result =%d\n",
5520 resp.result);
5521 ret = -EFAULT;
5522 }
5523 }
5524
5525qseecom_unload_external_elf_scm_err:
5526
5527 return ret;
5528}
5529
5530static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5531 void __user *argp)
5532{
5533
5534 int32_t ret;
5535 struct qseecom_qseos_app_load_query query_req;
5536 struct qseecom_check_app_ireq req;
5537 struct qseecom_registered_app_list *entry = NULL;
5538 unsigned long flags = 0;
5539 uint32_t app_arch = 0, app_id = 0;
5540 bool found_app = false;
5541
5542 /* Copy the relevant information needed for loading the image */
5543 if (copy_from_user(&query_req,
5544 (void __user *)argp,
5545 sizeof(struct qseecom_qseos_app_load_query))) {
5546 pr_err("copy_from_user failed\n");
5547 return -EFAULT;
5548 }
5549
5550 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5551 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5552 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5553
5554 ret = __qseecom_check_app_exists(req, &app_id);
5555 if (ret) {
5556		pr_err("scm call to check if app is loaded failed\n");
5557 return ret; /* scm call failed */
5558 }
5559 if (app_id) {
5560 pr_debug("App id %d (%s) already exists\n", app_id,
5561 (char *)(req.app_name));
5562 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5563 list_for_each_entry(entry,
5564 &qseecom.registered_app_list_head, list){
5565 if (entry->app_id == app_id) {
5566 app_arch = entry->app_arch;
5567 entry->ref_cnt++;
5568 found_app = true;
5569 break;
5570 }
5571 }
5572 spin_unlock_irqrestore(
5573 &qseecom.registered_app_list_lock, flags);
5574 data->client.app_id = app_id;
5575 query_req.app_id = app_id;
5576 if (app_arch) {
5577 data->client.app_arch = app_arch;
5578 query_req.app_arch = app_arch;
5579 } else {
5580 data->client.app_arch = 0;
5581 query_req.app_arch = 0;
5582 }
5583 strlcpy(data->client.app_name, query_req.app_name,
5584 MAX_APP_NAME_SIZE);
5585 /*
5586 * If app was loaded by appsbl before and was not registered,
5587		 * register this app now.
5588 */
5589 if (!found_app) {
5590 pr_debug("Register app %d [%s] which was loaded before\n",
5591				app_id, (char *)query_req.app_name);
5592 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5593 if (!entry) {
5594 pr_err("kmalloc for app entry failed\n");
5595 return -ENOMEM;
5596 }
5597 entry->app_id = app_id;
5598 entry->ref_cnt = 1;
5599 entry->app_arch = data->client.app_arch;
5600 strlcpy(entry->app_name, data->client.app_name,
5601 MAX_APP_NAME_SIZE);
5602 entry->app_blocked = false;
5603 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07005604 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005605 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5606 flags);
5607 list_add_tail(&entry->list,
5608 &qseecom.registered_app_list_head);
5609 spin_unlock_irqrestore(
5610 &qseecom.registered_app_list_lock, flags);
5611 }
5612 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5613 pr_err("copy_to_user failed\n");
5614 return -EFAULT;
5615 }
5616 return -EEXIST; /* app already loaded */
5617 } else {
5618 return 0; /* app not loaded */
5619 }
5620}
5621
5622static int __qseecom_get_ce_pipe_info(
5623 enum qseecom_key_management_usage_type usage,
5624 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5625{
5626 int ret = -EINVAL;
5627 int i, j;
5628 struct qseecom_ce_info_use *p = NULL;
5629 int total = 0;
5630 struct qseecom_ce_pipe_entry *pcepipe;
5631
5632 switch (usage) {
5633 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5634 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5635 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5636 if (qseecom.support_fde) {
5637 p = qseecom.ce_info.fde;
5638 total = qseecom.ce_info.num_fde;
5639 } else {
5640 pr_err("system does not support fde\n");
5641 return -EINVAL;
5642 }
5643 break;
5644 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5645 if (qseecom.support_pfe) {
5646 p = qseecom.ce_info.pfe;
5647 total = qseecom.ce_info.num_pfe;
5648 } else {
5649 pr_err("system does not support pfe\n");
5650 return -EINVAL;
5651 }
5652 break;
5653 default:
5654 pr_err("unsupported usage %d\n", usage);
5655 return -EINVAL;
5656 }
5657
5658 for (j = 0; j < total; j++) {
5659 if (p->unit_num == unit) {
5660 pcepipe = p->ce_pipe_entry;
5661 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5662 (*ce_hw)[i] = pcepipe->ce_num;
5663 *pipe = pcepipe->ce_pipe_pair;
5664 pcepipe++;
5665 }
5666 ret = 0;
5667 break;
5668 }
5669 p++;
5670 }
5671 return ret;
5672}
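
/*
 * Illustrative note: for the matching unit, the loop above fills
 * ce_hw[0..num_ce_pipe_entries-1] with each entry's ce_num and leaves
 * *pipe set to the pipe pair of the last entry; callers size ce_hw[]
 * beforehand using qseecom_get_ce_hw_instance().
 */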
5673
5674static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
5675 enum qseecom_key_management_usage_type usage,
5676 struct qseecom_key_generate_ireq *ireq)
5677{
5678 struct qseecom_command_scm_resp resp;
5679 int ret;
5680
5681 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5682 usage >= QSEOS_KM_USAGE_MAX) {
5683 pr_err("Error:: unsupported usage %d\n", usage);
5684 return -EFAULT;
5685 }
5686 ret = __qseecom_enable_clk(CLK_QSEE);
5687 if (ret)
5688 return ret;
5689
5690 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5691 ireq, sizeof(struct qseecom_key_generate_ireq),
5692 &resp, sizeof(resp));
5693 if (ret) {
5694 if (ret == -EINVAL &&
5695 resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5696 pr_debug("Key ID exists.\n");
5697 ret = 0;
5698 } else {
5699 pr_err("scm call to generate key failed : %d\n", ret);
5700 ret = -EFAULT;
5701 }
5702 goto generate_key_exit;
5703 }
5704
5705 switch (resp.result) {
5706 case QSEOS_RESULT_SUCCESS:
5707 break;
5708 case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
5709 pr_debug("Key ID exists.\n");
5710 break;
5711 case QSEOS_RESULT_INCOMPLETE:
5712 ret = __qseecom_process_incomplete_cmd(data, &resp);
5713 if (ret) {
5714 if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5715 pr_debug("Key ID exists.\n");
5716 ret = 0;
5717 } else {
5718 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5719 resp.result);
5720 }
5721 }
5722 break;
5723 case QSEOS_RESULT_FAILURE:
5724 default:
5725 pr_err("gen key scm call failed resp.result %d\n", resp.result);
5726 ret = -EINVAL;
5727 break;
5728 }
5729generate_key_exit:
5730 __qseecom_disable_clk(CLK_QSEE);
5731 return ret;
5732}
5733
5734static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
5735 enum qseecom_key_management_usage_type usage,
5736 struct qseecom_key_delete_ireq *ireq)
5737{
5738 struct qseecom_command_scm_resp resp;
5739 int ret;
5740
5741 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5742 usage >= QSEOS_KM_USAGE_MAX) {
5743 pr_err("Error:: unsupported usage %d\n", usage);
5744 return -EFAULT;
5745 }
5746 ret = __qseecom_enable_clk(CLK_QSEE);
5747 if (ret)
5748 return ret;
5749
5750 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5751 ireq, sizeof(struct qseecom_key_delete_ireq),
5752 &resp, sizeof(struct qseecom_command_scm_resp));
5753 if (ret) {
5754 if (ret == -EINVAL &&
5755 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5756 pr_debug("Max attempts to input password reached.\n");
5757 ret = -ERANGE;
5758 } else {
5759 pr_err("scm call to delete key failed : %d\n", ret);
5760 ret = -EFAULT;
5761 }
5762 goto del_key_exit;
5763 }
5764
5765 switch (resp.result) {
5766 case QSEOS_RESULT_SUCCESS:
5767 break;
5768 case QSEOS_RESULT_INCOMPLETE:
5769 ret = __qseecom_process_incomplete_cmd(data, &resp);
5770 if (ret) {
5771 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5772 resp.result);
5773 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5774 pr_debug("Max attempts to input password reached.\n");
5775 ret = -ERANGE;
5776 }
5777 }
5778 break;
5779 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5780 pr_debug("Max attempts to input password reached.\n");
5781 ret = -ERANGE;
5782 break;
5783 case QSEOS_RESULT_FAILURE:
5784 default:
5785 pr_err("Delete key scm call failed resp.result %d\n",
5786 resp.result);
5787 ret = -EINVAL;
5788 break;
5789 }
5790del_key_exit:
5791 __qseecom_disable_clk(CLK_QSEE);
5792 return ret;
5793}
5794
5795static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5796 enum qseecom_key_management_usage_type usage,
5797 struct qseecom_key_select_ireq *ireq)
5798{
5799 struct qseecom_command_scm_resp resp;
5800 int ret;
5801
5802 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5803 usage >= QSEOS_KM_USAGE_MAX) {
5804 pr_err("Error:: unsupported usage %d\n", usage);
5805 return -EFAULT;
5806 }
5807 ret = __qseecom_enable_clk(CLK_QSEE);
5808 if (ret)
5809 return ret;
5810
5811 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5812 ret = __qseecom_enable_clk(CLK_CE_DRV);
5813 if (ret)
5814 return ret;
5815 }
5816
5817 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5818 ireq, sizeof(struct qseecom_key_select_ireq),
5819 &resp, sizeof(struct qseecom_command_scm_resp));
5820 if (ret) {
5821 if (ret == -EINVAL &&
5822 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5823 pr_debug("Max attempts to input password reached.\n");
5824 ret = -ERANGE;
5825 } else if (ret == -EINVAL &&
5826 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5827 pr_debug("Set Key operation under processing...\n");
5828 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5829 } else {
5830 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5831 ret);
5832 ret = -EFAULT;
5833 }
5834 goto set_key_exit;
5835 }
5836
5837 switch (resp.result) {
5838 case QSEOS_RESULT_SUCCESS:
5839 break;
5840 case QSEOS_RESULT_INCOMPLETE:
5841 ret = __qseecom_process_incomplete_cmd(data, &resp);
5842 if (ret) {
5843 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5844 resp.result);
5845 if (resp.result ==
5846 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5847 pr_debug("Set Key operation under processing...\n");
5848 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5849 }
5850 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5851 pr_debug("Max attempts to input password reached.\n");
5852 ret = -ERANGE;
5853 }
5854 }
5855 break;
5856 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5857 pr_debug("Max attempts to input password reached.\n");
5858 ret = -ERANGE;
5859 break;
5860 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5861 pr_debug("Set Key operation under processing...\n");
5862 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5863 break;
5864 case QSEOS_RESULT_FAILURE:
5865 default:
5866 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5867 ret = -EINVAL;
5868 break;
5869 }
5870set_key_exit:
5871 __qseecom_disable_clk(CLK_QSEE);
5872 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5873 __qseecom_disable_clk(CLK_CE_DRV);
5874 return ret;
5875}
5876
5877static int __qseecom_update_current_key_user_info(
5878 struct qseecom_dev_handle *data,
5879 enum qseecom_key_management_usage_type usage,
5880 struct qseecom_key_userinfo_update_ireq *ireq)
5881{
5882 struct qseecom_command_scm_resp resp;
5883 int ret;
5884
5885 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5886 usage >= QSEOS_KM_USAGE_MAX) {
5887 pr_err("Error:: unsupported usage %d\n", usage);
5888 return -EFAULT;
5889 }
5890 ret = __qseecom_enable_clk(CLK_QSEE);
5891 if (ret)
5892 return ret;
5893
5894 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5895 ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
5896 &resp, sizeof(struct qseecom_command_scm_resp));
5897 if (ret) {
5898 if (ret == -EINVAL &&
5899 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5900 pr_debug("Set Key operation under processing...\n");
5901 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5902 } else {
5903 pr_err("scm call to update key userinfo failed: %d\n",
5904 ret);
5905 __qseecom_disable_clk(CLK_QSEE);
5906 return -EFAULT;
5907 }
5908 }
5909
5910 switch (resp.result) {
5911 case QSEOS_RESULT_SUCCESS:
5912 break;
5913 case QSEOS_RESULT_INCOMPLETE:
5914 ret = __qseecom_process_incomplete_cmd(data, &resp);
5915 if (resp.result ==
5916 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5917 pr_debug("Set Key operation under processing...\n");
5918 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5919 }
5920 if (ret)
5921 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5922 resp.result);
5923 break;
5924 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5925 pr_debug("Update Key operation under processing...\n");
5926 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5927 break;
5928 case QSEOS_RESULT_FAILURE:
5929 default:
5930 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5931 ret = -EINVAL;
5932 break;
5933 }
5934
5935 __qseecom_disable_clk(CLK_QSEE);
5936 return ret;
5937}
5938
5939
5940static int qseecom_enable_ice_setup(int usage)
5941{
5942 int ret = 0;
5943
5944 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5945 ret = qcom_ice_setup_ice_hw("ufs", true);
5946 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5947 ret = qcom_ice_setup_ice_hw("sdcc", true);
5948
5949 return ret;
5950}
5951
5952static int qseecom_disable_ice_setup(int usage)
5953{
5954 int ret = 0;
5955
5956 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5957 ret = qcom_ice_setup_ice_hw("ufs", false);
5958 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5959 ret = qcom_ice_setup_ice_hw("sdcc", false);
5960
5961 return ret;
5962}
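
/*
 * Usage pattern (as in qseecom_create_key()/qseecom_wipe_key() below):
 * ICE is only brought up around the actual key programming, e.g.
 *
 *	ret = qseecom_enable_ice_setup(usage);
 *	if (ret)
 *		goto free_buf;
 *	ret = __qseecom_set_clear_ce_key(data, usage, &ireq);
 *	qseecom_disable_ice_setup(usage);
 */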
5963
5964static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5965{
5966 struct qseecom_ce_info_use *pce_info_use, *p;
5967 int total = 0;
5968 int i;
5969
5970 switch (usage) {
5971 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5972 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5973 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5974 p = qseecom.ce_info.fde;
5975 total = qseecom.ce_info.num_fde;
5976 break;
5977 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5978 p = qseecom.ce_info.pfe;
5979 total = qseecom.ce_info.num_pfe;
5980 break;
5981 default:
5982 pr_err("unsupported usage %d\n", usage);
5983 return -EINVAL;
5984 }
5985
5986 pce_info_use = NULL;
5987
5988 for (i = 0; i < total; i++) {
5989 if (p->unit_num == unit) {
5990 pce_info_use = p;
5991 break;
5992 }
5993 p++;
5994 }
5995 if (!pce_info_use) {
5996 pr_err("can not find %d\n", unit);
5997 return -EINVAL;
5998 }
5999 return pce_info_use->num_ce_pipe_entries;
6000}
6001
6002static int qseecom_create_key(struct qseecom_dev_handle *data,
6003 void __user *argp)
6004{
6005 int i;
6006 uint32_t *ce_hw = NULL;
6007 uint32_t pipe = 0;
6008 int ret = 0;
6009 uint32_t flags = 0;
6010 struct qseecom_create_key_req create_key_req;
6011 struct qseecom_key_generate_ireq generate_key_ireq;
6012 struct qseecom_key_select_ireq set_key_ireq;
6013 uint32_t entries = 0;
6014
6015 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
6016 if (ret) {
6017 pr_err("copy_from_user failed\n");
6018 return ret;
6019 }
6020
6021 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6022 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6023 pr_err("unsupported usage %d\n", create_key_req.usage);
6024 ret = -EFAULT;
6025 return ret;
6026 }
6027 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6028 create_key_req.usage);
6029 if (entries <= 0) {
6030 pr_err("no ce instance for usage %d instance %d\n",
6031 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
6032 ret = -EINVAL;
6033 return ret;
6034 }
6035
6036 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6037 if (!ce_hw) {
6038 ret = -ENOMEM;
6039 return ret;
6040 }
6041 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
6042 DEFAULT_CE_INFO_UNIT);
6043 if (ret) {
6044 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6045 ret = -EINVAL;
6046 goto free_buf;
6047 }
6048
6049 if (qseecom.fde_key_size)
6050 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6051 else
6052 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6053
Jiten Patela7bb1d52018-05-11 12:34:26 +05306054 if (qseecom.enable_key_wrap_in_ks == true)
6055 flags |= ENABLE_KEY_WRAP_IN_KS;
6056
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006057 generate_key_ireq.flags = flags;
6058 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
6059 memset((void *)generate_key_ireq.key_id,
6060 0, QSEECOM_KEY_ID_SIZE);
6061 memset((void *)generate_key_ireq.hash32,
6062 0, QSEECOM_HASH_SIZE);
6063 memcpy((void *)generate_key_ireq.key_id,
6064 (void *)key_id_array[create_key_req.usage].desc,
6065 QSEECOM_KEY_ID_SIZE);
6066 memcpy((void *)generate_key_ireq.hash32,
6067 (void *)create_key_req.hash32,
6068 QSEECOM_HASH_SIZE);
6069
6070 ret = __qseecom_generate_and_save_key(data,
6071 create_key_req.usage, &generate_key_ireq);
6072 if (ret) {
6073 pr_err("Failed to generate key on storage: %d\n", ret);
6074 goto free_buf;
6075 }
6076
6077 for (i = 0; i < entries; i++) {
6078 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6079 if (create_key_req.usage ==
6080 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6081 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6082 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6083
6084 } else if (create_key_req.usage ==
6085 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6086 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6087 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6088
6089 } else {
6090 set_key_ireq.ce = ce_hw[i];
6091 set_key_ireq.pipe = pipe;
6092 }
6093 set_key_ireq.flags = flags;
6094
6095 /* set both PIPE_ENC and PIPE_ENC_XTS*/
6096 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6097 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6098 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6099 memcpy((void *)set_key_ireq.key_id,
6100 (void *)key_id_array[create_key_req.usage].desc,
6101 QSEECOM_KEY_ID_SIZE);
6102 memcpy((void *)set_key_ireq.hash32,
6103 (void *)create_key_req.hash32,
6104 QSEECOM_HASH_SIZE);
6105		/*
6106		 * qseecom_enable_ice_setup() returns 0 ("false") if this is a
6107		 * GPCE based crypto instance or if ICE is set up properly.
6108		 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006109 ret = qseecom_enable_ice_setup(create_key_req.usage);
6110 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006111 goto free_buf;
6112
6113 do {
6114 ret = __qseecom_set_clear_ce_key(data,
6115 create_key_req.usage,
6116 &set_key_ireq);
6117 /*
6118 * wait a little before calling scm again to let other
6119 * processes run
6120 */
6121 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6122 msleep(50);
6123
6124 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6125
6126 qseecom_disable_ice_setup(create_key_req.usage);
6127
6128 if (ret) {
6129 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
6130 pipe, ce_hw[i], ret);
6131 goto free_buf;
6132 } else {
6133			pr_debug("Set the key successfully\n");
6134 if ((create_key_req.usage ==
6135 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
6136 (create_key_req.usage ==
6137 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
6138 goto free_buf;
6139 }
6140 }
6141
6142free_buf:
6143 kzfree(ce_hw);
6144 return ret;
6145}
6146
6147static int qseecom_wipe_key(struct qseecom_dev_handle *data,
6148 void __user *argp)
6149{
6150 uint32_t *ce_hw = NULL;
6151 uint32_t pipe = 0;
6152 int ret = 0;
6153 uint32_t flags = 0;
6154 int i, j;
6155 struct qseecom_wipe_key_req wipe_key_req;
6156 struct qseecom_key_delete_ireq delete_key_ireq;
6157 struct qseecom_key_select_ireq clear_key_ireq;
6158 uint32_t entries = 0;
6159
6160 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6161 if (ret) {
6162 pr_err("copy_from_user failed\n");
6163 return ret;
6164 }
6165
6166 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6167 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6168 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6169 ret = -EFAULT;
6170 return ret;
6171 }
6172
6173 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6174 wipe_key_req.usage);
6175 if (entries <= 0) {
6176 pr_err("no ce instance for usage %d instance %d\n",
6177 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6178 ret = -EINVAL;
6179 return ret;
6180 }
6181
6182 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6183 if (!ce_hw) {
6184 ret = -ENOMEM;
6185 return ret;
6186 }
6187
6188 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6189 DEFAULT_CE_INFO_UNIT);
6190 if (ret) {
6191 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6192 ret = -EINVAL;
6193 goto free_buf;
6194 }
6195
6196 if (wipe_key_req.wipe_key_flag) {
6197 delete_key_ireq.flags = flags;
6198 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6199 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6200 memcpy((void *)delete_key_ireq.key_id,
6201 (void *)key_id_array[wipe_key_req.usage].desc,
6202 QSEECOM_KEY_ID_SIZE);
6203 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6204
6205 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6206 &delete_key_ireq);
6207 if (ret) {
6208 pr_err("Failed to delete key from ssd storage: %d\n",
6209 ret);
6210 ret = -EFAULT;
6211 goto free_buf;
6212 }
6213 }
6214
6215 for (j = 0; j < entries; j++) {
6216 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6217 if (wipe_key_req.usage ==
6218 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6219 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6220 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6221 } else if (wipe_key_req.usage ==
6222 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6223 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6224 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6225 } else {
6226 clear_key_ireq.ce = ce_hw[j];
6227 clear_key_ireq.pipe = pipe;
6228 }
6229 clear_key_ireq.flags = flags;
6230 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6231 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6232 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6233 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6234
6235		/*
6236		 * qseecom_enable_ice_setup() returns 0 ("false") if this is a
6237		 * GPCE based crypto instance or if ICE is set up properly.
6238		 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006239 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6240 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006241 goto free_buf;
6242
6243 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6244 &clear_key_ireq);
6245
6246 qseecom_disable_ice_setup(wipe_key_req.usage);
6247
6248 if (ret) {
6249 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6250 pipe, ce_hw[j], ret);
6251 ret = -EFAULT;
6252 goto free_buf;
6253 }
6254 }
6255
6256free_buf:
6257 kzfree(ce_hw);
6258 return ret;
6259}
6260
6261static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6262 void __user *argp)
6263{
6264 int ret = 0;
6265 uint32_t flags = 0;
6266 struct qseecom_update_key_userinfo_req update_key_req;
6267 struct qseecom_key_userinfo_update_ireq ireq;
6268
6269 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6270 if (ret) {
6271 pr_err("copy_from_user failed\n");
6272 return ret;
6273 }
6274
6275 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6276 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6277 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6278 return -EFAULT;
6279 }
6280
6281 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6282
6283 if (qseecom.fde_key_size)
6284 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6285 else
6286 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6287
6288 ireq.flags = flags;
6289 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6290 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6291 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6292 memcpy((void *)ireq.key_id,
6293 (void *)key_id_array[update_key_req.usage].desc,
6294 QSEECOM_KEY_ID_SIZE);
6295 memcpy((void *)ireq.current_hash32,
6296 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6297 memcpy((void *)ireq.new_hash32,
6298 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6299
6300 do {
6301 ret = __qseecom_update_current_key_user_info(data,
6302 update_key_req.usage,
6303 &ireq);
6304 /*
6305 * wait a little before calling scm again to let other
6306 * processes run
6307 */
6308 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6309 msleep(50);
6310
6311 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6312 if (ret) {
6313 pr_err("Failed to update key info: %d\n", ret);
6314 return ret;
6315 }
6316 return ret;
6317}
6318
6319static int qseecom_is_es_activated(void __user *argp)
6320{
Zhen Kong26e62742018-05-04 17:19:06 -07006321 struct qseecom_is_es_activated_req req = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006322 struct qseecom_command_scm_resp resp;
6323 int ret;
6324
6325 if (qseecom.qsee_version < QSEE_VERSION_04) {
6326 pr_err("invalid qsee version\n");
6327 return -ENODEV;
6328 }
6329
6330 if (argp == NULL) {
6331 pr_err("arg is null\n");
6332 return -EINVAL;
6333 }
6334
6335 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6336 &req, sizeof(req), &resp, sizeof(resp));
6337 if (ret) {
6338 pr_err("scm_call failed\n");
6339 return ret;
6340 }
6341
6342 req.is_activated = resp.result;
6343 ret = copy_to_user(argp, &req, sizeof(req));
6344 if (ret) {
6345 pr_err("copy_to_user failed\n");
6346 return ret;
6347 }
6348
6349 return 0;
6350}
6351
6352static int qseecom_save_partition_hash(void __user *argp)
6353{
6354 struct qseecom_save_partition_hash_req req;
6355 struct qseecom_command_scm_resp resp;
6356 int ret;
6357
6358 memset(&resp, 0x00, sizeof(resp));
6359
6360 if (qseecom.qsee_version < QSEE_VERSION_04) {
6361 pr_err("invalid qsee version\n");
6362 return -ENODEV;
6363 }
6364
6365 if (argp == NULL) {
6366 pr_err("arg is null\n");
6367 return -EINVAL;
6368 }
6369
6370 ret = copy_from_user(&req, argp, sizeof(req));
6371 if (ret) {
6372 pr_err("copy_from_user failed\n");
6373 return ret;
6374 }
6375
6376 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6377 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6378 if (ret) {
6379 pr_err("qseecom_scm_call failed\n");
6380 return ret;
6381 }
6382
6383 return 0;
6384}
6385
6386static int qseecom_mdtp_cipher_dip(void __user *argp)
6387{
6388 struct qseecom_mdtp_cipher_dip_req req;
6389 u32 tzbuflenin, tzbuflenout;
6390 char *tzbufin = NULL, *tzbufout = NULL;
6391 struct scm_desc desc = {0};
6392 int ret;
6393
6394 do {
6395 /* Copy the parameters from userspace */
6396 if (argp == NULL) {
6397 pr_err("arg is null\n");
6398 ret = -EINVAL;
6399 break;
6400 }
6401
6402 ret = copy_from_user(&req, argp, sizeof(req));
6403 if (ret) {
6404 pr_err("copy_from_user failed, ret= %d\n", ret);
6405 break;
6406 }
6407
6408 if (req.in_buf == NULL || req.out_buf == NULL ||
6409 req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
6410 req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
6411 req.direction > 1) {
6412 pr_err("invalid parameters\n");
6413 ret = -EINVAL;
6414 break;
6415 }
6416
6417 /* Copy the input buffer from userspace to kernel space */
6418 tzbuflenin = PAGE_ALIGN(req.in_buf_size);
6419 tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
6420 if (!tzbufin) {
6421 pr_err("error allocating in buffer\n");
6422 ret = -ENOMEM;
6423 break;
6424 }
6425
6426 ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
6427 if (ret) {
6428 pr_err("copy_from_user failed, ret=%d\n", ret);
6429 break;
6430 }
6431
6432 dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
6433
6434 /* Prepare the output buffer in kernel space */
6435 tzbuflenout = PAGE_ALIGN(req.out_buf_size);
6436 tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
6437 if (!tzbufout) {
6438 pr_err("error allocating out buffer\n");
6439 ret = -ENOMEM;
6440 break;
6441 }
6442
6443 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6444
6445 /* Send the command to TZ */
6446 desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
6447 desc.args[0] = virt_to_phys(tzbufin);
6448 desc.args[1] = req.in_buf_size;
6449 desc.args[2] = virt_to_phys(tzbufout);
6450 desc.args[3] = req.out_buf_size;
6451 desc.args[4] = req.direction;
6452
6453 ret = __qseecom_enable_clk(CLK_QSEE);
6454 if (ret)
6455 break;
6456
Zhen Kong03f220d2019-02-01 17:12:34 -08006457 ret = __qseecom_scm_call2_locked(TZ_MDTP_CIPHER_DIP_ID, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006458
6459 __qseecom_disable_clk(CLK_QSEE);
6460
6461 if (ret) {
6462 pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
6463 ret);
6464 break;
6465 }
6466
6467 /* Copy the output buffer from kernel space to userspace */
6468 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6469 ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
6470 if (ret) {
6471 pr_err("copy_to_user failed, ret=%d\n", ret);
6472 break;
6473 }
6474 } while (0);
6475
6476 kzfree(tzbufin);
6477 kzfree(tzbufout);
6478
6479 return ret;
6480}
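
/*
 * For reference, the TZ_MDTP_CIPHER_DIP_ID descriptor built above packs:
 * args[0]/args[1] = phys addr and size of the input DIP buffer,
 * args[2]/args[3] = phys addr and size of the output buffer, and
 * args[4] = cipher direction (0 or 1); both buffers are capped at MAX_DIP
 * and flushed around the call since TZ accesses them by physical address.
 */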
6481
6482static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
6483 struct qseecom_qteec_req *req)
6484{
6485 if (!data || !data->client.ihandle) {
6486 pr_err("Client or client handle is not initialized\n");
6487 return -EINVAL;
6488 }
6489
6490 if (data->type != QSEECOM_CLIENT_APP)
6491 return -EFAULT;
6492
6493 if (req->req_len > UINT_MAX - req->resp_len) {
6494 pr_err("Integer overflow detected in req_len & rsp_len\n");
6495 return -EINVAL;
6496 }
6497
6498 if (req->req_len + req->resp_len > data->client.sb_length) {
6499		pr_debug("Not enough memory to fit cmd_buf and resp_buf.\n");
6500		pr_debug("Required: %u, Available: %zu\n",
6501			(req->req_len + req->resp_len), data->client.sb_length);
6502 return -ENOMEM;
6503 }
6504
6505 if (req->req_ptr == NULL || req->resp_ptr == NULL) {
6506 pr_err("cmd buffer or response buffer is null\n");
6507 return -EINVAL;
6508 }
6509 if (((uintptr_t)req->req_ptr <
6510 data->client.user_virt_sb_base) ||
6511 ((uintptr_t)req->req_ptr >=
6512 (data->client.user_virt_sb_base + data->client.sb_length))) {
6513		pr_err("cmd buffer address not within shared buffer\n");
6514 return -EINVAL;
6515 }
6516
6517 if (((uintptr_t)req->resp_ptr <
6518 data->client.user_virt_sb_base) ||
6519 ((uintptr_t)req->resp_ptr >=
6520 (data->client.user_virt_sb_base + data->client.sb_length))) {
6521		pr_err("response buffer address not within shared buffer\n");
6522 return -EINVAL;
6523 }
6524
6525 if ((req->req_len == 0) || (req->resp_len == 0)) {
6526		pr_err("cmd buf length/response buf length not valid\n");
6527 return -EINVAL;
6528 }
6529
6530 if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
6531 pr_err("Integer overflow in req_len & req_ptr\n");
6532 return -EINVAL;
6533 }
6534
6535 if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
6536 pr_err("Integer overflow in resp_len & resp_ptr\n");
6537 return -EINVAL;
6538 }
6539
6540 if (data->client.user_virt_sb_base >
6541 (ULONG_MAX - data->client.sb_length)) {
6542 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
6543 return -EINVAL;
6544 }
6545 if ((((uintptr_t)req->req_ptr + req->req_len) >
6546 ((uintptr_t)data->client.user_virt_sb_base +
6547 data->client.sb_length)) ||
6548 (((uintptr_t)req->resp_ptr + req->resp_len) >
6549 ((uintptr_t)data->client.user_virt_sb_base +
6550 data->client.sb_length))) {
6551 pr_err("cmd buf or resp buf is out of shared buffer region\n");
6552 return -EINVAL;
6553 }
6554 return 0;
6555}
6556
6557static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6558 uint32_t fd_idx, struct sg_table *sg_ptr)
6559{
6560 struct scatterlist *sg = sg_ptr->sgl;
6561 struct qseecom_sg_entry *sg_entry;
6562 void *buf;
6563 uint i;
6564 size_t size;
6565 dma_addr_t coh_pmem;
6566
6567 if (fd_idx >= MAX_ION_FD) {
6568 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6569 return -ENOMEM;
6570 }
6571	/*
6572	 * Allocate a buffer and populate it with the number of entries
6573	 * followed by each sg entry's phys addr and length; the buffer's
6574	 * phys addr is then handed back to the caller for the TA request.
6575	 */
6576 size = sizeof(uint32_t) +
6577 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6578 size = (size + PAGE_SIZE) & PAGE_MASK;
6579 buf = dma_alloc_coherent(qseecom.pdev,
6580 size, &coh_pmem, GFP_KERNEL);
6581 if (buf == NULL) {
6582 pr_err("failed to alloc memory for sg buf\n");
6583 return -ENOMEM;
6584 }
6585 *(uint32_t *)buf = sg_ptr->nents;
6586 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6587 for (i = 0; i < sg_ptr->nents; i++) {
6588 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6589 sg_entry->len = sg->length;
6590 sg_entry++;
6591 sg = sg_next(sg);
6592 }
6593 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6594 data->client.sec_buf_fd[fd_idx].vbase = buf;
6595 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6596 data->client.sec_buf_fd[fd_idx].size = size;
6597 return 0;
6598}
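
/*
 * Layout of the coherent buffer built above (illustration):
 *
 *	+----------+----------------------+----------------------+----
 *	| uint32_t | qseecom_sg_entry[0]  | qseecom_sg_entry[1]  | ...
 *	| nents    | {phys_addr, len}     | {phys_addr, len}     |
 *	+----------+----------------------+----------------------+----
 *
 * Its physical address is what __qseecom_update_qteec_req_buf() patches
 * into the TA request in place of the ion fd.
 */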
6599
6600static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6601 struct qseecom_dev_handle *data, bool cleanup)
6602{
6603 struct ion_handle *ihandle;
6604 int ret = 0;
6605 int i = 0;
6606 uint32_t *update;
6607 struct sg_table *sg_ptr = NULL;
6608 struct scatterlist *sg;
6609 struct qseecom_param_memref *memref;
6610
6611 if (req == NULL) {
6612 pr_err("Invalid address\n");
6613 return -EINVAL;
6614 }
6615 for (i = 0; i < MAX_ION_FD; i++) {
6616 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006617 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006618 req->ifd_data[i].fd);
6619 if (IS_ERR_OR_NULL(ihandle)) {
6620 pr_err("Ion client can't retrieve the handle\n");
6621 return -ENOMEM;
6622 }
6623 if ((req->req_len < sizeof(uint32_t)) ||
6624 (req->ifd_data[i].cmd_buf_offset >
6625 req->req_len - sizeof(uint32_t))) {
6626 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6627 req->req_len,
6628 req->ifd_data[i].cmd_buf_offset);
6629 return -EINVAL;
6630 }
6631 update = (uint32_t *)((char *) req->req_ptr +
6632 req->ifd_data[i].cmd_buf_offset);
6633 if (!update) {
6634 pr_err("update pointer is NULL\n");
6635 return -EINVAL;
6636 }
6637 } else {
6638 continue;
6639 }
6640 /* Populate the cmd data structure with the phys_addr */
6641 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6642 if (IS_ERR_OR_NULL(sg_ptr)) {
6643			pr_err("Ion client could not retrieve sg table\n");
6644 goto err;
6645 }
6646 sg = sg_ptr->sgl;
6647 if (sg == NULL) {
6648 pr_err("sg is NULL\n");
6649 goto err;
6650 }
6651 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6652			pr_err("Num of scat entries (%d) or length (%d) invalid\n",
6653 sg_ptr->nents, sg->length);
6654 goto err;
6655 }
6656 /* clean up buf for pre-allocated fd */
6657 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6658 (*update)) {
6659 if (data->client.sec_buf_fd[i].vbase)
6660 dma_free_coherent(qseecom.pdev,
6661 data->client.sec_buf_fd[i].size,
6662 data->client.sec_buf_fd[i].vbase,
6663 data->client.sec_buf_fd[i].pbase);
6664 memset((void *)update, 0,
6665 sizeof(struct qseecom_param_memref));
6666 memset(&(data->client.sec_buf_fd[i]), 0,
6667 sizeof(struct qseecom_sec_buf_fd_info));
6668 goto clean;
6669 }
6670
6671 if (*update == 0) {
6672 /* update buf for pre-allocated fd from secure heap*/
6673 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6674 sg_ptr);
6675 if (ret) {
6676 pr_err("Failed to handle buf for fd[%d]\n", i);
6677 goto err;
6678 }
6679 memref = (struct qseecom_param_memref *)update;
6680 memref->buffer =
6681 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6682 memref->size =
6683 (uint32_t)(data->client.sec_buf_fd[i].size);
6684 } else {
6685 /* update buf for fd from non-secure qseecom heap */
6686 if (sg_ptr->nents != 1) {
6687				pr_err("Num of scat entries (%d) invalid\n",
6688 sg_ptr->nents);
6689 goto err;
6690 }
6691 if (cleanup)
6692 *update = 0;
6693 else
6694 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6695 }
6696clean:
6697 if (cleanup) {
6698 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6699 ihandle, NULL, sg->length,
6700 ION_IOC_INV_CACHES);
6701 if (ret) {
6702 pr_err("cache operation failed %d\n", ret);
6703 goto err;
6704 }
6705 } else {
6706 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6707 ihandle, NULL, sg->length,
6708 ION_IOC_CLEAN_INV_CACHES);
6709 if (ret) {
6710 pr_err("cache operation failed %d\n", ret);
6711 goto err;
6712 }
6713 data->sglistinfo_ptr[i].indexAndFlags =
6714 SGLISTINFO_SET_INDEX_FLAG(
6715 (sg_ptr->nents == 1), 0,
6716 req->ifd_data[i].cmd_buf_offset);
6717 data->sglistinfo_ptr[i].sizeOrCount =
6718 (sg_ptr->nents == 1) ?
6719 sg->length : sg_ptr->nents;
6720 data->sglist_cnt = i + 1;
6721 }
6722 /* Deallocate the handle */
6723 if (!IS_ERR_OR_NULL(ihandle))
6724 ion_free(qseecom.ion_clnt, ihandle);
6725 }
6726 return ret;
6727err:
6728 if (!IS_ERR_OR_NULL(ihandle))
6729 ion_free(qseecom.ion_clnt, ihandle);
6730 return -ENOMEM;
6731}
6732
6733static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
6734 struct qseecom_qteec_req *req, uint32_t cmd_id)
6735{
6736 struct qseecom_command_scm_resp resp;
6737 struct qseecom_qteec_ireq ireq;
6738 struct qseecom_qteec_64bit_ireq ireq_64bit;
6739 struct qseecom_registered_app_list *ptr_app;
6740 bool found_app = false;
6741 unsigned long flags;
6742 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07006743 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006744 uint32_t reqd_len_sb_in = 0;
6745 void *cmd_buf = NULL;
6746 size_t cmd_len;
6747 struct sglist_info *table = data->sglistinfo_ptr;
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306748 void *req_ptr = NULL;
6749 void *resp_ptr = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006750
6751 ret = __qseecom_qteec_validate_msg(data, req);
6752 if (ret)
6753 return ret;
6754
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306755 req_ptr = req->req_ptr;
6756 resp_ptr = req->resp_ptr;
6757
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006758 /* find app_id & img_name from list */
6759 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6760 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6761 list) {
6762 if ((ptr_app->app_id == data->client.app_id) &&
6763 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6764 found_app = true;
6765 break;
6766 }
6767 }
6768 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6769 if (!found_app) {
6770 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6771 (char *)data->client.app_name);
6772 return -ENOENT;
6773 }
6774
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306775 req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6776 (uintptr_t)req->req_ptr);
6777 req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6778 (uintptr_t)req->resp_ptr);
6779
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006780 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6781 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
6782 ret = __qseecom_update_qteec_req_buf(
6783 (struct qseecom_qteec_modfd_req *)req, data, false);
6784 if (ret)
6785 return ret;
6786 }
6787
6788 if (qseecom.qsee_version < QSEE_VERSION_40) {
6789 ireq.app_id = data->client.app_id;
6790 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306791 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006792 ireq.req_len = req->req_len;
6793 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306794 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006795 ireq.resp_len = req->resp_len;
6796 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6797 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6798 dmac_flush_range((void *)table,
6799 (void *)table + SGLISTINFO_TABLE_SIZE);
6800 cmd_buf = (void *)&ireq;
6801 cmd_len = sizeof(struct qseecom_qteec_ireq);
6802 } else {
6803 ireq_64bit.app_id = data->client.app_id;
6804 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306805 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006806 ireq_64bit.req_len = req->req_len;
6807 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306808 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006809 ireq_64bit.resp_len = req->resp_len;
6810 if ((data->client.app_arch == ELFCLASS32) &&
6811 ((ireq_64bit.req_ptr >=
6812 PHY_ADDR_4G - ireq_64bit.req_len) ||
6813 (ireq_64bit.resp_ptr >=
6814 PHY_ADDR_4G - ireq_64bit.resp_len))){
6815 pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
6816 data->client.app_name, data->client.app_id);
6817 pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
6818 ireq_64bit.req_ptr, ireq_64bit.req_len,
6819 ireq_64bit.resp_ptr, ireq_64bit.resp_len);
6820 return -EFAULT;
6821 }
6822 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6823 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6824 dmac_flush_range((void *)table,
6825 (void *)table + SGLISTINFO_TABLE_SIZE);
6826 cmd_buf = (void *)&ireq_64bit;
6827 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6828 }
6829 if (qseecom.whitelist_support == true
6830 && cmd_id == QSEOS_TEE_OPEN_SESSION)
6831 *(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
6832 else
6833 *(uint32_t *)cmd_buf = cmd_id;
6834
6835 reqd_len_sb_in = req->req_len + req->resp_len;
6836 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6837 data->client.sb_virt,
6838 reqd_len_sb_in,
6839 ION_IOC_CLEAN_INV_CACHES);
6840 if (ret) {
6841 pr_err("cache operation failed %d\n", ret);
6842 return ret;
6843 }
6844
6845 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6846
6847 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6848 cmd_buf, cmd_len,
6849 &resp, sizeof(resp));
6850 if (ret) {
6851 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6852 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07006853 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006854 }
6855
6856 if (qseecom.qsee_reentrancy_support) {
6857 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07006858 if (ret)
6859 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006860 } else {
6861 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6862 ret = __qseecom_process_incomplete_cmd(data, &resp);
6863 if (ret) {
6864 pr_err("process_incomplete_cmd failed err: %d\n",
6865 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07006866 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006867 }
6868 } else {
6869 if (resp.result != QSEOS_RESULT_SUCCESS) {
6870 pr_err("Response result %d not supported\n",
6871 resp.result);
6872 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07006873 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006874 }
6875 }
6876 }
Zhen Kong4af480e2017-09-19 14:34:16 -07006877exit:
6878 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006879 data->client.sb_virt, data->client.sb_length,
6880 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07006881 if (ret2) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006882 pr_err("cache operation failed %d\n", ret2);
Zhen Kong4af480e2017-09-19 14:34:16 -07006883 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006884 }
6885
6886 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6887 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
Zhen Kong4af480e2017-09-19 14:34:16 -07006888 ret2 = __qseecom_update_qteec_req_buf(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006889 (struct qseecom_qteec_modfd_req *)req, data, true);
Zhen Kong4af480e2017-09-19 14:34:16 -07006890 if (ret2)
6891 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006892 }
Zhen Kong4af480e2017-09-19 14:34:16 -07006893 return ret;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006894}
6895
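/*
 * GP TEE open-session ioctl handler: copy the modfd request from userspace
 * and issue QSEOS_TEE_OPEN_SESSION through the shared QTEEC helper.
 */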
6896static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6897 void __user *argp)
6898{
6899 struct qseecom_qteec_modfd_req req;
6900 int ret = 0;
6901
6902 ret = copy_from_user(&req, argp,
6903 sizeof(struct qseecom_qteec_modfd_req));
6904 if (ret) {
6905 pr_err("copy_from_user failed\n");
6906 return -EFAULT;
6907 }
6908 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6909 QSEOS_TEE_OPEN_SESSION);
6910
6911 return ret;
6912}
6913
6914static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6915 void __user *argp)
6916{
6917 struct qseecom_qteec_req req;
6918 int ret = 0;
6919
6920 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6921 if (ret) {
6922 pr_err("copy_from_user failed\n");
6923 return -EFAULT;
6924 }
6925 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6926 return ret;
6927}
6928
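/*
 * GP TEE invoke-command ioctl handler with ion fd support: validates the
 * modfd request, updates the request buffer for any passed ion fds via
 * __qseecom_update_qteec_req_buf, then issues QSEOS_TEE_INVOKE_COMMAND
 * (or its whitelist variant) to TZ and post-processes the response.
 */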
6929static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
6930 void __user *argp)
6931{
6932 struct qseecom_qteec_modfd_req req;
6933 struct qseecom_command_scm_resp resp;
6934 struct qseecom_qteec_ireq ireq;
6935 struct qseecom_qteec_64bit_ireq ireq_64bit;
6936 struct qseecom_registered_app_list *ptr_app;
6937 bool found_app = false;
6938 unsigned long flags;
6939 int ret = 0;
6940 int i = 0;
6941 uint32_t reqd_len_sb_in = 0;
6942 void *cmd_buf = NULL;
6943 size_t cmd_len;
6944 struct sglist_info *table = data->sglistinfo_ptr;
6945 void *req_ptr = NULL;
6946 void *resp_ptr = NULL;
6947
6948 ret = copy_from_user(&req, argp,
6949 sizeof(struct qseecom_qteec_modfd_req));
6950 if (ret) {
6951 pr_err("copy_from_user failed\n");
6952 return -EFAULT;
6953 }
6954 ret = __qseecom_qteec_validate_msg(data,
6955 (struct qseecom_qteec_req *)(&req));
6956 if (ret)
6957 return ret;
6958 req_ptr = req.req_ptr;
6959 resp_ptr = req.resp_ptr;
6960
6961 /* find app_id & img_name from list */
6962 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6963 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6964 list) {
6965 if ((ptr_app->app_id == data->client.app_id) &&
6966 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6967 found_app = true;
6968 break;
6969 }
6970 }
6971 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6972 if (!found_app) {
6973 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6974 (char *)data->client.app_name);
6975 return -ENOENT;
6976 }
6977
6978 /* validate offsets */
6979 for (i = 0; i < MAX_ION_FD; i++) {
6980 if (req.ifd_data[i].fd) {
6981 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
6982 return -EINVAL;
6983 }
6984 }
6985 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6986 (uintptr_t)req.req_ptr);
6987 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6988 (uintptr_t)req.resp_ptr);
6989 ret = __qseecom_update_qteec_req_buf(&req, data, false);
6990 if (ret)
6991 return ret;
6992
6993 if (qseecom.qsee_version < QSEE_VERSION_40) {
6994 ireq.app_id = data->client.app_id;
6995 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6996 (uintptr_t)req_ptr);
6997 ireq.req_len = req.req_len;
6998 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6999 (uintptr_t)resp_ptr);
7000 ireq.resp_len = req.resp_len;
7001 cmd_buf = (void *)&ireq;
7002 cmd_len = sizeof(struct qseecom_qteec_ireq);
7003 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
7004 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7005 dmac_flush_range((void *)table,
7006 (void *)table + SGLISTINFO_TABLE_SIZE);
7007 } else {
7008 ireq_64bit.app_id = data->client.app_id;
7009 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7010 (uintptr_t)req_ptr);
7011 ireq_64bit.req_len = req.req_len;
7012 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7013 (uintptr_t)resp_ptr);
7014 ireq_64bit.resp_len = req.resp_len;
7015 cmd_buf = (void *)&ireq_64bit;
7016 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
7017 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
7018 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7019 dmac_flush_range((void *)table,
7020 (void *)table + SGLISTINFO_TABLE_SIZE);
7021 }
7022 reqd_len_sb_in = req.req_len + req.resp_len;
7023 if (qseecom.whitelist_support == true)
7024 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
7025 else
7026 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
7027
7028 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7029 data->client.sb_virt,
7030 reqd_len_sb_in,
7031 ION_IOC_CLEAN_INV_CACHES);
7032 if (ret) {
7033 pr_err("cache operation failed %d\n", ret);
7034 return ret;
7035 }
7036
7037 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
7038
7039 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
7040 cmd_buf, cmd_len,
7041 &resp, sizeof(resp));
7042 if (ret) {
7043 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
7044 ret, data->client.app_id);
7045 return ret;
7046 }
7047
7048 if (qseecom.qsee_reentrancy_support) {
7049 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
7050 } else {
7051 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
7052 ret = __qseecom_process_incomplete_cmd(data, &resp);
7053 if (ret) {
7054 pr_err("process_incomplete_cmd failed err: %d\n",
7055 ret);
7056 return ret;
7057 }
7058 } else {
7059 if (resp.result != QSEOS_RESULT_SUCCESS) {
7060 pr_err("Response result %d not supported\n",
7061 resp.result);
7062 ret = -EINVAL;
7063 }
7064 }
7065 }
7066 ret = __qseecom_update_qteec_req_buf(&req, data, true);
7067 if (ret)
7068 return ret;
7069
7070 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7071 data->client.sb_virt, data->client.sb_length,
7072 ION_IOC_INV_CACHES);
7073 if (ret) {
7074 pr_err("cache operation failed %d\n", ret);
7075 return ret;
7076 }
7077 return 0;
7078}
7079
7080static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
7081 void __user *argp)
7082{
7083 struct qseecom_qteec_modfd_req req;
7084 int ret = 0;
7085
7086 ret = copy_from_user(&req, argp,
7087 sizeof(struct qseecom_qteec_modfd_req));
7088 if (ret) {
7089 pr_err("copy_from_user failed\n");
7090 return -EFAULT;
7091 }
7092 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
7093 QSEOS_TEE_REQUEST_CANCELLATION);
7094
7095 return ret;
7096}
7097
7098static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
7099{
7100 if (data->sglist_cnt) {
7101 memset(data->sglistinfo_ptr, 0,
7102 SGLISTINFO_TABLE_SIZE);
7103 data->sglist_cnt = 0;
7104 }
7105}
7106
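/*
 * Main ioctl dispatcher: checks that the handle type matches the requested
 * command, serializes access through app_access_lock or
 * listener_access_lock, and votes for bus bandwidth and crypto clocks
 * where a command needs them before calling the per-command handler.
 */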
7107static inline long qseecom_ioctl(struct file *file,
7108 unsigned int cmd, unsigned long arg)
7109{
7110 int ret = 0;
7111 struct qseecom_dev_handle *data = file->private_data;
7112 void __user *argp = (void __user *) arg;
7113 bool perf_enabled = false;
7114
7115 if (!data) {
7116 pr_err("Invalid/uninitialized device handle\n");
7117 return -EINVAL;
7118 }
7119
7120 if (data->abort) {
7121 pr_err("Aborting qseecom driver\n");
7122 return -ENODEV;
7123 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007124 if (cmd != QSEECOM_IOCTL_RECEIVE_REQ &&
7125 cmd != QSEECOM_IOCTL_SEND_RESP_REQ &&
7126 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP &&
7127 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64)
Zhen Kongc4c162a2019-01-23 12:07:12 -08007128 __wakeup_unregister_listener_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007129
7130 switch (cmd) {
7131 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
7132 if (data->type != QSEECOM_GENERIC) {
7133 pr_err("reg lstnr req: invalid handle (%d)\n",
7134 data->type);
7135 ret = -EINVAL;
7136 break;
7137 }
7138 pr_debug("ioctl register_listener_req()\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08007139 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007140 atomic_inc(&data->ioctl_count);
7141 data->type = QSEECOM_LISTENER_SERVICE;
7142 ret = qseecom_register_listener(data, argp);
7143 atomic_dec(&data->ioctl_count);
7144 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007145 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007146 if (ret)
7147 pr_err("failed qseecom_register_listener: %d\n", ret);
7148 break;
7149 }
Neeraj Sonib30ac1f2018-04-17 14:48:42 +05307150 case QSEECOM_IOCTL_SET_ICE_INFO: {
7151 struct qseecom_ice_data_t ice_data;
7152
7153 ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
7154 if (ret) {
7155 pr_err("copy_from_user failed\n");
7156 return -EFAULT;
7157 }
7158 qcom_ice_set_fde_flag(ice_data.flag);
7159 break;
7160 }
7161
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007162 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
7163 if ((data->listener.id == 0) ||
7164 (data->type != QSEECOM_LISTENER_SERVICE)) {
7165 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
7166 data->type, data->listener.id);
7167 ret = -EINVAL;
7168 break;
7169 }
7170 pr_debug("ioctl unregister_listener_req()\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08007171 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007172 atomic_inc(&data->ioctl_count);
7173 ret = qseecom_unregister_listener(data);
7174 atomic_dec(&data->ioctl_count);
7175 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007176 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007177 if (ret)
7178 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7179 break;
7180 }
7181 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7182 if ((data->client.app_id == 0) ||
7183 (data->type != QSEECOM_CLIENT_APP)) {
7184 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7185 data->type, data->client.app_id);
7186 ret = -EINVAL;
7187 break;
7188 }
7189 /* Only one client allowed here at a time */
7190 mutex_lock(&app_access_lock);
7191 if (qseecom.support_bus_scaling) {
7192 /* register bus bw in case the client doesn't do it */
7193 if (!data->mode) {
7194 mutex_lock(&qsee_bw_mutex);
7195 __qseecom_register_bus_bandwidth_needs(
7196 data, HIGH);
7197 mutex_unlock(&qsee_bw_mutex);
7198 }
7199 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7200 if (ret) {
7201 pr_err("Failed to set bw.\n");
7202 ret = -EINVAL;
7203 mutex_unlock(&app_access_lock);
7204 break;
7205 }
7206 }
7207 /*
7208 * On targets where crypto clock is handled by HLOS,
7209 * if clk_access_cnt is zero and perf_enabled is false,
7210 * then the crypto clock was not enabled before sending the cmd
7211 * to TZ, so qseecom enables the clock to avoid service failure.
7212 */
7213 if (!qseecom.no_clock_support &&
7214 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7215 pr_debug("ce clock is not enabled!\n");
7216 ret = qseecom_perf_enable(data);
7217 if (ret) {
7218 pr_err("Failed to vote for clock with err %d\n",
7219 ret);
7220 mutex_unlock(&app_access_lock);
7221 ret = -EINVAL;
7222 break;
7223 }
7224 perf_enabled = true;
7225 }
7226 atomic_inc(&data->ioctl_count);
7227 ret = qseecom_send_cmd(data, argp);
7228 if (qseecom.support_bus_scaling)
7229 __qseecom_add_bw_scale_down_timer(
7230 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7231 if (perf_enabled) {
7232 qsee_disable_clock_vote(data, CLK_DFAB);
7233 qsee_disable_clock_vote(data, CLK_SFPB);
7234 }
7235 atomic_dec(&data->ioctl_count);
7236 wake_up_all(&data->abort_wq);
7237 mutex_unlock(&app_access_lock);
7238 if (ret)
7239 pr_err("failed qseecom_send_cmd: %d\n", ret);
7240 break;
7241 }
7242 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7243 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7244 if ((data->client.app_id == 0) ||
7245 (data->type != QSEECOM_CLIENT_APP)) {
7246 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7247 data->type, data->client.app_id);
7248 ret = -EINVAL;
7249 break;
7250 }
7251 /* Only one client allowed here at a time */
7252 mutex_lock(&app_access_lock);
7253 if (qseecom.support_bus_scaling) {
7254 if (!data->mode) {
7255 mutex_lock(&qsee_bw_mutex);
7256 __qseecom_register_bus_bandwidth_needs(
7257 data, HIGH);
7258 mutex_unlock(&qsee_bw_mutex);
7259 }
7260 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7261 if (ret) {
7262 pr_err("Failed to set bw.\n");
7263 mutex_unlock(&app_access_lock);
7264 ret = -EINVAL;
7265 break;
7266 }
7267 }
7268 /*
7269 * On targets where crypto clock is handled by HLOS,
7270 * if clk_access_cnt is zero and perf_enabled is false,
7271 * then the crypto clock was not enabled before sending the cmd
7272 * to TZ, so qseecom enables the clock to avoid service failure.
7273 */
7274 if (!qseecom.no_clock_support &&
7275 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7276 pr_debug("ce clock is not enabled!\n");
7277 ret = qseecom_perf_enable(data);
7278 if (ret) {
7279 pr_err("Failed to vote for clock with err %d\n",
7280 ret);
7281 mutex_unlock(&app_access_lock);
7282 ret = -EINVAL;
7283 break;
7284 }
7285 perf_enabled = true;
7286 }
7287 atomic_inc(&data->ioctl_count);
7288 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7289 ret = qseecom_send_modfd_cmd(data, argp);
7290 else
7291 ret = qseecom_send_modfd_cmd_64(data, argp);
7292 if (qseecom.support_bus_scaling)
7293 __qseecom_add_bw_scale_down_timer(
7294 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7295 if (perf_enabled) {
7296 qsee_disable_clock_vote(data, CLK_DFAB);
7297 qsee_disable_clock_vote(data, CLK_SFPB);
7298 }
7299 atomic_dec(&data->ioctl_count);
7300 wake_up_all(&data->abort_wq);
7301 mutex_unlock(&app_access_lock);
7302 if (ret)
7303 pr_err("failed qseecom_send_modfd_cmd: %d\n", ret);
7304 __qseecom_clean_data_sglistinfo(data);
7305 break;
7306 }
7307 case QSEECOM_IOCTL_RECEIVE_REQ: {
7308 if ((data->listener.id == 0) ||
7309 (data->type != QSEECOM_LISTENER_SERVICE)) {
7310 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7311 data->type, data->listener.id);
7312 ret = -EINVAL;
7313 break;
7314 }
7315 atomic_inc(&data->ioctl_count);
7316 ret = qseecom_receive_req(data);
7317 atomic_dec(&data->ioctl_count);
7318 wake_up_all(&data->abort_wq);
7319 if (ret && (ret != -ERESTARTSYS))
7320 pr_err("failed qseecom_receive_req: %d\n", ret);
7321 break;
7322 }
7323 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7324 if ((data->listener.id == 0) ||
7325 (data->type != QSEECOM_LISTENER_SERVICE)) {
7326 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7327 data->type, data->listener.id);
7328 ret = -EINVAL;
7329 break;
7330 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007331 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007332 atomic_inc(&data->ioctl_count);
7333 if (!qseecom.qsee_reentrancy_support)
7334 ret = qseecom_send_resp();
7335 else
7336 ret = qseecom_reentrancy_send_resp(data);
7337 atomic_dec(&data->ioctl_count);
7338 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007339 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007340 if (ret)
7341 pr_err("failed qseecom_send_resp: %d\n", ret);
7342 break;
7343 }
7344 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7345 if ((data->type != QSEECOM_CLIENT_APP) &&
7346 (data->type != QSEECOM_GENERIC) &&
7347 (data->type != QSEECOM_SECURE_SERVICE)) {
7348 pr_err("set mem param req: invalid handle (%d)\n",
7349 data->type);
7350 ret = -EINVAL;
7351 break;
7352 }
7353 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7354 mutex_lock(&app_access_lock);
7355 atomic_inc(&data->ioctl_count);
7356 ret = qseecom_set_client_mem_param(data, argp);
7357 atomic_dec(&data->ioctl_count);
7358 mutex_unlock(&app_access_lock);
7359 if (ret)
7360 pr_err("failed qseecom_set_mem_param request: %d\n",
7361 ret);
7362 break;
7363 }
7364 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7365 if ((data->type != QSEECOM_GENERIC) &&
7366 (data->type != QSEECOM_CLIENT_APP)) {
7367 pr_err("load app req: invalid handle (%d)\n",
7368 data->type);
7369 ret = -EINVAL;
7370 break;
7371 }
7372 data->type = QSEECOM_CLIENT_APP;
7373 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7374 mutex_lock(&app_access_lock);
7375 atomic_inc(&data->ioctl_count);
7376 ret = qseecom_load_app(data, argp);
7377 atomic_dec(&data->ioctl_count);
7378 mutex_unlock(&app_access_lock);
7379 if (ret)
7380 pr_err("failed load_app request: %d\n", ret);
7381 break;
7382 }
7383 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7384 if ((data->client.app_id == 0) ||
7385 (data->type != QSEECOM_CLIENT_APP)) {
7386 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7387 data->type, data->client.app_id);
7388 ret = -EINVAL;
7389 break;
7390 }
7391 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7392 mutex_lock(&app_access_lock);
7393 atomic_inc(&data->ioctl_count);
7394 ret = qseecom_unload_app(data, false);
7395 atomic_dec(&data->ioctl_count);
7396 mutex_unlock(&app_access_lock);
7397 if (ret)
7398 pr_err("failed unload_app request: %d\n", ret);
7399 break;
7400 }
7401 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7402 atomic_inc(&data->ioctl_count);
7403 ret = qseecom_get_qseos_version(data, argp);
7404 if (ret)
7405 pr_err("qseecom_get_qseos_version: %d\n", ret);
7406 atomic_dec(&data->ioctl_count);
7407 break;
7408 }
7409 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7410 if ((data->type != QSEECOM_GENERIC) &&
7411 (data->type != QSEECOM_CLIENT_APP)) {
7412 pr_err("perf enable req: invalid handle (%d)\n",
7413 data->type);
7414 ret = -EINVAL;
7415 break;
7416 }
7417 if ((data->type == QSEECOM_CLIENT_APP) &&
7418 (data->client.app_id == 0)) {
7419 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7420 data->type, data->client.app_id);
7421 ret = -EINVAL;
7422 break;
7423 }
7424 atomic_inc(&data->ioctl_count);
7425 if (qseecom.support_bus_scaling) {
7426 mutex_lock(&qsee_bw_mutex);
7427 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7428 mutex_unlock(&qsee_bw_mutex);
7429 } else {
7430 ret = qseecom_perf_enable(data);
7431 if (ret)
7432 pr_err("Fail to vote for clocks %d\n", ret);
7433 }
7434 atomic_dec(&data->ioctl_count);
7435 break;
7436 }
7437 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7438 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7439 (data->type != QSEECOM_CLIENT_APP)) {
7440 pr_err("perf disable req: invalid handle (%d)\n",
7441 data->type);
7442 ret = -EINVAL;
7443 break;
7444 }
7445 if ((data->type == QSEECOM_CLIENT_APP) &&
7446 (data->client.app_id == 0)) {
7447 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7448 data->type, data->client.app_id);
7449 ret = -EINVAL;
7450 break;
7451 }
7452 atomic_inc(&data->ioctl_count);
7453 if (!qseecom.support_bus_scaling) {
7454 qsee_disable_clock_vote(data, CLK_DFAB);
7455 qsee_disable_clock_vote(data, CLK_SFPB);
7456 } else {
7457 mutex_lock(&qsee_bw_mutex);
7458 qseecom_unregister_bus_bandwidth_needs(data);
7459 mutex_unlock(&qsee_bw_mutex);
7460 }
7461 atomic_dec(&data->ioctl_count);
7462 break;
7463 }
7464
7465 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7466 /* If crypto clock is not handled by HLOS, return directly. */
7467 if (qseecom.no_clock_support) {
7468 pr_debug("crypto clock is not handled by HLOS\n");
7469 break;
7470 }
7471 if ((data->client.app_id == 0) ||
7472 (data->type != QSEECOM_CLIENT_APP)) {
7473 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7474 data->type, data->client.app_id);
7475 ret = -EINVAL;
7476 break;
7477 }
7478 atomic_inc(&data->ioctl_count);
7479 ret = qseecom_scale_bus_bandwidth(data, argp);
7480 atomic_dec(&data->ioctl_count);
7481 break;
7482 }
7483 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7484 if (data->type != QSEECOM_GENERIC) {
7485 pr_err("load ext elf req: invalid client handle (%d)\n",
7486 data->type);
7487 ret = -EINVAL;
7488 break;
7489 }
7490 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7491 data->released = true;
7492 mutex_lock(&app_access_lock);
7493 atomic_inc(&data->ioctl_count);
7494 ret = qseecom_load_external_elf(data, argp);
7495 atomic_dec(&data->ioctl_count);
7496 mutex_unlock(&app_access_lock);
7497 if (ret)
7498 pr_err("failed load_external_elf request: %d\n", ret);
7499 break;
7500 }
7501 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7502 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7503 pr_err("unload ext elf req: invalid handle (%d)\n",
7504 data->type);
7505 ret = -EINVAL;
7506 break;
7507 }
7508 data->released = true;
7509 mutex_lock(&app_access_lock);
7510 atomic_inc(&data->ioctl_count);
7511 ret = qseecom_unload_external_elf(data);
7512 atomic_dec(&data->ioctl_count);
7513 mutex_unlock(&app_access_lock);
7514 if (ret)
7515 pr_err("failed unload_app request: %d\n", ret);
7516 break;
7517 }
7518 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7519 data->type = QSEECOM_CLIENT_APP;
7520 mutex_lock(&app_access_lock);
7521 atomic_inc(&data->ioctl_count);
7522 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7523 ret = qseecom_query_app_loaded(data, argp);
7524 atomic_dec(&data->ioctl_count);
7525 mutex_unlock(&app_access_lock);
7526 break;
7527 }
7528 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7529 if (data->type != QSEECOM_GENERIC) {
7530 pr_err("send cmd svc req: invalid handle (%d)\n",
7531 data->type);
7532 ret = -EINVAL;
7533 break;
7534 }
7535 data->type = QSEECOM_SECURE_SERVICE;
7536 if (qseecom.qsee_version < QSEE_VERSION_03) {
7537 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7538 qseecom.qsee_version);
7539 return -EINVAL;
7540 }
7541 mutex_lock(&app_access_lock);
7542 atomic_inc(&data->ioctl_count);
7543 ret = qseecom_send_service_cmd(data, argp);
7544 atomic_dec(&data->ioctl_count);
7545 mutex_unlock(&app_access_lock);
7546 break;
7547 }
7548 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7549 if (!(qseecom.support_pfe || qseecom.support_fde))
7550 pr_err("Features requiring key init not supported\n");
7551 if (data->type != QSEECOM_GENERIC) {
7552 pr_err("create key req: invalid handle (%d)\n",
7553 data->type);
7554 ret = -EINVAL;
7555 break;
7556 }
7557 if (qseecom.qsee_version < QSEE_VERSION_05) {
7558 pr_err("Create Key feature unsupported: qsee ver %u\n",
7559 qseecom.qsee_version);
7560 return -EINVAL;
7561 }
7562 data->released = true;
7563 mutex_lock(&app_access_lock);
7564 atomic_inc(&data->ioctl_count);
7565 ret = qseecom_create_key(data, argp);
7566 if (ret)
7567 pr_err("failed to create encryption key: %d\n", ret);
7568
7569 atomic_dec(&data->ioctl_count);
7570 mutex_unlock(&app_access_lock);
7571 break;
7572 }
7573 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7574 if (!(qseecom.support_pfe || qseecom.support_fde))
7575 pr_err("Features requiring key init not supported\n");
7576 if (data->type != QSEECOM_GENERIC) {
7577 pr_err("wipe key req: invalid handle (%d)\n",
7578 data->type);
7579 ret = -EINVAL;
7580 break;
7581 }
7582 if (qseecom.qsee_version < QSEE_VERSION_05) {
7583 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7584 qseecom.qsee_version);
7585 return -EINVAL;
7586 }
7587 data->released = true;
7588 mutex_lock(&app_access_lock);
7589 atomic_inc(&data->ioctl_count);
7590 ret = qseecom_wipe_key(data, argp);
7591 if (ret)
7592 pr_err("failed to wipe encryption key: %d\n", ret);
7593 atomic_dec(&data->ioctl_count);
7594 mutex_unlock(&app_access_lock);
7595 break;
7596 }
7597 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7598 if (!(qseecom.support_pfe || qseecom.support_fde))
7599 pr_err("Features requiring key init not supported\n");
7600 if (data->type != QSEECOM_GENERIC) {
7601 pr_err("update key req: invalid handle (%d)\n",
7602 data->type);
7603 ret = -EINVAL;
7604 break;
7605 }
7606 if (qseecom.qsee_version < QSEE_VERSION_05) {
7607 pr_err("Update Key feature unsupported in qsee ver %u\n",
7608 qseecom.qsee_version);
7609 return -EINVAL;
7610 }
7611 data->released = true;
7612 mutex_lock(&app_access_lock);
7613 atomic_inc(&data->ioctl_count);
7614 ret = qseecom_update_key_user_info(data, argp);
7615 if (ret)
7616 pr_err("failed to update key user info: %d\n", ret);
7617 atomic_dec(&data->ioctl_count);
7618 mutex_unlock(&app_access_lock);
7619 break;
7620 }
7621 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7622 if (data->type != QSEECOM_GENERIC) {
7623 pr_err("save part hash req: invalid handle (%d)\n",
7624 data->type);
7625 ret = -EINVAL;
7626 break;
7627 }
7628 data->released = true;
7629 mutex_lock(&app_access_lock);
7630 atomic_inc(&data->ioctl_count);
7631 ret = qseecom_save_partition_hash(argp);
7632 atomic_dec(&data->ioctl_count);
7633 mutex_unlock(&app_access_lock);
7634 break;
7635 }
7636 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7637 if (data->type != QSEECOM_GENERIC) {
7638 pr_err("ES activated req: invalid handle (%d)\n",
7639 data->type);
7640 ret = -EINVAL;
7641 break;
7642 }
7643 data->released = true;
7644 mutex_lock(&app_access_lock);
7645 atomic_inc(&data->ioctl_count);
7646 ret = qseecom_is_es_activated(argp);
7647 atomic_dec(&data->ioctl_count);
7648 mutex_unlock(&app_access_lock);
7649 break;
7650 }
7651 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7652 if (data->type != QSEECOM_GENERIC) {
7653 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7654 data->type);
7655 ret = -EINVAL;
7656 break;
7657 }
7658 data->released = true;
7659 mutex_lock(&app_access_lock);
7660 atomic_inc(&data->ioctl_count);
7661 ret = qseecom_mdtp_cipher_dip(argp);
7662 atomic_dec(&data->ioctl_count);
7663 mutex_unlock(&app_access_lock);
7664 break;
7665 }
7666 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7667 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7668 if ((data->listener.id == 0) ||
7669 (data->type != QSEECOM_LISTENER_SERVICE)) {
7670 pr_err("send modfd resp: invalid handle (%d), lid(%d)\n",
7671 data->type, data->listener.id);
7672 ret = -EINVAL;
7673 break;
7674 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007675 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007676 atomic_inc(&data->ioctl_count);
7677 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7678 ret = qseecom_send_modfd_resp(data, argp);
7679 else
7680 ret = qseecom_send_modfd_resp_64(data, argp);
7681 atomic_dec(&data->ioctl_count);
7682 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007683 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007684 if (ret)
7685 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7686 __qseecom_clean_data_sglistinfo(data);
7687 break;
7688 }
7689 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7690 if ((data->client.app_id == 0) ||
7691 (data->type != QSEECOM_CLIENT_APP)) {
7692 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7693 data->type, data->client.app_id);
7694 ret = -EINVAL;
7695 break;
7696 }
7697 if (qseecom.qsee_version < QSEE_VERSION_40) {
7698 pr_err("GP feature unsupported: qsee ver %u\n",
7699 qseecom.qsee_version);
7700 return -EINVAL;
7701 }
7702 /* Only one client allowed here at a time */
7703 mutex_lock(&app_access_lock);
7704 atomic_inc(&data->ioctl_count);
7705 ret = qseecom_qteec_open_session(data, argp);
7706 atomic_dec(&data->ioctl_count);
7707 wake_up_all(&data->abort_wq);
7708 mutex_unlock(&app_access_lock);
7709 if (ret)
7710 pr_err("failed open_session_cmd: %d\n", ret);
7711 __qseecom_clean_data_sglistinfo(data);
7712 break;
7713 }
7714 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7715 if ((data->client.app_id == 0) ||
7716 (data->type != QSEECOM_CLIENT_APP)) {
7717 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7718 data->type, data->client.app_id);
7719 ret = -EINVAL;
7720 break;
7721 }
7722 if (qseecom.qsee_version < QSEE_VERSION_40) {
7723 pr_err("GP feature unsupported: qsee ver %u\n",
7724 qseecom.qsee_version);
7725 return -EINVAL;
7726 }
7727 /* Only one client allowed here at a time */
7728 mutex_lock(&app_access_lock);
7729 atomic_inc(&data->ioctl_count);
7730 ret = qseecom_qteec_close_session(data, argp);
7731 atomic_dec(&data->ioctl_count);
7732 wake_up_all(&data->abort_wq);
7733 mutex_unlock(&app_access_lock);
7734 if (ret)
7735 pr_err("failed close_session_cmd: %d\n", ret);
7736 break;
7737 }
7738 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7739 if ((data->client.app_id == 0) ||
7740 (data->type != QSEECOM_CLIENT_APP)) {
7741 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7742 data->type, data->client.app_id);
7743 ret = -EINVAL;
7744 break;
7745 }
7746 if (qseecom.qsee_version < QSEE_VERSION_40) {
7747 pr_err("GP feature unsupported: qsee ver %u\n",
7748 qseecom.qsee_version);
7749 return -EINVAL;
7750 }
7751 /* Only one client allowed here at a time */
7752 mutex_lock(&app_access_lock);
7753 atomic_inc(&data->ioctl_count);
7754 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7755 atomic_dec(&data->ioctl_count);
7756 wake_up_all(&data->abort_wq);
7757 mutex_unlock(&app_access_lock);
7758 if (ret)
7759 pr_err("failed Invoke cmd: %d\n", ret);
7760 __qseecom_clean_data_sglistinfo(data);
7761 break;
7762 }
7763 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7764 if ((data->client.app_id == 0) ||
7765 (data->type != QSEECOM_CLIENT_APP)) {
7766 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7767 data->type, data->client.app_id);
7768 ret = -EINVAL;
7769 break;
7770 }
7771 if (qseecom.qsee_version < QSEE_VERSION_40) {
7772 pr_err("GP feature unsupported: qsee ver %u\n",
7773 qseecom.qsee_version);
7774 return -EINVAL;
7775 }
7776 /* Only one client allowed here at a time */
7777 mutex_lock(&app_access_lock);
7778 atomic_inc(&data->ioctl_count);
7779 ret = qseecom_qteec_request_cancellation(data, argp);
7780 atomic_dec(&data->ioctl_count);
7781 wake_up_all(&data->abort_wq);
7782 mutex_unlock(&app_access_lock);
7783 if (ret)
7784 pr_err("failed request_cancellation: %d\n", ret);
7785 break;
7786 }
7787 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7788 atomic_inc(&data->ioctl_count);
7789 ret = qseecom_get_ce_info(data, argp);
7790 if (ret)
7791 pr_err("failed to get ce pipe info: %d\n", ret);
7792 atomic_dec(&data->ioctl_count);
7793 break;
7794 }
7795 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7796 atomic_inc(&data->ioctl_count);
7797 ret = qseecom_free_ce_info(data, argp);
7798 pr_err("failed to free ce pipe info: %d\n", ret);
7799 pr_err("failed get fde ce pipe info: %d\n", ret);
7800 atomic_dec(&data->ioctl_count);
7801 break;
7802 }
7803 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7804 atomic_inc(&data->ioctl_count);
7805 ret = qseecom_query_ce_info(data, argp);
7806 pr_err("failed to query ce pipe info: %d\n", ret);
7807 pr_err("failed get fde ce pipe info: %d\n", ret);
7808 atomic_dec(&data->ioctl_count);
7809 break;
7810 }
7811 default:
7812 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7813 return -EINVAL;
7814 }
7815 return ret;
7816}
7817
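/*
 * open(): allocate a per-fd handle; it starts life as QSEECOM_GENERIC and
 * is re-typed by the first listener/app/service ioctl issued on it.
 */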
7818static int qseecom_open(struct inode *inode, struct file *file)
7819{
7820 int ret = 0;
7821 struct qseecom_dev_handle *data;
7822
7823 data = kzalloc(sizeof(*data), GFP_KERNEL);
7824 if (!data)
7825 return -ENOMEM;
7826 file->private_data = data;
7827 data->abort = 0;
7828 data->type = QSEECOM_GENERIC;
7829 data->released = false;
7830 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7831 data->mode = INACTIVE;
7832 init_waitqueue_head(&data->abort_wq);
7833 atomic_set(&data->ioctl_count, 0);
7834 return ret;
7835}
7836
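/*
 * release(): undo whatever the handle was used for (unregister a listener,
 * unload a client app, or unmap ion memory), then drop any outstanding
 * bus-bandwidth or clock votes. Listener private data is deliberately not
 * freed here (free_private_data is cleared), presumably because the
 * deferred listener-unregister handling still references it.
 */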
7837static int qseecom_release(struct inode *inode, struct file *file)
7838{
7839 struct qseecom_dev_handle *data = file->private_data;
7840 int ret = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08007841 bool free_private_data = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007842
7843 if (data->released == false) {
7844 pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
7845 data->type, data->mode, data);
7846 switch (data->type) {
7847 case QSEECOM_LISTENER_SERVICE:
Zhen Kongbcdeda22018-11-16 13:50:51 -08007848 pr_debug("release lsnr svc %d\n", data->listener.id);
7849 free_private_data = false;
7850 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007851 ret = qseecom_unregister_listener(data);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08007852 data->listener.release_called = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08007853 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007854 break;
7855 case QSEECOM_CLIENT_APP:
7856 mutex_lock(&app_access_lock);
7857 ret = qseecom_unload_app(data, true);
7858 mutex_unlock(&app_access_lock);
7859 break;
7860 case QSEECOM_SECURE_SERVICE:
7861 case QSEECOM_GENERIC:
7862 ret = qseecom_unmap_ion_allocated_memory(data);
7863 if (ret)
7864 pr_err("Ion Unmap failed\n");
7865 break;
7866 case QSEECOM_UNAVAILABLE_CLIENT_APP:
7867 break;
7868 default:
7869 pr_err("Unsupported clnt_handle_type %d\n",
7870 data->type);
7871 break;
7872 }
7873 }
7874
7875 if (qseecom.support_bus_scaling) {
7876 mutex_lock(&qsee_bw_mutex);
7877 if (data->mode != INACTIVE) {
7878 qseecom_unregister_bus_bandwidth_needs(data);
7879 if (qseecom.cumulative_mode == INACTIVE) {
7880 ret = __qseecom_set_msm_bus_request(INACTIVE);
7881 if (ret)
7882 pr_err("Fail to scale down bus\n");
7883 }
7884 }
7885 mutex_unlock(&qsee_bw_mutex);
7886 } else {
7887 if (data->fast_load_enabled == true)
7888 qsee_disable_clock_vote(data, CLK_SFPB);
7889 if (data->perf_enabled == true)
7890 qsee_disable_clock_vote(data, CLK_DFAB);
7891 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007892
Zhen Kongbcdeda22018-11-16 13:50:51 -08007893 if (free_private_data)
7894 kfree(data);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007895 return ret;
7896}
7897
7898#ifdef CONFIG_COMPAT
7899#include "compat_qseecom.c"
7900#else
7901#define compat_qseecom_ioctl NULL
7902#endif
7903
7904static const struct file_operations qseecom_fops = {
7905 .owner = THIS_MODULE,
7906 .unlocked_ioctl = qseecom_ioctl,
7907 .compat_ioctl = compat_qseecom_ioctl,
7908 .open = qseecom_open,
7909 .release = qseecom_release
7910};
7911
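/*
 * Look up and configure the core/iface/bus clocks for the requested CE
 * hardware instance (CLK_QSEE or CLK_CE_DRV). When no_clock_support is
 * set, all clock handles are left NULL and the function returns 0.
 */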
7912static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7913{
7914 int rc = 0;
7915 struct device *pdev;
7916 struct qseecom_clk *qclk;
7917 char *core_clk_src = NULL;
7918 char *core_clk = NULL;
7919 char *iface_clk = NULL;
7920 char *bus_clk = NULL;
7921
7922 switch (ce) {
7923 case CLK_QSEE: {
7924 core_clk_src = "core_clk_src";
7925 core_clk = "core_clk";
7926 iface_clk = "iface_clk";
7927 bus_clk = "bus_clk";
7928 qclk = &qseecom.qsee;
7929 qclk->instance = CLK_QSEE;
7930 break;
7931 }
7932 case CLK_CE_DRV: {
7933 core_clk_src = "ce_drv_core_clk_src";
7934 core_clk = "ce_drv_core_clk";
7935 iface_clk = "ce_drv_iface_clk";
7936 bus_clk = "ce_drv_bus_clk";
7937 qclk = &qseecom.ce_drv;
7938 qclk->instance = CLK_CE_DRV;
7939 break;
7940 }
7941 default:
7942 pr_err("Invalid ce hw instance: %d!\n", ce);
7943 return -EIO;
7944 }
7945
7946 if (qseecom.no_clock_support) {
7947 qclk->ce_core_clk = NULL;
7948 qclk->ce_clk = NULL;
7949 qclk->ce_bus_clk = NULL;
7950 qclk->ce_core_src_clk = NULL;
7951 return 0;
7952 }
7953
7954 pdev = qseecom.pdev;
7955
7956 /* Get CE3 src core clk. */
7957 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7958 if (!IS_ERR(qclk->ce_core_src_clk)) {
7959 rc = clk_set_rate(qclk->ce_core_src_clk,
7960 qseecom.ce_opp_freq_hz);
7961 if (rc) {
7962 clk_put(qclk->ce_core_src_clk);
7963 qclk->ce_core_src_clk = NULL;
7964 pr_err("Unable to set the core src clk @%uMhz.\n",
7965 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7966 return -EIO;
7967 }
7968 } else {
7969 pr_warn("Unable to get CE core src clk, set to NULL\n");
7970 qclk->ce_core_src_clk = NULL;
7971 }
7972
7973 /* Get CE core clk */
7974 qclk->ce_core_clk = clk_get(pdev, core_clk);
7975 if (IS_ERR(qclk->ce_core_clk)) {
7976 rc = PTR_ERR(qclk->ce_core_clk);
7977 pr_err("Unable to get CE core clk\n");
7978 if (qclk->ce_core_src_clk != NULL)
7979 clk_put(qclk->ce_core_src_clk);
7980 return -EIO;
7981 }
7982
7983 /* Get CE Interface clk */
7984 qclk->ce_clk = clk_get(pdev, iface_clk);
7985 if (IS_ERR(qclk->ce_clk)) {
7986 rc = PTR_ERR(qclk->ce_clk);
7987 pr_err("Unable to get CE interface clk\n");
7988 if (qclk->ce_core_src_clk != NULL)
7989 clk_put(qclk->ce_core_src_clk);
7990 clk_put(qclk->ce_core_clk);
7991 return -EIO;
7992 }
7993
7994 /* Get CE AXI clk */
7995 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
7996 if (IS_ERR(qclk->ce_bus_clk)) {
7997 rc = PTR_ERR(qclk->ce_bus_clk);
7998 pr_err("Unable to get CE BUS interface clk\n");
7999 if (qclk->ce_core_src_clk != NULL)
8000 clk_put(qclk->ce_core_src_clk);
8001 clk_put(qclk->ce_core_clk);
8002 clk_put(qclk->ce_clk);
8003 return -EIO;
8004 }
8005
8006 return rc;
8007}
8008
8009static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
8010{
8011 struct qseecom_clk *qclk;
8012
8013 if (ce == CLK_QSEE)
8014 qclk = &qseecom.qsee;
8015 else
8016 qclk = &qseecom.ce_drv;
8017
8018 if (qclk->ce_clk != NULL) {
8019 clk_put(qclk->ce_clk);
8020 qclk->ce_clk = NULL;
8021 }
8022 if (qclk->ce_core_clk != NULL) {
8023 clk_put(qclk->ce_core_clk);
8024 qclk->ce_core_clk = NULL;
8025 }
8026 if (qclk->ce_bus_clk != NULL) {
8027 clk_put(qclk->ce_bus_clk);
8028 qclk->ce_bus_clk = NULL;
8029 }
8030 if (qclk->ce_core_src_clk != NULL) {
8031 clk_put(qclk->ce_core_src_clk);
8032 qclk->ce_core_src_clk = NULL;
8033 }
8034 qclk->instance = CLK_INVALID;
8035}
8036
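/*
 * Parse crypto-engine (CE) configuration from the device tree. The newer
 * "qcom,full-disk-encrypt-info" / "qcom,per-file-encrypt-info" tables are
 * preferred; if neither is present, the legacy single-unit properties
 * (disk/file encrypt pipe pair, hlos-ce-hw-instance) are used instead via
 * the old_db fallback.
 */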
8037static int qseecom_retrieve_ce_data(struct platform_device *pdev)
8038{
8039 int rc = 0;
8040 uint32_t hlos_num_ce_hw_instances;
8041 uint32_t disk_encrypt_pipe;
8042 uint32_t file_encrypt_pipe;
Zhen Kongffec45c2017-10-18 14:05:53 -07008043 uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008044 int i;
8045 const int *tbl;
8046 int size;
8047 int entry;
8048 struct qseecom_crypto_info *pfde_tbl = NULL;
8049 struct qseecom_crypto_info *p;
8050 int tbl_size;
8051 int j;
8052 bool old_db = true;
8053 struct qseecom_ce_info_use *pce_info_use;
8054 uint32_t *unit_tbl = NULL;
8055 int total_units = 0;
8056 struct qseecom_ce_pipe_entry *pce_entry;
8057
8058 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
8059 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
8060
8061 if (of_property_read_u32((&pdev->dev)->of_node,
8062 "qcom,qsee-ce-hw-instance",
8063 &qseecom.ce_info.qsee_ce_hw_instance)) {
8064 pr_err("Fail to get qsee ce hw instance information.\n");
8065 rc = -EINVAL;
8066 goto out;
8067 } else {
8068 pr_debug("qsee-ce-hw-instance=0x%x\n",
8069 qseecom.ce_info.qsee_ce_hw_instance);
8070 }
8071
8072 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
8073 "qcom,support-fde");
8074 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
8075 "qcom,support-pfe");
8076
8077 if (!qseecom.support_pfe && !qseecom.support_fde) {
8078 pr_warn("Device does not support PFE/FDE\n");
8079 goto out;
8080 }
8081
8082 if (qseecom.support_fde)
8083 tbl = of_get_property((&pdev->dev)->of_node,
8084 "qcom,full-disk-encrypt-info", &size);
8085 else
8086 tbl = NULL;
8087 if (tbl) {
8088 old_db = false;
8089 if (size % sizeof(struct qseecom_crypto_info)) {
8090 pr_err("full-disk-encrypt-info tbl size(%d)\n",
8091 size);
8092 rc = -EINVAL;
8093 goto out;
8094 }
8095 tbl_size = size / sizeof
8096 (struct qseecom_crypto_info);
8097
8098 pfde_tbl = kzalloc(size, GFP_KERNEL);
8099 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8100 total_units = 0;
8101
8102 if (!pfde_tbl || !unit_tbl) {
8103 pr_err("failed to alloc memory\n");
8104 rc = -ENOMEM;
8105 goto out;
8106 }
8107 if (of_property_read_u32_array((&pdev->dev)->of_node,
8108 "qcom,full-disk-encrypt-info",
8109 (u32 *)pfde_tbl, size/sizeof(u32))) {
8110 pr_err("failed to read full-disk-encrypt-info tbl\n");
8111 rc = -EINVAL;
8112 goto out;
8113 }
8114
8115 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8116 for (j = 0; j < total_units; j++) {
8117 if (p->unit_num == *(unit_tbl + j))
8118 break;
8119 }
8120 if (j == total_units) {
8121 *(unit_tbl + total_units) = p->unit_num;
8122 total_units++;
8123 }
8124 }
8125
8126 qseecom.ce_info.num_fde = total_units;
8127 pce_info_use = qseecom.ce_info.fde = kcalloc(
8128 total_units, sizeof(struct qseecom_ce_info_use),
8129 GFP_KERNEL);
8130 if (!pce_info_use) {
8131 pr_err("failed to alloc memory\n");
8132 rc = -ENOMEM;
8133 goto out;
8134 }
8135
8136 for (j = 0; j < total_units; j++, pce_info_use++) {
8137 pce_info_use->unit_num = *(unit_tbl + j);
8138 pce_info_use->alloc = false;
8139 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8140 pce_info_use->num_ce_pipe_entries = 0;
8141 pce_info_use->ce_pipe_entry = NULL;
8142 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8143 if (p->unit_num == pce_info_use->unit_num)
8144 pce_info_use->num_ce_pipe_entries++;
8145 }
8146
8147 entry = pce_info_use->num_ce_pipe_entries;
8148 pce_entry = pce_info_use->ce_pipe_entry =
8149 kcalloc(entry,
8150 sizeof(struct qseecom_ce_pipe_entry),
8151 GFP_KERNEL);
8152 if (pce_entry == NULL) {
8153 pr_err("failed to alloc memory\n");
8154 rc = -ENOMEM;
8155 goto out;
8156 }
8157
8158 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8159 if (p->unit_num == pce_info_use->unit_num) {
8160 pce_entry->ce_num = p->ce;
8161 pce_entry->ce_pipe_pair =
8162 p->pipe_pair;
8163 pce_entry->valid = true;
8164 pce_entry++;
8165 }
8166 }
8167 }
8168 kfree(unit_tbl);
8169 unit_tbl = NULL;
8170 kfree(pfde_tbl);
8171 pfde_tbl = NULL;
8172 }
8173
8174 if (qseecom.support_pfe)
8175 tbl = of_get_property((&pdev->dev)->of_node,
8176 "qcom,per-file-encrypt-info", &size);
8177 else
8178 tbl = NULL;
8179 if (tbl) {
8180 old_db = false;
8181 if (size % sizeof(struct qseecom_crypto_info)) {
8182 pr_err("per-file-encrypt-info tbl size(%d)\n",
8183 size);
8184 rc = -EINVAL;
8185 goto out;
8186 }
8187 tbl_size = size / sizeof
8188 (struct qseecom_crypto_info);
8189
8190 pfde_tbl = kzalloc(size, GFP_KERNEL);
8191 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8192 total_units = 0;
8193 if (!pfde_tbl || !unit_tbl) {
8194 pr_err("failed to alloc memory\n");
8195 rc = -ENOMEM;
8196 goto out;
8197 }
8198 if (of_property_read_u32_array((&pdev->dev)->of_node,
8199 "qcom,per-file-encrypt-info",
8200 (u32 *)pfde_tbl, size/sizeof(u32))) {
8201 pr_err("failed to read per-file-encrypt-info tbl\n");
8202 rc = -EINVAL;
8203 goto out;
8204 }
8205
8206 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8207 for (j = 0; j < total_units; j++) {
8208 if (p->unit_num == *(unit_tbl + j))
8209 break;
8210 }
8211 if (j == total_units) {
8212 *(unit_tbl + total_units) = p->unit_num;
8213 total_units++;
8214 }
8215 }
8216
8217 qseecom.ce_info.num_pfe = total_units;
8218 pce_info_use = qseecom.ce_info.pfe = kcalloc(
8219 total_units, sizeof(struct qseecom_ce_info_use),
8220 GFP_KERNEL);
8221 if (!pce_info_use) {
8222 pr_err("failed to alloc memory\n");
8223 rc = -ENOMEM;
8224 goto out;
8225 }
8226
8227 for (j = 0; j < total_units; j++, pce_info_use++) {
8228 pce_info_use->unit_num = *(unit_tbl + j);
8229 pce_info_use->alloc = false;
8230 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8231 pce_info_use->num_ce_pipe_entries = 0;
8232 pce_info_use->ce_pipe_entry = NULL;
8233 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8234 if (p->unit_num == pce_info_use->unit_num)
8235 pce_info_use->num_ce_pipe_entries++;
8236 }
8237
8238 entry = pce_info_use->num_ce_pipe_entries;
8239 pce_entry = pce_info_use->ce_pipe_entry =
8240 kcalloc(entry,
8241 sizeof(struct qseecom_ce_pipe_entry),
8242 GFP_KERNEL);
8243 if (pce_entry == NULL) {
8244 pr_err("failed to alloc memory\n");
8245 rc = -ENOMEM;
8246 goto out;
8247 }
8248
8249 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8250 if (p->unit_num == pce_info_use->unit_num) {
8251 pce_entry->ce_num = p->ce;
8252 pce_entry->ce_pipe_pair =
8253 p->pipe_pair;
8254 pce_entry->valid = true;
8255 pce_entry++;
8256 }
8257 }
8258 }
8259 kfree(unit_tbl);
8260 unit_tbl = NULL;
8261 kfree(pfde_tbl);
8262 pfde_tbl = NULL;
8263 }
8264
8265 if (!old_db)
8266 goto out1;
8267
8268 if (of_property_read_bool((&pdev->dev)->of_node,
8269 "qcom,support-multiple-ce-hw-instance")) {
8270 if (of_property_read_u32((&pdev->dev)->of_node,
8271 "qcom,hlos-num-ce-hw-instances",
8272 &hlos_num_ce_hw_instances)) {
8273 pr_err("Fail: get hlos number of ce hw instance\n");
8274 rc = -EINVAL;
8275 goto out;
8276 }
8277 } else {
8278 hlos_num_ce_hw_instances = 1;
8279 }
8280
8281 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8282 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8283 MAX_CE_PIPE_PAIR_PER_UNIT);
8284 rc = -EINVAL;
8285 goto out;
8286 }
8287
8288 if (of_property_read_u32_array((&pdev->dev)->of_node,
8289 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8290 hlos_num_ce_hw_instances)) {
8291 pr_err("Fail: get hlos ce hw instance info\n");
8292 rc = -EINVAL;
8293 goto out;
8294 }
8295
8296 if (qseecom.support_fde) {
8297 pce_info_use = qseecom.ce_info.fde =
8298 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8299 if (!pce_info_use) {
8300 pr_err("failed to alloc memory\n");
8301 rc = -ENOMEM;
8302 goto out;
8303 }
8304 /* by default for old db */
8305 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8306 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8307 pce_info_use->alloc = false;
8308 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8309 pce_info_use->ce_pipe_entry = NULL;
8310 if (of_property_read_u32((&pdev->dev)->of_node,
8311 "qcom,disk-encrypt-pipe-pair",
8312 &disk_encrypt_pipe)) {
8313 pr_err("Fail to get FDE pipe information.\n");
8314 rc = -EINVAL;
8315 goto out;
8316 } else {
8317 pr_debug("disk-encrypt-pipe-pair=0x%x\n",
8318 disk_encrypt_pipe);
8319 }
8320 entry = pce_info_use->num_ce_pipe_entries =
8321 hlos_num_ce_hw_instances;
8322 pce_entry = pce_info_use->ce_pipe_entry =
8323 kcalloc(entry,
8324 sizeof(struct qseecom_ce_pipe_entry),
8325 GFP_KERNEL);
8326 if (pce_entry == NULL) {
8327 pr_err("failed to alloc memory\n");
8328 rc = -ENOMEM;
8329 goto out;
8330 }
8331 for (i = 0; i < entry; i++) {
8332 pce_entry->ce_num = hlos_ce_hw_instance[i];
8333 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8334 pce_entry->valid = 1;
8335 pce_entry++;
8336 }
8337 } else {
8338 pr_warn("Device does not support FDE\n");
8339 disk_encrypt_pipe = 0xff;
8340 }
8341 if (qseecom.support_pfe) {
8342 pce_info_use = qseecom.ce_info.pfe =
8343 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8344 if (!pce_info_use) {
8345 pr_err("failed to alloc memory\n");
8346 rc = -ENOMEM;
8347 goto out;
8348 }
8349 /* by default for old db */
8350 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8351 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8352 pce_info_use->alloc = false;
8353 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8354 pce_info_use->ce_pipe_entry = NULL;
8355
8356 if (of_property_read_u32((&pdev->dev)->of_node,
8357 "qcom,file-encrypt-pipe-pair",
8358 &file_encrypt_pipe)) {
8359 pr_err("Fail to get PFE pipe information.\n");
8360 rc = -EINVAL;
8361 goto out;
8362 } else {
8363 pr_debug("file-encrypt-pipe-pair=0x%x\n",
8364 file_encrypt_pipe);
8365 }
8366 entry = pce_info_use->num_ce_pipe_entries =
8367 hlos_num_ce_hw_instances;
8368 pce_entry = pce_info_use->ce_pipe_entry =
8369 kcalloc(entry,
8370 sizeof(struct qseecom_ce_pipe_entry),
8371 GFP_KERNEL);
8372 if (pce_entry == NULL) {
8373 pr_err("failed to alloc memory\n");
8374 rc = -ENOMEM;
8375 goto out;
8376 }
8377 for (i = 0; i < entry; i++) {
8378 pce_entry->ce_num = hlos_ce_hw_instance[i];
8379 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8380 pce_entry->valid = 1;
8381 pce_entry++;
8382 }
8383 } else {
8384 pr_warn("Device does not support PFE\n");
8385 file_encrypt_pipe = 0xff;
8386 }
8387
8388out1:
8389 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8390 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8391out:
8392 if (rc) {
8393 if (qseecom.ce_info.fde) {
8394 pce_info_use = qseecom.ce_info.fde;
8395 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8396 pce_entry = pce_info_use->ce_pipe_entry;
8397 kfree(pce_entry);
8398 pce_info_use++;
8399 }
8400 }
8401 kfree(qseecom.ce_info.fde);
8402 qseecom.ce_info.fde = NULL;
8403 if (qseecom.ce_info.pfe) {
8404 pce_info_use = qseecom.ce_info.pfe;
8405 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8406 pce_entry = pce_info_use->ce_pipe_entry;
8407 kfree(pce_entry);
8408 pce_info_use++;
8409 }
8410 }
8411 kfree(qseecom.ce_info.pfe);
8412 qseecom.ce_info.pfe = NULL;
8413 }
8414 kfree(unit_tbl);
8415 kfree(pfde_tbl);
8416 return rc;
8417}
8418
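/*
 * QSEECOM_IOCTL_GET_CE_PIPE_INFO handler: find the CE info unit already
 * bound to the caller's handle for the given usage, or claim a free one,
 * and copy its pipe-pair entries back to userspace (-EBUSY if none left).
 */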
8419static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8420 void __user *argp)
8421{
8422 struct qseecom_ce_info_req req;
8423 struct qseecom_ce_info_req *pinfo = &req;
8424 int ret = 0;
8425 int i;
8426 unsigned int entries;
8427 struct qseecom_ce_info_use *pce_info_use, *p;
8428 int total = 0;
8429 bool found = false;
8430 struct qseecom_ce_pipe_entry *pce_entry;
8431
8432 ret = copy_from_user(pinfo, argp,
8433 sizeof(struct qseecom_ce_info_req));
8434 if (ret) {
8435 pr_err("copy_from_user failed\n");
8436 return -EFAULT;
8437 }
8438
8439 switch (pinfo->usage) {
8440 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8441 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8442 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8443 if (qseecom.support_fde) {
8444 p = qseecom.ce_info.fde;
8445 total = qseecom.ce_info.num_fde;
8446 } else {
8447 pr_err("system does not support fde\n");
8448 return -EINVAL;
8449 }
8450 break;
8451 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8452 if (qseecom.support_pfe) {
8453 p = qseecom.ce_info.pfe;
8454 total = qseecom.ce_info.num_pfe;
8455 } else {
8456 pr_err("system does not support pfe\n");
8457 return -EINVAL;
8458 }
8459 break;
8460 default:
8461 pr_err("unsupported usage %d\n", pinfo->usage);
8462 return -EINVAL;
8463 }
8464
8465 pce_info_use = NULL;
8466 for (i = 0; i < total; i++) {
8467 if (!p->alloc)
8468 pce_info_use = p;
8469 else if (!memcmp(p->handle, pinfo->handle,
8470 MAX_CE_INFO_HANDLE_SIZE)) {
8471 pce_info_use = p;
8472 found = true;
8473 break;
8474 }
8475 p++;
8476 }
8477
8478 if (pce_info_use == NULL)
8479 return -EBUSY;
8480
8481 pinfo->unit_num = pce_info_use->unit_num;
8482 if (!pce_info_use->alloc) {
8483 pce_info_use->alloc = true;
8484 memcpy(pce_info_use->handle,
8485 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8486 }
8487 if (pce_info_use->num_ce_pipe_entries >
8488 MAX_CE_PIPE_PAIR_PER_UNIT)
8489 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8490 else
8491 entries = pce_info_use->num_ce_pipe_entries;
8492 pinfo->num_ce_pipe_entries = entries;
8493 pce_entry = pce_info_use->ce_pipe_entry;
8494 for (i = 0; i < entries; i++, pce_entry++)
8495 pinfo->ce_pipe_entry[i] = *pce_entry;
8496 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8497 pinfo->ce_pipe_entry[i].valid = 0;
8498
8499 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8500 pr_err("copy_to_user failed\n");
8501 ret = -EFAULT;
8502 }
8503 return ret;
8504}
8505
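/*
 * QSEECOM_IOCTL_FREE_CE_PIPE_INFO handler: release the CE info unit whose
 * handle matches the caller's request so it can be reused.
 */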
8506static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8507 void __user *argp)
8508{
8509 struct qseecom_ce_info_req req;
8510 struct qseecom_ce_info_req *pinfo = &req;
8511 int ret = 0;
8512 struct qseecom_ce_info_use *p;
8513 int total = 0;
8514 int i;
8515 bool found = false;
8516
8517 ret = copy_from_user(pinfo, argp,
8518 sizeof(struct qseecom_ce_info_req));
8519 if (ret)
8520 return -EFAULT;
8521
8522 switch (pinfo->usage) {
8523 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8524 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8525 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8526 if (qseecom.support_fde) {
8527 p = qseecom.ce_info.fde;
8528 total = qseecom.ce_info.num_fde;
8529 } else {
8530 pr_err("system does not support fde\n");
8531 return -EINVAL;
8532 }
8533 break;
8534 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8535 if (qseecom.support_pfe) {
8536 p = qseecom.ce_info.pfe;
8537 total = qseecom.ce_info.num_pfe;
8538 } else {
8539 pr_err("system does not support pfe\n");
8540 return -EINVAL;
8541 }
8542 break;
8543 default:
8544 pr_err("unsupported usage %d\n", pinfo->usage);
8545 return -EINVAL;
8546 }
8547
8548 for (i = 0; i < total; i++) {
8549 if (p->alloc &&
8550 !memcmp(p->handle, pinfo->handle,
8551 MAX_CE_INFO_HANDLE_SIZE)) {
8552 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8553 p->alloc = false;
8554 found = true;
8555 break;
8556 }
8557 p++;
8558 }
8559 return ret;
8560}
8561
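/*
 * QSEECOM_IOCTL_QUERY_CE_PIPE_INFO handler: read-only lookup of an
 * already-allocated CE info unit; returns INVALID_CE_INFO_UNIT_NUM and no
 * pipe entries if the handle has not been allocated.
 */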
8562static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8563 void __user *argp)
8564{
8565 struct qseecom_ce_info_req req;
8566 struct qseecom_ce_info_req *pinfo = &req;
8567 int ret = 0;
8568 int i;
8569 unsigned int entries;
8570 struct qseecom_ce_info_use *pce_info_use, *p;
8571 int total = 0;
8572 bool found = false;
8573 struct qseecom_ce_pipe_entry *pce_entry;
8574
8575 ret = copy_from_user(pinfo, argp,
8576 sizeof(struct qseecom_ce_info_req));
8577 if (ret)
8578 return -EFAULT;
8579
8580 switch (pinfo->usage) {
8581 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8582 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8583 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8584 if (qseecom.support_fde) {
8585 p = qseecom.ce_info.fde;
8586 total = qseecom.ce_info.num_fde;
8587 } else {
8588 pr_err("system does not support fde\n");
8589 return -EINVAL;
8590 }
8591 break;
8592 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8593 if (qseecom.support_pfe) {
8594 p = qseecom.ce_info.pfe;
8595 total = qseecom.ce_info.num_pfe;
8596 } else {
8597 pr_err("system does not support pfe\n");
8598 return -EINVAL;
8599 }
8600 break;
8601 default:
8602 pr_err("unsupported usage %d\n", pinfo->usage);
8603 return -EINVAL;
8604 }
8605
8606 pce_info_use = NULL;
8607 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8608 pinfo->num_ce_pipe_entries = 0;
8609 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8610 pinfo->ce_pipe_entry[i].valid = 0;
8611
8612 for (i = 0; i < total; i++) {
8613
8614 if (p->alloc && !memcmp(p->handle,
8615 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8616 pce_info_use = p;
8617 found = true;
8618 break;
8619 }
8620 p++;
8621 }
8622 if (!pce_info_use)
8623 goto out;
8624 pinfo->unit_num = pce_info_use->unit_num;
8625 if (pce_info_use->num_ce_pipe_entries >
8626 MAX_CE_PIPE_PAIR_PER_UNIT)
8627 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8628 else
8629 entries = pce_info_use->num_ce_pipe_entries;
8630 pinfo->num_ce_pipe_entries = entries;
8631 pce_entry = pce_info_use->ce_pipe_entry;
8632 for (i = 0; i < entries; i++, pce_entry++)
8633 pinfo->ce_pipe_entry[i] = *pce_entry;
8634 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8635 pinfo->ce_pipe_entry[i].valid = 0;
8636out:
8637 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8638 pr_err("copy_to_user failed\n");
8639 ret = -EFAULT;
8640 }
8641 return ret;
8642}
8643
8644/*
8645 * Check the TZ whitelist feature: if the reported feature version is
8646 * below 1.0.0, the whitelist feature is not supported.
8647 */
8648static int qseecom_check_whitelist_feature(void)
8649{
8650 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8651
8652 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8653}
8654
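/*
 * Probe sequence: create the qseecom char device, query the QSEE version
 * from TZ, create the ION client, parse device-tree configuration (CE info,
 * clocks, bus scaling), notify TZ of the secure app region when required,
 * and finally start the listener-unregister kthread before marking the
 * driver READY.
 */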
8655static int qseecom_probe(struct platform_device *pdev)
8656{
8657 int rc;
8658 int i;
8659 uint32_t feature = 10;
8660 struct device *class_dev;
8661 struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
8662 struct qseecom_command_scm_resp resp;
8663 struct qseecom_ce_info_use *pce_info_use = NULL;
8664
8665 qseecom.qsee_bw_count = 0;
8666 qseecom.qsee_perf_client = 0;
8667 qseecom.qsee_sfpb_bw_count = 0;
8668
8669 qseecom.qsee.ce_core_clk = NULL;
8670 qseecom.qsee.ce_clk = NULL;
8671 qseecom.qsee.ce_core_src_clk = NULL;
8672 qseecom.qsee.ce_bus_clk = NULL;
8673
8674 qseecom.cumulative_mode = 0;
8675 qseecom.current_mode = INACTIVE;
8676 qseecom.support_bus_scaling = false;
8677 qseecom.support_fde = false;
8678 qseecom.support_pfe = false;
8679
8680 qseecom.ce_drv.ce_core_clk = NULL;
8681 qseecom.ce_drv.ce_clk = NULL;
8682 qseecom.ce_drv.ce_core_src_clk = NULL;
8683 qseecom.ce_drv.ce_bus_clk = NULL;
8684 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8685
8686 qseecom.app_block_ref_cnt = 0;
8687 init_waitqueue_head(&qseecom.app_block_wq);
8688 qseecom.whitelist_support = true;
8689
8690 rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
8691 if (rc < 0) {
8692 pr_err("alloc_chrdev_region failed %d\n", rc);
8693 return rc;
8694 }
8695
8696 driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
8697 if (IS_ERR(driver_class)) {
8698 rc = -ENOMEM;
8699 pr_err("class_create failed %d\n", rc);
8700 goto exit_unreg_chrdev_region;
8701 }
8702
8703 class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
8704 QSEECOM_DEV);
8705 if (IS_ERR(class_dev)) {
8706		rc = -ENOMEM;
8707		pr_err("class_device_create failed %d\n", rc);
8708 goto exit_destroy_class;
8709 }
8710
8711 cdev_init(&qseecom.cdev, &qseecom_fops);
8712 qseecom.cdev.owner = THIS_MODULE;
8713
8714 rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
8715 if (rc < 0) {
8716 pr_err("cdev_add failed %d\n", rc);
8717 goto exit_destroy_device;
8718 }
8719
8720 INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
8721	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
8722	spin_lock_init(&qseecom.registered_app_list_lock);
8723	INIT_LIST_HEAD(&qseecom.unregister_lsnr_pending_list_head);
8724	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
8725	spin_lock_init(&qseecom.registered_kclient_list_lock);
8726	init_waitqueue_head(&qseecom.send_resp_wq);
8727	init_waitqueue_head(&qseecom.register_lsnr_pending_wq);
8728	init_waitqueue_head(&qseecom.unregister_lsnr_kthread_wq);
8729	qseecom.send_resp_flag = 0;
8730
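	/*
	 * Query TZ for the QSEE version (feature id 10). The SCM call is
	 * serialized against other TZ traffic by app_access_lock, and the
	 * probe is aborted if the query fails.
	 */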
8731 qseecom.qsee_version = QSEEE_VERSION_00;
8732	mutex_lock(&app_access_lock);
8733	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
8734		&resp, sizeof(resp));
8735	mutex_unlock(&app_access_lock);
8736	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
8737 if (rc) {
8738 pr_err("Failed to get QSEE version info %d\n", rc);
8739 goto exit_del_cdev;
8740 }
8741 qseecom.qsee_version = resp.result;
8742 qseecom.qseos_version = QSEOS_VERSION_14;
8743 qseecom.commonlib_loaded = false;
8744 qseecom.commonlib64_loaded = false;
8745 qseecom.pdev = class_dev;
8746 /* Create ION msm client */
8747 qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
8748 if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
8749 pr_err("Ion client cannot be created\n");
8750 rc = -ENOMEM;
8751 goto exit_del_cdev;
8752 }
8753
8754	/* parse device-tree configuration: CE info, clocks, bus scaling */
8755 if (pdev->dev.of_node) {
8756 qseecom.pdev->of_node = pdev->dev.of_node;
8757 qseecom.support_bus_scaling =
8758 of_property_read_bool((&pdev->dev)->of_node,
8759 "qcom,support-bus-scaling");
8760 rc = qseecom_retrieve_ce_data(pdev);
8761 if (rc)
8762 goto exit_destroy_ion_client;
8763 qseecom.appsbl_qseecom_support =
8764 of_property_read_bool((&pdev->dev)->of_node,
8765 "qcom,appsbl-qseecom-support");
8766 pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
8767 qseecom.appsbl_qseecom_support);
8768
8769 qseecom.commonlib64_loaded =
8770 of_property_read_bool((&pdev->dev)->of_node,
8771 "qcom,commonlib64-loaded-by-uefi");
8772 pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
8773 qseecom.commonlib64_loaded);
8774 qseecom.fde_key_size =
8775 of_property_read_bool((&pdev->dev)->of_node,
8776 "qcom,fde-key-size");
8777 qseecom.no_clock_support =
8778 of_property_read_bool((&pdev->dev)->of_node,
8779 "qcom,no-clock-support");
8780		if (qseecom.no_clock_support) {
8781			pr_info("qseecom clocks handled by other subsystem\n");
8782		} else {
8783			pr_info("no-clock-support=0x%x\n",
8784				qseecom.no_clock_support);
8785		}
8786
8787 if (of_property_read_u32((&pdev->dev)->of_node,
8788 "qcom,qsee-reentrancy-support",
8789 &qseecom.qsee_reentrancy_support)) {
8790 pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
8791 qseecom.qsee_reentrancy_support = 0;
8792 } else {
8793 pr_warn("qseecom.qsee_reentrancy_support = %d\n",
8794 qseecom.qsee_reentrancy_support);
8795 }
8796
8797		qseecom.enable_key_wrap_in_ks =
8798 of_property_read_bool((&pdev->dev)->of_node,
8799 "qcom,enable-key-wrap-in-ks");
8800 if (qseecom.enable_key_wrap_in_ks) {
8801 pr_warn("qseecom.enable_key_wrap_in_ks = %d\n",
8802 qseecom.enable_key_wrap_in_ks);
8803 }
8804
8805		/*
8806 * The qseecom bus scaling flag can not be enabled when
8807 * crypto clock is not handled by HLOS.
8808 */
8809 if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
8810 pr_err("support_bus_scaling flag can not be enabled.\n");
8811 rc = -EINVAL;
8812 goto exit_destroy_ion_client;
8813 }
8814
8815 if (of_property_read_u32((&pdev->dev)->of_node,
8816 "qcom,ce-opp-freq",
8817 &qseecom.ce_opp_freq_hz)) {
8818 pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
8819 qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
8820 }
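		/*
		 * Bring up the QSEE CE clocks. A separate CLK_CE_DRV instance is
		 * initialized only when FDE/PFE use a CE instance different from
		 * the one QSEE runs on; otherwise the QSEE clock handles are
		 * shared.
		 */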
8821 rc = __qseecom_init_clk(CLK_QSEE);
8822 if (rc)
8823 goto exit_destroy_ion_client;
8824
8825 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8826 (qseecom.support_pfe || qseecom.support_fde)) {
8827 rc = __qseecom_init_clk(CLK_CE_DRV);
8828 if (rc) {
8829 __qseecom_deinit_clk(CLK_QSEE);
8830 goto exit_destroy_ion_client;
8831 }
8832 } else {
8833 struct qseecom_clk *qclk;
8834
8835 qclk = &qseecom.qsee;
8836 qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
8837 qseecom.ce_drv.ce_clk = qclk->ce_clk;
8838 qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
8839 qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
8840 }
8841
8842 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8843 msm_bus_cl_get_pdata(pdev);
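		/*
		 * For QSEE >= 2.0, when the apps region is not already protected
		 * and appsbl has not done the setup, notify TZ of the
		 * "secapp-region" memory resource (32-bit or 64-bit request
		 * format depending on the QSEE version).
		 */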
8844 if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
8845 (!qseecom.is_apps_region_protected &&
8846 !qseecom.appsbl_qseecom_support)) {
8847 struct resource *resource = NULL;
8848 struct qsee_apps_region_info_ireq req;
8849 struct qsee_apps_region_info_64bit_ireq req_64bit;
8850 struct qseecom_command_scm_resp resp;
8851 void *cmd_buf = NULL;
8852 size_t cmd_len;
8853
8854 resource = platform_get_resource_byname(pdev,
8855 IORESOURCE_MEM, "secapp-region");
8856 if (resource) {
8857 if (qseecom.qsee_version < QSEE_VERSION_40) {
8858 req.qsee_cmd_id =
8859 QSEOS_APP_REGION_NOTIFICATION;
8860 req.addr = (uint32_t)resource->start;
8861 req.size = resource_size(resource);
8862 cmd_buf = (void *)&req;
8863 cmd_len = sizeof(struct
8864 qsee_apps_region_info_ireq);
8865 pr_warn("secure app region addr=0x%x size=0x%x",
8866 req.addr, req.size);
8867 } else {
8868 req_64bit.qsee_cmd_id =
8869 QSEOS_APP_REGION_NOTIFICATION;
8870 req_64bit.addr = resource->start;
8871 req_64bit.size = resource_size(
8872 resource);
8873 cmd_buf = (void *)&req_64bit;
8874 cmd_len = sizeof(struct
8875 qsee_apps_region_info_64bit_ireq);
8876 pr_warn("secure app region addr=0x%llx size=0x%x",
8877 req_64bit.addr, req_64bit.size);
8878 }
8879 } else {
8880 pr_err("Fail to get secure app region info\n");
8881 rc = -EINVAL;
8882 goto exit_deinit_clock;
8883 }
8884 rc = __qseecom_enable_clk(CLK_QSEE);
8885 if (rc) {
8886 pr_err("CLK_QSEE enabling failed (%d)\n", rc);
8887 rc = -EIO;
8888 goto exit_deinit_clock;
8889 }
8890			mutex_lock(&app_access_lock);
8891			rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
8892					cmd_buf, cmd_len,
8893					&resp, sizeof(resp));
8894			mutex_unlock(&app_access_lock);
8895			__qseecom_disable_clk(CLK_QSEE);
8896 if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
8897 pr_err("send secapp reg fail %d resp.res %d\n",
8898 rc, resp.result);
8899 rc = -EINVAL;
8900 goto exit_deinit_clock;
8901 }
8902 }
8903 /*
8904	 * By default, appsbl only loads cmnlib. If an OEM changes appsbl to
8905	 * also load cmnlib64 while the cmnlib64 image is not present in
8906	 * non_hlos.bin, set "qseecom.commonlib64_loaded = true" here as well.
8907 */
8908 if (qseecom.is_apps_region_protected ||
8909 qseecom.appsbl_qseecom_support)
8910 qseecom.commonlib_loaded = true;
8911 } else {
8912 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8913 pdev->dev.platform_data;
8914 }
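	/*
	 * With bus scaling supported, set up the inactive-request work and the
	 * scale-down timer used to drop the bandwidth vote once crypto
	 * activity stops.
	 */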
8915 if (qseecom.support_bus_scaling) {
8916 init_timer(&(qseecom.bw_scale_down_timer));
8917 INIT_WORK(&qseecom.bw_inactive_req_ws,
8918 qseecom_bw_inactive_req_work);
8919 qseecom.bw_scale_down_timer.function =
8920 qseecom_scale_bus_bandwidth_timer_callback;
8921 }
8922 qseecom.timer_running = false;
8923 qseecom.qsee_perf_client = msm_bus_scale_register_client(
8924 qseecom_platform_support);
8925
8926 qseecom.whitelist_support = qseecom_check_whitelist_feature();
8927 pr_warn("qseecom.whitelist_support = %d\n",
8928 qseecom.whitelist_support);
8929
8930 if (!qseecom.qsee_perf_client)
8931 pr_err("Unable to register bus client\n");
8932
8933	/* Create a kthread to process pending listener-unregister tasks */
8934 qseecom.unregister_lsnr_kthread_task = kthread_run(
8935 __qseecom_unregister_listener_kthread_func,
8936 NULL, "qseecom-unreg-lsnr");
8937 if (IS_ERR(qseecom.unregister_lsnr_kthread_task)) {
8938 pr_err("failed to create kthread to unregister listener\n");
8939 rc = -EINVAL;
8940 goto exit_deinit_clock;
8941 }
8942 atomic_set(&qseecom.unregister_lsnr_kthread_state,
8943 LSNR_UNREG_KT_SLEEP);
8944	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
8945 return 0;
8946
8947exit_deinit_clock:
8948 __qseecom_deinit_clk(CLK_QSEE);
8949 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8950 (qseecom.support_pfe || qseecom.support_fde))
8951 __qseecom_deinit_clk(CLK_CE_DRV);
8952exit_destroy_ion_client:
8953 if (qseecom.ce_info.fde) {
8954 pce_info_use = qseecom.ce_info.fde;
8955 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8956 kzfree(pce_info_use->ce_pipe_entry);
8957 pce_info_use++;
8958 }
8959 kfree(qseecom.ce_info.fde);
8960 }
8961 if (qseecom.ce_info.pfe) {
8962 pce_info_use = qseecom.ce_info.pfe;
8963 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8964 kzfree(pce_info_use->ce_pipe_entry);
8965 pce_info_use++;
8966 }
8967 kfree(qseecom.ce_info.pfe);
8968 }
8969 ion_client_destroy(qseecom.ion_clnt);
8970exit_del_cdev:
8971 cdev_del(&qseecom.cdev);
8972exit_destroy_device:
8973 device_destroy(driver_class, qseecom_device_no);
8974exit_destroy_class:
8975 class_destroy(driver_class);
8976exit_unreg_chrdev_region:
8977 unregister_chrdev_region(qseecom_device_no, 1);
8978 return rc;
8979}
8980
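/*
 * Remove: undo probe in reverse order - unload any apps still registered
 * through the in-kernel client interface, release the bus-scaling client,
 * free the CE info tables, de-initialize clocks, destroy the ION client,
 * stop the listener-unregister kthread, and tear down the char device.
 */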
8981static int qseecom_remove(struct platform_device *pdev)
8982{
8983 struct qseecom_registered_kclient_list *kclient = NULL;
8984	struct qseecom_registered_kclient_list *kclient_tmp = NULL;
8985	unsigned long flags = 0;
8986 int ret = 0;
8987 int i;
8988 struct qseecom_ce_pipe_entry *pce_entry;
8989 struct qseecom_ce_info_use *pce_info_use;
8990
8991 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8992 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
8993
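	/*
	 * Unload every app still registered through the in-kernel client
	 * interface; entries with a NULL handle are simply freed.
	 */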
8994	list_for_each_entry_safe(kclient, kclient_tmp,
8995				&qseecom.registered_kclient_list_head, list) {
8996
8997		/* If the client handle is NULL, free the stale entry and stop */
8998		if (!kclient->handle) {
8999			list_del(&kclient->list);
9000			kzfree(kclient);
9001			break;
9002		}
9003
9004 list_del(&kclient->list);
9005 mutex_lock(&app_access_lock);
9006 ret = qseecom_unload_app(kclient->handle->dev, false);
9007 mutex_unlock(&app_access_lock);
9008 if (!ret) {
9009 kzfree(kclient->handle->dev);
9010 kzfree(kclient->handle);
9011 kzfree(kclient);
9012 }
9013 }
9014
9015	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
9016
9017 if (qseecom.qseos_version > QSEEE_VERSION_00)
9018 qseecom_unload_commonlib_image();
9019
9020 if (qseecom.qsee_perf_client)
9021 msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
9022 0);
9023 if (pdev->dev.platform_data != NULL)
9024 msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
9025
9026 if (qseecom.support_bus_scaling) {
9027 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9028 del_timer_sync(&qseecom.bw_scale_down_timer);
9029 }
9030
9031 if (qseecom.ce_info.fde) {
9032 pce_info_use = qseecom.ce_info.fde;
9033 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
9034 pce_entry = pce_info_use->ce_pipe_entry;
9035 kfree(pce_entry);
9036 pce_info_use++;
9037 }
9038 }
9039 kfree(qseecom.ce_info.fde);
9040 if (qseecom.ce_info.pfe) {
9041 pce_info_use = qseecom.ce_info.pfe;
9042 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
9043 pce_entry = pce_info_use->ce_pipe_entry;
9044 kfree(pce_entry);
9045 pce_info_use++;
9046 }
9047 }
9048 kfree(qseecom.ce_info.pfe);
9049
9050	/* de-initialize the CE clocks that were set up from device tree */
9051 if (pdev->dev.of_node) {
9052 __qseecom_deinit_clk(CLK_QSEE);
9053 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
9054 (qseecom.support_pfe || qseecom.support_fde))
9055 __qseecom_deinit_clk(CLK_CE_DRV);
9056 }
9057
9058 ion_client_destroy(qseecom.ion_clnt);
9059
9060	kthread_stop(qseecom.unregister_lsnr_kthread_task);
9061
9062	cdev_del(&qseecom.cdev);
9063
9064 device_destroy(driver_class, qseecom_device_no);
9065
9066 class_destroy(driver_class);
9067
9068 unregister_chrdev_region(qseecom_device_no, 1);
9069
9070 return ret;
9071}
9072
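/*
 * Suspend: mark the driver SUSPEND, drop the bus vote to INACTIVE and gate
 * any CE clocks that are still prepared; all of this is skipped when the
 * clocks are owned by another subsystem (no_clock_support).
 */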
9073static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
9074{
9075 int ret = 0;
9076 struct qseecom_clk *qclk;
9077
9078 qclk = &qseecom.qsee;
9079 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
9080 if (qseecom.no_clock_support)
9081 return 0;
9082
9083 mutex_lock(&qsee_bw_mutex);
9084 mutex_lock(&clk_access_lock);
9085
9086 if (qseecom.current_mode != INACTIVE) {
9087 ret = msm_bus_scale_client_update_request(
9088 qseecom.qsee_perf_client, INACTIVE);
9089 if (ret)
9090 pr_err("Fail to scale down bus\n");
9091 else
9092 qseecom.current_mode = INACTIVE;
9093 }
9094
9095 if (qclk->clk_access_cnt) {
9096 if (qclk->ce_clk != NULL)
9097 clk_disable_unprepare(qclk->ce_clk);
9098 if (qclk->ce_core_clk != NULL)
9099 clk_disable_unprepare(qclk->ce_core_clk);
9100 if (qclk->ce_bus_clk != NULL)
9101 clk_disable_unprepare(qclk->ce_bus_clk);
9102 }
9103
9104 del_timer_sync(&(qseecom.bw_scale_down_timer));
9105 qseecom.timer_running = false;
9106
9107 mutex_unlock(&clk_access_lock);
9108 mutex_unlock(&qsee_bw_mutex);
9109 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9110
9111 return 0;
9112}
9113
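/*
 * Resume: restore the accumulated bandwidth vote (capped at HIGH), re-enable
 * the CE clocks if clients were holding them at suspend time, re-arm the
 * scale-down timer and mark the driver READY again.
 */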
9114static int qseecom_resume(struct platform_device *pdev)
9115{
9116 int mode = 0;
9117 int ret = 0;
9118 struct qseecom_clk *qclk;
9119
9120 qclk = &qseecom.qsee;
9121 if (qseecom.no_clock_support)
9122 goto exit;
9123
9124 mutex_lock(&qsee_bw_mutex);
9125 mutex_lock(&clk_access_lock);
9126 if (qseecom.cumulative_mode >= HIGH)
9127 mode = HIGH;
9128 else
9129 mode = qseecom.cumulative_mode;
9130
9131 if (qseecom.cumulative_mode != INACTIVE) {
9132 ret = msm_bus_scale_client_update_request(
9133 qseecom.qsee_perf_client, mode);
9134 if (ret)
9135 pr_err("Fail to scale up bus to %d\n", mode);
9136 else
9137 qseecom.current_mode = mode;
9138 }
9139
9140 if (qclk->clk_access_cnt) {
9141 if (qclk->ce_core_clk != NULL) {
9142 ret = clk_prepare_enable(qclk->ce_core_clk);
9143 if (ret) {
9144 pr_err("Unable to enable/prep CE core clk\n");
9145 qclk->clk_access_cnt = 0;
9146 goto err;
9147 }
9148 }
9149 if (qclk->ce_clk != NULL) {
9150 ret = clk_prepare_enable(qclk->ce_clk);
9151 if (ret) {
9152 pr_err("Unable to enable/prep CE iface clk\n");
9153 qclk->clk_access_cnt = 0;
9154 goto ce_clk_err;
9155 }
9156 }
9157 if (qclk->ce_bus_clk != NULL) {
9158 ret = clk_prepare_enable(qclk->ce_bus_clk);
9159 if (ret) {
9160 pr_err("Unable to enable/prep CE bus clk\n");
9161 qclk->clk_access_cnt = 0;
9162 goto ce_bus_clk_err;
9163 }
9164 }
9165 }
9166
9167 if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
9168 qseecom.bw_scale_down_timer.expires = jiffies +
9169 msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
9170 mod_timer(&(qseecom.bw_scale_down_timer),
9171 qseecom.bw_scale_down_timer.expires);
9172 qseecom.timer_running = true;
9173 }
9174
9175 mutex_unlock(&clk_access_lock);
9176 mutex_unlock(&qsee_bw_mutex);
9177 goto exit;
9178
9179ce_bus_clk_err:
9180 if (qclk->ce_clk)
9181 clk_disable_unprepare(qclk->ce_clk);
9182ce_clk_err:
9183 if (qclk->ce_core_clk)
9184 clk_disable_unprepare(qclk->ce_core_clk);
9185err:
9186 mutex_unlock(&clk_access_lock);
9187 mutex_unlock(&qsee_bw_mutex);
9188 ret = -EIO;
9189exit:
9190 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
9191 return ret;
9192}
9193
9194static const struct of_device_id qseecom_match[] = {
9195 {
9196 .compatible = "qcom,qseecom",
9197 },
9198 {}
9199};
9200
9201static struct platform_driver qseecom_plat_driver = {
9202 .probe = qseecom_probe,
9203 .remove = qseecom_remove,
9204 .suspend = qseecom_suspend,
9205 .resume = qseecom_resume,
9206 .driver = {
9207 .name = "qseecom",
9208 .owner = THIS_MODULE,
9209 .of_match_table = qseecom_match,
9210 },
9211};
9212
9213static int qseecom_init(void)
9214{
9215 return platform_driver_register(&qseecom_plat_driver);
9216}
9217
9218static void qseecom_exit(void)
9219{
9220 platform_driver_unregister(&qseecom_plat_driver);
9221}
9222
9223MODULE_LICENSE("GPL v2");
9224MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
9225
9226module_init(qseecom_init);
9227module_exit(qseecom_exit);