blob: 21c3545caa44e30546d57bfd92194bdef5955d9f [file] [log] [blame]
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
Zhen Kong87dcf0e2019-01-04 12:34:50 -08004 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
Zhen Kongc4c162a2019-01-23 12:07:12 -080053#include <linux/kthread.h>
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -070054
55#define QSEECOM_DEV "qseecom"
56#define QSEOS_VERSION_14 0x14
57#define QSEEE_VERSION_00 0x400000
58#define QSEE_VERSION_01 0x401000
59#define QSEE_VERSION_02 0x402000
60#define QSEE_VERSION_03 0x403000
61#define QSEE_VERSION_04 0x404000
62#define QSEE_VERSION_05 0x405000
63#define QSEE_VERSION_20 0x800000
64#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
65
66#define QSEE_CE_CLK_100MHZ 100000000
67#define CE_CLK_DIV 1000000
68
Mohamed Sunfeer105a07b2018-08-29 13:52:40 +053069#define QSEECOM_MAX_SG_ENTRY 4096
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -070070#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
71 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
72
73#define QSEECOM_INVALID_KEY_ID 0xff
74
75/* Save partition image hash for authentication check */
76#define SCM_SAVE_PARTITION_HASH_ID 0x01
77
78/* Check if enterprise security is activate */
79#define SCM_IS_ACTIVATED_ID 0x02
80
81/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
82#define SCM_MDTP_CIPHER_DIP 0x01
83
84/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
85#define MAX_DIP 0x20000
86
87#define RPMB_SERVICE 0x2000
88#define SSD_SERVICE 0x3000
89
90#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
91#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
92#define TWO 2
93#define QSEECOM_UFS_ICE_CE_NUM 10
94#define QSEECOM_SDCC_ICE_CE_NUM 20
95#define QSEECOM_ICE_FDE_KEY_INDEX 0
96
97#define PHY_ADDR_4G (1ULL<<32)
98
99#define QSEECOM_STATE_NOT_READY 0
100#define QSEECOM_STATE_SUSPEND 1
101#define QSEECOM_STATE_READY 2
102#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
103
104/*
105 * default ce info unit to 0 for
106 * services which
107 * support only single instance.
108 * Most of services are in this category.
109 */
110#define DEFAULT_CE_INFO_UNIT 0
111#define DEFAULT_NUM_CE_INFO_UNIT 1
112
Jiten Patela7bb1d52018-05-11 12:34:26 +0530113#define FDE_FLAG_POS 4
114#define ENABLE_KEY_WRAP_IN_KS (1 << FDE_FLAG_POS)
115
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700116enum qseecom_clk_definitions {
117 CLK_DFAB = 0,
118 CLK_SFPB,
119};
120
121enum qseecom_ice_key_size_type {
122 QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
123 (0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
124 QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
125 (1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
126 QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
127 (0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
128};
129
130enum qseecom_client_handle_type {
131 QSEECOM_CLIENT_APP = 1,
132 QSEECOM_LISTENER_SERVICE,
133 QSEECOM_SECURE_SERVICE,
134 QSEECOM_GENERIC,
135 QSEECOM_UNAVAILABLE_CLIENT_APP,
136};
137
138enum qseecom_ce_hw_instance {
139 CLK_QSEE = 0,
140 CLK_CE_DRV,
141 CLK_INVALID,
142};
143
Zhen Kongc4c162a2019-01-23 12:07:12 -0800144enum qseecom_listener_unregister_kthread_state {
145 LSNR_UNREG_KT_SLEEP = 0,
146 LSNR_UNREG_KT_WAKEUP,
147};
148
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700149static struct class *driver_class;
150static dev_t qseecom_device_no;
151
152static DEFINE_MUTEX(qsee_bw_mutex);
153static DEFINE_MUTEX(app_access_lock);
154static DEFINE_MUTEX(clk_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -0800155static DEFINE_MUTEX(listener_access_lock);
156
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700157
158struct sglist_info {
159 uint32_t indexAndFlags;
160 uint32_t sizeOrCount;
161};
162
163/*
164 * The 31th bit indicates only one or multiple physical address inside
165 * the request buffer. If it is set, the index locates a single physical addr
166 * inside the request buffer, and `sizeOrCount` is the size of the memory being
167 * shared at that physical address.
168 * Otherwise, the index locates an array of {start, len} pairs (a
169 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
170 * that array.
171 *
172 * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
173 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
174 *
175 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
176 */
177#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
178 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
179
180#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
181
182#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
183
184#define MAKE_WHITELIST_VERSION(major, minor, patch) \
185 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
186
187struct qseecom_registered_listener_list {
188 struct list_head list;
189 struct qseecom_register_listener_req svc;
190 void *user_virt_sb_base;
191 u8 *sb_virt;
192 phys_addr_t sb_phys;
193 size_t sb_length;
194 struct ion_handle *ihandle; /* Retrieve phy addr */
195 wait_queue_head_t rcv_req_wq;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800196 /* rcv_req_flag: 0: ready and empty; 1: received req */
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700197 int rcv_req_flag;
198 int send_resp_flag;
199 bool listener_in_use;
200 /* wq for thread blocked on this listener*/
201 wait_queue_head_t listener_block_app_wq;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800202 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
203 uint32_t sglist_cnt;
204 int abort;
205 bool unregister_pending;
206};
207
208struct qseecom_unregister_pending_list {
209 struct list_head list;
210 struct qseecom_dev_handle *data;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700211};
212
213struct qseecom_registered_app_list {
214 struct list_head list;
215 u32 app_id;
216 u32 ref_cnt;
217 char app_name[MAX_APP_NAME_SIZE];
218 u32 app_arch;
219 bool app_blocked;
Zhen Kongdea10592018-07-30 17:50:10 -0700220 u32 check_block;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700221 u32 blocked_on_listener_id;
222};
223
224struct qseecom_registered_kclient_list {
225 struct list_head list;
226 struct qseecom_handle *handle;
227};
228
229struct qseecom_ce_info_use {
230 unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
231 unsigned int unit_num;
232 unsigned int num_ce_pipe_entries;
233 struct qseecom_ce_pipe_entry *ce_pipe_entry;
234 bool alloc;
235 uint32_t type;
236};
237
238struct ce_hw_usage_info {
239 uint32_t qsee_ce_hw_instance;
240 uint32_t num_fde;
241 struct qseecom_ce_info_use *fde;
242 uint32_t num_pfe;
243 struct qseecom_ce_info_use *pfe;
244};
245
246struct qseecom_clk {
247 enum qseecom_ce_hw_instance instance;
248 struct clk *ce_core_clk;
249 struct clk *ce_clk;
250 struct clk *ce_core_src_clk;
251 struct clk *ce_bus_clk;
252 uint32_t clk_access_cnt;
253};
254
255struct qseecom_control {
256 struct ion_client *ion_clnt; /* Ion client */
257 struct list_head registered_listener_list_head;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700258
259 struct list_head registered_app_list_head;
260 spinlock_t registered_app_list_lock;
261
262 struct list_head registered_kclient_list_head;
263 spinlock_t registered_kclient_list_lock;
264
265 wait_queue_head_t send_resp_wq;
266 int send_resp_flag;
267
268 uint32_t qseos_version;
269 uint32_t qsee_version;
270 struct device *pdev;
271 bool whitelist_support;
272 bool commonlib_loaded;
273 bool commonlib64_loaded;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700274 struct ce_hw_usage_info ce_info;
275
276 int qsee_bw_count;
277 int qsee_sfpb_bw_count;
278
279 uint32_t qsee_perf_client;
280 struct qseecom_clk qsee;
281 struct qseecom_clk ce_drv;
282
283 bool support_bus_scaling;
284 bool support_fde;
285 bool support_pfe;
286 bool fde_key_size;
287 uint32_t cumulative_mode;
288 enum qseecom_bandwidth_request_mode current_mode;
289 struct timer_list bw_scale_down_timer;
290 struct work_struct bw_inactive_req_ws;
291 struct cdev cdev;
292 bool timer_running;
293 bool no_clock_support;
294 unsigned int ce_opp_freq_hz;
295 bool appsbl_qseecom_support;
296 uint32_t qsee_reentrancy_support;
Jiten Patela7bb1d52018-05-11 12:34:26 +0530297 bool enable_key_wrap_in_ks;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700298
299 uint32_t app_block_ref_cnt;
300 wait_queue_head_t app_block_wq;
301 atomic_t qseecom_state;
302 int is_apps_region_protected;
Zhen Kong2f60f492017-06-29 15:22:14 -0700303 bool smcinvoke_support;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800304
305 struct list_head unregister_lsnr_pending_list_head;
306 wait_queue_head_t register_lsnr_pending_wq;
Zhen Kongc4c162a2019-01-23 12:07:12 -0800307 struct task_struct *unregister_lsnr_kthread_task;
308 wait_queue_head_t unregister_lsnr_kthread_wq;
309 atomic_t unregister_lsnr_kthread_state;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700310};
311
312struct qseecom_sec_buf_fd_info {
313 bool is_sec_buf_fd;
314 size_t size;
315 void *vbase;
316 dma_addr_t pbase;
317};
318
319struct qseecom_param_memref {
320 uint32_t buffer;
321 uint32_t size;
322};
323
324struct qseecom_client_handle {
325 u32 app_id;
326 u8 *sb_virt;
327 phys_addr_t sb_phys;
328 unsigned long user_virt_sb_base;
329 size_t sb_length;
330 struct ion_handle *ihandle; /* Retrieve phy addr */
331 char app_name[MAX_APP_NAME_SIZE];
332 u32 app_arch;
333 struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
334};
335
336struct qseecom_listener_handle {
337 u32 id;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800338 bool unregister_pending;
Zhen Kong87dcf0e2019-01-04 12:34:50 -0800339 bool release_called;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700340};
341
342static struct qseecom_control qseecom;
343
344struct qseecom_dev_handle {
345 enum qseecom_client_handle_type type;
346 union {
347 struct qseecom_client_handle client;
348 struct qseecom_listener_handle listener;
349 };
350 bool released;
351 int abort;
352 wait_queue_head_t abort_wq;
353 atomic_t ioctl_count;
354 bool perf_enabled;
355 bool fast_load_enabled;
356 enum qseecom_bandwidth_request_mode mode;
357 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
358 uint32_t sglist_cnt;
359 bool use_legacy_cmd;
360};
361
362struct qseecom_key_id_usage_desc {
363 uint8_t desc[QSEECOM_KEY_ID_SIZE];
364};
365
366struct qseecom_crypto_info {
367 unsigned int unit_num;
368 unsigned int ce;
369 unsigned int pipe_pair;
370};
371
372static struct qseecom_key_id_usage_desc key_id_array[] = {
373 {
374 .desc = "Undefined Usage Index",
375 },
376
377 {
378 .desc = "Full Disk Encryption",
379 },
380
381 {
382 .desc = "Per File Encryption",
383 },
384
385 {
386 .desc = "UFS ICE Full Disk Encryption",
387 },
388
389 {
390 .desc = "SDCC ICE Full Disk Encryption",
391 },
392};
393
394/* Function proto types */
395static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
396static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
397static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
398static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
399static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
400static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
401 char *cmnlib_name);
402static int qseecom_enable_ice_setup(int usage);
403static int qseecom_disable_ice_setup(int usage);
404static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
405static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
406 void __user *argp);
407static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
408 void __user *argp);
409static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
410 void __user *argp);
411
412static int get_qseecom_keymaster_status(char *str)
413{
414 get_option(&str, &qseecom.is_apps_region_protected);
415 return 1;
416}
417__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
418
Zhen Kong03f220d2019-02-01 17:12:34 -0800419
420#define QSEECOM_SCM_EBUSY_WAIT_MS 30
421#define QSEECOM_SCM_EBUSY_MAX_RETRY 67
422
423static int __qseecom_scm_call2_locked(uint32_t smc_id, struct scm_desc *desc)
424{
425 int ret = 0;
426 int retry_count = 0;
427
428 do {
429 ret = scm_call2_noretry(smc_id, desc);
430 if (ret == -EBUSY) {
431 mutex_unlock(&app_access_lock);
432 msleep(QSEECOM_SCM_EBUSY_WAIT_MS);
433 mutex_lock(&app_access_lock);
434 }
435 if (retry_count == 33)
436 pr_warn("secure world has been busy for 1 second!\n");
437 } while (ret == -EBUSY &&
438 (retry_count++ < QSEECOM_SCM_EBUSY_MAX_RETRY));
439 return ret;
440}
441
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700442static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
443 const void *req_buf, void *resp_buf)
444{
445 int ret = 0;
446 uint32_t smc_id = 0;
447 uint32_t qseos_cmd_id = 0;
448 struct scm_desc desc = {0};
449 struct qseecom_command_scm_resp *scm_resp = NULL;
450
451 if (!req_buf || !resp_buf) {
452 pr_err("Invalid buffer pointer\n");
453 return -EINVAL;
454 }
455 qseos_cmd_id = *(uint32_t *)req_buf;
456 scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
457
458 switch (svc_id) {
459 case 6: {
460 if (tz_cmd_id == 3) {
461 smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
462 desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
463 desc.args[0] = *(uint32_t *)req_buf;
464 } else {
465 pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
466 svc_id, tz_cmd_id);
467 return -EINVAL;
468 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800469 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700470 break;
471 }
472 case SCM_SVC_ES: {
473 switch (tz_cmd_id) {
474 case SCM_SAVE_PARTITION_HASH_ID: {
475 u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
476 struct qseecom_save_partition_hash_req *p_hash_req =
477 (struct qseecom_save_partition_hash_req *)
478 req_buf;
479 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
480
481 if (!tzbuf)
482 return -ENOMEM;
483 memset(tzbuf, 0, tzbuflen);
484 memcpy(tzbuf, p_hash_req->digest,
485 SHA256_DIGEST_LENGTH);
486 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
487 smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
488 desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
489 desc.args[0] = p_hash_req->partition_id;
490 desc.args[1] = virt_to_phys(tzbuf);
491 desc.args[2] = SHA256_DIGEST_LENGTH;
Zhen Kong03f220d2019-02-01 17:12:34 -0800492 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700493 kzfree(tzbuf);
494 break;
495 }
496 default: {
497 pr_err("tz_cmd_id %d is not supported by scm_call2\n",
498 tz_cmd_id);
499 ret = -EINVAL;
500 break;
501 }
502 } /* end of switch (tz_cmd_id) */
503 break;
504 } /* end of case SCM_SVC_ES */
505 case SCM_SVC_TZSCHEDULER: {
506 switch (qseos_cmd_id) {
507 case QSEOS_APP_START_COMMAND: {
508 struct qseecom_load_app_ireq *req;
509 struct qseecom_load_app_64bit_ireq *req_64bit;
510
511 smc_id = TZ_OS_APP_START_ID;
512 desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
513 if (qseecom.qsee_version < QSEE_VERSION_40) {
514 req = (struct qseecom_load_app_ireq *)req_buf;
515 desc.args[0] = req->mdt_len;
516 desc.args[1] = req->img_len;
517 desc.args[2] = req->phy_addr;
518 } else {
519 req_64bit =
520 (struct qseecom_load_app_64bit_ireq *)
521 req_buf;
522 desc.args[0] = req_64bit->mdt_len;
523 desc.args[1] = req_64bit->img_len;
524 desc.args[2] = req_64bit->phy_addr;
525 }
526 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800527 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700528 break;
529 }
530 case QSEOS_APP_SHUTDOWN_COMMAND: {
531 struct qseecom_unload_app_ireq *req;
532
533 req = (struct qseecom_unload_app_ireq *)req_buf;
534 smc_id = TZ_OS_APP_SHUTDOWN_ID;
535 desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
536 desc.args[0] = req->app_id;
Zhen Kong03f220d2019-02-01 17:12:34 -0800537 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700538 break;
539 }
540 case QSEOS_APP_LOOKUP_COMMAND: {
541 struct qseecom_check_app_ireq *req;
542 u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
543 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
544
545 if (!tzbuf)
546 return -ENOMEM;
547 req = (struct qseecom_check_app_ireq *)req_buf;
548 pr_debug("Lookup app_name = %s\n", req->app_name);
549 strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
550 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
551 smc_id = TZ_OS_APP_LOOKUP_ID;
552 desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
553 desc.args[0] = virt_to_phys(tzbuf);
554 desc.args[1] = strlen(req->app_name);
555 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800556 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700557 kzfree(tzbuf);
558 break;
559 }
560 case QSEOS_APP_REGION_NOTIFICATION: {
561 struct qsee_apps_region_info_ireq *req;
562 struct qsee_apps_region_info_64bit_ireq *req_64bit;
563
564 smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
565 desc.arginfo =
566 TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
567 if (qseecom.qsee_version < QSEE_VERSION_40) {
568 req = (struct qsee_apps_region_info_ireq *)
569 req_buf;
570 desc.args[0] = req->addr;
571 desc.args[1] = req->size;
572 } else {
573 req_64bit =
574 (struct qsee_apps_region_info_64bit_ireq *)
575 req_buf;
576 desc.args[0] = req_64bit->addr;
577 desc.args[1] = req_64bit->size;
578 }
579 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800580 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700581 break;
582 }
583 case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
584 struct qseecom_load_lib_image_ireq *req;
585 struct qseecom_load_lib_image_64bit_ireq *req_64bit;
586
587 smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
588 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
589 if (qseecom.qsee_version < QSEE_VERSION_40) {
590 req = (struct qseecom_load_lib_image_ireq *)
591 req_buf;
592 desc.args[0] = req->mdt_len;
593 desc.args[1] = req->img_len;
594 desc.args[2] = req->phy_addr;
595 } else {
596 req_64bit =
597 (struct qseecom_load_lib_image_64bit_ireq *)
598 req_buf;
599 desc.args[0] = req_64bit->mdt_len;
600 desc.args[1] = req_64bit->img_len;
601 desc.args[2] = req_64bit->phy_addr;
602 }
603 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800604 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700605 break;
606 }
607 case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
608 smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
609 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
610 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800611 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700612 break;
613 }
614 case QSEOS_REGISTER_LISTENER: {
615 struct qseecom_register_listener_ireq *req;
616 struct qseecom_register_listener_64bit_ireq *req_64bit;
617
618 desc.arginfo =
619 TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
620 if (qseecom.qsee_version < QSEE_VERSION_40) {
621 req = (struct qseecom_register_listener_ireq *)
622 req_buf;
623 desc.args[0] = req->listener_id;
624 desc.args[1] = req->sb_ptr;
625 desc.args[2] = req->sb_len;
626 } else {
627 req_64bit =
628 (struct qseecom_register_listener_64bit_ireq *)
629 req_buf;
630 desc.args[0] = req_64bit->listener_id;
631 desc.args[1] = req_64bit->sb_ptr;
632 desc.args[2] = req_64bit->sb_len;
633 }
Zhen Kong2f60f492017-06-29 15:22:14 -0700634 qseecom.smcinvoke_support = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700635 smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
Zhen Kong03f220d2019-02-01 17:12:34 -0800636 ret = __qseecom_scm_call2_locked(smc_id, &desc);
Zhen Kong50a15202019-01-29 14:16:00 -0800637 if (ret == -EIO) {
638 /* smcinvoke is not supported */
Zhen Kong2f60f492017-06-29 15:22:14 -0700639 qseecom.smcinvoke_support = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700640 smc_id = TZ_OS_REGISTER_LISTENER_ID;
Zhen Kong03f220d2019-02-01 17:12:34 -0800641 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700642 }
643 break;
644 }
645 case QSEOS_DEREGISTER_LISTENER: {
646 struct qseecom_unregister_listener_ireq *req;
647
648 req = (struct qseecom_unregister_listener_ireq *)
649 req_buf;
650 smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
651 desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
652 desc.args[0] = req->listener_id;
Zhen Kong03f220d2019-02-01 17:12:34 -0800653 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700654 break;
655 }
656 case QSEOS_LISTENER_DATA_RSP_COMMAND: {
657 struct qseecom_client_listener_data_irsp *req;
658
659 req = (struct qseecom_client_listener_data_irsp *)
660 req_buf;
661 smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
662 desc.arginfo =
663 TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
664 desc.args[0] = req->listener_id;
665 desc.args[1] = req->status;
Zhen Kong03f220d2019-02-01 17:12:34 -0800666 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700667 break;
668 }
669 case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
670 struct qseecom_client_listener_data_irsp *req;
671 struct qseecom_client_listener_data_64bit_irsp *req_64;
672
673 smc_id =
674 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
675 desc.arginfo =
676 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
677 if (qseecom.qsee_version < QSEE_VERSION_40) {
678 req =
679 (struct qseecom_client_listener_data_irsp *)
680 req_buf;
681 desc.args[0] = req->listener_id;
682 desc.args[1] = req->status;
683 desc.args[2] = req->sglistinfo_ptr;
684 desc.args[3] = req->sglistinfo_len;
685 } else {
686 req_64 =
687 (struct qseecom_client_listener_data_64bit_irsp *)
688 req_buf;
689 desc.args[0] = req_64->listener_id;
690 desc.args[1] = req_64->status;
691 desc.args[2] = req_64->sglistinfo_ptr;
692 desc.args[3] = req_64->sglistinfo_len;
693 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800694 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700695 break;
696 }
697 case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
698 struct qseecom_load_app_ireq *req;
699 struct qseecom_load_app_64bit_ireq *req_64bit;
700
701 smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
702 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
703 if (qseecom.qsee_version < QSEE_VERSION_40) {
704 req = (struct qseecom_load_app_ireq *)req_buf;
705 desc.args[0] = req->mdt_len;
706 desc.args[1] = req->img_len;
707 desc.args[2] = req->phy_addr;
708 } else {
709 req_64bit =
710 (struct qseecom_load_app_64bit_ireq *)req_buf;
711 desc.args[0] = req_64bit->mdt_len;
712 desc.args[1] = req_64bit->img_len;
713 desc.args[2] = req_64bit->phy_addr;
714 }
715 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800716 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700717 break;
718 }
719 case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
720 smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
721 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
722 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800723 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700724 break;
725 }
726
727 case QSEOS_CLIENT_SEND_DATA_COMMAND: {
728 struct qseecom_client_send_data_ireq *req;
729 struct qseecom_client_send_data_64bit_ireq *req_64bit;
730
731 smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
732 desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
733 if (qseecom.qsee_version < QSEE_VERSION_40) {
734 req = (struct qseecom_client_send_data_ireq *)
735 req_buf;
736 desc.args[0] = req->app_id;
737 desc.args[1] = req->req_ptr;
738 desc.args[2] = req->req_len;
739 desc.args[3] = req->rsp_ptr;
740 desc.args[4] = req->rsp_len;
741 } else {
742 req_64bit =
743 (struct qseecom_client_send_data_64bit_ireq *)
744 req_buf;
745 desc.args[0] = req_64bit->app_id;
746 desc.args[1] = req_64bit->req_ptr;
747 desc.args[2] = req_64bit->req_len;
748 desc.args[3] = req_64bit->rsp_ptr;
749 desc.args[4] = req_64bit->rsp_len;
750 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800751 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700752 break;
753 }
754 case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
755 struct qseecom_client_send_data_ireq *req;
756 struct qseecom_client_send_data_64bit_ireq *req_64bit;
757
758 smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
759 desc.arginfo =
760 TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
761 if (qseecom.qsee_version < QSEE_VERSION_40) {
762 req = (struct qseecom_client_send_data_ireq *)
763 req_buf;
764 desc.args[0] = req->app_id;
765 desc.args[1] = req->req_ptr;
766 desc.args[2] = req->req_len;
767 desc.args[3] = req->rsp_ptr;
768 desc.args[4] = req->rsp_len;
769 desc.args[5] = req->sglistinfo_ptr;
770 desc.args[6] = req->sglistinfo_len;
771 } else {
772 req_64bit =
773 (struct qseecom_client_send_data_64bit_ireq *)
774 req_buf;
775 desc.args[0] = req_64bit->app_id;
776 desc.args[1] = req_64bit->req_ptr;
777 desc.args[2] = req_64bit->req_len;
778 desc.args[3] = req_64bit->rsp_ptr;
779 desc.args[4] = req_64bit->rsp_len;
780 desc.args[5] = req_64bit->sglistinfo_ptr;
781 desc.args[6] = req_64bit->sglistinfo_len;
782 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800783 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700784 break;
785 }
786 case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
787 struct qseecom_client_send_service_ireq *req;
788
789 req = (struct qseecom_client_send_service_ireq *)
790 req_buf;
791 smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
792 desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
793 desc.args[0] = req->key_type;
794 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800795 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700796 break;
797 }
798 case QSEOS_RPMB_ERASE_COMMAND: {
799 smc_id = TZ_OS_RPMB_ERASE_ID;
800 desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
801 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800802 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700803 break;
804 }
805 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
806 smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
807 desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
808 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800809 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700810 break;
811 }
812 case QSEOS_GENERATE_KEY: {
813 u32 tzbuflen = PAGE_ALIGN(sizeof
814 (struct qseecom_key_generate_ireq) -
815 sizeof(uint32_t));
816 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
817
818 if (!tzbuf)
819 return -ENOMEM;
820 memset(tzbuf, 0, tzbuflen);
821 memcpy(tzbuf, req_buf + sizeof(uint32_t),
822 (sizeof(struct qseecom_key_generate_ireq) -
823 sizeof(uint32_t)));
824 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
825 smc_id = TZ_OS_KS_GEN_KEY_ID;
826 desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
827 desc.args[0] = virt_to_phys(tzbuf);
828 desc.args[1] = tzbuflen;
829 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800830 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700831 kzfree(tzbuf);
832 break;
833 }
834 case QSEOS_DELETE_KEY: {
835 u32 tzbuflen = PAGE_ALIGN(sizeof
836 (struct qseecom_key_delete_ireq) -
837 sizeof(uint32_t));
838 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
839
840 if (!tzbuf)
841 return -ENOMEM;
842 memset(tzbuf, 0, tzbuflen);
843 memcpy(tzbuf, req_buf + sizeof(uint32_t),
844 (sizeof(struct qseecom_key_delete_ireq) -
845 sizeof(uint32_t)));
846 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
847 smc_id = TZ_OS_KS_DEL_KEY_ID;
848 desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
849 desc.args[0] = virt_to_phys(tzbuf);
850 desc.args[1] = tzbuflen;
851 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800852 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700853 kzfree(tzbuf);
854 break;
855 }
856 case QSEOS_SET_KEY: {
857 u32 tzbuflen = PAGE_ALIGN(sizeof
858 (struct qseecom_key_select_ireq) -
859 sizeof(uint32_t));
860 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
861
862 if (!tzbuf)
863 return -ENOMEM;
864 memset(tzbuf, 0, tzbuflen);
865 memcpy(tzbuf, req_buf + sizeof(uint32_t),
866 (sizeof(struct qseecom_key_select_ireq) -
867 sizeof(uint32_t)));
868 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
869 smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
870 desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
871 desc.args[0] = virt_to_phys(tzbuf);
872 desc.args[1] = tzbuflen;
873 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800874 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700875 kzfree(tzbuf);
876 break;
877 }
878 case QSEOS_UPDATE_KEY_USERINFO: {
879 u32 tzbuflen = PAGE_ALIGN(sizeof
880 (struct qseecom_key_userinfo_update_ireq) -
881 sizeof(uint32_t));
882 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
883
884 if (!tzbuf)
885 return -ENOMEM;
886 memset(tzbuf, 0, tzbuflen);
887 memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
888 (struct qseecom_key_userinfo_update_ireq) -
889 sizeof(uint32_t)));
890 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
891 smc_id = TZ_OS_KS_UPDATE_KEY_ID;
892 desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
893 desc.args[0] = virt_to_phys(tzbuf);
894 desc.args[1] = tzbuflen;
895 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800896 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700897 kzfree(tzbuf);
898 break;
899 }
900 case QSEOS_TEE_OPEN_SESSION: {
901 struct qseecom_qteec_ireq *req;
902 struct qseecom_qteec_64bit_ireq *req_64bit;
903
904 smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
905 desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
906 if (qseecom.qsee_version < QSEE_VERSION_40) {
907 req = (struct qseecom_qteec_ireq *)req_buf;
908 desc.args[0] = req->app_id;
909 desc.args[1] = req->req_ptr;
910 desc.args[2] = req->req_len;
911 desc.args[3] = req->resp_ptr;
912 desc.args[4] = req->resp_len;
913 } else {
914 req_64bit = (struct qseecom_qteec_64bit_ireq *)
915 req_buf;
916 desc.args[0] = req_64bit->app_id;
917 desc.args[1] = req_64bit->req_ptr;
918 desc.args[2] = req_64bit->req_len;
919 desc.args[3] = req_64bit->resp_ptr;
920 desc.args[4] = req_64bit->resp_len;
921 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800922 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700923 break;
924 }
925 case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
926 struct qseecom_qteec_ireq *req;
927 struct qseecom_qteec_64bit_ireq *req_64bit;
928
929 smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
930 desc.arginfo =
931 TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
932 if (qseecom.qsee_version < QSEE_VERSION_40) {
933 req = (struct qseecom_qteec_ireq *)req_buf;
934 desc.args[0] = req->app_id;
935 desc.args[1] = req->req_ptr;
936 desc.args[2] = req->req_len;
937 desc.args[3] = req->resp_ptr;
938 desc.args[4] = req->resp_len;
939 desc.args[5] = req->sglistinfo_ptr;
940 desc.args[6] = req->sglistinfo_len;
941 } else {
942 req_64bit = (struct qseecom_qteec_64bit_ireq *)
943 req_buf;
944 desc.args[0] = req_64bit->app_id;
945 desc.args[1] = req_64bit->req_ptr;
946 desc.args[2] = req_64bit->req_len;
947 desc.args[3] = req_64bit->resp_ptr;
948 desc.args[4] = req_64bit->resp_len;
949 desc.args[5] = req_64bit->sglistinfo_ptr;
950 desc.args[6] = req_64bit->sglistinfo_len;
951 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800952 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700953 break;
954 }
955 case QSEOS_TEE_INVOKE_COMMAND: {
956 struct qseecom_qteec_ireq *req;
957 struct qseecom_qteec_64bit_ireq *req_64bit;
958
959 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
960 desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
961 if (qseecom.qsee_version < QSEE_VERSION_40) {
962 req = (struct qseecom_qteec_ireq *)req_buf;
963 desc.args[0] = req->app_id;
964 desc.args[1] = req->req_ptr;
965 desc.args[2] = req->req_len;
966 desc.args[3] = req->resp_ptr;
967 desc.args[4] = req->resp_len;
968 } else {
969 req_64bit = (struct qseecom_qteec_64bit_ireq *)
970 req_buf;
971 desc.args[0] = req_64bit->app_id;
972 desc.args[1] = req_64bit->req_ptr;
973 desc.args[2] = req_64bit->req_len;
974 desc.args[3] = req_64bit->resp_ptr;
975 desc.args[4] = req_64bit->resp_len;
976 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800977 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700978 break;
979 }
980 case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
981 struct qseecom_qteec_ireq *req;
982 struct qseecom_qteec_64bit_ireq *req_64bit;
983
984 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
985 desc.arginfo =
986 TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
987 if (qseecom.qsee_version < QSEE_VERSION_40) {
988 req = (struct qseecom_qteec_ireq *)req_buf;
989 desc.args[0] = req->app_id;
990 desc.args[1] = req->req_ptr;
991 desc.args[2] = req->req_len;
992 desc.args[3] = req->resp_ptr;
993 desc.args[4] = req->resp_len;
994 desc.args[5] = req->sglistinfo_ptr;
995 desc.args[6] = req->sglistinfo_len;
996 } else {
997 req_64bit = (struct qseecom_qteec_64bit_ireq *)
998 req_buf;
999 desc.args[0] = req_64bit->app_id;
1000 desc.args[1] = req_64bit->req_ptr;
1001 desc.args[2] = req_64bit->req_len;
1002 desc.args[3] = req_64bit->resp_ptr;
1003 desc.args[4] = req_64bit->resp_len;
1004 desc.args[5] = req_64bit->sglistinfo_ptr;
1005 desc.args[6] = req_64bit->sglistinfo_len;
1006 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001007 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001008 break;
1009 }
1010 case QSEOS_TEE_CLOSE_SESSION: {
1011 struct qseecom_qteec_ireq *req;
1012 struct qseecom_qteec_64bit_ireq *req_64bit;
1013
1014 smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
1015 desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
1016 if (qseecom.qsee_version < QSEE_VERSION_40) {
1017 req = (struct qseecom_qteec_ireq *)req_buf;
1018 desc.args[0] = req->app_id;
1019 desc.args[1] = req->req_ptr;
1020 desc.args[2] = req->req_len;
1021 desc.args[3] = req->resp_ptr;
1022 desc.args[4] = req->resp_len;
1023 } else {
1024 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1025 req_buf;
1026 desc.args[0] = req_64bit->app_id;
1027 desc.args[1] = req_64bit->req_ptr;
1028 desc.args[2] = req_64bit->req_len;
1029 desc.args[3] = req_64bit->resp_ptr;
1030 desc.args[4] = req_64bit->resp_len;
1031 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001032 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001033 break;
1034 }
1035 case QSEOS_TEE_REQUEST_CANCELLATION: {
1036 struct qseecom_qteec_ireq *req;
1037 struct qseecom_qteec_64bit_ireq *req_64bit;
1038
1039 smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
1040 desc.arginfo =
1041 TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
1042 if (qseecom.qsee_version < QSEE_VERSION_40) {
1043 req = (struct qseecom_qteec_ireq *)req_buf;
1044 desc.args[0] = req->app_id;
1045 desc.args[1] = req->req_ptr;
1046 desc.args[2] = req->req_len;
1047 desc.args[3] = req->resp_ptr;
1048 desc.args[4] = req->resp_len;
1049 } else {
1050 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1051 req_buf;
1052 desc.args[0] = req_64bit->app_id;
1053 desc.args[1] = req_64bit->req_ptr;
1054 desc.args[2] = req_64bit->req_len;
1055 desc.args[3] = req_64bit->resp_ptr;
1056 desc.args[4] = req_64bit->resp_len;
1057 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001058 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001059 break;
1060 }
1061 case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
1062 struct qseecom_continue_blocked_request_ireq *req =
1063 (struct qseecom_continue_blocked_request_ireq *)
1064 req_buf;
Zhen Kong2f60f492017-06-29 15:22:14 -07001065 if (qseecom.smcinvoke_support)
1066 smc_id =
1067 TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
1068 else
1069 smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001070 desc.arginfo =
1071 TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
Zhen Kong2f60f492017-06-29 15:22:14 -07001072 desc.args[0] = req->app_or_session_id;
Zhen Kong03f220d2019-02-01 17:12:34 -08001073 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001074 break;
1075 }
1076 default: {
1077 pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
1078 qseos_cmd_id);
1079 ret = -EINVAL;
1080 break;
1081 }
1082 } /*end of switch (qsee_cmd_id) */
1083 break;
1084 } /*end of case SCM_SVC_TZSCHEDULER*/
1085 default: {
1086 pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
1087 svc_id);
1088 ret = -EINVAL;
1089 break;
1090 }
1091 } /*end of switch svc_id */
1092 scm_resp->result = desc.ret[0];
1093 scm_resp->resp_type = desc.ret[1];
1094 scm_resp->data = desc.ret[2];
1095 pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
1096 svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
1097 pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
1098 scm_resp->result, scm_resp->resp_type, scm_resp->data);
1099 return ret;
1100}
1101
1102
1103static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1104 size_t cmd_len, void *resp_buf, size_t resp_len)
1105{
1106 if (!is_scm_armv8())
1107 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1108 resp_buf, resp_len);
1109 else
1110 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1111}
1112
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001113static struct qseecom_registered_listener_list *__qseecom_find_svc(
1114 int32_t listener_id)
1115{
1116 struct qseecom_registered_listener_list *entry = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001117
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001118 list_for_each_entry(entry,
1119 &qseecom.registered_listener_list_head, list) {
1120 if (entry->svc.listener_id == listener_id)
1121 break;
1122 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001123 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08001124 pr_debug("Service id: %u is not found\n", listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001125 return NULL;
1126 }
1127
1128 return entry;
1129}
1130
1131static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1132 struct qseecom_dev_handle *handle,
1133 struct qseecom_register_listener_req *listener)
1134{
1135 int ret = 0;
1136 struct qseecom_register_listener_ireq req;
1137 struct qseecom_register_listener_64bit_ireq req_64bit;
1138 struct qseecom_command_scm_resp resp;
1139 ion_phys_addr_t pa;
1140 void *cmd_buf = NULL;
1141 size_t cmd_len;
1142
1143 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001144 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001145 listener->ifd_data_fd);
1146 if (IS_ERR_OR_NULL(svc->ihandle)) {
1147 pr_err("Ion client could not retrieve the handle\n");
1148 return -ENOMEM;
1149 }
1150
1151 /* Get the physical address of the ION BUF */
1152 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1153 if (ret) {
1154 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1155 ret);
1156 return ret;
1157 }
1158 /* Populate the structure for sending scm call to load image */
1159 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1160 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1161 pr_err("ION memory mapping for listener shared buffer failed\n");
1162 return -ENOMEM;
1163 }
1164 svc->sb_phys = (phys_addr_t)pa;
1165
1166 if (qseecom.qsee_version < QSEE_VERSION_40) {
1167 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1168 req.listener_id = svc->svc.listener_id;
1169 req.sb_len = svc->sb_length;
1170 req.sb_ptr = (uint32_t)svc->sb_phys;
1171 cmd_buf = (void *)&req;
1172 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1173 } else {
1174 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1175 req_64bit.listener_id = svc->svc.listener_id;
1176 req_64bit.sb_len = svc->sb_length;
1177 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1178 cmd_buf = (void *)&req_64bit;
1179 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1180 }
1181
1182 resp.result = QSEOS_RESULT_INCOMPLETE;
1183
Zhen Kongc4c162a2019-01-23 12:07:12 -08001184 mutex_unlock(&listener_access_lock);
1185 mutex_lock(&app_access_lock);
1186 __qseecom_reentrancy_check_if_no_app_blocked(
1187 TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001188 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1189 &resp, sizeof(resp));
Zhen Kongc4c162a2019-01-23 12:07:12 -08001190 mutex_unlock(&app_access_lock);
1191 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001192 if (ret) {
1193 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1194 return -EINVAL;
1195 }
1196
1197 if (resp.result != QSEOS_RESULT_SUCCESS) {
1198 pr_err("Error SB registration req: resp.result = %d\n",
1199 resp.result);
1200 return -EPERM;
1201 }
1202 return 0;
1203}
1204
1205static int qseecom_register_listener(struct qseecom_dev_handle *data,
1206 void __user *argp)
1207{
1208 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001209 struct qseecom_register_listener_req rcvd_lstnr;
1210 struct qseecom_registered_listener_list *new_entry;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001211 struct qseecom_registered_listener_list *ptr_svc;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001212
1213 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1214 if (ret) {
1215 pr_err("copy_from_user failed\n");
1216 return ret;
1217 }
1218 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1219 rcvd_lstnr.sb_size))
1220 return -EFAULT;
1221
Zhen Kong3c674612018-09-06 22:51:27 -07001222 data->listener.id = rcvd_lstnr.listener_id;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001223
1224 ptr_svc = __qseecom_find_svc(rcvd_lstnr.listener_id);
1225 if (ptr_svc) {
1226 if (ptr_svc->unregister_pending == false) {
1227 pr_err("Service %d is not unique\n",
Zhen Kong3c674612018-09-06 22:51:27 -07001228 rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001229 data->released = true;
1230 return -EBUSY;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001231 } else {
1232 /*wait until listener is unregistered*/
1233 pr_debug("register %d has to wait\n",
1234 rcvd_lstnr.listener_id);
1235 mutex_unlock(&listener_access_lock);
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301236 ret = wait_event_interruptible(
Zhen Kongbcdeda22018-11-16 13:50:51 -08001237 qseecom.register_lsnr_pending_wq,
1238 list_empty(
1239 &qseecom.unregister_lsnr_pending_list_head));
1240 if (ret) {
1241 pr_err("interrupted register_pending_wq %d\n",
1242 rcvd_lstnr.listener_id);
1243 mutex_lock(&listener_access_lock);
1244 return -ERESTARTSYS;
1245 }
1246 mutex_lock(&listener_access_lock);
1247 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001248 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001249 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1250 if (!new_entry)
1251 return -ENOMEM;
1252 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
Zhen Kongbcdeda22018-11-16 13:50:51 -08001253 new_entry->rcv_req_flag = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001254
1255 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1256 new_entry->sb_length = rcvd_lstnr.sb_size;
1257 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1258 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
Zhen Kong3c674612018-09-06 22:51:27 -07001259 pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
1260 rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001261 kzfree(new_entry);
1262 return -ENOMEM;
1263 }
1264
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001265 init_waitqueue_head(&new_entry->rcv_req_wq);
1266 init_waitqueue_head(&new_entry->listener_block_app_wq);
1267 new_entry->send_resp_flag = 0;
1268 new_entry->listener_in_use = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001269 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001270
Zhen Kong3c674612018-09-06 22:51:27 -07001271 pr_warn("Service %d is registered\n", rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001272 return ret;
1273}
1274
Zhen Kongbcdeda22018-11-16 13:50:51 -08001275static int __qseecom_unregister_listener(struct qseecom_dev_handle *data,
1276 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001277{
1278 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001279 struct qseecom_register_listener_ireq req;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001280 struct qseecom_command_scm_resp resp;
1281 struct ion_handle *ihandle = NULL; /* Retrieve phy addr */
1282
1283 req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
1284 req.listener_id = data->listener.id;
1285 resp.result = QSEOS_RESULT_INCOMPLETE;
1286
Zhen Kongc4c162a2019-01-23 12:07:12 -08001287 mutex_unlock(&listener_access_lock);
1288 mutex_lock(&app_access_lock);
1289 __qseecom_reentrancy_check_if_no_app_blocked(
1290 TZ_OS_DEREGISTER_LISTENER_ID);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001291 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
1292 sizeof(req), &resp, sizeof(resp));
Zhen Kongc4c162a2019-01-23 12:07:12 -08001293 mutex_unlock(&app_access_lock);
1294 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001295 if (ret) {
1296 pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
1297 ret, data->listener.id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001298 if (ret == -EBUSY)
1299 return ret;
Zhen Kong3c674612018-09-06 22:51:27 -07001300 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001301 }
1302
1303 if (resp.result != QSEOS_RESULT_SUCCESS) {
1304 pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
1305 resp.result, data->listener.id);
Zhen Kong3c674612018-09-06 22:51:27 -07001306 ret = -EPERM;
1307 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001308 }
1309
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001310 while (atomic_read(&data->ioctl_count) > 1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301311 if (wait_event_interruptible(data->abort_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001312 atomic_read(&data->ioctl_count) <= 1)) {
1313 pr_err("Interrupted from abort\n");
1314 ret = -ERESTARTSYS;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001315 }
1316 }
1317
Zhen Kong3c674612018-09-06 22:51:27 -07001318exit:
1319 if (ptr_svc->sb_virt) {
1320 ihandle = ptr_svc->ihandle;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001321 if (!IS_ERR_OR_NULL(ihandle)) {
1322 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
1323 ion_free(qseecom.ion_clnt, ihandle);
1324 }
1325 }
Zhen Kong3c674612018-09-06 22:51:27 -07001326 list_del(&ptr_svc->list);
1327 kzfree(ptr_svc);
1328
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001329 data->released = true;
Zhen Kong3c674612018-09-06 22:51:27 -07001330 pr_warn("Service %d is unregistered\n", data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001331 return ret;
1332}
1333
Zhen Kongbcdeda22018-11-16 13:50:51 -08001334static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
1335{
1336 struct qseecom_registered_listener_list *ptr_svc = NULL;
1337 struct qseecom_unregister_pending_list *entry = NULL;
1338
1339 ptr_svc = __qseecom_find_svc(data->listener.id);
1340 if (!ptr_svc) {
1341 pr_err("Unregiser invalid listener ID %d\n", data->listener.id);
1342 return -ENODATA;
1343 }
1344 /* stop CA thread waiting for listener response */
1345 ptr_svc->abort = 1;
1346 wake_up_interruptible_all(&qseecom.send_resp_wq);
1347
Zhen Kongc4c162a2019-01-23 12:07:12 -08001348 /* stop listener thread waiting for listener request */
1349 data->abort = 1;
1350 wake_up_all(&ptr_svc->rcv_req_wq);
1351
Zhen Kongbcdeda22018-11-16 13:50:51 -08001352 /* return directly if pending*/
1353 if (ptr_svc->unregister_pending)
1354 return 0;
1355
1356 /*add unregistration into pending list*/
1357 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1358 if (!entry)
1359 return -ENOMEM;
1360 entry->data = data;
1361 list_add_tail(&entry->list,
1362 &qseecom.unregister_lsnr_pending_list_head);
1363 ptr_svc->unregister_pending = true;
1364 pr_debug("unregister %d pending\n", data->listener.id);
1365 return 0;
1366}
1367
1368static void __qseecom_processing_pending_lsnr_unregister(void)
1369{
1370 struct qseecom_unregister_pending_list *entry = NULL;
1371 struct qseecom_registered_listener_list *ptr_svc = NULL;
1372 struct list_head *pos;
1373 int ret = 0;
1374
1375 mutex_lock(&listener_access_lock);
1376 while (!list_empty(&qseecom.unregister_lsnr_pending_list_head)) {
1377 pos = qseecom.unregister_lsnr_pending_list_head.next;
1378 entry = list_entry(pos,
1379 struct qseecom_unregister_pending_list, list);
1380 if (entry && entry->data) {
1381 pr_debug("process pending unregister %d\n",
1382 entry->data->listener.id);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08001383 /* don't process if qseecom_release is not called*/
1384 if (!entry->data->listener.release_called)
1385 break;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001386 ptr_svc = __qseecom_find_svc(
1387 entry->data->listener.id);
1388 if (ptr_svc) {
1389 ret = __qseecom_unregister_listener(
1390 entry->data, ptr_svc);
1391 if (ret == -EBUSY) {
1392 pr_debug("unregister %d pending again\n",
1393 entry->data->listener.id);
1394 mutex_unlock(&listener_access_lock);
1395 return;
1396 }
1397 } else
1398 pr_err("invalid listener %d\n",
1399 entry->data->listener.id);
1400 kzfree(entry->data);
1401 }
1402 list_del(pos);
1403 kzfree(entry);
1404 }
1405 mutex_unlock(&listener_access_lock);
1406 wake_up_interruptible(&qseecom.register_lsnr_pending_wq);
1407}
1408
Zhen Kongc4c162a2019-01-23 12:07:12 -08001409static void __wakeup_unregister_listener_kthread(void)
1410{
1411 atomic_set(&qseecom.unregister_lsnr_kthread_state,
1412 LSNR_UNREG_KT_WAKEUP);
1413 wake_up_interruptible(&qseecom.unregister_lsnr_kthread_wq);
1414}
1415
1416static int __qseecom_unregister_listener_kthread_func(void *data)
1417{
1418 while (!kthread_should_stop()) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301419 wait_event_interruptible(
Zhen Kongc4c162a2019-01-23 12:07:12 -08001420 qseecom.unregister_lsnr_kthread_wq,
1421 atomic_read(&qseecom.unregister_lsnr_kthread_state)
1422 == LSNR_UNREG_KT_WAKEUP);
1423 pr_debug("kthread to unregister listener is called %d\n",
1424 atomic_read(&qseecom.unregister_lsnr_kthread_state));
1425 __qseecom_processing_pending_lsnr_unregister();
1426 atomic_set(&qseecom.unregister_lsnr_kthread_state,
1427 LSNR_UNREG_KT_SLEEP);
1428 }
1429 pr_warn("kthread to unregister listener stopped\n");
1430 return 0;
1431}
1432
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001433static int __qseecom_set_msm_bus_request(uint32_t mode)
1434{
1435 int ret = 0;
1436 struct qseecom_clk *qclk;
1437
1438 qclk = &qseecom.qsee;
1439 if (qclk->ce_core_src_clk != NULL) {
1440 if (mode == INACTIVE) {
1441 __qseecom_disable_clk(CLK_QSEE);
1442 } else {
1443 ret = __qseecom_enable_clk(CLK_QSEE);
1444 if (ret)
1445 pr_err("CLK enabling failed (%d) MODE (%d)\n",
1446 ret, mode);
1447 }
1448 }
1449
1450 if ((!ret) && (qseecom.current_mode != mode)) {
1451 ret = msm_bus_scale_client_update_request(
1452 qseecom.qsee_perf_client, mode);
1453 if (ret) {
1454 pr_err("Bandwidth req failed(%d) MODE (%d)\n",
1455 ret, mode);
1456 if (qclk->ce_core_src_clk != NULL) {
1457 if (mode == INACTIVE) {
1458 ret = __qseecom_enable_clk(CLK_QSEE);
1459 if (ret)
1460 pr_err("CLK enable failed\n");
1461 } else
1462 __qseecom_disable_clk(CLK_QSEE);
1463 }
1464 }
1465 qseecom.current_mode = mode;
1466 }
1467 return ret;
1468}
1469
1470static void qseecom_bw_inactive_req_work(struct work_struct *work)
1471{
1472 mutex_lock(&app_access_lock);
1473 mutex_lock(&qsee_bw_mutex);
1474 if (qseecom.timer_running)
1475 __qseecom_set_msm_bus_request(INACTIVE);
1476 pr_debug("current_mode = %d, cumulative_mode = %d\n",
1477 qseecom.current_mode, qseecom.cumulative_mode);
1478 qseecom.timer_running = false;
1479 mutex_unlock(&qsee_bw_mutex);
1480 mutex_unlock(&app_access_lock);
1481}
1482
1483static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
1484{
1485 schedule_work(&qseecom.bw_inactive_req_ws);
1486}
1487
1488static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1489{
1490 struct qseecom_clk *qclk;
1491 int ret = 0;
1492
1493 mutex_lock(&clk_access_lock);
1494 if (ce == CLK_QSEE)
1495 qclk = &qseecom.qsee;
1496 else
1497 qclk = &qseecom.ce_drv;
1498
1499 if (qclk->clk_access_cnt > 2) {
1500 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1501 ret = -EINVAL;
1502 goto err_dec_ref_cnt;
1503 }
1504 if (qclk->clk_access_cnt == 2)
1505 qclk->clk_access_cnt--;
1506
1507err_dec_ref_cnt:
1508 mutex_unlock(&clk_access_lock);
1509 return ret;
1510}
1511
1512
1513static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1514{
1515 int32_t ret = 0;
1516 int32_t request_mode = INACTIVE;
1517
1518 mutex_lock(&qsee_bw_mutex);
1519 if (mode == 0) {
1520 if (qseecom.cumulative_mode > MEDIUM)
1521 request_mode = HIGH;
1522 else
1523 request_mode = qseecom.cumulative_mode;
1524 } else {
1525 request_mode = mode;
1526 }
1527
1528 ret = __qseecom_set_msm_bus_request(request_mode);
1529 if (ret) {
1530 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1531 ret, request_mode);
1532 goto err_scale_timer;
1533 }
1534
1535 if (qseecom.timer_running) {
1536 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1537 if (ret) {
1538 pr_err("Failed to decrease clk ref count.\n");
1539 goto err_scale_timer;
1540 }
1541 del_timer_sync(&(qseecom.bw_scale_down_timer));
1542 qseecom.timer_running = false;
1543 }
1544err_scale_timer:
1545 mutex_unlock(&qsee_bw_mutex);
1546 return ret;
1547}
1548
1549
1550static int qseecom_unregister_bus_bandwidth_needs(
1551 struct qseecom_dev_handle *data)
1552{
1553 int32_t ret = 0;
1554
1555 qseecom.cumulative_mode -= data->mode;
1556 data->mode = INACTIVE;
1557
1558 return ret;
1559}
1560
1561static int __qseecom_register_bus_bandwidth_needs(
1562 struct qseecom_dev_handle *data, uint32_t request_mode)
1563{
1564 int32_t ret = 0;
1565
1566 if (data->mode == INACTIVE) {
1567 qseecom.cumulative_mode += request_mode;
1568 data->mode = request_mode;
1569 } else {
1570 if (data->mode != request_mode) {
1571 qseecom.cumulative_mode -= data->mode;
1572 qseecom.cumulative_mode += request_mode;
1573 data->mode = request_mode;
1574 }
1575 }
1576 return ret;
1577}
1578
1579static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1580{
1581 int ret = 0;
1582
1583 ret = qsee_vote_for_clock(data, CLK_DFAB);
1584 if (ret) {
1585 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1586 goto perf_enable_exit;
1587 }
1588 ret = qsee_vote_for_clock(data, CLK_SFPB);
1589 if (ret) {
1590 qsee_disable_clock_vote(data, CLK_DFAB);
1591 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1592 goto perf_enable_exit;
1593 }
1594
1595perf_enable_exit:
1596 return ret;
1597}
1598
1599static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1600 void __user *argp)
1601{
1602 int32_t ret = 0;
1603 int32_t req_mode;
1604
1605 if (qseecom.no_clock_support)
1606 return 0;
1607
1608 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1609 if (ret) {
1610 pr_err("copy_from_user failed\n");
1611 return ret;
1612 }
1613 if (req_mode > HIGH) {
1614 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1615 return -EINVAL;
1616 }
1617
1618 /*
1619 * Register bus bandwidth needs if bus scaling feature is enabled;
1620 * otherwise, qseecom enable/disable clocks for the client directly.
1621 */
1622 if (qseecom.support_bus_scaling) {
1623 mutex_lock(&qsee_bw_mutex);
1624 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1625 mutex_unlock(&qsee_bw_mutex);
1626 } else {
1627 pr_debug("Bus scaling feature is NOT enabled\n");
1628 pr_debug("request bandwidth mode %d for the client\n",
1629 req_mode);
1630 if (req_mode != INACTIVE) {
1631 ret = qseecom_perf_enable(data);
1632 if (ret)
1633 pr_err("Failed to vote for clock with err %d\n",
1634 ret);
1635 } else {
1636 qsee_disable_clock_vote(data, CLK_DFAB);
1637 qsee_disable_clock_vote(data, CLK_SFPB);
1638 }
1639 }
1640 return ret;
1641}
1642
1643static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1644{
1645 if (qseecom.no_clock_support)
1646 return;
1647
1648 mutex_lock(&qsee_bw_mutex);
1649 qseecom.bw_scale_down_timer.expires = jiffies +
1650 msecs_to_jiffies(duration);
1651 mod_timer(&(qseecom.bw_scale_down_timer),
1652 qseecom.bw_scale_down_timer.expires);
1653 qseecom.timer_running = true;
1654 mutex_unlock(&qsee_bw_mutex);
1655}
1656
1657static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1658{
1659 if (!qseecom.support_bus_scaling)
1660 qsee_disable_clock_vote(data, CLK_SFPB);
1661 else
1662 __qseecom_add_bw_scale_down_timer(
1663 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1664}
1665
1666static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1667{
1668 int ret = 0;
1669
1670 if (qseecom.support_bus_scaling) {
1671 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1672 if (ret)
1673 pr_err("Failed to set bw MEDIUM.\n");
1674 } else {
1675 ret = qsee_vote_for_clock(data, CLK_SFPB);
1676 if (ret)
1677 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1678 }
1679 return ret;
1680}
1681
1682static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1683 void __user *argp)
1684{
1685 ion_phys_addr_t pa;
1686 int32_t ret;
1687 struct qseecom_set_sb_mem_param_req req;
1688 size_t len;
1689
1690 /* Copy the relevant information needed for loading the image */
1691 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1692 return -EFAULT;
1693
1694 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1695 (req.sb_len == 0)) {
1696 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1697 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1698 return -EFAULT;
1699 }
1700 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1701 req.sb_len))
1702 return -EFAULT;
1703
1704 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001705 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001706 req.ifd_data_fd);
1707 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1708 pr_err("Ion client could not retrieve the handle\n");
1709 return -ENOMEM;
1710 }
1711 /* Get the physical address of the ION BUF */
1712 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1713 if (ret) {
1714
1715 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1716 ret);
1717 return ret;
1718 }
1719
1720 if (len < req.sb_len) {
1721 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1722 req.sb_len, len);
1723 return -EINVAL;
1724 }
1725 /* Populate the structure for sending scm call to load image */
1726 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1727 data->client.ihandle);
1728 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1729 pr_err("ION memory mapping for client shared buf failed\n");
1730 return -ENOMEM;
1731 }
1732 data->client.sb_phys = (phys_addr_t)pa;
1733 data->client.sb_length = req.sb_len;
1734 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1735 return 0;
1736}
1737
Zhen Kong26e62742018-05-04 17:19:06 -07001738static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
1739 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001740{
1741 int ret;
1742
1743 ret = (qseecom.send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001744 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001745}
1746
1747static int __qseecom_reentrancy_listener_has_sent_rsp(
1748 struct qseecom_dev_handle *data,
1749 struct qseecom_registered_listener_list *ptr_svc)
1750{
1751 int ret;
1752
1753 ret = (ptr_svc->send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001754 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001755}
1756
1757static void __qseecom_clean_listener_sglistinfo(
1758 struct qseecom_registered_listener_list *ptr_svc)
1759{
1760 if (ptr_svc->sglist_cnt) {
1761 memset(ptr_svc->sglistinfo_ptr, 0,
1762 SGLISTINFO_TABLE_SIZE);
1763 ptr_svc->sglist_cnt = 0;
1764 }
1765}
1766
1767static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
1768 struct qseecom_command_scm_resp *resp)
1769{
1770 int ret = 0;
1771 int rc = 0;
1772 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07001773 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
1774 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
1775 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001776 struct qseecom_registered_listener_list *ptr_svc = NULL;
1777 sigset_t new_sigset;
1778 sigset_t old_sigset;
1779 uint32_t status;
1780 void *cmd_buf = NULL;
1781 size_t cmd_len;
1782 struct sglist_info *table = NULL;
1783
Zhen Kongbcdeda22018-11-16 13:50:51 -08001784 qseecom.app_block_ref_cnt++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001785 while (resp->result == QSEOS_RESULT_INCOMPLETE) {
1786 lstnr = resp->data;
1787 /*
1788 * Wake up blocking lsitener service with the lstnr id
1789 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08001790 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001791 list_for_each_entry(ptr_svc,
1792 &qseecom.registered_listener_list_head, list) {
1793 if (ptr_svc->svc.listener_id == lstnr) {
1794 ptr_svc->listener_in_use = true;
1795 ptr_svc->rcv_req_flag = 1;
1796 wake_up_interruptible(&ptr_svc->rcv_req_wq);
1797 break;
1798 }
1799 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001800
1801 if (ptr_svc == NULL) {
1802 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07001803 rc = -EINVAL;
1804 status = QSEOS_RESULT_FAILURE;
1805 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001806 }
1807
1808 if (!ptr_svc->ihandle) {
1809 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07001810 rc = -EINVAL;
1811 status = QSEOS_RESULT_FAILURE;
1812 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001813 }
1814
1815 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07001816 pr_err("Service %d does not exist\n",
1817 lstnr);
1818 rc = -ERESTARTSYS;
1819 ptr_svc = NULL;
1820 status = QSEOS_RESULT_FAILURE;
1821 goto err_resp;
1822 }
1823
1824 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08001825 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07001826 lstnr, ptr_svc->abort);
1827 rc = -ENODEV;
1828 status = QSEOS_RESULT_FAILURE;
1829 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001830 }
Zhen Kong25731112018-09-20 13:10:03 -07001831
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001832 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
1833
1834 /* initialize the new signal mask with all signals*/
1835 sigfillset(&new_sigset);
1836 /* block all signals */
1837 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
1838
Zhen Kongbcdeda22018-11-16 13:50:51 -08001839 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001840 do {
1841 /*
1842 * When reentrancy is not supported, check global
1843 * send_resp_flag; otherwise, check this listener's
1844 * send_resp_flag.
1845 */
1846 if (!qseecom.qsee_reentrancy_support &&
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301847 !wait_event_interruptible(qseecom.send_resp_wq,
Zhen Kong26e62742018-05-04 17:19:06 -07001848 __qseecom_listener_has_sent_rsp(
1849 data, ptr_svc))) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001850 break;
1851 }
1852
1853 if (qseecom.qsee_reentrancy_support &&
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05301854 !wait_event_interruptible(qseecom.send_resp_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001855 __qseecom_reentrancy_listener_has_sent_rsp(
1856 data, ptr_svc))) {
1857 break;
1858 }
1859 } while (1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001860 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001861 /* restore signal mask */
1862 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07001863 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001864 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
1865 data->client.app_id, lstnr, ret);
1866 rc = -ENODEV;
1867 status = QSEOS_RESULT_FAILURE;
1868 } else {
1869 status = QSEOS_RESULT_SUCCESS;
1870 }
Zhen Kong26e62742018-05-04 17:19:06 -07001871err_resp:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001872 qseecom.send_resp_flag = 0;
Zhen Kong7d500032018-08-06 16:58:31 -07001873 if (ptr_svc) {
1874 ptr_svc->send_resp_flag = 0;
1875 table = ptr_svc->sglistinfo_ptr;
1876 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001877 if (qseecom.qsee_version < QSEE_VERSION_40) {
1878 send_data_rsp.listener_id = lstnr;
1879 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001880 if (table) {
1881 send_data_rsp.sglistinfo_ptr =
1882 (uint32_t)virt_to_phys(table);
1883 send_data_rsp.sglistinfo_len =
1884 SGLISTINFO_TABLE_SIZE;
1885 dmac_flush_range((void *)table,
1886 (void *)table + SGLISTINFO_TABLE_SIZE);
1887 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001888 cmd_buf = (void *)&send_data_rsp;
1889 cmd_len = sizeof(send_data_rsp);
1890 } else {
1891 send_data_rsp_64bit.listener_id = lstnr;
1892 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001893 if (table) {
1894 send_data_rsp_64bit.sglistinfo_ptr =
1895 virt_to_phys(table);
1896 send_data_rsp_64bit.sglistinfo_len =
1897 SGLISTINFO_TABLE_SIZE;
1898 dmac_flush_range((void *)table,
1899 (void *)table + SGLISTINFO_TABLE_SIZE);
1900 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001901 cmd_buf = (void *)&send_data_rsp_64bit;
1902 cmd_len = sizeof(send_data_rsp_64bit);
1903 }
Zhen Kong7d500032018-08-06 16:58:31 -07001904 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001905 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
1906 else
1907 *(uint32_t *)cmd_buf =
1908 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07001909 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001910 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
1911 ptr_svc->ihandle,
1912 ptr_svc->sb_virt, ptr_svc->sb_length,
1913 ION_IOC_CLEAN_INV_CACHES);
1914 if (ret) {
1915 pr_err("cache operation failed %d\n", ret);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001916 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001917 }
1918 }
1919
1920 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
1921 ret = __qseecom_enable_clk(CLK_QSEE);
1922 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08001923 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001924 }
1925
1926 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1927 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07001928 if (ptr_svc) {
1929 ptr_svc->listener_in_use = false;
1930 __qseecom_clean_listener_sglistinfo(ptr_svc);
1931 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001932 if (ret) {
1933 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1934 ret, data->client.app_id);
1935 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1936 __qseecom_disable_clk(CLK_QSEE);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001937 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001938 }
Zhen Kong26e62742018-05-04 17:19:06 -07001939 pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
1940 status, resp->result, data->client.app_id, lstnr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001941 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1942 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1943 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1944 resp->result, data->client.app_id, lstnr);
1945 ret = -EINVAL;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001946 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001947 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001948exit:
1949 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001950 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1951 __qseecom_disable_clk(CLK_QSEE);
1952
1953 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001954 qseecom.app_block_ref_cnt--;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001955 if (rc)
1956 return rc;
1957
1958 return ret;
1959}
1960
Zhen Konga91aaf02018-02-02 17:21:04 -08001961static int __qseecom_process_reentrancy_blocked_on_listener(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001962 struct qseecom_command_scm_resp *resp,
1963 struct qseecom_registered_app_list *ptr_app,
1964 struct qseecom_dev_handle *data)
1965{
1966 struct qseecom_registered_listener_list *list_ptr;
1967 int ret = 0;
1968 struct qseecom_continue_blocked_request_ireq ireq;
1969 struct qseecom_command_scm_resp continue_resp;
Zhen Konga91aaf02018-02-02 17:21:04 -08001970 unsigned int session_id;
Zhen Kong3d1d92f2018-02-02 17:21:04 -08001971 sigset_t new_sigset;
1972 sigset_t old_sigset;
Zhen Konga91aaf02018-02-02 17:21:04 -08001973 unsigned long flags;
1974 bool found_app = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001975
1976 if (!resp || !data) {
1977 pr_err("invalid resp or data pointer\n");
1978 ret = -EINVAL;
1979 goto exit;
1980 }
1981
1982 /* find app_id & img_name from list */
Zhen Konge4804722019-02-27 21:13:18 -08001983 if (!ptr_app && data->client.app_arch != ELFCLASSNONE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001984 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
1985 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
1986 list) {
1987 if ((ptr_app->app_id == data->client.app_id) &&
1988 (!strcmp(ptr_app->app_name,
1989 data->client.app_name))) {
1990 found_app = true;
1991 break;
1992 }
1993 }
1994 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
1995 flags);
1996 if (!found_app) {
1997 pr_err("app_id %d (%s) is not found\n",
1998 data->client.app_id,
1999 (char *)data->client.app_name);
2000 ret = -ENOENT;
2001 goto exit;
2002 }
2003 }
2004
Zhen Kongd8cc0052017-11-13 15:13:31 -08002005 do {
Zhen Konga91aaf02018-02-02 17:21:04 -08002006 session_id = resp->resp_type;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002007 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002008 list_ptr = __qseecom_find_svc(resp->data);
2009 if (!list_ptr) {
2010 pr_err("Invalid listener ID %d\n", resp->data);
2011 ret = -ENODATA;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002012 mutex_unlock(&listener_access_lock);
Zhen Konge7f525f2017-12-01 18:26:25 -08002013 goto exit;
2014 }
Zhen Konga91aaf02018-02-02 17:21:04 -08002015 ptr_app->blocked_on_listener_id = resp->data;
2016
2017 pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
2018 resp->data, list_ptr->listener_in_use,
2019 session_id, data->client.app_id);
2020
2021 /* sleep until listener is available */
2022 sigfillset(&new_sigset);
2023 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2024
2025 do {
2026 qseecom.app_block_ref_cnt++;
2027 ptr_app->app_blocked = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002028 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002029 mutex_unlock(&app_access_lock);
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302030 wait_event_interruptible(
Zhen Konga91aaf02018-02-02 17:21:04 -08002031 list_ptr->listener_block_app_wq,
2032 !list_ptr->listener_in_use);
2033 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002034 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002035 ptr_app->app_blocked = false;
2036 qseecom.app_block_ref_cnt--;
2037 } while (list_ptr->listener_in_use);
2038
2039 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2040
2041 ptr_app->blocked_on_listener_id = 0;
2042 pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
2043 resp->data, session_id, data->client.app_id);
2044
2045 /* notify TZ that listener is available */
2046 ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
2047
2048 if (qseecom.smcinvoke_support)
2049 ireq.app_or_session_id = session_id;
2050 else
2051 ireq.app_or_session_id = data->client.app_id;
2052
2053 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2054 &ireq, sizeof(ireq),
2055 &continue_resp, sizeof(continue_resp));
2056 if (ret && qseecom.smcinvoke_support) {
2057 /* retry with legacy cmd */
2058 qseecom.smcinvoke_support = false;
2059 ireq.app_or_session_id = data->client.app_id;
2060 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2061 &ireq, sizeof(ireq),
2062 &continue_resp, sizeof(continue_resp));
2063 qseecom.smcinvoke_support = true;
2064 if (ret) {
2065 pr_err("unblock app %d or session %d fail\n",
2066 data->client.app_id, session_id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002067 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002068 goto exit;
2069 }
2070 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08002071 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002072 resp->result = continue_resp.result;
2073 resp->resp_type = continue_resp.resp_type;
2074 resp->data = continue_resp.data;
2075 pr_debug("unblock resp = %d\n", resp->result);
2076 } while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);
2077
2078 if (resp->result != QSEOS_RESULT_INCOMPLETE) {
2079 pr_err("Unexpected unblock resp %d\n", resp->result);
2080 ret = -EINVAL;
Zhen Kong2f60f492017-06-29 15:22:14 -07002081 }
Zhen Kong2f60f492017-06-29 15:22:14 -07002082exit:
2083 return ret;
2084}
2085
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002086static int __qseecom_reentrancy_process_incomplete_cmd(
2087 struct qseecom_dev_handle *data,
2088 struct qseecom_command_scm_resp *resp)
2089{
2090 int ret = 0;
2091 int rc = 0;
2092 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07002093 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
2094 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
2095 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002096 struct qseecom_registered_listener_list *ptr_svc = NULL;
2097 sigset_t new_sigset;
2098 sigset_t old_sigset;
2099 uint32_t status;
2100 void *cmd_buf = NULL;
2101 size_t cmd_len;
2102 struct sglist_info *table = NULL;
2103
Zhen Kong26e62742018-05-04 17:19:06 -07002104 while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002105 lstnr = resp->data;
2106 /*
2107 * Wake up blocking lsitener service with the lstnr id
2108 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002109 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002110 list_for_each_entry(ptr_svc,
2111 &qseecom.registered_listener_list_head, list) {
2112 if (ptr_svc->svc.listener_id == lstnr) {
2113 ptr_svc->listener_in_use = true;
2114 ptr_svc->rcv_req_flag = 1;
2115 wake_up_interruptible(&ptr_svc->rcv_req_wq);
2116 break;
2117 }
2118 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002119
2120 if (ptr_svc == NULL) {
2121 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07002122 rc = -EINVAL;
2123 status = QSEOS_RESULT_FAILURE;
2124 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002125 }
2126
2127 if (!ptr_svc->ihandle) {
2128 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07002129 rc = -EINVAL;
2130 status = QSEOS_RESULT_FAILURE;
2131 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002132 }
2133
2134 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07002135 pr_err("Service %d does not exist\n",
2136 lstnr);
2137 rc = -ERESTARTSYS;
2138 ptr_svc = NULL;
2139 status = QSEOS_RESULT_FAILURE;
2140 goto err_resp;
2141 }
2142
2143 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08002144 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07002145 lstnr, ptr_svc->abort);
2146 rc = -ENODEV;
2147 status = QSEOS_RESULT_FAILURE;
2148 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002149 }
Zhen Kong25731112018-09-20 13:10:03 -07002150
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002151 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
2152
2153 /* initialize the new signal mask with all signals*/
2154 sigfillset(&new_sigset);
2155
2156 /* block all signals */
2157 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2158
2159 /* unlock mutex btw waking listener and sleep-wait */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002160 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002161 mutex_unlock(&app_access_lock);
2162 do {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302163 if (!wait_event_interruptible(qseecom.send_resp_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002164 __qseecom_reentrancy_listener_has_sent_rsp(
2165 data, ptr_svc))) {
2166 break;
2167 }
2168 } while (1);
2169 /* lock mutex again after resp sent */
2170 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002171 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002172 ptr_svc->send_resp_flag = 0;
2173 qseecom.send_resp_flag = 0;
2174
2175 /* restore signal mask */
2176 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07002177 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002178 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
2179 data->client.app_id, lstnr, ret);
2180 rc = -ENODEV;
2181 status = QSEOS_RESULT_FAILURE;
2182 } else {
2183 status = QSEOS_RESULT_SUCCESS;
2184 }
Zhen Kong26e62742018-05-04 17:19:06 -07002185err_resp:
Zhen Kong7d500032018-08-06 16:58:31 -07002186 if (ptr_svc)
2187 table = ptr_svc->sglistinfo_ptr;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002188 if (qseecom.qsee_version < QSEE_VERSION_40) {
2189 send_data_rsp.listener_id = lstnr;
2190 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002191 if (table) {
2192 send_data_rsp.sglistinfo_ptr =
2193 (uint32_t)virt_to_phys(table);
2194 send_data_rsp.sglistinfo_len =
2195 SGLISTINFO_TABLE_SIZE;
2196 dmac_flush_range((void *)table,
2197 (void *)table + SGLISTINFO_TABLE_SIZE);
2198 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002199 cmd_buf = (void *)&send_data_rsp;
2200 cmd_len = sizeof(send_data_rsp);
2201 } else {
2202 send_data_rsp_64bit.listener_id = lstnr;
2203 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002204 if (table) {
2205 send_data_rsp_64bit.sglistinfo_ptr =
2206 virt_to_phys(table);
2207 send_data_rsp_64bit.sglistinfo_len =
2208 SGLISTINFO_TABLE_SIZE;
2209 dmac_flush_range((void *)table,
2210 (void *)table + SGLISTINFO_TABLE_SIZE);
2211 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002212 cmd_buf = (void *)&send_data_rsp_64bit;
2213 cmd_len = sizeof(send_data_rsp_64bit);
2214 }
Zhen Kong7d500032018-08-06 16:58:31 -07002215 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002216 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
2217 else
2218 *(uint32_t *)cmd_buf =
2219 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07002220 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002221 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
2222 ptr_svc->ihandle,
2223 ptr_svc->sb_virt, ptr_svc->sb_length,
2224 ION_IOC_CLEAN_INV_CACHES);
2225 if (ret) {
2226 pr_err("cache operation failed %d\n", ret);
2227 return ret;
2228 }
2229 }
2230 if (lstnr == RPMB_SERVICE) {
2231 ret = __qseecom_enable_clk(CLK_QSEE);
2232 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08002233 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002234 }
2235
2236 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2237 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07002238 if (ptr_svc) {
2239 ptr_svc->listener_in_use = false;
2240 __qseecom_clean_listener_sglistinfo(ptr_svc);
2241 wake_up_interruptible(&ptr_svc->listener_block_app_wq);
2242 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002243
2244 if (ret) {
2245 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
2246 ret, data->client.app_id);
2247 goto exit;
2248 }
2249
2250 switch (resp->result) {
2251 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
2252 pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
2253 lstnr, data->client.app_id, resp->data);
2254 if (lstnr == resp->data) {
2255 pr_err("lstnr %d should not be blocked!\n",
2256 lstnr);
2257 ret = -EINVAL;
2258 goto exit;
2259 }
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002260 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002261 ret = __qseecom_process_reentrancy_blocked_on_listener(
2262 resp, NULL, data);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002263 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002264 if (ret) {
2265 pr_err("failed to process App(%d) %s blocked on listener %d\n",
2266 data->client.app_id,
2267 data->client.app_name, resp->data);
2268 goto exit;
2269 }
2270 case QSEOS_RESULT_SUCCESS:
2271 case QSEOS_RESULT_INCOMPLETE:
2272 break;
2273 default:
2274 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
2275 resp->result, data->client.app_id, lstnr);
2276 ret = -EINVAL;
2277 goto exit;
2278 }
2279exit:
Zhen Kongbcdeda22018-11-16 13:50:51 -08002280 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002281 if (lstnr == RPMB_SERVICE)
2282 __qseecom_disable_clk(CLK_QSEE);
2283
2284 }
2285 if (rc)
2286 return rc;
2287
2288 return ret;
2289}
2290
2291/*
2292 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2293 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2294 * So, needs to first check if no app blocked before sending OS level scm call,
2295 * then wait until all apps are unblocked.
2296 */
2297static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2298{
2299 sigset_t new_sigset, old_sigset;
2300
2301 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2302 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2303 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2304 /* thread sleep until this app unblocked */
2305 while (qseecom.app_block_ref_cnt > 0) {
2306 sigfillset(&new_sigset);
2307 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2308 mutex_unlock(&app_access_lock);
2309 do {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302310 if (!wait_event_interruptible(
2311 qseecom.app_block_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002312 (qseecom.app_block_ref_cnt == 0)))
2313 break;
2314 } while (1);
2315 mutex_lock(&app_access_lock);
2316 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2317 }
2318 }
2319}
2320
2321/*
2322 * scm_call of send data will fail if this TA is blocked or there are more
2323 * than one TA requesting listener services; So, first check to see if need
2324 * to wait.
2325 */
2326static void __qseecom_reentrancy_check_if_this_app_blocked(
2327 struct qseecom_registered_app_list *ptr_app)
2328{
2329 sigset_t new_sigset, old_sigset;
2330
2331 if (qseecom.qsee_reentrancy_support) {
Zhen Kongdea10592018-07-30 17:50:10 -07002332 ptr_app->check_block++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002333 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2334 /* thread sleep until this app unblocked */
2335 sigfillset(&new_sigset);
2336 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2337 mutex_unlock(&app_access_lock);
2338 do {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302339 if (!wait_event_interruptible(
2340 qseecom.app_block_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002341 (!ptr_app->app_blocked &&
2342 qseecom.app_block_ref_cnt <= 1)))
2343 break;
2344 } while (1);
2345 mutex_lock(&app_access_lock);
2346 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2347 }
Zhen Kongdea10592018-07-30 17:50:10 -07002348 ptr_app->check_block--;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002349 }
2350}
2351
2352static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2353 uint32_t *app_id)
2354{
2355 int32_t ret;
2356 struct qseecom_command_scm_resp resp;
2357 bool found_app = false;
2358 struct qseecom_registered_app_list *entry = NULL;
2359 unsigned long flags = 0;
2360
2361 if (!app_id) {
2362 pr_err("Null pointer to app_id\n");
2363 return -EINVAL;
2364 }
2365 *app_id = 0;
2366
2367 /* check if app exists and has been registered locally */
2368 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2369 list_for_each_entry(entry,
2370 &qseecom.registered_app_list_head, list) {
2371 if (!strcmp(entry->app_name, req.app_name)) {
2372 found_app = true;
2373 break;
2374 }
2375 }
2376 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2377 if (found_app) {
2378 pr_debug("Found app with id %d\n", entry->app_id);
2379 *app_id = entry->app_id;
2380 return 0;
2381 }
2382
2383 memset((void *)&resp, 0, sizeof(resp));
2384
2385 /* SCM_CALL to check if app_id for the mentioned app exists */
2386 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2387 sizeof(struct qseecom_check_app_ireq),
2388 &resp, sizeof(resp));
2389 if (ret) {
2390 pr_err("scm_call to check if app is already loaded failed\n");
2391 return -EINVAL;
2392 }
2393
2394 if (resp.result == QSEOS_RESULT_FAILURE)
2395 return 0;
2396
2397 switch (resp.resp_type) {
2398 /*qsee returned listener type response */
2399 case QSEOS_LISTENER_ID:
2400 pr_err("resp type is of listener type instead of app");
2401 return -EINVAL;
2402 case QSEOS_APP_ID:
2403 *app_id = resp.data;
2404 return 0;
2405 default:
2406 pr_err("invalid resp type (%d) from qsee",
2407 resp.resp_type);
2408 return -ENODEV;
2409 }
2410}
2411
2412static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2413{
2414 struct qseecom_registered_app_list *entry = NULL;
2415 unsigned long flags = 0;
2416 u32 app_id = 0;
2417 struct ion_handle *ihandle; /* Ion handle */
2418 struct qseecom_load_img_req load_img_req;
2419 int32_t ret = 0;
2420 ion_phys_addr_t pa = 0;
2421 size_t len;
2422 struct qseecom_command_scm_resp resp;
2423 struct qseecom_check_app_ireq req;
2424 struct qseecom_load_app_ireq load_req;
2425 struct qseecom_load_app_64bit_ireq load_req_64bit;
2426 void *cmd_buf = NULL;
2427 size_t cmd_len;
2428 bool first_time = false;
2429
2430 /* Copy the relevant information needed for loading the image */
2431 if (copy_from_user(&load_img_req,
2432 (void __user *)argp,
2433 sizeof(struct qseecom_load_img_req))) {
2434 pr_err("copy_from_user failed\n");
2435 return -EFAULT;
2436 }
2437
2438 /* Check and load cmnlib */
2439 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2440 if (!qseecom.commonlib_loaded &&
2441 load_img_req.app_arch == ELFCLASS32) {
2442 ret = qseecom_load_commonlib_image(data, "cmnlib");
2443 if (ret) {
2444 pr_err("failed to load cmnlib\n");
2445 return -EIO;
2446 }
2447 qseecom.commonlib_loaded = true;
2448 pr_debug("cmnlib is loaded\n");
2449 }
2450
2451 if (!qseecom.commonlib64_loaded &&
2452 load_img_req.app_arch == ELFCLASS64) {
2453 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2454 if (ret) {
2455 pr_err("failed to load cmnlib64\n");
2456 return -EIO;
2457 }
2458 qseecom.commonlib64_loaded = true;
2459 pr_debug("cmnlib64 is loaded\n");
2460 }
2461 }
2462
2463 if (qseecom.support_bus_scaling) {
2464 mutex_lock(&qsee_bw_mutex);
2465 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2466 mutex_unlock(&qsee_bw_mutex);
2467 if (ret)
2468 return ret;
2469 }
2470
2471 /* Vote for the SFPB clock */
2472 ret = __qseecom_enable_clk_scale_up(data);
2473 if (ret)
2474 goto enable_clk_err;
2475
2476 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2477 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2478 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2479
2480 ret = __qseecom_check_app_exists(req, &app_id);
2481 if (ret < 0)
2482 goto loadapp_err;
2483
2484 if (app_id) {
2485 pr_debug("App id %d (%s) already exists\n", app_id,
2486 (char *)(req.app_name));
2487 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2488 list_for_each_entry(entry,
2489 &qseecom.registered_app_list_head, list){
2490 if (entry->app_id == app_id) {
2491 entry->ref_cnt++;
2492 break;
2493 }
2494 }
2495 spin_unlock_irqrestore(
2496 &qseecom.registered_app_list_lock, flags);
2497 ret = 0;
2498 } else {
2499 first_time = true;
2500 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2501 (char *)(load_img_req.img_name));
2502 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002503 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002504 load_img_req.ifd_data_fd);
2505 if (IS_ERR_OR_NULL(ihandle)) {
2506 pr_err("Ion client could not retrieve the handle\n");
2507 ret = -ENOMEM;
2508 goto loadapp_err;
2509 }
2510
2511 /* Get the physical address of the ION BUF */
2512 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2513 if (ret) {
2514 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2515 ret);
2516 goto loadapp_err;
2517 }
2518 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2519 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2520 len, load_img_req.mdt_len,
2521 load_img_req.img_len);
2522 ret = -EINVAL;
2523 goto loadapp_err;
2524 }
2525 /* Populate the structure for sending scm call to load image */
2526 if (qseecom.qsee_version < QSEE_VERSION_40) {
2527 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2528 load_req.mdt_len = load_img_req.mdt_len;
2529 load_req.img_len = load_img_req.img_len;
2530 strlcpy(load_req.app_name, load_img_req.img_name,
2531 MAX_APP_NAME_SIZE);
2532 load_req.phy_addr = (uint32_t)pa;
2533 cmd_buf = (void *)&load_req;
2534 cmd_len = sizeof(struct qseecom_load_app_ireq);
2535 } else {
2536 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2537 load_req_64bit.mdt_len = load_img_req.mdt_len;
2538 load_req_64bit.img_len = load_img_req.img_len;
2539 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2540 MAX_APP_NAME_SIZE);
2541 load_req_64bit.phy_addr = (uint64_t)pa;
2542 cmd_buf = (void *)&load_req_64bit;
2543 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2544 }
2545
2546 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2547 ION_IOC_CLEAN_INV_CACHES);
2548 if (ret) {
2549 pr_err("cache operation failed %d\n", ret);
2550 goto loadapp_err;
2551 }
2552
2553 /* SCM_CALL to load the app and get the app_id back */
2554 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2555 cmd_len, &resp, sizeof(resp));
2556 if (ret) {
2557 pr_err("scm_call to load app failed\n");
2558 if (!IS_ERR_OR_NULL(ihandle))
2559 ion_free(qseecom.ion_clnt, ihandle);
2560 ret = -EINVAL;
2561 goto loadapp_err;
2562 }
2563
2564 if (resp.result == QSEOS_RESULT_FAILURE) {
2565 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2566 if (!IS_ERR_OR_NULL(ihandle))
2567 ion_free(qseecom.ion_clnt, ihandle);
2568 ret = -EFAULT;
2569 goto loadapp_err;
2570 }
2571
2572 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2573 ret = __qseecom_process_incomplete_cmd(data, &resp);
2574 if (ret) {
2575 pr_err("process_incomplete_cmd failed err: %d\n",
2576 ret);
2577 if (!IS_ERR_OR_NULL(ihandle))
2578 ion_free(qseecom.ion_clnt, ihandle);
2579 ret = -EFAULT;
2580 goto loadapp_err;
2581 }
2582 }
2583
2584 if (resp.result != QSEOS_RESULT_SUCCESS) {
2585 pr_err("scm_call failed resp.result unknown, %d\n",
2586 resp.result);
2587 if (!IS_ERR_OR_NULL(ihandle))
2588 ion_free(qseecom.ion_clnt, ihandle);
2589 ret = -EFAULT;
2590 goto loadapp_err;
2591 }
2592
2593 app_id = resp.data;
2594
2595 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2596 if (!entry) {
2597 ret = -ENOMEM;
2598 goto loadapp_err;
2599 }
2600 entry->app_id = app_id;
2601 entry->ref_cnt = 1;
2602 entry->app_arch = load_img_req.app_arch;
2603 /*
2604 * keymaster app may be first loaded as "keymaste" by qseecomd,
2605 * and then used as "keymaster" on some targets. To avoid app
2606 * name checking error, register "keymaster" into app_list and
2607 * thread private data.
2608 */
2609 if (!strcmp(load_img_req.img_name, "keymaste"))
2610 strlcpy(entry->app_name, "keymaster",
2611 MAX_APP_NAME_SIZE);
2612 else
2613 strlcpy(entry->app_name, load_img_req.img_name,
2614 MAX_APP_NAME_SIZE);
2615 entry->app_blocked = false;
2616 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07002617 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002618
2619 /* Deallocate the handle */
2620 if (!IS_ERR_OR_NULL(ihandle))
2621 ion_free(qseecom.ion_clnt, ihandle);
2622
2623 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2624 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2625 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2626 flags);
2627
2628 pr_warn("App with id %u (%s) now loaded\n", app_id,
2629 (char *)(load_img_req.img_name));
2630 }
2631 data->client.app_id = app_id;
2632 data->client.app_arch = load_img_req.app_arch;
2633 if (!strcmp(load_img_req.img_name, "keymaste"))
2634 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2635 else
2636 strlcpy(data->client.app_name, load_img_req.img_name,
2637 MAX_APP_NAME_SIZE);
2638 load_img_req.app_id = app_id;
2639 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2640 pr_err("copy_to_user failed\n");
2641 ret = -EFAULT;
2642 if (first_time == true) {
2643 spin_lock_irqsave(
2644 &qseecom.registered_app_list_lock, flags);
2645 list_del(&entry->list);
2646 spin_unlock_irqrestore(
2647 &qseecom.registered_app_list_lock, flags);
2648 kzfree(entry);
2649 }
2650 }
2651
2652loadapp_err:
2653 __qseecom_disable_clk_scale_down(data);
2654enable_clk_err:
2655 if (qseecom.support_bus_scaling) {
2656 mutex_lock(&qsee_bw_mutex);
2657 qseecom_unregister_bus_bandwidth_needs(data);
2658 mutex_unlock(&qsee_bw_mutex);
2659 }
2660 return ret;
2661}
2662
2663static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2664{
2665 int ret = 1; /* Set unload app */
2666
2667 wake_up_all(&qseecom.send_resp_wq);
2668 if (qseecom.qsee_reentrancy_support)
2669 mutex_unlock(&app_access_lock);
2670 while (atomic_read(&data->ioctl_count) > 1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05302671 if (wait_event_interruptible(data->abort_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002672 atomic_read(&data->ioctl_count) <= 1)) {
2673 pr_err("Interrupted from abort\n");
2674 ret = -ERESTARTSYS;
2675 break;
2676 }
2677 }
2678 if (qseecom.qsee_reentrancy_support)
2679 mutex_lock(&app_access_lock);
2680 return ret;
2681}
2682
2683static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2684{
2685 int ret = 0;
2686
2687 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2688 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2689 ion_free(qseecom.ion_clnt, data->client.ihandle);
2690 data->client.ihandle = NULL;
2691 }
2692 return ret;
2693}
2694
2695static int qseecom_unload_app(struct qseecom_dev_handle *data,
2696 bool app_crash)
2697{
2698 unsigned long flags;
2699 unsigned long flags1;
2700 int ret = 0;
2701 struct qseecom_command_scm_resp resp;
2702 struct qseecom_registered_app_list *ptr_app = NULL;
2703 bool unload = false;
2704 bool found_app = false;
2705 bool found_dead_app = false;
Zhen Kongf818f152019-03-13 12:31:32 -07002706 bool scm_called = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002707
2708 if (!data) {
2709 pr_err("Invalid/uninitialized device handle\n");
2710 return -EINVAL;
2711 }
2712
2713 if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
2714 pr_debug("Do not unload keymaster app from tz\n");
2715 goto unload_exit;
2716 }
2717
2718 __qseecom_cleanup_app(data);
2719 __qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
2720
2721 if (data->client.app_id > 0) {
2722 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2723 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
2724 list) {
2725 if (ptr_app->app_id == data->client.app_id) {
2726 if (!strcmp((void *)ptr_app->app_name,
2727 (void *)data->client.app_name)) {
2728 found_app = true;
Zhen Kong024798b2018-07-13 18:14:26 -07002729 if (ptr_app->app_blocked ||
2730 ptr_app->check_block)
Zhen Kongaf93d7a2017-10-13 14:01:48 -07002731 app_crash = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002732 if (app_crash || ptr_app->ref_cnt == 1)
2733 unload = true;
2734 break;
2735 }
2736 found_dead_app = true;
2737 break;
2738 }
2739 }
2740 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2741 flags);
2742 if (found_app == false && found_dead_app == false) {
2743 pr_err("Cannot find app with id = %d (%s)\n",
2744 data->client.app_id,
2745 (char *)data->client.app_name);
2746 ret = -EINVAL;
2747 goto unload_exit;
2748 }
2749 }
2750
2751 if (found_dead_app)
2752 pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
2753 (char *)data->client.app_name);
2754
2755 if (unload) {
2756 struct qseecom_unload_app_ireq req;
2757 /* Populate the structure for sending scm call to load image */
2758 req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
2759 req.app_id = data->client.app_id;
2760
2761 /* SCM_CALL to unload the app */
2762 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2763 sizeof(struct qseecom_unload_app_ireq),
2764 &resp, sizeof(resp));
Zhen Kongf818f152019-03-13 12:31:32 -07002765 scm_called = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002766 if (ret) {
2767 pr_err("scm_call to unload app (id = %d) failed\n",
2768 req.app_id);
2769 ret = -EFAULT;
Zhen Kongf818f152019-03-13 12:31:32 -07002770 goto scm_exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002771 } else {
2772 pr_warn("App id %d now unloaded\n", req.app_id);
2773 }
2774 if (resp.result == QSEOS_RESULT_FAILURE) {
2775 pr_err("app (%d) unload_failed!!\n",
2776 data->client.app_id);
2777 ret = -EFAULT;
Zhen Kongf818f152019-03-13 12:31:32 -07002778 goto scm_exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002779 }
2780 if (resp.result == QSEOS_RESULT_SUCCESS)
2781 pr_debug("App (%d) is unloaded!!\n",
2782 data->client.app_id);
2783 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2784 ret = __qseecom_process_incomplete_cmd(data, &resp);
2785 if (ret) {
2786 pr_err("process_incomplete_cmd fail err: %d\n",
2787 ret);
Zhen Kongf818f152019-03-13 12:31:32 -07002788 goto scm_exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002789 }
2790 }
2791 }
2792
Zhen Kongf818f152019-03-13 12:31:32 -07002793scm_exit:
2794 if (scm_called) {
2795 /* double check if this app_entry still exists */
2796 bool doublecheck = false;
2797
2798 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2799 list_for_each_entry(ptr_app,
2800 &qseecom.registered_app_list_head, list) {
2801 if ((ptr_app->app_id == data->client.app_id) &&
2802 (!strcmp((void *)ptr_app->app_name,
2803 (void *)data->client.app_name))) {
2804 doublecheck = true;
2805 break;
2806 }
2807 }
2808 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2809 flags1);
2810 if (!doublecheck) {
2811 pr_warn("app %d(%s) entry is already removed\n",
2812 data->client.app_id,
2813 (char *)data->client.app_name);
2814 found_app = false;
2815 }
2816 }
Zhen Kong7d500032018-08-06 16:58:31 -07002817unload_exit:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002818 if (found_app) {
2819 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2820 if (app_crash) {
2821 ptr_app->ref_cnt = 0;
2822 pr_debug("app_crash: ref_count = 0\n");
2823 } else {
2824 if (ptr_app->ref_cnt == 1) {
2825 ptr_app->ref_cnt = 0;
2826 pr_debug("ref_count set to 0\n");
2827 } else {
2828 ptr_app->ref_cnt--;
2829 pr_debug("Can't unload app(%d) inuse\n",
2830 ptr_app->app_id);
2831 }
2832 }
2833 if (unload) {
2834 list_del(&ptr_app->list);
2835 kzfree(ptr_app);
2836 }
2837 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2838 flags1);
2839 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002840 qseecom_unmap_ion_allocated_memory(data);
2841 data->released = true;
2842 return ret;
2843}
2844
2845static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2846 unsigned long virt)
2847{
2848 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2849}
2850
2851static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2852 unsigned long virt)
2853{
2854 return (uintptr_t)data->client.sb_virt +
2855 (virt - data->client.user_virt_sb_base);
2856}
2857
2858int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2859 struct qseecom_send_svc_cmd_req *req_ptr,
2860 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2861{
2862 int ret = 0;
2863 void *req_buf = NULL;
2864
2865 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2866 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2867 req_ptr, send_svc_ireq_ptr);
2868 return -EINVAL;
2869 }
2870
2871 /* Clients need to ensure req_buf is at base offset of shared buffer */
2872 if ((uintptr_t)req_ptr->cmd_req_buf !=
2873 data_ptr->client.user_virt_sb_base) {
2874 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2875 return -EINVAL;
2876 }
2877
2878 if (data_ptr->client.sb_length <
2879 sizeof(struct qseecom_rpmb_provision_key)) {
2880 pr_err("shared buffer is too small to hold key type\n");
2881 return -EINVAL;
2882 }
2883 req_buf = data_ptr->client.sb_virt;
2884
2885 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2886 send_svc_ireq_ptr->key_type =
2887 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2888 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2889 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2890 data_ptr, (uintptr_t)req_ptr->resp_buf));
2891 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2892
2893 return ret;
2894}
2895
2896int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2897 struct qseecom_send_svc_cmd_req *req_ptr,
2898 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2899{
2900 int ret = 0;
2901 uint32_t reqd_len_sb_in = 0;
2902
2903 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2904 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2905 req_ptr, send_svc_ireq_ptr);
2906 return -EINVAL;
2907 }
2908
2909 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2910 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2911 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2912 pr_err("Required: %u, Available: %zu\n",
2913 reqd_len_sb_in, data_ptr->client.sb_length);
2914 return -ENOMEM;
2915 }
2916
2917 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2918 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2919 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2920 data_ptr, (uintptr_t)req_ptr->resp_buf));
2921 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2922
2923 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2924 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2925
2926
2927 return ret;
2928}
2929
2930static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
2931 struct qseecom_send_svc_cmd_req *req)
2932{
2933 if (!req || !req->resp_buf || !req->cmd_req_buf) {
2934 pr_err("req or cmd buffer or response buffer is null\n");
2935 return -EINVAL;
2936 }
2937
2938 if (!data || !data->client.ihandle) {
2939 pr_err("Client or client handle is not initialized\n");
2940 return -EINVAL;
2941 }
2942
2943 if (data->client.sb_virt == NULL) {
2944 pr_err("sb_virt null\n");
2945 return -EINVAL;
2946 }
2947
2948 if (data->client.user_virt_sb_base == 0) {
2949 pr_err("user_virt_sb_base is null\n");
2950 return -EINVAL;
2951 }
2952
2953 if (data->client.sb_length == 0) {
2954 pr_err("sb_length is 0\n");
2955 return -EINVAL;
2956 }
2957
2958 if (((uintptr_t)req->cmd_req_buf <
2959 data->client.user_virt_sb_base) ||
2960 ((uintptr_t)req->cmd_req_buf >=
2961 (data->client.user_virt_sb_base + data->client.sb_length))) {
2962 pr_err("cmd buffer address not within shared bufffer\n");
2963 return -EINVAL;
2964 }
2965 if (((uintptr_t)req->resp_buf <
2966 data->client.user_virt_sb_base) ||
2967 ((uintptr_t)req->resp_buf >=
2968 (data->client.user_virt_sb_base + data->client.sb_length))) {
2969 pr_err("response buffer address not within shared bufffer\n");
2970 return -EINVAL;
2971 }
2972 if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
2973 (req->cmd_req_len > data->client.sb_length) ||
2974 (req->resp_len > data->client.sb_length)) {
2975 pr_err("cmd buf length or response buf length not valid\n");
2976 return -EINVAL;
2977 }
2978 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
2979 pr_err("Integer overflow detected in req_len & rsp_len\n");
2980 return -EINVAL;
2981 }
2982
2983 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
2984 pr_debug("Not enough memory to fit cmd_buf.\n");
2985 pr_debug("resp_buf. Required: %u, Available: %zu\n",
2986 (req->cmd_req_len + req->resp_len),
2987 data->client.sb_length);
2988 return -ENOMEM;
2989 }
2990 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
2991 pr_err("Integer overflow in req_len & cmd_req_buf\n");
2992 return -EINVAL;
2993 }
2994 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
2995 pr_err("Integer overflow in resp_len & resp_buf\n");
2996 return -EINVAL;
2997 }
2998 if (data->client.user_virt_sb_base >
2999 (ULONG_MAX - data->client.sb_length)) {
3000 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
3001 return -EINVAL;
3002 }
3003 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3004 ((uintptr_t)data->client.user_virt_sb_base +
3005 data->client.sb_length)) ||
3006 (((uintptr_t)req->resp_buf + req->resp_len) >
3007 ((uintptr_t)data->client.user_virt_sb_base +
3008 data->client.sb_length))) {
3009 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3010 return -EINVAL;
3011 }
3012 return 0;
3013}
3014
3015static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
3016 void __user *argp)
3017{
3018 int ret = 0;
3019 struct qseecom_client_send_service_ireq send_svc_ireq;
3020 struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
3021 struct qseecom_command_scm_resp resp;
3022 struct qseecom_send_svc_cmd_req req;
3023 void *send_req_ptr;
3024 size_t req_buf_size;
3025
3026 /*struct qseecom_command_scm_resp resp;*/
3027
3028 if (copy_from_user(&req,
3029 (void __user *)argp,
3030 sizeof(req))) {
3031 pr_err("copy_from_user failed\n");
3032 return -EFAULT;
3033 }
3034
3035 if (__validate_send_service_cmd_inputs(data, &req))
3036 return -EINVAL;
3037
3038 data->type = QSEECOM_SECURE_SERVICE;
3039
3040 switch (req.cmd_id) {
3041 case QSEOS_RPMB_PROVISION_KEY_COMMAND:
3042 case QSEOS_RPMB_ERASE_COMMAND:
3043 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
3044 send_req_ptr = &send_svc_ireq;
3045 req_buf_size = sizeof(send_svc_ireq);
3046 if (__qseecom_process_rpmb_svc_cmd(data, &req,
3047 send_req_ptr))
3048 return -EINVAL;
3049 break;
3050 case QSEOS_FSM_LTEOTA_REQ_CMD:
3051 case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
3052 case QSEOS_FSM_IKE_REQ_CMD:
3053 case QSEOS_FSM_IKE_REQ_RSP_CMD:
3054 case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
3055 case QSEOS_FSM_OEM_FUSE_READ_ROW:
3056 case QSEOS_FSM_ENCFS_REQ_CMD:
3057 case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
3058 send_req_ptr = &send_fsm_key_svc_ireq;
3059 req_buf_size = sizeof(send_fsm_key_svc_ireq);
3060 if (__qseecom_process_fsm_key_svc_cmd(data, &req,
3061 send_req_ptr))
3062 return -EINVAL;
3063 break;
3064 default:
3065 pr_err("Unsupported cmd_id %d\n", req.cmd_id);
3066 return -EINVAL;
3067 }
3068
3069 if (qseecom.support_bus_scaling) {
3070 ret = qseecom_scale_bus_bandwidth_timer(HIGH);
3071 if (ret) {
3072 pr_err("Fail to set bw HIGH\n");
3073 return ret;
3074 }
3075 } else {
3076 ret = qseecom_perf_enable(data);
3077 if (ret) {
3078 pr_err("Failed to vote for clocks with err %d\n", ret);
3079 goto exit;
3080 }
3081 }
3082
3083 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3084 data->client.sb_virt, data->client.sb_length,
3085 ION_IOC_CLEAN_INV_CACHES);
3086 if (ret) {
3087 pr_err("cache operation failed %d\n", ret);
3088 goto exit;
3089 }
3090 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3091 (const void *)send_req_ptr,
3092 req_buf_size, &resp, sizeof(resp));
3093 if (ret) {
3094 pr_err("qseecom_scm_call failed with err: %d\n", ret);
3095 if (!qseecom.support_bus_scaling) {
3096 qsee_disable_clock_vote(data, CLK_DFAB);
3097 qsee_disable_clock_vote(data, CLK_SFPB);
3098 } else {
3099 __qseecom_add_bw_scale_down_timer(
3100 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3101 }
3102 goto exit;
3103 }
3104 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3105 data->client.sb_virt, data->client.sb_length,
3106 ION_IOC_INV_CACHES);
3107 if (ret) {
3108 pr_err("cache operation failed %d\n", ret);
3109 goto exit;
3110 }
3111 switch (resp.result) {
3112 case QSEOS_RESULT_SUCCESS:
3113 break;
3114 case QSEOS_RESULT_INCOMPLETE:
3115 pr_debug("qseos_result_incomplete\n");
3116 ret = __qseecom_process_incomplete_cmd(data, &resp);
3117 if (ret) {
3118 pr_err("process_incomplete_cmd fail with result: %d\n",
3119 resp.result);
3120 }
3121 if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
3122 pr_warn("RPMB key status is 0x%x\n", resp.result);
Brahmaji Kb33e26e2017-06-01 17:20:10 +05303123 if (put_user(resp.result,
3124 (uint32_t __user *)req.resp_buf)) {
3125 ret = -EINVAL;
3126 goto exit;
3127 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003128 ret = 0;
3129 }
3130 break;
3131 case QSEOS_RESULT_FAILURE:
3132 pr_err("scm call failed with resp.result: %d\n", resp.result);
3133 ret = -EINVAL;
3134 break;
3135 default:
3136 pr_err("Response result %d not supported\n",
3137 resp.result);
3138 ret = -EINVAL;
3139 break;
3140 }
3141 if (!qseecom.support_bus_scaling) {
3142 qsee_disable_clock_vote(data, CLK_DFAB);
3143 qsee_disable_clock_vote(data, CLK_SFPB);
3144 } else {
3145 __qseecom_add_bw_scale_down_timer(
3146 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3147 }
3148
3149exit:
3150 return ret;
3151}
3152
3153static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
3154 struct qseecom_send_cmd_req *req)
3155
3156{
3157 if (!data || !data->client.ihandle) {
3158 pr_err("Client or client handle is not initialized\n");
3159 return -EINVAL;
3160 }
3161 if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
3162 (req->cmd_req_buf == NULL)) {
3163 pr_err("cmd buffer or response buffer is null\n");
3164 return -EINVAL;
3165 }
3166 if (((uintptr_t)req->cmd_req_buf <
3167 data->client.user_virt_sb_base) ||
3168 ((uintptr_t)req->cmd_req_buf >=
3169 (data->client.user_virt_sb_base + data->client.sb_length))) {
3170 pr_err("cmd buffer address not within shared bufffer\n");
3171 return -EINVAL;
3172 }
3173 if (((uintptr_t)req->resp_buf <
3174 data->client.user_virt_sb_base) ||
3175 ((uintptr_t)req->resp_buf >=
3176 (data->client.user_virt_sb_base + data->client.sb_length))) {
3177 pr_err("response buffer address not within shared bufffer\n");
3178 return -EINVAL;
3179 }
3180 if ((req->cmd_req_len == 0) ||
3181 (req->cmd_req_len > data->client.sb_length) ||
3182 (req->resp_len > data->client.sb_length)) {
3183 pr_err("cmd buf length or response buf length not valid\n");
3184 return -EINVAL;
3185 }
3186 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
3187 pr_err("Integer overflow detected in req_len & rsp_len\n");
3188 return -EINVAL;
3189 }
3190
3191 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
3192 pr_debug("Not enough memory to fit cmd_buf.\n");
3193 pr_debug("resp_buf. Required: %u, Available: %zu\n",
3194 (req->cmd_req_len + req->resp_len),
3195 data->client.sb_length);
3196 return -ENOMEM;
3197 }
3198 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
3199 pr_err("Integer overflow in req_len & cmd_req_buf\n");
3200 return -EINVAL;
3201 }
3202 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
3203 pr_err("Integer overflow in resp_len & resp_buf\n");
3204 return -EINVAL;
3205 }
3206 if (data->client.user_virt_sb_base >
3207 (ULONG_MAX - data->client.sb_length)) {
3208 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
3209 return -EINVAL;
3210 }
3211 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3212 ((uintptr_t)data->client.user_virt_sb_base +
3213 data->client.sb_length)) ||
3214 (((uintptr_t)req->resp_buf + req->resp_len) >
3215 ((uintptr_t)data->client.user_virt_sb_base +
3216 data->client.sb_length))) {
3217 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3218 return -EINVAL;
3219 }
3220 return 0;
3221}
3222
3223int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
3224 struct qseecom_registered_app_list *ptr_app,
3225 struct qseecom_dev_handle *data)
3226{
3227 int ret = 0;
3228
3229 switch (resp->result) {
3230 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
3231 pr_warn("App(%d) %s is blocked on listener %d\n",
3232 data->client.app_id, data->client.app_name,
3233 resp->data);
3234 ret = __qseecom_process_reentrancy_blocked_on_listener(
3235 resp, ptr_app, data);
3236 if (ret) {
3237 pr_err("failed to process App(%d) %s is blocked on listener %d\n",
3238 data->client.app_id, data->client.app_name, resp->data);
3239 return ret;
3240 }
3241
3242 case QSEOS_RESULT_INCOMPLETE:
3243 qseecom.app_block_ref_cnt++;
3244 ptr_app->app_blocked = true;
3245 ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
3246 ptr_app->app_blocked = false;
3247 qseecom.app_block_ref_cnt--;
3248 wake_up_interruptible(&qseecom.app_block_wq);
3249 if (ret)
3250 pr_err("process_incomplete_cmd failed err: %d\n",
3251 ret);
3252 return ret;
3253 case QSEOS_RESULT_SUCCESS:
3254 return ret;
3255 default:
3256 pr_err("Response result %d not supported\n",
3257 resp->result);
3258 return -EINVAL;
3259 }
3260}
3261
3262static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
3263 struct qseecom_send_cmd_req *req)
3264{
3265 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07003266 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003267 u32 reqd_len_sb_in = 0;
3268 struct qseecom_client_send_data_ireq send_data_req = {0};
3269 struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
3270 struct qseecom_command_scm_resp resp;
3271 unsigned long flags;
3272 struct qseecom_registered_app_list *ptr_app;
3273 bool found_app = false;
3274 void *cmd_buf = NULL;
3275 size_t cmd_len;
3276 struct sglist_info *table = data->sglistinfo_ptr;
3277
3278 reqd_len_sb_in = req->cmd_req_len + req->resp_len;
3279 /* find app_id & img_name from list */
3280 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
3281 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
3282 list) {
3283 if ((ptr_app->app_id == data->client.app_id) &&
3284 (!strcmp(ptr_app->app_name, data->client.app_name))) {
3285 found_app = true;
3286 break;
3287 }
3288 }
3289 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
3290
3291 if (!found_app) {
3292 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
3293 (char *)data->client.app_name);
3294 return -ENOENT;
3295 }
3296
3297 if (qseecom.qsee_version < QSEE_VERSION_40) {
3298 send_data_req.app_id = data->client.app_id;
3299 send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3300 data, (uintptr_t)req->cmd_req_buf));
3301 send_data_req.req_len = req->cmd_req_len;
3302 send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3303 data, (uintptr_t)req->resp_buf));
3304 send_data_req.rsp_len = req->resp_len;
3305 send_data_req.sglistinfo_ptr =
3306 (uint32_t)virt_to_phys(table);
3307 send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3308 dmac_flush_range((void *)table,
3309 (void *)table + SGLISTINFO_TABLE_SIZE);
3310 cmd_buf = (void *)&send_data_req;
3311 cmd_len = sizeof(struct qseecom_client_send_data_ireq);
3312 } else {
3313 send_data_req_64bit.app_id = data->client.app_id;
3314 send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
3315 (uintptr_t)req->cmd_req_buf);
3316 send_data_req_64bit.req_len = req->cmd_req_len;
3317 send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
3318 (uintptr_t)req->resp_buf);
3319 send_data_req_64bit.rsp_len = req->resp_len;
3320 /* check if 32bit app's phys_addr region is under 4GB.*/
3321 if ((data->client.app_arch == ELFCLASS32) &&
3322 ((send_data_req_64bit.req_ptr >=
3323 PHY_ADDR_4G - send_data_req_64bit.req_len) ||
3324 (send_data_req_64bit.rsp_ptr >=
3325 PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
3326 pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
3327 data->client.app_name,
3328 send_data_req_64bit.req_ptr,
3329 send_data_req_64bit.req_len,
3330 send_data_req_64bit.rsp_ptr,
3331 send_data_req_64bit.rsp_len);
3332 return -EFAULT;
3333 }
3334 send_data_req_64bit.sglistinfo_ptr =
3335 (uint64_t)virt_to_phys(table);
3336 send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3337 dmac_flush_range((void *)table,
3338 (void *)table + SGLISTINFO_TABLE_SIZE);
3339 cmd_buf = (void *)&send_data_req_64bit;
3340 cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
3341 }
3342
3343 if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
3344 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
3345 else
3346 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
3347
3348 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3349 data->client.sb_virt,
3350 reqd_len_sb_in,
3351 ION_IOC_CLEAN_INV_CACHES);
3352 if (ret) {
3353 pr_err("cache operation failed %d\n", ret);
3354 return ret;
3355 }
3356
3357 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
3358
3359 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3360 cmd_buf, cmd_len,
3361 &resp, sizeof(resp));
3362 if (ret) {
3363 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
3364 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07003365 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003366 }
3367
3368 if (qseecom.qsee_reentrancy_support) {
3369 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07003370 if (ret)
3371 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003372 } else {
3373 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
3374 ret = __qseecom_process_incomplete_cmd(data, &resp);
3375 if (ret) {
3376 pr_err("process_incomplete_cmd failed err: %d\n",
3377 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07003378 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003379 }
3380 } else {
3381 if (resp.result != QSEOS_RESULT_SUCCESS) {
3382 pr_err("Response result %d not supported\n",
3383 resp.result);
3384 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07003385 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003386 }
3387 }
3388 }
Zhen Kong4af480e2017-09-19 14:34:16 -07003389exit:
3390 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003391 data->client.sb_virt, data->client.sb_length,
3392 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07003393 if (ret2) {
3394 pr_err("cache operation failed %d\n", ret2);
3395 return ret2;
3396 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003397 return ret;
3398}
3399
3400static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3401{
3402 int ret = 0;
3403 struct qseecom_send_cmd_req req;
3404
3405 ret = copy_from_user(&req, argp, sizeof(req));
3406 if (ret) {
3407 pr_err("copy_from_user failed\n");
3408 return ret;
3409 }
3410
3411 if (__validate_send_cmd_inputs(data, &req))
3412 return -EINVAL;
3413
3414 ret = __qseecom_send_cmd(data, &req);
3415
3416 if (ret)
3417 return ret;
3418
3419 return ret;
3420}
3421
3422int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3423 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3424 struct qseecom_dev_handle *data, int i) {
3425
3426 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3427 (req->ifd_data[i].fd > 0)) {
3428 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3429 (req->ifd_data[i].cmd_buf_offset >
3430 req->cmd_req_len - sizeof(uint32_t))) {
3431 pr_err("Invalid offset (req len) 0x%x\n",
3432 req->ifd_data[i].cmd_buf_offset);
3433 return -EINVAL;
3434 }
3435 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3436 (lstnr_resp->ifd_data[i].fd > 0)) {
3437 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3438 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3439 lstnr_resp->resp_len - sizeof(uint32_t))) {
3440 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3441 lstnr_resp->ifd_data[i].cmd_buf_offset);
3442 return -EINVAL;
3443 }
3444 }
3445 return 0;
3446}
3447
3448static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
3449 struct qseecom_dev_handle *data)
3450{
3451 struct ion_handle *ihandle;
3452 char *field;
3453 int ret = 0;
3454 int i = 0;
3455 uint32_t len = 0;
3456 struct scatterlist *sg;
3457 struct qseecom_send_modfd_cmd_req *req = NULL;
3458 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3459 struct qseecom_registered_listener_list *this_lstnr = NULL;
3460 uint32_t offset;
3461 struct sg_table *sg_ptr;
3462
3463 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3464 (data->type != QSEECOM_CLIENT_APP))
3465 return -EFAULT;
3466
3467 if (msg == NULL) {
3468 pr_err("Invalid address\n");
3469 return -EINVAL;
3470 }
3471 if (data->type == QSEECOM_LISTENER_SERVICE) {
3472 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3473 this_lstnr = __qseecom_find_svc(data->listener.id);
3474 if (IS_ERR_OR_NULL(this_lstnr)) {
3475 pr_err("Invalid listener ID\n");
3476 return -ENOMEM;
3477 }
3478 } else {
3479 req = (struct qseecom_send_modfd_cmd_req *)msg;
3480 }
3481
3482 for (i = 0; i < MAX_ION_FD; i++) {
3483 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3484 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003485 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003486 req->ifd_data[i].fd);
3487 if (IS_ERR_OR_NULL(ihandle)) {
3488 pr_err("Ion client can't retrieve the handle\n");
3489 return -ENOMEM;
3490 }
3491 field = (char *) req->cmd_req_buf +
3492 req->ifd_data[i].cmd_buf_offset;
3493 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3494 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003495 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003496 lstnr_resp->ifd_data[i].fd);
3497 if (IS_ERR_OR_NULL(ihandle)) {
3498 pr_err("Ion client can't retrieve the handle\n");
3499 return -ENOMEM;
3500 }
3501 field = lstnr_resp->resp_buf_ptr +
3502 lstnr_resp->ifd_data[i].cmd_buf_offset;
3503 } else {
3504 continue;
3505 }
3506 /* Populate the cmd data structure with the phys_addr */
3507 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3508 if (IS_ERR_OR_NULL(sg_ptr)) {
3509 pr_err("IOn client could not retrieve sg table\n");
3510 goto err;
3511 }
3512 if (sg_ptr->nents == 0) {
3513 pr_err("Num of scattered entries is 0\n");
3514 goto err;
3515 }
3516 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3517 pr_err("Num of scattered entries");
3518 pr_err(" (%d) is greater than max supported %d\n",
3519 sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3520 goto err;
3521 }
3522 sg = sg_ptr->sgl;
3523 if (sg_ptr->nents == 1) {
3524 uint32_t *update;
3525
3526 if (__boundary_checks_offset(req, lstnr_resp, data, i))
3527 goto err;
3528 if ((data->type == QSEECOM_CLIENT_APP &&
3529 (data->client.app_arch == ELFCLASS32 ||
3530 data->client.app_arch == ELFCLASS64)) ||
3531 (data->type == QSEECOM_LISTENER_SERVICE)) {
3532 /*
3533 * Check if sg list phy add region is under 4GB
3534 */
3535 if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
3536 (!cleanup) &&
3537 ((uint64_t)sg_dma_address(sg_ptr->sgl)
3538 >= PHY_ADDR_4G - sg->length)) {
3539 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3540 data->client.app_name,
3541 &(sg_dma_address(sg_ptr->sgl)),
3542 sg->length);
3543 goto err;
3544 }
3545 update = (uint32_t *) field;
3546 *update = cleanup ? 0 :
3547 (uint32_t)sg_dma_address(sg_ptr->sgl);
3548 } else {
3549 pr_err("QSEE app arch %u is not supported\n",
3550 data->client.app_arch);
3551 goto err;
3552 }
3553 len += (uint32_t)sg->length;
3554 } else {
3555 struct qseecom_sg_entry *update;
3556 int j = 0;
3557
3558 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3559 (req->ifd_data[i].fd > 0)) {
3560
3561 if ((req->cmd_req_len <
3562 SG_ENTRY_SZ * sg_ptr->nents) ||
3563 (req->ifd_data[i].cmd_buf_offset >
3564 (req->cmd_req_len -
3565 SG_ENTRY_SZ * sg_ptr->nents))) {
3566 pr_err("Invalid offset = 0x%x\n",
3567 req->ifd_data[i].cmd_buf_offset);
3568 goto err;
3569 }
3570
3571 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3572 (lstnr_resp->ifd_data[i].fd > 0)) {
3573
3574 if ((lstnr_resp->resp_len <
3575 SG_ENTRY_SZ * sg_ptr->nents) ||
3576 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3577 (lstnr_resp->resp_len -
3578 SG_ENTRY_SZ * sg_ptr->nents))) {
3579 goto err;
3580 }
3581 }
3582 if ((data->type == QSEECOM_CLIENT_APP &&
3583 (data->client.app_arch == ELFCLASS32 ||
3584 data->client.app_arch == ELFCLASS64)) ||
3585 (data->type == QSEECOM_LISTENER_SERVICE)) {
3586 update = (struct qseecom_sg_entry *)field;
3587 for (j = 0; j < sg_ptr->nents; j++) {
3588 /*
3589 * Check if sg list PA is under 4GB
3590 */
3591 if ((qseecom.qsee_version >=
3592 QSEE_VERSION_40) &&
3593 (!cleanup) &&
3594 ((uint64_t)(sg_dma_address(sg))
3595 >= PHY_ADDR_4G - sg->length)) {
3596 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3597 data->client.app_name,
3598 &(sg_dma_address(sg)),
3599 sg->length);
3600 goto err;
3601 }
3602 update->phys_addr = cleanup ? 0 :
3603 (uint32_t)sg_dma_address(sg);
3604 update->len = cleanup ? 0 : sg->length;
3605 update++;
3606 len += sg->length;
3607 sg = sg_next(sg);
3608 }
3609 } else {
3610 pr_err("QSEE app arch %u is not supported\n",
3611 data->client.app_arch);
3612 goto err;
3613 }
3614 }
3615
3616 if (cleanup) {
3617 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3618 ihandle, NULL, len,
3619 ION_IOC_INV_CACHES);
3620 if (ret) {
3621 pr_err("cache operation failed %d\n", ret);
3622 goto err;
3623 }
3624 } else {
3625 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3626 ihandle, NULL, len,
3627 ION_IOC_CLEAN_INV_CACHES);
3628 if (ret) {
3629 pr_err("cache operation failed %d\n", ret);
3630 goto err;
3631 }
3632 if (data->type == QSEECOM_CLIENT_APP) {
3633 offset = req->ifd_data[i].cmd_buf_offset;
3634 data->sglistinfo_ptr[i].indexAndFlags =
3635 SGLISTINFO_SET_INDEX_FLAG(
3636 (sg_ptr->nents == 1), 0, offset);
3637 data->sglistinfo_ptr[i].sizeOrCount =
3638 (sg_ptr->nents == 1) ?
3639 sg->length : sg_ptr->nents;
3640 data->sglist_cnt = i + 1;
3641 } else {
3642 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3643 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3644 (uintptr_t)this_lstnr->sb_virt);
3645 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3646 SGLISTINFO_SET_INDEX_FLAG(
3647 (sg_ptr->nents == 1), 0, offset);
3648 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3649 (sg_ptr->nents == 1) ?
3650 sg->length : sg_ptr->nents;
3651 this_lstnr->sglist_cnt = i + 1;
3652 }
3653 }
3654 /* Deallocate the handle */
3655 if (!IS_ERR_OR_NULL(ihandle))
3656 ion_free(qseecom.ion_clnt, ihandle);
3657 }
3658 return ret;
3659err:
3660 if (!IS_ERR_OR_NULL(ihandle))
3661 ion_free(qseecom.ion_clnt, ihandle);
3662 return -ENOMEM;
3663}
3664
3665static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3666 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3667{
3668 struct scatterlist *sg = sg_ptr->sgl;
3669 struct qseecom_sg_entry_64bit *sg_entry;
3670 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3671 void *buf;
3672 uint i;
3673 size_t size;
3674 dma_addr_t coh_pmem;
3675
3676 if (fd_idx >= MAX_ION_FD) {
3677 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3678 return -ENOMEM;
3679 }
3680 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3681 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3682 /* Allocate a contiguous kernel buffer */
3683 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3684 size = (size + PAGE_SIZE) & PAGE_MASK;
3685 buf = dma_alloc_coherent(qseecom.pdev,
3686 size, &coh_pmem, GFP_KERNEL);
3687 if (buf == NULL) {
3688 pr_err("failed to alloc memory for sg buf\n");
3689 return -ENOMEM;
3690 }
3691 /* update qseecom_sg_list_buf_hdr_64bit */
3692 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3693 buf_hdr->new_buf_phys_addr = coh_pmem;
3694 buf_hdr->nents_total = sg_ptr->nents;
3695 /* save the left sg entries into new allocated buf */
3696 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3697 for (i = 0; i < sg_ptr->nents; i++) {
3698 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3699 sg_entry->len = sg->length;
3700 sg_entry++;
3701 sg = sg_next(sg);
3702 }
3703
3704 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3705 data->client.sec_buf_fd[fd_idx].vbase = buf;
3706 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3707 data->client.sec_buf_fd[fd_idx].size = size;
3708
3709 return 0;
3710}
3711
3712static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
3713 struct qseecom_dev_handle *data)
3714{
3715 struct ion_handle *ihandle;
3716 char *field;
3717 int ret = 0;
3718 int i = 0;
3719 uint32_t len = 0;
3720 struct scatterlist *sg;
3721 struct qseecom_send_modfd_cmd_req *req = NULL;
3722 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3723 struct qseecom_registered_listener_list *this_lstnr = NULL;
3724 uint32_t offset;
3725 struct sg_table *sg_ptr;
3726
3727 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3728 (data->type != QSEECOM_CLIENT_APP))
3729 return -EFAULT;
3730
3731 if (msg == NULL) {
3732 pr_err("Invalid address\n");
3733 return -EINVAL;
3734 }
3735 if (data->type == QSEECOM_LISTENER_SERVICE) {
3736 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3737 this_lstnr = __qseecom_find_svc(data->listener.id);
3738 if (IS_ERR_OR_NULL(this_lstnr)) {
3739 pr_err("Invalid listener ID\n");
3740 return -ENOMEM;
3741 }
3742 } else {
3743 req = (struct qseecom_send_modfd_cmd_req *)msg;
3744 }
3745
3746 for (i = 0; i < MAX_ION_FD; i++) {
3747 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3748 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003749 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003750 req->ifd_data[i].fd);
3751 if (IS_ERR_OR_NULL(ihandle)) {
3752 pr_err("Ion client can't retrieve the handle\n");
3753 return -ENOMEM;
3754 }
3755 field = (char *) req->cmd_req_buf +
3756 req->ifd_data[i].cmd_buf_offset;
3757 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3758 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003759 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003760 lstnr_resp->ifd_data[i].fd);
3761 if (IS_ERR_OR_NULL(ihandle)) {
3762 pr_err("Ion client can't retrieve the handle\n");
3763 return -ENOMEM;
3764 }
3765 field = lstnr_resp->resp_buf_ptr +
3766 lstnr_resp->ifd_data[i].cmd_buf_offset;
3767 } else {
3768 continue;
3769 }
3770 /* Populate the cmd data structure with the phys_addr */
3771 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3772 if (IS_ERR_OR_NULL(sg_ptr)) {
3773 pr_err("IOn client could not retrieve sg table\n");
3774 goto err;
3775 }
3776 if (sg_ptr->nents == 0) {
3777 pr_err("Num of scattered entries is 0\n");
3778 goto err;
3779 }
3780 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3781 pr_warn("Num of scattered entries");
3782 pr_warn(" (%d) is greater than %d\n",
3783 sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3784 if (cleanup) {
3785 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3786 data->client.sec_buf_fd[i].vbase)
3787 dma_free_coherent(qseecom.pdev,
3788 data->client.sec_buf_fd[i].size,
3789 data->client.sec_buf_fd[i].vbase,
3790 data->client.sec_buf_fd[i].pbase);
3791 } else {
3792 ret = __qseecom_allocate_sg_list_buffer(data,
3793 field, i, sg_ptr);
3794 if (ret) {
3795 pr_err("Failed to allocate sg list buffer\n");
3796 goto err;
3797 }
3798 }
3799 len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
3800 sg = sg_ptr->sgl;
3801 goto cleanup;
3802 }
3803 sg = sg_ptr->sgl;
3804 if (sg_ptr->nents == 1) {
3805 uint64_t *update_64bit;
3806
3807 if (__boundary_checks_offset(req, lstnr_resp, data, i))
3808 goto err;
3809 /* 64bit app uses 64bit address */
3810 update_64bit = (uint64_t *) field;
3811 *update_64bit = cleanup ? 0 :
3812 (uint64_t)sg_dma_address(sg_ptr->sgl);
3813 len += (uint32_t)sg->length;
3814 } else {
3815 struct qseecom_sg_entry_64bit *update_64bit;
3816 int j = 0;
3817
3818 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3819 (req->ifd_data[i].fd > 0)) {
3820
3821 if ((req->cmd_req_len <
3822 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3823 (req->ifd_data[i].cmd_buf_offset >
3824 (req->cmd_req_len -
3825 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3826 pr_err("Invalid offset = 0x%x\n",
3827 req->ifd_data[i].cmd_buf_offset);
3828 goto err;
3829 }
3830
3831 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3832 (lstnr_resp->ifd_data[i].fd > 0)) {
3833
3834 if ((lstnr_resp->resp_len <
3835 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3836 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3837 (lstnr_resp->resp_len -
3838 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3839 goto err;
3840 }
3841 }
3842 /* 64bit app uses 64bit address */
3843 update_64bit = (struct qseecom_sg_entry_64bit *)field;
3844 for (j = 0; j < sg_ptr->nents; j++) {
3845 update_64bit->phys_addr = cleanup ? 0 :
3846 (uint64_t)sg_dma_address(sg);
3847 update_64bit->len = cleanup ? 0 :
3848 (uint32_t)sg->length;
3849 update_64bit++;
3850 len += sg->length;
3851 sg = sg_next(sg);
3852 }
3853 }
3854cleanup:
3855 if (cleanup) {
3856 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3857 ihandle, NULL, len,
3858 ION_IOC_INV_CACHES);
3859 if (ret) {
3860 pr_err("cache operation failed %d\n", ret);
3861 goto err;
3862 }
3863 } else {
3864 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3865 ihandle, NULL, len,
3866 ION_IOC_CLEAN_INV_CACHES);
3867 if (ret) {
3868 pr_err("cache operation failed %d\n", ret);
3869 goto err;
3870 }
3871 if (data->type == QSEECOM_CLIENT_APP) {
3872 offset = req->ifd_data[i].cmd_buf_offset;
3873 data->sglistinfo_ptr[i].indexAndFlags =
3874 SGLISTINFO_SET_INDEX_FLAG(
3875 (sg_ptr->nents == 1), 1, offset);
3876 data->sglistinfo_ptr[i].sizeOrCount =
3877 (sg_ptr->nents == 1) ?
3878 sg->length : sg_ptr->nents;
3879 data->sglist_cnt = i + 1;
3880 } else {
3881 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3882 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3883 (uintptr_t)this_lstnr->sb_virt);
3884 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3885 SGLISTINFO_SET_INDEX_FLAG(
3886 (sg_ptr->nents == 1), 1, offset);
3887 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3888 (sg_ptr->nents == 1) ?
3889 sg->length : sg_ptr->nents;
3890 this_lstnr->sglist_cnt = i + 1;
3891 }
3892 }
3893 /* Deallocate the handle */
3894 if (!IS_ERR_OR_NULL(ihandle))
3895 ion_free(qseecom.ion_clnt, ihandle);
3896 }
3897 return ret;
3898err:
3899 for (i = 0; i < MAX_ION_FD; i++)
3900 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3901 data->client.sec_buf_fd[i].vbase)
3902 dma_free_coherent(qseecom.pdev,
3903 data->client.sec_buf_fd[i].size,
3904 data->client.sec_buf_fd[i].vbase,
3905 data->client.sec_buf_fd[i].pbase);
3906 if (!IS_ERR_OR_NULL(ihandle))
3907 ion_free(qseecom.ion_clnt, ihandle);
3908 return -ENOMEM;
3909}
3910
3911static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3912 void __user *argp,
3913 bool is_64bit_addr)
3914{
3915 int ret = 0;
3916 int i;
3917 struct qseecom_send_modfd_cmd_req req;
3918 struct qseecom_send_cmd_req send_cmd_req;
3919
3920 ret = copy_from_user(&req, argp, sizeof(req));
3921 if (ret) {
3922 pr_err("copy_from_user failed\n");
3923 return ret;
3924 }
3925
3926 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3927 send_cmd_req.cmd_req_len = req.cmd_req_len;
3928 send_cmd_req.resp_buf = req.resp_buf;
3929 send_cmd_req.resp_len = req.resp_len;
3930
3931 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3932 return -EINVAL;
3933
3934 /* validate offsets */
3935 for (i = 0; i < MAX_ION_FD; i++) {
3936 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3937 pr_err("Invalid offset %d = 0x%x\n",
3938 i, req.ifd_data[i].cmd_buf_offset);
3939 return -EINVAL;
3940 }
3941 }
3942 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3943 (uintptr_t)req.cmd_req_buf);
3944 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3945 (uintptr_t)req.resp_buf);
3946
3947 if (!is_64bit_addr) {
3948 ret = __qseecom_update_cmd_buf(&req, false, data);
3949 if (ret)
3950 return ret;
3951 ret = __qseecom_send_cmd(data, &send_cmd_req);
3952 if (ret)
3953 return ret;
3954 ret = __qseecom_update_cmd_buf(&req, true, data);
3955 if (ret)
3956 return ret;
3957 } else {
3958 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3959 if (ret)
3960 return ret;
3961 ret = __qseecom_send_cmd(data, &send_cmd_req);
3962 if (ret)
3963 return ret;
3964 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3965 if (ret)
3966 return ret;
3967 }
3968
3969 return ret;
3970}
3971
3972static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3973 void __user *argp)
3974{
3975 return __qseecom_send_modfd_cmd(data, argp, false);
3976}
3977
3978static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
3979 void __user *argp)
3980{
3981 return __qseecom_send_modfd_cmd(data, argp, true);
3982}
3983
3984
3985
3986static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3987 struct qseecom_registered_listener_list *svc)
3988{
3989 int ret;
3990
Zhen Kongf5087172018-10-11 17:22:05 -07003991 ret = (svc->rcv_req_flag == 1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08003992 return ret || data->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003993}
3994
3995static int qseecom_receive_req(struct qseecom_dev_handle *data)
3996{
3997 int ret = 0;
3998 struct qseecom_registered_listener_list *this_lstnr;
3999
Zhen Kongbcdeda22018-11-16 13:50:51 -08004000 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004001 this_lstnr = __qseecom_find_svc(data->listener.id);
4002 if (!this_lstnr) {
4003 pr_err("Invalid listener ID\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08004004 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004005 return -ENODATA;
4006 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08004007 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004008
4009 while (1) {
AnilKumar Chimata8b5f7e62019-04-18 13:29:15 +05304010 if (wait_event_interruptible(this_lstnr->rcv_req_wq,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004011 __qseecom_listener_has_rcvd_req(data,
4012 this_lstnr))) {
Zhen Kong25731112018-09-20 13:10:03 -07004013 pr_warn("Interrupted: exiting Listener Service = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004014 (uint32_t)data->listener.id);
4015 /* woken up for different reason */
4016 return -ERESTARTSYS;
4017 }
4018
Zhen Kongbcdeda22018-11-16 13:50:51 -08004019 if (data->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004020 pr_err("Aborting Listener Service = %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07004021 (uint32_t)data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004022 return -ENODEV;
4023 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08004024 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004025 this_lstnr->rcv_req_flag = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08004026 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004027 break;
4028 }
4029 return ret;
4030}
4031
4032static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
4033{
4034 unsigned char app_arch = 0;
4035 struct elf32_hdr *ehdr;
4036 struct elf64_hdr *ehdr64;
4037
4038 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4039
4040 switch (app_arch) {
4041 case ELFCLASS32: {
4042 ehdr = (struct elf32_hdr *)fw_entry->data;
4043 if (fw_entry->size < sizeof(*ehdr)) {
4044 pr_err("%s: Not big enough to be an elf32 header\n",
4045 qseecom.pdev->init_name);
4046 return false;
4047 }
4048 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
4049 pr_err("%s: Not an elf32 header\n",
4050 qseecom.pdev->init_name);
4051 return false;
4052 }
4053 if (ehdr->e_phnum == 0) {
4054 pr_err("%s: No loadable segments\n",
4055 qseecom.pdev->init_name);
4056 return false;
4057 }
4058 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
4059 sizeof(struct elf32_hdr) > fw_entry->size) {
4060 pr_err("%s: Program headers not within mdt\n",
4061 qseecom.pdev->init_name);
4062 return false;
4063 }
4064 break;
4065 }
4066 case ELFCLASS64: {
4067 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4068 if (fw_entry->size < sizeof(*ehdr64)) {
4069 pr_err("%s: Not big enough to be an elf64 header\n",
4070 qseecom.pdev->init_name);
4071 return false;
4072 }
4073 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
4074 pr_err("%s: Not an elf64 header\n",
4075 qseecom.pdev->init_name);
4076 return false;
4077 }
4078 if (ehdr64->e_phnum == 0) {
4079 pr_err("%s: No loadable segments\n",
4080 qseecom.pdev->init_name);
4081 return false;
4082 }
4083 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
4084 sizeof(struct elf64_hdr) > fw_entry->size) {
4085 pr_err("%s: Program headers not within mdt\n",
4086 qseecom.pdev->init_name);
4087 return false;
4088 }
4089 break;
4090 }
4091 default: {
4092 pr_err("QSEE app arch %u is not supported\n", app_arch);
4093 return false;
4094 }
4095 }
4096 return true;
4097}
4098
4099static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
4100 uint32_t *app_arch)
4101{
4102 int ret = -1;
4103 int i = 0, rc = 0;
4104 const struct firmware *fw_entry = NULL;
4105 char fw_name[MAX_APP_NAME_SIZE];
4106 struct elf32_hdr *ehdr;
4107 struct elf64_hdr *ehdr64;
4108 int num_images = 0;
4109
4110 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4111 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4112 if (rc) {
4113 pr_err("error with request_firmware\n");
4114 ret = -EIO;
4115 goto err;
4116 }
4117 if (!__qseecom_is_fw_image_valid(fw_entry)) {
4118 ret = -EIO;
4119 goto err;
4120 }
4121 *app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4122 *fw_size = fw_entry->size;
4123 if (*app_arch == ELFCLASS32) {
4124 ehdr = (struct elf32_hdr *)fw_entry->data;
4125 num_images = ehdr->e_phnum;
4126 } else if (*app_arch == ELFCLASS64) {
4127 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4128 num_images = ehdr64->e_phnum;
4129 } else {
4130 pr_err("QSEE %s app, arch %u is not supported\n",
4131 appname, *app_arch);
4132 ret = -EIO;
4133 goto err;
4134 }
4135 pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
4136 release_firmware(fw_entry);
4137 fw_entry = NULL;
4138 for (i = 0; i < num_images; i++) {
4139 memset(fw_name, 0, sizeof(fw_name));
4140 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4141 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4142 if (ret)
4143 goto err;
4144 if (*fw_size > U32_MAX - fw_entry->size) {
4145 pr_err("QSEE %s app file size overflow\n", appname);
4146 ret = -EINVAL;
4147 goto err;
4148 }
4149 *fw_size += fw_entry->size;
4150 release_firmware(fw_entry);
4151 fw_entry = NULL;
4152 }
4153
4154 return ret;
4155err:
4156 if (fw_entry)
4157 release_firmware(fw_entry);
4158 *fw_size = 0;
4159 return ret;
4160}
4161
4162static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
4163 uint32_t fw_size,
4164 struct qseecom_load_app_ireq *load_req)
4165{
4166 int ret = -1;
4167 int i = 0, rc = 0;
4168 const struct firmware *fw_entry = NULL;
4169 char fw_name[MAX_APP_NAME_SIZE];
4170 u8 *img_data_ptr = img_data;
4171 struct elf32_hdr *ehdr;
4172 struct elf64_hdr *ehdr64;
4173 int num_images = 0;
4174 unsigned char app_arch = 0;
4175
4176 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4177 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4178 if (rc) {
4179 ret = -EIO;
4180 goto err;
4181 }
4182
4183 load_req->img_len = fw_entry->size;
4184 if (load_req->img_len > fw_size) {
4185 pr_err("app %s size %zu is larger than buf size %u\n",
4186 appname, fw_entry->size, fw_size);
4187 ret = -EINVAL;
4188 goto err;
4189 }
4190 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4191 img_data_ptr = img_data_ptr + fw_entry->size;
4192 load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/
4193
4194 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4195 if (app_arch == ELFCLASS32) {
4196 ehdr = (struct elf32_hdr *)fw_entry->data;
4197 num_images = ehdr->e_phnum;
4198 } else if (app_arch == ELFCLASS64) {
4199 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4200 num_images = ehdr64->e_phnum;
4201 } else {
4202 pr_err("QSEE %s app, arch %u is not supported\n",
4203 appname, app_arch);
4204 ret = -EIO;
4205 goto err;
4206 }
4207 release_firmware(fw_entry);
4208 fw_entry = NULL;
4209 for (i = 0; i < num_images; i++) {
4210 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4211 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4212 if (ret) {
4213 pr_err("Failed to locate blob %s\n", fw_name);
4214 goto err;
4215 }
4216 if ((fw_entry->size > U32_MAX - load_req->img_len) ||
4217 (fw_entry->size + load_req->img_len > fw_size)) {
4218 pr_err("Invalid file size for %s\n", fw_name);
4219 ret = -EINVAL;
4220 goto err;
4221 }
4222 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4223 img_data_ptr = img_data_ptr + fw_entry->size;
4224 load_req->img_len += fw_entry->size;
4225 release_firmware(fw_entry);
4226 fw_entry = NULL;
4227 }
4228 return ret;
4229err:
4230 release_firmware(fw_entry);
4231 return ret;
4232}
4233
4234static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
4235 u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
4236{
4237 size_t len = 0;
4238 int ret = 0;
4239 ion_phys_addr_t pa;
4240 struct ion_handle *ihandle = NULL;
4241 u8 *img_data = NULL;
Zhen Kong3dd92792017-12-08 09:47:15 -08004242 int retry = 0;
Zhen Konge30e1342019-01-22 08:57:02 -08004243 int ion_flag = ION_FLAG_CACHED;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004244
Zhen Kong3dd92792017-12-08 09:47:15 -08004245 do {
Zhen Kong5d02be92018-05-29 16:17:29 -07004246 if (retry++) {
4247 mutex_unlock(&app_access_lock);
Zhen Kong3dd92792017-12-08 09:47:15 -08004248 msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
Zhen Kong5d02be92018-05-29 16:17:29 -07004249 mutex_lock(&app_access_lock);
4250 }
Zhen Kong3dd92792017-12-08 09:47:15 -08004251 ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
Zhen Konge30e1342019-01-22 08:57:02 -08004252 SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), ion_flag);
Zhen Kong3dd92792017-12-08 09:47:15 -08004253 } while (IS_ERR_OR_NULL(ihandle) &&
4254 (retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004255
4256 if (IS_ERR_OR_NULL(ihandle)) {
4257 pr_err("ION alloc failed\n");
4258 return -ENOMEM;
4259 }
4260 img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
4261 ihandle);
4262
4263 if (IS_ERR_OR_NULL(img_data)) {
4264 pr_err("ION memory mapping for image loading failed\n");
4265 ret = -ENOMEM;
4266 goto exit_ion_free;
4267 }
4268 /* Get the physical address of the ION BUF */
4269 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
4270 if (ret) {
4271 pr_err("physical memory retrieval failure\n");
4272 ret = -EIO;
4273 goto exit_ion_unmap_kernel;
4274 }
4275
4276 *pihandle = ihandle;
4277 *data = img_data;
4278 *paddr = pa;
4279 return ret;
4280
4281exit_ion_unmap_kernel:
4282 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
4283exit_ion_free:
4284 ion_free(qseecom.ion_clnt, ihandle);
4285 ihandle = NULL;
4286 return ret;
4287}
4288
4289static void __qseecom_free_img_data(struct ion_handle **ihandle)
4290{
4291 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4292 ion_free(qseecom.ion_clnt, *ihandle);
4293 *ihandle = NULL;
4294}
4295
4296static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4297 uint32_t *app_id)
4298{
4299 int ret = -1;
4300 uint32_t fw_size = 0;
4301 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4302 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4303 struct qseecom_command_scm_resp resp;
4304 u8 *img_data = NULL;
4305 ion_phys_addr_t pa = 0;
4306 struct ion_handle *ihandle = NULL;
4307 void *cmd_buf = NULL;
4308 size_t cmd_len;
4309 uint32_t app_arch = 0;
4310
4311 if (!data || !appname || !app_id) {
4312 pr_err("Null pointer to data or appname or appid\n");
4313 return -EINVAL;
4314 }
4315 *app_id = 0;
4316 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4317 return -EIO;
4318 data->client.app_arch = app_arch;
4319
4320 /* Check and load cmnlib */
4321 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4322 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4323 ret = qseecom_load_commonlib_image(data, "cmnlib");
4324 if (ret) {
4325 pr_err("failed to load cmnlib\n");
4326 return -EIO;
4327 }
4328 qseecom.commonlib_loaded = true;
4329 pr_debug("cmnlib is loaded\n");
4330 }
4331
4332 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4333 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4334 if (ret) {
4335 pr_err("failed to load cmnlib64\n");
4336 return -EIO;
4337 }
4338 qseecom.commonlib64_loaded = true;
4339 pr_debug("cmnlib64 is loaded\n");
4340 }
4341 }
4342
4343 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4344 if (ret)
4345 return ret;
4346
4347 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4348 if (ret) {
4349 ret = -EIO;
4350 goto exit_free_img_data;
4351 }
4352
4353 /* Populate the load_req parameters */
4354 if (qseecom.qsee_version < QSEE_VERSION_40) {
4355 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4356 load_req.mdt_len = load_req.mdt_len;
4357 load_req.img_len = load_req.img_len;
4358 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4359 load_req.phy_addr = (uint32_t)pa;
4360 cmd_buf = (void *)&load_req;
4361 cmd_len = sizeof(struct qseecom_load_app_ireq);
4362 } else {
4363 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4364 load_req_64bit.mdt_len = load_req.mdt_len;
4365 load_req_64bit.img_len = load_req.img_len;
4366 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4367 load_req_64bit.phy_addr = (uint64_t)pa;
4368 cmd_buf = (void *)&load_req_64bit;
4369 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4370 }
4371
4372 if (qseecom.support_bus_scaling) {
4373 mutex_lock(&qsee_bw_mutex);
4374 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4375 mutex_unlock(&qsee_bw_mutex);
4376 if (ret) {
4377 ret = -EIO;
4378 goto exit_free_img_data;
4379 }
4380 }
4381
4382 ret = __qseecom_enable_clk_scale_up(data);
4383 if (ret) {
4384 ret = -EIO;
4385 goto exit_unregister_bus_bw_need;
4386 }
4387
4388 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4389 img_data, fw_size,
4390 ION_IOC_CLEAN_INV_CACHES);
4391 if (ret) {
4392 pr_err("cache operation failed %d\n", ret);
4393 goto exit_disable_clk_vote;
4394 }
4395
4396 /* SCM_CALL to load the image */
4397 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4398 &resp, sizeof(resp));
4399 if (ret) {
Zhen Kong5d02be92018-05-29 16:17:29 -07004400 pr_err("scm_call to load failed : ret %d, result %x\n",
4401 ret, resp.result);
4402 if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
4403 ret = -EEXIST;
4404 else
4405 ret = -EIO;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004406 goto exit_disable_clk_vote;
4407 }
4408
4409 switch (resp.result) {
4410 case QSEOS_RESULT_SUCCESS:
4411 *app_id = resp.data;
4412 break;
4413 case QSEOS_RESULT_INCOMPLETE:
4414 ret = __qseecom_process_incomplete_cmd(data, &resp);
4415 if (ret)
4416 pr_err("process_incomplete_cmd FAILED\n");
4417 else
4418 *app_id = resp.data;
4419 break;
4420 case QSEOS_RESULT_FAILURE:
4421 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4422 break;
4423 default:
4424 pr_err("scm call return unknown response %d\n", resp.result);
4425 ret = -EINVAL;
4426 break;
4427 }
4428
4429exit_disable_clk_vote:
4430 __qseecom_disable_clk_scale_down(data);
4431
4432exit_unregister_bus_bw_need:
4433 if (qseecom.support_bus_scaling) {
4434 mutex_lock(&qsee_bw_mutex);
4435 qseecom_unregister_bus_bandwidth_needs(data);
4436 mutex_unlock(&qsee_bw_mutex);
4437 }
4438
4439exit_free_img_data:
4440 __qseecom_free_img_data(&ihandle);
4441 return ret;
4442}
4443
4444static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
4445 char *cmnlib_name)
4446{
4447 int ret = 0;
4448 uint32_t fw_size = 0;
4449 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4450 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4451 struct qseecom_command_scm_resp resp;
4452 u8 *img_data = NULL;
4453 ion_phys_addr_t pa = 0;
4454 void *cmd_buf = NULL;
4455 size_t cmd_len;
4456 uint32_t app_arch = 0;
Zhen Kong3bafb312017-10-18 10:27:20 -07004457 struct ion_handle *cmnlib_ion_handle = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004458
4459 if (!cmnlib_name) {
4460 pr_err("cmnlib_name is NULL\n");
4461 return -EINVAL;
4462 }
4463 if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
4464 pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
4465 cmnlib_name, strlen(cmnlib_name));
4466 return -EINVAL;
4467 }
4468
4469 if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
4470 return -EIO;
4471
Zhen Kong3bafb312017-10-18 10:27:20 -07004472 ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004473 &img_data, fw_size, &pa);
4474 if (ret)
4475 return -EIO;
4476
4477 ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
4478 if (ret) {
4479 ret = -EIO;
4480 goto exit_free_img_data;
4481 }
4482 if (qseecom.qsee_version < QSEE_VERSION_40) {
4483 load_req.phy_addr = (uint32_t)pa;
4484 load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4485 cmd_buf = (void *)&load_req;
4486 cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
4487 } else {
4488 load_req_64bit.phy_addr = (uint64_t)pa;
4489 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4490 load_req_64bit.img_len = load_req.img_len;
4491 load_req_64bit.mdt_len = load_req.mdt_len;
4492 cmd_buf = (void *)&load_req_64bit;
4493 cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
4494 }
4495
4496 if (qseecom.support_bus_scaling) {
4497 mutex_lock(&qsee_bw_mutex);
4498 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4499 mutex_unlock(&qsee_bw_mutex);
4500 if (ret) {
4501 ret = -EIO;
4502 goto exit_free_img_data;
4503 }
4504 }
4505
4506 /* Vote for the SFPB clock */
4507 ret = __qseecom_enable_clk_scale_up(data);
4508 if (ret) {
4509 ret = -EIO;
4510 goto exit_unregister_bus_bw_need;
4511 }
4512
Zhen Kong3bafb312017-10-18 10:27:20 -07004513 ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004514 img_data, fw_size,
4515 ION_IOC_CLEAN_INV_CACHES);
4516 if (ret) {
4517 pr_err("cache operation failed %d\n", ret);
4518 goto exit_disable_clk_vote;
4519 }
4520
4521 /* SCM_CALL to load the image */
4522 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4523 &resp, sizeof(resp));
4524 if (ret) {
4525 pr_err("scm_call to load failed : ret %d\n", ret);
4526 ret = -EIO;
4527 goto exit_disable_clk_vote;
4528 }
4529
4530 switch (resp.result) {
4531 case QSEOS_RESULT_SUCCESS:
4532 break;
4533 case QSEOS_RESULT_FAILURE:
4534 pr_err("scm call failed w/response result%d\n", resp.result);
4535 ret = -EINVAL;
4536 goto exit_disable_clk_vote;
4537 case QSEOS_RESULT_INCOMPLETE:
4538 ret = __qseecom_process_incomplete_cmd(data, &resp);
4539 if (ret) {
4540 pr_err("process_incomplete_cmd failed err: %d\n", ret);
4541 goto exit_disable_clk_vote;
4542 }
4543 break;
4544 default:
4545 pr_err("scm call return unknown response %d\n", resp.result);
4546 ret = -EINVAL;
4547 goto exit_disable_clk_vote;
4548 }
4549
4550exit_disable_clk_vote:
4551 __qseecom_disable_clk_scale_down(data);
4552
4553exit_unregister_bus_bw_need:
4554 if (qseecom.support_bus_scaling) {
4555 mutex_lock(&qsee_bw_mutex);
4556 qseecom_unregister_bus_bandwidth_needs(data);
4557 mutex_unlock(&qsee_bw_mutex);
4558 }
4559
4560exit_free_img_data:
Zhen Kong3bafb312017-10-18 10:27:20 -07004561 __qseecom_free_img_data(&cmnlib_ion_handle);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004562 return ret;
4563}
4564
4565static int qseecom_unload_commonlib_image(void)
4566{
4567 int ret = -EINVAL;
4568 struct qseecom_unload_lib_image_ireq unload_req = {0};
4569 struct qseecom_command_scm_resp resp;
4570
4571 /* Populate the remaining parameters */
4572 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4573
4574 /* SCM_CALL to load the image */
4575 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4576 sizeof(struct qseecom_unload_lib_image_ireq),
4577 &resp, sizeof(resp));
4578 if (ret) {
4579 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4580 ret = -EIO;
4581 } else {
4582 switch (resp.result) {
4583 case QSEOS_RESULT_SUCCESS:
4584 break;
4585 case QSEOS_RESULT_FAILURE:
4586 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4587 break;
4588 default:
4589 pr_err("scm call return unknown response %d\n",
4590 resp.result);
4591 ret = -EINVAL;
4592 break;
4593 }
4594 }
4595
4596 return ret;
4597}
4598
4599int qseecom_start_app(struct qseecom_handle **handle,
4600 char *app_name, uint32_t size)
4601{
4602 int32_t ret = 0;
4603 unsigned long flags = 0;
4604 struct qseecom_dev_handle *data = NULL;
4605 struct qseecom_check_app_ireq app_ireq;
4606 struct qseecom_registered_app_list *entry = NULL;
4607 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4608 bool found_app = false;
4609 size_t len;
4610 ion_phys_addr_t pa;
4611 uint32_t fw_size, app_arch;
4612 uint32_t app_id = 0;
4613
Zhen Kongc4c162a2019-01-23 12:07:12 -08004614 __wakeup_unregister_listener_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004615
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004616 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4617 pr_err("Not allowed to be called in %d state\n",
4618 atomic_read(&qseecom.qseecom_state));
4619 return -EPERM;
4620 }
4621 if (!app_name) {
4622 pr_err("failed to get the app name\n");
4623 return -EINVAL;
4624 }
4625
Zhen Kong64a6d7282017-06-16 11:55:07 -07004626 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004627 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004628 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004629 return -EINVAL;
4630 }
4631
4632 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4633 if (!(*handle))
4634 return -ENOMEM;
4635
4636 data = kzalloc(sizeof(*data), GFP_KERNEL);
4637 if (!data) {
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304638 ret = -ENOMEM;
4639 goto exit_handle_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004640 }
4641 data->abort = 0;
4642 data->type = QSEECOM_CLIENT_APP;
4643 data->released = false;
4644 data->client.sb_length = size;
4645 data->client.user_virt_sb_base = 0;
4646 data->client.ihandle = NULL;
4647
4648 init_waitqueue_head(&data->abort_wq);
4649
4650 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4651 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4652 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4653 pr_err("Ion client could not retrieve the handle\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304654 ret = -ENOMEM;
4655 goto exit_data_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004656 }
4657 mutex_lock(&app_access_lock);
4658
Zhen Kong5d02be92018-05-29 16:17:29 -07004659recheck:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004660 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4661 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4662 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4663 if (ret)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304664 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004665
4666 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4667 if (app_id) {
4668 pr_warn("App id %d for [%s] app exists\n", app_id,
4669 (char *)app_ireq.app_name);
4670 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4671 list_for_each_entry(entry,
4672 &qseecom.registered_app_list_head, list){
4673 if (entry->app_id == app_id) {
4674 entry->ref_cnt++;
4675 found_app = true;
4676 break;
4677 }
4678 }
4679 spin_unlock_irqrestore(
4680 &qseecom.registered_app_list_lock, flags);
4681 if (!found_app)
4682 pr_warn("App_id %d [%s] was loaded but not registered\n",
4683 ret, (char *)app_ireq.app_name);
4684 } else {
4685 /* load the app and get the app_id */
4686 pr_debug("%s: Loading app for the first time'\n",
4687 qseecom.pdev->init_name);
4688 ret = __qseecom_load_fw(data, app_name, &app_id);
Zhen Kong5d02be92018-05-29 16:17:29 -07004689 if (ret == -EEXIST) {
4690 pr_err("recheck if TA %s is loaded\n", app_name);
4691 goto recheck;
4692 } else if (ret < 0)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304693 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004694 }
4695 data->client.app_id = app_id;
4696 if (!found_app) {
4697 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4698 if (!entry) {
4699 pr_err("kmalloc for app entry failed\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304700 ret = -ENOMEM;
4701 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004702 }
4703 entry->app_id = app_id;
4704 entry->ref_cnt = 1;
4705 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4706 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4707 ret = -EIO;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304708 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004709 }
4710 entry->app_arch = app_arch;
4711 entry->app_blocked = false;
4712 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07004713 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004714 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4715 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4716 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4717 flags);
4718 }
4719
4720 /* Get the physical address of the ION BUF */
4721 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4722 if (ret) {
4723 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4724 ret);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304725 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004726 }
4727
4728 /* Populate the structure for sending scm call to load image */
4729 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4730 data->client.ihandle);
4731 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4732 pr_err("ION memory mapping for client shared buf failed\n");
4733 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304734 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004735 }
4736 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4737 data->client.sb_phys = (phys_addr_t)pa;
4738 (*handle)->dev = (void *)data;
4739 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4740 (*handle)->sbuf_len = data->client.sb_length;
4741
4742 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4743 if (!kclient_entry) {
4744 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304745 goto exit_ion_unmap_kernel;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004746 }
4747 kclient_entry->handle = *handle;
4748
4749 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4750 list_add_tail(&kclient_entry->list,
4751 &qseecom.registered_kclient_list_head);
4752 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4753
4754 mutex_unlock(&app_access_lock);
4755 return 0;
4756
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304757exit_ion_unmap_kernel:
4758 if (!IS_ERR_OR_NULL(data->client.ihandle))
4759 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
4760exit_entry_free:
4761 kfree(entry);
4762exit_ion_free:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004763 mutex_unlock(&app_access_lock);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304764 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
4765 ion_free(qseecom.ion_clnt, data->client.ihandle);
4766 data->client.ihandle = NULL;
4767 }
4768exit_data_free:
4769 kfree(data);
4770exit_handle_free:
4771 if (*handle) {
4772 kfree(*handle);
4773 *handle = NULL;
4774 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004775 return ret;
4776}
4777EXPORT_SYMBOL(qseecom_start_app);
4778
4779int qseecom_shutdown_app(struct qseecom_handle **handle)
4780{
4781 int ret = -EINVAL;
4782 struct qseecom_dev_handle *data;
4783
4784 struct qseecom_registered_kclient_list *kclient = NULL;
4785 unsigned long flags = 0;
4786 bool found_handle = false;
4787
Zhen Kongc4c162a2019-01-23 12:07:12 -08004788 __wakeup_unregister_listener_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004789
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004790 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4791 pr_err("Not allowed to be called in %d state\n",
4792 atomic_read(&qseecom.qseecom_state));
4793 return -EPERM;
4794 }
4795
4796 if ((handle == NULL) || (*handle == NULL)) {
4797 pr_err("Handle is not initialized\n");
4798 return -EINVAL;
4799 }
4800 data = (struct qseecom_dev_handle *) ((*handle)->dev);
4801 mutex_lock(&app_access_lock);
4802
4803 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4804 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
4805 list) {
4806 if (kclient->handle == (*handle)) {
4807 list_del(&kclient->list);
4808 found_handle = true;
4809 break;
4810 }
4811 }
4812 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4813 if (!found_handle)
4814 pr_err("Unable to find the handle, exiting\n");
4815 else
4816 ret = qseecom_unload_app(data, false);
4817
4818 mutex_unlock(&app_access_lock);
4819 if (ret == 0) {
4820 kzfree(data);
4821 kzfree(*handle);
4822 kzfree(kclient);
4823 *handle = NULL;
4824 }
4825
4826 return ret;
4827}
4828EXPORT_SYMBOL(qseecom_shutdown_app);
4829
4830int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
4831 uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
4832{
4833 int ret = 0;
4834 struct qseecom_send_cmd_req req = {0, 0, 0, 0};
4835 struct qseecom_dev_handle *data;
4836 bool perf_enabled = false;
4837
Zhen Kongc4c162a2019-01-23 12:07:12 -08004838 __wakeup_unregister_listener_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004839
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004840 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4841 pr_err("Not allowed to be called in %d state\n",
4842 atomic_read(&qseecom.qseecom_state));
4843 return -EPERM;
4844 }
4845
4846 if (handle == NULL) {
4847 pr_err("Handle is not initialized\n");
4848 return -EINVAL;
4849 }
4850 data = handle->dev;
4851
4852 req.cmd_req_len = sbuf_len;
4853 req.resp_len = rbuf_len;
4854 req.cmd_req_buf = send_buf;
4855 req.resp_buf = resp_buf;
4856
4857 if (__validate_send_cmd_inputs(data, &req))
4858 return -EINVAL;
4859
4860 mutex_lock(&app_access_lock);
4861 if (qseecom.support_bus_scaling) {
4862 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
4863 if (ret) {
4864 pr_err("Failed to set bw.\n");
4865 mutex_unlock(&app_access_lock);
4866 return ret;
4867 }
4868 }
4869 /*
4870 * On targets where crypto clock is handled by HLOS,
4871 * if clk_access_cnt is zero and perf_enabled is false,
4872 * then the crypto clock was not enabled before sending cmd
4873 * to tz, qseecom will enable the clock to avoid service failure.
4874 */
4875 if (!qseecom.no_clock_support &&
4876 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
4877 pr_debug("ce clock is not enabled!\n");
4878 ret = qseecom_perf_enable(data);
4879 if (ret) {
4880 pr_err("Failed to vote for clock with err %d\n",
4881 ret);
4882 mutex_unlock(&app_access_lock);
4883 return -EINVAL;
4884 }
4885 perf_enabled = true;
4886 }
4887 if (!strcmp(data->client.app_name, "securemm"))
4888 data->use_legacy_cmd = true;
4889
4890 ret = __qseecom_send_cmd(data, &req);
4891 data->use_legacy_cmd = false;
4892 if (qseecom.support_bus_scaling)
4893 __qseecom_add_bw_scale_down_timer(
4894 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
4895
4896 if (perf_enabled) {
4897 qsee_disable_clock_vote(data, CLK_DFAB);
4898 qsee_disable_clock_vote(data, CLK_SFPB);
4899 }
4900
4901 mutex_unlock(&app_access_lock);
4902
4903 if (ret)
4904 return ret;
4905
4906 pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
4907 req.resp_len, req.resp_buf);
4908 return ret;
4909}
4910EXPORT_SYMBOL(qseecom_send_command);
4911
4912int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4913{
4914 int ret = 0;
4915
4916 if ((handle == NULL) || (handle->dev == NULL)) {
4917 pr_err("No valid kernel client\n");
4918 return -EINVAL;
4919 }
4920 if (high) {
4921 if (qseecom.support_bus_scaling) {
4922 mutex_lock(&qsee_bw_mutex);
4923 __qseecom_register_bus_bandwidth_needs(handle->dev,
4924 HIGH);
4925 mutex_unlock(&qsee_bw_mutex);
4926 } else {
4927 ret = qseecom_perf_enable(handle->dev);
4928 if (ret)
4929 pr_err("Failed to vote for clock with err %d\n",
4930 ret);
4931 }
4932 } else {
4933 if (!qseecom.support_bus_scaling) {
4934 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4935 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4936 } else {
4937 mutex_lock(&qsee_bw_mutex);
4938 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4939 mutex_unlock(&qsee_bw_mutex);
4940 }
4941 }
4942 return ret;
4943}
4944EXPORT_SYMBOL(qseecom_set_bandwidth);
4945
4946int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
4947{
4948 struct qseecom_registered_app_list dummy_app_entry = { {0} };
4949 struct qseecom_dev_handle dummy_private_data = {0};
4950 struct qseecom_command_scm_resp resp;
4951 int ret = 0;
4952
4953 if (!desc) {
4954 pr_err("desc is NULL\n");
4955 return -EINVAL;
4956 }
4957
4958 resp.result = desc->ret[0]; /*req_cmd*/
Zhen Kong2f60f492017-06-29 15:22:14 -07004959 resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004960 resp.data = desc->ret[2]; /*listener_id*/
4961
Zhen Konge7f525f2017-12-01 18:26:25 -08004962 dummy_private_data.client.app_id = desc->ret[1];
4963 dummy_app_entry.app_id = desc->ret[1];
4964
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004965 mutex_lock(&app_access_lock);
Zhen Kong7458c2e2017-10-19 12:32:07 -07004966 if (qseecom.qsee_reentrancy_support)
4967 ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004968 &dummy_private_data);
Zhen Kong7458c2e2017-10-19 12:32:07 -07004969 else
4970 ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
4971 &resp);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004972 mutex_unlock(&app_access_lock);
4973 if (ret)
Zhen Kong2f60f492017-06-29 15:22:14 -07004974 pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004975 (int)desc->ret[0], (int)desc->ret[2],
4976 (int)desc->ret[1], ret);
4977 desc->ret[0] = resp.result;
4978 desc->ret[1] = resp.resp_type;
4979 desc->ret[2] = resp.data;
4980 return ret;
4981}
4982EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4983
4984static int qseecom_send_resp(void)
4985{
4986 qseecom.send_resp_flag = 1;
4987 wake_up_interruptible(&qseecom.send_resp_wq);
4988 return 0;
4989}
4990
4991static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4992{
4993 struct qseecom_registered_listener_list *this_lstnr = NULL;
4994
4995 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4996 this_lstnr = __qseecom_find_svc(data->listener.id);
4997 if (this_lstnr == NULL)
4998 return -EINVAL;
4999 qseecom.send_resp_flag = 1;
5000 this_lstnr->send_resp_flag = 1;
5001 wake_up_interruptible(&qseecom.send_resp_wq);
5002 return 0;
5003}
5004
5005static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
5006 struct qseecom_send_modfd_listener_resp *resp,
5007 struct qseecom_registered_listener_list *this_lstnr)
5008{
5009 int i;
5010
5011 if (!data || !resp || !this_lstnr) {
5012 pr_err("listener handle or resp msg is null\n");
5013 return -EINVAL;
5014 }
5015
5016 if (resp->resp_buf_ptr == NULL) {
5017 pr_err("resp buffer is null\n");
5018 return -EINVAL;
5019 }
5020 /* validate resp buf length */
5021 if ((resp->resp_len == 0) ||
5022 (resp->resp_len > this_lstnr->sb_length)) {
5023 pr_err("resp buf length %d not valid\n", resp->resp_len);
5024 return -EINVAL;
5025 }
5026
5027 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
5028 pr_err("Integer overflow in resp_len & resp_buf\n");
5029 return -EINVAL;
5030 }
5031 if ((uintptr_t)this_lstnr->user_virt_sb_base >
5032 (ULONG_MAX - this_lstnr->sb_length)) {
5033 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
5034 return -EINVAL;
5035 }
5036 /* validate resp buf */
5037 if (((uintptr_t)resp->resp_buf_ptr <
5038 (uintptr_t)this_lstnr->user_virt_sb_base) ||
5039 ((uintptr_t)resp->resp_buf_ptr >=
5040 ((uintptr_t)this_lstnr->user_virt_sb_base +
5041 this_lstnr->sb_length)) ||
5042 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
5043 ((uintptr_t)this_lstnr->user_virt_sb_base +
5044 this_lstnr->sb_length))) {
5045 pr_err("resp buf is out of shared buffer region\n");
5046 return -EINVAL;
5047 }
5048
5049 /* validate offsets */
5050 for (i = 0; i < MAX_ION_FD; i++) {
5051 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
5052 pr_err("Invalid offset %d = 0x%x\n",
5053 i, resp->ifd_data[i].cmd_buf_offset);
5054 return -EINVAL;
5055 }
5056 }
5057
5058 return 0;
5059}
5060
5061static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5062 void __user *argp, bool is_64bit_addr)
5063{
5064 struct qseecom_send_modfd_listener_resp resp;
5065 struct qseecom_registered_listener_list *this_lstnr = NULL;
5066
5067 if (copy_from_user(&resp, argp, sizeof(resp))) {
5068 pr_err("copy_from_user failed");
5069 return -EINVAL;
5070 }
5071
5072 this_lstnr = __qseecom_find_svc(data->listener.id);
5073 if (this_lstnr == NULL)
5074 return -EINVAL;
5075
5076 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
5077 return -EINVAL;
5078
5079 resp.resp_buf_ptr = this_lstnr->sb_virt +
5080 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
5081
5082 if (!is_64bit_addr)
5083 __qseecom_update_cmd_buf(&resp, false, data);
5084 else
5085 __qseecom_update_cmd_buf_64(&resp, false, data);
5086 qseecom.send_resp_flag = 1;
5087 this_lstnr->send_resp_flag = 1;
5088 wake_up_interruptible(&qseecom.send_resp_wq);
5089 return 0;
5090}
5091
5092static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5093 void __user *argp)
5094{
5095 return __qseecom_send_modfd_resp(data, argp, false);
5096}
5097
5098static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
5099 void __user *argp)
5100{
5101 return __qseecom_send_modfd_resp(data, argp, true);
5102}
5103
5104static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
5105 void __user *argp)
5106{
5107 struct qseecom_qseos_version_req req;
5108
5109 if (copy_from_user(&req, argp, sizeof(req))) {
5110 pr_err("copy_from_user failed");
5111 return -EINVAL;
5112 }
5113 req.qseos_version = qseecom.qseos_version;
5114 if (copy_to_user(argp, &req, sizeof(req))) {
5115 pr_err("copy_to_user failed");
5116 return -EINVAL;
5117 }
5118 return 0;
5119}
5120
5121static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
5122{
5123 int rc = 0;
5124 struct qseecom_clk *qclk = NULL;
5125
5126 if (qseecom.no_clock_support)
5127 return 0;
5128
5129 if (ce == CLK_QSEE)
5130 qclk = &qseecom.qsee;
5131 if (ce == CLK_CE_DRV)
5132 qclk = &qseecom.ce_drv;
5133
5134 if (qclk == NULL) {
5135 pr_err("CLK type not supported\n");
5136 return -EINVAL;
5137 }
5138 mutex_lock(&clk_access_lock);
5139
5140 if (qclk->clk_access_cnt == ULONG_MAX) {
5141 pr_err("clk_access_cnt beyond limitation\n");
5142 goto err;
5143 }
5144 if (qclk->clk_access_cnt > 0) {
5145 qclk->clk_access_cnt++;
5146 mutex_unlock(&clk_access_lock);
5147 return rc;
5148 }
5149
5150 /* Enable CE core clk */
5151 if (qclk->ce_core_clk != NULL) {
5152 rc = clk_prepare_enable(qclk->ce_core_clk);
5153 if (rc) {
5154 pr_err("Unable to enable/prepare CE core clk\n");
5155 goto err;
5156 }
5157 }
5158 /* Enable CE clk */
5159 if (qclk->ce_clk != NULL) {
5160 rc = clk_prepare_enable(qclk->ce_clk);
5161 if (rc) {
5162 pr_err("Unable to enable/prepare CE iface clk\n");
5163 goto ce_clk_err;
5164 }
5165 }
5166 /* Enable AXI clk */
5167 if (qclk->ce_bus_clk != NULL) {
5168 rc = clk_prepare_enable(qclk->ce_bus_clk);
5169 if (rc) {
5170 pr_err("Unable to enable/prepare CE bus clk\n");
5171 goto ce_bus_clk_err;
5172 }
5173 }
5174 qclk->clk_access_cnt++;
5175 mutex_unlock(&clk_access_lock);
5176 return 0;
5177
5178ce_bus_clk_err:
5179 if (qclk->ce_clk != NULL)
5180 clk_disable_unprepare(qclk->ce_clk);
5181ce_clk_err:
5182 if (qclk->ce_core_clk != NULL)
5183 clk_disable_unprepare(qclk->ce_core_clk);
5184err:
5185 mutex_unlock(&clk_access_lock);
5186 return -EIO;
5187}
5188
5189static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5190{
5191 struct qseecom_clk *qclk;
5192
5193 if (qseecom.no_clock_support)
5194 return;
5195
5196 if (ce == CLK_QSEE)
5197 qclk = &qseecom.qsee;
5198 else
5199 qclk = &qseecom.ce_drv;
5200
5201 mutex_lock(&clk_access_lock);
5202
5203 if (qclk->clk_access_cnt == 0) {
5204 mutex_unlock(&clk_access_lock);
5205 return;
5206 }
5207
5208 if (qclk->clk_access_cnt == 1) {
5209 if (qclk->ce_clk != NULL)
5210 clk_disable_unprepare(qclk->ce_clk);
5211 if (qclk->ce_core_clk != NULL)
5212 clk_disable_unprepare(qclk->ce_core_clk);
5213 if (qclk->ce_bus_clk != NULL)
5214 clk_disable_unprepare(qclk->ce_bus_clk);
5215 }
5216 qclk->clk_access_cnt--;
5217 mutex_unlock(&clk_access_lock);
5218}
5219
5220static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
5221 int32_t clk_type)
5222{
5223 int ret = 0;
5224 struct qseecom_clk *qclk;
5225
5226 if (qseecom.no_clock_support)
5227 return 0;
5228
5229 qclk = &qseecom.qsee;
5230 if (!qseecom.qsee_perf_client)
5231 return ret;
5232
5233 switch (clk_type) {
5234 case CLK_DFAB:
5235 mutex_lock(&qsee_bw_mutex);
5236 if (!qseecom.qsee_bw_count) {
5237 if (qseecom.qsee_sfpb_bw_count > 0)
5238 ret = msm_bus_scale_client_update_request(
5239 qseecom.qsee_perf_client, 3);
5240 else {
5241 if (qclk->ce_core_src_clk != NULL)
5242 ret = __qseecom_enable_clk(CLK_QSEE);
5243 if (!ret) {
5244 ret =
5245 msm_bus_scale_client_update_request(
5246 qseecom.qsee_perf_client, 1);
5247 if ((ret) &&
5248 (qclk->ce_core_src_clk != NULL))
5249 __qseecom_disable_clk(CLK_QSEE);
5250 }
5251 }
5252 if (ret)
5253 pr_err("DFAB Bandwidth req failed (%d)\n",
5254 ret);
5255 else {
5256 qseecom.qsee_bw_count++;
5257 data->perf_enabled = true;
5258 }
5259 } else {
5260 qseecom.qsee_bw_count++;
5261 data->perf_enabled = true;
5262 }
5263 mutex_unlock(&qsee_bw_mutex);
5264 break;
5265 case CLK_SFPB:
5266 mutex_lock(&qsee_bw_mutex);
5267 if (!qseecom.qsee_sfpb_bw_count) {
5268 if (qseecom.qsee_bw_count > 0)
5269 ret = msm_bus_scale_client_update_request(
5270 qseecom.qsee_perf_client, 3);
5271 else {
5272 if (qclk->ce_core_src_clk != NULL)
5273 ret = __qseecom_enable_clk(CLK_QSEE);
5274 if (!ret) {
5275 ret =
5276 msm_bus_scale_client_update_request(
5277 qseecom.qsee_perf_client, 2);
5278 if ((ret) &&
5279 (qclk->ce_core_src_clk != NULL))
5280 __qseecom_disable_clk(CLK_QSEE);
5281 }
5282 }
5283
5284 if (ret)
5285 pr_err("SFPB Bandwidth req failed (%d)\n",
5286 ret);
5287 else {
5288 qseecom.qsee_sfpb_bw_count++;
5289 data->fast_load_enabled = true;
5290 }
5291 } else {
5292 qseecom.qsee_sfpb_bw_count++;
5293 data->fast_load_enabled = true;
5294 }
5295 mutex_unlock(&qsee_bw_mutex);
5296 break;
5297 default:
5298 pr_err("Clock type not defined\n");
5299 break;
5300 }
5301 return ret;
5302}
5303
5304static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5305 int32_t clk_type)
5306{
5307 int32_t ret = 0;
5308 struct qseecom_clk *qclk;
5309
5310 qclk = &qseecom.qsee;
5311
5312 if (qseecom.no_clock_support)
5313 return;
5314 if (!qseecom.qsee_perf_client)
5315 return;
5316
5317 switch (clk_type) {
5318 case CLK_DFAB:
5319 mutex_lock(&qsee_bw_mutex);
5320 if (qseecom.qsee_bw_count == 0) {
5321 pr_err("Client error.Extra call to disable DFAB clk\n");
5322 mutex_unlock(&qsee_bw_mutex);
5323 return;
5324 }
5325
5326 if (qseecom.qsee_bw_count == 1) {
5327 if (qseecom.qsee_sfpb_bw_count > 0)
5328 ret = msm_bus_scale_client_update_request(
5329 qseecom.qsee_perf_client, 2);
5330 else {
5331 ret = msm_bus_scale_client_update_request(
5332 qseecom.qsee_perf_client, 0);
5333 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5334 __qseecom_disable_clk(CLK_QSEE);
5335 }
5336 if (ret)
5337 pr_err("SFPB Bandwidth req fail (%d)\n",
5338 ret);
5339 else {
5340 qseecom.qsee_bw_count--;
5341 data->perf_enabled = false;
5342 }
5343 } else {
5344 qseecom.qsee_bw_count--;
5345 data->perf_enabled = false;
5346 }
5347 mutex_unlock(&qsee_bw_mutex);
5348 break;
5349 case CLK_SFPB:
5350 mutex_lock(&qsee_bw_mutex);
5351 if (qseecom.qsee_sfpb_bw_count == 0) {
5352 pr_err("Client error.Extra call to disable SFPB clk\n");
5353 mutex_unlock(&qsee_bw_mutex);
5354 return;
5355 }
5356 if (qseecom.qsee_sfpb_bw_count == 1) {
5357 if (qseecom.qsee_bw_count > 0)
5358 ret = msm_bus_scale_client_update_request(
5359 qseecom.qsee_perf_client, 1);
5360 else {
5361 ret = msm_bus_scale_client_update_request(
5362 qseecom.qsee_perf_client, 0);
5363 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5364 __qseecom_disable_clk(CLK_QSEE);
5365 }
5366 if (ret)
5367 pr_err("SFPB Bandwidth req fail (%d)\n",
5368 ret);
5369 else {
5370 qseecom.qsee_sfpb_bw_count--;
5371 data->fast_load_enabled = false;
5372 }
5373 } else {
5374 qseecom.qsee_sfpb_bw_count--;
5375 data->fast_load_enabled = false;
5376 }
5377 mutex_unlock(&qsee_bw_mutex);
5378 break;
5379 default:
5380 pr_err("Clock type not defined\n");
5381 break;
5382 }
5383
5384}
5385
5386static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5387 void __user *argp)
5388{
5389 struct ion_handle *ihandle; /* Ion handle */
5390 struct qseecom_load_img_req load_img_req;
5391 int uret = 0;
5392 int ret;
5393 ion_phys_addr_t pa = 0;
5394 size_t len;
5395 struct qseecom_load_app_ireq load_req;
5396 struct qseecom_load_app_64bit_ireq load_req_64bit;
5397 struct qseecom_command_scm_resp resp;
5398 void *cmd_buf = NULL;
5399 size_t cmd_len;
5400 /* Copy the relevant information needed for loading the image */
5401 if (copy_from_user(&load_img_req,
5402 (void __user *)argp,
5403 sizeof(struct qseecom_load_img_req))) {
5404 pr_err("copy_from_user failed\n");
5405 return -EFAULT;
5406 }
5407
5408 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005409 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005410 load_img_req.ifd_data_fd);
5411 if (IS_ERR_OR_NULL(ihandle)) {
5412 pr_err("Ion client could not retrieve the handle\n");
5413 return -ENOMEM;
5414 }
5415
5416 /* Get the physical address of the ION BUF */
5417 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5418 if (ret) {
5419 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5420 ret);
5421 return ret;
5422 }
5423 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5424 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5425 len, load_img_req.mdt_len,
5426 load_img_req.img_len);
5427 return ret;
5428 }
5429 /* Populate the structure for sending scm call to load image */
5430 if (qseecom.qsee_version < QSEE_VERSION_40) {
5431 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5432 load_req.mdt_len = load_img_req.mdt_len;
5433 load_req.img_len = load_img_req.img_len;
5434 load_req.phy_addr = (uint32_t)pa;
5435 cmd_buf = (void *)&load_req;
5436 cmd_len = sizeof(struct qseecom_load_app_ireq);
5437 } else {
5438 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5439 load_req_64bit.mdt_len = load_img_req.mdt_len;
5440 load_req_64bit.img_len = load_img_req.img_len;
5441 load_req_64bit.phy_addr = (uint64_t)pa;
5442 cmd_buf = (void *)&load_req_64bit;
5443 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5444 }
5445
5446 if (qseecom.support_bus_scaling) {
5447 mutex_lock(&qsee_bw_mutex);
5448 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5449 mutex_unlock(&qsee_bw_mutex);
5450 if (ret) {
5451 ret = -EIO;
5452 goto exit_cpu_restore;
5453 }
5454 }
5455
5456 /* Vote for the SFPB clock */
5457 ret = __qseecom_enable_clk_scale_up(data);
5458 if (ret) {
5459 ret = -EIO;
5460 goto exit_register_bus_bandwidth_needs;
5461 }
5462 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5463 ION_IOC_CLEAN_INV_CACHES);
5464 if (ret) {
5465 pr_err("cache operation failed %d\n", ret);
5466 goto exit_disable_clock;
5467 }
5468 /* SCM_CALL to load the external elf */
5469 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5470 &resp, sizeof(resp));
5471 if (ret) {
5472 pr_err("scm_call to load failed : ret %d\n",
5473 ret);
5474 ret = -EFAULT;
5475 goto exit_disable_clock;
5476 }
5477
5478 switch (resp.result) {
5479 case QSEOS_RESULT_SUCCESS:
5480 break;
5481 case QSEOS_RESULT_INCOMPLETE:
5482 pr_err("%s: qseos result incomplete\n", __func__);
5483 ret = __qseecom_process_incomplete_cmd(data, &resp);
5484 if (ret)
5485 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5486 break;
5487 case QSEOS_RESULT_FAILURE:
5488 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5489 ret = -EFAULT;
5490 break;
5491 default:
5492 pr_err("scm_call response result %d not supported\n",
5493 resp.result);
5494 ret = -EFAULT;
5495 break;
5496 }
5497
5498exit_disable_clock:
5499 __qseecom_disable_clk_scale_down(data);
5500
5501exit_register_bus_bandwidth_needs:
5502 if (qseecom.support_bus_scaling) {
5503 mutex_lock(&qsee_bw_mutex);
5504 uret = qseecom_unregister_bus_bandwidth_needs(data);
5505 mutex_unlock(&qsee_bw_mutex);
5506 if (uret)
5507 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5508 uret, ret);
5509 }
5510
5511exit_cpu_restore:
5512 /* Deallocate the handle */
5513 if (!IS_ERR_OR_NULL(ihandle))
5514 ion_free(qseecom.ion_clnt, ihandle);
5515 return ret;
5516}
5517
5518static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5519{
5520 int ret = 0;
5521 struct qseecom_command_scm_resp resp;
5522 struct qseecom_unload_app_ireq req;
5523
5524 /* unavailable client app */
5525 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5526
5527 /* Populate the structure for sending scm call to unload image */
5528 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5529
5530 /* SCM_CALL to unload the external elf */
5531 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5532 sizeof(struct qseecom_unload_app_ireq),
5533 &resp, sizeof(resp));
5534 if (ret) {
5535 pr_err("scm_call to unload failed : ret %d\n",
5536 ret);
5537 ret = -EFAULT;
5538 goto qseecom_unload_external_elf_scm_err;
5539 }
5540 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5541 ret = __qseecom_process_incomplete_cmd(data, &resp);
5542 if (ret)
5543 pr_err("process_incomplete_cmd fail err: %d\n",
5544 ret);
5545 } else {
5546 if (resp.result != QSEOS_RESULT_SUCCESS) {
5547 pr_err("scm_call to unload image failed resp.result =%d\n",
5548 resp.result);
5549 ret = -EFAULT;
5550 }
5551 }
5552
5553qseecom_unload_external_elf_scm_err:
5554
5555 return ret;
5556}
5557
5558static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5559 void __user *argp)
5560{
5561
5562 int32_t ret;
5563 struct qseecom_qseos_app_load_query query_req;
5564 struct qseecom_check_app_ireq req;
5565 struct qseecom_registered_app_list *entry = NULL;
5566 unsigned long flags = 0;
5567 uint32_t app_arch = 0, app_id = 0;
5568 bool found_app = false;
5569
5570 /* Copy the relevant information needed for loading the image */
5571 if (copy_from_user(&query_req,
5572 (void __user *)argp,
5573 sizeof(struct qseecom_qseos_app_load_query))) {
5574 pr_err("copy_from_user failed\n");
5575 return -EFAULT;
5576 }
5577
5578 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5579 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5580 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5581
5582 ret = __qseecom_check_app_exists(req, &app_id);
5583 if (ret) {
5584 pr_err(" scm call to check if app is loaded failed");
5585 return ret; /* scm call failed */
5586 }
5587 if (app_id) {
5588 pr_debug("App id %d (%s) already exists\n", app_id,
5589 (char *)(req.app_name));
5590 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5591 list_for_each_entry(entry,
5592 &qseecom.registered_app_list_head, list){
5593 if (entry->app_id == app_id) {
5594 app_arch = entry->app_arch;
5595 entry->ref_cnt++;
5596 found_app = true;
5597 break;
5598 }
5599 }
5600 spin_unlock_irqrestore(
5601 &qseecom.registered_app_list_lock, flags);
5602 data->client.app_id = app_id;
5603 query_req.app_id = app_id;
5604 if (app_arch) {
5605 data->client.app_arch = app_arch;
5606 query_req.app_arch = app_arch;
5607 } else {
5608 data->client.app_arch = 0;
5609 query_req.app_arch = 0;
5610 }
5611 strlcpy(data->client.app_name, query_req.app_name,
5612 MAX_APP_NAME_SIZE);
5613 /*
5614 * If app was loaded by appsbl before and was not registered,
5615 * regiser this app now.
5616 */
5617 if (!found_app) {
5618 pr_debug("Register app %d [%s] which was loaded before\n",
5619 ret, (char *)query_req.app_name);
5620 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5621 if (!entry) {
5622 pr_err("kmalloc for app entry failed\n");
5623 return -ENOMEM;
5624 }
5625 entry->app_id = app_id;
5626 entry->ref_cnt = 1;
5627 entry->app_arch = data->client.app_arch;
5628 strlcpy(entry->app_name, data->client.app_name,
5629 MAX_APP_NAME_SIZE);
5630 entry->app_blocked = false;
5631 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07005632 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005633 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5634 flags);
5635 list_add_tail(&entry->list,
5636 &qseecom.registered_app_list_head);
5637 spin_unlock_irqrestore(
5638 &qseecom.registered_app_list_lock, flags);
5639 }
5640 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5641 pr_err("copy_to_user failed\n");
5642 return -EFAULT;
5643 }
5644 return -EEXIST; /* app already loaded */
5645 } else {
5646 return 0; /* app not loaded */
5647 }
5648}
5649
5650static int __qseecom_get_ce_pipe_info(
5651 enum qseecom_key_management_usage_type usage,
5652 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5653{
5654 int ret = -EINVAL;
5655 int i, j;
5656 struct qseecom_ce_info_use *p = NULL;
5657 int total = 0;
5658 struct qseecom_ce_pipe_entry *pcepipe;
5659
5660 switch (usage) {
5661 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5662 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5663 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5664 if (qseecom.support_fde) {
5665 p = qseecom.ce_info.fde;
5666 total = qseecom.ce_info.num_fde;
5667 } else {
5668 pr_err("system does not support fde\n");
5669 return -EINVAL;
5670 }
5671 break;
5672 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5673 if (qseecom.support_pfe) {
5674 p = qseecom.ce_info.pfe;
5675 total = qseecom.ce_info.num_pfe;
5676 } else {
5677 pr_err("system does not support pfe\n");
5678 return -EINVAL;
5679 }
5680 break;
5681 default:
5682 pr_err("unsupported usage %d\n", usage);
5683 return -EINVAL;
5684 }
5685
5686 for (j = 0; j < total; j++) {
5687 if (p->unit_num == unit) {
5688 pcepipe = p->ce_pipe_entry;
5689 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5690 (*ce_hw)[i] = pcepipe->ce_num;
5691 *pipe = pcepipe->ce_pipe_pair;
5692 pcepipe++;
5693 }
5694 ret = 0;
5695 break;
5696 }
5697 p++;
5698 }
5699 return ret;
5700}
5701
5702static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
5703 enum qseecom_key_management_usage_type usage,
5704 struct qseecom_key_generate_ireq *ireq)
5705{
5706 struct qseecom_command_scm_resp resp;
5707 int ret;
5708
5709 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5710 usage >= QSEOS_KM_USAGE_MAX) {
5711 pr_err("Error:: unsupported usage %d\n", usage);
5712 return -EFAULT;
5713 }
5714 ret = __qseecom_enable_clk(CLK_QSEE);
5715 if (ret)
5716 return ret;
5717
5718 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5719 ireq, sizeof(struct qseecom_key_generate_ireq),
5720 &resp, sizeof(resp));
5721 if (ret) {
5722 if (ret == -EINVAL &&
5723 resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5724 pr_debug("Key ID exists.\n");
5725 ret = 0;
5726 } else {
5727 pr_err("scm call to generate key failed : %d\n", ret);
5728 ret = -EFAULT;
5729 }
5730 goto generate_key_exit;
5731 }
5732
5733 switch (resp.result) {
5734 case QSEOS_RESULT_SUCCESS:
5735 break;
5736 case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
5737 pr_debug("Key ID exists.\n");
5738 break;
5739 case QSEOS_RESULT_INCOMPLETE:
5740 ret = __qseecom_process_incomplete_cmd(data, &resp);
5741 if (ret) {
5742 if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5743 pr_debug("Key ID exists.\n");
5744 ret = 0;
5745 } else {
5746 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5747 resp.result);
5748 }
5749 }
5750 break;
5751 case QSEOS_RESULT_FAILURE:
5752 default:
5753 pr_err("gen key scm call failed resp.result %d\n", resp.result);
5754 ret = -EINVAL;
5755 break;
5756 }
5757generate_key_exit:
5758 __qseecom_disable_clk(CLK_QSEE);
5759 return ret;
5760}
5761
5762static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
5763 enum qseecom_key_management_usage_type usage,
5764 struct qseecom_key_delete_ireq *ireq)
5765{
5766 struct qseecom_command_scm_resp resp;
5767 int ret;
5768
5769 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5770 usage >= QSEOS_KM_USAGE_MAX) {
5771 pr_err("Error:: unsupported usage %d\n", usage);
5772 return -EFAULT;
5773 }
5774 ret = __qseecom_enable_clk(CLK_QSEE);
5775 if (ret)
5776 return ret;
5777
5778 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5779 ireq, sizeof(struct qseecom_key_delete_ireq),
5780 &resp, sizeof(struct qseecom_command_scm_resp));
5781 if (ret) {
5782 if (ret == -EINVAL &&
5783 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5784 pr_debug("Max attempts to input password reached.\n");
5785 ret = -ERANGE;
5786 } else {
5787 pr_err("scm call to delete key failed : %d\n", ret);
5788 ret = -EFAULT;
5789 }
5790 goto del_key_exit;
5791 }
5792
5793 switch (resp.result) {
5794 case QSEOS_RESULT_SUCCESS:
5795 break;
5796 case QSEOS_RESULT_INCOMPLETE:
5797 ret = __qseecom_process_incomplete_cmd(data, &resp);
5798 if (ret) {
5799 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5800 resp.result);
5801 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5802 pr_debug("Max attempts to input password reached.\n");
5803 ret = -ERANGE;
5804 }
5805 }
5806 break;
5807 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5808 pr_debug("Max attempts to input password reached.\n");
5809 ret = -ERANGE;
5810 break;
5811 case QSEOS_RESULT_FAILURE:
5812 default:
5813 pr_err("Delete key scm call failed resp.result %d\n",
5814 resp.result);
5815 ret = -EINVAL;
5816 break;
5817 }
5818del_key_exit:
5819 __qseecom_disable_clk(CLK_QSEE);
5820 return ret;
5821}
5822
5823static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5824 enum qseecom_key_management_usage_type usage,
5825 struct qseecom_key_select_ireq *ireq)
5826{
5827 struct qseecom_command_scm_resp resp;
5828 int ret;
5829
5830 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5831 usage >= QSEOS_KM_USAGE_MAX) {
5832 pr_err("Error:: unsupported usage %d\n", usage);
5833 return -EFAULT;
5834 }
5835 ret = __qseecom_enable_clk(CLK_QSEE);
5836 if (ret)
5837 return ret;
5838
5839 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5840 ret = __qseecom_enable_clk(CLK_CE_DRV);
5841 if (ret)
5842 return ret;
5843 }
5844
5845 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5846 ireq, sizeof(struct qseecom_key_select_ireq),
5847 &resp, sizeof(struct qseecom_command_scm_resp));
5848 if (ret) {
5849 if (ret == -EINVAL &&
5850 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5851 pr_debug("Max attempts to input password reached.\n");
5852 ret = -ERANGE;
5853 } else if (ret == -EINVAL &&
5854 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5855 pr_debug("Set Key operation under processing...\n");
5856 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5857 } else {
5858 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5859 ret);
5860 ret = -EFAULT;
5861 }
5862 goto set_key_exit;
5863 }
5864
5865 switch (resp.result) {
5866 case QSEOS_RESULT_SUCCESS:
5867 break;
5868 case QSEOS_RESULT_INCOMPLETE:
5869 ret = __qseecom_process_incomplete_cmd(data, &resp);
5870 if (ret) {
5871 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5872 resp.result);
5873 if (resp.result ==
5874 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5875 pr_debug("Set Key operation under processing...\n");
5876 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5877 }
5878 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5879 pr_debug("Max attempts to input password reached.\n");
5880 ret = -ERANGE;
5881 }
5882 }
5883 break;
5884 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5885 pr_debug("Max attempts to input password reached.\n");
5886 ret = -ERANGE;
5887 break;
5888 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5889 pr_debug("Set Key operation under processing...\n");
5890 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5891 break;
5892 case QSEOS_RESULT_FAILURE:
5893 default:
5894 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5895 ret = -EINVAL;
5896 break;
5897 }
5898set_key_exit:
5899 __qseecom_disable_clk(CLK_QSEE);
5900 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5901 __qseecom_disable_clk(CLK_CE_DRV);
5902 return ret;
5903}
5904
5905static int __qseecom_update_current_key_user_info(
5906 struct qseecom_dev_handle *data,
5907 enum qseecom_key_management_usage_type usage,
5908 struct qseecom_key_userinfo_update_ireq *ireq)
5909{
5910 struct qseecom_command_scm_resp resp;
5911 int ret;
5912
5913 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5914 usage >= QSEOS_KM_USAGE_MAX) {
5915 pr_err("Error:: unsupported usage %d\n", usage);
5916 return -EFAULT;
5917 }
5918 ret = __qseecom_enable_clk(CLK_QSEE);
5919 if (ret)
5920 return ret;
5921
5922 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5923 ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
5924 &resp, sizeof(struct qseecom_command_scm_resp));
5925 if (ret) {
5926 if (ret == -EINVAL &&
5927 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5928 pr_debug("Set Key operation under processing...\n");
5929 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5930 } else {
5931 pr_err("scm call to update key userinfo failed: %d\n",
5932 ret);
5933 __qseecom_disable_clk(CLK_QSEE);
5934 return -EFAULT;
5935 }
5936 }
5937
5938 switch (resp.result) {
5939 case QSEOS_RESULT_SUCCESS:
5940 break;
5941 case QSEOS_RESULT_INCOMPLETE:
5942 ret = __qseecom_process_incomplete_cmd(data, &resp);
5943 if (resp.result ==
5944 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5945 pr_debug("Set Key operation under processing...\n");
5946 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5947 }
5948 if (ret)
5949 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5950 resp.result);
5951 break;
5952 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5953 pr_debug("Update Key operation under processing...\n");
5954 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5955 break;
5956 case QSEOS_RESULT_FAILURE:
5957 default:
5958 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5959 ret = -EINVAL;
5960 break;
5961 }
5962
5963 __qseecom_disable_clk(CLK_QSEE);
5964 return ret;
5965}
5966
5967
5968static int qseecom_enable_ice_setup(int usage)
5969{
5970 int ret = 0;
5971
5972 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5973 ret = qcom_ice_setup_ice_hw("ufs", true);
5974 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5975 ret = qcom_ice_setup_ice_hw("sdcc", true);
5976
5977 return ret;
5978}
5979
5980static int qseecom_disable_ice_setup(int usage)
5981{
5982 int ret = 0;
5983
5984 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5985 ret = qcom_ice_setup_ice_hw("ufs", false);
5986 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5987 ret = qcom_ice_setup_ice_hw("sdcc", false);
5988
5989 return ret;
5990}
5991
5992static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5993{
5994 struct qseecom_ce_info_use *pce_info_use, *p;
5995 int total = 0;
5996 int i;
5997
5998 switch (usage) {
5999 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
6000 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
6001 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
6002 p = qseecom.ce_info.fde;
6003 total = qseecom.ce_info.num_fde;
6004 break;
6005 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
6006 p = qseecom.ce_info.pfe;
6007 total = qseecom.ce_info.num_pfe;
6008 break;
6009 default:
6010 pr_err("unsupported usage %d\n", usage);
6011 return -EINVAL;
6012 }
6013
6014 pce_info_use = NULL;
6015
6016 for (i = 0; i < total; i++) {
6017 if (p->unit_num == unit) {
6018 pce_info_use = p;
6019 break;
6020 }
6021 p++;
6022 }
6023 if (!pce_info_use) {
6024 pr_err("can not find %d\n", unit);
6025 return -EINVAL;
6026 }
6027 return pce_info_use->num_ce_pipe_entries;
6028}
6029
6030static int qseecom_create_key(struct qseecom_dev_handle *data,
6031 void __user *argp)
6032{
6033 int i;
6034 uint32_t *ce_hw = NULL;
6035 uint32_t pipe = 0;
6036 int ret = 0;
6037 uint32_t flags = 0;
6038 struct qseecom_create_key_req create_key_req;
6039 struct qseecom_key_generate_ireq generate_key_ireq;
6040 struct qseecom_key_select_ireq set_key_ireq;
6041 uint32_t entries = 0;
6042
6043 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
6044 if (ret) {
6045 pr_err("copy_from_user failed\n");
6046 return ret;
6047 }
6048
6049 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6050 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6051 pr_err("unsupported usage %d\n", create_key_req.usage);
6052 ret = -EFAULT;
6053 return ret;
6054 }
6055 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6056 create_key_req.usage);
6057 if (entries <= 0) {
6058 pr_err("no ce instance for usage %d instance %d\n",
6059 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
6060 ret = -EINVAL;
6061 return ret;
6062 }
6063
6064 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6065 if (!ce_hw) {
6066 ret = -ENOMEM;
6067 return ret;
6068 }
6069 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
6070 DEFAULT_CE_INFO_UNIT);
6071 if (ret) {
6072 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6073 ret = -EINVAL;
6074 goto free_buf;
6075 }
6076
6077 if (qseecom.fde_key_size)
6078 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6079 else
6080 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6081
Jiten Patela7bb1d52018-05-11 12:34:26 +05306082 if (qseecom.enable_key_wrap_in_ks == true)
6083 flags |= ENABLE_KEY_WRAP_IN_KS;
6084
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006085 generate_key_ireq.flags = flags;
6086 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
6087 memset((void *)generate_key_ireq.key_id,
6088 0, QSEECOM_KEY_ID_SIZE);
6089 memset((void *)generate_key_ireq.hash32,
6090 0, QSEECOM_HASH_SIZE);
6091 memcpy((void *)generate_key_ireq.key_id,
6092 (void *)key_id_array[create_key_req.usage].desc,
6093 QSEECOM_KEY_ID_SIZE);
6094 memcpy((void *)generate_key_ireq.hash32,
6095 (void *)create_key_req.hash32,
6096 QSEECOM_HASH_SIZE);
6097
6098 ret = __qseecom_generate_and_save_key(data,
6099 create_key_req.usage, &generate_key_ireq);
6100 if (ret) {
6101 pr_err("Failed to generate key on storage: %d\n", ret);
6102 goto free_buf;
6103 }
6104
6105 for (i = 0; i < entries; i++) {
6106 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6107 if (create_key_req.usage ==
6108 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6109 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6110 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6111
6112 } else if (create_key_req.usage ==
6113 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6114 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6115 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6116
6117 } else {
6118 set_key_ireq.ce = ce_hw[i];
6119 set_key_ireq.pipe = pipe;
6120 }
6121 set_key_ireq.flags = flags;
6122
6123 /* set both PIPE_ENC and PIPE_ENC_XTS*/
6124 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6125 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6126 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6127 memcpy((void *)set_key_ireq.key_id,
6128 (void *)key_id_array[create_key_req.usage].desc,
6129 QSEECOM_KEY_ID_SIZE);
6130 memcpy((void *)set_key_ireq.hash32,
6131 (void *)create_key_req.hash32,
6132 QSEECOM_HASH_SIZE);
6133 /*
6134 * It will return false if it is GPCE based crypto instance or
6135 * ICE is setup properly
6136 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006137 ret = qseecom_enable_ice_setup(create_key_req.usage);
6138 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006139 goto free_buf;
6140
6141 do {
6142 ret = __qseecom_set_clear_ce_key(data,
6143 create_key_req.usage,
6144 &set_key_ireq);
6145 /*
6146 * wait a little before calling scm again to let other
6147 * processes run
6148 */
6149 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6150 msleep(50);
6151
6152 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6153
6154 qseecom_disable_ice_setup(create_key_req.usage);
6155
6156 if (ret) {
6157 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
6158 pipe, ce_hw[i], ret);
6159 goto free_buf;
6160 } else {
6161 pr_err("Set the key successfully\n");
6162 if ((create_key_req.usage ==
6163 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
6164 (create_key_req.usage ==
6165 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
6166 goto free_buf;
6167 }
6168 }
6169
6170free_buf:
6171 kzfree(ce_hw);
6172 return ret;
6173}
6174
6175static int qseecom_wipe_key(struct qseecom_dev_handle *data,
6176 void __user *argp)
6177{
6178 uint32_t *ce_hw = NULL;
6179 uint32_t pipe = 0;
6180 int ret = 0;
6181 uint32_t flags = 0;
6182 int i, j;
6183 struct qseecom_wipe_key_req wipe_key_req;
6184 struct qseecom_key_delete_ireq delete_key_ireq;
6185 struct qseecom_key_select_ireq clear_key_ireq;
6186 uint32_t entries = 0;
6187
6188 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6189 if (ret) {
6190 pr_err("copy_from_user failed\n");
6191 return ret;
6192 }
6193
6194 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6195 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6196 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6197 ret = -EFAULT;
6198 return ret;
6199 }
6200
6201 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6202 wipe_key_req.usage);
6203 if (entries <= 0) {
6204 pr_err("no ce instance for usage %d instance %d\n",
6205 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6206 ret = -EINVAL;
6207 return ret;
6208 }
6209
6210 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6211 if (!ce_hw) {
6212 ret = -ENOMEM;
6213 return ret;
6214 }
6215
6216 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6217 DEFAULT_CE_INFO_UNIT);
6218 if (ret) {
6219 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6220 ret = -EINVAL;
6221 goto free_buf;
6222 }
6223
6224 if (wipe_key_req.wipe_key_flag) {
6225 delete_key_ireq.flags = flags;
6226 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6227 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6228 memcpy((void *)delete_key_ireq.key_id,
6229 (void *)key_id_array[wipe_key_req.usage].desc,
6230 QSEECOM_KEY_ID_SIZE);
6231 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6232
6233 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6234 &delete_key_ireq);
6235 if (ret) {
6236 pr_err("Failed to delete key from ssd storage: %d\n",
6237 ret);
6238 ret = -EFAULT;
6239 goto free_buf;
6240 }
6241 }
6242
6243 for (j = 0; j < entries; j++) {
6244 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6245 if (wipe_key_req.usage ==
6246 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6247 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6248 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6249 } else if (wipe_key_req.usage ==
6250 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6251 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6252 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6253 } else {
6254 clear_key_ireq.ce = ce_hw[j];
6255 clear_key_ireq.pipe = pipe;
6256 }
6257 clear_key_ireq.flags = flags;
6258 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6259 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6260 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6261 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6262
6263 /*
6264 * It will return false if it is GPCE based crypto instance or
6265 * ICE is setup properly
6266 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006267 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6268 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006269 goto free_buf;
6270
6271 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6272 &clear_key_ireq);
6273
6274 qseecom_disable_ice_setup(wipe_key_req.usage);
6275
6276 if (ret) {
6277 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6278 pipe, ce_hw[j], ret);
6279 ret = -EFAULT;
6280 goto free_buf;
6281 }
6282 }
6283
6284free_buf:
6285 kzfree(ce_hw);
6286 return ret;
6287}
6288
6289static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6290 void __user *argp)
6291{
6292 int ret = 0;
6293 uint32_t flags = 0;
6294 struct qseecom_update_key_userinfo_req update_key_req;
6295 struct qseecom_key_userinfo_update_ireq ireq;
6296
6297 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6298 if (ret) {
6299 pr_err("copy_from_user failed\n");
6300 return ret;
6301 }
6302
6303 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6304 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6305 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6306 return -EFAULT;
6307 }
6308
6309 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6310
6311 if (qseecom.fde_key_size)
6312 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6313 else
6314 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6315
6316 ireq.flags = flags;
6317 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6318 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6319 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6320 memcpy((void *)ireq.key_id,
6321 (void *)key_id_array[update_key_req.usage].desc,
6322 QSEECOM_KEY_ID_SIZE);
6323 memcpy((void *)ireq.current_hash32,
6324 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6325 memcpy((void *)ireq.new_hash32,
6326 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6327
6328 do {
6329 ret = __qseecom_update_current_key_user_info(data,
6330 update_key_req.usage,
6331 &ireq);
6332 /*
6333 * wait a little before calling scm again to let other
6334 * processes run
6335 */
6336 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6337 msleep(50);
6338
6339 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6340 if (ret) {
6341 pr_err("Failed to update key info: %d\n", ret);
6342 return ret;
6343 }
6344 return ret;
6345
6346}
6347static int qseecom_is_es_activated(void __user *argp)
6348{
Zhen Kong26e62742018-05-04 17:19:06 -07006349 struct qseecom_is_es_activated_req req = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006350 struct qseecom_command_scm_resp resp;
6351 int ret;
6352
6353 if (qseecom.qsee_version < QSEE_VERSION_04) {
6354 pr_err("invalid qsee version\n");
6355 return -ENODEV;
6356 }
6357
6358 if (argp == NULL) {
6359 pr_err("arg is null\n");
6360 return -EINVAL;
6361 }
6362
6363 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6364 &req, sizeof(req), &resp, sizeof(resp));
6365 if (ret) {
6366 pr_err("scm_call failed\n");
6367 return ret;
6368 }
6369
6370 req.is_activated = resp.result;
6371 ret = copy_to_user(argp, &req, sizeof(req));
6372 if (ret) {
6373 pr_err("copy_to_user failed\n");
6374 return ret;
6375 }
6376
6377 return 0;
6378}
6379
6380static int qseecom_save_partition_hash(void __user *argp)
6381{
6382 struct qseecom_save_partition_hash_req req;
6383 struct qseecom_command_scm_resp resp;
6384 int ret;
6385
6386 memset(&resp, 0x00, sizeof(resp));
6387
6388 if (qseecom.qsee_version < QSEE_VERSION_04) {
6389 pr_err("invalid qsee version\n");
6390 return -ENODEV;
6391 }
6392
6393 if (argp == NULL) {
6394 pr_err("arg is null\n");
6395 return -EINVAL;
6396 }
6397
6398 ret = copy_from_user(&req, argp, sizeof(req));
6399 if (ret) {
6400 pr_err("copy_from_user failed\n");
6401 return ret;
6402 }
6403
6404 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6405 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6406 if (ret) {
6407 pr_err("qseecom_scm_call failed\n");
6408 return ret;
6409 }
6410
6411 return 0;
6412}
6413
6414static int qseecom_mdtp_cipher_dip(void __user *argp)
6415{
6416 struct qseecom_mdtp_cipher_dip_req req;
6417 u32 tzbuflenin, tzbuflenout;
6418 char *tzbufin = NULL, *tzbufout = NULL;
6419 struct scm_desc desc = {0};
6420 int ret;
6421
6422 do {
6423 /* Copy the parameters from userspace */
6424 if (argp == NULL) {
6425 pr_err("arg is null\n");
6426 ret = -EINVAL;
6427 break;
6428 }
6429
6430 ret = copy_from_user(&req, argp, sizeof(req));
6431 if (ret) {
6432 pr_err("copy_from_user failed, ret= %d\n", ret);
6433 break;
6434 }
6435
6436 if (req.in_buf == NULL || req.out_buf == NULL ||
6437 req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
6438 req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
6439 req.direction > 1) {
6440 pr_err("invalid parameters\n");
6441 ret = -EINVAL;
6442 break;
6443 }
6444
6445 /* Copy the input buffer from userspace to kernel space */
6446 tzbuflenin = PAGE_ALIGN(req.in_buf_size);
6447 tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
6448 if (!tzbufin) {
6449 pr_err("error allocating in buffer\n");
6450 ret = -ENOMEM;
6451 break;
6452 }
6453
6454 ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
6455 if (ret) {
6456 pr_err("copy_from_user failed, ret=%d\n", ret);
6457 break;
6458 }
6459
6460 dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
6461
6462 /* Prepare the output buffer in kernel space */
6463 tzbuflenout = PAGE_ALIGN(req.out_buf_size);
6464 tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
6465 if (!tzbufout) {
6466 pr_err("error allocating out buffer\n");
6467 ret = -ENOMEM;
6468 break;
6469 }
6470
6471 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6472
6473 /* Send the command to TZ */
6474 desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
6475 desc.args[0] = virt_to_phys(tzbufin);
6476 desc.args[1] = req.in_buf_size;
6477 desc.args[2] = virt_to_phys(tzbufout);
6478 desc.args[3] = req.out_buf_size;
6479 desc.args[4] = req.direction;
6480
6481 ret = __qseecom_enable_clk(CLK_QSEE);
6482 if (ret)
6483 break;
6484
Zhen Kong03f220d2019-02-01 17:12:34 -08006485 ret = __qseecom_scm_call2_locked(TZ_MDTP_CIPHER_DIP_ID, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006486
6487 __qseecom_disable_clk(CLK_QSEE);
6488
6489 if (ret) {
6490 pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
6491 ret);
6492 break;
6493 }
6494
6495 /* Copy the output buffer from kernel space to userspace */
6496 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6497 ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
6498 if (ret) {
6499 pr_err("copy_to_user failed, ret=%d\n", ret);
6500 break;
6501 }
6502 } while (0);
6503
6504 kzfree(tzbufin);
6505 kzfree(tzbufout);
6506
6507 return ret;
6508}
6509
6510static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
6511 struct qseecom_qteec_req *req)
6512{
6513 if (!data || !data->client.ihandle) {
6514 pr_err("Client or client handle is not initialized\n");
6515 return -EINVAL;
6516 }
6517
6518 if (data->type != QSEECOM_CLIENT_APP)
6519 return -EFAULT;
6520
6521 if (req->req_len > UINT_MAX - req->resp_len) {
6522 pr_err("Integer overflow detected in req_len & rsp_len\n");
6523 return -EINVAL;
6524 }
6525
6526 if (req->req_len + req->resp_len > data->client.sb_length) {
6527 pr_debug("Not enough memory to fit cmd_buf.\n");
6528 pr_debug("resp_buf. Required: %u, Available: %zu\n",
6529 (req->req_len + req->resp_len), data->client.sb_length);
6530 return -ENOMEM;
6531 }
6532
6533 if (req->req_ptr == NULL || req->resp_ptr == NULL) {
6534 pr_err("cmd buffer or response buffer is null\n");
6535 return -EINVAL;
6536 }
6537 if (((uintptr_t)req->req_ptr <
6538 data->client.user_virt_sb_base) ||
6539 ((uintptr_t)req->req_ptr >=
6540 (data->client.user_virt_sb_base + data->client.sb_length))) {
6541 pr_err("cmd buffer address not within shared bufffer\n");
6542 return -EINVAL;
6543 }
6544
6545 if (((uintptr_t)req->resp_ptr <
6546 data->client.user_virt_sb_base) ||
6547 ((uintptr_t)req->resp_ptr >=
6548 (data->client.user_virt_sb_base + data->client.sb_length))) {
6549 pr_err("response buffer address not within shared bufffer\n");
6550 return -EINVAL;
6551 }
6552
6553 if ((req->req_len == 0) || (req->resp_len == 0)) {
6554 pr_err("cmd buf lengtgh/response buf length not valid\n");
6555 return -EINVAL;
6556 }
6557
6558 if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
6559 pr_err("Integer overflow in req_len & req_ptr\n");
6560 return -EINVAL;
6561 }
6562
6563 if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
6564 pr_err("Integer overflow in resp_len & resp_ptr\n");
6565 return -EINVAL;
6566 }
6567
6568 if (data->client.user_virt_sb_base >
6569 (ULONG_MAX - data->client.sb_length)) {
6570 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
6571 return -EINVAL;
6572 }
6573 if ((((uintptr_t)req->req_ptr + req->req_len) >
6574 ((uintptr_t)data->client.user_virt_sb_base +
6575 data->client.sb_length)) ||
6576 (((uintptr_t)req->resp_ptr + req->resp_len) >
6577 ((uintptr_t)data->client.user_virt_sb_base +
6578 data->client.sb_length))) {
6579 pr_err("cmd buf or resp buf is out of shared buffer region\n");
6580 return -EINVAL;
6581 }
6582 return 0;
6583}
6584
6585static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6586 uint32_t fd_idx, struct sg_table *sg_ptr)
6587{
6588 struct scatterlist *sg = sg_ptr->sgl;
6589 struct qseecom_sg_entry *sg_entry;
6590 void *buf;
6591 uint i;
6592 size_t size;
6593 dma_addr_t coh_pmem;
6594
6595 if (fd_idx >= MAX_ION_FD) {
6596 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6597 return -ENOMEM;
6598 }
6599 /*
6600 * Allocate a buffer, populate it with number of entry plus
6601 * each sg entry's phy addr and length; then return the
6602 * phy_addr of the buffer.
6603 */
6604 size = sizeof(uint32_t) +
6605 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6606 size = (size + PAGE_SIZE) & PAGE_MASK;
6607 buf = dma_alloc_coherent(qseecom.pdev,
6608 size, &coh_pmem, GFP_KERNEL);
6609 if (buf == NULL) {
6610 pr_err("failed to alloc memory for sg buf\n");
6611 return -ENOMEM;
6612 }
6613 *(uint32_t *)buf = sg_ptr->nents;
6614 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6615 for (i = 0; i < sg_ptr->nents; i++) {
6616 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6617 sg_entry->len = sg->length;
6618 sg_entry++;
6619 sg = sg_next(sg);
6620 }
6621 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6622 data->client.sec_buf_fd[fd_idx].vbase = buf;
6623 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6624 data->client.sec_buf_fd[fd_idx].size = size;
6625 return 0;
6626}
6627
6628static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6629 struct qseecom_dev_handle *data, bool cleanup)
6630{
6631 struct ion_handle *ihandle;
6632 int ret = 0;
6633 int i = 0;
6634 uint32_t *update;
6635 struct sg_table *sg_ptr = NULL;
6636 struct scatterlist *sg;
6637 struct qseecom_param_memref *memref;
6638
6639 if (req == NULL) {
6640 pr_err("Invalid address\n");
6641 return -EINVAL;
6642 }
6643 for (i = 0; i < MAX_ION_FD; i++) {
6644 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006645 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006646 req->ifd_data[i].fd);
6647 if (IS_ERR_OR_NULL(ihandle)) {
6648 pr_err("Ion client can't retrieve the handle\n");
6649 return -ENOMEM;
6650 }
6651 if ((req->req_len < sizeof(uint32_t)) ||
6652 (req->ifd_data[i].cmd_buf_offset >
6653 req->req_len - sizeof(uint32_t))) {
6654 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6655 req->req_len,
6656 req->ifd_data[i].cmd_buf_offset);
6657 return -EINVAL;
6658 }
6659 update = (uint32_t *)((char *) req->req_ptr +
6660 req->ifd_data[i].cmd_buf_offset);
6661 if (!update) {
6662 pr_err("update pointer is NULL\n");
6663 return -EINVAL;
6664 }
6665 } else {
6666 continue;
6667 }
6668 /* Populate the cmd data structure with the phys_addr */
6669 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6670 if (IS_ERR_OR_NULL(sg_ptr)) {
6671 pr_err("IOn client could not retrieve sg table\n");
6672 goto err;
6673 }
6674 sg = sg_ptr->sgl;
6675 if (sg == NULL) {
6676 pr_err("sg is NULL\n");
6677 goto err;
6678 }
6679 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6680 pr_err("Num of scat entr (%d)or length(%d) invalid\n",
6681 sg_ptr->nents, sg->length);
6682 goto err;
6683 }
6684 /* clean up buf for pre-allocated fd */
6685 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6686 (*update)) {
6687 if (data->client.sec_buf_fd[i].vbase)
6688 dma_free_coherent(qseecom.pdev,
6689 data->client.sec_buf_fd[i].size,
6690 data->client.sec_buf_fd[i].vbase,
6691 data->client.sec_buf_fd[i].pbase);
6692 memset((void *)update, 0,
6693 sizeof(struct qseecom_param_memref));
6694 memset(&(data->client.sec_buf_fd[i]), 0,
6695 sizeof(struct qseecom_sec_buf_fd_info));
6696 goto clean;
6697 }
6698
6699 if (*update == 0) {
6700 /* update buf for pre-allocated fd from secure heap*/
6701 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6702 sg_ptr);
6703 if (ret) {
6704 pr_err("Failed to handle buf for fd[%d]\n", i);
6705 goto err;
6706 }
6707 memref = (struct qseecom_param_memref *)update;
6708 memref->buffer =
6709 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6710 memref->size =
6711 (uint32_t)(data->client.sec_buf_fd[i].size);
6712 } else {
6713 /* update buf for fd from non-secure qseecom heap */
6714 if (sg_ptr->nents != 1) {
6715 pr_err("Num of scat entr (%d) invalid\n",
6716 sg_ptr->nents);
6717 goto err;
6718 }
6719 if (cleanup)
6720 *update = 0;
6721 else
6722 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6723 }
6724clean:
6725 if (cleanup) {
6726 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6727 ihandle, NULL, sg->length,
6728 ION_IOC_INV_CACHES);
6729 if (ret) {
6730 pr_err("cache operation failed %d\n", ret);
6731 goto err;
6732 }
6733 } else {
6734 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6735 ihandle, NULL, sg->length,
6736 ION_IOC_CLEAN_INV_CACHES);
6737 if (ret) {
6738 pr_err("cache operation failed %d\n", ret);
6739 goto err;
6740 }
6741 data->sglistinfo_ptr[i].indexAndFlags =
6742 SGLISTINFO_SET_INDEX_FLAG(
6743 (sg_ptr->nents == 1), 0,
6744 req->ifd_data[i].cmd_buf_offset);
6745 data->sglistinfo_ptr[i].sizeOrCount =
6746 (sg_ptr->nents == 1) ?
6747 sg->length : sg_ptr->nents;
6748 data->sglist_cnt = i + 1;
6749 }
6750 /* Deallocate the handle */
6751 if (!IS_ERR_OR_NULL(ihandle))
6752 ion_free(qseecom.ion_clnt, ihandle);
6753 }
6754 return ret;
6755err:
6756 if (!IS_ERR_OR_NULL(ihandle))
6757 ion_free(qseecom.ion_clnt, ihandle);
6758 return -ENOMEM;
6759}
6760
6761static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
6762 struct qseecom_qteec_req *req, uint32_t cmd_id)
6763{
6764 struct qseecom_command_scm_resp resp;
6765 struct qseecom_qteec_ireq ireq;
6766 struct qseecom_qteec_64bit_ireq ireq_64bit;
6767 struct qseecom_registered_app_list *ptr_app;
6768 bool found_app = false;
6769 unsigned long flags;
6770 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07006771 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006772 uint32_t reqd_len_sb_in = 0;
6773 void *cmd_buf = NULL;
6774 size_t cmd_len;
6775 struct sglist_info *table = data->sglistinfo_ptr;
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306776 void *req_ptr = NULL;
6777 void *resp_ptr = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006778
6779 ret = __qseecom_qteec_validate_msg(data, req);
6780 if (ret)
6781 return ret;
6782
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306783 req_ptr = req->req_ptr;
6784 resp_ptr = req->resp_ptr;
6785
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006786 /* find app_id & img_name from list */
6787 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6788 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6789 list) {
6790 if ((ptr_app->app_id == data->client.app_id) &&
6791 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6792 found_app = true;
6793 break;
6794 }
6795 }
6796 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6797 if (!found_app) {
6798 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6799 (char *)data->client.app_name);
6800 return -ENOENT;
6801 }
6802
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306803 req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6804 (uintptr_t)req->req_ptr);
6805 req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6806 (uintptr_t)req->resp_ptr);
6807
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006808 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6809 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
6810 ret = __qseecom_update_qteec_req_buf(
6811 (struct qseecom_qteec_modfd_req *)req, data, false);
6812 if (ret)
6813 return ret;
6814 }
6815
6816 if (qseecom.qsee_version < QSEE_VERSION_40) {
6817 ireq.app_id = data->client.app_id;
6818 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306819 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006820 ireq.req_len = req->req_len;
6821 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306822 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006823 ireq.resp_len = req->resp_len;
6824 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6825 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6826 dmac_flush_range((void *)table,
6827 (void *)table + SGLISTINFO_TABLE_SIZE);
6828 cmd_buf = (void *)&ireq;
6829 cmd_len = sizeof(struct qseecom_qteec_ireq);
6830 } else {
6831 ireq_64bit.app_id = data->client.app_id;
6832 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306833 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006834 ireq_64bit.req_len = req->req_len;
6835 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306836 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006837 ireq_64bit.resp_len = req->resp_len;
6838 if ((data->client.app_arch == ELFCLASS32) &&
6839 ((ireq_64bit.req_ptr >=
6840 PHY_ADDR_4G - ireq_64bit.req_len) ||
6841 (ireq_64bit.resp_ptr >=
6842 PHY_ADDR_4G - ireq_64bit.resp_len))){
6843 pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
6844 data->client.app_name, data->client.app_id);
6845 pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
6846 ireq_64bit.req_ptr, ireq_64bit.req_len,
6847 ireq_64bit.resp_ptr, ireq_64bit.resp_len);
6848 return -EFAULT;
6849 }
6850 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6851 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6852 dmac_flush_range((void *)table,
6853 (void *)table + SGLISTINFO_TABLE_SIZE);
6854 cmd_buf = (void *)&ireq_64bit;
6855 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6856 }
6857 if (qseecom.whitelist_support == true
6858 && cmd_id == QSEOS_TEE_OPEN_SESSION)
6859 *(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
6860 else
6861 *(uint32_t *)cmd_buf = cmd_id;
6862
6863 reqd_len_sb_in = req->req_len + req->resp_len;
6864 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6865 data->client.sb_virt,
6866 reqd_len_sb_in,
6867 ION_IOC_CLEAN_INV_CACHES);
6868 if (ret) {
6869 pr_err("cache operation failed %d\n", ret);
6870 return ret;
6871 }
6872
6873 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6874
6875 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6876 cmd_buf, cmd_len,
6877 &resp, sizeof(resp));
6878 if (ret) {
6879 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6880 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07006881 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006882 }
6883
6884 if (qseecom.qsee_reentrancy_support) {
6885 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07006886 if (ret)
6887 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006888 } else {
6889 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6890 ret = __qseecom_process_incomplete_cmd(data, &resp);
6891 if (ret) {
6892 pr_err("process_incomplete_cmd failed err: %d\n",
6893 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07006894 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006895 }
6896 } else {
6897 if (resp.result != QSEOS_RESULT_SUCCESS) {
6898 pr_err("Response result %d not supported\n",
6899 resp.result);
6900 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07006901 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006902 }
6903 }
6904 }
Zhen Kong4af480e2017-09-19 14:34:16 -07006905exit:
6906 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006907 data->client.sb_virt, data->client.sb_length,
6908 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07006909 if (ret2) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006910 pr_err("cache operation failed %d\n", ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07006911 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006912 }
6913
6914 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6915 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
Zhen Kong4af480e2017-09-19 14:34:16 -07006916 ret2 = __qseecom_update_qteec_req_buf(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006917 (struct qseecom_qteec_modfd_req *)req, data, true);
Zhen Kong4af480e2017-09-19 14:34:16 -07006918 if (ret2)
6919 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006920 }
Zhen Kong4af480e2017-09-19 14:34:16 -07006921 return ret;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006922}
6923
6924static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6925 void __user *argp)
6926{
6927 struct qseecom_qteec_modfd_req req;
6928 int ret = 0;
6929
6930 ret = copy_from_user(&req, argp,
6931 sizeof(struct qseecom_qteec_modfd_req));
6932 if (ret) {
6933 pr_err("copy_from_user failed\n");
6934 return ret;
6935 }
6936 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6937 QSEOS_TEE_OPEN_SESSION);
6938
6939 return ret;
6940}
6941
6942static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6943 void __user *argp)
6944{
6945 struct qseecom_qteec_req req;
6946 int ret = 0;
6947
6948 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6949 if (ret) {
6950 pr_err("copy_from_user failed\n");
6951 return ret;
6952 }
6953 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6954 return ret;
6955}
6956
6957static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
6958 void __user *argp)
6959{
6960 struct qseecom_qteec_modfd_req req;
6961 struct qseecom_command_scm_resp resp;
6962 struct qseecom_qteec_ireq ireq;
6963 struct qseecom_qteec_64bit_ireq ireq_64bit;
6964 struct qseecom_registered_app_list *ptr_app;
6965 bool found_app = false;
6966 unsigned long flags;
6967 int ret = 0;
6968 int i = 0;
6969 uint32_t reqd_len_sb_in = 0;
6970 void *cmd_buf = NULL;
6971 size_t cmd_len;
6972 struct sglist_info *table = data->sglistinfo_ptr;
6973 void *req_ptr = NULL;
6974 void *resp_ptr = NULL;
6975
6976 ret = copy_from_user(&req, argp,
6977 sizeof(struct qseecom_qteec_modfd_req));
6978 if (ret) {
6979 pr_err("copy_from_user failed\n");
6980 return ret;
6981 }
6982 ret = __qseecom_qteec_validate_msg(data,
6983 (struct qseecom_qteec_req *)(&req));
6984 if (ret)
6985 return ret;
6986 req_ptr = req.req_ptr;
6987 resp_ptr = req.resp_ptr;
6988
6989 /* find app_id & img_name from list */
6990 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6991 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6992 list) {
6993 if ((ptr_app->app_id == data->client.app_id) &&
6994 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6995 found_app = true;
6996 break;
6997 }
6998 }
6999 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
7000 if (!found_app) {
7001 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
7002 (char *)data->client.app_name);
7003 return -ENOENT;
7004 }
7005
7006 /* validate offsets */
7007 for (i = 0; i < MAX_ION_FD; i++) {
7008 if (req.ifd_data[i].fd) {
7009 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
7010 return -EINVAL;
7011 }
7012 }
7013 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
7014 (uintptr_t)req.req_ptr);
7015 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
7016 (uintptr_t)req.resp_ptr);
7017 ret = __qseecom_update_qteec_req_buf(&req, data, false);
7018 if (ret)
7019 return ret;
7020
7021 if (qseecom.qsee_version < QSEE_VERSION_40) {
7022 ireq.app_id = data->client.app_id;
7023 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7024 (uintptr_t)req_ptr);
7025 ireq.req_len = req.req_len;
7026 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7027 (uintptr_t)resp_ptr);
7028 ireq.resp_len = req.resp_len;
7029 cmd_buf = (void *)&ireq;
7030 cmd_len = sizeof(struct qseecom_qteec_ireq);
7031 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
7032 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7033 dmac_flush_range((void *)table,
7034 (void *)table + SGLISTINFO_TABLE_SIZE);
7035 } else {
7036 ireq_64bit.app_id = data->client.app_id;
7037 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7038 (uintptr_t)req_ptr);
7039 ireq_64bit.req_len = req.req_len;
7040 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7041 (uintptr_t)resp_ptr);
7042 ireq_64bit.resp_len = req.resp_len;
7043 cmd_buf = (void *)&ireq_64bit;
7044 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
7045 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
7046 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7047 dmac_flush_range((void *)table,
7048 (void *)table + SGLISTINFO_TABLE_SIZE);
7049 }
7050 reqd_len_sb_in = req.req_len + req.resp_len;
7051 if (qseecom.whitelist_support == true)
7052 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
7053 else
7054 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
7055
7056 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7057 data->client.sb_virt,
7058 reqd_len_sb_in,
7059 ION_IOC_CLEAN_INV_CACHES);
7060 if (ret) {
7061 pr_err("cache operation failed %d\n", ret);
7062 return ret;
7063 }
7064
7065 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
7066
7067 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
7068 cmd_buf, cmd_len,
7069 &resp, sizeof(resp));
7070 if (ret) {
7071 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
7072 ret, data->client.app_id);
7073 return ret;
7074 }
7075
7076 if (qseecom.qsee_reentrancy_support) {
7077 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
7078 } else {
7079 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
7080 ret = __qseecom_process_incomplete_cmd(data, &resp);
7081 if (ret) {
7082 pr_err("process_incomplete_cmd failed err: %d\n",
7083 ret);
7084 return ret;
7085 }
7086 } else {
7087 if (resp.result != QSEOS_RESULT_SUCCESS) {
7088 pr_err("Response result %d not supported\n",
7089 resp.result);
7090 ret = -EINVAL;
7091 }
7092 }
7093 }
7094 ret = __qseecom_update_qteec_req_buf(&req, data, true);
7095 if (ret)
7096 return ret;
7097
7098 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7099 data->client.sb_virt, data->client.sb_length,
7100 ION_IOC_INV_CACHES);
7101 if (ret) {
7102 pr_err("cache operation failed %d\n", ret);
7103 return ret;
7104 }
7105 return 0;
7106}
7107
7108static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
7109 void __user *argp)
7110{
7111 struct qseecom_qteec_modfd_req req;
7112 int ret = 0;
7113
7114 ret = copy_from_user(&req, argp,
7115 sizeof(struct qseecom_qteec_modfd_req));
7116 if (ret) {
7117 pr_err("copy_from_user failed\n");
7118 return ret;
7119 }
7120 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
7121 QSEOS_TEE_REQUEST_CANCELLATION);
7122
7123 return ret;
7124}
7125
7126static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
7127{
7128 if (data->sglist_cnt) {
7129 memset(data->sglistinfo_ptr, 0,
7130 SGLISTINFO_TABLE_SIZE);
7131 data->sglist_cnt = 0;
7132 }
7133}
7134
AnilKumar Chimataa312d342019-01-25 12:43:23 +05307135static long qseecom_ioctl(struct file *file,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007136 unsigned int cmd, unsigned long arg)
7137{
7138 int ret = 0;
7139 struct qseecom_dev_handle *data = file->private_data;
7140 void __user *argp = (void __user *) arg;
7141 bool perf_enabled = false;
7142
7143 if (!data) {
7144 pr_err("Invalid/uninitialized device handle\n");
7145 return -EINVAL;
7146 }
7147
7148 if (data->abort) {
7149 pr_err("Aborting qseecom driver\n");
7150 return -ENODEV;
7151 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007152 if (cmd != QSEECOM_IOCTL_RECEIVE_REQ &&
7153 cmd != QSEECOM_IOCTL_SEND_RESP_REQ &&
7154 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP &&
7155 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64)
Zhen Kongc4c162a2019-01-23 12:07:12 -08007156 __wakeup_unregister_listener_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007157
7158 switch (cmd) {
7159 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
7160 if (data->type != QSEECOM_GENERIC) {
7161 pr_err("reg lstnr req: invalid handle (%d)\n",
7162 data->type);
7163 ret = -EINVAL;
7164 break;
7165 }
7166 pr_debug("ioctl register_listener_req()\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08007167 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007168 atomic_inc(&data->ioctl_count);
7169 data->type = QSEECOM_LISTENER_SERVICE;
7170 ret = qseecom_register_listener(data, argp);
7171 atomic_dec(&data->ioctl_count);
7172 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007173 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007174 if (ret)
7175 pr_err("failed qseecom_register_listener: %d\n", ret);
7176 break;
7177 }
Neeraj Sonib30ac1f2018-04-17 14:48:42 +05307178 case QSEECOM_IOCTL_SET_ICE_INFO: {
7179 struct qseecom_ice_data_t ice_data;
7180
7181 ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
7182 if (ret) {
7183 pr_err("copy_from_user failed\n");
7184 return -EFAULT;
7185 }
7186 qcom_ice_set_fde_flag(ice_data.flag);
7187 break;
7188 }
7189
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007190 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
7191 if ((data->listener.id == 0) ||
7192 (data->type != QSEECOM_LISTENER_SERVICE)) {
7193 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
7194 data->type, data->listener.id);
7195 ret = -EINVAL;
7196 break;
7197 }
7198 pr_debug("ioctl unregister_listener_req()\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08007199 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007200 atomic_inc(&data->ioctl_count);
7201 ret = qseecom_unregister_listener(data);
7202 atomic_dec(&data->ioctl_count);
7203 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007204 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007205 if (ret)
7206 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7207 break;
7208 }
7209 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7210 if ((data->client.app_id == 0) ||
7211 (data->type != QSEECOM_CLIENT_APP)) {
7212 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7213 data->type, data->client.app_id);
7214 ret = -EINVAL;
7215 break;
7216 }
7217 /* Only one client allowed here at a time */
7218 mutex_lock(&app_access_lock);
7219 if (qseecom.support_bus_scaling) {
7220 /* register bus bw in case the client doesn't do it */
7221 if (!data->mode) {
7222 mutex_lock(&qsee_bw_mutex);
7223 __qseecom_register_bus_bandwidth_needs(
7224 data, HIGH);
7225 mutex_unlock(&qsee_bw_mutex);
7226 }
7227 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7228 if (ret) {
7229 pr_err("Failed to set bw.\n");
7230 ret = -EINVAL;
7231 mutex_unlock(&app_access_lock);
7232 break;
7233 }
7234 }
7235 /*
7236 * On targets where crypto clock is handled by HLOS,
7237 * if clk_access_cnt is zero and perf_enabled is false,
7238 * then the crypto clock was not enabled before sending cmd to
7239 * tz, qseecom will enable the clock to avoid service failure.
7240 */
7241 if (!qseecom.no_clock_support &&
7242 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7243 pr_debug("ce clock is not enabled!\n");
7244 ret = qseecom_perf_enable(data);
7245 if (ret) {
7246 pr_err("Failed to vote for clock with err %d\n",
7247 ret);
7248 mutex_unlock(&app_access_lock);
7249 ret = -EINVAL;
7250 break;
7251 }
7252 perf_enabled = true;
7253 }
7254 atomic_inc(&data->ioctl_count);
7255 ret = qseecom_send_cmd(data, argp);
7256 if (qseecom.support_bus_scaling)
7257 __qseecom_add_bw_scale_down_timer(
7258 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7259 if (perf_enabled) {
7260 qsee_disable_clock_vote(data, CLK_DFAB);
7261 qsee_disable_clock_vote(data, CLK_SFPB);
7262 }
7263 atomic_dec(&data->ioctl_count);
7264 wake_up_all(&data->abort_wq);
7265 mutex_unlock(&app_access_lock);
7266 if (ret)
7267 pr_err("failed qseecom_send_cmd: %d\n", ret);
7268 break;
7269 }
7270 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7271 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7272 if ((data->client.app_id == 0) ||
7273 (data->type != QSEECOM_CLIENT_APP)) {
7274 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7275 data->type, data->client.app_id);
7276 ret = -EINVAL;
7277 break;
7278 }
7279 /* Only one client allowed here at a time */
7280 mutex_lock(&app_access_lock);
7281 if (qseecom.support_bus_scaling) {
7282 if (!data->mode) {
7283 mutex_lock(&qsee_bw_mutex);
7284 __qseecom_register_bus_bandwidth_needs(
7285 data, HIGH);
7286 mutex_unlock(&qsee_bw_mutex);
7287 }
7288 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7289 if (ret) {
7290 pr_err("Failed to set bw.\n");
7291 mutex_unlock(&app_access_lock);
7292 ret = -EINVAL;
7293 break;
7294 }
7295 }
7296 /*
7297 * On targets where crypto clock is handled by HLOS,
7298 * if clk_access_cnt is zero and perf_enabled is false,
7299 * then the crypto clock was not enabled before sending cmd to
7300 * tz, qseecom will enable the clock to avoid service failure.
7301 */
7302 if (!qseecom.no_clock_support &&
7303 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7304 pr_debug("ce clock is not enabled!\n");
7305 ret = qseecom_perf_enable(data);
7306 if (ret) {
7307 pr_err("Failed to vote for clock with err %d\n",
7308 ret);
7309 mutex_unlock(&app_access_lock);
7310 ret = -EINVAL;
7311 break;
7312 }
7313 perf_enabled = true;
7314 }
7315 atomic_inc(&data->ioctl_count);
7316 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7317 ret = qseecom_send_modfd_cmd(data, argp);
7318 else
7319 ret = qseecom_send_modfd_cmd_64(data, argp);
7320 if (qseecom.support_bus_scaling)
7321 __qseecom_add_bw_scale_down_timer(
7322 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7323 if (perf_enabled) {
7324 qsee_disable_clock_vote(data, CLK_DFAB);
7325 qsee_disable_clock_vote(data, CLK_SFPB);
7326 }
7327 atomic_dec(&data->ioctl_count);
7328 wake_up_all(&data->abort_wq);
7329 mutex_unlock(&app_access_lock);
7330 if (ret)
7331 pr_err("failed qseecom_send_cmd: %d\n", ret);
7332 __qseecom_clean_data_sglistinfo(data);
7333 break;
7334 }
7335 case QSEECOM_IOCTL_RECEIVE_REQ: {
7336 if ((data->listener.id == 0) ||
7337 (data->type != QSEECOM_LISTENER_SERVICE)) {
7338 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7339 data->type, data->listener.id);
7340 ret = -EINVAL;
7341 break;
7342 }
7343 atomic_inc(&data->ioctl_count);
7344 ret = qseecom_receive_req(data);
7345 atomic_dec(&data->ioctl_count);
7346 wake_up_all(&data->abort_wq);
7347 if (ret && (ret != -ERESTARTSYS))
7348 pr_err("failed qseecom_receive_req: %d\n", ret);
7349 break;
7350 }
7351 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7352 if ((data->listener.id == 0) ||
7353 (data->type != QSEECOM_LISTENER_SERVICE)) {
7354 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7355 data->type, data->listener.id);
7356 ret = -EINVAL;
7357 break;
7358 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007359 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007360 atomic_inc(&data->ioctl_count);
7361 if (!qseecom.qsee_reentrancy_support)
7362 ret = qseecom_send_resp();
7363 else
7364 ret = qseecom_reentrancy_send_resp(data);
7365 atomic_dec(&data->ioctl_count);
7366 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007367 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007368 if (ret)
7369 pr_err("failed qseecom_send_resp: %d\n", ret);
7370 break;
7371 }
7372 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7373 if ((data->type != QSEECOM_CLIENT_APP) &&
7374 (data->type != QSEECOM_GENERIC) &&
7375 (data->type != QSEECOM_SECURE_SERVICE)) {
7376 pr_err("set mem param req: invalid handle (%d)\n",
7377 data->type);
7378 ret = -EINVAL;
7379 break;
7380 }
7381 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7382 mutex_lock(&app_access_lock);
7383 atomic_inc(&data->ioctl_count);
7384 ret = qseecom_set_client_mem_param(data, argp);
7385 atomic_dec(&data->ioctl_count);
7386 mutex_unlock(&app_access_lock);
7387 if (ret)
7388 pr_err("failed Qqseecom_set_mem_param request: %d\n",
7389 ret);
7390 break;
7391 }
7392 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7393 if ((data->type != QSEECOM_GENERIC) &&
7394 (data->type != QSEECOM_CLIENT_APP)) {
7395 pr_err("load app req: invalid handle (%d)\n",
7396 data->type);
7397 ret = -EINVAL;
7398 break;
7399 }
7400 data->type = QSEECOM_CLIENT_APP;
7401 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7402 mutex_lock(&app_access_lock);
7403 atomic_inc(&data->ioctl_count);
7404 ret = qseecom_load_app(data, argp);
7405 atomic_dec(&data->ioctl_count);
7406 mutex_unlock(&app_access_lock);
7407 if (ret)
7408 pr_err("failed load_app request: %d\n", ret);
7409 break;
7410 }
7411 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7412 if ((data->client.app_id == 0) ||
7413 (data->type != QSEECOM_CLIENT_APP)) {
7414 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7415 data->type, data->client.app_id);
7416 ret = -EINVAL;
7417 break;
7418 }
7419 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7420 mutex_lock(&app_access_lock);
7421 atomic_inc(&data->ioctl_count);
7422 ret = qseecom_unload_app(data, false);
7423 atomic_dec(&data->ioctl_count);
7424 mutex_unlock(&app_access_lock);
7425 if (ret)
7426 pr_err("failed unload_app request: %d\n", ret);
7427 break;
7428 }
7429 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7430 atomic_inc(&data->ioctl_count);
7431 ret = qseecom_get_qseos_version(data, argp);
7432 if (ret)
7433 pr_err("qseecom_get_qseos_version: %d\n", ret);
7434 atomic_dec(&data->ioctl_count);
7435 break;
7436 }
7437 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7438 if ((data->type != QSEECOM_GENERIC) &&
7439 (data->type != QSEECOM_CLIENT_APP)) {
7440 pr_err("perf enable req: invalid handle (%d)\n",
7441 data->type);
7442 ret = -EINVAL;
7443 break;
7444 }
7445 if ((data->type == QSEECOM_CLIENT_APP) &&
7446 (data->client.app_id == 0)) {
7447 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7448 data->type, data->client.app_id);
7449 ret = -EINVAL;
7450 break;
7451 }
7452 atomic_inc(&data->ioctl_count);
7453 if (qseecom.support_bus_scaling) {
7454 mutex_lock(&qsee_bw_mutex);
7455 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7456 mutex_unlock(&qsee_bw_mutex);
7457 } else {
7458 ret = qseecom_perf_enable(data);
7459 if (ret)
7460 pr_err("Fail to vote for clocks %d\n", ret);
7461 }
7462 atomic_dec(&data->ioctl_count);
7463 break;
7464 }
7465 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7466 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7467 (data->type != QSEECOM_CLIENT_APP)) {
7468 pr_err("perf disable req: invalid handle (%d)\n",
7469 data->type);
7470 ret = -EINVAL;
7471 break;
7472 }
7473 if ((data->type == QSEECOM_CLIENT_APP) &&
7474 (data->client.app_id == 0)) {
7475 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7476 data->type, data->client.app_id);
7477 ret = -EINVAL;
7478 break;
7479 }
7480 atomic_inc(&data->ioctl_count);
7481 if (!qseecom.support_bus_scaling) {
7482 qsee_disable_clock_vote(data, CLK_DFAB);
7483 qsee_disable_clock_vote(data, CLK_SFPB);
7484 } else {
7485 mutex_lock(&qsee_bw_mutex);
7486 qseecom_unregister_bus_bandwidth_needs(data);
7487 mutex_unlock(&qsee_bw_mutex);
7488 }
7489 atomic_dec(&data->ioctl_count);
7490 break;
7491 }
7492
7493 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7494 /* If crypto clock is not handled by HLOS, return directly. */
7495 if (qseecom.no_clock_support) {
7496 pr_debug("crypto clock is not handled by HLOS\n");
7497 break;
7498 }
7499 if ((data->client.app_id == 0) ||
7500 (data->type != QSEECOM_CLIENT_APP)) {
7501 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7502 data->type, data->client.app_id);
7503 ret = -EINVAL;
7504 break;
7505 }
7506 atomic_inc(&data->ioctl_count);
7507 ret = qseecom_scale_bus_bandwidth(data, argp);
7508 atomic_dec(&data->ioctl_count);
7509 break;
7510 }
7511 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7512 if (data->type != QSEECOM_GENERIC) {
7513 pr_err("load ext elf req: invalid client handle (%d)\n",
7514 data->type);
7515 ret = -EINVAL;
7516 break;
7517 }
7518 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7519 data->released = true;
7520 mutex_lock(&app_access_lock);
7521 atomic_inc(&data->ioctl_count);
7522 ret = qseecom_load_external_elf(data, argp);
7523 atomic_dec(&data->ioctl_count);
7524 mutex_unlock(&app_access_lock);
7525 if (ret)
7526 pr_err("failed load_external_elf request: %d\n", ret);
7527 break;
7528 }
7529 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7530 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7531 pr_err("unload ext elf req: invalid handle (%d)\n",
7532 data->type);
7533 ret = -EINVAL;
7534 break;
7535 }
7536 data->released = true;
7537 mutex_lock(&app_access_lock);
7538 atomic_inc(&data->ioctl_count);
7539 ret = qseecom_unload_external_elf(data);
7540 atomic_dec(&data->ioctl_count);
7541 mutex_unlock(&app_access_lock);
7542 if (ret)
7543 pr_err("failed unload_app request: %d\n", ret);
7544 break;
7545 }
7546 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7547 data->type = QSEECOM_CLIENT_APP;
7548 mutex_lock(&app_access_lock);
7549 atomic_inc(&data->ioctl_count);
7550 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7551 ret = qseecom_query_app_loaded(data, argp);
7552 atomic_dec(&data->ioctl_count);
7553 mutex_unlock(&app_access_lock);
7554 break;
7555 }
7556 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7557 if (data->type != QSEECOM_GENERIC) {
7558 pr_err("send cmd svc req: invalid handle (%d)\n",
7559 data->type);
7560 ret = -EINVAL;
7561 break;
7562 }
7563 data->type = QSEECOM_SECURE_SERVICE;
7564 if (qseecom.qsee_version < QSEE_VERSION_03) {
7565 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7566 qseecom.qsee_version);
7567 return -EINVAL;
7568 }
7569 mutex_lock(&app_access_lock);
7570 atomic_inc(&data->ioctl_count);
7571 ret = qseecom_send_service_cmd(data, argp);
7572 atomic_dec(&data->ioctl_count);
7573 mutex_unlock(&app_access_lock);
7574 break;
7575 }
7576 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7577 if (!(qseecom.support_pfe || qseecom.support_fde))
7578 pr_err("Features requiring key init not supported\n");
7579 if (data->type != QSEECOM_GENERIC) {
7580 pr_err("create key req: invalid handle (%d)\n",
7581 data->type);
7582 ret = -EINVAL;
7583 break;
7584 }
7585 if (qseecom.qsee_version < QSEE_VERSION_05) {
7586 pr_err("Create Key feature unsupported: qsee ver %u\n",
7587 qseecom.qsee_version);
7588 return -EINVAL;
7589 }
7590 data->released = true;
7591 mutex_lock(&app_access_lock);
7592 atomic_inc(&data->ioctl_count);
7593 ret = qseecom_create_key(data, argp);
7594 if (ret)
7595 pr_err("failed to create encryption key: %d\n", ret);
7596
7597 atomic_dec(&data->ioctl_count);
7598 mutex_unlock(&app_access_lock);
7599 break;
7600 }
7601 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7602 if (!(qseecom.support_pfe || qseecom.support_fde))
7603 pr_err("Features requiring key init not supported\n");
7604 if (data->type != QSEECOM_GENERIC) {
7605 pr_err("wipe key req: invalid handle (%d)\n",
7606 data->type);
7607 ret = -EINVAL;
7608 break;
7609 }
7610 if (qseecom.qsee_version < QSEE_VERSION_05) {
7611 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7612 qseecom.qsee_version);
7613 return -EINVAL;
7614 }
7615 data->released = true;
7616 mutex_lock(&app_access_lock);
7617 atomic_inc(&data->ioctl_count);
7618 ret = qseecom_wipe_key(data, argp);
7619 if (ret)
7620 pr_err("failed to wipe encryption key: %d\n", ret);
7621 atomic_dec(&data->ioctl_count);
7622 mutex_unlock(&app_access_lock);
7623 break;
7624 }
7625 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7626 if (!(qseecom.support_pfe || qseecom.support_fde))
7627 pr_err("Features requiring key init not supported\n");
7628 if (data->type != QSEECOM_GENERIC) {
7629 pr_err("update key req: invalid handle (%d)\n",
7630 data->type);
7631 ret = -EINVAL;
7632 break;
7633 }
7634 if (qseecom.qsee_version < QSEE_VERSION_05) {
7635 pr_err("Update Key feature unsupported in qsee ver %u\n",
7636 qseecom.qsee_version);
7637 return -EINVAL;
7638 }
7639 data->released = true;
7640 mutex_lock(&app_access_lock);
7641 atomic_inc(&data->ioctl_count);
7642 ret = qseecom_update_key_user_info(data, argp);
7643 if (ret)
7644 pr_err("failed to update key user info: %d\n", ret);
7645 atomic_dec(&data->ioctl_count);
7646 mutex_unlock(&app_access_lock);
7647 break;
7648 }
7649 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7650 if (data->type != QSEECOM_GENERIC) {
7651 pr_err("save part hash req: invalid handle (%d)\n",
7652 data->type);
7653 ret = -EINVAL;
7654 break;
7655 }
7656 data->released = true;
7657 mutex_lock(&app_access_lock);
7658 atomic_inc(&data->ioctl_count);
7659 ret = qseecom_save_partition_hash(argp);
7660 atomic_dec(&data->ioctl_count);
7661 mutex_unlock(&app_access_lock);
7662 break;
7663 }
7664 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7665 if (data->type != QSEECOM_GENERIC) {
7666 pr_err("ES activated req: invalid handle (%d)\n",
7667 data->type);
7668 ret = -EINVAL;
7669 break;
7670 }
7671 data->released = true;
7672 mutex_lock(&app_access_lock);
7673 atomic_inc(&data->ioctl_count);
7674 ret = qseecom_is_es_activated(argp);
7675 atomic_dec(&data->ioctl_count);
7676 mutex_unlock(&app_access_lock);
7677 break;
7678 }
7679 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7680 if (data->type != QSEECOM_GENERIC) {
7681 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7682 data->type);
7683 ret = -EINVAL;
7684 break;
7685 }
7686 data->released = true;
7687 mutex_lock(&app_access_lock);
7688 atomic_inc(&data->ioctl_count);
7689 ret = qseecom_mdtp_cipher_dip(argp);
7690 atomic_dec(&data->ioctl_count);
7691 mutex_unlock(&app_access_lock);
7692 break;
7693 }
7694 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7695 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7696 if ((data->listener.id == 0) ||
7697 (data->type != QSEECOM_LISTENER_SERVICE)) {
7698 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7699 data->type, data->listener.id);
7700 ret = -EINVAL;
7701 break;
7702 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007703 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007704 atomic_inc(&data->ioctl_count);
7705 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7706 ret = qseecom_send_modfd_resp(data, argp);
7707 else
7708 ret = qseecom_send_modfd_resp_64(data, argp);
7709 atomic_dec(&data->ioctl_count);
7710 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007711 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007712 if (ret)
7713 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7714 __qseecom_clean_data_sglistinfo(data);
7715 break;
7716 }
7717 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7718 if ((data->client.app_id == 0) ||
7719 (data->type != QSEECOM_CLIENT_APP)) {
7720 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7721 data->type, data->client.app_id);
7722 ret = -EINVAL;
7723 break;
7724 }
7725 if (qseecom.qsee_version < QSEE_VERSION_40) {
7726 pr_err("GP feature unsupported: qsee ver %u\n",
7727 qseecom.qsee_version);
7728 return -EINVAL;
7729 }
7730 /* Only one client allowed here at a time */
7731 mutex_lock(&app_access_lock);
7732 atomic_inc(&data->ioctl_count);
7733 ret = qseecom_qteec_open_session(data, argp);
7734 atomic_dec(&data->ioctl_count);
7735 wake_up_all(&data->abort_wq);
7736 mutex_unlock(&app_access_lock);
7737 if (ret)
7738 pr_err("failed open_session_cmd: %d\n", ret);
7739 __qseecom_clean_data_sglistinfo(data);
7740 break;
7741 }
7742 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7743 if ((data->client.app_id == 0) ||
7744 (data->type != QSEECOM_CLIENT_APP)) {
7745 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7746 data->type, data->client.app_id);
7747 ret = -EINVAL;
7748 break;
7749 }
7750 if (qseecom.qsee_version < QSEE_VERSION_40) {
7751 pr_err("GP feature unsupported: qsee ver %u\n",
7752 qseecom.qsee_version);
7753 return -EINVAL;
7754 }
7755 /* Only one client allowed here at a time */
7756 mutex_lock(&app_access_lock);
7757 atomic_inc(&data->ioctl_count);
7758 ret = qseecom_qteec_close_session(data, argp);
7759 atomic_dec(&data->ioctl_count);
7760 wake_up_all(&data->abort_wq);
7761 mutex_unlock(&app_access_lock);
7762 if (ret)
7763 pr_err("failed close_session_cmd: %d\n", ret);
7764 break;
7765 }
7766 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7767 if ((data->client.app_id == 0) ||
7768 (data->type != QSEECOM_CLIENT_APP)) {
7769 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7770 data->type, data->client.app_id);
7771 ret = -EINVAL;
7772 break;
7773 }
7774 if (qseecom.qsee_version < QSEE_VERSION_40) {
7775 pr_err("GP feature unsupported: qsee ver %u\n",
7776 qseecom.qsee_version);
7777 return -EINVAL;
7778 }
7779 /* Only one client allowed here at a time */
7780 mutex_lock(&app_access_lock);
7781 atomic_inc(&data->ioctl_count);
7782 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7783 atomic_dec(&data->ioctl_count);
7784 wake_up_all(&data->abort_wq);
7785 mutex_unlock(&app_access_lock);
7786 if (ret)
7787 pr_err("failed Invoke cmd: %d\n", ret);
7788 __qseecom_clean_data_sglistinfo(data);
7789 break;
7790 }
7791 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7792 if ((data->client.app_id == 0) ||
7793 (data->type != QSEECOM_CLIENT_APP)) {
7794 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7795 data->type, data->client.app_id);
7796 ret = -EINVAL;
7797 break;
7798 }
7799 if (qseecom.qsee_version < QSEE_VERSION_40) {
7800 pr_err("GP feature unsupported: qsee ver %u\n",
7801 qseecom.qsee_version);
7802 return -EINVAL;
7803 }
7804 /* Only one client allowed here at a time */
7805 mutex_lock(&app_access_lock);
7806 atomic_inc(&data->ioctl_count);
7807 ret = qseecom_qteec_request_cancellation(data, argp);
7808 atomic_dec(&data->ioctl_count);
7809 wake_up_all(&data->abort_wq);
7810 mutex_unlock(&app_access_lock);
7811 if (ret)
7812 pr_err("failed request_cancellation: %d\n", ret);
7813 break;
7814 }
7815 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7816 atomic_inc(&data->ioctl_count);
7817 ret = qseecom_get_ce_info(data, argp);
7818 if (ret)
7819 pr_err("failed get fde ce pipe info: %d\n", ret);
7820 atomic_dec(&data->ioctl_count);
7821 break;
7822 }
7823 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7824 atomic_inc(&data->ioctl_count);
7825 ret = qseecom_free_ce_info(data, argp);
7826 if (ret)
7827 pr_err("failed get fde ce pipe info: %d\n", ret);
7828 atomic_dec(&data->ioctl_count);
7829 break;
7830 }
7831 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7832 atomic_inc(&data->ioctl_count);
7833 ret = qseecom_query_ce_info(data, argp);
7834 if (ret)
7835 pr_err("failed get fde ce pipe info: %d\n", ret);
7836 atomic_dec(&data->ioctl_count);
7837 break;
7838 }
7839 default:
7840 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7841 return -EINVAL;
7842 }
7843 return ret;
7844}
7845
7846static int qseecom_open(struct inode *inode, struct file *file)
7847{
7848 int ret = 0;
7849 struct qseecom_dev_handle *data;
7850
7851 data = kzalloc(sizeof(*data), GFP_KERNEL);
7852 if (!data)
7853 return -ENOMEM;
7854 file->private_data = data;
7855 data->abort = 0;
7856 data->type = QSEECOM_GENERIC;
7857 data->released = false;
7858 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7859 data->mode = INACTIVE;
7860 init_waitqueue_head(&data->abort_wq);
7861 atomic_set(&data->ioctl_count, 0);
7862 return ret;
7863}
7864
7865static int qseecom_release(struct inode *inode, struct file *file)
7866{
7867 struct qseecom_dev_handle *data = file->private_data;
7868 int ret = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08007869 bool free_private_data = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007870
7871 if (data->released == false) {
7872 pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
7873 data->type, data->mode, data);
7874 switch (data->type) {
7875 case QSEECOM_LISTENER_SERVICE:
Zhen Kongbcdeda22018-11-16 13:50:51 -08007876 pr_debug("release lsnr svc %d\n", data->listener.id);
7877 free_private_data = false;
7878 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007879 ret = qseecom_unregister_listener(data);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08007880 data->listener.release_called = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08007881 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007882 break;
7883 case QSEECOM_CLIENT_APP:
7884 mutex_lock(&app_access_lock);
7885 ret = qseecom_unload_app(data, true);
7886 mutex_unlock(&app_access_lock);
7887 break;
7888 case QSEECOM_SECURE_SERVICE:
7889 case QSEECOM_GENERIC:
7890 ret = qseecom_unmap_ion_allocated_memory(data);
7891 if (ret)
7892 pr_err("Ion Unmap failed\n");
7893 break;
7894 case QSEECOM_UNAVAILABLE_CLIENT_APP:
7895 break;
7896 default:
7897 pr_err("Unsupported clnt_handle_type %d",
7898 data->type);
7899 break;
7900 }
7901 }
7902
7903 if (qseecom.support_bus_scaling) {
7904 mutex_lock(&qsee_bw_mutex);
7905 if (data->mode != INACTIVE) {
7906 qseecom_unregister_bus_bandwidth_needs(data);
7907 if (qseecom.cumulative_mode == INACTIVE) {
7908 ret = __qseecom_set_msm_bus_request(INACTIVE);
7909 if (ret)
7910 pr_err("Fail to scale down bus\n");
7911 }
7912 }
7913 mutex_unlock(&qsee_bw_mutex);
7914 } else {
7915 if (data->fast_load_enabled == true)
7916 qsee_disable_clock_vote(data, CLK_SFPB);
7917 if (data->perf_enabled == true)
7918 qsee_disable_clock_vote(data, CLK_DFAB);
7919 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007920
Zhen Kongbcdeda22018-11-16 13:50:51 -08007921 if (free_private_data)
7922 kfree(data);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007923 return ret;
7924}
7925
7926#ifdef CONFIG_COMPAT
7927#include "compat_qseecom.c"
7928#else
7929#define compat_qseecom_ioctl NULL
7930#endif
7931
7932static const struct file_operations qseecom_fops = {
7933 .owner = THIS_MODULE,
7934 .unlocked_ioctl = qseecom_ioctl,
7935 .compat_ioctl = compat_qseecom_ioctl,
7936 .open = qseecom_open,
7937 .release = qseecom_release
7938};
7939
7940static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7941{
7942 int rc = 0;
7943 struct device *pdev;
7944 struct qseecom_clk *qclk;
7945 char *core_clk_src = NULL;
7946 char *core_clk = NULL;
7947 char *iface_clk = NULL;
7948 char *bus_clk = NULL;
7949
7950 switch (ce) {
7951 case CLK_QSEE: {
7952 core_clk_src = "core_clk_src";
7953 core_clk = "core_clk";
7954 iface_clk = "iface_clk";
7955 bus_clk = "bus_clk";
7956 qclk = &qseecom.qsee;
7957 qclk->instance = CLK_QSEE;
7958 break;
7959 };
7960 case CLK_CE_DRV: {
7961 core_clk_src = "ce_drv_core_clk_src";
7962 core_clk = "ce_drv_core_clk";
7963 iface_clk = "ce_drv_iface_clk";
7964 bus_clk = "ce_drv_bus_clk";
7965 qclk = &qseecom.ce_drv;
7966 qclk->instance = CLK_CE_DRV;
7967 break;
7968 };
7969 default:
7970 pr_err("Invalid ce hw instance: %d!\n", ce);
7971 return -EIO;
7972 }
7973
7974 if (qseecom.no_clock_support) {
7975 qclk->ce_core_clk = NULL;
7976 qclk->ce_clk = NULL;
7977 qclk->ce_bus_clk = NULL;
7978 qclk->ce_core_src_clk = NULL;
7979 return 0;
7980 }
7981
7982 pdev = qseecom.pdev;
7983
7984 /* Get CE3 src core clk. */
7985 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7986 if (!IS_ERR(qclk->ce_core_src_clk)) {
7987 rc = clk_set_rate(qclk->ce_core_src_clk,
7988 qseecom.ce_opp_freq_hz);
7989 if (rc) {
7990 clk_put(qclk->ce_core_src_clk);
7991 qclk->ce_core_src_clk = NULL;
7992 pr_err("Unable to set the core src clk @%uMhz.\n",
7993 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7994 return -EIO;
7995 }
7996 } else {
7997 pr_warn("Unable to get CE core src clk, set to NULL\n");
7998 qclk->ce_core_src_clk = NULL;
7999 }
8000
8001 /* Get CE core clk */
8002 qclk->ce_core_clk = clk_get(pdev, core_clk);
8003 if (IS_ERR(qclk->ce_core_clk)) {
8004 rc = PTR_ERR(qclk->ce_core_clk);
8005 pr_err("Unable to get CE core clk\n");
8006 if (qclk->ce_core_src_clk != NULL)
8007 clk_put(qclk->ce_core_src_clk);
8008 return -EIO;
8009 }
8010
8011 /* Get CE Interface clk */
8012 qclk->ce_clk = clk_get(pdev, iface_clk);
8013 if (IS_ERR(qclk->ce_clk)) {
8014 rc = PTR_ERR(qclk->ce_clk);
8015 pr_err("Unable to get CE interface clk\n");
8016 if (qclk->ce_core_src_clk != NULL)
8017 clk_put(qclk->ce_core_src_clk);
8018 clk_put(qclk->ce_core_clk);
8019 return -EIO;
8020 }
8021
8022 /* Get CE AXI clk */
8023 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
8024 if (IS_ERR(qclk->ce_bus_clk)) {
8025 rc = PTR_ERR(qclk->ce_bus_clk);
8026 pr_err("Unable to get CE BUS interface clk\n");
8027 if (qclk->ce_core_src_clk != NULL)
8028 clk_put(qclk->ce_core_src_clk);
8029 clk_put(qclk->ce_core_clk);
8030 clk_put(qclk->ce_clk);
8031 return -EIO;
8032 }
8033
8034 return rc;
8035}
8036
8037static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
8038{
8039 struct qseecom_clk *qclk;
8040
8041 if (ce == CLK_QSEE)
8042 qclk = &qseecom.qsee;
8043 else
8044 qclk = &qseecom.ce_drv;
8045
8046 if (qclk->ce_clk != NULL) {
8047 clk_put(qclk->ce_clk);
8048 qclk->ce_clk = NULL;
8049 }
8050 if (qclk->ce_core_clk != NULL) {
8051 clk_put(qclk->ce_core_clk);
8052 qclk->ce_core_clk = NULL;
8053 }
8054 if (qclk->ce_bus_clk != NULL) {
8055 clk_put(qclk->ce_bus_clk);
8056 qclk->ce_bus_clk = NULL;
8057 }
8058 if (qclk->ce_core_src_clk != NULL) {
8059 clk_put(qclk->ce_core_src_clk);
8060 qclk->ce_core_src_clk = NULL;
8061 }
8062 qclk->instance = CLK_INVALID;
8063}
8064
8065static int qseecom_retrieve_ce_data(struct platform_device *pdev)
8066{
8067 int rc = 0;
8068 uint32_t hlos_num_ce_hw_instances;
8069 uint32_t disk_encrypt_pipe;
8070 uint32_t file_encrypt_pipe;
Zhen Kongffec45c2017-10-18 14:05:53 -07008071 uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008072 int i;
8073 const int *tbl;
8074 int size;
8075 int entry;
8076 struct qseecom_crypto_info *pfde_tbl = NULL;
8077 struct qseecom_crypto_info *p;
8078 int tbl_size;
8079 int j;
8080 bool old_db = true;
8081 struct qseecom_ce_info_use *pce_info_use;
8082 uint32_t *unit_tbl = NULL;
8083 int total_units = 0;
8084 struct qseecom_ce_pipe_entry *pce_entry;
8085
8086 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
8087 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
8088
8089 if (of_property_read_u32((&pdev->dev)->of_node,
8090 "qcom,qsee-ce-hw-instance",
8091 &qseecom.ce_info.qsee_ce_hw_instance)) {
8092 pr_err("Fail to get qsee ce hw instance information.\n");
8093 rc = -EINVAL;
8094 goto out;
8095 } else {
8096 pr_debug("qsee-ce-hw-instance=0x%x\n",
8097 qseecom.ce_info.qsee_ce_hw_instance);
8098 }
8099
8100 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
8101 "qcom,support-fde");
8102 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
8103 "qcom,support-pfe");
8104
8105 if (!qseecom.support_pfe && !qseecom.support_fde) {
8106 pr_warn("Device does not support PFE/FDE");
8107 goto out;
8108 }
8109
8110 if (qseecom.support_fde)
8111 tbl = of_get_property((&pdev->dev)->of_node,
8112 "qcom,full-disk-encrypt-info", &size);
8113 else
8114 tbl = NULL;
8115 if (tbl) {
8116 old_db = false;
8117 if (size % sizeof(struct qseecom_crypto_info)) {
8118 pr_err("full-disk-encrypt-info tbl size(%d)\n",
8119 size);
8120 rc = -EINVAL;
8121 goto out;
8122 }
8123 tbl_size = size / sizeof
8124 (struct qseecom_crypto_info);
8125
8126 pfde_tbl = kzalloc(size, GFP_KERNEL);
8127 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8128 total_units = 0;
8129
8130 if (!pfde_tbl || !unit_tbl) {
8131 pr_err("failed to alloc memory\n");
8132 rc = -ENOMEM;
8133 goto out;
8134 }
8135 if (of_property_read_u32_array((&pdev->dev)->of_node,
8136 "qcom,full-disk-encrypt-info",
8137 (u32 *)pfde_tbl, size/sizeof(u32))) {
8138 pr_err("failed to read full-disk-encrypt-info tbl\n");
8139 rc = -EINVAL;
8140 goto out;
8141 }
8142
8143 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8144 for (j = 0; j < total_units; j++) {
8145 if (p->unit_num == *(unit_tbl + j))
8146 break;
8147 }
8148 if (j == total_units) {
8149 *(unit_tbl + total_units) = p->unit_num;
8150 total_units++;
8151 }
8152 }
8153
8154 qseecom.ce_info.num_fde = total_units;
8155 pce_info_use = qseecom.ce_info.fde = kcalloc(
8156 total_units, sizeof(struct qseecom_ce_info_use),
8157 GFP_KERNEL);
8158 if (!pce_info_use) {
8159 pr_err("failed to alloc memory\n");
8160 rc = -ENOMEM;
8161 goto out;
8162 }
8163
8164 for (j = 0; j < total_units; j++, pce_info_use++) {
8165 pce_info_use->unit_num = *(unit_tbl + j);
8166 pce_info_use->alloc = false;
8167 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8168 pce_info_use->num_ce_pipe_entries = 0;
8169 pce_info_use->ce_pipe_entry = NULL;
8170 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8171 if (p->unit_num == pce_info_use->unit_num)
8172 pce_info_use->num_ce_pipe_entries++;
8173 }
8174
8175 entry = pce_info_use->num_ce_pipe_entries;
8176 pce_entry = pce_info_use->ce_pipe_entry =
8177 kcalloc(entry,
8178 sizeof(struct qseecom_ce_pipe_entry),
8179 GFP_KERNEL);
8180 if (pce_entry == NULL) {
8181 pr_err("failed to alloc memory\n");
8182 rc = -ENOMEM;
8183 goto out;
8184 }
8185
8186 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8187 if (p->unit_num == pce_info_use->unit_num) {
8188 pce_entry->ce_num = p->ce;
8189 pce_entry->ce_pipe_pair =
8190 p->pipe_pair;
8191 pce_entry->valid = true;
8192 pce_entry++;
8193 }
8194 }
8195 }
8196 kfree(unit_tbl);
8197 unit_tbl = NULL;
8198 kfree(pfde_tbl);
8199 pfde_tbl = NULL;
8200 }
8201
8202 if (qseecom.support_pfe)
8203 tbl = of_get_property((&pdev->dev)->of_node,
8204 "qcom,per-file-encrypt-info", &size);
8205 else
8206 tbl = NULL;
8207 if (tbl) {
8208 old_db = false;
8209 if (size % sizeof(struct qseecom_crypto_info)) {
8210 pr_err("per-file-encrypt-info tbl size(%d)\n",
8211 size);
8212 rc = -EINVAL;
8213 goto out;
8214 }
8215 tbl_size = size / sizeof
8216 (struct qseecom_crypto_info);
8217
8218 pfde_tbl = kzalloc(size, GFP_KERNEL);
8219 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8220 total_units = 0;
8221 if (!pfde_tbl || !unit_tbl) {
8222 pr_err("failed to alloc memory\n");
8223 rc = -ENOMEM;
8224 goto out;
8225 }
8226 if (of_property_read_u32_array((&pdev->dev)->of_node,
8227 "qcom,per-file-encrypt-info",
8228 (u32 *)pfde_tbl, size/sizeof(u32))) {
8229 pr_err("failed to read per-file-encrypt-info tbl\n");
8230 rc = -EINVAL;
8231 goto out;
8232 }
8233
8234 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8235 for (j = 0; j < total_units; j++) {
8236 if (p->unit_num == *(unit_tbl + j))
8237 break;
8238 }
8239 if (j == total_units) {
8240 *(unit_tbl + total_units) = p->unit_num;
8241 total_units++;
8242 }
8243 }
8244
8245 qseecom.ce_info.num_pfe = total_units;
8246 pce_info_use = qseecom.ce_info.pfe = kcalloc(
8247 total_units, sizeof(struct qseecom_ce_info_use),
8248 GFP_KERNEL);
8249 if (!pce_info_use) {
8250 pr_err("failed to alloc memory\n");
8251 rc = -ENOMEM;
8252 goto out;
8253 }
8254
8255 for (j = 0; j < total_units; j++, pce_info_use++) {
8256 pce_info_use->unit_num = *(unit_tbl + j);
8257 pce_info_use->alloc = false;
8258 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8259 pce_info_use->num_ce_pipe_entries = 0;
8260 pce_info_use->ce_pipe_entry = NULL;
8261 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8262 if (p->unit_num == pce_info_use->unit_num)
8263 pce_info_use->num_ce_pipe_entries++;
8264 }
8265
8266 entry = pce_info_use->num_ce_pipe_entries;
8267 pce_entry = pce_info_use->ce_pipe_entry =
8268 kcalloc(entry,
8269 sizeof(struct qseecom_ce_pipe_entry),
8270 GFP_KERNEL);
8271 if (pce_entry == NULL) {
8272 pr_err("failed to alloc memory\n");
8273 rc = -ENOMEM;
8274 goto out;
8275 }
8276
8277 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8278 if (p->unit_num == pce_info_use->unit_num) {
8279 pce_entry->ce_num = p->ce;
8280 pce_entry->ce_pipe_pair =
8281 p->pipe_pair;
8282 pce_entry->valid = true;
8283 pce_entry++;
8284 }
8285 }
8286 }
8287 kfree(unit_tbl);
8288 unit_tbl = NULL;
8289 kfree(pfde_tbl);
8290 pfde_tbl = NULL;
8291 }
8292
8293 if (!old_db)
8294 goto out1;
8295
8296 if (of_property_read_bool((&pdev->dev)->of_node,
8297 "qcom,support-multiple-ce-hw-instance")) {
8298 if (of_property_read_u32((&pdev->dev)->of_node,
8299 "qcom,hlos-num-ce-hw-instances",
8300 &hlos_num_ce_hw_instances)) {
8301 pr_err("Fail: get hlos number of ce hw instance\n");
8302 rc = -EINVAL;
8303 goto out;
8304 }
8305 } else {
8306 hlos_num_ce_hw_instances = 1;
8307 }
8308
8309 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8310 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8311 MAX_CE_PIPE_PAIR_PER_UNIT);
8312 rc = -EINVAL;
8313 goto out;
8314 }
8315
8316 if (of_property_read_u32_array((&pdev->dev)->of_node,
8317 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8318 hlos_num_ce_hw_instances)) {
8319 pr_err("Fail: get hlos ce hw instance info\n");
8320 rc = -EINVAL;
8321 goto out;
8322 }
8323
8324 if (qseecom.support_fde) {
8325 pce_info_use = qseecom.ce_info.fde =
8326 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8327 if (!pce_info_use) {
8328 pr_err("failed to alloc memory\n");
8329 rc = -ENOMEM;
8330 goto out;
8331 }
8332 /* by default for old db */
8333 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8334 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8335 pce_info_use->alloc = false;
8336 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8337 pce_info_use->ce_pipe_entry = NULL;
8338 if (of_property_read_u32((&pdev->dev)->of_node,
8339 "qcom,disk-encrypt-pipe-pair",
8340 &disk_encrypt_pipe)) {
8341 pr_err("Fail to get FDE pipe information.\n");
8342 rc = -EINVAL;
8343 goto out;
8344 } else {
8345 pr_debug("disk-encrypt-pipe-pair=0x%x",
8346 disk_encrypt_pipe);
8347 }
8348 entry = pce_info_use->num_ce_pipe_entries =
8349 hlos_num_ce_hw_instances;
8350 pce_entry = pce_info_use->ce_pipe_entry =
8351 kcalloc(entry,
8352 sizeof(struct qseecom_ce_pipe_entry),
8353 GFP_KERNEL);
8354 if (pce_entry == NULL) {
8355 pr_err("failed to alloc memory\n");
8356 rc = -ENOMEM;
8357 goto out;
8358 }
8359 for (i = 0; i < entry; i++) {
8360 pce_entry->ce_num = hlos_ce_hw_instance[i];
8361 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8362 pce_entry->valid = 1;
8363 pce_entry++;
8364 }
8365 } else {
8366 pr_warn("Device does not support FDE");
8367 disk_encrypt_pipe = 0xff;
8368 }
8369 if (qseecom.support_pfe) {
8370 pce_info_use = qseecom.ce_info.pfe =
8371 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8372 if (!pce_info_use) {
8373 pr_err("failed to alloc memory\n");
8374 rc = -ENOMEM;
8375 goto out;
8376 }
8377 /* by default for old db */
8378 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8379 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8380 pce_info_use->alloc = false;
8381 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8382 pce_info_use->ce_pipe_entry = NULL;
8383
8384 if (of_property_read_u32((&pdev->dev)->of_node,
8385 "qcom,file-encrypt-pipe-pair",
8386 &file_encrypt_pipe)) {
8387 pr_err("Fail to get PFE pipe information.\n");
8388 rc = -EINVAL;
8389 goto out;
8390 } else {
8391 pr_debug("file-encrypt-pipe-pair=0x%x",
8392 file_encrypt_pipe);
8393 }
8394 entry = pce_info_use->num_ce_pipe_entries =
8395 hlos_num_ce_hw_instances;
8396 pce_entry = pce_info_use->ce_pipe_entry =
8397 kcalloc(entry,
8398 sizeof(struct qseecom_ce_pipe_entry),
8399 GFP_KERNEL);
8400 if (pce_entry == NULL) {
8401 pr_err("failed to alloc memory\n");
8402 rc = -ENOMEM;
8403 goto out;
8404 }
8405 for (i = 0; i < entry; i++) {
8406 pce_entry->ce_num = hlos_ce_hw_instance[i];
8407 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8408 pce_entry->valid = 1;
8409 pce_entry++;
8410 }
8411 } else {
8412 pr_warn("Device does not support PFE");
8413 file_encrypt_pipe = 0xff;
8414 }
8415
8416out1:
8417 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8418 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8419out:
8420 if (rc) {
8421 if (qseecom.ce_info.fde) {
8422 pce_info_use = qseecom.ce_info.fde;
8423 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8424 pce_entry = pce_info_use->ce_pipe_entry;
8425 kfree(pce_entry);
8426 pce_info_use++;
8427 }
8428 }
8429 kfree(qseecom.ce_info.fde);
8430 qseecom.ce_info.fde = NULL;
8431 if (qseecom.ce_info.pfe) {
8432 pce_info_use = qseecom.ce_info.pfe;
8433 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8434 pce_entry = pce_info_use->ce_pipe_entry;
8435 kfree(pce_entry);
8436 pce_info_use++;
8437 }
8438 }
8439 kfree(qseecom.ce_info.pfe);
8440 qseecom.ce_info.pfe = NULL;
8441 }
8442 kfree(unit_tbl);
8443 kfree(pfde_tbl);
8444 return rc;
8445}
8446
8447static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8448 void __user *argp)
8449{
8450 struct qseecom_ce_info_req req;
8451 struct qseecom_ce_info_req *pinfo = &req;
8452 int ret = 0;
8453 int i;
8454 unsigned int entries;
8455 struct qseecom_ce_info_use *pce_info_use, *p;
8456 int total = 0;
8457 bool found = false;
8458 struct qseecom_ce_pipe_entry *pce_entry;
8459
8460 ret = copy_from_user(pinfo, argp,
8461 sizeof(struct qseecom_ce_info_req));
8462 if (ret) {
8463 pr_err("copy_from_user failed\n");
8464 return ret;
8465 }
8466
8467 switch (pinfo->usage) {
8468 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8469 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8470 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8471 if (qseecom.support_fde) {
8472 p = qseecom.ce_info.fde;
8473 total = qseecom.ce_info.num_fde;
8474 } else {
8475 pr_err("system does not support fde\n");
8476 return -EINVAL;
8477 }
8478 break;
8479 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8480 if (qseecom.support_pfe) {
8481 p = qseecom.ce_info.pfe;
8482 total = qseecom.ce_info.num_pfe;
8483 } else {
8484 pr_err("system does not support pfe\n");
8485 return -EINVAL;
8486 }
8487 break;
8488 default:
8489 pr_err("unsupported usage %d\n", pinfo->usage);
8490 return -EINVAL;
8491 }
8492
8493 pce_info_use = NULL;
8494 for (i = 0; i < total; i++) {
8495 if (!p->alloc)
8496 pce_info_use = p;
8497 else if (!memcmp(p->handle, pinfo->handle,
8498 MAX_CE_INFO_HANDLE_SIZE)) {
8499 pce_info_use = p;
8500 found = true;
8501 break;
8502 }
8503 p++;
8504 }
8505
8506 if (pce_info_use == NULL)
8507 return -EBUSY;
8508
8509 pinfo->unit_num = pce_info_use->unit_num;
8510 if (!pce_info_use->alloc) {
8511 pce_info_use->alloc = true;
8512 memcpy(pce_info_use->handle,
8513 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8514 }
8515 if (pce_info_use->num_ce_pipe_entries >
8516 MAX_CE_PIPE_PAIR_PER_UNIT)
8517 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8518 else
8519 entries = pce_info_use->num_ce_pipe_entries;
8520 pinfo->num_ce_pipe_entries = entries;
8521 pce_entry = pce_info_use->ce_pipe_entry;
8522 for (i = 0; i < entries; i++, pce_entry++)
8523 pinfo->ce_pipe_entry[i] = *pce_entry;
8524 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8525 pinfo->ce_pipe_entry[i].valid = 0;
8526
8527 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8528 pr_err("copy_to_user failed\n");
8529 ret = -EFAULT;
8530 }
8531 return ret;
8532}
8533
8534static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8535 void __user *argp)
8536{
8537 struct qseecom_ce_info_req req;
8538 struct qseecom_ce_info_req *pinfo = &req;
8539 int ret = 0;
8540 struct qseecom_ce_info_use *p;
8541 int total = 0;
8542 int i;
8543 bool found = false;
8544
8545 ret = copy_from_user(pinfo, argp,
8546 sizeof(struct qseecom_ce_info_req));
8547 if (ret)
8548 return ret;
8549
8550 switch (pinfo->usage) {
8551 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8552 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8553 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8554 if (qseecom.support_fde) {
8555 p = qseecom.ce_info.fde;
8556 total = qseecom.ce_info.num_fde;
8557 } else {
8558 pr_err("system does not support fde\n");
8559 return -EINVAL;
8560 }
8561 break;
8562 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8563 if (qseecom.support_pfe) {
8564 p = qseecom.ce_info.pfe;
8565 total = qseecom.ce_info.num_pfe;
8566 } else {
8567 pr_err("system does not support pfe\n");
8568 return -EINVAL;
8569 }
8570 break;
8571 default:
8572 pr_err("unsupported usage %d\n", pinfo->usage);
8573 return -EINVAL;
8574 }
8575
8576 for (i = 0; i < total; i++) {
8577 if (p->alloc &&
8578 !memcmp(p->handle, pinfo->handle,
8579 MAX_CE_INFO_HANDLE_SIZE)) {
8580 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8581 p->alloc = false;
8582 found = true;
8583 break;
8584 }
8585 p++;
8586 }
8587 return ret;
8588}
8589
8590static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8591 void __user *argp)
8592{
8593 struct qseecom_ce_info_req req;
8594 struct qseecom_ce_info_req *pinfo = &req;
8595 int ret = 0;
8596 int i;
8597 unsigned int entries;
8598 struct qseecom_ce_info_use *pce_info_use, *p;
8599 int total = 0;
8600 bool found = false;
8601 struct qseecom_ce_pipe_entry *pce_entry;
8602
8603 ret = copy_from_user(pinfo, argp,
8604 sizeof(struct qseecom_ce_info_req));
8605 if (ret)
8606 return ret;
8607
8608 switch (pinfo->usage) {
8609 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8610 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8611 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8612 if (qseecom.support_fde) {
8613 p = qseecom.ce_info.fde;
8614 total = qseecom.ce_info.num_fde;
8615 } else {
8616 pr_err("system does not support fde\n");
8617 return -EINVAL;
8618 }
8619 break;
8620 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8621 if (qseecom.support_pfe) {
8622 p = qseecom.ce_info.pfe;
8623 total = qseecom.ce_info.num_pfe;
8624 } else {
8625 pr_err("system does not support pfe\n");
8626 return -EINVAL;
8627 }
8628 break;
8629 default:
8630 pr_err("unsupported usage %d\n", pinfo->usage);
8631 return -EINVAL;
8632 }
8633
8634 pce_info_use = NULL;
8635 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8636 pinfo->num_ce_pipe_entries = 0;
8637 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8638 pinfo->ce_pipe_entry[i].valid = 0;
8639
8640 for (i = 0; i < total; i++) {
8641
8642 if (p->alloc && !memcmp(p->handle,
8643 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8644 pce_info_use = p;
8645 found = true;
8646 break;
8647 }
8648 p++;
8649 }
8650 if (!pce_info_use)
8651 goto out;
8652 pinfo->unit_num = pce_info_use->unit_num;
8653 if (pce_info_use->num_ce_pipe_entries >
8654 MAX_CE_PIPE_PAIR_PER_UNIT)
8655 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8656 else
8657 entries = pce_info_use->num_ce_pipe_entries;
8658 pinfo->num_ce_pipe_entries = entries;
8659 pce_entry = pce_info_use->ce_pipe_entry;
8660 for (i = 0; i < entries; i++, pce_entry++)
8661 pinfo->ce_pipe_entry[i] = *pce_entry;
8662 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8663 pinfo->ce_pipe_entry[i].valid = 0;
8664out:
8665 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8666 pr_err("copy_to_user failed\n");
8667 ret = -EFAULT;
8668 }
8669 return ret;
8670}
8671
8672/*
8673 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8674 * then whitelist feature is not supported.
8675 */
8676static int qseecom_check_whitelist_feature(void)
8677{
8678 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8679
8680 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8681}
8682
8683static int qseecom_probe(struct platform_device *pdev)
8684{
8685 int rc;
8686 int i;
8687 uint32_t feature = 10;
8688 struct device *class_dev;
8689 struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
8690 struct qseecom_command_scm_resp resp;
8691 struct qseecom_ce_info_use *pce_info_use = NULL;
8692
8693 qseecom.qsee_bw_count = 0;
8694 qseecom.qsee_perf_client = 0;
8695 qseecom.qsee_sfpb_bw_count = 0;
8696
8697 qseecom.qsee.ce_core_clk = NULL;
8698 qseecom.qsee.ce_clk = NULL;
8699 qseecom.qsee.ce_core_src_clk = NULL;
8700 qseecom.qsee.ce_bus_clk = NULL;
8701
8702 qseecom.cumulative_mode = 0;
8703 qseecom.current_mode = INACTIVE;
8704 qseecom.support_bus_scaling = false;
8705 qseecom.support_fde = false;
8706 qseecom.support_pfe = false;
8707
8708 qseecom.ce_drv.ce_core_clk = NULL;
8709 qseecom.ce_drv.ce_clk = NULL;
8710 qseecom.ce_drv.ce_core_src_clk = NULL;
8711 qseecom.ce_drv.ce_bus_clk = NULL;
8712 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8713
8714 qseecom.app_block_ref_cnt = 0;
8715 init_waitqueue_head(&qseecom.app_block_wq);
8716 qseecom.whitelist_support = true;
8717
8718 rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
8719 if (rc < 0) {
8720 pr_err("alloc_chrdev_region failed %d\n", rc);
8721 return rc;
8722 }
8723
8724 driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
8725 if (IS_ERR(driver_class)) {
8726 rc = -ENOMEM;
8727 pr_err("class_create failed %d\n", rc);
8728 goto exit_unreg_chrdev_region;
8729 }
8730
8731 class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
8732 QSEECOM_DEV);
8733 if (IS_ERR(class_dev)) {
8734 pr_err("class_device_create failed %d\n", rc);
8735 rc = -ENOMEM;
8736 goto exit_destroy_class;
8737 }
8738
8739 cdev_init(&qseecom.cdev, &qseecom_fops);
8740 qseecom.cdev.owner = THIS_MODULE;
8741
8742 rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
8743 if (rc < 0) {
8744 pr_err("cdev_add failed %d\n", rc);
8745 goto exit_destroy_device;
8746 }
8747
8748 INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008749 INIT_LIST_HEAD(&qseecom.registered_app_list_head);
8750 spin_lock_init(&qseecom.registered_app_list_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08008751 INIT_LIST_HEAD(&qseecom.unregister_lsnr_pending_list_head);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008752 INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
8753 spin_lock_init(&qseecom.registered_kclient_list_lock);
8754 init_waitqueue_head(&qseecom.send_resp_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08008755 init_waitqueue_head(&qseecom.register_lsnr_pending_wq);
Zhen Kongc4c162a2019-01-23 12:07:12 -08008756 init_waitqueue_head(&qseecom.unregister_lsnr_kthread_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008757 qseecom.send_resp_flag = 0;
8758
8759 qseecom.qsee_version = QSEEE_VERSION_00;
Zhen Kong03f220d2019-02-01 17:12:34 -08008760 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008761 rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
8762 &resp, sizeof(resp));
Zhen Kong03f220d2019-02-01 17:12:34 -08008763 mutex_unlock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008764 pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
8765 if (rc) {
8766 pr_err("Failed to get QSEE version info %d\n", rc);
8767 goto exit_del_cdev;
8768 }
8769 qseecom.qsee_version = resp.result;
8770 qseecom.qseos_version = QSEOS_VERSION_14;
8771 qseecom.commonlib_loaded = false;
8772 qseecom.commonlib64_loaded = false;
8773 qseecom.pdev = class_dev;
8774 /* Create ION msm client */
8775 qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
8776 if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
8777 pr_err("Ion client cannot be created\n");
8778 rc = -ENOMEM;
8779 goto exit_del_cdev;
8780 }
8781
8782 /* register client for bus scaling */
8783 if (pdev->dev.of_node) {
8784 qseecom.pdev->of_node = pdev->dev.of_node;
8785 qseecom.support_bus_scaling =
8786 of_property_read_bool((&pdev->dev)->of_node,
8787 "qcom,support-bus-scaling");
8788 rc = qseecom_retrieve_ce_data(pdev);
8789 if (rc)
8790 goto exit_destroy_ion_client;
8791 qseecom.appsbl_qseecom_support =
8792 of_property_read_bool((&pdev->dev)->of_node,
8793 "qcom,appsbl-qseecom-support");
8794 pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
8795 qseecom.appsbl_qseecom_support);
8796
8797 qseecom.commonlib64_loaded =
8798 of_property_read_bool((&pdev->dev)->of_node,
8799 "qcom,commonlib64-loaded-by-uefi");
8800 pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
8801 qseecom.commonlib64_loaded);
8802 qseecom.fde_key_size =
8803 of_property_read_bool((&pdev->dev)->of_node,
8804 "qcom,fde-key-size");
8805 qseecom.no_clock_support =
8806 of_property_read_bool((&pdev->dev)->of_node,
8807 "qcom,no-clock-support");
8808 if (!qseecom.no_clock_support) {
8809 pr_info("qseecom clocks handled by other subsystem\n");
8810 } else {
8811 pr_info("no-clock-support=0x%x",
8812 qseecom.no_clock_support);
8813 }
8814
8815 if (of_property_read_u32((&pdev->dev)->of_node,
8816 "qcom,qsee-reentrancy-support",
8817 &qseecom.qsee_reentrancy_support)) {
8818 pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
8819 qseecom.qsee_reentrancy_support = 0;
8820 } else {
8821 pr_warn("qseecom.qsee_reentrancy_support = %d\n",
8822 qseecom.qsee_reentrancy_support);
8823 }
8824
Jiten Patela7bb1d52018-05-11 12:34:26 +05308825 qseecom.enable_key_wrap_in_ks =
8826 of_property_read_bool((&pdev->dev)->of_node,
8827 "qcom,enable-key-wrap-in-ks");
8828 if (qseecom.enable_key_wrap_in_ks) {
8829 pr_warn("qseecom.enable_key_wrap_in_ks = %d\n",
8830 qseecom.enable_key_wrap_in_ks);
8831 }
8832
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008833 /*
8834 * The qseecom bus scaling flag can not be enabled when
8835 * crypto clock is not handled by HLOS.
8836 */
8837 if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
8838 pr_err("support_bus_scaling flag can not be enabled.\n");
8839 rc = -EINVAL;
8840 goto exit_destroy_ion_client;
8841 }
8842
8843 if (of_property_read_u32((&pdev->dev)->of_node,
8844 "qcom,ce-opp-freq",
8845 &qseecom.ce_opp_freq_hz)) {
8846 pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
8847 qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
8848 }
8849 rc = __qseecom_init_clk(CLK_QSEE);
8850 if (rc)
8851 goto exit_destroy_ion_client;
8852
8853 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8854 (qseecom.support_pfe || qseecom.support_fde)) {
8855 rc = __qseecom_init_clk(CLK_CE_DRV);
8856 if (rc) {
8857 __qseecom_deinit_clk(CLK_QSEE);
8858 goto exit_destroy_ion_client;
8859 }
8860 } else {
8861 struct qseecom_clk *qclk;
8862
8863 qclk = &qseecom.qsee;
8864 qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
8865 qseecom.ce_drv.ce_clk = qclk->ce_clk;
8866 qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
8867 qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
8868 }
8869
8870 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8871 msm_bus_cl_get_pdata(pdev);
8872 if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
8873 (!qseecom.is_apps_region_protected &&
8874 !qseecom.appsbl_qseecom_support)) {
8875 struct resource *resource = NULL;
8876 struct qsee_apps_region_info_ireq req;
8877 struct qsee_apps_region_info_64bit_ireq req_64bit;
8878 struct qseecom_command_scm_resp resp;
8879 void *cmd_buf = NULL;
8880 size_t cmd_len;
8881
8882 resource = platform_get_resource_byname(pdev,
8883 IORESOURCE_MEM, "secapp-region");
8884 if (resource) {
8885 if (qseecom.qsee_version < QSEE_VERSION_40) {
8886 req.qsee_cmd_id =
8887 QSEOS_APP_REGION_NOTIFICATION;
8888 req.addr = (uint32_t)resource->start;
8889 req.size = resource_size(resource);
8890 cmd_buf = (void *)&req;
8891 cmd_len = sizeof(struct
8892 qsee_apps_region_info_ireq);
8893 pr_warn("secure app region addr=0x%x size=0x%x",
8894 req.addr, req.size);
8895 } else {
8896 req_64bit.qsee_cmd_id =
8897 QSEOS_APP_REGION_NOTIFICATION;
8898 req_64bit.addr = resource->start;
8899 req_64bit.size = resource_size(
8900 resource);
8901 cmd_buf = (void *)&req_64bit;
8902 cmd_len = sizeof(struct
8903 qsee_apps_region_info_64bit_ireq);
8904 pr_warn("secure app region addr=0x%llx size=0x%x",
8905 req_64bit.addr, req_64bit.size);
8906 }
8907 } else {
8908 pr_err("Fail to get secure app region info\n");
8909 rc = -EINVAL;
8910 goto exit_deinit_clock;
8911 }
8912 rc = __qseecom_enable_clk(CLK_QSEE);
8913 if (rc) {
8914 pr_err("CLK_QSEE enabling failed (%d)\n", rc);
8915 rc = -EIO;
8916 goto exit_deinit_clock;
8917 }
Zhen Kong03f220d2019-02-01 17:12:34 -08008918 mutex_lock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008919 rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
8920 cmd_buf, cmd_len,
8921 &resp, sizeof(resp));
Zhen Kong03f220d2019-02-01 17:12:34 -08008922 mutex_unlock(&app_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008923 __qseecom_disable_clk(CLK_QSEE);
8924 if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
8925 pr_err("send secapp reg fail %d resp.res %d\n",
8926 rc, resp.result);
8927 rc = -EINVAL;
8928 goto exit_deinit_clock;
8929 }
8930 }
8931 /*
8932 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
8933 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
8934 * Pls add "qseecom.commonlib64_loaded = true" here too.
8935 */
8936 if (qseecom.is_apps_region_protected ||
8937 qseecom.appsbl_qseecom_support)
8938 qseecom.commonlib_loaded = true;
8939 } else {
8940 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8941 pdev->dev.platform_data;
8942 }
8943 if (qseecom.support_bus_scaling) {
8944 init_timer(&(qseecom.bw_scale_down_timer));
8945 INIT_WORK(&qseecom.bw_inactive_req_ws,
8946 qseecom_bw_inactive_req_work);
8947 qseecom.bw_scale_down_timer.function =
8948 qseecom_scale_bus_bandwidth_timer_callback;
8949 }
8950 qseecom.timer_running = false;
8951 qseecom.qsee_perf_client = msm_bus_scale_register_client(
8952 qseecom_platform_support);
8953
8954 qseecom.whitelist_support = qseecom_check_whitelist_feature();
8955 pr_warn("qseecom.whitelist_support = %d\n",
8956 qseecom.whitelist_support);
8957
8958 if (!qseecom.qsee_perf_client)
8959 pr_err("Unable to register bus client\n");
8960
Zhen Kongc4c162a2019-01-23 12:07:12 -08008961 /*create a kthread to process pending listener unregister task */
8962 qseecom.unregister_lsnr_kthread_task = kthread_run(
8963 __qseecom_unregister_listener_kthread_func,
8964 NULL, "qseecom-unreg-lsnr");
8965 if (IS_ERR(qseecom.unregister_lsnr_kthread_task)) {
8966 pr_err("failed to create kthread to unregister listener\n");
8967 rc = -EINVAL;
8968 goto exit_deinit_clock;
8969 }
8970 atomic_set(&qseecom.unregister_lsnr_kthread_state,
8971 LSNR_UNREG_KT_SLEEP);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008972 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
8973 return 0;
8974
8975exit_deinit_clock:
8976 __qseecom_deinit_clk(CLK_QSEE);
8977 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8978 (qseecom.support_pfe || qseecom.support_fde))
8979 __qseecom_deinit_clk(CLK_CE_DRV);
8980exit_destroy_ion_client:
8981 if (qseecom.ce_info.fde) {
8982 pce_info_use = qseecom.ce_info.fde;
8983 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8984 kzfree(pce_info_use->ce_pipe_entry);
8985 pce_info_use++;
8986 }
8987 kfree(qseecom.ce_info.fde);
8988 }
8989 if (qseecom.ce_info.pfe) {
8990 pce_info_use = qseecom.ce_info.pfe;
8991 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8992 kzfree(pce_info_use->ce_pipe_entry);
8993 pce_info_use++;
8994 }
8995 kfree(qseecom.ce_info.pfe);
8996 }
8997 ion_client_destroy(qseecom.ion_clnt);
8998exit_del_cdev:
8999 cdev_del(&qseecom.cdev);
9000exit_destroy_device:
9001 device_destroy(driver_class, qseecom_device_no);
9002exit_destroy_class:
9003 class_destroy(driver_class);
9004exit_unreg_chrdev_region:
9005 unregister_chrdev_region(qseecom_device_no, 1);
9006 return rc;
9007}
9008
9009static int qseecom_remove(struct platform_device *pdev)
9010{
9011 struct qseecom_registered_kclient_list *kclient = NULL;
Monika Singhe711b162018-04-24 09:54:50 +05309012 struct qseecom_registered_kclient_list *kclient_tmp = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009013 unsigned long flags = 0;
9014 int ret = 0;
9015 int i;
9016 struct qseecom_ce_pipe_entry *pce_entry;
9017 struct qseecom_ce_info_use *pce_info_use;
9018
9019 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
9020 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
9021
Monika Singhe711b162018-04-24 09:54:50 +05309022 list_for_each_entry_safe(kclient, kclient_tmp,
9023 &qseecom.registered_kclient_list_head, list) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009024
9025 /* Break the loop if client handle is NULL */
Zhen Kong9131def2018-07-13 12:02:32 -07009026 if (!kclient->handle) {
9027 list_del(&kclient->list);
9028 kzfree(kclient);
9029 break;
9030 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009031
9032 list_del(&kclient->list);
9033 mutex_lock(&app_access_lock);
9034 ret = qseecom_unload_app(kclient->handle->dev, false);
9035 mutex_unlock(&app_access_lock);
9036 if (!ret) {
9037 kzfree(kclient->handle->dev);
9038 kzfree(kclient->handle);
9039 kzfree(kclient);
9040 }
9041 }
9042
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009043 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
9044
9045 if (qseecom.qseos_version > QSEEE_VERSION_00)
9046 qseecom_unload_commonlib_image();
9047
9048 if (qseecom.qsee_perf_client)
9049 msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
9050 0);
9051 if (pdev->dev.platform_data != NULL)
9052 msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
9053
9054 if (qseecom.support_bus_scaling) {
9055 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9056 del_timer_sync(&qseecom.bw_scale_down_timer);
9057 }
9058
9059 if (qseecom.ce_info.fde) {
9060 pce_info_use = qseecom.ce_info.fde;
9061 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
9062 pce_entry = pce_info_use->ce_pipe_entry;
9063 kfree(pce_entry);
9064 pce_info_use++;
9065 }
9066 }
9067 kfree(qseecom.ce_info.fde);
9068 if (qseecom.ce_info.pfe) {
9069 pce_info_use = qseecom.ce_info.pfe;
9070 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
9071 pce_entry = pce_info_use->ce_pipe_entry;
9072 kfree(pce_entry);
9073 pce_info_use++;
9074 }
9075 }
9076 kfree(qseecom.ce_info.pfe);
9077
9078 /* register client for bus scaling */
9079 if (pdev->dev.of_node) {
9080 __qseecom_deinit_clk(CLK_QSEE);
9081 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
9082 (qseecom.support_pfe || qseecom.support_fde))
9083 __qseecom_deinit_clk(CLK_CE_DRV);
9084 }
9085
9086 ion_client_destroy(qseecom.ion_clnt);
9087
Zhen Kongc4c162a2019-01-23 12:07:12 -08009088 kthread_stop(qseecom.unregister_lsnr_kthread_task);
9089
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07009090 cdev_del(&qseecom.cdev);
9091
9092 device_destroy(driver_class, qseecom_device_no);
9093
9094 class_destroy(driver_class);
9095
9096 unregister_chrdev_region(qseecom_device_no, 1);
9097
9098 return ret;
9099}
9100
9101static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
9102{
9103 int ret = 0;
9104 struct qseecom_clk *qclk;
9105
9106 qclk = &qseecom.qsee;
9107 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
9108 if (qseecom.no_clock_support)
9109 return 0;
9110
9111 mutex_lock(&qsee_bw_mutex);
9112 mutex_lock(&clk_access_lock);
9113
9114 if (qseecom.current_mode != INACTIVE) {
9115 ret = msm_bus_scale_client_update_request(
9116 qseecom.qsee_perf_client, INACTIVE);
9117 if (ret)
9118 pr_err("Fail to scale down bus\n");
9119 else
9120 qseecom.current_mode = INACTIVE;
9121 }
9122
9123 if (qclk->clk_access_cnt) {
9124 if (qclk->ce_clk != NULL)
9125 clk_disable_unprepare(qclk->ce_clk);
9126 if (qclk->ce_core_clk != NULL)
9127 clk_disable_unprepare(qclk->ce_core_clk);
9128 if (qclk->ce_bus_clk != NULL)
9129 clk_disable_unprepare(qclk->ce_bus_clk);
9130 }
9131
9132 del_timer_sync(&(qseecom.bw_scale_down_timer));
9133 qseecom.timer_running = false;
9134
9135 mutex_unlock(&clk_access_lock);
9136 mutex_unlock(&qsee_bw_mutex);
9137 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9138
9139 return 0;
9140}
9141
9142static int qseecom_resume(struct platform_device *pdev)
9143{
9144 int mode = 0;
9145 int ret = 0;
9146 struct qseecom_clk *qclk;
9147
9148 qclk = &qseecom.qsee;
9149 if (qseecom.no_clock_support)
9150 goto exit;
9151
9152 mutex_lock(&qsee_bw_mutex);
9153 mutex_lock(&clk_access_lock);
9154 if (qseecom.cumulative_mode >= HIGH)
9155 mode = HIGH;
9156 else
9157 mode = qseecom.cumulative_mode;
9158
9159 if (qseecom.cumulative_mode != INACTIVE) {
9160 ret = msm_bus_scale_client_update_request(
9161 qseecom.qsee_perf_client, mode);
9162 if (ret)
9163 pr_err("Fail to scale up bus to %d\n", mode);
9164 else
9165 qseecom.current_mode = mode;
9166 }
9167
9168 if (qclk->clk_access_cnt) {
9169 if (qclk->ce_core_clk != NULL) {
9170 ret = clk_prepare_enable(qclk->ce_core_clk);
9171 if (ret) {
9172 pr_err("Unable to enable/prep CE core clk\n");
9173 qclk->clk_access_cnt = 0;
9174 goto err;
9175 }
9176 }
9177 if (qclk->ce_clk != NULL) {
9178 ret = clk_prepare_enable(qclk->ce_clk);
9179 if (ret) {
9180 pr_err("Unable to enable/prep CE iface clk\n");
9181 qclk->clk_access_cnt = 0;
9182 goto ce_clk_err;
9183 }
9184 }
9185 if (qclk->ce_bus_clk != NULL) {
9186 ret = clk_prepare_enable(qclk->ce_bus_clk);
9187 if (ret) {
9188 pr_err("Unable to enable/prep CE bus clk\n");
9189 qclk->clk_access_cnt = 0;
9190 goto ce_bus_clk_err;
9191 }
9192 }
9193 }
9194
9195 if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
9196 qseecom.bw_scale_down_timer.expires = jiffies +
9197 msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
9198 mod_timer(&(qseecom.bw_scale_down_timer),
9199 qseecom.bw_scale_down_timer.expires);
9200 qseecom.timer_running = true;
9201 }
9202
9203 mutex_unlock(&clk_access_lock);
9204 mutex_unlock(&qsee_bw_mutex);
9205 goto exit;
9206
9207ce_bus_clk_err:
9208 if (qclk->ce_clk)
9209 clk_disable_unprepare(qclk->ce_clk);
9210ce_clk_err:
9211 if (qclk->ce_core_clk)
9212 clk_disable_unprepare(qclk->ce_core_clk);
9213err:
9214 mutex_unlock(&clk_access_lock);
9215 mutex_unlock(&qsee_bw_mutex);
9216 ret = -EIO;
9217exit:
9218 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
9219 return ret;
9220}
9221
9222static const struct of_device_id qseecom_match[] = {
9223 {
9224 .compatible = "qcom,qseecom",
9225 },
9226 {}
9227};
9228
9229static struct platform_driver qseecom_plat_driver = {
9230 .probe = qseecom_probe,
9231 .remove = qseecom_remove,
9232 .suspend = qseecom_suspend,
9233 .resume = qseecom_resume,
9234 .driver = {
9235 .name = "qseecom",
9236 .owner = THIS_MODULE,
9237 .of_match_table = qseecom_match,
9238 },
9239};
9240
9241static int qseecom_init(void)
9242{
9243 return platform_driver_register(&qseecom_plat_driver);
9244}
9245
9246static void qseecom_exit(void)
9247{
9248 platform_driver_unregister(&qseecom_plat_driver);
9249}
9250
9251MODULE_LICENSE("GPL v2");
9252MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
9253
9254module_init(qseecom_init);
9255module_exit(qseecom_exit);