AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
Zhen Kong87dcf0e2019-01-04 12:34:50 -08004 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
Zhen Kongc4c162a2019-01-23 12:07:12 -080053#include <linux/kthread.h>
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -070054
55#define QSEECOM_DEV "qseecom"
56#define QSEOS_VERSION_14 0x14
57#define QSEEE_VERSION_00 0x400000
58#define QSEE_VERSION_01 0x401000
59#define QSEE_VERSION_02 0x402000
60#define QSEE_VERSION_03 0x403000
61#define QSEE_VERSION_04 0x404000
62#define QSEE_VERSION_05 0x405000
63#define QSEE_VERSION_20 0x800000
64#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
65
66#define QSEE_CE_CLK_100MHZ 100000000
67#define CE_CLK_DIV 1000000
68
Mohamed Sunfeer105a07b2018-08-29 13:52:40 +053069#define QSEECOM_MAX_SG_ENTRY 4096
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -070070#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
71 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
72
73#define QSEECOM_INVALID_KEY_ID 0xff
74
75/* Save partition image hash for authentication check */
76#define SCM_SAVE_PARTITION_HASH_ID 0x01
77
 78/* Check if enterprise security is activated */
79#define SCM_IS_ACTIVATED_ID 0x02
80
81/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
82#define SCM_MDTP_CIPHER_DIP 0x01
83
84/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
85#define MAX_DIP 0x20000
86
87#define RPMB_SERVICE 0x2000
88#define SSD_SERVICE 0x3000
89
90#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
91#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
92#define TWO 2
93#define QSEECOM_UFS_ICE_CE_NUM 10
94#define QSEECOM_SDCC_ICE_CE_NUM 20
95#define QSEECOM_ICE_FDE_KEY_INDEX 0
96
97#define PHY_ADDR_4G (1ULL<<32)
98
99#define QSEECOM_STATE_NOT_READY 0
100#define QSEECOM_STATE_SUSPEND 1
101#define QSEECOM_STATE_READY 2
102#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
103
 104/*
 105 * Default the ce info unit to 0 for
 106 * services that support only a
 107 * single instance.
 108 * Most services fall into this category.
 109 */
110#define DEFAULT_CE_INFO_UNIT 0
111#define DEFAULT_NUM_CE_INFO_UNIT 1
112
Jiten Patela7bb1d52018-05-11 12:34:26 +0530113#define FDE_FLAG_POS 4
114#define ENABLE_KEY_WRAP_IN_KS (1 << FDE_FLAG_POS)
115
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700116enum qseecom_clk_definitions {
117 CLK_DFAB = 0,
118 CLK_SFPB,
119};
120
121enum qseecom_ice_key_size_type {
122 QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
123 (0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
124 QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
125 (1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
126 QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
127 (0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
128};
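/*
 * Illustrative note (not part of the original driver): despite its name,
 * QSEECOM_ICE_FDE_KEY_SIZE_MASK (2) is used above as a shift amount, so the
 * enum encodes the key size in bit 2 of the flag word:
 *
 *   QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE = (0 << 2)   = 0x00
 *   QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE = (1 << 2)   = 0x04
 *   QSEE_ICE_FDE_KEY_SIZE_UNDEFINED  = (0xF << 2) = 0x3C
 */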
129
130enum qseecom_client_handle_type {
131 QSEECOM_CLIENT_APP = 1,
132 QSEECOM_LISTENER_SERVICE,
133 QSEECOM_SECURE_SERVICE,
134 QSEECOM_GENERIC,
135 QSEECOM_UNAVAILABLE_CLIENT_APP,
136};
137
138enum qseecom_ce_hw_instance {
139 CLK_QSEE = 0,
140 CLK_CE_DRV,
141 CLK_INVALID,
142};
143
Zhen Kongc4c162a2019-01-23 12:07:12 -0800144enum qseecom_listener_unregister_kthread_state {
145 LSNR_UNREG_KT_SLEEP = 0,
146 LSNR_UNREG_KT_WAKEUP,
147};
148
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700149static struct class *driver_class;
150static dev_t qseecom_device_no;
151
152static DEFINE_MUTEX(qsee_bw_mutex);
153static DEFINE_MUTEX(app_access_lock);
154static DEFINE_MUTEX(clk_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -0800155static DEFINE_MUTEX(listener_access_lock);
156
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700157
158struct sglist_info {
159 uint32_t indexAndFlags;
160 uint32_t sizeOrCount;
161};
162
 163/*
 164 * Bit 31 indicates whether the request buffer holds a single physical
 165 * address or a scatter/gather list. If it is set, the index locates a
 166 * single physical addr inside the request buffer, and `sizeOrCount` is the
 167 * size of the memory being shared at that physical address.
 168 * Otherwise, the index locates an array of {start, len} pairs (a
 169 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
 170 * that array.
 171 *
 172 * Bit 30 indicates 64-bit or 32-bit addressing; when it is set, physical
 173 * addresses and scatter/gather entry sizes are 64-bit values. Otherwise,
 174 * they are 32-bit values.
 175 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
 176 */
177#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
178 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
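/*
 * Example (illustrative sketch, not from the original source): packing and
 * unpacking an indexAndFlags word with the bit layout described above.
 *
 *   uint32_t v = SGLISTINFO_SET_INDEX_FLAG(1, 1, 0x40);
 *   // v == 0xC0000040: bit 31 set (single phys addr), bit 30 set (64-bit),
 *   // offset 0x40 into the request buffer.
 *
 *   bool single_phys = (v >> 31) & 1;      // one phys addr vs. SG list
 *   bool is_64bit    = (v >> 30) & 1;      // 64-bit vs. 32-bit entries
 *   uint32_t offset  = v & 0x3fffffff;     // bits [0:29]
 */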
179
180#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
181
 182#define FEATURE_ID_WHITELIST 15 /* whitelist feature id */
183
184#define MAKE_WHITELIST_VERSION(major, minor, patch) \
185 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
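/*
 * Example (illustrative, not from the original source): the whitelist
 * version packs major/minor/patch into one 32-bit word, e.g.
 *
 *   MAKE_WHITELIST_VERSION(1, 0, 0) == (1 << 22)               == 0x00400000
 *   MAKE_WHITELIST_VERSION(2, 3, 4) == (2 << 22)|(3 << 12)|4   == 0x00803004
 */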
186
187struct qseecom_registered_listener_list {
188 struct list_head list;
189 struct qseecom_register_listener_req svc;
190 void *user_virt_sb_base;
191 u8 *sb_virt;
192 phys_addr_t sb_phys;
193 size_t sb_length;
194 struct ion_handle *ihandle; /* Retrieve phy addr */
195 wait_queue_head_t rcv_req_wq;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800196 /* rcv_req_flag: 0: ready and empty; 1: received req */
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700197 int rcv_req_flag;
198 int send_resp_flag;
199 bool listener_in_use;
200 /* wq for thread blocked on this listener*/
201 wait_queue_head_t listener_block_app_wq;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800202 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
203 uint32_t sglist_cnt;
204 int abort;
205 bool unregister_pending;
206};
207
208struct qseecom_unregister_pending_list {
209 struct list_head list;
210 struct qseecom_dev_handle *data;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700211};
212
213struct qseecom_registered_app_list {
214 struct list_head list;
215 u32 app_id;
216 u32 ref_cnt;
217 char app_name[MAX_APP_NAME_SIZE];
218 u32 app_arch;
219 bool app_blocked;
Zhen Kongdea10592018-07-30 17:50:10 -0700220 u32 check_block;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700221 u32 blocked_on_listener_id;
222};
223
224struct qseecom_registered_kclient_list {
225 struct list_head list;
226 struct qseecom_handle *handle;
227};
228
229struct qseecom_ce_info_use {
230 unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
231 unsigned int unit_num;
232 unsigned int num_ce_pipe_entries;
233 struct qseecom_ce_pipe_entry *ce_pipe_entry;
234 bool alloc;
235 uint32_t type;
236};
237
238struct ce_hw_usage_info {
239 uint32_t qsee_ce_hw_instance;
240 uint32_t num_fde;
241 struct qseecom_ce_info_use *fde;
242 uint32_t num_pfe;
243 struct qseecom_ce_info_use *pfe;
244};
245
246struct qseecom_clk {
247 enum qseecom_ce_hw_instance instance;
248 struct clk *ce_core_clk;
249 struct clk *ce_clk;
250 struct clk *ce_core_src_clk;
251 struct clk *ce_bus_clk;
252 uint32_t clk_access_cnt;
253};
254
255struct qseecom_control {
256 struct ion_client *ion_clnt; /* Ion client */
257 struct list_head registered_listener_list_head;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700258
259 struct list_head registered_app_list_head;
260 spinlock_t registered_app_list_lock;
261
262 struct list_head registered_kclient_list_head;
263 spinlock_t registered_kclient_list_lock;
264
265 wait_queue_head_t send_resp_wq;
266 int send_resp_flag;
267
268 uint32_t qseos_version;
269 uint32_t qsee_version;
270 struct device *pdev;
271 bool whitelist_support;
272 bool commonlib_loaded;
273 bool commonlib64_loaded;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700274 struct ce_hw_usage_info ce_info;
275
276 int qsee_bw_count;
277 int qsee_sfpb_bw_count;
278
279 uint32_t qsee_perf_client;
280 struct qseecom_clk qsee;
281 struct qseecom_clk ce_drv;
282
283 bool support_bus_scaling;
284 bool support_fde;
285 bool support_pfe;
286 bool fde_key_size;
287 uint32_t cumulative_mode;
288 enum qseecom_bandwidth_request_mode current_mode;
289 struct timer_list bw_scale_down_timer;
290 struct work_struct bw_inactive_req_ws;
291 struct cdev cdev;
292 bool timer_running;
293 bool no_clock_support;
294 unsigned int ce_opp_freq_hz;
295 bool appsbl_qseecom_support;
296 uint32_t qsee_reentrancy_support;
Jiten Patela7bb1d52018-05-11 12:34:26 +0530297 bool enable_key_wrap_in_ks;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700298
299 uint32_t app_block_ref_cnt;
300 wait_queue_head_t app_block_wq;
301 atomic_t qseecom_state;
302 int is_apps_region_protected;
Zhen Kong2f60f492017-06-29 15:22:14 -0700303 bool smcinvoke_support;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800304
305 struct list_head unregister_lsnr_pending_list_head;
306 wait_queue_head_t register_lsnr_pending_wq;
Zhen Kongc4c162a2019-01-23 12:07:12 -0800307 struct task_struct *unregister_lsnr_kthread_task;
308 wait_queue_head_t unregister_lsnr_kthread_wq;
309 atomic_t unregister_lsnr_kthread_state;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700310};
311
312struct qseecom_sec_buf_fd_info {
313 bool is_sec_buf_fd;
314 size_t size;
315 void *vbase;
316 dma_addr_t pbase;
317};
318
319struct qseecom_param_memref {
320 uint32_t buffer;
321 uint32_t size;
322};
323
324struct qseecom_client_handle {
325 u32 app_id;
326 u8 *sb_virt;
327 phys_addr_t sb_phys;
328 unsigned long user_virt_sb_base;
329 size_t sb_length;
330 struct ion_handle *ihandle; /* Retrieve phy addr */
331 char app_name[MAX_APP_NAME_SIZE];
332 u32 app_arch;
333 struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
334};
335
336struct qseecom_listener_handle {
337 u32 id;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800338 bool unregister_pending;
Zhen Kong87dcf0e2019-01-04 12:34:50 -0800339 bool release_called;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700340};
341
342static struct qseecom_control qseecom;
343
344struct qseecom_dev_handle {
345 enum qseecom_client_handle_type type;
346 union {
347 struct qseecom_client_handle client;
348 struct qseecom_listener_handle listener;
349 };
350 bool released;
351 int abort;
352 wait_queue_head_t abort_wq;
353 atomic_t ioctl_count;
354 bool perf_enabled;
355 bool fast_load_enabled;
356 enum qseecom_bandwidth_request_mode mode;
357 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
358 uint32_t sglist_cnt;
359 bool use_legacy_cmd;
360};
361
362struct qseecom_key_id_usage_desc {
363 uint8_t desc[QSEECOM_KEY_ID_SIZE];
364};
365
366struct qseecom_crypto_info {
367 unsigned int unit_num;
368 unsigned int ce;
369 unsigned int pipe_pair;
370};
371
372static struct qseecom_key_id_usage_desc key_id_array[] = {
373 {
374 .desc = "Undefined Usage Index",
375 },
376
377 {
378 .desc = "Full Disk Encryption",
379 },
380
381 {
382 .desc = "Per File Encryption",
383 },
384
385 {
386 .desc = "UFS ICE Full Disk Encryption",
387 },
388
389 {
390 .desc = "SDCC ICE Full Disk Encryption",
391 },
392};
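/*
 * Note (added for clarity, assumption): the position of each entry in
 * key_id_array is presumed to mirror the key-usage index supplied by
 * callers (0 = undefined, 1 = FDE, 2 = PFE, 3 = UFS ICE FDE, 4 = SDCC ICE
 * FDE), so a usage descriptor can be looked up as key_id_array[usage].desc.
 */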
393
394/* Function proto types */
395static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
396static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
397static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
398static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
399static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
400static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
401 char *cmnlib_name);
402static int qseecom_enable_ice_setup(int usage);
403static int qseecom_disable_ice_setup(int usage);
404static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
405static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
406 void __user *argp);
407static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
408 void __user *argp);
409static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
410 void __user *argp);
411
412static int get_qseecom_keymaster_status(char *str)
413{
414 get_option(&str, &qseecom.is_apps_region_protected);
415 return 1;
416}
417__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
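/*
 * Illustrative example (not from the original source): the __setup() hook
 * above parses the "androidboot.keymaster=" kernel command-line option with
 * get_option(), so booting with e.g.
 *
 *   androidboot.keymaster=1
 *
 * stores 1 in qseecom.is_apps_region_protected during early boot.
 */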
418
Zhen Kong03f220d2019-02-01 17:12:34 -0800419
420#define QSEECOM_SCM_EBUSY_WAIT_MS 30
421#define QSEECOM_SCM_EBUSY_MAX_RETRY 67
422
423static int __qseecom_scm_call2_locked(uint32_t smc_id, struct scm_desc *desc)
424{
425 int ret = 0;
426 int retry_count = 0;
427
428 do {
429 ret = scm_call2_noretry(smc_id, desc);
430 if (ret == -EBUSY) {
431 mutex_unlock(&app_access_lock);
432 msleep(QSEECOM_SCM_EBUSY_WAIT_MS);
433 mutex_lock(&app_access_lock);
434 }
435 if (retry_count == 33)
436 pr_warn("secure world has been busy for 1 second!\n");
437 } while (ret == -EBUSY &&
438 (retry_count++ < QSEECOM_SCM_EBUSY_MAX_RETRY));
439 return ret;
440}
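/*
 * Note (added for clarity): with QSEECOM_SCM_EBUSY_WAIT_MS = 30 and
 * QSEECOM_SCM_EBUSY_MAX_RETRY = 67, a persistently busy secure world is
 * retried for roughly 2 seconds (67 * 30 ms) before -EBUSY is returned to
 * the caller; the warning above fires after about 1 second (33 retries).
 * app_access_lock is dropped around each sleep so other callers are not
 * starved while TZ is busy.
 */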
441
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700442static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
443 const void *req_buf, void *resp_buf)
444{
445 int ret = 0;
446 uint32_t smc_id = 0;
447 uint32_t qseos_cmd_id = 0;
448 struct scm_desc desc = {0};
449 struct qseecom_command_scm_resp *scm_resp = NULL;
450
451 if (!req_buf || !resp_buf) {
452 pr_err("Invalid buffer pointer\n");
453 return -EINVAL;
454 }
455 qseos_cmd_id = *(uint32_t *)req_buf;
456 scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
457
458 switch (svc_id) {
459 case 6: {
460 if (tz_cmd_id == 3) {
461 smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
462 desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
463 desc.args[0] = *(uint32_t *)req_buf;
464 } else {
465 pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
466 svc_id, tz_cmd_id);
467 return -EINVAL;
468 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800469 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700470 break;
471 }
472 case SCM_SVC_ES: {
473 switch (tz_cmd_id) {
474 case SCM_SAVE_PARTITION_HASH_ID: {
475 u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
476 struct qseecom_save_partition_hash_req *p_hash_req =
477 (struct qseecom_save_partition_hash_req *)
478 req_buf;
479 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
480
481 if (!tzbuf)
482 return -ENOMEM;
483 memset(tzbuf, 0, tzbuflen);
484 memcpy(tzbuf, p_hash_req->digest,
485 SHA256_DIGEST_LENGTH);
486 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
487 smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
488 desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
489 desc.args[0] = p_hash_req->partition_id;
490 desc.args[1] = virt_to_phys(tzbuf);
491 desc.args[2] = SHA256_DIGEST_LENGTH;
Zhen Kong03f220d2019-02-01 17:12:34 -0800492 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700493 kzfree(tzbuf);
494 break;
495 }
496 default: {
497 pr_err("tz_cmd_id %d is not supported by scm_call2\n",
498 tz_cmd_id);
499 ret = -EINVAL;
500 break;
501 }
502 } /* end of switch (tz_cmd_id) */
503 break;
504 } /* end of case SCM_SVC_ES */
505 case SCM_SVC_TZSCHEDULER: {
506 switch (qseos_cmd_id) {
507 case QSEOS_APP_START_COMMAND: {
508 struct qseecom_load_app_ireq *req;
509 struct qseecom_load_app_64bit_ireq *req_64bit;
510
511 smc_id = TZ_OS_APP_START_ID;
512 desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
513 if (qseecom.qsee_version < QSEE_VERSION_40) {
514 req = (struct qseecom_load_app_ireq *)req_buf;
515 desc.args[0] = req->mdt_len;
516 desc.args[1] = req->img_len;
517 desc.args[2] = req->phy_addr;
518 } else {
519 req_64bit =
520 (struct qseecom_load_app_64bit_ireq *)
521 req_buf;
522 desc.args[0] = req_64bit->mdt_len;
523 desc.args[1] = req_64bit->img_len;
524 desc.args[2] = req_64bit->phy_addr;
525 }
526 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800527 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700528 break;
529 }
530 case QSEOS_APP_SHUTDOWN_COMMAND: {
531 struct qseecom_unload_app_ireq *req;
532
533 req = (struct qseecom_unload_app_ireq *)req_buf;
534 smc_id = TZ_OS_APP_SHUTDOWN_ID;
535 desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
536 desc.args[0] = req->app_id;
Zhen Kong03f220d2019-02-01 17:12:34 -0800537 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700538 break;
539 }
540 case QSEOS_APP_LOOKUP_COMMAND: {
541 struct qseecom_check_app_ireq *req;
542 u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
543 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
544
545 if (!tzbuf)
546 return -ENOMEM;
547 req = (struct qseecom_check_app_ireq *)req_buf;
548 pr_debug("Lookup app_name = %s\n", req->app_name);
549 strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
550 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
551 smc_id = TZ_OS_APP_LOOKUP_ID;
552 desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
553 desc.args[0] = virt_to_phys(tzbuf);
554 desc.args[1] = strlen(req->app_name);
555 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800556 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700557 kzfree(tzbuf);
558 break;
559 }
560 case QSEOS_APP_REGION_NOTIFICATION: {
561 struct qsee_apps_region_info_ireq *req;
562 struct qsee_apps_region_info_64bit_ireq *req_64bit;
563
564 smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
565 desc.arginfo =
566 TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
567 if (qseecom.qsee_version < QSEE_VERSION_40) {
568 req = (struct qsee_apps_region_info_ireq *)
569 req_buf;
570 desc.args[0] = req->addr;
571 desc.args[1] = req->size;
572 } else {
573 req_64bit =
574 (struct qsee_apps_region_info_64bit_ireq *)
575 req_buf;
576 desc.args[0] = req_64bit->addr;
577 desc.args[1] = req_64bit->size;
578 }
579 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800580 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700581 break;
582 }
583 case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
584 struct qseecom_load_lib_image_ireq *req;
585 struct qseecom_load_lib_image_64bit_ireq *req_64bit;
586
587 smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
588 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
589 if (qseecom.qsee_version < QSEE_VERSION_40) {
590 req = (struct qseecom_load_lib_image_ireq *)
591 req_buf;
592 desc.args[0] = req->mdt_len;
593 desc.args[1] = req->img_len;
594 desc.args[2] = req->phy_addr;
595 } else {
596 req_64bit =
597 (struct qseecom_load_lib_image_64bit_ireq *)
598 req_buf;
599 desc.args[0] = req_64bit->mdt_len;
600 desc.args[1] = req_64bit->img_len;
601 desc.args[2] = req_64bit->phy_addr;
602 }
603 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800604 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700605 break;
606 }
607 case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
608 smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
609 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
610 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800611 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700612 break;
613 }
614 case QSEOS_REGISTER_LISTENER: {
615 struct qseecom_register_listener_ireq *req;
616 struct qseecom_register_listener_64bit_ireq *req_64bit;
617
618 desc.arginfo =
619 TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
620 if (qseecom.qsee_version < QSEE_VERSION_40) {
621 req = (struct qseecom_register_listener_ireq *)
622 req_buf;
623 desc.args[0] = req->listener_id;
624 desc.args[1] = req->sb_ptr;
625 desc.args[2] = req->sb_len;
626 } else {
627 req_64bit =
628 (struct qseecom_register_listener_64bit_ireq *)
629 req_buf;
630 desc.args[0] = req_64bit->listener_id;
631 desc.args[1] = req_64bit->sb_ptr;
632 desc.args[2] = req_64bit->sb_len;
633 }
Zhen Kong2f60f492017-06-29 15:22:14 -0700634 qseecom.smcinvoke_support = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700635 smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
Zhen Kong03f220d2019-02-01 17:12:34 -0800636 ret = __qseecom_scm_call2_locked(smc_id, &desc);
Zhen Kong50a15202019-01-29 14:16:00 -0800637 if (ret == -EIO) {
638 /* smcinvoke is not supported */
Zhen Kong2f60f492017-06-29 15:22:14 -0700639 qseecom.smcinvoke_support = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700640 smc_id = TZ_OS_REGISTER_LISTENER_ID;
Zhen Kong03f220d2019-02-01 17:12:34 -0800641 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700642 }
643 break;
644 }
645 case QSEOS_DEREGISTER_LISTENER: {
646 struct qseecom_unregister_listener_ireq *req;
647
648 req = (struct qseecom_unregister_listener_ireq *)
649 req_buf;
650 smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
651 desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
652 desc.args[0] = req->listener_id;
Zhen Kong03f220d2019-02-01 17:12:34 -0800653 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700654 break;
655 }
656 case QSEOS_LISTENER_DATA_RSP_COMMAND: {
657 struct qseecom_client_listener_data_irsp *req;
658
659 req = (struct qseecom_client_listener_data_irsp *)
660 req_buf;
661 smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
662 desc.arginfo =
663 TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
664 desc.args[0] = req->listener_id;
665 desc.args[1] = req->status;
Zhen Kong03f220d2019-02-01 17:12:34 -0800666 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700667 break;
668 }
669 case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
670 struct qseecom_client_listener_data_irsp *req;
671 struct qseecom_client_listener_data_64bit_irsp *req_64;
672
673 smc_id =
674 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
675 desc.arginfo =
676 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
677 if (qseecom.qsee_version < QSEE_VERSION_40) {
678 req =
679 (struct qseecom_client_listener_data_irsp *)
680 req_buf;
681 desc.args[0] = req->listener_id;
682 desc.args[1] = req->status;
683 desc.args[2] = req->sglistinfo_ptr;
684 desc.args[3] = req->sglistinfo_len;
685 } else {
686 req_64 =
687 (struct qseecom_client_listener_data_64bit_irsp *)
688 req_buf;
689 desc.args[0] = req_64->listener_id;
690 desc.args[1] = req_64->status;
691 desc.args[2] = req_64->sglistinfo_ptr;
692 desc.args[3] = req_64->sglistinfo_len;
693 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800694 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700695 break;
696 }
697 case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
698 struct qseecom_load_app_ireq *req;
699 struct qseecom_load_app_64bit_ireq *req_64bit;
700
701 smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
702 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
703 if (qseecom.qsee_version < QSEE_VERSION_40) {
704 req = (struct qseecom_load_app_ireq *)req_buf;
705 desc.args[0] = req->mdt_len;
706 desc.args[1] = req->img_len;
707 desc.args[2] = req->phy_addr;
708 } else {
709 req_64bit =
710 (struct qseecom_load_app_64bit_ireq *)req_buf;
711 desc.args[0] = req_64bit->mdt_len;
712 desc.args[1] = req_64bit->img_len;
713 desc.args[2] = req_64bit->phy_addr;
714 }
715 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800716 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700717 break;
718 }
719 case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
720 smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
721 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
722 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800723 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700724 break;
725 }
726
727 case QSEOS_CLIENT_SEND_DATA_COMMAND: {
728 struct qseecom_client_send_data_ireq *req;
729 struct qseecom_client_send_data_64bit_ireq *req_64bit;
730
731 smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
732 desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
733 if (qseecom.qsee_version < QSEE_VERSION_40) {
734 req = (struct qseecom_client_send_data_ireq *)
735 req_buf;
736 desc.args[0] = req->app_id;
737 desc.args[1] = req->req_ptr;
738 desc.args[2] = req->req_len;
739 desc.args[3] = req->rsp_ptr;
740 desc.args[4] = req->rsp_len;
741 } else {
742 req_64bit =
743 (struct qseecom_client_send_data_64bit_ireq *)
744 req_buf;
745 desc.args[0] = req_64bit->app_id;
746 desc.args[1] = req_64bit->req_ptr;
747 desc.args[2] = req_64bit->req_len;
748 desc.args[3] = req_64bit->rsp_ptr;
749 desc.args[4] = req_64bit->rsp_len;
750 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800751 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700752 break;
753 }
754 case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
755 struct qseecom_client_send_data_ireq *req;
756 struct qseecom_client_send_data_64bit_ireq *req_64bit;
757
758 smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
759 desc.arginfo =
760 TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
761 if (qseecom.qsee_version < QSEE_VERSION_40) {
762 req = (struct qseecom_client_send_data_ireq *)
763 req_buf;
764 desc.args[0] = req->app_id;
765 desc.args[1] = req->req_ptr;
766 desc.args[2] = req->req_len;
767 desc.args[3] = req->rsp_ptr;
768 desc.args[4] = req->rsp_len;
769 desc.args[5] = req->sglistinfo_ptr;
770 desc.args[6] = req->sglistinfo_len;
771 } else {
772 req_64bit =
773 (struct qseecom_client_send_data_64bit_ireq *)
774 req_buf;
775 desc.args[0] = req_64bit->app_id;
776 desc.args[1] = req_64bit->req_ptr;
777 desc.args[2] = req_64bit->req_len;
778 desc.args[3] = req_64bit->rsp_ptr;
779 desc.args[4] = req_64bit->rsp_len;
780 desc.args[5] = req_64bit->sglistinfo_ptr;
781 desc.args[6] = req_64bit->sglistinfo_len;
782 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800783 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700784 break;
785 }
786 case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
787 struct qseecom_client_send_service_ireq *req;
788
789 req = (struct qseecom_client_send_service_ireq *)
790 req_buf;
791 smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
792 desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
793 desc.args[0] = req->key_type;
794 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800795 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700796 break;
797 }
798 case QSEOS_RPMB_ERASE_COMMAND: {
799 smc_id = TZ_OS_RPMB_ERASE_ID;
800 desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
801 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800802 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700803 break;
804 }
805 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
806 smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
807 desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
808 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800809 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700810 break;
811 }
812 case QSEOS_GENERATE_KEY: {
813 u32 tzbuflen = PAGE_ALIGN(sizeof
814 (struct qseecom_key_generate_ireq) -
815 sizeof(uint32_t));
816 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
817
818 if (!tzbuf)
819 return -ENOMEM;
820 memset(tzbuf, 0, tzbuflen);
821 memcpy(tzbuf, req_buf + sizeof(uint32_t),
822 (sizeof(struct qseecom_key_generate_ireq) -
823 sizeof(uint32_t)));
824 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
825 smc_id = TZ_OS_KS_GEN_KEY_ID;
826 desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
827 desc.args[0] = virt_to_phys(tzbuf);
828 desc.args[1] = tzbuflen;
829 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800830 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700831 kzfree(tzbuf);
832 break;
833 }
834 case QSEOS_DELETE_KEY: {
835 u32 tzbuflen = PAGE_ALIGN(sizeof
836 (struct qseecom_key_delete_ireq) -
837 sizeof(uint32_t));
838 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
839
840 if (!tzbuf)
841 return -ENOMEM;
842 memset(tzbuf, 0, tzbuflen);
843 memcpy(tzbuf, req_buf + sizeof(uint32_t),
844 (sizeof(struct qseecom_key_delete_ireq) -
845 sizeof(uint32_t)));
846 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
847 smc_id = TZ_OS_KS_DEL_KEY_ID;
848 desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
849 desc.args[0] = virt_to_phys(tzbuf);
850 desc.args[1] = tzbuflen;
851 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800852 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700853 kzfree(tzbuf);
854 break;
855 }
856 case QSEOS_SET_KEY: {
857 u32 tzbuflen = PAGE_ALIGN(sizeof
858 (struct qseecom_key_select_ireq) -
859 sizeof(uint32_t));
860 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
861
862 if (!tzbuf)
863 return -ENOMEM;
864 memset(tzbuf, 0, tzbuflen);
865 memcpy(tzbuf, req_buf + sizeof(uint32_t),
866 (sizeof(struct qseecom_key_select_ireq) -
867 sizeof(uint32_t)));
868 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
869 smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
870 desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
871 desc.args[0] = virt_to_phys(tzbuf);
872 desc.args[1] = tzbuflen;
873 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800874 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700875 kzfree(tzbuf);
876 break;
877 }
878 case QSEOS_UPDATE_KEY_USERINFO: {
879 u32 tzbuflen = PAGE_ALIGN(sizeof
880 (struct qseecom_key_userinfo_update_ireq) -
881 sizeof(uint32_t));
882 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
883
884 if (!tzbuf)
885 return -ENOMEM;
886 memset(tzbuf, 0, tzbuflen);
887 memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
888 (struct qseecom_key_userinfo_update_ireq) -
889 sizeof(uint32_t)));
890 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
891 smc_id = TZ_OS_KS_UPDATE_KEY_ID;
892 desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
893 desc.args[0] = virt_to_phys(tzbuf);
894 desc.args[1] = tzbuflen;
895 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
Zhen Kong03f220d2019-02-01 17:12:34 -0800896 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700897 kzfree(tzbuf);
898 break;
899 }
900 case QSEOS_TEE_OPEN_SESSION: {
901 struct qseecom_qteec_ireq *req;
902 struct qseecom_qteec_64bit_ireq *req_64bit;
903
904 smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
905 desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
906 if (qseecom.qsee_version < QSEE_VERSION_40) {
907 req = (struct qseecom_qteec_ireq *)req_buf;
908 desc.args[0] = req->app_id;
909 desc.args[1] = req->req_ptr;
910 desc.args[2] = req->req_len;
911 desc.args[3] = req->resp_ptr;
912 desc.args[4] = req->resp_len;
913 } else {
914 req_64bit = (struct qseecom_qteec_64bit_ireq *)
915 req_buf;
916 desc.args[0] = req_64bit->app_id;
917 desc.args[1] = req_64bit->req_ptr;
918 desc.args[2] = req_64bit->req_len;
919 desc.args[3] = req_64bit->resp_ptr;
920 desc.args[4] = req_64bit->resp_len;
921 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800922 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700923 break;
924 }
925 case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
926 struct qseecom_qteec_ireq *req;
927 struct qseecom_qteec_64bit_ireq *req_64bit;
928
929 smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
930 desc.arginfo =
931 TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
932 if (qseecom.qsee_version < QSEE_VERSION_40) {
933 req = (struct qseecom_qteec_ireq *)req_buf;
934 desc.args[0] = req->app_id;
935 desc.args[1] = req->req_ptr;
936 desc.args[2] = req->req_len;
937 desc.args[3] = req->resp_ptr;
938 desc.args[4] = req->resp_len;
939 desc.args[5] = req->sglistinfo_ptr;
940 desc.args[6] = req->sglistinfo_len;
941 } else {
942 req_64bit = (struct qseecom_qteec_64bit_ireq *)
943 req_buf;
944 desc.args[0] = req_64bit->app_id;
945 desc.args[1] = req_64bit->req_ptr;
946 desc.args[2] = req_64bit->req_len;
947 desc.args[3] = req_64bit->resp_ptr;
948 desc.args[4] = req_64bit->resp_len;
949 desc.args[5] = req_64bit->sglistinfo_ptr;
950 desc.args[6] = req_64bit->sglistinfo_len;
951 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800952 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700953 break;
954 }
955 case QSEOS_TEE_INVOKE_COMMAND: {
956 struct qseecom_qteec_ireq *req;
957 struct qseecom_qteec_64bit_ireq *req_64bit;
958
959 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
960 desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
961 if (qseecom.qsee_version < QSEE_VERSION_40) {
962 req = (struct qseecom_qteec_ireq *)req_buf;
963 desc.args[0] = req->app_id;
964 desc.args[1] = req->req_ptr;
965 desc.args[2] = req->req_len;
966 desc.args[3] = req->resp_ptr;
967 desc.args[4] = req->resp_len;
968 } else {
969 req_64bit = (struct qseecom_qteec_64bit_ireq *)
970 req_buf;
971 desc.args[0] = req_64bit->app_id;
972 desc.args[1] = req_64bit->req_ptr;
973 desc.args[2] = req_64bit->req_len;
974 desc.args[3] = req_64bit->resp_ptr;
975 desc.args[4] = req_64bit->resp_len;
976 }
Zhen Kong03f220d2019-02-01 17:12:34 -0800977 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700978 break;
979 }
980 case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
981 struct qseecom_qteec_ireq *req;
982 struct qseecom_qteec_64bit_ireq *req_64bit;
983
984 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
985 desc.arginfo =
986 TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
987 if (qseecom.qsee_version < QSEE_VERSION_40) {
988 req = (struct qseecom_qteec_ireq *)req_buf;
989 desc.args[0] = req->app_id;
990 desc.args[1] = req->req_ptr;
991 desc.args[2] = req->req_len;
992 desc.args[3] = req->resp_ptr;
993 desc.args[4] = req->resp_len;
994 desc.args[5] = req->sglistinfo_ptr;
995 desc.args[6] = req->sglistinfo_len;
996 } else {
997 req_64bit = (struct qseecom_qteec_64bit_ireq *)
998 req_buf;
999 desc.args[0] = req_64bit->app_id;
1000 desc.args[1] = req_64bit->req_ptr;
1001 desc.args[2] = req_64bit->req_len;
1002 desc.args[3] = req_64bit->resp_ptr;
1003 desc.args[4] = req_64bit->resp_len;
1004 desc.args[5] = req_64bit->sglistinfo_ptr;
1005 desc.args[6] = req_64bit->sglistinfo_len;
1006 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001007 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001008 break;
1009 }
1010 case QSEOS_TEE_CLOSE_SESSION: {
1011 struct qseecom_qteec_ireq *req;
1012 struct qseecom_qteec_64bit_ireq *req_64bit;
1013
1014 smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
1015 desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
1016 if (qseecom.qsee_version < QSEE_VERSION_40) {
1017 req = (struct qseecom_qteec_ireq *)req_buf;
1018 desc.args[0] = req->app_id;
1019 desc.args[1] = req->req_ptr;
1020 desc.args[2] = req->req_len;
1021 desc.args[3] = req->resp_ptr;
1022 desc.args[4] = req->resp_len;
1023 } else {
1024 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1025 req_buf;
1026 desc.args[0] = req_64bit->app_id;
1027 desc.args[1] = req_64bit->req_ptr;
1028 desc.args[2] = req_64bit->req_len;
1029 desc.args[3] = req_64bit->resp_ptr;
1030 desc.args[4] = req_64bit->resp_len;
1031 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001032 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001033 break;
1034 }
1035 case QSEOS_TEE_REQUEST_CANCELLATION: {
1036 struct qseecom_qteec_ireq *req;
1037 struct qseecom_qteec_64bit_ireq *req_64bit;
1038
1039 smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
1040 desc.arginfo =
1041 TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
1042 if (qseecom.qsee_version < QSEE_VERSION_40) {
1043 req = (struct qseecom_qteec_ireq *)req_buf;
1044 desc.args[0] = req->app_id;
1045 desc.args[1] = req->req_ptr;
1046 desc.args[2] = req->req_len;
1047 desc.args[3] = req->resp_ptr;
1048 desc.args[4] = req->resp_len;
1049 } else {
1050 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1051 req_buf;
1052 desc.args[0] = req_64bit->app_id;
1053 desc.args[1] = req_64bit->req_ptr;
1054 desc.args[2] = req_64bit->req_len;
1055 desc.args[3] = req_64bit->resp_ptr;
1056 desc.args[4] = req_64bit->resp_len;
1057 }
Zhen Kong03f220d2019-02-01 17:12:34 -08001058 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001059 break;
1060 }
1061 case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
1062 struct qseecom_continue_blocked_request_ireq *req =
1063 (struct qseecom_continue_blocked_request_ireq *)
1064 req_buf;
Zhen Kong2f60f492017-06-29 15:22:14 -07001065 if (qseecom.smcinvoke_support)
1066 smc_id =
1067 TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
1068 else
1069 smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001070 desc.arginfo =
1071 TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
Zhen Kong2f60f492017-06-29 15:22:14 -07001072 desc.args[0] = req->app_or_session_id;
Zhen Kong03f220d2019-02-01 17:12:34 -08001073 ret = __qseecom_scm_call2_locked(smc_id, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001074 break;
1075 }
1076 default: {
1077 pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
1078 qseos_cmd_id);
1079 ret = -EINVAL;
1080 break;
1081 }
1082 } /*end of switch (qsee_cmd_id) */
1083 break;
1084 } /*end of case SCM_SVC_TZSCHEDULER*/
1085 default: {
1086 pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
1087 svc_id);
1088 ret = -EINVAL;
1089 break;
1090 }
1091 } /*end of switch svc_id */
1092 scm_resp->result = desc.ret[0];
1093 scm_resp->resp_type = desc.ret[1];
1094 scm_resp->data = desc.ret[2];
1095 pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
1096 svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
1097 pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
1098 scm_resp->result, scm_resp->resp_type, scm_resp->data);
1099 return ret;
1100}
1101
1102
1103static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1104 size_t cmd_len, void *resp_buf, size_t resp_len)
1105{
1106 if (!is_scm_armv8())
1107 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1108 resp_buf, resp_len);
1109 else
1110 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1111}
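/*
 * Illustrative usage sketch (mirroring call sites later in this file, not a
 * new API): callers build a QSEOS request whose first 32-bit word is the
 * command id, then check resp.result.
 *
 *   struct qseecom_command_scm_resp resp;
 *   struct qseecom_register_listener_ireq req;
 *
 *   req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
 *   req.listener_id = id;
 *   resp.result = QSEOS_RESULT_INCOMPLETE;
 *   ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req, sizeof(req),
 *                          &resp, sizeof(resp));
 *   if (!ret && resp.result != QSEOS_RESULT_SUCCESS)
 *           ret = -EPERM;
 */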
1112
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001113static struct qseecom_registered_listener_list *__qseecom_find_svc(
1114 int32_t listener_id)
1115{
1116 struct qseecom_registered_listener_list *entry = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001117
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001118 list_for_each_entry(entry,
1119 &qseecom.registered_listener_list_head, list) {
1120 if (entry->svc.listener_id == listener_id)
1121 break;
1122 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001123 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08001124 pr_debug("Service id: %u is not found\n", listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001125 return NULL;
1126 }
1127
1128 return entry;
1129}
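/*
 * Note (added for clarity, assumption): __qseecom_find_svc() walks
 * qseecom.registered_listener_list_head without taking any lock itself, so
 * callers are expected to hold listener_access_lock around the lookup and
 * around any use of the returned entry.
 */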
1130
1131static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1132 struct qseecom_dev_handle *handle,
1133 struct qseecom_register_listener_req *listener)
1134{
1135 int ret = 0;
1136 struct qseecom_register_listener_ireq req;
1137 struct qseecom_register_listener_64bit_ireq req_64bit;
1138 struct qseecom_command_scm_resp resp;
1139 ion_phys_addr_t pa;
1140 void *cmd_buf = NULL;
1141 size_t cmd_len;
1142
1143 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001144 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001145 listener->ifd_data_fd);
1146 if (IS_ERR_OR_NULL(svc->ihandle)) {
1147 pr_err("Ion client could not retrieve the handle\n");
1148 return -ENOMEM;
1149 }
1150
1151 /* Get the physical address of the ION BUF */
1152 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1153 if (ret) {
1154 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1155 ret);
1156 return ret;
1157 }
 1158 /* Populate the structure for the register-listener scm call */
1159 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1160 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1161 pr_err("ION memory mapping for listener shared buffer failed\n");
1162 return -ENOMEM;
1163 }
1164 svc->sb_phys = (phys_addr_t)pa;
1165
1166 if (qseecom.qsee_version < QSEE_VERSION_40) {
1167 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1168 req.listener_id = svc->svc.listener_id;
1169 req.sb_len = svc->sb_length;
1170 req.sb_ptr = (uint32_t)svc->sb_phys;
1171 cmd_buf = (void *)&req;
1172 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1173 } else {
1174 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1175 req_64bit.listener_id = svc->svc.listener_id;
1176 req_64bit.sb_len = svc->sb_length;
1177 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1178 cmd_buf = (void *)&req_64bit;
1179 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1180 }
1181
1182 resp.result = QSEOS_RESULT_INCOMPLETE;
1183
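	/*
	 * Note (added for clarity, assumption): listener_access_lock is
	 * released before taking app_access_lock for the SCM call below and
	 * re-acquired afterwards, so other listener operations are not blocked
	 * behind a potentially slow trip to the secure world and the two
	 * mutexes are never nested.
	 */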
Zhen Kongc4c162a2019-01-23 12:07:12 -08001184 mutex_unlock(&listener_access_lock);
1185 mutex_lock(&app_access_lock);
1186 __qseecom_reentrancy_check_if_no_app_blocked(
1187 TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001188 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1189 &resp, sizeof(resp));
Zhen Kongc4c162a2019-01-23 12:07:12 -08001190 mutex_unlock(&app_access_lock);
1191 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001192 if (ret) {
1193 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1194 return -EINVAL;
1195 }
1196
1197 if (resp.result != QSEOS_RESULT_SUCCESS) {
1198 pr_err("Error SB registration req: resp.result = %d\n",
1199 resp.result);
1200 return -EPERM;
1201 }
1202 return 0;
1203}
1204
1205static int qseecom_register_listener(struct qseecom_dev_handle *data,
1206 void __user *argp)
1207{
1208 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001209 struct qseecom_register_listener_req rcvd_lstnr;
1210 struct qseecom_registered_listener_list *new_entry;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001211 struct qseecom_registered_listener_list *ptr_svc;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001212
1213 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1214 if (ret) {
1215 pr_err("copy_from_user failed\n");
1216 return ret;
1217 }
1218 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1219 rcvd_lstnr.sb_size))
1220 return -EFAULT;
1221
Zhen Kong3c674612018-09-06 22:51:27 -07001222 data->listener.id = rcvd_lstnr.listener_id;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001223
1224 ptr_svc = __qseecom_find_svc(rcvd_lstnr.listener_id);
1225 if (ptr_svc) {
1226 if (ptr_svc->unregister_pending == false) {
1227 pr_err("Service %d is not unique\n",
Zhen Kong3c674612018-09-06 22:51:27 -07001228 rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001229 data->released = true;
1230 return -EBUSY;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001231 } else {
1232 /*wait until listener is unregistered*/
1233 pr_debug("register %d has to wait\n",
1234 rcvd_lstnr.listener_id);
1235 mutex_unlock(&listener_access_lock);
1236 ret = wait_event_freezable(
1237 qseecom.register_lsnr_pending_wq,
1238 list_empty(
1239 &qseecom.unregister_lsnr_pending_list_head));
1240 if (ret) {
1241 pr_err("interrupted register_pending_wq %d\n",
1242 rcvd_lstnr.listener_id);
1243 mutex_lock(&listener_access_lock);
1244 return -ERESTARTSYS;
1245 }
1246 mutex_lock(&listener_access_lock);
1247 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001248 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001249 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1250 if (!new_entry)
1251 return -ENOMEM;
1252 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
Zhen Kongbcdeda22018-11-16 13:50:51 -08001253 new_entry->rcv_req_flag = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001254
1255 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1256 new_entry->sb_length = rcvd_lstnr.sb_size;
1257 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1258 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
Zhen Kong3c674612018-09-06 22:51:27 -07001259 pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
1260 rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001261 kzfree(new_entry);
1262 return -ENOMEM;
1263 }
1264
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001265 init_waitqueue_head(&new_entry->rcv_req_wq);
1266 init_waitqueue_head(&new_entry->listener_block_app_wq);
1267 new_entry->send_resp_flag = 0;
1268 new_entry->listener_in_use = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001269 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001270
Zhen Kong3c674612018-09-06 22:51:27 -07001271 pr_warn("Service %d is registered\n", rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001272 return ret;
1273}
1274
Zhen Kongbcdeda22018-11-16 13:50:51 -08001275static int __qseecom_unregister_listener(struct qseecom_dev_handle *data,
1276 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001277{
1278 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001279 struct qseecom_register_listener_ireq req;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001280 struct qseecom_command_scm_resp resp;
1281 struct ion_handle *ihandle = NULL; /* Retrieve phy addr */
1282
1283 req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
1284 req.listener_id = data->listener.id;
1285 resp.result = QSEOS_RESULT_INCOMPLETE;
1286
Zhen Kongc4c162a2019-01-23 12:07:12 -08001287 mutex_unlock(&listener_access_lock);
1288 mutex_lock(&app_access_lock);
1289 __qseecom_reentrancy_check_if_no_app_blocked(
1290 TZ_OS_DEREGISTER_LISTENER_ID);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001291 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
1292 sizeof(req), &resp, sizeof(resp));
Zhen Kongc4c162a2019-01-23 12:07:12 -08001293 mutex_unlock(&app_access_lock);
1294 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001295 if (ret) {
1296 pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
1297 ret, data->listener.id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001298 if (ret == -EBUSY)
1299 return ret;
Zhen Kong3c674612018-09-06 22:51:27 -07001300 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001301 }
1302
1303 if (resp.result != QSEOS_RESULT_SUCCESS) {
1304 pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
1305 resp.result, data->listener.id);
Zhen Kong3c674612018-09-06 22:51:27 -07001306 ret = -EPERM;
1307 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001308 }
1309
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001310 while (atomic_read(&data->ioctl_count) > 1) {
1311 if (wait_event_freezable(data->abort_wq,
1312 atomic_read(&data->ioctl_count) <= 1)) {
1313 pr_err("Interrupted from abort\n");
1314 ret = -ERESTARTSYS;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001315 }
1316 }
1317
Zhen Kong3c674612018-09-06 22:51:27 -07001318exit:
1319 if (ptr_svc->sb_virt) {
1320 ihandle = ptr_svc->ihandle;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001321 if (!IS_ERR_OR_NULL(ihandle)) {
1322 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
1323 ion_free(qseecom.ion_clnt, ihandle);
1324 }
1325 }
Zhen Kong3c674612018-09-06 22:51:27 -07001326 list_del(&ptr_svc->list);
1327 kzfree(ptr_svc);
1328
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001329 data->released = true;
Zhen Kong3c674612018-09-06 22:51:27 -07001330 pr_warn("Service %d is unregistered\n", data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001331 return ret;
1332}
1333
Zhen Kongbcdeda22018-11-16 13:50:51 -08001334static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
1335{
1336 struct qseecom_registered_listener_list *ptr_svc = NULL;
1337 struct qseecom_unregister_pending_list *entry = NULL;
1338
1339 ptr_svc = __qseecom_find_svc(data->listener.id);
1340 if (!ptr_svc) {
1341 pr_err("Unregiser invalid listener ID %d\n", data->listener.id);
1342 return -ENODATA;
1343 }
1344 /* stop CA thread waiting for listener response */
1345 ptr_svc->abort = 1;
1346 wake_up_interruptible_all(&qseecom.send_resp_wq);
1347
Zhen Kongc4c162a2019-01-23 12:07:12 -08001348 /* stop listener thread waiting for listener request */
1349 data->abort = 1;
1350 wake_up_all(&ptr_svc->rcv_req_wq);
1351
Zhen Kongbcdeda22018-11-16 13:50:51 -08001352 /* return directly if pending*/
1353 if (ptr_svc->unregister_pending)
1354 return 0;
1355
1356 /*add unregistration into pending list*/
1357 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1358 if (!entry)
1359 return -ENOMEM;
1360 entry->data = data;
1361 list_add_tail(&entry->list,
1362 &qseecom.unregister_lsnr_pending_list_head);
1363 ptr_svc->unregister_pending = true;
1364 pr_debug("unregister %d pending\n", data->listener.id);
1365 return 0;
1366}
1367
1368static void __qseecom_processing_pending_lsnr_unregister(void)
1369{
1370 struct qseecom_unregister_pending_list *entry = NULL;
1371 struct qseecom_registered_listener_list *ptr_svc = NULL;
1372 struct list_head *pos;
1373 int ret = 0;
1374
1375 mutex_lock(&listener_access_lock);
1376 while (!list_empty(&qseecom.unregister_lsnr_pending_list_head)) {
1377 pos = qseecom.unregister_lsnr_pending_list_head.next;
1378 entry = list_entry(pos,
1379 struct qseecom_unregister_pending_list, list);
1380 if (entry && entry->data) {
1381 pr_debug("process pending unregister %d\n",
1382 entry->data->listener.id);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08001383 /* don't process if qseecom_release is not called*/
1384 if (!entry->data->listener.release_called)
1385 break;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001386 ptr_svc = __qseecom_find_svc(
1387 entry->data->listener.id);
1388 if (ptr_svc) {
1389 ret = __qseecom_unregister_listener(
1390 entry->data, ptr_svc);
1391 if (ret == -EBUSY) {
1392 pr_debug("unregister %d pending again\n",
1393 entry->data->listener.id);
1394 mutex_unlock(&listener_access_lock);
1395 return;
1396 }
1397 } else
1398 pr_err("invalid listener %d\n",
1399 entry->data->listener.id);
1400 kzfree(entry->data);
1401 }
1402 list_del(pos);
1403 kzfree(entry);
1404 }
1405 mutex_unlock(&listener_access_lock);
1406 wake_up_interruptible(&qseecom.register_lsnr_pending_wq);
1407}
1408
Zhen Kongc4c162a2019-01-23 12:07:12 -08001409static void __wakeup_unregister_listener_kthread(void)
1410{
1411 atomic_set(&qseecom.unregister_lsnr_kthread_state,
1412 LSNR_UNREG_KT_WAKEUP);
1413 wake_up_interruptible(&qseecom.unregister_lsnr_kthread_wq);
1414}
1415
1416static int __qseecom_unregister_listener_kthread_func(void *data)
1417{
1418 while (!kthread_should_stop()) {
1419 wait_event_freezable(
1420 qseecom.unregister_lsnr_kthread_wq,
1421 atomic_read(&qseecom.unregister_lsnr_kthread_state)
1422 == LSNR_UNREG_KT_WAKEUP);
1423 pr_debug("kthread to unregister listener is called %d\n",
1424 atomic_read(&qseecom.unregister_lsnr_kthread_state));
1425 __qseecom_processing_pending_lsnr_unregister();
1426 atomic_set(&qseecom.unregister_lsnr_kthread_state,
1427 LSNR_UNREG_KT_SLEEP);
1428 }
1429 pr_warn("kthread to unregister listener stopped\n");
1430 return 0;
1431}
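/*
 * Illustrative sketch (assumption, creation is not shown in this excerpt):
 * the kthread above is expected to be started once at probe time and woken
 * from the release/unregister paths via
 * __wakeup_unregister_listener_kthread(), roughly:
 *
 *   qseecom.unregister_lsnr_kthread_task = kthread_run(
 *                   __qseecom_unregister_listener_kthread_func,
 *                   NULL, "qseecom-unreg-lsnr");
 *   if (IS_ERR(qseecom.unregister_lsnr_kthread_task))
 *           return PTR_ERR(qseecom.unregister_lsnr_kthread_task);
 */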
1432
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001433static int __qseecom_set_msm_bus_request(uint32_t mode)
1434{
1435 int ret = 0;
1436 struct qseecom_clk *qclk;
1437
1438 qclk = &qseecom.qsee;
1439 if (qclk->ce_core_src_clk != NULL) {
1440 if (mode == INACTIVE) {
1441 __qseecom_disable_clk(CLK_QSEE);
1442 } else {
1443 ret = __qseecom_enable_clk(CLK_QSEE);
1444 if (ret)
1445 pr_err("CLK enabling failed (%d) MODE (%d)\n",
1446 ret, mode);
1447 }
1448 }
1449
1450 if ((!ret) && (qseecom.current_mode != mode)) {
1451 ret = msm_bus_scale_client_update_request(
1452 qseecom.qsee_perf_client, mode);
1453 if (ret) {
1454 pr_err("Bandwidth req failed(%d) MODE (%d)\n",
1455 ret, mode);
1456 if (qclk->ce_core_src_clk != NULL) {
1457 if (mode == INACTIVE) {
1458 ret = __qseecom_enable_clk(CLK_QSEE);
1459 if (ret)
1460 pr_err("CLK enable failed\n");
1461 } else
1462 __qseecom_disable_clk(CLK_QSEE);
1463 }
1464 }
1465 qseecom.current_mode = mode;
1466 }
1467 return ret;
1468}
1469
1470static void qseecom_bw_inactive_req_work(struct work_struct *work)
1471{
1472 mutex_lock(&app_access_lock);
1473 mutex_lock(&qsee_bw_mutex);
1474 if (qseecom.timer_running)
1475 __qseecom_set_msm_bus_request(INACTIVE);
1476 pr_debug("current_mode = %d, cumulative_mode = %d\n",
1477 qseecom.current_mode, qseecom.cumulative_mode);
1478 qseecom.timer_running = false;
1479 mutex_unlock(&qsee_bw_mutex);
1480 mutex_unlock(&app_access_lock);
1481}
1482
1483static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
1484{
1485 schedule_work(&qseecom.bw_inactive_req_ws);
1486}
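/*
 * Note (added for clarity): the bandwidth scale-down timer callback runs in
 * atomic (timer) context, where the mutexes used above cannot be taken, so
 * it only schedules bw_inactive_req_ws; the actual drop to INACTIVE happens
 * in qseecom_bw_inactive_req_work() in process context.
 */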
1487
1488static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1489{
1490 struct qseecom_clk *qclk;
1491 int ret = 0;
1492
1493 mutex_lock(&clk_access_lock);
1494 if (ce == CLK_QSEE)
1495 qclk = &qseecom.qsee;
1496 else
1497 qclk = &qseecom.ce_drv;
1498
1499 if (qclk->clk_access_cnt > 2) {
1500 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1501 ret = -EINVAL;
1502 goto err_dec_ref_cnt;
1503 }
1504 if (qclk->clk_access_cnt == 2)
1505 qclk->clk_access_cnt--;
1506
1507err_dec_ref_cnt:
1508 mutex_unlock(&clk_access_lock);
1509 return ret;
1510}
1511
1512
1513static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1514{
1515 int32_t ret = 0;
1516 int32_t request_mode = INACTIVE;
1517
1518 mutex_lock(&qsee_bw_mutex);
1519 if (mode == 0) {
1520 if (qseecom.cumulative_mode > MEDIUM)
1521 request_mode = HIGH;
1522 else
1523 request_mode = qseecom.cumulative_mode;
1524 } else {
1525 request_mode = mode;
1526 }
1527
1528 ret = __qseecom_set_msm_bus_request(request_mode);
1529 if (ret) {
1530 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1531 ret, request_mode);
1532 goto err_scale_timer;
1533 }
1534
1535 if (qseecom.timer_running) {
1536 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1537 if (ret) {
1538 pr_err("Failed to decrease clk ref count.\n");
1539 goto err_scale_timer;
1540 }
1541 del_timer_sync(&(qseecom.bw_scale_down_timer));
1542 qseecom.timer_running = false;
1543 }
1544err_scale_timer:
1545 mutex_unlock(&qsee_bw_mutex);
1546 return ret;
1547}
1548
1549
1550static int qseecom_unregister_bus_bandwidth_needs(
1551 struct qseecom_dev_handle *data)
1552{
1553 int32_t ret = 0;
1554
1555 qseecom.cumulative_mode -= data->mode;
1556 data->mode = INACTIVE;
1557
1558 return ret;
1559}
1560
1561static int __qseecom_register_bus_bandwidth_needs(
1562 struct qseecom_dev_handle *data, uint32_t request_mode)
1563{
1564 int32_t ret = 0;
1565
1566 if (data->mode == INACTIVE) {
1567 qseecom.cumulative_mode += request_mode;
1568 data->mode = request_mode;
1569 } else {
1570 if (data->mode != request_mode) {
1571 qseecom.cumulative_mode -= data->mode;
1572 qseecom.cumulative_mode += request_mode;
1573 data->mode = request_mode;
1574 }
1575 }
1576 return ret;
1577}
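/*
 * Worked example (illustrative, not from the original source):
 * cumulative_mode is a running sum of every client's requested mode. If
 * client A has registered MEDIUM and client B registers HIGH,
 * cumulative_mode becomes MEDIUM + HIGH; when a client later calls
 * qseecom_scale_bus_bandwidth_timer() with mode == 0, any sum greater than
 * MEDIUM is mapped to a HIGH bus request, otherwise the sum itself is
 * requested.
 */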
1578
1579static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1580{
1581 int ret = 0;
1582
1583 ret = qsee_vote_for_clock(data, CLK_DFAB);
1584 if (ret) {
1585 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1586 goto perf_enable_exit;
1587 }
1588 ret = qsee_vote_for_clock(data, CLK_SFPB);
1589 if (ret) {
1590 qsee_disable_clock_vote(data, CLK_DFAB);
1591 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1592 goto perf_enable_exit;
1593 }
1594
1595perf_enable_exit:
1596 return ret;
1597}
1598
1599static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1600 void __user *argp)
1601{
1602 int32_t ret = 0;
1603 int32_t req_mode;
1604
1605 if (qseecom.no_clock_support)
1606 return 0;
1607
1608 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1609 if (ret) {
1610 pr_err("copy_from_user failed\n");
1611 return ret;
1612 }
1613 if (req_mode > HIGH) {
1614 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1615 return -EINVAL;
1616 }
1617
1618 /*
1619 * Register bus bandwidth needs if bus scaling feature is enabled;
1620 * otherwise, qseecom enable/disable clocks for the client directly.
1621 */
1622 if (qseecom.support_bus_scaling) {
1623 mutex_lock(&qsee_bw_mutex);
1624 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1625 mutex_unlock(&qsee_bw_mutex);
1626 } else {
1627 pr_debug("Bus scaling feature is NOT enabled\n");
1628 pr_debug("request bandwidth mode %d for the client\n",
1629 req_mode);
1630 if (req_mode != INACTIVE) {
1631 ret = qseecom_perf_enable(data);
1632 if (ret)
1633 pr_err("Failed to vote for clock with err %d\n",
1634 ret);
1635 } else {
1636 qsee_disable_clock_vote(data, CLK_DFAB);
1637 qsee_disable_clock_vote(data, CLK_SFPB);
1638 }
1639 }
1640 return ret;
1641}
1642
1643static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1644{
1645 if (qseecom.no_clock_support)
1646 return;
1647
1648 mutex_lock(&qsee_bw_mutex);
1649 qseecom.bw_scale_down_timer.expires = jiffies +
1650 msecs_to_jiffies(duration);
1651 mod_timer(&(qseecom.bw_scale_down_timer),
1652 qseecom.bw_scale_down_timer.expires);
1653 qseecom.timer_running = true;
1654 mutex_unlock(&qsee_bw_mutex);
1655}
1656
1657static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1658{
1659 if (!qseecom.support_bus_scaling)
1660 qsee_disable_clock_vote(data, CLK_SFPB);
1661 else
1662 __qseecom_add_bw_scale_down_timer(
1663 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1664}
1665
1666static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1667{
1668 int ret = 0;
1669
1670 if (qseecom.support_bus_scaling) {
1671 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1672 if (ret)
1673 pr_err("Failed to set bw MEDIUM.\n");
1674 } else {
1675 ret = qsee_vote_for_clock(data, CLK_SFPB);
1676 if (ret)
1677 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1678 }
1679 return ret;
1680}
1681
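/*
 * Register the client's shared buffer: import the ION buffer from the
 * passed fd, look up its physical address, map it into the kernel, and
 * record the physical/virtual addresses and length in the client handle.
 */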
1682static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1683 void __user *argp)
1684{
1685 ion_phys_addr_t pa;
1686 int32_t ret;
1687 struct qseecom_set_sb_mem_param_req req;
1688 size_t len;
1689
1690 /* Copy the shared buffer registration request from userspace */
1691 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1692 return -EFAULT;
1693
1694 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1695 (req.sb_len == 0)) {
1696 pr_err("Invalid input(s): ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1697 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1698 return -EFAULT;
1699 }
1700 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1701 req.sb_len))
1702 return -EFAULT;
1703
1704 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001705 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001706 req.ifd_data_fd);
1707 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1708 pr_err("Ion client could not retrieve the handle\n");
1709 return -ENOMEM;
1710 }
1711 /* Get the physical address of the ION BUF */
1712 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1713 if (ret) {
1714
1715 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1716 ret);
1717 return ret;
1718 }
1719
1720 if (len < req.sb_len) {
1721 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1722 req.sb_len, len);
1723 return -EINVAL;
1724 }
1725 /* Map the shared ION buffer and record the client's buffer info */
1726 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1727 data->client.ihandle);
1728 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1729 pr_err("ION memory mapping for client shared buf failed\n");
1730 return -ENOMEM;
1731 }
1732 data->client.sb_phys = (phys_addr_t)pa;
1733 data->client.sb_length = req.sb_len;
1734 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1735 return 0;
1736}
1737
Zhen Kong26e62742018-05-04 17:19:06 -07001738static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
1739 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001740{
1741 int ret;
1742
1743 ret = (qseecom.send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001744 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001745}
1746
1747static int __qseecom_reentrancy_listener_has_sent_rsp(
1748 struct qseecom_dev_handle *data,
1749 struct qseecom_registered_listener_list *ptr_svc)
1750{
1751 int ret;
1752
1753 ret = (ptr_svc->send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001754 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001755}
1756
1757static void __qseecom_clean_listener_sglistinfo(
1758 struct qseecom_registered_listener_list *ptr_svc)
1759{
1760 if (ptr_svc->sglist_cnt) {
1761 memset(ptr_svc->sglistinfo_ptr, 0,
1762 SGLISTINFO_TABLE_SIZE);
1763 ptr_svc->sglist_cnt = 0;
1764 }
1765}
1766
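/*
 * Handle QSEOS_RESULT_INCOMPLETE responses: TZ is asking for a listener
 * service. Wake the matching listener, block (with signals masked) until it
 * posts a response or the client/listener aborts, then return the listener
 * response to TZ via another scm_call. Repeat while TZ keeps returning
 * INCOMPLETE.
 */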
1767static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
1768 struct qseecom_command_scm_resp *resp)
1769{
1770 int ret = 0;
1771 int rc = 0;
1772 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07001773 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
1774 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
1775 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001776 struct qseecom_registered_listener_list *ptr_svc = NULL;
1777 sigset_t new_sigset;
1778 sigset_t old_sigset;
1779 uint32_t status;
1780 void *cmd_buf = NULL;
1781 size_t cmd_len;
1782 struct sglist_info *table = NULL;
1783
Zhen Kongbcdeda22018-11-16 13:50:51 -08001784 qseecom.app_block_ref_cnt++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001785 while (resp->result == QSEOS_RESULT_INCOMPLETE) {
1786 lstnr = resp->data;
1787 /*
1788 * Wake up the blocking listener service with the lstnr id
1789 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08001790 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001791 list_for_each_entry(ptr_svc,
1792 &qseecom.registered_listener_list_head, list) {
1793 if (ptr_svc->svc.listener_id == lstnr) {
1794 ptr_svc->listener_in_use = true;
1795 ptr_svc->rcv_req_flag = 1;
1796 wake_up_interruptible(&ptr_svc->rcv_req_wq);
1797 break;
1798 }
1799 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001800
1801 if (ptr_svc == NULL) {
1802 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07001803 rc = -EINVAL;
1804 status = QSEOS_RESULT_FAILURE;
1805 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001806 }
1807
1808 if (!ptr_svc->ihandle) {
1809 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07001810 rc = -EINVAL;
1811 status = QSEOS_RESULT_FAILURE;
1812 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001813 }
1814
1815 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07001816 pr_err("Service %d does not exist\n",
1817 lstnr);
1818 rc = -ERESTARTSYS;
1819 ptr_svc = NULL;
1820 status = QSEOS_RESULT_FAILURE;
1821 goto err_resp;
1822 }
1823
1824 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08001825 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07001826 lstnr, ptr_svc->abort);
1827 rc = -ENODEV;
1828 status = QSEOS_RESULT_FAILURE;
1829 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001830 }
Zhen Kong25731112018-09-20 13:10:03 -07001831
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001832 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
1833
1834 /* initialize the new signal mask with all signals*/
1835 sigfillset(&new_sigset);
1836 /* block all signals */
1837 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
1838
Zhen Kongbcdeda22018-11-16 13:50:51 -08001839 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001840 do {
1841 /*
1842 * When reentrancy is not supported, check global
1843 * send_resp_flag; otherwise, check this listener's
1844 * send_resp_flag.
1845 */
1846 if (!qseecom.qsee_reentrancy_support &&
1847 !wait_event_freezable(qseecom.send_resp_wq,
Zhen Kong26e62742018-05-04 17:19:06 -07001848 __qseecom_listener_has_sent_rsp(
1849 data, ptr_svc))) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001850 break;
1851 }
1852
1853 if (qseecom.qsee_reentrancy_support &&
1854 !wait_event_freezable(qseecom.send_resp_wq,
1855 __qseecom_reentrancy_listener_has_sent_rsp(
1856 data, ptr_svc))) {
1857 break;
1858 }
1859 } while (1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001860 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001861 /* restore signal mask */
1862 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07001863 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001864 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
1865 data->client.app_id, lstnr, ret);
1866 rc = -ENODEV;
1867 status = QSEOS_RESULT_FAILURE;
1868 } else {
1869 status = QSEOS_RESULT_SUCCESS;
1870 }
Zhen Kong26e62742018-05-04 17:19:06 -07001871err_resp:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001872 qseecom.send_resp_flag = 0;
Zhen Kong7d500032018-08-06 16:58:31 -07001873 if (ptr_svc) {
1874 ptr_svc->send_resp_flag = 0;
1875 table = ptr_svc->sglistinfo_ptr;
1876 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001877 if (qseecom.qsee_version < QSEE_VERSION_40) {
1878 send_data_rsp.listener_id = lstnr;
1879 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001880 if (table) {
1881 send_data_rsp.sglistinfo_ptr =
1882 (uint32_t)virt_to_phys(table);
1883 send_data_rsp.sglistinfo_len =
1884 SGLISTINFO_TABLE_SIZE;
1885 dmac_flush_range((void *)table,
1886 (void *)table + SGLISTINFO_TABLE_SIZE);
1887 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001888 cmd_buf = (void *)&send_data_rsp;
1889 cmd_len = sizeof(send_data_rsp);
1890 } else {
1891 send_data_rsp_64bit.listener_id = lstnr;
1892 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001893 if (table) {
1894 send_data_rsp_64bit.sglistinfo_ptr =
1895 virt_to_phys(table);
1896 send_data_rsp_64bit.sglistinfo_len =
1897 SGLISTINFO_TABLE_SIZE;
1898 dmac_flush_range((void *)table,
1899 (void *)table + SGLISTINFO_TABLE_SIZE);
1900 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001901 cmd_buf = (void *)&send_data_rsp_64bit;
1902 cmd_len = sizeof(send_data_rsp_64bit);
1903 }
Zhen Kong7d500032018-08-06 16:58:31 -07001904 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001905 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
1906 else
1907 *(uint32_t *)cmd_buf =
1908 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07001909 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001910 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
1911 ptr_svc->ihandle,
1912 ptr_svc->sb_virt, ptr_svc->sb_length,
1913 ION_IOC_CLEAN_INV_CACHES);
1914 if (ret) {
1915 pr_err("cache operation failed %d\n", ret);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001916 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001917 }
1918 }
1919
1920 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
1921 ret = __qseecom_enable_clk(CLK_QSEE);
1922 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08001923 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001924 }
1925
1926 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1927 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07001928 if (ptr_svc) {
1929 ptr_svc->listener_in_use = false;
1930 __qseecom_clean_listener_sglistinfo(ptr_svc);
1931 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001932 if (ret) {
1933 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1934 ret, data->client.app_id);
1935 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1936 __qseecom_disable_clk(CLK_QSEE);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001937 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001938 }
Zhen Kong26e62742018-05-04 17:19:06 -07001939 pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
1940 status, resp->result, data->client.app_id, lstnr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001941 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1942 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1943 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1944 resp->result, data->client.app_id, lstnr);
1945 ret = -EINVAL;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001946 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001947 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001948exit:
1949 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001950 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1951 __qseecom_disable_clk(CLK_QSEE);
1952
1953 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001954 qseecom.app_block_ref_cnt--;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001955 if (rc)
1956 return rc;
1957
1958 return ret;
1959}
1960
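/*
 * A TA session returned QSEOS_RESULT_BLOCKED_ON_LISTENER: the listener it
 * needs is busy serving another TA. Sleep (releasing app_access_lock) until
 * the listener is free, then send QSEOS_CONTINUE_BLOCKED_REQ_COMMAND so TZ
 * resumes the blocked session; if the smcinvoke variant fails, retry with
 * the legacy app-id based command. Loop while TZ keeps reporting BLOCKED.
 */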
Zhen Konga91aaf02018-02-02 17:21:04 -08001961static int __qseecom_process_reentrancy_blocked_on_listener(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001962 struct qseecom_command_scm_resp *resp,
1963 struct qseecom_registered_app_list *ptr_app,
1964 struct qseecom_dev_handle *data)
1965{
1966 struct qseecom_registered_listener_list *list_ptr;
1967 int ret = 0;
1968 struct qseecom_continue_blocked_request_ireq ireq;
1969 struct qseecom_command_scm_resp continue_resp;
Zhen Konga91aaf02018-02-02 17:21:04 -08001970 unsigned int session_id;
Zhen Kong3d1d92f2018-02-02 17:21:04 -08001971 sigset_t new_sigset;
1972 sigset_t old_sigset;
Zhen Konga91aaf02018-02-02 17:21:04 -08001973 unsigned long flags;
1974 bool found_app = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001975
1976 if (!resp || !data) {
1977 pr_err("invalid resp or data pointer\n");
1978 ret = -EINVAL;
1979 goto exit;
1980 }
1981
1982 /* find app_id & img_name from list */
Zhen Konge4804722019-02-27 21:13:18 -08001983 if (!ptr_app && data->client.app_arch != ELFCLASSNONE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001984 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
1985 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
1986 list) {
1987 if ((ptr_app->app_id == data->client.app_id) &&
1988 (!strcmp(ptr_app->app_name,
1989 data->client.app_name))) {
1990 found_app = true;
1991 break;
1992 }
1993 }
1994 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
1995 flags);
1996 if (!found_app) {
1997 pr_err("app_id %d (%s) is not found\n",
1998 data->client.app_id,
1999 (char *)data->client.app_name);
2000 ret = -ENOENT;
2001 goto exit;
2002 }
2003 }
2004
Zhen Kongd8cc0052017-11-13 15:13:31 -08002005 do {
Zhen Konga91aaf02018-02-02 17:21:04 -08002006 session_id = resp->resp_type;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002007 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002008 list_ptr = __qseecom_find_svc(resp->data);
2009 if (!list_ptr) {
2010 pr_err("Invalid listener ID %d\n", resp->data);
2011 ret = -ENODATA;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002012 mutex_unlock(&listener_access_lock);
Zhen Konge7f525f2017-12-01 18:26:25 -08002013 goto exit;
2014 }
Zhen Konga91aaf02018-02-02 17:21:04 -08002015 ptr_app->blocked_on_listener_id = resp->data;
2016
2017 pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
2018 resp->data, list_ptr->listener_in_use,
2019 session_id, data->client.app_id);
2020
2021 /* sleep until listener is available */
2022 sigfillset(&new_sigset);
2023 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2024
2025 do {
2026 qseecom.app_block_ref_cnt++;
2027 ptr_app->app_blocked = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08002028 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002029 mutex_unlock(&app_access_lock);
2030 wait_event_freezable(
2031 list_ptr->listener_block_app_wq,
2032 !list_ptr->listener_in_use);
2033 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002034 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002035 ptr_app->app_blocked = false;
2036 qseecom.app_block_ref_cnt--;
2037 } while (list_ptr->listener_in_use);
2038
2039 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2040
2041 ptr_app->blocked_on_listener_id = 0;
2042 pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
2043 resp->data, session_id, data->client.app_id);
2044
2045 /* notify TZ that listener is available */
2046 ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
2047
2048 if (qseecom.smcinvoke_support)
2049 ireq.app_or_session_id = session_id;
2050 else
2051 ireq.app_or_session_id = data->client.app_id;
2052
2053 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2054 &ireq, sizeof(ireq),
2055 &continue_resp, sizeof(continue_resp));
2056 if (ret && qseecom.smcinvoke_support) {
2057 /* retry with legacy cmd */
2058 qseecom.smcinvoke_support = false;
2059 ireq.app_or_session_id = data->client.app_id;
2060 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2061 &ireq, sizeof(ireq),
2062 &continue_resp, sizeof(continue_resp));
2063 qseecom.smcinvoke_support = true;
2064 if (ret) {
2065 pr_err("unblock app %d or session %d fail\n",
2066 data->client.app_id, session_id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002067 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002068 goto exit;
2069 }
2070 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08002071 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002072 resp->result = continue_resp.result;
2073 resp->resp_type = continue_resp.resp_type;
2074 resp->data = continue_resp.data;
2075 pr_debug("unblock resp = %d\n", resp->result);
2076 } while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);
2077
2078 if (resp->result != QSEOS_RESULT_INCOMPLETE) {
2079 pr_err("Unexpected unblock resp %d\n", resp->result);
2080 ret = -EINVAL;
Zhen Kong2f60f492017-06-29 15:22:14 -07002081 }
Zhen Kong2f60f492017-06-29 15:22:14 -07002082exit:
2083 return ret;
2084}
2085
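/*
 * Reentrancy-aware version of __qseecom_process_incomplete_cmd(): the
 * app_access_lock is released while waiting for the listener response so
 * other TAs can make progress, and a BLOCKED_ON_LISTENER result from TZ is
 * forwarded to __qseecom_process_reentrancy_blocked_on_listener().
 */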
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002086static int __qseecom_reentrancy_process_incomplete_cmd(
2087 struct qseecom_dev_handle *data,
2088 struct qseecom_command_scm_resp *resp)
2089{
2090 int ret = 0;
2091 int rc = 0;
2092 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07002093 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
2094 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
2095 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002096 struct qseecom_registered_listener_list *ptr_svc = NULL;
2097 sigset_t new_sigset;
2098 sigset_t old_sigset;
2099 uint32_t status;
2100 void *cmd_buf = NULL;
2101 size_t cmd_len;
2102 struct sglist_info *table = NULL;
2103
Zhen Kong26e62742018-05-04 17:19:06 -07002104 while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002105 lstnr = resp->data;
2106 /*
2107 * Wake up blocking lsitener service with the lstnr id
2108 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002109 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002110 list_for_each_entry(ptr_svc,
2111 &qseecom.registered_listener_list_head, list) {
2112 if (ptr_svc->svc.listener_id == lstnr) {
2113 ptr_svc->listener_in_use = true;
2114 ptr_svc->rcv_req_flag = 1;
2115 wake_up_interruptible(&ptr_svc->rcv_req_wq);
2116 break;
2117 }
2118 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002119
2120 if (ptr_svc == NULL) {
2121 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07002122 rc = -EINVAL;
2123 status = QSEOS_RESULT_FAILURE;
2124 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002125 }
2126
2127 if (!ptr_svc->ihandle) {
2128 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07002129 rc = -EINVAL;
2130 status = QSEOS_RESULT_FAILURE;
2131 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002132 }
2133
2134 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07002135 pr_err("Service %d does not exist\n",
2136 lstnr);
2137 rc = -ERESTARTSYS;
2138 ptr_svc = NULL;
2139 status = QSEOS_RESULT_FAILURE;
2140 goto err_resp;
2141 }
2142
2143 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08002144 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07002145 lstnr, ptr_svc->abort);
2146 rc = -ENODEV;
2147 status = QSEOS_RESULT_FAILURE;
2148 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002149 }
Zhen Kong25731112018-09-20 13:10:03 -07002150
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002151 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
2152
2153 /* initialize the new signal mask with all signals*/
2154 sigfillset(&new_sigset);
2155
2156 /* block all signals */
2157 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2158
2159 /* unlock mutex between waking listener and sleep-wait */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002160 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002161 mutex_unlock(&app_access_lock);
2162 do {
2163 if (!wait_event_freezable(qseecom.send_resp_wq,
2164 __qseecom_reentrancy_listener_has_sent_rsp(
2165 data, ptr_svc))) {
2166 break;
2167 }
2168 } while (1);
2169 /* lock mutex again after resp sent */
2170 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002171 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002172 ptr_svc->send_resp_flag = 0;
2173 qseecom.send_resp_flag = 0;
2174
2175 /* restore signal mask */
2176 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07002177 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002178 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
2179 data->client.app_id, lstnr, ret);
2180 rc = -ENODEV;
2181 status = QSEOS_RESULT_FAILURE;
2182 } else {
2183 status = QSEOS_RESULT_SUCCESS;
2184 }
Zhen Kong26e62742018-05-04 17:19:06 -07002185err_resp:
Zhen Kong7d500032018-08-06 16:58:31 -07002186 if (ptr_svc)
2187 table = ptr_svc->sglistinfo_ptr;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002188 if (qseecom.qsee_version < QSEE_VERSION_40) {
2189 send_data_rsp.listener_id = lstnr;
2190 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002191 if (table) {
2192 send_data_rsp.sglistinfo_ptr =
2193 (uint32_t)virt_to_phys(table);
2194 send_data_rsp.sglistinfo_len =
2195 SGLISTINFO_TABLE_SIZE;
2196 dmac_flush_range((void *)table,
2197 (void *)table + SGLISTINFO_TABLE_SIZE);
2198 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002199 cmd_buf = (void *)&send_data_rsp;
2200 cmd_len = sizeof(send_data_rsp);
2201 } else {
2202 send_data_rsp_64bit.listener_id = lstnr;
2203 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002204 if (table) {
2205 send_data_rsp_64bit.sglistinfo_ptr =
2206 virt_to_phys(table);
2207 send_data_rsp_64bit.sglistinfo_len =
2208 SGLISTINFO_TABLE_SIZE;
2209 dmac_flush_range((void *)table,
2210 (void *)table + SGLISTINFO_TABLE_SIZE);
2211 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002212 cmd_buf = (void *)&send_data_rsp_64bit;
2213 cmd_len = sizeof(send_data_rsp_64bit);
2214 }
Zhen Kong7d500032018-08-06 16:58:31 -07002215 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002216 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
2217 else
2218 *(uint32_t *)cmd_buf =
2219 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07002220 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002221 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
2222 ptr_svc->ihandle,
2223 ptr_svc->sb_virt, ptr_svc->sb_length,
2224 ION_IOC_CLEAN_INV_CACHES);
2225 if (ret) {
2226 pr_err("cache operation failed %d\n", ret);
2227 return ret;
2228 }
2229 }
2230 if (lstnr == RPMB_SERVICE) {
2231 ret = __qseecom_enable_clk(CLK_QSEE);
2232 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08002233 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002234 }
2235
2236 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2237 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07002238 if (ptr_svc) {
2239 ptr_svc->listener_in_use = false;
2240 __qseecom_clean_listener_sglistinfo(ptr_svc);
2241 wake_up_interruptible(&ptr_svc->listener_block_app_wq);
2242 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002243
2244 if (ret) {
2245 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
2246 ret, data->client.app_id);
2247 goto exit;
2248 }
2249
2250 switch (resp->result) {
2251 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
2252 pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
2253 lstnr, data->client.app_id, resp->data);
2254 if (lstnr == resp->data) {
2255 pr_err("lstnr %d should not be blocked!\n",
2256 lstnr);
2257 ret = -EINVAL;
2258 goto exit;
2259 }
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002260 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002261 ret = __qseecom_process_reentrancy_blocked_on_listener(
2262 resp, NULL, data);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002263 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002264 if (ret) {
2265 pr_err("failed to process App(%d) %s blocked on listener %d\n",
2266 data->client.app_id,
2267 data->client.app_name, resp->data);
2268 goto exit;
2269 }
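		/* fall through after the app is unblocked */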
2270 case QSEOS_RESULT_SUCCESS:
2271 case QSEOS_RESULT_INCOMPLETE:
2272 break;
2273 default:
2274 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
2275 resp->result, data->client.app_id, lstnr);
2276 ret = -EINVAL;
2277 goto exit;
2278 }
2279exit:
Zhen Kongbcdeda22018-11-16 13:50:51 -08002280 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002281 if (lstnr == RPMB_SERVICE)
2282 __qseecom_disable_clk(CLK_QSEE);
2283
2284 }
2285 if (rc)
2286 return rc;
2287
2288 return ret;
2289}
2290
2291/*
2292 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2293 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2294 * So, before sending an OS level scm call, first check whether any TZ app
2295 * is blocked and, if so, wait until all apps are unblocked.
2296 */
2297static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2298{
2299 sigset_t new_sigset, old_sigset;
2300
2301 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2302 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2303 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2304 /* thread sleep until this app unblocked */
2305 while (qseecom.app_block_ref_cnt > 0) {
2306 sigfillset(&new_sigset);
2307 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2308 mutex_unlock(&app_access_lock);
2309 do {
2310 if (!wait_event_freezable(qseecom.app_block_wq,
2311 (qseecom.app_block_ref_cnt == 0)))
2312 break;
2313 } while (1);
2314 mutex_lock(&app_access_lock);
2315 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2316 }
2317 }
2318}
2319
2320/*
2321 * scm_call of send data will fail if this TA is blocked or there are more
2322 * than one TA requesting listener services; So, first check to see if need
2323 * to wait.
2324 */
2325static void __qseecom_reentrancy_check_if_this_app_blocked(
2326 struct qseecom_registered_app_list *ptr_app)
2327{
2328 sigset_t new_sigset, old_sigset;
2329
2330 if (qseecom.qsee_reentrancy_support) {
Zhen Kongdea10592018-07-30 17:50:10 -07002331 ptr_app->check_block++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002332 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2333 /* thread sleep until this app unblocked */
2334 sigfillset(&new_sigset);
2335 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2336 mutex_unlock(&app_access_lock);
2337 do {
2338 if (!wait_event_freezable(qseecom.app_block_wq,
2339 (!ptr_app->app_blocked &&
2340 qseecom.app_block_ref_cnt <= 1)))
2341 break;
2342 } while (1);
2343 mutex_lock(&app_access_lock);
2344 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2345 }
Zhen Kongdea10592018-07-30 17:50:10 -07002346 ptr_app->check_block--;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002347 }
2348}
2349
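/*
 * Look up a TA by name: first check the locally registered app list; if it
 * is not found there, ask TZ via scm call whether the app is already loaded.
 * Returns 0 with *app_id set (an app_id of 0 means the app is not loaded).
 */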
2350static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2351 uint32_t *app_id)
2352{
2353 int32_t ret;
2354 struct qseecom_command_scm_resp resp;
2355 bool found_app = false;
2356 struct qseecom_registered_app_list *entry = NULL;
2357 unsigned long flags = 0;
2358
2359 if (!app_id) {
2360 pr_err("Null pointer to app_id\n");
2361 return -EINVAL;
2362 }
2363 *app_id = 0;
2364
2365 /* check if app exists and has been registered locally */
2366 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2367 list_for_each_entry(entry,
2368 &qseecom.registered_app_list_head, list) {
2369 if (!strcmp(entry->app_name, req.app_name)) {
2370 found_app = true;
2371 break;
2372 }
2373 }
2374 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2375 if (found_app) {
2376 pr_debug("Found app with id %d\n", entry->app_id);
2377 *app_id = entry->app_id;
2378 return 0;
2379 }
2380
2381 memset((void *)&resp, 0, sizeof(resp));
2382
2383 /* SCM_CALL to check if app_id for the mentioned app exists */
2384 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2385 sizeof(struct qseecom_check_app_ireq),
2386 &resp, sizeof(resp));
2387 if (ret) {
2388 pr_err("scm_call to check if app is already loaded failed\n");
2389 return -EINVAL;
2390 }
2391
2392 if (resp.result == QSEOS_RESULT_FAILURE)
2393 return 0;
2394
2395 switch (resp.resp_type) {
2396 /*qsee returned listener type response */
2397 case QSEOS_LISTENER_ID:
2398 pr_err("resp type is of listener type instead of app\n");
2399 return -EINVAL;
2400 case QSEOS_APP_ID:
2401 *app_id = resp.data;
2402 return 0;
2403 default:
2404 pr_err("invalid resp type (%d) from qsee\n",
2405 resp.resp_type);
2406 return -ENODEV;
2407 }
2408}
2409
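/*
 * Load a TA for a client: make sure the matching cmnlib/cmnlib64 is loaded,
 * vote for bus/clocks, and check whether the app already exists. If it does,
 * only its reference count is bumped; otherwise the ELF image is handed to
 * TZ through an ION buffer with an APP_START scm call and the new app is
 * added to the registered app list.
 */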
2410static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2411{
2412 struct qseecom_registered_app_list *entry = NULL;
2413 unsigned long flags = 0;
2414 u32 app_id = 0;
2415 struct ion_handle *ihandle; /* Ion handle */
2416 struct qseecom_load_img_req load_img_req;
2417 int32_t ret = 0;
2418 ion_phys_addr_t pa = 0;
2419 size_t len;
2420 struct qseecom_command_scm_resp resp;
2421 struct qseecom_check_app_ireq req;
2422 struct qseecom_load_app_ireq load_req;
2423 struct qseecom_load_app_64bit_ireq load_req_64bit;
2424 void *cmd_buf = NULL;
2425 size_t cmd_len;
2426 bool first_time = false;
2427
2428 /* Copy the relevant information needed for loading the image */
2429 if (copy_from_user(&load_img_req,
2430 (void __user *)argp,
2431 sizeof(struct qseecom_load_img_req))) {
2432 pr_err("copy_from_user failed\n");
2433 return -EFAULT;
2434 }
2435
2436 /* Check and load cmnlib */
2437 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2438 if (!qseecom.commonlib_loaded &&
2439 load_img_req.app_arch == ELFCLASS32) {
2440 ret = qseecom_load_commonlib_image(data, "cmnlib");
2441 if (ret) {
2442 pr_err("failed to load cmnlib\n");
2443 return -EIO;
2444 }
2445 qseecom.commonlib_loaded = true;
2446 pr_debug("cmnlib is loaded\n");
2447 }
2448
2449 if (!qseecom.commonlib64_loaded &&
2450 load_img_req.app_arch == ELFCLASS64) {
2451 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2452 if (ret) {
2453 pr_err("failed to load cmnlib64\n");
2454 return -EIO;
2455 }
2456 qseecom.commonlib64_loaded = true;
2457 pr_debug("cmnlib64 is loaded\n");
2458 }
2459 }
2460
2461 if (qseecom.support_bus_scaling) {
2462 mutex_lock(&qsee_bw_mutex);
2463 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2464 mutex_unlock(&qsee_bw_mutex);
2465 if (ret)
2466 return ret;
2467 }
2468
2469 /* Vote for the SFPB clock */
2470 ret = __qseecom_enable_clk_scale_up(data);
2471 if (ret)
2472 goto enable_clk_err;
2473
2474 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2475 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2476 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2477
2478 ret = __qseecom_check_app_exists(req, &app_id);
2479 if (ret < 0)
2480 goto loadapp_err;
2481
2482 if (app_id) {
2483 pr_debug("App id %d (%s) already exists\n", app_id,
2484 (char *)(req.app_name));
2485 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2486 list_for_each_entry(entry,
2487 &qseecom.registered_app_list_head, list){
2488 if (entry->app_id == app_id) {
2489 entry->ref_cnt++;
2490 break;
2491 }
2492 }
2493 spin_unlock_irqrestore(
2494 &qseecom.registered_app_list_lock, flags);
2495 ret = 0;
2496 } else {
2497 first_time = true;
2498 pr_warn("App (%s) doesn't exist, loading app for the first time\n",
2499 (char *)(load_img_req.img_name));
2500 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002501 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002502 load_img_req.ifd_data_fd);
2503 if (IS_ERR_OR_NULL(ihandle)) {
2504 pr_err("Ion client could not retrieve the handle\n");
2505 ret = -ENOMEM;
2506 goto loadapp_err;
2507 }
2508
2509 /* Get the physical address of the ION BUF */
2510 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2511 if (ret) {
2512 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2513 ret);
2514 goto loadapp_err;
2515 }
2516 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2517 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2518 len, load_img_req.mdt_len,
2519 load_img_req.img_len);
2520 ret = -EINVAL;
2521 goto loadapp_err;
2522 }
2523 /* Populate the structure for sending scm call to load image */
2524 if (qseecom.qsee_version < QSEE_VERSION_40) {
2525 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2526 load_req.mdt_len = load_img_req.mdt_len;
2527 load_req.img_len = load_img_req.img_len;
2528 strlcpy(load_req.app_name, load_img_req.img_name,
2529 MAX_APP_NAME_SIZE);
2530 load_req.phy_addr = (uint32_t)pa;
2531 cmd_buf = (void *)&load_req;
2532 cmd_len = sizeof(struct qseecom_load_app_ireq);
2533 } else {
2534 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2535 load_req_64bit.mdt_len = load_img_req.mdt_len;
2536 load_req_64bit.img_len = load_img_req.img_len;
2537 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2538 MAX_APP_NAME_SIZE);
2539 load_req_64bit.phy_addr = (uint64_t)pa;
2540 cmd_buf = (void *)&load_req_64bit;
2541 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2542 }
2543
2544 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2545 ION_IOC_CLEAN_INV_CACHES);
2546 if (ret) {
2547 pr_err("cache operation failed %d\n", ret);
2548 goto loadapp_err;
2549 }
2550
2551 /* SCM_CALL to load the app and get the app_id back */
2552 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2553 cmd_len, &resp, sizeof(resp));
2554 if (ret) {
2555 pr_err("scm_call to load app failed\n");
2556 if (!IS_ERR_OR_NULL(ihandle))
2557 ion_free(qseecom.ion_clnt, ihandle);
2558 ret = -EINVAL;
2559 goto loadapp_err;
2560 }
2561
2562 if (resp.result == QSEOS_RESULT_FAILURE) {
2563 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2564 if (!IS_ERR_OR_NULL(ihandle))
2565 ion_free(qseecom.ion_clnt, ihandle);
2566 ret = -EFAULT;
2567 goto loadapp_err;
2568 }
2569
2570 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2571 ret = __qseecom_process_incomplete_cmd(data, &resp);
2572 if (ret) {
2573 pr_err("process_incomplete_cmd failed err: %d\n",
2574 ret);
2575 if (!IS_ERR_OR_NULL(ihandle))
2576 ion_free(qseecom.ion_clnt, ihandle);
2577 ret = -EFAULT;
2578 goto loadapp_err;
2579 }
2580 }
2581
2582 if (resp.result != QSEOS_RESULT_SUCCESS) {
2583 pr_err("scm_call failed resp.result unknown, %d\n",
2584 resp.result);
2585 if (!IS_ERR_OR_NULL(ihandle))
2586 ion_free(qseecom.ion_clnt, ihandle);
2587 ret = -EFAULT;
2588 goto loadapp_err;
2589 }
2590
2591 app_id = resp.data;
2592
2593 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2594 if (!entry) {
2595 ret = -ENOMEM;
2596 goto loadapp_err;
2597 }
2598 entry->app_id = app_id;
2599 entry->ref_cnt = 1;
2600 entry->app_arch = load_img_req.app_arch;
2601 /*
2602 * keymaster app may be first loaded as "keymaste" by qseecomd,
2603 * and then used as "keymaster" on some targets. To avoid app
2604 * name checking error, register "keymaster" into app_list and
2605 * thread private data.
2606 */
2607 if (!strcmp(load_img_req.img_name, "keymaste"))
2608 strlcpy(entry->app_name, "keymaster",
2609 MAX_APP_NAME_SIZE);
2610 else
2611 strlcpy(entry->app_name, load_img_req.img_name,
2612 MAX_APP_NAME_SIZE);
2613 entry->app_blocked = false;
2614 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07002615 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002616
2617 /* Deallocate the handle */
2618 if (!IS_ERR_OR_NULL(ihandle))
2619 ion_free(qseecom.ion_clnt, ihandle);
2620
2621 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2622 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2623 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2624 flags);
2625
2626 pr_warn("App with id %u (%s) now loaded\n", app_id,
2627 (char *)(load_img_req.img_name));
2628 }
2629 data->client.app_id = app_id;
2630 data->client.app_arch = load_img_req.app_arch;
2631 if (!strcmp(load_img_req.img_name, "keymaste"))
2632 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2633 else
2634 strlcpy(data->client.app_name, load_img_req.img_name,
2635 MAX_APP_NAME_SIZE);
2636 load_img_req.app_id = app_id;
2637 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2638 pr_err("copy_to_user failed\n");
2639 ret = -EFAULT;
2640 if (first_time == true) {
2641 spin_lock_irqsave(
2642 &qseecom.registered_app_list_lock, flags);
2643 list_del(&entry->list);
2644 spin_unlock_irqrestore(
2645 &qseecom.registered_app_list_lock, flags);
2646 kzfree(entry);
2647 }
2648 }
2649
2650loadapp_err:
2651 __qseecom_disable_clk_scale_down(data);
2652enable_clk_err:
2653 if (qseecom.support_bus_scaling) {
2654 mutex_lock(&qsee_bw_mutex);
2655 qseecom_unregister_bus_bandwidth_needs(data);
2656 mutex_unlock(&qsee_bw_mutex);
2657 }
2658 return ret;
2659}
2660
2661static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2662{
2663 int ret = 1; /* Set unload app */
2664
2665 wake_up_all(&qseecom.send_resp_wq);
2666 if (qseecom.qsee_reentrancy_support)
2667 mutex_unlock(&app_access_lock);
2668 while (atomic_read(&data->ioctl_count) > 1) {
2669 if (wait_event_freezable(data->abort_wq,
2670 atomic_read(&data->ioctl_count) <= 1)) {
2671 pr_err("Interrupted from abort\n");
2672 ret = -ERESTARTSYS;
2673 break;
2674 }
2675 }
2676 if (qseecom.qsee_reentrancy_support)
2677 mutex_lock(&app_access_lock);
2678 return ret;
2679}
2680
2681static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2682{
2683 int ret = 0;
2684
2685 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2686 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2687 ion_free(qseecom.ion_clnt, data->client.ihandle);
2688 data->client.ihandle = NULL;
2689 }
2690 return ret;
2691}
2692
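/*
 * Unload a TA when the client crashes or this is the last reference. The
 * keymaster app is never unloaded from TZ. After the shutdown scm call the
 * registered app list is re-checked in case the entry was already removed
 * while the call was in progress.
 */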
2693static int qseecom_unload_app(struct qseecom_dev_handle *data,
2694 bool app_crash)
2695{
2696 unsigned long flags;
2697 unsigned long flags1;
2698 int ret = 0;
2699 struct qseecom_command_scm_resp resp;
2700 struct qseecom_registered_app_list *ptr_app = NULL;
2701 bool unload = false;
2702 bool found_app = false;
2703 bool found_dead_app = false;
Zhen Kongf818f152019-03-13 12:31:32 -07002704 bool scm_called = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002705
2706 if (!data) {
2707 pr_err("Invalid/uninitialized device handle\n");
2708 return -EINVAL;
2709 }
2710
2711 if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
2712 pr_debug("Do not unload keymaster app from tz\n");
2713 goto unload_exit;
2714 }
2715
2716 __qseecom_cleanup_app(data);
2717 __qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
2718
2719 if (data->client.app_id > 0) {
2720 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2721 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
2722 list) {
2723 if (ptr_app->app_id == data->client.app_id) {
2724 if (!strcmp((void *)ptr_app->app_name,
2725 (void *)data->client.app_name)) {
2726 found_app = true;
Zhen Kong024798b2018-07-13 18:14:26 -07002727 if (ptr_app->app_blocked ||
2728 ptr_app->check_block)
Zhen Kongaf93d7a2017-10-13 14:01:48 -07002729 app_crash = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002730 if (app_crash || ptr_app->ref_cnt == 1)
2731 unload = true;
2732 break;
2733 }
2734 found_dead_app = true;
2735 break;
2736 }
2737 }
2738 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2739 flags);
2740 if (found_app == false && found_dead_app == false) {
2741 pr_err("Cannot find app with id = %d (%s)\n",
2742 data->client.app_id,
2743 (char *)data->client.app_name);
2744 ret = -EINVAL;
2745 goto unload_exit;
2746 }
2747 }
2748
2749 if (found_dead_app)
2750 pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
2751 (char *)data->client.app_name);
2752
2753 if (unload) {
2754 struct qseecom_unload_app_ireq req;
2755 /* Populate the structure for sending scm call to unload the app */
2756 req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
2757 req.app_id = data->client.app_id;
2758
2759 /* SCM_CALL to unload the app */
2760 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2761 sizeof(struct qseecom_unload_app_ireq),
2762 &resp, sizeof(resp));
Zhen Kongf818f152019-03-13 12:31:32 -07002763 scm_called = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002764 if (ret) {
2765 pr_err("scm_call to unload app (id = %d) failed\n",
2766 req.app_id);
2767 ret = -EFAULT;
Zhen Kongf818f152019-03-13 12:31:32 -07002768 goto scm_exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002769 } else {
2770 pr_warn("App id %d now unloaded\n", req.app_id);
2771 }
2772 if (resp.result == QSEOS_RESULT_FAILURE) {
2773 pr_err("app (%d) unload_failed!!\n",
2774 data->client.app_id);
2775 ret = -EFAULT;
Zhen Kongf818f152019-03-13 12:31:32 -07002776 goto scm_exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002777 }
2778 if (resp.result == QSEOS_RESULT_SUCCESS)
2779 pr_debug("App (%d) is unloaded!!\n",
2780 data->client.app_id);
2781 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2782 ret = __qseecom_process_incomplete_cmd(data, &resp);
2783 if (ret) {
2784 pr_err("process_incomplete_cmd fail err: %d\n",
2785 ret);
Zhen Kongf818f152019-03-13 12:31:32 -07002786 goto scm_exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002787 }
2788 }
2789 }
2790
Zhen Kongf818f152019-03-13 12:31:32 -07002791scm_exit:
2792 if (scm_called) {
2793 /* double check if this app_entry still exists */
2794 bool doublecheck = false;
2795
2796 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2797 list_for_each_entry(ptr_app,
2798 &qseecom.registered_app_list_head, list) {
2799 if ((ptr_app->app_id == data->client.app_id) &&
2800 (!strcmp((void *)ptr_app->app_name,
2801 (void *)data->client.app_name))) {
2802 doublecheck = true;
2803 break;
2804 }
2805 }
2806 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2807 flags1);
2808 if (!doublecheck) {
2809 pr_warn("app %d(%s) entry is already removed\n",
2810 data->client.app_id,
2811 (char *)data->client.app_name);
2812 found_app = false;
2813 }
2814 }
Zhen Kong7d500032018-08-06 16:58:31 -07002815unload_exit:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002816 if (found_app) {
2817 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2818 if (app_crash) {
2819 ptr_app->ref_cnt = 0;
2820 pr_debug("app_crash: ref_count = 0\n");
2821 } else {
2822 if (ptr_app->ref_cnt == 1) {
2823 ptr_app->ref_cnt = 0;
2824 pr_debug("ref_count set to 0\n");
2825 } else {
2826 ptr_app->ref_cnt--;
2827 pr_debug("Can't unload app(%d), still in use\n",
2828 ptr_app->app_id);
2829 }
2830 }
2831 if (unload) {
2832 list_del(&ptr_app->list);
2833 kzfree(ptr_app);
2834 }
2835 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2836 flags1);
2837 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002838 qseecom_unmap_ion_allocated_memory(data);
2839 data->released = true;
2840 return ret;
2841}
2842
2843static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2844 unsigned long virt)
2845{
2846 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2847}
2848
2849static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2850 unsigned long virt)
2851{
2852 return (uintptr_t)data->client.sb_virt +
2853 (virt - data->client.user_virt_sb_base);
2854}
2855
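/*
 * Build the RPMB service request sent to TZ from the client's shared
 * buffer. The command buffer must sit at the base offset of the shared
 * buffer, and the buffer must be large enough to hold the key-type field.
 */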
2856int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2857 struct qseecom_send_svc_cmd_req *req_ptr,
2858 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2859{
2860 int ret = 0;
2861 void *req_buf = NULL;
2862
2863 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2864 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2865 req_ptr, send_svc_ireq_ptr);
2866 return -EINVAL;
2867 }
2868
2869 /* Clients need to ensure req_buf is at base offset of shared buffer */
2870 if ((uintptr_t)req_ptr->cmd_req_buf !=
2871 data_ptr->client.user_virt_sb_base) {
2872 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2873 return -EINVAL;
2874 }
2875
2876 if (data_ptr->client.sb_length <
2877 sizeof(struct qseecom_rpmb_provision_key)) {
2878 pr_err("shared buffer is too small to hold key type\n");
2879 return -EINVAL;
2880 }
2881 req_buf = data_ptr->client.sb_virt;
2882
2883 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2884 send_svc_ireq_ptr->key_type =
2885 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2886 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2887 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2888 data_ptr, (uintptr_t)req_ptr->resp_buf));
2889 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2890
2891 return ret;
2892}
2893
2894int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2895 struct qseecom_send_svc_cmd_req *req_ptr,
2896 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2897{
2898 int ret = 0;
2899 uint32_t reqd_len_sb_in = 0;
2900
2901 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2902 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2903 req_ptr, send_svc_ireq_ptr);
2904 return -EINVAL;
2905 }
2906
2907 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2908 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2909 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2910 pr_err("Required: %u, Available: %zu\n",
2911 reqd_len_sb_in, data_ptr->client.sb_length);
2912 return -ENOMEM;
2913 }
2914
2915 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2916 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2917 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2918 data_ptr, (uintptr_t)req_ptr->resp_buf));
2919 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2920
2921 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2922 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2923
2924
2925 return ret;
2926}
2927
2928static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
2929 struct qseecom_send_svc_cmd_req *req)
2930{
2931 if (!req || !req->resp_buf || !req->cmd_req_buf) {
2932 pr_err("req or cmd buffer or response buffer is null\n");
2933 return -EINVAL;
2934 }
2935
2936 if (!data || !data->client.ihandle) {
2937 pr_err("Client or client handle is not initialized\n");
2938 return -EINVAL;
2939 }
2940
2941 if (data->client.sb_virt == NULL) {
2942 pr_err("sb_virt null\n");
2943 return -EINVAL;
2944 }
2945
2946 if (data->client.user_virt_sb_base == 0) {
2947 pr_err("user_virt_sb_base is null\n");
2948 return -EINVAL;
2949 }
2950
2951 if (data->client.sb_length == 0) {
2952 pr_err("sb_length is 0\n");
2953 return -EINVAL;
2954 }
2955
2956 if (((uintptr_t)req->cmd_req_buf <
2957 data->client.user_virt_sb_base) ||
2958 ((uintptr_t)req->cmd_req_buf >=
2959 (data->client.user_virt_sb_base + data->client.sb_length))) {
2960 pr_err("cmd buffer address not within shared buffer\n");
2961 return -EINVAL;
2962 }
2963 if (((uintptr_t)req->resp_buf <
2964 data->client.user_virt_sb_base) ||
2965 ((uintptr_t)req->resp_buf >=
2966 (data->client.user_virt_sb_base + data->client.sb_length))) {
2967 pr_err("response buffer address not within shared buffer\n");
2968 return -EINVAL;
2969 }
2970 if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
2971 (req->cmd_req_len > data->client.sb_length) ||
2972 (req->resp_len > data->client.sb_length)) {
2973 pr_err("cmd buf length or response buf length not valid\n");
2974 return -EINVAL;
2975 }
2976 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
2977 pr_err("Integer overflow detected in req_len & rsp_len\n");
2978 return -EINVAL;
2979 }
2980
2981 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
2982 pr_debug("Not enough memory to fit cmd_buf.\n");
2983 pr_debug("resp_buf. Required: %u, Available: %zu\n",
2984 (req->cmd_req_len + req->resp_len),
2985 data->client.sb_length);
2986 return -ENOMEM;
2987 }
2988 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
2989 pr_err("Integer overflow in req_len & cmd_req_buf\n");
2990 return -EINVAL;
2991 }
2992 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
2993 pr_err("Integer overflow in resp_len & resp_buf\n");
2994 return -EINVAL;
2995 }
2996 if (data->client.user_virt_sb_base >
2997 (ULONG_MAX - data->client.sb_length)) {
2998 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
2999 return -EINVAL;
3000 }
3001 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3002 ((uintptr_t)data->client.user_virt_sb_base +
3003 data->client.sb_length)) ||
3004 (((uintptr_t)req->resp_buf + req->resp_len) >
3005 ((uintptr_t)data->client.user_virt_sb_base +
3006 data->client.sb_length))) {
3007 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3008 return -EINVAL;
3009 }
3010 return 0;
3011}
3012
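/*
 * Send an RPMB or FSM service command to TZ: validate the request against
 * the client's shared buffer, build the matching ireq, scale bus bandwidth
 * or vote for clocks, flush/invalidate the shared buffer around the scm
 * call, and process any INCOMPLETE response.
 */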
3013static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
3014 void __user *argp)
3015{
3016 int ret = 0;
3017 struct qseecom_client_send_service_ireq send_svc_ireq;
3018 struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
3019 struct qseecom_command_scm_resp resp;
3020 struct qseecom_send_svc_cmd_req req;
3021 void *send_req_ptr;
3022 size_t req_buf_size;
3023
3025
3026 if (copy_from_user(&req,
3027 (void __user *)argp,
3028 sizeof(req))) {
3029 pr_err("copy_from_user failed\n");
3030 return -EFAULT;
3031 }
3032
3033 if (__validate_send_service_cmd_inputs(data, &req))
3034 return -EINVAL;
3035
3036 data->type = QSEECOM_SECURE_SERVICE;
3037
3038 switch (req.cmd_id) {
3039 case QSEOS_RPMB_PROVISION_KEY_COMMAND:
3040 case QSEOS_RPMB_ERASE_COMMAND:
3041 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
3042 send_req_ptr = &send_svc_ireq;
3043 req_buf_size = sizeof(send_svc_ireq);
3044 if (__qseecom_process_rpmb_svc_cmd(data, &req,
3045 send_req_ptr))
3046 return -EINVAL;
3047 break;
3048 case QSEOS_FSM_LTEOTA_REQ_CMD:
3049 case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
3050 case QSEOS_FSM_IKE_REQ_CMD:
3051 case QSEOS_FSM_IKE_REQ_RSP_CMD:
3052 case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
3053 case QSEOS_FSM_OEM_FUSE_READ_ROW:
3054 case QSEOS_FSM_ENCFS_REQ_CMD:
3055 case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
3056 send_req_ptr = &send_fsm_key_svc_ireq;
3057 req_buf_size = sizeof(send_fsm_key_svc_ireq);
3058 if (__qseecom_process_fsm_key_svc_cmd(data, &req,
3059 send_req_ptr))
3060 return -EINVAL;
3061 break;
3062 default:
3063 pr_err("Unsupported cmd_id %d\n", req.cmd_id);
3064 return -EINVAL;
3065 }
3066
3067 if (qseecom.support_bus_scaling) {
3068 ret = qseecom_scale_bus_bandwidth_timer(HIGH);
3069 if (ret) {
3070 pr_err("Fail to set bw HIGH\n");
3071 return ret;
3072 }
3073 } else {
3074 ret = qseecom_perf_enable(data);
3075 if (ret) {
3076 pr_err("Failed to vote for clocks with err %d\n", ret);
3077 goto exit;
3078 }
3079 }
3080
3081 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3082 data->client.sb_virt, data->client.sb_length,
3083 ION_IOC_CLEAN_INV_CACHES);
3084 if (ret) {
3085 pr_err("cache operation failed %d\n", ret);
3086 goto exit;
3087 }
3088 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3089 (const void *)send_req_ptr,
3090 req_buf_size, &resp, sizeof(resp));
3091 if (ret) {
3092 pr_err("qseecom_scm_call failed with err: %d\n", ret);
3093 if (!qseecom.support_bus_scaling) {
3094 qsee_disable_clock_vote(data, CLK_DFAB);
3095 qsee_disable_clock_vote(data, CLK_SFPB);
3096 } else {
3097 __qseecom_add_bw_scale_down_timer(
3098 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3099 }
3100 goto exit;
3101 }
3102 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3103 data->client.sb_virt, data->client.sb_length,
3104 ION_IOC_INV_CACHES);
3105 if (ret) {
3106 pr_err("cache operation failed %d\n", ret);
3107 goto exit;
3108 }
3109 switch (resp.result) {
3110 case QSEOS_RESULT_SUCCESS:
3111 break;
3112 case QSEOS_RESULT_INCOMPLETE:
3113 pr_debug("qseos_result_incomplete\n");
3114 ret = __qseecom_process_incomplete_cmd(data, &resp);
3115 if (ret) {
3116 pr_err("process_incomplete_cmd fail with result: %d\n",
3117 resp.result);
3118 }
3119 if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
3120 pr_warn("RPMB key status is 0x%x\n", resp.result);
Brahmaji Kb33e26e2017-06-01 17:20:10 +05303121 if (put_user(resp.result,
3122 (uint32_t __user *)req.resp_buf)) {
3123 ret = -EINVAL;
3124 goto exit;
3125 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003126 ret = 0;
3127 }
3128 break;
3129 case QSEOS_RESULT_FAILURE:
3130 pr_err("scm call failed with resp.result: %d\n", resp.result);
3131 ret = -EINVAL;
3132 break;
3133 default:
3134 pr_err("Response result %d not supported\n",
3135 resp.result);
3136 ret = -EINVAL;
3137 break;
3138 }
3139 if (!qseecom.support_bus_scaling) {
3140 qsee_disable_clock_vote(data, CLK_DFAB);
3141 qsee_disable_clock_vote(data, CLK_SFPB);
3142 } else {
3143 __qseecom_add_bw_scale_down_timer(
3144 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3145 }
3146
3147exit:
3148 return ret;
3149}
3150
3151static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
3152 struct qseecom_send_cmd_req *req)
3153
3154{
3155 if (!data || !data->client.ihandle) {
3156 pr_err("Client or client handle is not initialized\n");
3157 return -EINVAL;
3158 }
3159 if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
3160 (req->cmd_req_buf == NULL)) {
3161 pr_err("cmd buffer or response buffer is null\n");
3162 return -EINVAL;
3163 }
3164 if (((uintptr_t)req->cmd_req_buf <
3165 data->client.user_virt_sb_base) ||
3166 ((uintptr_t)req->cmd_req_buf >=
3167 (data->client.user_virt_sb_base + data->client.sb_length))) {
3168 pr_err("cmd buffer address not within shared buffer\n");
3169 return -EINVAL;
3170 }
3171 if (((uintptr_t)req->resp_buf <
3172 data->client.user_virt_sb_base) ||
3173 ((uintptr_t)req->resp_buf >=
3174 (data->client.user_virt_sb_base + data->client.sb_length))) {
3175 pr_err("response buffer address not within shared buffer\n");
3176 return -EINVAL;
3177 }
3178 if ((req->cmd_req_len == 0) ||
3179 (req->cmd_req_len > data->client.sb_length) ||
3180 (req->resp_len > data->client.sb_length)) {
3181 pr_err("cmd buf length or response buf length not valid\n");
3182 return -EINVAL;
3183 }
3184 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
3185 pr_err("Integer overflow detected in req_len & rsp_len\n");
3186 return -EINVAL;
3187 }
3188
3189 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
3190 pr_debug("Not enough memory to fit cmd_buf.\n");
3191 pr_debug("resp_buf. Required: %u, Available: %zu\n",
3192 (req->cmd_req_len + req->resp_len),
3193 data->client.sb_length);
3194 return -ENOMEM;
3195 }
3196 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
3197 pr_err("Integer overflow in req_len & cmd_req_buf\n");
3198 return -EINVAL;
3199 }
3200 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
3201 pr_err("Integer overflow in resp_len & resp_buf\n");
3202 return -EINVAL;
3203 }
3204 if (data->client.user_virt_sb_base >
3205 (ULONG_MAX - data->client.sb_length)) {
3206 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
3207 return -EINVAL;
3208 }
3209 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3210 ((uintptr_t)data->client.user_virt_sb_base +
3211 data->client.sb_length)) ||
3212 (((uintptr_t)req->resp_buf + req->resp_len) >
3213 ((uintptr_t)data->client.user_virt_sb_base +
3214 data->client.sb_length))) {
3215 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3216 return -EINVAL;
3217 }
3218 return 0;
3219}
3220
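/*
 * Handle the TZ response for a command sent while QSEE reentrancy is
 * supported.  A BLOCKED_ON_LISTENER result is unblocked first and then
 * handled like an INCOMPLETE result; INCOMPLETE results are processed
 * with the app marked as blocked, and waiters on app_block_wq are woken
 * once the listener exchange finishes.
 */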
3221int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
3222 struct qseecom_registered_app_list *ptr_app,
3223 struct qseecom_dev_handle *data)
3224{
3225 int ret = 0;
3226
3227 switch (resp->result) {
3228 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
3229 pr_warn("App(%d) %s is blocked on listener %d\n",
3230 data->client.app_id, data->client.app_name,
3231 resp->data);
3232 ret = __qseecom_process_reentrancy_blocked_on_listener(
3233 resp, ptr_app, data);
3234 if (ret) {
3235 pr_err("failed to process App(%d) %s blocked on listener %d\n",
3236 data->client.app_id, data->client.app_name, resp->data);
3237 return ret;
3238 }
3239
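	/* fall through: a blocked command continues as an incomplete command */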
3240 case QSEOS_RESULT_INCOMPLETE:
3241 qseecom.app_block_ref_cnt++;
3242 ptr_app->app_blocked = true;
3243 ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
3244 ptr_app->app_blocked = false;
3245 qseecom.app_block_ref_cnt--;
3246 wake_up_interruptible(&qseecom.app_block_wq);
3247 if (ret)
3248 pr_err("process_incomplete_cmd failed err: %d\n",
3249 ret);
3250 return ret;
3251 case QSEOS_RESULT_SUCCESS:
3252 return ret;
3253 default:
3254 pr_err("Response result %d not supported\n",
3255 resp->result);
3256 return -EINVAL;
3257 }
3258}
3259
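/*
 * Core send-command path: look up the registered app, build the 32-bit
 * or 64-bit send_data request (including the sglistinfo table), clean
 * the shared-buffer caches, issue the SCM call, then handle reentrancy
 * / INCOMPLETE responses and invalidate the shared buffer on the way out.
 */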
3260static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
3261 struct qseecom_send_cmd_req *req)
3262{
3263 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07003264 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003265 u32 reqd_len_sb_in = 0;
3266 struct qseecom_client_send_data_ireq send_data_req = {0};
3267 struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
3268 struct qseecom_command_scm_resp resp;
3269 unsigned long flags;
3270 struct qseecom_registered_app_list *ptr_app;
3271 bool found_app = false;
3272 void *cmd_buf = NULL;
3273 size_t cmd_len;
3274 struct sglist_info *table = data->sglistinfo_ptr;
3275
3276 reqd_len_sb_in = req->cmd_req_len + req->resp_len;
3277 /* find app_id & img_name from list */
3278 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
3279 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
3280 list) {
3281 if ((ptr_app->app_id == data->client.app_id) &&
3282 (!strcmp(ptr_app->app_name, data->client.app_name))) {
3283 found_app = true;
3284 break;
3285 }
3286 }
3287 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
3288
3289 if (!found_app) {
3290 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
3291 (char *)data->client.app_name);
3292 return -ENOENT;
3293 }
3294
3295 if (qseecom.qsee_version < QSEE_VERSION_40) {
3296 send_data_req.app_id = data->client.app_id;
3297 send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3298 data, (uintptr_t)req->cmd_req_buf));
3299 send_data_req.req_len = req->cmd_req_len;
3300 send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3301 data, (uintptr_t)req->resp_buf));
3302 send_data_req.rsp_len = req->resp_len;
3303 send_data_req.sglistinfo_ptr =
3304 (uint32_t)virt_to_phys(table);
3305 send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3306 dmac_flush_range((void *)table,
3307 (void *)table + SGLISTINFO_TABLE_SIZE);
3308 cmd_buf = (void *)&send_data_req;
3309 cmd_len = sizeof(struct qseecom_client_send_data_ireq);
3310 } else {
3311 send_data_req_64bit.app_id = data->client.app_id;
3312 send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
3313 (uintptr_t)req->cmd_req_buf);
3314 send_data_req_64bit.req_len = req->cmd_req_len;
3315 send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
3316 (uintptr_t)req->resp_buf);
3317 send_data_req_64bit.rsp_len = req->resp_len;
3318 /* check if 32bit app's phys_addr region is under 4GB.*/
3319 if ((data->client.app_arch == ELFCLASS32) &&
3320 ((send_data_req_64bit.req_ptr >=
3321 PHY_ADDR_4G - send_data_req_64bit.req_len) ||
3322 (send_data_req_64bit.rsp_ptr >=
3323 PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
3324 pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
3325 data->client.app_name,
3326 send_data_req_64bit.req_ptr,
3327 send_data_req_64bit.req_len,
3328 send_data_req_64bit.rsp_ptr,
3329 send_data_req_64bit.rsp_len);
3330 return -EFAULT;
3331 }
3332 send_data_req_64bit.sglistinfo_ptr =
3333 (uint64_t)virt_to_phys(table);
3334 send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3335 dmac_flush_range((void *)table,
3336 (void *)table + SGLISTINFO_TABLE_SIZE);
3337 cmd_buf = (void *)&send_data_req_64bit;
3338 cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
3339 }
3340
3341 if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
3342 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
3343 else
3344 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
3345
3346 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3347 data->client.sb_virt,
3348 reqd_len_sb_in,
3349 ION_IOC_CLEAN_INV_CACHES);
3350 if (ret) {
3351 pr_err("cache operation failed %d\n", ret);
3352 return ret;
3353 }
3354
3355 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
3356
3357 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3358 cmd_buf, cmd_len,
3359 &resp, sizeof(resp));
3360 if (ret) {
3361 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
3362 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07003363 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003364 }
3365
3366 if (qseecom.qsee_reentrancy_support) {
3367 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07003368 if (ret)
3369 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003370 } else {
3371 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
3372 ret = __qseecom_process_incomplete_cmd(data, &resp);
3373 if (ret) {
3374 pr_err("process_incomplete_cmd failed err: %d\n",
3375 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07003376 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003377 }
3378 } else {
3379 if (resp.result != QSEOS_RESULT_SUCCESS) {
3380 pr_err("Response result %d not supported\n",
3381 resp.result);
3382 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07003383 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003384 }
3385 }
3386 }
Zhen Kong4af480e2017-09-19 14:34:16 -07003387exit:
3388 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003389 data->client.sb_virt, data->client.sb_length,
3390 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07003391 if (ret2) {
3392 pr_err("cache operation failed %d\n", ret2);
3393 return ret2;
3394 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003395 return ret;
3396}
3397
3398static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3399{
3400 int ret = 0;
3401 struct qseecom_send_cmd_req req;
3402
3403 ret = copy_from_user(&req, argp, sizeof(req));
3404 if (ret) {
3405 pr_err("copy_from_user failed\n");
3406 return ret;
3407 }
3408
3409 if (__validate_send_cmd_inputs(data, &req))
3410 return -EINVAL;
3411
3412 return __qseecom_send_cmd(data, &req);
3418}
3419
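/*
 * Ensure the caller-supplied cmd_buf_offset for an ION fd leaves at
 * least sizeof(uint32_t) of room inside the request (client) or
 * response (listener) buffer, so the patched physical address cannot
 * be written past the end of that buffer.
 */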
3420int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3421 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3422 struct qseecom_dev_handle *data, int i)
3423 {
3424 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3425 (req->ifd_data[i].fd > 0)) {
3426 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3427 (req->ifd_data[i].cmd_buf_offset >
3428 req->cmd_req_len - sizeof(uint32_t))) {
3429 pr_err("Invalid offset (req len) 0x%x\n",
3430 req->ifd_data[i].cmd_buf_offset);
3431 return -EINVAL;
3432 }
3433 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3434 (lstnr_resp->ifd_data[i].fd > 0)) {
3435 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3436 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3437 lstnr_resp->resp_len - sizeof(uint32_t))) {
3438 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3439 lstnr_resp->ifd_data[i].cmd_buf_offset);
3440 return -EINVAL;
3441 }
3442 }
3443 return 0;
3444}
3445
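/*
 * Walk the MAX_ION_FD descriptors of a modfd request (or listener
 * response), map each ION buffer's scatter-gather table, and patch the
 * buffer's physical address(es) into the command buffer at the given
 * offset; on cleanup the same fields are zeroed instead.  Cache
 * maintenance is performed on every touched ION buffer and the
 * sglistinfo table used by the SCM call is updated.
 */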
3446static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
3447 struct qseecom_dev_handle *data)
3448{
3449 struct ion_handle *ihandle;
3450 char *field;
3451 int ret = 0;
3452 int i = 0;
3453 uint32_t len = 0;
3454 struct scatterlist *sg;
3455 struct qseecom_send_modfd_cmd_req *req = NULL;
3456 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3457 struct qseecom_registered_listener_list *this_lstnr = NULL;
3458 uint32_t offset;
3459 struct sg_table *sg_ptr;
3460
3461 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3462 (data->type != QSEECOM_CLIENT_APP))
3463 return -EFAULT;
3464
3465 if (msg == NULL) {
3466 pr_err("Invalid address\n");
3467 return -EINVAL;
3468 }
3469 if (data->type == QSEECOM_LISTENER_SERVICE) {
3470 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3471 this_lstnr = __qseecom_find_svc(data->listener.id);
3472 if (IS_ERR_OR_NULL(this_lstnr)) {
3473 pr_err("Invalid listener ID\n");
3474 return -ENOMEM;
3475 }
3476 } else {
3477 req = (struct qseecom_send_modfd_cmd_req *)msg;
3478 }
3479
3480 for (i = 0; i < MAX_ION_FD; i++) {
3481 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3482 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003483 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003484 req->ifd_data[i].fd);
3485 if (IS_ERR_OR_NULL(ihandle)) {
3486 pr_err("Ion client can't retrieve the handle\n");
3487 return -ENOMEM;
3488 }
3489 field = (char *) req->cmd_req_buf +
3490 req->ifd_data[i].cmd_buf_offset;
3491 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3492 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003493 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003494 lstnr_resp->ifd_data[i].fd);
3495 if (IS_ERR_OR_NULL(ihandle)) {
3496 pr_err("Ion client can't retrieve the handle\n");
3497 return -ENOMEM;
3498 }
3499 field = lstnr_resp->resp_buf_ptr +
3500 lstnr_resp->ifd_data[i].cmd_buf_offset;
3501 } else {
3502 continue;
3503 }
3504 /* Populate the cmd data structure with the phys_addr */
3505 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3506 if (IS_ERR_OR_NULL(sg_ptr)) {
3507 pr_err("Ion client could not retrieve sg table\n");
3508 goto err;
3509 }
3510 if (sg_ptr->nents == 0) {
3511 pr_err("Num of scattered entries is 0\n");
3512 goto err;
3513 }
3514 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3515 pr_err("Num of scattered entries (%d) is greater than max supported %d\n",
3516 sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3518 goto err;
3519 }
3520 sg = sg_ptr->sgl;
3521 if (sg_ptr->nents == 1) {
3522 uint32_t *update;
3523
3524 if (__boundary_checks_offset(req, lstnr_resp, data, i))
3525 goto err;
3526 if ((data->type == QSEECOM_CLIENT_APP &&
3527 (data->client.app_arch == ELFCLASS32 ||
3528 data->client.app_arch == ELFCLASS64)) ||
3529 (data->type == QSEECOM_LISTENER_SERVICE)) {
3530 /*
3531 * Check if sg list phys addr region is under 4GB
3532 */
3533 if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
3534 (!cleanup) &&
3535 ((uint64_t)sg_dma_address(sg_ptr->sgl)
3536 >= PHY_ADDR_4G - sg->length)) {
3537 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3538 data->client.app_name,
3539 &(sg_dma_address(sg_ptr->sgl)),
3540 sg->length);
3541 goto err;
3542 }
3543 update = (uint32_t *) field;
3544 *update = cleanup ? 0 :
3545 (uint32_t)sg_dma_address(sg_ptr->sgl);
3546 } else {
3547 pr_err("QSEE app arch %u is not supported\n",
3548 data->client.app_arch);
3549 goto err;
3550 }
3551 len += (uint32_t)sg->length;
3552 } else {
3553 struct qseecom_sg_entry *update;
3554 int j = 0;
3555
3556 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3557 (req->ifd_data[i].fd > 0)) {
3558
3559 if ((req->cmd_req_len <
3560 SG_ENTRY_SZ * sg_ptr->nents) ||
3561 (req->ifd_data[i].cmd_buf_offset >
3562 (req->cmd_req_len -
3563 SG_ENTRY_SZ * sg_ptr->nents))) {
3564 pr_err("Invalid offset = 0x%x\n",
3565 req->ifd_data[i].cmd_buf_offset);
3566 goto err;
3567 }
3568
3569 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3570 (lstnr_resp->ifd_data[i].fd > 0)) {
3571
3572 if ((lstnr_resp->resp_len <
3573 SG_ENTRY_SZ * sg_ptr->nents) ||
3574 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3575 (lstnr_resp->resp_len -
3576 SG_ENTRY_SZ * sg_ptr->nents))) {
3577 goto err;
3578 }
3579 }
3580 if ((data->type == QSEECOM_CLIENT_APP &&
3581 (data->client.app_arch == ELFCLASS32 ||
3582 data->client.app_arch == ELFCLASS64)) ||
3583 (data->type == QSEECOM_LISTENER_SERVICE)) {
3584 update = (struct qseecom_sg_entry *)field;
3585 for (j = 0; j < sg_ptr->nents; j++) {
3586 /*
3587 * Check if sg list PA is under 4GB
3588 */
3589 if ((qseecom.qsee_version >=
3590 QSEE_VERSION_40) &&
3591 (!cleanup) &&
3592 ((uint64_t)(sg_dma_address(sg))
3593 >= PHY_ADDR_4G - sg->length)) {
3594 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3595 data->client.app_name,
3596 &(sg_dma_address(sg)),
3597 sg->length);
3598 goto err;
3599 }
3600 update->phys_addr = cleanup ? 0 :
3601 (uint32_t)sg_dma_address(sg);
3602 update->len = cleanup ? 0 : sg->length;
3603 update++;
3604 len += sg->length;
3605 sg = sg_next(sg);
3606 }
3607 } else {
3608 pr_err("QSEE app arch %u is not supported\n",
3609 data->client.app_arch);
3610 goto err;
3611 }
3612 }
3613
3614 if (cleanup) {
3615 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3616 ihandle, NULL, len,
3617 ION_IOC_INV_CACHES);
3618 if (ret) {
3619 pr_err("cache operation failed %d\n", ret);
3620 goto err;
3621 }
3622 } else {
3623 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3624 ihandle, NULL, len,
3625 ION_IOC_CLEAN_INV_CACHES);
3626 if (ret) {
3627 pr_err("cache operation failed %d\n", ret);
3628 goto err;
3629 }
3630 if (data->type == QSEECOM_CLIENT_APP) {
3631 offset = req->ifd_data[i].cmd_buf_offset;
3632 data->sglistinfo_ptr[i].indexAndFlags =
3633 SGLISTINFO_SET_INDEX_FLAG(
3634 (sg_ptr->nents == 1), 0, offset);
3635 data->sglistinfo_ptr[i].sizeOrCount =
3636 (sg_ptr->nents == 1) ?
3637 sg->length : sg_ptr->nents;
3638 data->sglist_cnt = i + 1;
3639 } else {
3640 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3641 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3642 (uintptr_t)this_lstnr->sb_virt);
3643 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3644 SGLISTINFO_SET_INDEX_FLAG(
3645 (sg_ptr->nents == 1), 0, offset);
3646 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3647 (sg_ptr->nents == 1) ?
3648 sg->length : sg_ptr->nents;
3649 this_lstnr->sglist_cnt = i + 1;
3650 }
3651 }
3652 /* Deallocate the handle */
3653 if (!IS_ERR_OR_NULL(ihandle))
3654 ion_free(qseecom.ion_clnt, ihandle);
3655 }
3656 return ret;
3657err:
3658 if (!IS_ERR_OR_NULL(ihandle))
3659 ion_free(qseecom.ion_clnt, ihandle);
3660 return -ENOMEM;
3661}
3662
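/*
 * Used when an ION buffer has more than QSEECOM_MAX_SG_ENTRY segments:
 * allocate a contiguous DMA-coherent buffer, copy every SG entry into
 * it, and leave a version-2 header in the command buffer that points
 * TZ at the new list.  The allocation is recorded in sec_buf_fd[] so it
 * can be freed during cleanup.
 */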
3663static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3664 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3665{
3666 struct scatterlist *sg = sg_ptr->sgl;
3667 struct qseecom_sg_entry_64bit *sg_entry;
3668 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3669 void *buf;
3670 uint i;
3671 size_t size;
3672 dma_addr_t coh_pmem;
3673
3674 if (fd_idx >= MAX_ION_FD) {
3675 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3676 return -ENOMEM;
3677 }
3678 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3679 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3680 /* Allocate a contiguous kernel buffer */
3681 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3682 size = (size + PAGE_SIZE) & PAGE_MASK;
3683 buf = dma_alloc_coherent(qseecom.pdev,
3684 size, &coh_pmem, GFP_KERNEL);
3685 if (buf == NULL) {
3686 pr_err("failed to alloc memory for sg buf\n");
3687 return -ENOMEM;
3688 }
3689 /* update qseecom_sg_list_buf_hdr_64bit */
3690 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3691 buf_hdr->new_buf_phys_addr = coh_pmem;
3692 buf_hdr->nents_total = sg_ptr->nents;
3693 /* copy all sg entries into the newly allocated buffer */
3694 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3695 for (i = 0; i < sg_ptr->nents; i++) {
3696 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3697 sg_entry->len = sg->length;
3698 sg_entry++;
3699 sg = sg_next(sg);
3700 }
3701
3702 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3703 data->client.sec_buf_fd[fd_idx].vbase = buf;
3704 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3705 data->client.sec_buf_fd[fd_idx].size = size;
3706
3707 return 0;
3708}
3709
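/*
 * 64-bit variant of __qseecom_update_cmd_buf(): physical addresses are
 * written as 64-bit values / qseecom_sg_entry_64bit entries, and SG
 * lists larger than QSEECOM_MAX_SG_ENTRY are redirected through
 * __qseecom_allocate_sg_list_buffer() instead of being rejected.
 */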
3710static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
3711 struct qseecom_dev_handle *data)
3712{
3713 struct ion_handle *ihandle;
3714 char *field;
3715 int ret = 0;
3716 int i = 0;
3717 uint32_t len = 0;
3718 struct scatterlist *sg;
3719 struct qseecom_send_modfd_cmd_req *req = NULL;
3720 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3721 struct qseecom_registered_listener_list *this_lstnr = NULL;
3722 uint32_t offset;
3723 struct sg_table *sg_ptr;
3724
3725 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3726 (data->type != QSEECOM_CLIENT_APP))
3727 return -EFAULT;
3728
3729 if (msg == NULL) {
3730 pr_err("Invalid address\n");
3731 return -EINVAL;
3732 }
3733 if (data->type == QSEECOM_LISTENER_SERVICE) {
3734 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3735 this_lstnr = __qseecom_find_svc(data->listener.id);
3736 if (IS_ERR_OR_NULL(this_lstnr)) {
3737 pr_err("Invalid listener ID\n");
3738 return -ENOMEM;
3739 }
3740 } else {
3741 req = (struct qseecom_send_modfd_cmd_req *)msg;
3742 }
3743
3744 for (i = 0; i < MAX_ION_FD; i++) {
3745 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3746 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003747 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003748 req->ifd_data[i].fd);
3749 if (IS_ERR_OR_NULL(ihandle)) {
3750 pr_err("Ion client can't retrieve the handle\n");
3751 return -ENOMEM;
3752 }
3753 field = (char *) req->cmd_req_buf +
3754 req->ifd_data[i].cmd_buf_offset;
3755 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3756 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003757 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003758 lstnr_resp->ifd_data[i].fd);
3759 if (IS_ERR_OR_NULL(ihandle)) {
3760 pr_err("Ion client can't retrieve the handle\n");
3761 return -ENOMEM;
3762 }
3763 field = lstnr_resp->resp_buf_ptr +
3764 lstnr_resp->ifd_data[i].cmd_buf_offset;
3765 } else {
3766 continue;
3767 }
3768 /* Populate the cmd data structure with the phys_addr */
3769 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3770 if (IS_ERR_OR_NULL(sg_ptr)) {
3771 pr_err("Ion client could not retrieve sg table\n");
3772 goto err;
3773 }
3774 if (sg_ptr->nents == 0) {
3775 pr_err("Num of scattered entries is 0\n");
3776 goto err;
3777 }
3778 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3779 pr_warn("Num of scattered entries (%d) is greater than %d\n",
3780 sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3782 if (cleanup) {
3783 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3784 data->client.sec_buf_fd[i].vbase)
3785 dma_free_coherent(qseecom.pdev,
3786 data->client.sec_buf_fd[i].size,
3787 data->client.sec_buf_fd[i].vbase,
3788 data->client.sec_buf_fd[i].pbase);
3789 } else {
3790 ret = __qseecom_allocate_sg_list_buffer(data,
3791 field, i, sg_ptr);
3792 if (ret) {
3793 pr_err("Failed to allocate sg list buffer\n");
3794 goto err;
3795 }
3796 }
3797 len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
3798 sg = sg_ptr->sgl;
3799 goto cleanup;
3800 }
3801 sg = sg_ptr->sgl;
3802 if (sg_ptr->nents == 1) {
3803 uint64_t *update_64bit;
3804
3805 if (__boundary_checks_offset(req, lstnr_resp, data, i))
3806 goto err;
3807 /* 64bit app uses 64bit address */
3808 update_64bit = (uint64_t *) field;
3809 *update_64bit = cleanup ? 0 :
3810 (uint64_t)sg_dma_address(sg_ptr->sgl);
3811 len += (uint32_t)sg->length;
3812 } else {
3813 struct qseecom_sg_entry_64bit *update_64bit;
3814 int j = 0;
3815
3816 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3817 (req->ifd_data[i].fd > 0)) {
3818
3819 if ((req->cmd_req_len <
3820 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3821 (req->ifd_data[i].cmd_buf_offset >
3822 (req->cmd_req_len -
3823 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3824 pr_err("Invalid offset = 0x%x\n",
3825 req->ifd_data[i].cmd_buf_offset);
3826 goto err;
3827 }
3828
3829 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3830 (lstnr_resp->ifd_data[i].fd > 0)) {
3831
3832 if ((lstnr_resp->resp_len <
3833 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3834 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3835 (lstnr_resp->resp_len -
3836 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3837 goto err;
3838 }
3839 }
3840 /* 64bit app uses 64bit address */
3841 update_64bit = (struct qseecom_sg_entry_64bit *)field;
3842 for (j = 0; j < sg_ptr->nents; j++) {
3843 update_64bit->phys_addr = cleanup ? 0 :
3844 (uint64_t)sg_dma_address(sg);
3845 update_64bit->len = cleanup ? 0 :
3846 (uint32_t)sg->length;
3847 update_64bit++;
3848 len += sg->length;
3849 sg = sg_next(sg);
3850 }
3851 }
3852cleanup:
3853 if (cleanup) {
3854 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3855 ihandle, NULL, len,
3856 ION_IOC_INV_CACHES);
3857 if (ret) {
3858 pr_err("cache operation failed %d\n", ret);
3859 goto err;
3860 }
3861 } else {
3862 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3863 ihandle, NULL, len,
3864 ION_IOC_CLEAN_INV_CACHES);
3865 if (ret) {
3866 pr_err("cache operation failed %d\n", ret);
3867 goto err;
3868 }
3869 if (data->type == QSEECOM_CLIENT_APP) {
3870 offset = req->ifd_data[i].cmd_buf_offset;
3871 data->sglistinfo_ptr[i].indexAndFlags =
3872 SGLISTINFO_SET_INDEX_FLAG(
3873 (sg_ptr->nents == 1), 1, offset);
3874 data->sglistinfo_ptr[i].sizeOrCount =
3875 (sg_ptr->nents == 1) ?
3876 sg->length : sg_ptr->nents;
3877 data->sglist_cnt = i + 1;
3878 } else {
3879 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3880 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3881 (uintptr_t)this_lstnr->sb_virt);
3882 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3883 SGLISTINFO_SET_INDEX_FLAG(
3884 (sg_ptr->nents == 1), 1, offset);
3885 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3886 (sg_ptr->nents == 1) ?
3887 sg->length : sg_ptr->nents;
3888 this_lstnr->sglist_cnt = i + 1;
3889 }
3890 }
3891 /* Deallocate the handle */
3892 if (!IS_ERR_OR_NULL(ihandle))
3893 ion_free(qseecom.ion_clnt, ihandle);
3894 }
3895 return ret;
3896err:
3897 for (i = 0; i < MAX_ION_FD; i++)
3898 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3899 data->client.sec_buf_fd[i].vbase)
3900 dma_free_coherent(qseecom.pdev,
3901 data->client.sec_buf_fd[i].size,
3902 data->client.sec_buf_fd[i].vbase,
3903 data->client.sec_buf_fd[i].pbase);
3904 if (!IS_ERR_OR_NULL(ihandle))
3905 ion_free(qseecom.ion_clnt, ihandle);
3906 return -ENOMEM;
3907}
3908
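/*
 * Copy a send_modfd_cmd request from user space, validate it with
 * __validate_send_cmd_inputs(), translate the buffers to kernel
 * addresses, patch in the ION physical addresses, send the command,
 * and finally run the cleanup pass to zero the patched fields.
 */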
3909static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3910 void __user *argp,
3911 bool is_64bit_addr)
3912{
3913 int ret = 0;
3914 int i;
3915 struct qseecom_send_modfd_cmd_req req;
3916 struct qseecom_send_cmd_req send_cmd_req;
3917
3918 ret = copy_from_user(&req, argp, sizeof(req));
3919 if (ret) {
3920 pr_err("copy_from_user failed\n");
3921 return ret;
3922 }
3923
3924 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3925 send_cmd_req.cmd_req_len = req.cmd_req_len;
3926 send_cmd_req.resp_buf = req.resp_buf;
3927 send_cmd_req.resp_len = req.resp_len;
3928
3929 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3930 return -EINVAL;
3931
3932 /* validate offsets */
3933 for (i = 0; i < MAX_ION_FD; i++) {
3934 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3935 pr_err("Invalid offset %d = 0x%x\n",
3936 i, req.ifd_data[i].cmd_buf_offset);
3937 return -EINVAL;
3938 }
3939 }
3940 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3941 (uintptr_t)req.cmd_req_buf);
3942 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3943 (uintptr_t)req.resp_buf);
3944
3945 if (!is_64bit_addr) {
3946 ret = __qseecom_update_cmd_buf(&req, false, data);
3947 if (ret)
3948 return ret;
3949 ret = __qseecom_send_cmd(data, &send_cmd_req);
3950 if (ret)
3951 return ret;
3952 ret = __qseecom_update_cmd_buf(&req, true, data);
3953 if (ret)
3954 return ret;
3955 } else {
3956 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3957 if (ret)
3958 return ret;
3959 ret = __qseecom_send_cmd(data, &send_cmd_req);
3960 if (ret)
3961 return ret;
3962 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3963 if (ret)
3964 return ret;
3965 }
3966
3967 return ret;
3968}
3969
3970static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3971 void __user *argp)
3972{
3973 return __qseecom_send_modfd_cmd(data, argp, false);
3974}
3975
3976static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
3977 void __user *argp)
3978{
3979 return __qseecom_send_modfd_cmd(data, argp, true);
3980}
3981
3982
3983
3984static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3985 struct qseecom_registered_listener_list *svc)
3986{
3987 int ret;
3988
Zhen Kongf5087172018-10-11 17:22:05 -07003989 ret = (svc->rcv_req_flag == 1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08003990 return ret || data->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003991}
3992
3993static int qseecom_receive_req(struct qseecom_dev_handle *data)
3994{
3995 int ret = 0;
3996 struct qseecom_registered_listener_list *this_lstnr;
3997
Zhen Kongbcdeda22018-11-16 13:50:51 -08003998 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003999 this_lstnr = __qseecom_find_svc(data->listener.id);
4000 if (!this_lstnr) {
4001 pr_err("Invalid listener ID\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08004002 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004003 return -ENODATA;
4004 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08004005 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004006
4007 while (1) {
4008 if (wait_event_freezable(this_lstnr->rcv_req_wq,
4009 __qseecom_listener_has_rcvd_req(data,
4010 this_lstnr))) {
Zhen Kong25731112018-09-20 13:10:03 -07004011 pr_warn("Interrupted: exiting Listener Service = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004012 (uint32_t)data->listener.id);
4013 /* woken up for different reason */
4014 return -ERESTARTSYS;
4015 }
4016
Zhen Kongbcdeda22018-11-16 13:50:51 -08004017 if (data->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004018 pr_err("Aborting Listener Service = %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07004019 (uint32_t)data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004020 return -ENODEV;
4021 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08004022 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004023 this_lstnr->rcv_req_flag = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08004024 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004025 break;
4026 }
4027 return ret;
4028}
4029
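/*
 * Minimal sanity check of a TA's .mdt image: the ELF32/ELF64 header
 * must fit in the file, carry the ELF magic, declare at least one
 * program header, and all program headers must lie within the file.
 */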
4030static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
4031{
4032 unsigned char app_arch = 0;
4033 struct elf32_hdr *ehdr;
4034 struct elf64_hdr *ehdr64;
4035
4036 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4037
4038 switch (app_arch) {
4039 case ELFCLASS32: {
4040 ehdr = (struct elf32_hdr *)fw_entry->data;
4041 if (fw_entry->size < sizeof(*ehdr)) {
4042 pr_err("%s: Not big enough to be an elf32 header\n",
4043 qseecom.pdev->init_name);
4044 return false;
4045 }
4046 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
4047 pr_err("%s: Not an elf32 header\n",
4048 qseecom.pdev->init_name);
4049 return false;
4050 }
4051 if (ehdr->e_phnum == 0) {
4052 pr_err("%s: No loadable segments\n",
4053 qseecom.pdev->init_name);
4054 return false;
4055 }
4056 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
4057 sizeof(struct elf32_hdr) > fw_entry->size) {
4058 pr_err("%s: Program headers not within mdt\n",
4059 qseecom.pdev->init_name);
4060 return false;
4061 }
4062 break;
4063 }
4064 case ELFCLASS64: {
4065 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4066 if (fw_entry->size < sizeof(*ehdr64)) {
4067 pr_err("%s: Not big enough to be an elf64 header\n",
4068 qseecom.pdev->init_name);
4069 return false;
4070 }
4071 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
4072 pr_err("%s: Not an elf64 header\n",
4073 qseecom.pdev->init_name);
4074 return false;
4075 }
4076 if (ehdr64->e_phnum == 0) {
4077 pr_err("%s: No loadable segments\n",
4078 qseecom.pdev->init_name);
4079 return false;
4080 }
4081 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
4082 sizeof(struct elf64_hdr) > fw_entry->size) {
4083 pr_err("%s: Program headers not within mdt\n",
4084 qseecom.pdev->init_name);
4085 return false;
4086 }
4087 break;
4088 }
4089 default: {
4090 pr_err("QSEE app arch %u is not supported\n", app_arch);
4091 return false;
4092 }
4093 }
4094 return true;
4095}
4096
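/*
 * Determine the total size of a split firmware image by summing
 * <appname>.mdt and every <appname>.bXX blob, and report the ELF class
 * (32-bit or 64-bit) found in the .mdt header.
 */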
4097static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
4098 uint32_t *app_arch)
4099{
4100 int ret = -1;
4101 int i = 0, rc = 0;
4102 const struct firmware *fw_entry = NULL;
4103 char fw_name[MAX_APP_NAME_SIZE];
4104 struct elf32_hdr *ehdr;
4105 struct elf64_hdr *ehdr64;
4106 int num_images = 0;
4107
4108 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4109 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4110 if (rc) {
4111 pr_err("error with request_firmware\n");
4112 ret = -EIO;
4113 goto err;
4114 }
4115 if (!__qseecom_is_fw_image_valid(fw_entry)) {
4116 ret = -EIO;
4117 goto err;
4118 }
4119 *app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4120 *fw_size = fw_entry->size;
4121 if (*app_arch == ELFCLASS32) {
4122 ehdr = (struct elf32_hdr *)fw_entry->data;
4123 num_images = ehdr->e_phnum;
4124 } else if (*app_arch == ELFCLASS64) {
4125 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4126 num_images = ehdr64->e_phnum;
4127 } else {
4128 pr_err("QSEE %s app, arch %u is not supported\n",
4129 appname, *app_arch);
4130 ret = -EIO;
4131 goto err;
4132 }
4133 pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
4134 release_firmware(fw_entry);
4135 fw_entry = NULL;
4136 for (i = 0; i < num_images; i++) {
4137 memset(fw_name, 0, sizeof(fw_name));
4138 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4139 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4140 if (ret)
4141 goto err;
4142 if (*fw_size > U32_MAX - fw_entry->size) {
4143 pr_err("QSEE %s app file size overflow\n", appname);
4144 ret = -EINVAL;
4145 goto err;
4146 }
4147 *fw_size += fw_entry->size;
4148 release_firmware(fw_entry);
4149 fw_entry = NULL;
4150 }
4151
4152 return ret;
4153err:
4154 if (fw_entry)
4155 release_firmware(fw_entry);
4156 *fw_size = 0;
4157 return ret;
4158}
4159
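/*
 * Concatenate <appname>.mdt and the <appname>.bXX blobs into img_data
 * (which must be at least fw_size bytes) and fill in the mdt_len and
 * img_len fields of the load request.
 */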
4160static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
4161 uint32_t fw_size,
4162 struct qseecom_load_app_ireq *load_req)
4163{
4164 int ret = -1;
4165 int i = 0, rc = 0;
4166 const struct firmware *fw_entry = NULL;
4167 char fw_name[MAX_APP_NAME_SIZE];
4168 u8 *img_data_ptr = img_data;
4169 struct elf32_hdr *ehdr;
4170 struct elf64_hdr *ehdr64;
4171 int num_images = 0;
4172 unsigned char app_arch = 0;
4173
4174 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4175 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4176 if (rc) {
4177 ret = -EIO;
4178 goto err;
4179 }
4180
4181 load_req->img_len = fw_entry->size;
4182 if (load_req->img_len > fw_size) {
4183 pr_err("app %s size %zu is larger than buf size %u\n",
4184 appname, fw_entry->size, fw_size);
4185 ret = -EINVAL;
4186 goto err;
4187 }
4188 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4189 img_data_ptr = img_data_ptr + fw_entry->size;
4190 load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/
4191
4192 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4193 if (app_arch == ELFCLASS32) {
4194 ehdr = (struct elf32_hdr *)fw_entry->data;
4195 num_images = ehdr->e_phnum;
4196 } else if (app_arch == ELFCLASS64) {
4197 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4198 num_images = ehdr64->e_phnum;
4199 } else {
4200 pr_err("QSEE %s app, arch %u is not supported\n",
4201 appname, app_arch);
4202 ret = -EIO;
4203 goto err;
4204 }
4205 release_firmware(fw_entry);
4206 fw_entry = NULL;
4207 for (i = 0; i < num_images; i++) {
4208 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4209 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4210 if (ret) {
4211 pr_err("Failed to locate blob %s\n", fw_name);
4212 goto err;
4213 }
4214 if ((fw_entry->size > U32_MAX - load_req->img_len) ||
4215 (fw_entry->size + load_req->img_len > fw_size)) {
4216 pr_err("Invalid file size for %s\n", fw_name);
4217 ret = -EINVAL;
4218 goto err;
4219 }
4220 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4221 img_data_ptr = img_data_ptr + fw_entry->size;
4222 load_req->img_len += fw_entry->size;
4223 release_firmware(fw_entry);
4224 fw_entry = NULL;
4225 }
4226 return ret;
4227err:
4228 release_firmware(fw_entry);
4229 return ret;
4230}
4231
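/*
 * Allocate an ION buffer from the QSEECOM TA heap for a firmware image,
 * retrying a few times with a delay (and with app_access_lock dropped)
 * if the heap is temporarily exhausted, then return the kernel mapping
 * and physical address of the buffer.
 */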
4232static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
4233 u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
4234{
4235 size_t len = 0;
4236 int ret = 0;
4237 ion_phys_addr_t pa;
4238 struct ion_handle *ihandle = NULL;
4239 u8 *img_data = NULL;
Zhen Kong3dd92792017-12-08 09:47:15 -08004240 int retry = 0;
Zhen Konge30e1342019-01-22 08:57:02 -08004241 int ion_flag = ION_FLAG_CACHED;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004242
Zhen Kong3dd92792017-12-08 09:47:15 -08004243 do {
Zhen Kong5d02be92018-05-29 16:17:29 -07004244 if (retry++) {
4245 mutex_unlock(&app_access_lock);
Zhen Kong3dd92792017-12-08 09:47:15 -08004246 msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
Zhen Kong5d02be92018-05-29 16:17:29 -07004247 mutex_lock(&app_access_lock);
4248 }
Zhen Kong3dd92792017-12-08 09:47:15 -08004249 ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
Zhen Konge30e1342019-01-22 08:57:02 -08004250 SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), ion_flag);
Zhen Kong3dd92792017-12-08 09:47:15 -08004251 } while (IS_ERR_OR_NULL(ihandle) &&
4252 (retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004253
4254 if (IS_ERR_OR_NULL(ihandle)) {
4255 pr_err("ION alloc failed\n");
4256 return -ENOMEM;
4257 }
4258 img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
4259 ihandle);
4260
4261 if (IS_ERR_OR_NULL(img_data)) {
4262 pr_err("ION memory mapping for image loading failed\n");
4263 ret = -ENOMEM;
4264 goto exit_ion_free;
4265 }
4266 /* Get the physical address of the ION BUF */
4267 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
4268 if (ret) {
4269 pr_err("physical memory retrieval failure\n");
4270 ret = -EIO;
4271 goto exit_ion_unmap_kernel;
4272 }
4273
4274 *pihandle = ihandle;
4275 *data = img_data;
4276 *paddr = pa;
4277 return ret;
4278
4279exit_ion_unmap_kernel:
4280 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
4281exit_ion_free:
4282 ion_free(qseecom.ion_clnt, ihandle);
4283 ihandle = NULL;
4284 return ret;
4285}
4286
4287static void __qseecom_free_img_data(struct ion_handle **ihandle)
4288{
4289 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4290 ion_free(qseecom.ion_clnt, *ihandle);
4291 *ihandle = NULL;
4292}
4293
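/*
 * Load a TA image from the firmware partition: make sure the matching
 * cmnlib/cmnlib64 is loaded first, stage the image in ION memory, and
 * issue QSEOS_APP_START_COMMAND to TZ.  On success *app_id is set to
 * the application id returned by QSEE.
 */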
4294static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4295 uint32_t *app_id)
4296{
4297 int ret = -1;
4298 uint32_t fw_size = 0;
4299 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4300 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4301 struct qseecom_command_scm_resp resp;
4302 u8 *img_data = NULL;
4303 ion_phys_addr_t pa = 0;
4304 struct ion_handle *ihandle = NULL;
4305 void *cmd_buf = NULL;
4306 size_t cmd_len;
4307 uint32_t app_arch = 0;
4308
4309 if (!data || !appname || !app_id) {
4310 pr_err("Null pointer to data or appname or appid\n");
4311 return -EINVAL;
4312 }
4313 *app_id = 0;
4314 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4315 return -EIO;
4316 data->client.app_arch = app_arch;
4317
4318 /* Check and load cmnlib */
4319 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4320 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4321 ret = qseecom_load_commonlib_image(data, "cmnlib");
4322 if (ret) {
4323 pr_err("failed to load cmnlib\n");
4324 return -EIO;
4325 }
4326 qseecom.commonlib_loaded = true;
4327 pr_debug("cmnlib is loaded\n");
4328 }
4329
4330 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4331 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4332 if (ret) {
4333 pr_err("failed to load cmnlib64\n");
4334 return -EIO;
4335 }
4336 qseecom.commonlib64_loaded = true;
4337 pr_debug("cmnlib64 is loaded\n");
4338 }
4339 }
4340
4341 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4342 if (ret)
4343 return ret;
4344
4345 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4346 if (ret) {
4347 ret = -EIO;
4348 goto exit_free_img_data;
4349 }
4350
4351 /* Populate the load_req parameters */
4352 if (qseecom.qsee_version < QSEE_VERSION_40) {
4353 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4354 /* mdt_len and img_len are already populated by __qseecom_get_fw_data() */
4356 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4357 load_req.phy_addr = (uint32_t)pa;
4358 cmd_buf = (void *)&load_req;
4359 cmd_len = sizeof(struct qseecom_load_app_ireq);
4360 } else {
4361 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4362 load_req_64bit.mdt_len = load_req.mdt_len;
4363 load_req_64bit.img_len = load_req.img_len;
4364 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4365 load_req_64bit.phy_addr = (uint64_t)pa;
4366 cmd_buf = (void *)&load_req_64bit;
4367 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4368 }
4369
4370 if (qseecom.support_bus_scaling) {
4371 mutex_lock(&qsee_bw_mutex);
4372 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4373 mutex_unlock(&qsee_bw_mutex);
4374 if (ret) {
4375 ret = -EIO;
4376 goto exit_free_img_data;
4377 }
4378 }
4379
4380 ret = __qseecom_enable_clk_scale_up(data);
4381 if (ret) {
4382 ret = -EIO;
4383 goto exit_unregister_bus_bw_need;
4384 }
4385
4386 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4387 img_data, fw_size,
4388 ION_IOC_CLEAN_INV_CACHES);
4389 if (ret) {
4390 pr_err("cache operation failed %d\n", ret);
4391 goto exit_disable_clk_vote;
4392 }
4393
4394 /* SCM_CALL to load the image */
4395 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4396 &resp, sizeof(resp));
4397 if (ret) {
Zhen Kong5d02be92018-05-29 16:17:29 -07004398 pr_err("scm_call to load failed : ret %d, result %x\n",
4399 ret, resp.result);
4400 if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
4401 ret = -EEXIST;
4402 else
4403 ret = -EIO;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004404 goto exit_disable_clk_vote;
4405 }
4406
4407 switch (resp.result) {
4408 case QSEOS_RESULT_SUCCESS:
4409 *app_id = resp.data;
4410 break;
4411 case QSEOS_RESULT_INCOMPLETE:
4412 ret = __qseecom_process_incomplete_cmd(data, &resp);
4413 if (ret)
4414 pr_err("process_incomplete_cmd FAILED\n");
4415 else
4416 *app_id = resp.data;
4417 break;
4418 case QSEOS_RESULT_FAILURE:
4419 pr_err("scm call failed with response QSEOS_RESULT_FAILURE\n");
4420 break;
4421 default:
4422 pr_err("scm call return unknown response %d\n", resp.result);
4423 ret = -EINVAL;
4424 break;
4425 }
4426
4427exit_disable_clk_vote:
4428 __qseecom_disable_clk_scale_down(data);
4429
4430exit_unregister_bus_bw_need:
4431 if (qseecom.support_bus_scaling) {
4432 mutex_lock(&qsee_bw_mutex);
4433 qseecom_unregister_bus_bandwidth_needs(data);
4434 mutex_unlock(&qsee_bw_mutex);
4435 }
4436
4437exit_free_img_data:
4438 __qseecom_free_img_data(&ihandle);
4439 return ret;
4440}
4441
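/*
 * Load a common library image (cmnlib or cmnlib64) that TAs depend on:
 * stage it in ION memory and send QSEOS_LOAD_SERV_IMAGE_COMMAND,
 * scaling up bus bandwidth and crypto clocks around the SCM call.
 */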
4442static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
4443 char *cmnlib_name)
4444{
4445 int ret = 0;
4446 uint32_t fw_size = 0;
4447 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4448 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4449 struct qseecom_command_scm_resp resp;
4450 u8 *img_data = NULL;
4451 ion_phys_addr_t pa = 0;
4452 void *cmd_buf = NULL;
4453 size_t cmd_len;
4454 uint32_t app_arch = 0;
Zhen Kong3bafb312017-10-18 10:27:20 -07004455 struct ion_handle *cmnlib_ion_handle = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004456
4457 if (!cmnlib_name) {
4458 pr_err("cmnlib_name is NULL\n");
4459 return -EINVAL;
4460 }
4461 if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
4462 pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
4463 cmnlib_name, strlen(cmnlib_name));
4464 return -EINVAL;
4465 }
4466
4467 if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
4468 return -EIO;
4469
Zhen Kong3bafb312017-10-18 10:27:20 -07004470 ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004471 &img_data, fw_size, &pa);
4472 if (ret)
4473 return -EIO;
4474
4475 ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
4476 if (ret) {
4477 ret = -EIO;
4478 goto exit_free_img_data;
4479 }
4480 if (qseecom.qsee_version < QSEE_VERSION_40) {
4481 load_req.phy_addr = (uint32_t)pa;
4482 load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4483 cmd_buf = (void *)&load_req;
4484 cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
4485 } else {
4486 load_req_64bit.phy_addr = (uint64_t)pa;
4487 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4488 load_req_64bit.img_len = load_req.img_len;
4489 load_req_64bit.mdt_len = load_req.mdt_len;
4490 cmd_buf = (void *)&load_req_64bit;
4491 cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
4492 }
4493
4494 if (qseecom.support_bus_scaling) {
4495 mutex_lock(&qsee_bw_mutex);
4496 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4497 mutex_unlock(&qsee_bw_mutex);
4498 if (ret) {
4499 ret = -EIO;
4500 goto exit_free_img_data;
4501 }
4502 }
4503
4504 /* Vote for the SFPB clock */
4505 ret = __qseecom_enable_clk_scale_up(data);
4506 if (ret) {
4507 ret = -EIO;
4508 goto exit_unregister_bus_bw_need;
4509 }
4510
Zhen Kong3bafb312017-10-18 10:27:20 -07004511 ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004512 img_data, fw_size,
4513 ION_IOC_CLEAN_INV_CACHES);
4514 if (ret) {
4515 pr_err("cache operation failed %d\n", ret);
4516 goto exit_disable_clk_vote;
4517 }
4518
4519 /* SCM_CALL to load the image */
4520 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4521 &resp, sizeof(resp));
4522 if (ret) {
4523 pr_err("scm_call to load failed : ret %d\n", ret);
4524 ret = -EIO;
4525 goto exit_disable_clk_vote;
4526 }
4527
4528 switch (resp.result) {
4529 case QSEOS_RESULT_SUCCESS:
4530 break;
4531 case QSEOS_RESULT_FAILURE:
4532 pr_err("scm call failed w/response result %d\n", resp.result);
4533 ret = -EINVAL;
4534 goto exit_disable_clk_vote;
4535 case QSEOS_RESULT_INCOMPLETE:
4536 ret = __qseecom_process_incomplete_cmd(data, &resp);
4537 if (ret) {
4538 pr_err("process_incomplete_cmd failed err: %d\n", ret);
4539 goto exit_disable_clk_vote;
4540 }
4541 break;
4542 default:
4543 pr_err("scm call return unknown response %d\n", resp.result);
4544 ret = -EINVAL;
4545 goto exit_disable_clk_vote;
4546 }
4547
4548exit_disable_clk_vote:
4549 __qseecom_disable_clk_scale_down(data);
4550
4551exit_unregister_bus_bw_need:
4552 if (qseecom.support_bus_scaling) {
4553 mutex_lock(&qsee_bw_mutex);
4554 qseecom_unregister_bus_bandwidth_needs(data);
4555 mutex_unlock(&qsee_bw_mutex);
4556 }
4557
4558exit_free_img_data:
Zhen Kong3bafb312017-10-18 10:27:20 -07004559 __qseecom_free_img_data(&cmnlib_ion_handle);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004560 return ret;
4561}
4562
4563static int qseecom_unload_commonlib_image(void)
4564{
4565 int ret = -EINVAL;
4566 struct qseecom_unload_lib_image_ireq unload_req = {0};
4567 struct qseecom_command_scm_resp resp;
4568
4569 /* Populate the remaining parameters */
4570 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4571
4572 /* SCM_CALL to unload the image */
4573 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4574 sizeof(struct qseecom_unload_lib_image_ireq),
4575 &resp, sizeof(resp));
4576 if (ret) {
4577 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4578 ret = -EIO;
4579 } else {
4580 switch (resp.result) {
4581 case QSEOS_RESULT_SUCCESS:
4582 break;
4583 case QSEOS_RESULT_FAILURE:
4584 pr_err("scm call failed, resp.result QSEOS_RESULT_FAILURE\n");
4585 break;
4586 default:
4587 pr_err("scm call return unknown response %d\n",
4588 resp.result);
4589 ret = -EINVAL;
4590 break;
4591 }
4592 }
4593
4594 return ret;
4595}
4596
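/*
 * Kernel-client entry point: allocate the shared buffer, look up the TA
 * by name (loading it from firmware if it is not resident yet), register
 * the resulting handle on the kclient list, and hand the caller a
 * qseecom_handle whose sbuf points at the shared buffer.
 */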
4597int qseecom_start_app(struct qseecom_handle **handle,
4598 char *app_name, uint32_t size)
4599{
4600 int32_t ret = 0;
4601 unsigned long flags = 0;
4602 struct qseecom_dev_handle *data = NULL;
4603 struct qseecom_check_app_ireq app_ireq;
4604 struct qseecom_registered_app_list *entry = NULL;
4605 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4606 bool found_app = false;
4607 size_t len;
4608 ion_phys_addr_t pa;
4609 uint32_t fw_size, app_arch;
4610 uint32_t app_id = 0;
4611
Zhen Kongc4c162a2019-01-23 12:07:12 -08004612 __wakeup_unregister_listener_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004613
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004614 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4615 pr_err("Not allowed to be called in %d state\n",
4616 atomic_read(&qseecom.qseecom_state));
4617 return -EPERM;
4618 }
4619 if (!app_name) {
4620 pr_err("failed to get the app name\n");
4621 return -EINVAL;
4622 }
4623
Zhen Kong64a6d7282017-06-16 11:55:07 -07004624 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004625 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004626 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004627 return -EINVAL;
4628 }
4629
4630 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4631 if (!(*handle))
4632 return -ENOMEM;
4633
4634 data = kzalloc(sizeof(*data), GFP_KERNEL);
4635 if (!data) {
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304636 ret = -ENOMEM;
4637 goto exit_handle_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004638 }
4639 data->abort = 0;
4640 data->type = QSEECOM_CLIENT_APP;
4641 data->released = false;
4642 data->client.sb_length = size;
4643 data->client.user_virt_sb_base = 0;
4644 data->client.ihandle = NULL;
4645
4646 init_waitqueue_head(&data->abort_wq);
4647
4648 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4649 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4650 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4651 pr_err("Ion client could not retrieve the handle\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304652 ret = -ENOMEM;
4653 goto exit_data_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004654 }
4655 mutex_lock(&app_access_lock);
4656
Zhen Kong5d02be92018-05-29 16:17:29 -07004657recheck:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004658 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4659 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4660 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4661 if (ret)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304662 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004663
4664 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4665 if (app_id) {
4666 pr_warn("App id %d for [%s] app exists\n", app_id,
4667 (char *)app_ireq.app_name);
4668 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4669 list_for_each_entry(entry,
4670 &qseecom.registered_app_list_head, list){
4671 if (entry->app_id == app_id) {
4672 entry->ref_cnt++;
4673 found_app = true;
4674 break;
4675 }
4676 }
4677 spin_unlock_irqrestore(
4678 &qseecom.registered_app_list_lock, flags);
4679 if (!found_app)
4680 pr_warn("App_id %d [%s] was loaded but not registered\n",
4681 app_id, (char *)app_ireq.app_name);
4682 } else {
4683 /* load the app and get the app_id */
4684 pr_debug("%s: Loading app for the first time\n",
4685 qseecom.pdev->init_name);
4686 ret = __qseecom_load_fw(data, app_name, &app_id);
Zhen Kong5d02be92018-05-29 16:17:29 -07004687 if (ret == -EEXIST) {
4688 pr_err("recheck if TA %s is loaded\n", app_name);
4689 goto recheck;
4690 } else if (ret < 0)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304691 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004692 }
4693 data->client.app_id = app_id;
4694 if (!found_app) {
4695 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4696 if (!entry) {
4697 pr_err("kmalloc for app entry failed\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304698 ret = -ENOMEM;
4699 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004700 }
4701 entry->app_id = app_id;
4702 entry->ref_cnt = 1;
4703 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4704 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4705 ret = -EIO;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304706 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004707 }
4708 entry->app_arch = app_arch;
4709 entry->app_blocked = false;
4710 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07004711 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004712 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4713 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4714 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4715 flags);
4716 }
4717
4718 /* Get the physical address of the ION BUF */
4719 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4720 if (ret) {
4721 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4722 ret);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304723 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004724 }
4725
4726 /* Map the shared buffer into the kernel and populate the client handle */
4727 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4728 data->client.ihandle);
4729 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4730 pr_err("ION memory mapping for client shared buf failed\n");
4731 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304732 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004733 }
4734 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4735 data->client.sb_phys = (phys_addr_t)pa;
4736 (*handle)->dev = (void *)data;
4737 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4738 (*handle)->sbuf_len = data->client.sb_length;
4739
4740 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4741 if (!kclient_entry) {
4742 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304743 goto exit_ion_unmap_kernel;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004744 }
4745 kclient_entry->handle = *handle;
4746
4747 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4748 list_add_tail(&kclient_entry->list,
4749 &qseecom.registered_kclient_list_head);
4750 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4751
4752 mutex_unlock(&app_access_lock);
4753 return 0;
4754
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304755exit_ion_unmap_kernel:
4756 if (!IS_ERR_OR_NULL(data->client.ihandle))
4757 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
4758exit_entry_free:
4759 kfree(entry);
4760exit_ion_free:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004761 mutex_unlock(&app_access_lock);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304762 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
4763 ion_free(qseecom.ion_clnt, data->client.ihandle);
4764 data->client.ihandle = NULL;
4765 }
4766exit_data_free:
4767 kfree(data);
4768exit_handle_free:
4769 if (*handle) {
4770 kfree(*handle);
4771 *handle = NULL;
4772 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004773 return ret;
4774}
4775EXPORT_SYMBOL(qseecom_start_app);
4776
4777int qseecom_shutdown_app(struct qseecom_handle **handle)
4778{
4779 int ret = -EINVAL;
4780 struct qseecom_dev_handle *data;
4781
4782 struct qseecom_registered_kclient_list *kclient = NULL;
4783 unsigned long flags = 0;
4784 bool found_handle = false;
4785
Zhen Kongc4c162a2019-01-23 12:07:12 -08004786 __wakeup_unregister_listener_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004787
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004788 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4789 pr_err("Not allowed to be called in %d state\n",
4790 atomic_read(&qseecom.qseecom_state));
4791 return -EPERM;
4792 }
4793
4794 if ((handle == NULL) || (*handle == NULL)) {
4795 pr_err("Handle is not initialized\n");
4796 return -EINVAL;
4797 }
4798 data = (struct qseecom_dev_handle *) ((*handle)->dev);
4799 mutex_lock(&app_access_lock);
4800
4801 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4802 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
4803 list) {
4804 if (kclient->handle == (*handle)) {
4805 list_del(&kclient->list);
4806 found_handle = true;
4807 break;
4808 }
4809 }
4810 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4811 if (!found_handle)
4812 pr_err("Unable to find the handle, exiting\n");
4813 else
4814 ret = qseecom_unload_app(data, false);
4815
4816 mutex_unlock(&app_access_lock);
4817 if (ret == 0) {
4818 kzfree(data);
4819 kzfree(*handle);
4820 kzfree(kclient);
4821 *handle = NULL;
4822 }
4823
4824 return ret;
4825}
4826EXPORT_SYMBOL(qseecom_shutdown_app);
4827
4828int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
4829 uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
4830{
4831 int ret = 0;
4832 struct qseecom_send_cmd_req req = {0, 0, 0, 0};
4833 struct qseecom_dev_handle *data;
4834 bool perf_enabled = false;
4835
Zhen Kongc4c162a2019-01-23 12:07:12 -08004836 __wakeup_unregister_listener_kthread();
Zhen Kongbcdeda22018-11-16 13:50:51 -08004837
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004838 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4839 pr_err("Not allowed to be called in %d state\n",
4840 atomic_read(&qseecom.qseecom_state));
4841 return -EPERM;
4842 }
4843
4844 if (handle == NULL) {
4845 pr_err("Handle is not initialized\n");
4846 return -EINVAL;
4847 }
4848 data = handle->dev;
4849
4850 req.cmd_req_len = sbuf_len;
4851 req.resp_len = rbuf_len;
4852 req.cmd_req_buf = send_buf;
4853 req.resp_buf = resp_buf;
4854
4855 if (__validate_send_cmd_inputs(data, &req))
4856 return -EINVAL;
4857
4858 mutex_lock(&app_access_lock);
4859 if (qseecom.support_bus_scaling) {
4860 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
4861 if (ret) {
4862 pr_err("Failed to set bw.\n");
4863 mutex_unlock(&app_access_lock);
4864 return ret;
4865 }
4866 }
4867 /*
4868 * On targets where crypto clock is handled by HLOS,
4869 * if clk_access_cnt is zero and perf_enabled is false,
4870 * then the crypto clock was not enabled before sending cmd
4871 * to tz, qseecom will enable the clock to avoid service failure.
4872 */
4873 if (!qseecom.no_clock_support &&
4874 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
4875 pr_debug("ce clock is not enabled!\n");
4876 ret = qseecom_perf_enable(data);
4877 if (ret) {
4878 pr_err("Failed to vote for clock with err %d\n",
4879 ret);
4880 mutex_unlock(&app_access_lock);
4881 return -EINVAL;
4882 }
4883 perf_enabled = true;
4884 }
4885 if (!strcmp(data->client.app_name, "securemm"))
4886 data->use_legacy_cmd = true;
4887
4888 ret = __qseecom_send_cmd(data, &req);
4889 data->use_legacy_cmd = false;
4890 if (qseecom.support_bus_scaling)
4891 __qseecom_add_bw_scale_down_timer(
4892 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
4893
4894 if (perf_enabled) {
4895 qsee_disable_clock_vote(data, CLK_DFAB);
4896 qsee_disable_clock_vote(data, CLK_SFPB);
4897 }
4898
4899 mutex_unlock(&app_access_lock);
4900
4901 if (ret)
4902 return ret;
4903
4904 pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
4905 req.resp_len, req.resp_buf);
4906 return ret;
4907}
4908EXPORT_SYMBOL(qseecom_send_command);
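/*
 * Usage sketch for the exported kernel-client API above. This is purely
 * illustrative and not part of the driver; it assumes the prototypes
 * declared in qseecom_kernel.h (qseecom_start_app(), qseecom_send_command(),
 * qseecom_shutdown_app()) and a hypothetical trusted app "sampleapp" with a
 * matching request/response layout.
 *
 *	struct sample_req { uint32_t cmd_id; uint32_t data; };
 *	struct sample_rsp { int32_t status; };
 *
 *	static int sample_talk_to_ta(void)
 *	{
 *		struct qseecom_handle *handle = NULL;
 *		struct sample_req *req;
 *		struct sample_rsp *rsp;
 *		int ret;
 *
 *		ret = qseecom_start_app(&handle, "sampleapp", PAGE_SIZE);
 *		if (ret)
 *			return ret;
 *
 *		req = (struct sample_req *)handle->sbuf;
 *		rsp = (struct sample_rsp *)(handle->sbuf +
 *					ALIGN(sizeof(*req), 64));
 *		req->cmd_id = 0x1;
 *		req->data = 0xdead;
 *
 *		ret = qseecom_send_command(handle, req, sizeof(*req),
 *					rsp, sizeof(*rsp));
 *		if (!ret)
 *			pr_debug("TA returned %d\n", rsp->status);
 *
 *		qseecom_shutdown_app(&handle);
 *		return ret;
 *	}
 *
 * Both req and rsp must live inside the shared buffer allocated by
 * qseecom_start_app(), as enforced by __validate_send_cmd_inputs().
 */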
4909
4910int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4911{
4912 int ret = 0;
4913
4914 if ((handle == NULL) || (handle->dev == NULL)) {
4915 pr_err("No valid kernel client\n");
4916 return -EINVAL;
4917 }
4918 if (high) {
4919 if (qseecom.support_bus_scaling) {
4920 mutex_lock(&qsee_bw_mutex);
4921 __qseecom_register_bus_bandwidth_needs(handle->dev,
4922 HIGH);
4923 mutex_unlock(&qsee_bw_mutex);
4924 } else {
4925 ret = qseecom_perf_enable(handle->dev);
4926 if (ret)
4927 pr_err("Failed to vote for clock with err %d\n",
4928 ret);
4929 }
4930 } else {
4931 if (!qseecom.support_bus_scaling) {
4932 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4933 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4934 } else {
4935 mutex_lock(&qsee_bw_mutex);
4936 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4937 mutex_unlock(&qsee_bw_mutex);
4938 }
4939 }
4940 return ret;
4941}
4942EXPORT_SYMBOL(qseecom_set_bandwidth);
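/*
 * Illustrative pairing of qseecom_set_bandwidth() around a crypto/bus
 * intensive command (example only; "handle" is a qseecom_handle obtained
 * from qseecom_start_app()):
 *
 *	qseecom_set_bandwidth(handle, true);
 *	ret = qseecom_send_command(handle, req, req_len, rsp, rsp_len);
 *	qseecom_set_bandwidth(handle, false);
 *
 * The calls must be balanced: each "true" vote registers a HIGH bandwidth
 * need (or enables the clocks when bus scaling is not supported) that is
 * only dropped by the matching "false" call.
 */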
4943
4944int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
4945{
4946 struct qseecom_registered_app_list dummy_app_entry = { {0} };
4947 struct qseecom_dev_handle dummy_private_data = {0};
4948 struct qseecom_command_scm_resp resp;
4949 int ret = 0;
4950
4951 if (!desc) {
4952 pr_err("desc is NULL\n");
4953 return -EINVAL;
4954 }
4955
4956 resp.result = desc->ret[0]; /*req_cmd*/
Zhen Kong2f60f492017-06-29 15:22:14 -07004957 resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004958 resp.data = desc->ret[2]; /*listener_id*/
4959
Zhen Konge7f525f2017-12-01 18:26:25 -08004960 dummy_private_data.client.app_id = desc->ret[1];
4961 dummy_app_entry.app_id = desc->ret[1];
4962
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004963 mutex_lock(&app_access_lock);
Zhen Kong7458c2e2017-10-19 12:32:07 -07004964 if (qseecom.qsee_reentrancy_support)
4965 ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004966 &dummy_private_data);
Zhen Kong7458c2e2017-10-19 12:32:07 -07004967 else
4968 ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
4969 &resp);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004970 mutex_unlock(&app_access_lock);
4971 if (ret)
Zhen Kong2f60f492017-06-29 15:22:14 -07004972 pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004973 (int)desc->ret[0], (int)desc->ret[2],
4974 (int)desc->ret[1], ret);
4975 desc->ret[0] = resp.result;
4976 desc->ret[1] = resp.resp_type;
4977 desc->ret[2] = resp.data;
4978 return ret;
4979}
4980EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4981
4982static int qseecom_send_resp(void)
4983{
4984 qseecom.send_resp_flag = 1;
4985 wake_up_interruptible(&qseecom.send_resp_wq);
4986 return 0;
4987}
4988
4989static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4990{
4991 struct qseecom_registered_listener_list *this_lstnr = NULL;
4992
4993 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4994 this_lstnr = __qseecom_find_svc(data->listener.id);
4995 if (this_lstnr == NULL)
4996 return -EINVAL;
4997 qseecom.send_resp_flag = 1;
4998 this_lstnr->send_resp_flag = 1;
4999 wake_up_interruptible(&qseecom.send_resp_wq);
5000 return 0;
5001}
5002
5003static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
5004 struct qseecom_send_modfd_listener_resp *resp,
5005 struct qseecom_registered_listener_list *this_lstnr)
5006{
5007 int i;
5008
5009 if (!data || !resp || !this_lstnr) {
5010 pr_err("listener handle or resp msg is null\n");
5011 return -EINVAL;
5012 }
5013
5014 if (resp->resp_buf_ptr == NULL) {
5015 pr_err("resp buffer is null\n");
5016 return -EINVAL;
5017 }
5018 /* validate resp buf length */
5019 if ((resp->resp_len == 0) ||
5020 (resp->resp_len > this_lstnr->sb_length)) {
5021 pr_err("resp buf length %d not valid\n", resp->resp_len);
5022 return -EINVAL;
5023 }
5024
5025 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
5026 pr_err("Integer overflow in resp_len & resp_buf\n");
5027 return -EINVAL;
5028 }
5029 if ((uintptr_t)this_lstnr->user_virt_sb_base >
5030 (ULONG_MAX - this_lstnr->sb_length)) {
5031 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
5032 return -EINVAL;
5033 }
5034 /* validate resp buf */
5035 if (((uintptr_t)resp->resp_buf_ptr <
5036 (uintptr_t)this_lstnr->user_virt_sb_base) ||
5037 ((uintptr_t)resp->resp_buf_ptr >=
5038 ((uintptr_t)this_lstnr->user_virt_sb_base +
5039 this_lstnr->sb_length)) ||
5040 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
5041 ((uintptr_t)this_lstnr->user_virt_sb_base +
5042 this_lstnr->sb_length))) {
5043 pr_err("resp buf is out of shared buffer region\n");
5044 return -EINVAL;
5045 }
5046
5047 /* validate offsets */
5048 for (i = 0; i < MAX_ION_FD; i++) {
5049 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
5050 pr_err("Invalid offset %d = 0x%x\n",
5051 i, resp->ifd_data[i].cmd_buf_offset);
5052 return -EINVAL;
5053 }
5054 }
5055
5056 return 0;
5057}
5058
5059static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5060 void __user *argp, bool is_64bit_addr)
5061{
5062 struct qseecom_send_modfd_listener_resp resp;
5063 struct qseecom_registered_listener_list *this_lstnr = NULL;
5064
5065 if (copy_from_user(&resp, argp, sizeof(resp))) {
5066 pr_err("copy_from_user failed");
5067 return -EINVAL;
5068 }
5069
5070 this_lstnr = __qseecom_find_svc(data->listener.id);
5071 if (this_lstnr == NULL)
5072 return -EINVAL;
5073
5074 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
5075 return -EINVAL;
5076
5077 resp.resp_buf_ptr = this_lstnr->sb_virt +
5078 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
5079
5080 if (!is_64bit_addr)
5081 __qseecom_update_cmd_buf(&resp, false, data);
5082 else
5083 __qseecom_update_cmd_buf_64(&resp, false, data);
5084 qseecom.send_resp_flag = 1;
5085 this_lstnr->send_resp_flag = 1;
5086 wake_up_interruptible(&qseecom.send_resp_wq);
5087 return 0;
5088}
5089
5090static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
5091 void __user *argp)
5092{
5093 return __qseecom_send_modfd_resp(data, argp, false);
5094}
5095
5096static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
5097 void __user *argp)
5098{
5099 return __qseecom_send_modfd_resp(data, argp, true);
5100}
5101
5102static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
5103 void __user *argp)
5104{
5105 struct qseecom_qseos_version_req req;
5106
5107 if (copy_from_user(&req, argp, sizeof(req))) {
5108 pr_err("copy_from_user failed");
5109 return -EINVAL;
5110 }
5111 req.qseos_version = qseecom.qseos_version;
5112 if (copy_to_user(argp, &req, sizeof(req))) {
5113 pr_err("copy_to_user failed");
5114 return -EINVAL;
5115 }
5116 return 0;
5117}
5118
5119static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
5120{
5121 int rc = 0;
5122 struct qseecom_clk *qclk = NULL;
5123
5124 if (qseecom.no_clock_support)
5125 return 0;
5126
5127 if (ce == CLK_QSEE)
5128 qclk = &qseecom.qsee;
5129 if (ce == CLK_CE_DRV)
5130 qclk = &qseecom.ce_drv;
5131
5132 if (qclk == NULL) {
5133 pr_err("CLK type not supported\n");
5134 return -EINVAL;
5135 }
5136 mutex_lock(&clk_access_lock);
5137
5138 if (qclk->clk_access_cnt == ULONG_MAX) {
5139 pr_err("clk_access_cnt beyond limitation\n");
5140 goto err;
5141 }
5142 if (qclk->clk_access_cnt > 0) {
5143 qclk->clk_access_cnt++;
5144 mutex_unlock(&clk_access_lock);
5145 return rc;
5146 }
5147
5148 /* Enable CE core clk */
5149 if (qclk->ce_core_clk != NULL) {
5150 rc = clk_prepare_enable(qclk->ce_core_clk);
5151 if (rc) {
5152 pr_err("Unable to enable/prepare CE core clk\n");
5153 goto err;
5154 }
5155 }
5156 /* Enable CE clk */
5157 if (qclk->ce_clk != NULL) {
5158 rc = clk_prepare_enable(qclk->ce_clk);
5159 if (rc) {
5160 pr_err("Unable to enable/prepare CE iface clk\n");
5161 goto ce_clk_err;
5162 }
5163 }
5164 /* Enable AXI clk */
5165 if (qclk->ce_bus_clk != NULL) {
5166 rc = clk_prepare_enable(qclk->ce_bus_clk);
5167 if (rc) {
5168 pr_err("Unable to enable/prepare CE bus clk\n");
5169 goto ce_bus_clk_err;
5170 }
5171 }
5172 qclk->clk_access_cnt++;
5173 mutex_unlock(&clk_access_lock);
5174 return 0;
5175
5176ce_bus_clk_err:
5177 if (qclk->ce_clk != NULL)
5178 clk_disable_unprepare(qclk->ce_clk);
5179ce_clk_err:
5180 if (qclk->ce_core_clk != NULL)
5181 clk_disable_unprepare(qclk->ce_core_clk);
5182err:
5183 mutex_unlock(&clk_access_lock);
5184 return -EIO;
5185}
5186
5187static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5188{
5189 struct qseecom_clk *qclk;
5190
5191 if (qseecom.no_clock_support)
5192 return;
5193
5194 if (ce == CLK_QSEE)
5195 qclk = &qseecom.qsee;
5196 else
5197 qclk = &qseecom.ce_drv;
5198
5199 mutex_lock(&clk_access_lock);
5200
5201 if (qclk->clk_access_cnt == 0) {
5202 mutex_unlock(&clk_access_lock);
5203 return;
5204 }
5205
5206 if (qclk->clk_access_cnt == 1) {
5207 if (qclk->ce_clk != NULL)
5208 clk_disable_unprepare(qclk->ce_clk);
5209 if (qclk->ce_core_clk != NULL)
5210 clk_disable_unprepare(qclk->ce_core_clk);
5211 if (qclk->ce_bus_clk != NULL)
5212 clk_disable_unprepare(qclk->ce_bus_clk);
5213 }
5214 qclk->clk_access_cnt--;
5215 mutex_unlock(&clk_access_lock);
5216}
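/*
 * __qseecom_enable_clk() and __qseecom_disable_clk() are reference counted
 * through clk_access_cnt, so nested users are safe as long as the calls are
 * balanced. Minimal sketch of the expected pattern (illustrative only):
 *
 *	ret = __qseecom_enable_clk(CLK_QSEE);
 *	if (ret)
 *		return ret;
 *	... issue the SCM call or crypto work that needs the CE clocks ...
 *	__qseecom_disable_clk(CLK_QSEE);
 */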
5217
5218static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
5219 int32_t clk_type)
5220{
5221 int ret = 0;
5222 struct qseecom_clk *qclk;
5223
5224 if (qseecom.no_clock_support)
5225 return 0;
5226
5227 qclk = &qseecom.qsee;
5228 if (!qseecom.qsee_perf_client)
5229 return ret;
5230
5231 switch (clk_type) {
5232 case CLK_DFAB:
5233 mutex_lock(&qsee_bw_mutex);
5234 if (!qseecom.qsee_bw_count) {
5235 if (qseecom.qsee_sfpb_bw_count > 0)
5236 ret = msm_bus_scale_client_update_request(
5237 qseecom.qsee_perf_client, 3);
5238 else {
5239 if (qclk->ce_core_src_clk != NULL)
5240 ret = __qseecom_enable_clk(CLK_QSEE);
5241 if (!ret) {
5242 ret =
5243 msm_bus_scale_client_update_request(
5244 qseecom.qsee_perf_client, 1);
5245 if ((ret) &&
5246 (qclk->ce_core_src_clk != NULL))
5247 __qseecom_disable_clk(CLK_QSEE);
5248 }
5249 }
5250 if (ret)
5251 pr_err("DFAB Bandwidth req failed (%d)\n",
5252 ret);
5253 else {
5254 qseecom.qsee_bw_count++;
5255 data->perf_enabled = true;
5256 }
5257 } else {
5258 qseecom.qsee_bw_count++;
5259 data->perf_enabled = true;
5260 }
5261 mutex_unlock(&qsee_bw_mutex);
5262 break;
5263 case CLK_SFPB:
5264 mutex_lock(&qsee_bw_mutex);
5265 if (!qseecom.qsee_sfpb_bw_count) {
5266 if (qseecom.qsee_bw_count > 0)
5267 ret = msm_bus_scale_client_update_request(
5268 qseecom.qsee_perf_client, 3);
5269 else {
5270 if (qclk->ce_core_src_clk != NULL)
5271 ret = __qseecom_enable_clk(CLK_QSEE);
5272 if (!ret) {
5273 ret =
5274 msm_bus_scale_client_update_request(
5275 qseecom.qsee_perf_client, 2);
5276 if ((ret) &&
5277 (qclk->ce_core_src_clk != NULL))
5278 __qseecom_disable_clk(CLK_QSEE);
5279 }
5280 }
5281
5282 if (ret)
5283 pr_err("SFPB Bandwidth req failed (%d)\n",
5284 ret);
5285 else {
5286 qseecom.qsee_sfpb_bw_count++;
5287 data->fast_load_enabled = true;
5288 }
5289 } else {
5290 qseecom.qsee_sfpb_bw_count++;
5291 data->fast_load_enabled = true;
5292 }
5293 mutex_unlock(&qsee_bw_mutex);
5294 break;
5295 default:
5296 pr_err("Clock type not defined\n");
5297 break;
5298 }
5299 return ret;
5300}
5301
5302static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5303 int32_t clk_type)
5304{
5305 int32_t ret = 0;
5306 struct qseecom_clk *qclk;
5307
5308 qclk = &qseecom.qsee;
5309
5310 if (qseecom.no_clock_support)
5311 return;
5312 if (!qseecom.qsee_perf_client)
5313 return;
5314
5315 switch (clk_type) {
5316 case CLK_DFAB:
5317 mutex_lock(&qsee_bw_mutex);
5318 if (qseecom.qsee_bw_count == 0) {
5319 pr_err("Client error.Extra call to disable DFAB clk\n");
5320 mutex_unlock(&qsee_bw_mutex);
5321 return;
5322 }
5323
5324 if (qseecom.qsee_bw_count == 1) {
5325 if (qseecom.qsee_sfpb_bw_count > 0)
5326 ret = msm_bus_scale_client_update_request(
5327 qseecom.qsee_perf_client, 2);
5328 else {
5329 ret = msm_bus_scale_client_update_request(
5330 qseecom.qsee_perf_client, 0);
5331 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5332 __qseecom_disable_clk(CLK_QSEE);
5333 }
5334 if (ret)
5335 pr_err("SFPB Bandwidth req fail (%d)\n",
5336 ret);
5337 else {
5338 qseecom.qsee_bw_count--;
5339 data->perf_enabled = false;
5340 }
5341 } else {
5342 qseecom.qsee_bw_count--;
5343 data->perf_enabled = false;
5344 }
5345 mutex_unlock(&qsee_bw_mutex);
5346 break;
5347 case CLK_SFPB:
5348 mutex_lock(&qsee_bw_mutex);
5349 if (qseecom.qsee_sfpb_bw_count == 0) {
5350 pr_err("Client error.Extra call to disable SFPB clk\n");
5351 mutex_unlock(&qsee_bw_mutex);
5352 return;
5353 }
5354 if (qseecom.qsee_sfpb_bw_count == 1) {
5355 if (qseecom.qsee_bw_count > 0)
5356 ret = msm_bus_scale_client_update_request(
5357 qseecom.qsee_perf_client, 1);
5358 else {
5359 ret = msm_bus_scale_client_update_request(
5360 qseecom.qsee_perf_client, 0);
5361 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5362 __qseecom_disable_clk(CLK_QSEE);
5363 }
5364 if (ret)
5365 pr_err("SFPB Bandwidth req fail (%d)\n",
5366 ret);
5367 else {
5368 qseecom.qsee_sfpb_bw_count--;
5369 data->fast_load_enabled = false;
5370 }
5371 } else {
5372 qseecom.qsee_sfpb_bw_count--;
5373 data->fast_load_enabled = false;
5374 }
5375 mutex_unlock(&qsee_bw_mutex);
5376 break;
5377 default:
5378 pr_err("Clock type not defined\n");
5379 break;
5380 }
5381
5382}
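/*
 * Summary of the msm_bus_scale vote index used by qsee_vote_for_clock() and
 * qsee_disable_clock_vote() above (derived from the call sites):
 *
 *	index 0: neither DFAB nor SFPB is voted
 *	index 1: DFAB only (qsee_bw_count > 0)
 *	index 2: SFPB only (qsee_sfpb_bw_count > 0)
 *	index 3: both DFAB and SFPB are voted
 */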
5383
5384static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5385 void __user *argp)
5386{
5387 struct ion_handle *ihandle; /* Ion handle */
5388 struct qseecom_load_img_req load_img_req;
5389 int uret = 0;
5390 int ret;
5391 ion_phys_addr_t pa = 0;
5392 size_t len;
5393 struct qseecom_load_app_ireq load_req;
5394 struct qseecom_load_app_64bit_ireq load_req_64bit;
5395 struct qseecom_command_scm_resp resp;
5396 void *cmd_buf = NULL;
5397 size_t cmd_len;
5398 /* Copy the relevant information needed for loading the image */
5399 if (copy_from_user(&load_img_req,
5400 (void __user *)argp,
5401 sizeof(struct qseecom_load_img_req))) {
5402 pr_err("copy_from_user failed\n");
5403 return -EFAULT;
5404 }
5405
5406 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005407 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005408 load_img_req.ifd_data_fd);
5409 if (IS_ERR_OR_NULL(ihandle)) {
5410 pr_err("Ion client could not retrieve the handle\n");
5411 return -ENOMEM;
5412 }
5413
5414 /* Get the physical address of the ION BUF */
5415 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5416 if (ret) {
5417 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5418 ret);
5419 return ret;
5420 }
5421 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5422 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5423 len, load_img_req.mdt_len,
5424 load_img_req.img_len);
5425 return -EINVAL;
5426 }
5427 /* Populate the structure for sending scm call to load image */
5428 if (qseecom.qsee_version < QSEE_VERSION_40) {
5429 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5430 load_req.mdt_len = load_img_req.mdt_len;
5431 load_req.img_len = load_img_req.img_len;
5432 load_req.phy_addr = (uint32_t)pa;
5433 cmd_buf = (void *)&load_req;
5434 cmd_len = sizeof(struct qseecom_load_app_ireq);
5435 } else {
5436 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5437 load_req_64bit.mdt_len = load_img_req.mdt_len;
5438 load_req_64bit.img_len = load_img_req.img_len;
5439 load_req_64bit.phy_addr = (uint64_t)pa;
5440 cmd_buf = (void *)&load_req_64bit;
5441 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5442 }
5443
5444 if (qseecom.support_bus_scaling) {
5445 mutex_lock(&qsee_bw_mutex);
5446 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5447 mutex_unlock(&qsee_bw_mutex);
5448 if (ret) {
5449 ret = -EIO;
5450 goto exit_cpu_restore;
5451 }
5452 }
5453
5454 /* Vote for the SFPB clock */
5455 ret = __qseecom_enable_clk_scale_up(data);
5456 if (ret) {
5457 ret = -EIO;
5458 goto exit_register_bus_bandwidth_needs;
5459 }
5460 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5461 ION_IOC_CLEAN_INV_CACHES);
5462 if (ret) {
5463 pr_err("cache operation failed %d\n", ret);
5464 goto exit_disable_clock;
5465 }
5466 /* SCM_CALL to load the external elf */
5467 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5468 &resp, sizeof(resp));
5469 if (ret) {
5470 pr_err("scm_call to load failed : ret %d\n",
5471 ret);
5472 ret = -EFAULT;
5473 goto exit_disable_clock;
5474 }
5475
5476 switch (resp.result) {
5477 case QSEOS_RESULT_SUCCESS:
5478 break;
5479 case QSEOS_RESULT_INCOMPLETE:
5480 pr_err("%s: qseos result incomplete\n", __func__);
5481 ret = __qseecom_process_incomplete_cmd(data, &resp);
5482 if (ret)
5483 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5484 break;
5485 case QSEOS_RESULT_FAILURE:
5486 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5487 ret = -EFAULT;
5488 break;
5489 default:
5490 pr_err("scm_call response result %d not supported\n",
5491 resp.result);
5492 ret = -EFAULT;
5493 break;
5494 }
5495
5496exit_disable_clock:
5497 __qseecom_disable_clk_scale_down(data);
5498
5499exit_register_bus_bandwidth_needs:
5500 if (qseecom.support_bus_scaling) {
5501 mutex_lock(&qsee_bw_mutex);
5502 uret = qseecom_unregister_bus_bandwidth_needs(data);
5503 mutex_unlock(&qsee_bw_mutex);
5504 if (uret)
5505 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5506 uret, ret);
5507 }
5508
5509exit_cpu_restore:
5510 /* Deallocate the handle */
5511 if (!IS_ERR_OR_NULL(ihandle))
5512 ion_free(qseecom.ion_clnt, ihandle);
5513 return ret;
5514}
5515
5516static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5517{
5518 int ret = 0;
5519 struct qseecom_command_scm_resp resp;
5520 struct qseecom_unload_app_ireq req;
5521
5522 /* unavailable client app */
5523 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5524
5525 /* Populate the structure for sending scm call to unload image */
5526 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5527
5528 /* SCM_CALL to unload the external elf */
5529 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5530 sizeof(struct qseecom_unload_app_ireq),
5531 &resp, sizeof(resp));
5532 if (ret) {
5533 pr_err("scm_call to unload failed : ret %d\n",
5534 ret);
5535 ret = -EFAULT;
5536 goto qseecom_unload_external_elf_scm_err;
5537 }
5538 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5539 ret = __qseecom_process_incomplete_cmd(data, &resp);
5540 if (ret)
5541 pr_err("process_incomplete_cmd fail err: %d\n",
5542 ret);
5543 } else {
5544 if (resp.result != QSEOS_RESULT_SUCCESS) {
5545 pr_err("scm_call to unload image failed resp.result =%d\n",
5546 resp.result);
5547 ret = -EFAULT;
5548 }
5549 }
5550
5551qseecom_unload_external_elf_scm_err:
5552
5553 return ret;
5554}
5555
5556static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5557 void __user *argp)
5558{
5559
5560 int32_t ret;
5561 struct qseecom_qseos_app_load_query query_req;
5562 struct qseecom_check_app_ireq req;
5563 struct qseecom_registered_app_list *entry = NULL;
5564 unsigned long flags = 0;
5565 uint32_t app_arch = 0, app_id = 0;
5566 bool found_app = false;
5567
5568 /* Copy the relevant information needed for loading the image */
5569 if (copy_from_user(&query_req,
5570 (void __user *)argp,
5571 sizeof(struct qseecom_qseos_app_load_query))) {
5572 pr_err("copy_from_user failed\n");
5573 return -EFAULT;
5574 }
5575
5576 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5577 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5578 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5579
5580 ret = __qseecom_check_app_exists(req, &app_id);
5581 if (ret) {
5582 pr_err(" scm call to check if app is loaded failed");
5583 return ret; /* scm call failed */
5584 }
5585 if (app_id) {
5586 pr_debug("App id %d (%s) already exists\n", app_id,
5587 (char *)(req.app_name));
5588 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5589 list_for_each_entry(entry,
5590 &qseecom.registered_app_list_head, list){
5591 if (entry->app_id == app_id) {
5592 app_arch = entry->app_arch;
5593 entry->ref_cnt++;
5594 found_app = true;
5595 break;
5596 }
5597 }
5598 spin_unlock_irqrestore(
5599 &qseecom.registered_app_list_lock, flags);
5600 data->client.app_id = app_id;
5601 query_req.app_id = app_id;
5602 if (app_arch) {
5603 data->client.app_arch = app_arch;
5604 query_req.app_arch = app_arch;
5605 } else {
5606 data->client.app_arch = 0;
5607 query_req.app_arch = 0;
5608 }
5609 strlcpy(data->client.app_name, query_req.app_name,
5610 MAX_APP_NAME_SIZE);
5611 /*
5612 * If app was loaded by appsbl before and was not registered,
5613 * register this app now.
5614 */
5615 if (!found_app) {
5616 pr_debug("Register app %d [%s] which was loaded before\n",
5617 app_id, (char *)query_req.app_name);
5618 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5619 if (!entry) {
5620 pr_err("kmalloc for app entry failed\n");
5621 return -ENOMEM;
5622 }
5623 entry->app_id = app_id;
5624 entry->ref_cnt = 1;
5625 entry->app_arch = data->client.app_arch;
5626 strlcpy(entry->app_name, data->client.app_name,
5627 MAX_APP_NAME_SIZE);
5628 entry->app_blocked = false;
5629 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07005630 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005631 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5632 flags);
5633 list_add_tail(&entry->list,
5634 &qseecom.registered_app_list_head);
5635 spin_unlock_irqrestore(
5636 &qseecom.registered_app_list_lock, flags);
5637 }
5638 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5639 pr_err("copy_to_user failed\n");
5640 return -EFAULT;
5641 }
5642 return -EEXIST; /* app already loaded */
5643 } else {
5644 return 0; /* app not loaded */
5645 }
5646}
5647
5648static int __qseecom_get_ce_pipe_info(
5649 enum qseecom_key_management_usage_type usage,
5650 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5651{
5652 int ret = -EINVAL;
5653 int i, j;
5654 struct qseecom_ce_info_use *p = NULL;
5655 int total = 0;
5656 struct qseecom_ce_pipe_entry *pcepipe;
5657
5658 switch (usage) {
5659 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5660 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5661 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5662 if (qseecom.support_fde) {
5663 p = qseecom.ce_info.fde;
5664 total = qseecom.ce_info.num_fde;
5665 } else {
5666 pr_err("system does not support fde\n");
5667 return -EINVAL;
5668 }
5669 break;
5670 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5671 if (qseecom.support_pfe) {
5672 p = qseecom.ce_info.pfe;
5673 total = qseecom.ce_info.num_pfe;
5674 } else {
5675 pr_err("system does not support pfe\n");
5676 return -EINVAL;
5677 }
5678 break;
5679 default:
5680 pr_err("unsupported usage %d\n", usage);
5681 return -EINVAL;
5682 }
5683
5684 for (j = 0; j < total; j++) {
5685 if (p->unit_num == unit) {
5686 pcepipe = p->ce_pipe_entry;
5687 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5688 (*ce_hw)[i] = pcepipe->ce_num;
5689 *pipe = pcepipe->ce_pipe_pair;
5690 pcepipe++;
5691 }
5692 ret = 0;
5693 break;
5694 }
5695 p++;
5696 }
5697 return ret;
5698}
5699
5700static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
5701 enum qseecom_key_management_usage_type usage,
5702 struct qseecom_key_generate_ireq *ireq)
5703{
5704 struct qseecom_command_scm_resp resp;
5705 int ret;
5706
5707 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5708 usage >= QSEOS_KM_USAGE_MAX) {
5709 pr_err("Error:: unsupported usage %d\n", usage);
5710 return -EFAULT;
5711 }
5712 ret = __qseecom_enable_clk(CLK_QSEE);
5713 if (ret)
5714 return ret;
5715
5716 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5717 ireq, sizeof(struct qseecom_key_generate_ireq),
5718 &resp, sizeof(resp));
5719 if (ret) {
5720 if (ret == -EINVAL &&
5721 resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5722 pr_debug("Key ID exists.\n");
5723 ret = 0;
5724 } else {
5725 pr_err("scm call to generate key failed : %d\n", ret);
5726 ret = -EFAULT;
5727 }
5728 goto generate_key_exit;
5729 }
5730
5731 switch (resp.result) {
5732 case QSEOS_RESULT_SUCCESS:
5733 break;
5734 case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
5735 pr_debug("Key ID exists.\n");
5736 break;
5737 case QSEOS_RESULT_INCOMPLETE:
5738 ret = __qseecom_process_incomplete_cmd(data, &resp);
5739 if (ret) {
5740 if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5741 pr_debug("Key ID exists.\n");
5742 ret = 0;
5743 } else {
5744 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5745 resp.result);
5746 }
5747 }
5748 break;
5749 case QSEOS_RESULT_FAILURE:
5750 default:
5751 pr_err("gen key scm call failed resp.result %d\n", resp.result);
5752 ret = -EINVAL;
5753 break;
5754 }
5755generate_key_exit:
5756 __qseecom_disable_clk(CLK_QSEE);
5757 return ret;
5758}
5759
5760static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
5761 enum qseecom_key_management_usage_type usage,
5762 struct qseecom_key_delete_ireq *ireq)
5763{
5764 struct qseecom_command_scm_resp resp;
5765 int ret;
5766
5767 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5768 usage >= QSEOS_KM_USAGE_MAX) {
5769 pr_err("Error:: unsupported usage %d\n", usage);
5770 return -EFAULT;
5771 }
5772 ret = __qseecom_enable_clk(CLK_QSEE);
5773 if (ret)
5774 return ret;
5775
5776 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5777 ireq, sizeof(struct qseecom_key_delete_ireq),
5778 &resp, sizeof(struct qseecom_command_scm_resp));
5779 if (ret) {
5780 if (ret == -EINVAL &&
5781 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5782 pr_debug("Max attempts to input password reached.\n");
5783 ret = -ERANGE;
5784 } else {
5785 pr_err("scm call to delete key failed : %d\n", ret);
5786 ret = -EFAULT;
5787 }
5788 goto del_key_exit;
5789 }
5790
5791 switch (resp.result) {
5792 case QSEOS_RESULT_SUCCESS:
5793 break;
5794 case QSEOS_RESULT_INCOMPLETE:
5795 ret = __qseecom_process_incomplete_cmd(data, &resp);
5796 if (ret) {
5797 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5798 resp.result);
5799 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5800 pr_debug("Max attempts to input password reached.\n");
5801 ret = -ERANGE;
5802 }
5803 }
5804 break;
5805 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5806 pr_debug("Max attempts to input password reached.\n");
5807 ret = -ERANGE;
5808 break;
5809 case QSEOS_RESULT_FAILURE:
5810 default:
5811 pr_err("Delete key scm call failed resp.result %d\n",
5812 resp.result);
5813 ret = -EINVAL;
5814 break;
5815 }
5816del_key_exit:
5817 __qseecom_disable_clk(CLK_QSEE);
5818 return ret;
5819}
5820
5821static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5822 enum qseecom_key_management_usage_type usage,
5823 struct qseecom_key_select_ireq *ireq)
5824{
5825 struct qseecom_command_scm_resp resp;
5826 int ret;
5827
5828 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5829 usage >= QSEOS_KM_USAGE_MAX) {
5830 pr_err("Error:: unsupported usage %d\n", usage);
5831 return -EFAULT;
5832 }
5833 ret = __qseecom_enable_clk(CLK_QSEE);
5834 if (ret)
5835 return ret;
5836
5837 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5838 ret = __qseecom_enable_clk(CLK_CE_DRV);
5839 if (ret)
5840 return ret;
5841 }
5842
5843 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5844 ireq, sizeof(struct qseecom_key_select_ireq),
5845 &resp, sizeof(struct qseecom_command_scm_resp));
5846 if (ret) {
5847 if (ret == -EINVAL &&
5848 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5849 pr_debug("Max attempts to input password reached.\n");
5850 ret = -ERANGE;
5851 } else if (ret == -EINVAL &&
5852 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5853 pr_debug("Set Key operation under processing...\n");
5854 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5855 } else {
5856 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5857 ret);
5858 ret = -EFAULT;
5859 }
5860 goto set_key_exit;
5861 }
5862
5863 switch (resp.result) {
5864 case QSEOS_RESULT_SUCCESS:
5865 break;
5866 case QSEOS_RESULT_INCOMPLETE:
5867 ret = __qseecom_process_incomplete_cmd(data, &resp);
5868 if (ret) {
5869 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5870 resp.result);
5871 if (resp.result ==
5872 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5873 pr_debug("Set Key operation under processing...\n");
5874 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5875 }
5876 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5877 pr_debug("Max attempts to input password reached.\n");
5878 ret = -ERANGE;
5879 }
5880 }
5881 break;
5882 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5883 pr_debug("Max attempts to input password reached.\n");
5884 ret = -ERANGE;
5885 break;
5886 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5887 pr_debug("Set Key operation under processing...\n");
5888 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5889 break;
5890 case QSEOS_RESULT_FAILURE:
5891 default:
5892 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5893 ret = -EINVAL;
5894 break;
5895 }
5896set_key_exit:
5897 __qseecom_disable_clk(CLK_QSEE);
5898 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5899 __qseecom_disable_clk(CLK_CE_DRV);
5900 return ret;
5901}
5902
5903static int __qseecom_update_current_key_user_info(
5904 struct qseecom_dev_handle *data,
5905 enum qseecom_key_management_usage_type usage,
5906 struct qseecom_key_userinfo_update_ireq *ireq)
5907{
5908 struct qseecom_command_scm_resp resp;
5909 int ret;
5910
5911 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5912 usage >= QSEOS_KM_USAGE_MAX) {
5913 pr_err("Error:: unsupported usage %d\n", usage);
5914 return -EFAULT;
5915 }
5916 ret = __qseecom_enable_clk(CLK_QSEE);
5917 if (ret)
5918 return ret;
5919
5920 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5921 ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
5922 &resp, sizeof(struct qseecom_command_scm_resp));
5923 if (ret) {
5924 if (ret == -EINVAL &&
5925 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5926 pr_debug("Set Key operation under processing...\n");
5927 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5928 } else {
5929 pr_err("scm call to update key userinfo failed: %d\n",
5930 ret);
5931 __qseecom_disable_clk(CLK_QSEE);
5932 return -EFAULT;
5933 }
5934 }
5935
5936 switch (resp.result) {
5937 case QSEOS_RESULT_SUCCESS:
5938 break;
5939 case QSEOS_RESULT_INCOMPLETE:
5940 ret = __qseecom_process_incomplete_cmd(data, &resp);
5941 if (resp.result ==
5942 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5943 pr_debug("Set Key operation under processing...\n");
5944 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5945 }
5946 if (ret)
5947 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5948 resp.result);
5949 break;
5950 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5951 pr_debug("Update Key operation under processing...\n");
5952 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5953 break;
5954 case QSEOS_RESULT_FAILURE:
5955 default:
5956 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5957 ret = -EINVAL;
5958 break;
5959 }
5960
5961 __qseecom_disable_clk(CLK_QSEE);
5962 return ret;
5963}
5964
5965
5966static int qseecom_enable_ice_setup(int usage)
5967{
5968 int ret = 0;
5969
5970 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5971 ret = qcom_ice_setup_ice_hw("ufs", true);
5972 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5973 ret = qcom_ice_setup_ice_hw("sdcc", true);
5974
5975 return ret;
5976}
5977
5978static int qseecom_disable_ice_setup(int usage)
5979{
5980 int ret = 0;
5981
5982 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5983 ret = qcom_ice_setup_ice_hw("ufs", false);
5984 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5985 ret = qcom_ice_setup_ice_hw("sdcc", false);
5986
5987 return ret;
5988}
5989
5990static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5991{
5992 struct qseecom_ce_info_use *pce_info_use, *p;
5993 int total = 0;
5994 int i;
5995
5996 switch (usage) {
5997 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5998 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5999 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
6000 p = qseecom.ce_info.fde;
6001 total = qseecom.ce_info.num_fde;
6002 break;
6003 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
6004 p = qseecom.ce_info.pfe;
6005 total = qseecom.ce_info.num_pfe;
6006 break;
6007 default:
6008 pr_err("unsupported usage %d\n", usage);
6009 return -EINVAL;
6010 }
6011
6012 pce_info_use = NULL;
6013
6014 for (i = 0; i < total; i++) {
6015 if (p->unit_num == unit) {
6016 pce_info_use = p;
6017 break;
6018 }
6019 p++;
6020 }
6021 if (!pce_info_use) {
6022 pr_err("can not find %d\n", unit);
6023 return -EINVAL;
6024 }
6025 return pce_info_use->num_ce_pipe_entries;
6026}
6027
6028static int qseecom_create_key(struct qseecom_dev_handle *data,
6029 void __user *argp)
6030{
6031 int i;
6032 uint32_t *ce_hw = NULL;
6033 uint32_t pipe = 0;
6034 int ret = 0;
6035 uint32_t flags = 0;
6036 struct qseecom_create_key_req create_key_req;
6037 struct qseecom_key_generate_ireq generate_key_ireq;
6038 struct qseecom_key_select_ireq set_key_ireq;
6039 uint32_t entries = 0;
6040
6041 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
6042 if (ret) {
6043 pr_err("copy_from_user failed\n");
6044 return ret;
6045 }
6046
6047 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6048 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6049 pr_err("unsupported usage %d\n", create_key_req.usage);
6050 ret = -EFAULT;
6051 return ret;
6052 }
6053 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6054 create_key_req.usage);
6055 if (entries <= 0) {
6056 pr_err("no ce instance for usage %d instance %d\n",
6057 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
6058 ret = -EINVAL;
6059 return ret;
6060 }
6061
6062 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6063 if (!ce_hw) {
6064 ret = -ENOMEM;
6065 return ret;
6066 }
6067 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
6068 DEFAULT_CE_INFO_UNIT);
6069 if (ret) {
6070 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6071 ret = -EINVAL;
6072 goto free_buf;
6073 }
6074
6075 if (qseecom.fde_key_size)
6076 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6077 else
6078 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6079
Jiten Patela7bb1d52018-05-11 12:34:26 +05306080 if (qseecom.enable_key_wrap_in_ks == true)
6081 flags |= ENABLE_KEY_WRAP_IN_KS;
6082
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006083 generate_key_ireq.flags = flags;
6084 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
6085 memset((void *)generate_key_ireq.key_id,
6086 0, QSEECOM_KEY_ID_SIZE);
6087 memset((void *)generate_key_ireq.hash32,
6088 0, QSEECOM_HASH_SIZE);
6089 memcpy((void *)generate_key_ireq.key_id,
6090 (void *)key_id_array[create_key_req.usage].desc,
6091 QSEECOM_KEY_ID_SIZE);
6092 memcpy((void *)generate_key_ireq.hash32,
6093 (void *)create_key_req.hash32,
6094 QSEECOM_HASH_SIZE);
6095
6096 ret = __qseecom_generate_and_save_key(data,
6097 create_key_req.usage, &generate_key_ireq);
6098 if (ret) {
6099 pr_err("Failed to generate key on storage: %d\n", ret);
6100 goto free_buf;
6101 }
6102
6103 for (i = 0; i < entries; i++) {
6104 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6105 if (create_key_req.usage ==
6106 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6107 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6108 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6109
6110 } else if (create_key_req.usage ==
6111 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6112 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6113 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6114
6115 } else {
6116 set_key_ireq.ce = ce_hw[i];
6117 set_key_ireq.pipe = pipe;
6118 }
6119 set_key_ireq.flags = flags;
6120
6121 /* set both PIPE_ENC and PIPE_ENC_XTS*/
6122 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6123 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6124 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6125 memcpy((void *)set_key_ireq.key_id,
6126 (void *)key_id_array[create_key_req.usage].desc,
6127 QSEECOM_KEY_ID_SIZE);
6128 memcpy((void *)set_key_ireq.hash32,
6129 (void *)create_key_req.hash32,
6130 QSEECOM_HASH_SIZE);
6131 /*
6132 * qseecom_enable_ice_setup() returns 0 (false) if this is a GPCE based
6133 * crypto instance or if ICE is set up properly
6134 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006135 ret = qseecom_enable_ice_setup(create_key_req.usage);
6136 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006137 goto free_buf;
6138
6139 do {
6140 ret = __qseecom_set_clear_ce_key(data,
6141 create_key_req.usage,
6142 &set_key_ireq);
6143 /*
6144 * wait a little before calling scm again to let other
6145 * processes run
6146 */
6147 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6148 msleep(50);
6149
6150 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6151
6152 qseecom_disable_ice_setup(create_key_req.usage);
6153
6154 if (ret) {
6155 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
6156 pipe, ce_hw[i], ret);
6157 goto free_buf;
6158 } else {
6159 pr_err("Set the key successfully\n");
6160 if ((create_key_req.usage ==
6161 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
6162 (create_key_req.usage ==
6163 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
6164 goto free_buf;
6165 }
6166 }
6167
6168free_buf:
6169 kzfree(ce_hw);
6170 return ret;
6171}
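/*
 * Flow summary for qseecom_create_key() above (mirrors the code, shown here
 * as pseudocode only): the key is generated once, then programmed into every
 * CE/pipe pair serving the requested usage, with ICE brought up around each
 * set-key call and the SCM retried while TZ reports a pending operation:
 *
 *	__qseecom_generate_and_save_key(usage);
 *	for each (ce, pipe) serving usage:
 *		qseecom_enable_ice_setup(usage);
 *		do {
 *			ret = __qseecom_set_clear_ce_key(usage, ce, pipe);
 *			if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
 *				msleep(50);
 *		} while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
 *		qseecom_disable_ice_setup(usage);
 */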
6172
6173static int qseecom_wipe_key(struct qseecom_dev_handle *data,
6174 void __user *argp)
6175{
6176 uint32_t *ce_hw = NULL;
6177 uint32_t pipe = 0;
6178 int ret = 0;
6179 uint32_t flags = 0;
6180 int i, j;
6181 struct qseecom_wipe_key_req wipe_key_req;
6182 struct qseecom_key_delete_ireq delete_key_ireq;
6183 struct qseecom_key_select_ireq clear_key_ireq;
6184 uint32_t entries = 0;
6185
6186 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6187 if (ret) {
6188 pr_err("copy_from_user failed\n");
6189 return ret;
6190 }
6191
6192 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6193 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6194 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6195 ret = -EFAULT;
6196 return ret;
6197 }
6198
6199 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6200 wipe_key_req.usage);
6201 if (entries <= 0) {
6202 pr_err("no ce instance for usage %d instance %d\n",
6203 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6204 ret = -EINVAL;
6205 return ret;
6206 }
6207
6208 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6209 if (!ce_hw) {
6210 ret = -ENOMEM;
6211 return ret;
6212 }
6213
6214 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6215 DEFAULT_CE_INFO_UNIT);
6216 if (ret) {
6217 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6218 ret = -EINVAL;
6219 goto free_buf;
6220 }
6221
6222 if (wipe_key_req.wipe_key_flag) {
6223 delete_key_ireq.flags = flags;
6224 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6225 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6226 memcpy((void *)delete_key_ireq.key_id,
6227 (void *)key_id_array[wipe_key_req.usage].desc,
6228 QSEECOM_KEY_ID_SIZE);
6229 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6230
6231 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6232 &delete_key_ireq);
6233 if (ret) {
6234 pr_err("Failed to delete key from ssd storage: %d\n",
6235 ret);
6236 ret = -EFAULT;
6237 goto free_buf;
6238 }
6239 }
6240
6241 for (j = 0; j < entries; j++) {
6242 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6243 if (wipe_key_req.usage ==
6244 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6245 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6246 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6247 } else if (wipe_key_req.usage ==
6248 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6249 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6250 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6251 } else {
6252 clear_key_ireq.ce = ce_hw[j];
6253 clear_key_ireq.pipe = pipe;
6254 }
6255 clear_key_ireq.flags = flags;
6256 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6257 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6258 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6259 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6260
6261 /*
6262 * qseecom_enable_ice_setup() returns 0 (false) if this is a GPCE based
6263 * crypto instance or if ICE is set up properly
6264 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006265 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6266 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006267 goto free_buf;
6268
6269 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6270 &clear_key_ireq);
6271
6272 qseecom_disable_ice_setup(wipe_key_req.usage);
6273
6274 if (ret) {
6275 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6276 pipe, ce_hw[j], ret);
6277 ret = -EFAULT;
6278 goto free_buf;
6279 }
6280 }
6281
6282free_buf:
6283 kzfree(ce_hw);
6284 return ret;
6285}
6286
6287static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6288 void __user *argp)
6289{
6290 int ret = 0;
6291 uint32_t flags = 0;
6292 struct qseecom_update_key_userinfo_req update_key_req;
6293 struct qseecom_key_userinfo_update_ireq ireq;
6294
6295 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6296 if (ret) {
6297 pr_err("copy_from_user failed\n");
6298 return ret;
6299 }
6300
6301 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6302 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6303 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6304 return -EFAULT;
6305 }
6306
6307 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6308
6309 if (qseecom.fde_key_size)
6310 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6311 else
6312 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6313
6314 ireq.flags = flags;
6315 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6316 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6317 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6318 memcpy((void *)ireq.key_id,
6319 (void *)key_id_array[update_key_req.usage].desc,
6320 QSEECOM_KEY_ID_SIZE);
6321 memcpy((void *)ireq.current_hash32,
6322 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6323 memcpy((void *)ireq.new_hash32,
6324 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6325
6326 do {
6327 ret = __qseecom_update_current_key_user_info(data,
6328 update_key_req.usage,
6329 &ireq);
6330 /*
6331 * wait a little before calling scm again to let other
6332 * processes run
6333 */
6334 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6335 msleep(50);
6336
6337 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6338 if (ret) {
6339 pr_err("Failed to update key info: %d\n", ret);
6340 return ret;
6341 }
6342 return ret;
6343
6344}
6345static int qseecom_is_es_activated(void __user *argp)
6346{
Zhen Kong26e62742018-05-04 17:19:06 -07006347 struct qseecom_is_es_activated_req req = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006348 struct qseecom_command_scm_resp resp;
6349 int ret;
6350
6351 if (qseecom.qsee_version < QSEE_VERSION_04) {
6352 pr_err("invalid qsee version\n");
6353 return -ENODEV;
6354 }
6355
6356 if (argp == NULL) {
6357 pr_err("arg is null\n");
6358 return -EINVAL;
6359 }
6360
6361 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6362 &req, sizeof(req), &resp, sizeof(resp));
6363 if (ret) {
6364 pr_err("scm_call failed\n");
6365 return ret;
6366 }
6367
6368 req.is_activated = resp.result;
6369 ret = copy_to_user(argp, &req, sizeof(req));
6370 if (ret) {
6371 pr_err("copy_to_user failed\n");
6372 return ret;
6373 }
6374
6375 return 0;
6376}
6377
6378static int qseecom_save_partition_hash(void __user *argp)
6379{
6380 struct qseecom_save_partition_hash_req req;
6381 struct qseecom_command_scm_resp resp;
6382 int ret;
6383
6384 memset(&resp, 0x00, sizeof(resp));
6385
6386 if (qseecom.qsee_version < QSEE_VERSION_04) {
6387 pr_err("invalid qsee version\n");
6388 return -ENODEV;
6389 }
6390
6391 if (argp == NULL) {
6392 pr_err("arg is null\n");
6393 return -EINVAL;
6394 }
6395
6396 ret = copy_from_user(&req, argp, sizeof(req));
6397 if (ret) {
6398 pr_err("copy_from_user failed\n");
6399 return ret;
6400 }
6401
6402 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6403 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6404 if (ret) {
6405 pr_err("qseecom_scm_call failed\n");
6406 return ret;
6407 }
6408
6409 return 0;
6410}
6411
6412static int qseecom_mdtp_cipher_dip(void __user *argp)
6413{
6414 struct qseecom_mdtp_cipher_dip_req req;
6415 u32 tzbuflenin, tzbuflenout;
6416 char *tzbufin = NULL, *tzbufout = NULL;
6417 struct scm_desc desc = {0};
6418 int ret;
6419
6420 do {
6421 /* Copy the parameters from userspace */
6422 if (argp == NULL) {
6423 pr_err("arg is null\n");
6424 ret = -EINVAL;
6425 break;
6426 }
6427
6428 ret = copy_from_user(&req, argp, sizeof(req));
6429 if (ret) {
6430 pr_err("copy_from_user failed, ret= %d\n", ret);
6431 break;
6432 }
6433
6434 if (req.in_buf == NULL || req.out_buf == NULL ||
6435 req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
6436 req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
6437 req.direction > 1) {
6438 pr_err("invalid parameters\n");
6439 ret = -EINVAL;
6440 break;
6441 }
6442
6443 /* Copy the input buffer from userspace to kernel space */
6444 tzbuflenin = PAGE_ALIGN(req.in_buf_size);
6445 tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
6446 if (!tzbufin) {
6447 pr_err("error allocating in buffer\n");
6448 ret = -ENOMEM;
6449 break;
6450 }
6451
6452 ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
6453 if (ret) {
6454 pr_err("copy_from_user failed, ret=%d\n", ret);
6455 break;
6456 }
6457
6458 dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
6459
6460 /* Prepare the output buffer in kernel space */
6461 tzbuflenout = PAGE_ALIGN(req.out_buf_size);
6462 tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
6463 if (!tzbufout) {
6464 pr_err("error allocating out buffer\n");
6465 ret = -ENOMEM;
6466 break;
6467 }
6468
6469 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6470
6471 /* Send the command to TZ */
6472 desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
6473 desc.args[0] = virt_to_phys(tzbufin);
6474 desc.args[1] = req.in_buf_size;
6475 desc.args[2] = virt_to_phys(tzbufout);
6476 desc.args[3] = req.out_buf_size;
6477 desc.args[4] = req.direction;
6478
6479 ret = __qseecom_enable_clk(CLK_QSEE);
6480 if (ret)
6481 break;
6482
Zhen Kong03f220d2019-02-01 17:12:34 -08006483 ret = __qseecom_scm_call2_locked(TZ_MDTP_CIPHER_DIP_ID, &desc);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006484
6485 __qseecom_disable_clk(CLK_QSEE);
6486
6487 if (ret) {
6488 pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
6489 ret);
6490 break;
6491 }
6492
6493 /* Copy the output buffer from kernel space to userspace */
6494 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6495 ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
6496 if (ret) {
6497 pr_err("copy_to_user failed, ret=%d\n", ret);
6498 break;
6499 }
6500 } while (0);
6501
6502 kzfree(tzbufin);
6503 kzfree(tzbufout);
6504
6505 return ret;
6506}
6507
6508static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
6509 struct qseecom_qteec_req *req)
6510{
6511 if (!data || !data->client.ihandle) {
6512 pr_err("Client or client handle is not initialized\n");
6513 return -EINVAL;
6514 }
6515
6516 if (data->type != QSEECOM_CLIENT_APP)
6517 return -EFAULT;
6518
6519 if (req->req_len > UINT_MAX - req->resp_len) {
6520 pr_err("Integer overflow detected in req_len & rsp_len\n");
6521 return -EINVAL;
6522 }
6523
6524 if (req->req_len + req->resp_len > data->client.sb_length) {
6525 pr_debug("Not enough memory to fit cmd_buf.\n");
6526 pr_debug("resp_buf. Required: %u, Available: %zu\n",
6527 (req->req_len + req->resp_len), data->client.sb_length);
6528 return -ENOMEM;
6529 }
6530
6531 if (req->req_ptr == NULL || req->resp_ptr == NULL) {
6532 pr_err("cmd buffer or response buffer is null\n");
6533 return -EINVAL;
6534 }
6535 if (((uintptr_t)req->req_ptr <
6536 data->client.user_virt_sb_base) ||
6537 ((uintptr_t)req->req_ptr >=
6538 (data->client.user_virt_sb_base + data->client.sb_length))) {
6539 pr_err("cmd buffer address not within shared bufffer\n");
6540 return -EINVAL;
6541 }
6542
6543 if (((uintptr_t)req->resp_ptr <
6544 data->client.user_virt_sb_base) ||
6545 ((uintptr_t)req->resp_ptr >=
6546 (data->client.user_virt_sb_base + data->client.sb_length))) {
6547 pr_err("response buffer address not within shared bufffer\n");
6548 return -EINVAL;
6549 }
6550
6551 if ((req->req_len == 0) || (req->resp_len == 0)) {
6552 pr_err("cmd buf lengtgh/response buf length not valid\n");
6553 return -EINVAL;
6554 }
6555
6556 if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
6557 pr_err("Integer overflow in req_len & req_ptr\n");
6558 return -EINVAL;
6559 }
6560
6561 if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
6562 pr_err("Integer overflow in resp_len & resp_ptr\n");
6563 return -EINVAL;
6564 }
6565
6566 if (data->client.user_virt_sb_base >
6567 (ULONG_MAX - data->client.sb_length)) {
6568 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
6569 return -EINVAL;
6570 }
6571 if ((((uintptr_t)req->req_ptr + req->req_len) >
6572 ((uintptr_t)data->client.user_virt_sb_base +
6573 data->client.sb_length)) ||
6574 (((uintptr_t)req->resp_ptr + req->resp_len) >
6575 ((uintptr_t)data->client.user_virt_sb_base +
6576 data->client.sb_length))) {
6577 pr_err("cmd buf or resp buf is out of shared buffer region\n");
6578 return -EINVAL;
6579 }
6580 return 0;
6581}
6582
6583static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6584 uint32_t fd_idx, struct sg_table *sg_ptr)
6585{
6586 struct scatterlist *sg = sg_ptr->sgl;
6587 struct qseecom_sg_entry *sg_entry;
6588 void *buf;
6589 uint i;
6590 size_t size;
6591 dma_addr_t coh_pmem;
6592
6593 if (fd_idx >= MAX_ION_FD) {
6594 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6595 return -ENOMEM;
6596 }
6597 /*
6598 * Allocate a buffer, populate it with number of entry plus
6599 * each sg entry's phy addr and length; then return the
6600 * phy_addr of the buffer.
6601 */
6602 size = sizeof(uint32_t) +
6603 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6604 size = (size + PAGE_SIZE) & PAGE_MASK;
6605 buf = dma_alloc_coherent(qseecom.pdev,
6606 size, &coh_pmem, GFP_KERNEL);
6607 if (buf == NULL) {
6608 pr_err("failed to alloc memory for sg buf\n");
6609 return -ENOMEM;
6610 }
6611 *(uint32_t *)buf = sg_ptr->nents;
6612 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6613 for (i = 0; i < sg_ptr->nents; i++) {
6614 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6615 sg_entry->len = sg->length;
6616 sg_entry++;
6617 sg = sg_next(sg);
6618 }
6619 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6620 data->client.sec_buf_fd[fd_idx].vbase = buf;
6621 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6622 data->client.sec_buf_fd[fd_idx].size = size;
6623 return 0;
6624}
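/*
 * Layout of the coherent buffer built by __qseecom_qteec_handle_pre_alc_fd()
 * and passed to TZ by physical address (summary of the code above; the total
 * size is rounded up to page granularity before dma_alloc_coherent()):
 *
 *	offset 0: uint32_t nents (number of SG entries)
 *	offset 4: struct qseecom_sg_entry[0] { phys_addr, len }
 *	          struct qseecom_sg_entry[1] { phys_addr, len }
 *	          ...
 */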
6625
6626static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6627 struct qseecom_dev_handle *data, bool cleanup)
6628{
6629 struct ion_handle *ihandle;
6630 int ret = 0;
6631 int i = 0;
6632 uint32_t *update;
6633 struct sg_table *sg_ptr = NULL;
6634 struct scatterlist *sg;
6635 struct qseecom_param_memref *memref;
6636
6637 if (req == NULL) {
6638 pr_err("Invalid address\n");
6639 return -EINVAL;
6640 }
6641 for (i = 0; i < MAX_ION_FD; i++) {
6642 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006643 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006644 req->ifd_data[i].fd);
6645 if (IS_ERR_OR_NULL(ihandle)) {
6646 pr_err("Ion client can't retrieve the handle\n");
6647 return -ENOMEM;
6648 }
6649 if ((req->req_len < sizeof(uint32_t)) ||
6650 (req->ifd_data[i].cmd_buf_offset >
6651 req->req_len - sizeof(uint32_t))) {
6652 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6653 req->req_len,
6654 req->ifd_data[i].cmd_buf_offset);
6655 return -EINVAL;
6656 }
6657 update = (uint32_t *)((char *) req->req_ptr +
6658 req->ifd_data[i].cmd_buf_offset);
6659 if (!update) {
6660 pr_err("update pointer is NULL\n");
6661 return -EINVAL;
6662 }
6663 } else {
6664 continue;
6665 }
6666 /* Populate the cmd data structure with the phys_addr */
6667 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6668 if (IS_ERR_OR_NULL(sg_ptr)) {
6669 pr_err("IOn client could not retrieve sg table\n");
6670 goto err;
6671 }
6672 sg = sg_ptr->sgl;
6673 if (sg == NULL) {
6674 pr_err("sg is NULL\n");
6675 goto err;
6676 }
6677 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6678 pr_err("Num of scat entr (%d)or length(%d) invalid\n",
6679 sg_ptr->nents, sg->length);
6680 goto err;
6681 }
6682 /* clean up buf for pre-allocated fd */
6683 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6684 (*update)) {
6685 if (data->client.sec_buf_fd[i].vbase)
6686 dma_free_coherent(qseecom.pdev,
6687 data->client.sec_buf_fd[i].size,
6688 data->client.sec_buf_fd[i].vbase,
6689 data->client.sec_buf_fd[i].pbase);
6690 memset((void *)update, 0,
6691 sizeof(struct qseecom_param_memref));
6692 memset(&(data->client.sec_buf_fd[i]), 0,
6693 sizeof(struct qseecom_sec_buf_fd_info));
6694 goto clean;
6695 }
6696
6697 if (*update == 0) {
6698 /* update buf for pre-allocated fd from secure heap*/
6699 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6700 sg_ptr);
6701 if (ret) {
6702 pr_err("Failed to handle buf for fd[%d]\n", i);
6703 goto err;
6704 }
6705 memref = (struct qseecom_param_memref *)update;
6706 memref->buffer =
6707 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6708 memref->size =
6709 (uint32_t)(data->client.sec_buf_fd[i].size);
6710 } else {
6711 /* update buf for fd from non-secure qseecom heap */
6712 if (sg_ptr->nents != 1) {
6713 pr_err("Num of scat entr (%d) invalid\n",
6714 sg_ptr->nents);
6715 goto err;
6716 }
6717 if (cleanup)
6718 *update = 0;
6719 else
6720 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6721 }
6722clean:
6723 if (cleanup) {
6724 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6725 ihandle, NULL, sg->length,
6726 ION_IOC_INV_CACHES);
6727 if (ret) {
6728 pr_err("cache operation failed %d\n", ret);
6729 goto err;
6730 }
6731 } else {
6732 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6733 ihandle, NULL, sg->length,
6734 ION_IOC_CLEAN_INV_CACHES);
6735 if (ret) {
6736 pr_err("cache operation failed %d\n", ret);
6737 goto err;
6738 }
6739 data->sglistinfo_ptr[i].indexAndFlags =
6740 SGLISTINFO_SET_INDEX_FLAG(
6741 (sg_ptr->nents == 1), 0,
6742 req->ifd_data[i].cmd_buf_offset);
6743 data->sglistinfo_ptr[i].sizeOrCount =
6744 (sg_ptr->nents == 1) ?
6745 sg->length : sg_ptr->nents;
6746 data->sglist_cnt = i + 1;
6747 }
6748 /* Deallocate the handle */
6749 if (!IS_ERR_OR_NULL(ihandle))
6750 ion_free(qseecom.ion_clnt, ihandle);
6751 }
6752 return ret;
6753err:
6754 if (!IS_ERR_OR_NULL(ihandle))
6755 ion_free(qseecom.ion_clnt, ihandle);
6756 return -ENOMEM;
6757}
6758
6759static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
6760 struct qseecom_qteec_req *req, uint32_t cmd_id)
6761{
6762 struct qseecom_command_scm_resp resp;
6763 struct qseecom_qteec_ireq ireq;
6764 struct qseecom_qteec_64bit_ireq ireq_64bit;
6765 struct qseecom_registered_app_list *ptr_app;
6766 bool found_app = false;
6767 unsigned long flags;
6768 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07006769 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006770 uint32_t reqd_len_sb_in = 0;
6771 void *cmd_buf = NULL;
6772 size_t cmd_len;
6773 struct sglist_info *table = data->sglistinfo_ptr;
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306774 void *req_ptr = NULL;
6775 void *resp_ptr = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006776
6777 ret = __qseecom_qteec_validate_msg(data, req);
6778 if (ret)
6779 return ret;
6780
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306781 req_ptr = req->req_ptr;
6782 resp_ptr = req->resp_ptr;
6783
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006784 /* find app_id & img_name from list */
6785 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6786 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6787 list) {
6788 if ((ptr_app->app_id == data->client.app_id) &&
6789 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6790 found_app = true;
6791 break;
6792 }
6793 }
6794 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6795 if (!found_app) {
6796 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6797 (char *)data->client.app_name);
6798 return -ENOENT;
6799 }
6800
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306801 req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6802 (uintptr_t)req->req_ptr);
6803 req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6804 (uintptr_t)req->resp_ptr);
6805
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006806 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6807 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
6808 ret = __qseecom_update_qteec_req_buf(
6809 (struct qseecom_qteec_modfd_req *)req, data, false);
6810 if (ret)
6811 return ret;
6812 }
6813
6814 if (qseecom.qsee_version < QSEE_VERSION_40) {
6815 ireq.app_id = data->client.app_id;
6816 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306817 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006818 ireq.req_len = req->req_len;
6819 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306820 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006821 ireq.resp_len = req->resp_len;
6822 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6823 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6824 dmac_flush_range((void *)table,
6825 (void *)table + SGLISTINFO_TABLE_SIZE);
6826 cmd_buf = (void *)&ireq;
6827 cmd_len = sizeof(struct qseecom_qteec_ireq);
6828 } else {
6829 ireq_64bit.app_id = data->client.app_id;
6830 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306831 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006832 ireq_64bit.req_len = req->req_len;
6833 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306834 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006835 ireq_64bit.resp_len = req->resp_len;
6836 if ((data->client.app_arch == ELFCLASS32) &&
6837 ((ireq_64bit.req_ptr >=
6838 PHY_ADDR_4G - ireq_64bit.req_len) ||
6839 (ireq_64bit.resp_ptr >=
6840 PHY_ADDR_4G - ireq_64bit.resp_len))){
6841 pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
6842 data->client.app_name, data->client.app_id);
6843 pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
6844 ireq_64bit.req_ptr, ireq_64bit.req_len,
6845 ireq_64bit.resp_ptr, ireq_64bit.resp_len);
6846 return -EFAULT;
6847 }
6848 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6849 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6850 dmac_flush_range((void *)table,
6851 (void *)table + SGLISTINFO_TABLE_SIZE);
6852 cmd_buf = (void *)&ireq_64bit;
6853 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6854 }
6855 if (qseecom.whitelist_support == true
6856 && cmd_id == QSEOS_TEE_OPEN_SESSION)
6857 *(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
6858 else
6859 *(uint32_t *)cmd_buf = cmd_id;
6860
6861 reqd_len_sb_in = req->req_len + req->resp_len;
6862 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6863 data->client.sb_virt,
6864 reqd_len_sb_in,
6865 ION_IOC_CLEAN_INV_CACHES);
6866 if (ret) {
6867 pr_err("cache operation failed %d\n", ret);
6868 return ret;
6869 }
6870
6871 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6872
6873 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6874 cmd_buf, cmd_len,
6875 &resp, sizeof(resp));
6876 if (ret) {
6877 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6878 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07006879 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006880 }
6881
6882 if (qseecom.qsee_reentrancy_support) {
6883 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07006884 if (ret)
6885 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006886 } else {
6887 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6888 ret = __qseecom_process_incomplete_cmd(data, &resp);
6889 if (ret) {
6890 pr_err("process_incomplete_cmd failed err: %d\n",
6891 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07006892 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006893 }
6894 } else {
6895 if (resp.result != QSEOS_RESULT_SUCCESS) {
6896 pr_err("Response result %d not supported\n",
6897 resp.result);
6898 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07006899 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006900 }
6901 }
6902 }
Zhen Kong4af480e2017-09-19 14:34:16 -07006903exit:
6904 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006905 data->client.sb_virt, data->client.sb_length,
6906 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07006907 if (ret2) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006908		pr_err("cache operation failed %d\n", ret2);
Zhen Kong4af480e2017-09-19 14:34:16 -07006909 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006910 }
6911
6912 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6913 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
Zhen Kong4af480e2017-09-19 14:34:16 -07006914 ret2 = __qseecom_update_qteec_req_buf(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006915 (struct qseecom_qteec_modfd_req *)req, data, true);
Zhen Kong4af480e2017-09-19 14:34:16 -07006916 if (ret2)
6917 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006918 }
Zhen Kong4af480e2017-09-19 14:34:16 -07006919 return ret;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006920}
6921
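/*
 * The qseecom_qteec_open_session(), qseecom_qteec_close_session() and
 * qseecom_qteec_request_cancellation() ioctl handlers below are thin
 * wrappers: they copy the request from userspace and hand it to
 * __qseecom_qteec_issue_cmd() with the matching QSEOS_TEE_* command id.
 */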
6922static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6923 void __user *argp)
6924{
6925 struct qseecom_qteec_modfd_req req;
6926 int ret = 0;
6927
6928 ret = copy_from_user(&req, argp,
6929 sizeof(struct qseecom_qteec_modfd_req));
6930 if (ret) {
6931 pr_err("copy_from_user failed\n");
6932 return ret;
6933 }
6934 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6935 QSEOS_TEE_OPEN_SESSION);
6936
6937 return ret;
6938}
6939
6940static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6941 void __user *argp)
6942{
6943 struct qseecom_qteec_req req;
6944 int ret = 0;
6945
6946 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6947 if (ret) {
6948 pr_err("copy_from_user failed\n");
6949 return ret;
6950 }
6951 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6952 return ret;
6953}
6954
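/*
 * Unlike the wrappers above, the invoke-command path builds its own ireq:
 * it validates the ion fd command-buffer offsets, remaps the request and
 * response buffers, patches the fd-backed buffers into the command, and
 * issues QSEOS_TEE_INVOKE_COMMAND (or its whitelist variant).
 */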
6955static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
6956 void __user *argp)
6957{
6958 struct qseecom_qteec_modfd_req req;
6959 struct qseecom_command_scm_resp resp;
6960 struct qseecom_qteec_ireq ireq;
6961 struct qseecom_qteec_64bit_ireq ireq_64bit;
6962 struct qseecom_registered_app_list *ptr_app;
6963 bool found_app = false;
6964 unsigned long flags;
6965 int ret = 0;
6966 int i = 0;
6967 uint32_t reqd_len_sb_in = 0;
6968 void *cmd_buf = NULL;
6969 size_t cmd_len;
6970 struct sglist_info *table = data->sglistinfo_ptr;
6971 void *req_ptr = NULL;
6972 void *resp_ptr = NULL;
6973
6974 ret = copy_from_user(&req, argp,
6975 sizeof(struct qseecom_qteec_modfd_req));
6976 if (ret) {
6977 pr_err("copy_from_user failed\n");
6978 return ret;
6979 }
6980 ret = __qseecom_qteec_validate_msg(data,
6981 (struct qseecom_qteec_req *)(&req));
6982 if (ret)
6983 return ret;
6984 req_ptr = req.req_ptr;
6985 resp_ptr = req.resp_ptr;
6986
6987 /* find app_id & img_name from list */
6988 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6989 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6990 list) {
6991 if ((ptr_app->app_id == data->client.app_id) &&
6992 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6993 found_app = true;
6994 break;
6995 }
6996 }
6997 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6998 if (!found_app) {
6999 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
7000 (char *)data->client.app_name);
7001 return -ENOENT;
7002 }
7003
7004 /* validate offsets */
7005 for (i = 0; i < MAX_ION_FD; i++) {
7006 if (req.ifd_data[i].fd) {
7007 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
7008 return -EINVAL;
7009 }
7010 }
7011 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
7012 (uintptr_t)req.req_ptr);
7013 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
7014 (uintptr_t)req.resp_ptr);
7015 ret = __qseecom_update_qteec_req_buf(&req, data, false);
7016 if (ret)
7017 return ret;
7018
7019 if (qseecom.qsee_version < QSEE_VERSION_40) {
7020 ireq.app_id = data->client.app_id;
7021 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7022 (uintptr_t)req_ptr);
7023 ireq.req_len = req.req_len;
7024 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
7025 (uintptr_t)resp_ptr);
7026 ireq.resp_len = req.resp_len;
7027 cmd_buf = (void *)&ireq;
7028 cmd_len = sizeof(struct qseecom_qteec_ireq);
7029 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
7030 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7031 dmac_flush_range((void *)table,
7032 (void *)table + SGLISTINFO_TABLE_SIZE);
7033 } else {
7034 ireq_64bit.app_id = data->client.app_id;
7035 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7036 (uintptr_t)req_ptr);
7037 ireq_64bit.req_len = req.req_len;
7038 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
7039 (uintptr_t)resp_ptr);
7040 ireq_64bit.resp_len = req.resp_len;
7041 cmd_buf = (void *)&ireq_64bit;
7042 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
7043 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
7044 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
7045 dmac_flush_range((void *)table,
7046 (void *)table + SGLISTINFO_TABLE_SIZE);
7047 }
7048 reqd_len_sb_in = req.req_len + req.resp_len;
7049 if (qseecom.whitelist_support == true)
7050 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
7051 else
7052 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
7053
7054 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7055 data->client.sb_virt,
7056 reqd_len_sb_in,
7057 ION_IOC_CLEAN_INV_CACHES);
7058 if (ret) {
7059 pr_err("cache operation failed %d\n", ret);
7060 return ret;
7061 }
7062
7063 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
7064
7065 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
7066 cmd_buf, cmd_len,
7067 &resp, sizeof(resp));
7068 if (ret) {
7069 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
7070 ret, data->client.app_id);
7071 return ret;
7072 }
7073
7074 if (qseecom.qsee_reentrancy_support) {
7075 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
7076 } else {
7077 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
7078 ret = __qseecom_process_incomplete_cmd(data, &resp);
7079 if (ret) {
7080 pr_err("process_incomplete_cmd failed err: %d\n",
7081 ret);
7082 return ret;
7083 }
7084 } else {
7085 if (resp.result != QSEOS_RESULT_SUCCESS) {
7086 pr_err("Response result %d not supported\n",
7087 resp.result);
7088 ret = -EINVAL;
7089 }
7090 }
7091 }
7092 ret = __qseecom_update_qteec_req_buf(&req, data, true);
7093 if (ret)
7094 return ret;
7095
7096 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7097 data->client.sb_virt, data->client.sb_length,
7098 ION_IOC_INV_CACHES);
7099 if (ret) {
7100 pr_err("cache operation failed %d\n", ret);
7101 return ret;
7102 }
7103 return 0;
7104}
7105
7106static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
7107 void __user *argp)
7108{
7109 struct qseecom_qteec_modfd_req req;
7110 int ret = 0;
7111
7112 ret = copy_from_user(&req, argp,
7113 sizeof(struct qseecom_qteec_modfd_req));
7114 if (ret) {
7115 pr_err("copy_from_user failed\n");
7116 return ret;
7117 }
7118 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
7119 QSEOS_TEE_REQUEST_CANCELLATION);
7120
7121 return ret;
7122}
7123
7124static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
7125{
7126 if (data->sglist_cnt) {
7127 memset(data->sglistinfo_ptr, 0,
7128 SGLISTINFO_TABLE_SIZE);
7129 data->sglist_cnt = 0;
7130 }
7131}
7132
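/*
 * Main ioctl dispatcher. Every command validates the per-fd handle type
 * first; everything except the listener receive/response commands also
 * pokes the unregister-listener kthread. Application commands are
 * serialized with app_access_lock, listener commands with
 * listener_access_lock, and each handler brackets its work with
 * ioctl_count so an abort can wait for in-flight requests.
 */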
AnilKumar Chimataa312d342019-01-25 12:43:23 +05307133static long qseecom_ioctl(struct file *file,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007134 unsigned int cmd, unsigned long arg)
7135{
7136 int ret = 0;
7137 struct qseecom_dev_handle *data = file->private_data;
7138 void __user *argp = (void __user *) arg;
7139 bool perf_enabled = false;
7140
7141 if (!data) {
7142 pr_err("Invalid/uninitialized device handle\n");
7143 return -EINVAL;
7144 }
7145
7146 if (data->abort) {
7147 pr_err("Aborting qseecom driver\n");
7148 return -ENODEV;
7149 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007150 if (cmd != QSEECOM_IOCTL_RECEIVE_REQ &&
7151 cmd != QSEECOM_IOCTL_SEND_RESP_REQ &&
7152 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP &&
7153 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64)
Zhen Kongc4c162a2019-01-23 12:07:12 -08007154 __wakeup_unregister_listener_kthread();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007155
7156 switch (cmd) {
7157 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
7158 if (data->type != QSEECOM_GENERIC) {
7159 pr_err("reg lstnr req: invalid handle (%d)\n",
7160 data->type);
7161 ret = -EINVAL;
7162 break;
7163 }
7164 pr_debug("ioctl register_listener_req()\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08007165 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007166 atomic_inc(&data->ioctl_count);
7167 data->type = QSEECOM_LISTENER_SERVICE;
7168 ret = qseecom_register_listener(data, argp);
7169 atomic_dec(&data->ioctl_count);
7170 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007171 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007172 if (ret)
7173 pr_err("failed qseecom_register_listener: %d\n", ret);
7174 break;
7175 }
Neeraj Sonib30ac1f2018-04-17 14:48:42 +05307176 case QSEECOM_IOCTL_SET_ICE_INFO: {
7177 struct qseecom_ice_data_t ice_data;
7178
7179 ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
7180 if (ret) {
7181 pr_err("copy_from_user failed\n");
7182 return -EFAULT;
7183 }
7184 qcom_ice_set_fde_flag(ice_data.flag);
7185 break;
7186 }
7187
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007188 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
7189 if ((data->listener.id == 0) ||
7190 (data->type != QSEECOM_LISTENER_SERVICE)) {
7191 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
7192 data->type, data->listener.id);
7193 ret = -EINVAL;
7194 break;
7195 }
7196 pr_debug("ioctl unregister_listener_req()\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08007197 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007198 atomic_inc(&data->ioctl_count);
7199 ret = qseecom_unregister_listener(data);
7200 atomic_dec(&data->ioctl_count);
7201 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007202 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007203 if (ret)
7204 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7205 break;
7206 }
7207 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7208 if ((data->client.app_id == 0) ||
7209 (data->type != QSEECOM_CLIENT_APP)) {
7210 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7211 data->type, data->client.app_id);
7212 ret = -EINVAL;
7213 break;
7214 }
7215 /* Only one client allowed here at a time */
7216 mutex_lock(&app_access_lock);
7217 if (qseecom.support_bus_scaling) {
7218 /* register bus bw in case the client doesn't do it */
7219 if (!data->mode) {
7220 mutex_lock(&qsee_bw_mutex);
7221 __qseecom_register_bus_bandwidth_needs(
7222 data, HIGH);
7223 mutex_unlock(&qsee_bw_mutex);
7224 }
7225 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7226 if (ret) {
7227 pr_err("Failed to set bw.\n");
7228 ret = -EINVAL;
7229 mutex_unlock(&app_access_lock);
7230 break;
7231 }
7232 }
7233 /*
7234 * On targets where crypto clock is handled by HLOS,
7235 * if clk_access_cnt is zero and perf_enabled is false,
7236 * then the crypto clock was not enabled before sending cmd to
7237 * tz, qseecom will enable the clock to avoid service failure.
7238 */
7239 if (!qseecom.no_clock_support &&
7240 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7241 pr_debug("ce clock is not enabled!\n");
7242 ret = qseecom_perf_enable(data);
7243 if (ret) {
7244 pr_err("Failed to vote for clock with err %d\n",
7245 ret);
7246 mutex_unlock(&app_access_lock);
7247 ret = -EINVAL;
7248 break;
7249 }
7250 perf_enabled = true;
7251 }
7252 atomic_inc(&data->ioctl_count);
7253 ret = qseecom_send_cmd(data, argp);
7254 if (qseecom.support_bus_scaling)
7255 __qseecom_add_bw_scale_down_timer(
7256 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7257 if (perf_enabled) {
7258 qsee_disable_clock_vote(data, CLK_DFAB);
7259 qsee_disable_clock_vote(data, CLK_SFPB);
7260 }
7261 atomic_dec(&data->ioctl_count);
7262 wake_up_all(&data->abort_wq);
7263 mutex_unlock(&app_access_lock);
7264 if (ret)
7265 pr_err("failed qseecom_send_cmd: %d\n", ret);
7266 break;
7267 }
7268 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7269 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7270 if ((data->client.app_id == 0) ||
7271 (data->type != QSEECOM_CLIENT_APP)) {
7272 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7273 data->type, data->client.app_id);
7274 ret = -EINVAL;
7275 break;
7276 }
7277 /* Only one client allowed here at a time */
7278 mutex_lock(&app_access_lock);
7279 if (qseecom.support_bus_scaling) {
7280 if (!data->mode) {
7281 mutex_lock(&qsee_bw_mutex);
7282 __qseecom_register_bus_bandwidth_needs(
7283 data, HIGH);
7284 mutex_unlock(&qsee_bw_mutex);
7285 }
7286 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7287 if (ret) {
7288 pr_err("Failed to set bw.\n");
7289 mutex_unlock(&app_access_lock);
7290 ret = -EINVAL;
7291 break;
7292 }
7293 }
7294 /*
7295 * On targets where crypto clock is handled by HLOS,
7296 * if clk_access_cnt is zero and perf_enabled is false,
7297 * then the crypto clock was not enabled before sending cmd to
7298 * tz, qseecom will enable the clock to avoid service failure.
7299 */
7300 if (!qseecom.no_clock_support &&
7301 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7302 pr_debug("ce clock is not enabled!\n");
7303 ret = qseecom_perf_enable(data);
7304 if (ret) {
7305 pr_err("Failed to vote for clock with err %d\n",
7306 ret);
7307 mutex_unlock(&app_access_lock);
7308 ret = -EINVAL;
7309 break;
7310 }
7311 perf_enabled = true;
7312 }
7313 atomic_inc(&data->ioctl_count);
7314 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7315 ret = qseecom_send_modfd_cmd(data, argp);
7316 else
7317 ret = qseecom_send_modfd_cmd_64(data, argp);
7318 if (qseecom.support_bus_scaling)
7319 __qseecom_add_bw_scale_down_timer(
7320 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7321 if (perf_enabled) {
7322 qsee_disable_clock_vote(data, CLK_DFAB);
7323 qsee_disable_clock_vote(data, CLK_SFPB);
7324 }
7325 atomic_dec(&data->ioctl_count);
7326 wake_up_all(&data->abort_wq);
7327 mutex_unlock(&app_access_lock);
7328 if (ret)
7329 pr_err("failed qseecom_send_cmd: %d\n", ret);
7330 __qseecom_clean_data_sglistinfo(data);
7331 break;
7332 }
7333 case QSEECOM_IOCTL_RECEIVE_REQ: {
7334 if ((data->listener.id == 0) ||
7335 (data->type != QSEECOM_LISTENER_SERVICE)) {
7336 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7337 data->type, data->listener.id);
7338 ret = -EINVAL;
7339 break;
7340 }
7341 atomic_inc(&data->ioctl_count);
7342 ret = qseecom_receive_req(data);
7343 atomic_dec(&data->ioctl_count);
7344 wake_up_all(&data->abort_wq);
7345 if (ret && (ret != -ERESTARTSYS))
7346 pr_err("failed qseecom_receive_req: %d\n", ret);
7347 break;
7348 }
7349 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7350 if ((data->listener.id == 0) ||
7351 (data->type != QSEECOM_LISTENER_SERVICE)) {
7352 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7353 data->type, data->listener.id);
7354 ret = -EINVAL;
7355 break;
7356 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007357 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007358 atomic_inc(&data->ioctl_count);
7359 if (!qseecom.qsee_reentrancy_support)
7360 ret = qseecom_send_resp();
7361 else
7362 ret = qseecom_reentrancy_send_resp(data);
7363 atomic_dec(&data->ioctl_count);
7364 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007365 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007366 if (ret)
7367 pr_err("failed qseecom_send_resp: %d\n", ret);
7368 break;
7369 }
7370 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7371 if ((data->type != QSEECOM_CLIENT_APP) &&
7372 (data->type != QSEECOM_GENERIC) &&
7373 (data->type != QSEECOM_SECURE_SERVICE)) {
7374 pr_err("set mem param req: invalid handle (%d)\n",
7375 data->type);
7376 ret = -EINVAL;
7377 break;
7378 }
7379 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7380 mutex_lock(&app_access_lock);
7381 atomic_inc(&data->ioctl_count);
7382 ret = qseecom_set_client_mem_param(data, argp);
7383 atomic_dec(&data->ioctl_count);
7384 mutex_unlock(&app_access_lock);
7385 if (ret)
7386 			pr_err("failed qseecom_set_mem_param request: %d\n",
7387 ret);
7388 break;
7389 }
7390 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7391 if ((data->type != QSEECOM_GENERIC) &&
7392 (data->type != QSEECOM_CLIENT_APP)) {
7393 pr_err("load app req: invalid handle (%d)\n",
7394 data->type);
7395 ret = -EINVAL;
7396 break;
7397 }
7398 data->type = QSEECOM_CLIENT_APP;
7399 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7400 mutex_lock(&app_access_lock);
7401 atomic_inc(&data->ioctl_count);
7402 ret = qseecom_load_app(data, argp);
7403 atomic_dec(&data->ioctl_count);
7404 mutex_unlock(&app_access_lock);
7405 if (ret)
7406 pr_err("failed load_app request: %d\n", ret);
7407 break;
7408 }
7409 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7410 if ((data->client.app_id == 0) ||
7411 (data->type != QSEECOM_CLIENT_APP)) {
7412 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7413 data->type, data->client.app_id);
7414 ret = -EINVAL;
7415 break;
7416 }
7417 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7418 mutex_lock(&app_access_lock);
7419 atomic_inc(&data->ioctl_count);
7420 ret = qseecom_unload_app(data, false);
7421 atomic_dec(&data->ioctl_count);
7422 mutex_unlock(&app_access_lock);
7423 if (ret)
7424 pr_err("failed unload_app request: %d\n", ret);
7425 break;
7426 }
7427 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7428 atomic_inc(&data->ioctl_count);
7429 ret = qseecom_get_qseos_version(data, argp);
7430 if (ret)
7431 pr_err("qseecom_get_qseos_version: %d\n", ret);
7432 atomic_dec(&data->ioctl_count);
7433 break;
7434 }
7435 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7436 if ((data->type != QSEECOM_GENERIC) &&
7437 (data->type != QSEECOM_CLIENT_APP)) {
7438 pr_err("perf enable req: invalid handle (%d)\n",
7439 data->type);
7440 ret = -EINVAL;
7441 break;
7442 }
7443 if ((data->type == QSEECOM_CLIENT_APP) &&
7444 (data->client.app_id == 0)) {
7445 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7446 data->type, data->client.app_id);
7447 ret = -EINVAL;
7448 break;
7449 }
7450 atomic_inc(&data->ioctl_count);
7451 if (qseecom.support_bus_scaling) {
7452 mutex_lock(&qsee_bw_mutex);
7453 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7454 mutex_unlock(&qsee_bw_mutex);
7455 } else {
7456 ret = qseecom_perf_enable(data);
7457 if (ret)
7458 pr_err("Fail to vote for clocks %d\n", ret);
7459 }
7460 atomic_dec(&data->ioctl_count);
7461 break;
7462 }
7463 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7464 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7465 (data->type != QSEECOM_CLIENT_APP)) {
7466 pr_err("perf disable req: invalid handle (%d)\n",
7467 data->type);
7468 ret = -EINVAL;
7469 break;
7470 }
7471 if ((data->type == QSEECOM_CLIENT_APP) &&
7472 (data->client.app_id == 0)) {
7473 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7474 data->type, data->client.app_id);
7475 ret = -EINVAL;
7476 break;
7477 }
7478 atomic_inc(&data->ioctl_count);
7479 if (!qseecom.support_bus_scaling) {
7480 qsee_disable_clock_vote(data, CLK_DFAB);
7481 qsee_disable_clock_vote(data, CLK_SFPB);
7482 } else {
7483 mutex_lock(&qsee_bw_mutex);
7484 qseecom_unregister_bus_bandwidth_needs(data);
7485 mutex_unlock(&qsee_bw_mutex);
7486 }
7487 atomic_dec(&data->ioctl_count);
7488 break;
7489 }
7490
7491 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7492 		/* If crypto clock is not handled by HLOS, there is nothing to scale. */
7493 if (qseecom.no_clock_support) {
7494 pr_debug("crypto clock is not handled by HLOS\n");
7495 break;
7496 }
7497 if ((data->client.app_id == 0) ||
7498 (data->type != QSEECOM_CLIENT_APP)) {
7499 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7500 data->type, data->client.app_id);
7501 ret = -EINVAL;
7502 break;
7503 }
7504 atomic_inc(&data->ioctl_count);
7505 ret = qseecom_scale_bus_bandwidth(data, argp);
7506 atomic_dec(&data->ioctl_count);
7507 break;
7508 }
7509 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7510 if (data->type != QSEECOM_GENERIC) {
7511 pr_err("load ext elf req: invalid client handle (%d)\n",
7512 data->type);
7513 ret = -EINVAL;
7514 break;
7515 }
7516 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7517 data->released = true;
7518 mutex_lock(&app_access_lock);
7519 atomic_inc(&data->ioctl_count);
7520 ret = qseecom_load_external_elf(data, argp);
7521 atomic_dec(&data->ioctl_count);
7522 mutex_unlock(&app_access_lock);
7523 if (ret)
7524 pr_err("failed load_external_elf request: %d\n", ret);
7525 break;
7526 }
7527 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7528 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7529 pr_err("unload ext elf req: invalid handle (%d)\n",
7530 data->type);
7531 ret = -EINVAL;
7532 break;
7533 }
7534 data->released = true;
7535 mutex_lock(&app_access_lock);
7536 atomic_inc(&data->ioctl_count);
7537 ret = qseecom_unload_external_elf(data);
7538 atomic_dec(&data->ioctl_count);
7539 mutex_unlock(&app_access_lock);
7540 if (ret)
7541 pr_err("failed unload_app request: %d\n", ret);
7542 break;
7543 }
7544 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7545 data->type = QSEECOM_CLIENT_APP;
7546 mutex_lock(&app_access_lock);
7547 atomic_inc(&data->ioctl_count);
7548 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7549 ret = qseecom_query_app_loaded(data, argp);
7550 atomic_dec(&data->ioctl_count);
7551 mutex_unlock(&app_access_lock);
7552 break;
7553 }
7554 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7555 if (data->type != QSEECOM_GENERIC) {
7556 pr_err("send cmd svc req: invalid handle (%d)\n",
7557 data->type);
7558 ret = -EINVAL;
7559 break;
7560 }
7561 data->type = QSEECOM_SECURE_SERVICE;
7562 if (qseecom.qsee_version < QSEE_VERSION_03) {
7563 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7564 qseecom.qsee_version);
7565 return -EINVAL;
7566 }
7567 mutex_lock(&app_access_lock);
7568 atomic_inc(&data->ioctl_count);
7569 ret = qseecom_send_service_cmd(data, argp);
7570 atomic_dec(&data->ioctl_count);
7571 mutex_unlock(&app_access_lock);
7572 break;
7573 }
7574 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7575 if (!(qseecom.support_pfe || qseecom.support_fde))
7576 pr_err("Features requiring key init not supported\n");
7577 if (data->type != QSEECOM_GENERIC) {
7578 pr_err("create key req: invalid handle (%d)\n",
7579 data->type);
7580 ret = -EINVAL;
7581 break;
7582 }
7583 if (qseecom.qsee_version < QSEE_VERSION_05) {
7584 pr_err("Create Key feature unsupported: qsee ver %u\n",
7585 qseecom.qsee_version);
7586 return -EINVAL;
7587 }
7588 data->released = true;
7589 mutex_lock(&app_access_lock);
7590 atomic_inc(&data->ioctl_count);
7591 ret = qseecom_create_key(data, argp);
7592 if (ret)
7593 pr_err("failed to create encryption key: %d\n", ret);
7594
7595 atomic_dec(&data->ioctl_count);
7596 mutex_unlock(&app_access_lock);
7597 break;
7598 }
7599 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7600 if (!(qseecom.support_pfe || qseecom.support_fde))
7601 pr_err("Features requiring key init not supported\n");
7602 if (data->type != QSEECOM_GENERIC) {
7603 pr_err("wipe key req: invalid handle (%d)\n",
7604 data->type);
7605 ret = -EINVAL;
7606 break;
7607 }
7608 if (qseecom.qsee_version < QSEE_VERSION_05) {
7609 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7610 qseecom.qsee_version);
7611 return -EINVAL;
7612 }
7613 data->released = true;
7614 mutex_lock(&app_access_lock);
7615 atomic_inc(&data->ioctl_count);
7616 ret = qseecom_wipe_key(data, argp);
7617 if (ret)
7618 pr_err("failed to wipe encryption key: %d\n", ret);
7619 atomic_dec(&data->ioctl_count);
7620 mutex_unlock(&app_access_lock);
7621 break;
7622 }
7623 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7624 if (!(qseecom.support_pfe || qseecom.support_fde))
7625 pr_err("Features requiring key init not supported\n");
7626 if (data->type != QSEECOM_GENERIC) {
7627 pr_err("update key req: invalid handle (%d)\n",
7628 data->type);
7629 ret = -EINVAL;
7630 break;
7631 }
7632 if (qseecom.qsee_version < QSEE_VERSION_05) {
7633 pr_err("Update Key feature unsupported in qsee ver %u\n",
7634 qseecom.qsee_version);
7635 return -EINVAL;
7636 }
7637 data->released = true;
7638 mutex_lock(&app_access_lock);
7639 atomic_inc(&data->ioctl_count);
7640 ret = qseecom_update_key_user_info(data, argp);
7641 if (ret)
7642 pr_err("failed to update key user info: %d\n", ret);
7643 atomic_dec(&data->ioctl_count);
7644 mutex_unlock(&app_access_lock);
7645 break;
7646 }
7647 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7648 if (data->type != QSEECOM_GENERIC) {
7649 pr_err("save part hash req: invalid handle (%d)\n",
7650 data->type);
7651 ret = -EINVAL;
7652 break;
7653 }
7654 data->released = true;
7655 mutex_lock(&app_access_lock);
7656 atomic_inc(&data->ioctl_count);
7657 ret = qseecom_save_partition_hash(argp);
7658 atomic_dec(&data->ioctl_count);
7659 mutex_unlock(&app_access_lock);
7660 break;
7661 }
7662 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7663 if (data->type != QSEECOM_GENERIC) {
7664 pr_err("ES activated req: invalid handle (%d)\n",
7665 data->type);
7666 ret = -EINVAL;
7667 break;
7668 }
7669 data->released = true;
7670 mutex_lock(&app_access_lock);
7671 atomic_inc(&data->ioctl_count);
7672 ret = qseecom_is_es_activated(argp);
7673 atomic_dec(&data->ioctl_count);
7674 mutex_unlock(&app_access_lock);
7675 break;
7676 }
7677 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7678 if (data->type != QSEECOM_GENERIC) {
7679 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7680 data->type);
7681 ret = -EINVAL;
7682 break;
7683 }
7684 data->released = true;
7685 mutex_lock(&app_access_lock);
7686 atomic_inc(&data->ioctl_count);
7687 ret = qseecom_mdtp_cipher_dip(argp);
7688 atomic_dec(&data->ioctl_count);
7689 mutex_unlock(&app_access_lock);
7690 break;
7691 }
7692 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7693 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7694 if ((data->listener.id == 0) ||
7695 (data->type != QSEECOM_LISTENER_SERVICE)) {
7696 			pr_err("send modfd resp: invalid handle (%d), lid(%d)\n",
7697 data->type, data->listener.id);
7698 ret = -EINVAL;
7699 break;
7700 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007701 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007702 atomic_inc(&data->ioctl_count);
7703 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7704 ret = qseecom_send_modfd_resp(data, argp);
7705 else
7706 ret = qseecom_send_modfd_resp_64(data, argp);
7707 atomic_dec(&data->ioctl_count);
7708 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007709 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007710 if (ret)
7711 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7712 __qseecom_clean_data_sglistinfo(data);
7713 break;
7714 }
7715 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7716 if ((data->client.app_id == 0) ||
7717 (data->type != QSEECOM_CLIENT_APP)) {
7718 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7719 data->type, data->client.app_id);
7720 ret = -EINVAL;
7721 break;
7722 }
7723 if (qseecom.qsee_version < QSEE_VERSION_40) {
7724 pr_err("GP feature unsupported: qsee ver %u\n",
7725 qseecom.qsee_version);
7726 return -EINVAL;
7727 }
7728 /* Only one client allowed here at a time */
7729 mutex_lock(&app_access_lock);
7730 atomic_inc(&data->ioctl_count);
7731 ret = qseecom_qteec_open_session(data, argp);
7732 atomic_dec(&data->ioctl_count);
7733 wake_up_all(&data->abort_wq);
7734 mutex_unlock(&app_access_lock);
7735 if (ret)
7736 pr_err("failed open_session_cmd: %d\n", ret);
7737 __qseecom_clean_data_sglistinfo(data);
7738 break;
7739 }
7740 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7741 if ((data->client.app_id == 0) ||
7742 (data->type != QSEECOM_CLIENT_APP)) {
7743 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7744 data->type, data->client.app_id);
7745 ret = -EINVAL;
7746 break;
7747 }
7748 if (qseecom.qsee_version < QSEE_VERSION_40) {
7749 pr_err("GP feature unsupported: qsee ver %u\n",
7750 qseecom.qsee_version);
7751 return -EINVAL;
7752 }
7753 /* Only one client allowed here at a time */
7754 mutex_lock(&app_access_lock);
7755 atomic_inc(&data->ioctl_count);
7756 ret = qseecom_qteec_close_session(data, argp);
7757 atomic_dec(&data->ioctl_count);
7758 wake_up_all(&data->abort_wq);
7759 mutex_unlock(&app_access_lock);
7760 if (ret)
7761 pr_err("failed close_session_cmd: %d\n", ret);
7762 break;
7763 }
7764 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7765 if ((data->client.app_id == 0) ||
7766 (data->type != QSEECOM_CLIENT_APP)) {
7767 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7768 data->type, data->client.app_id);
7769 ret = -EINVAL;
7770 break;
7771 }
7772 if (qseecom.qsee_version < QSEE_VERSION_40) {
7773 pr_err("GP feature unsupported: qsee ver %u\n",
7774 qseecom.qsee_version);
7775 return -EINVAL;
7776 }
7777 /* Only one client allowed here at a time */
7778 mutex_lock(&app_access_lock);
7779 atomic_inc(&data->ioctl_count);
7780 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7781 atomic_dec(&data->ioctl_count);
7782 wake_up_all(&data->abort_wq);
7783 mutex_unlock(&app_access_lock);
7784 if (ret)
7785 pr_err("failed Invoke cmd: %d\n", ret);
7786 __qseecom_clean_data_sglistinfo(data);
7787 break;
7788 }
7789 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7790 if ((data->client.app_id == 0) ||
7791 (data->type != QSEECOM_CLIENT_APP)) {
7792 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7793 data->type, data->client.app_id);
7794 ret = -EINVAL;
7795 break;
7796 }
7797 if (qseecom.qsee_version < QSEE_VERSION_40) {
7798 pr_err("GP feature unsupported: qsee ver %u\n",
7799 qseecom.qsee_version);
7800 return -EINVAL;
7801 }
7802 /* Only one client allowed here at a time */
7803 mutex_lock(&app_access_lock);
7804 atomic_inc(&data->ioctl_count);
7805 ret = qseecom_qteec_request_cancellation(data, argp);
7806 atomic_dec(&data->ioctl_count);
7807 wake_up_all(&data->abort_wq);
7808 mutex_unlock(&app_access_lock);
7809 if (ret)
7810 pr_err("failed request_cancellation: %d\n", ret);
7811 break;
7812 }
7813 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7814 atomic_inc(&data->ioctl_count);
7815 ret = qseecom_get_ce_info(data, argp);
7816 if (ret)
7817 pr_err("failed get fde ce pipe info: %d\n", ret);
7818 atomic_dec(&data->ioctl_count);
7819 break;
7820 }
7821 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7822 atomic_inc(&data->ioctl_count);
7823 ret = qseecom_free_ce_info(data, argp);
7824 if (ret)
7825 pr_err("failed get fde ce pipe info: %d\n", ret);
7826 atomic_dec(&data->ioctl_count);
7827 break;
7828 }
7829 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7830 atomic_inc(&data->ioctl_count);
7831 ret = qseecom_query_ce_info(data, argp);
7832 if (ret)
7833 pr_err("failed get fde ce pipe info: %d\n", ret);
7834 atomic_dec(&data->ioctl_count);
7835 break;
7836 }
7837 default:
7838 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7839 return -EINVAL;
7840 }
7841 return ret;
7842}
7843
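/* Allocate the per-fd handle; it starts life as QSEECOM_GENERIC and INACTIVE. */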
7844static int qseecom_open(struct inode *inode, struct file *file)
7845{
7846 int ret = 0;
7847 struct qseecom_dev_handle *data;
7848
7849 data = kzalloc(sizeof(*data), GFP_KERNEL);
7850 if (!data)
7851 return -ENOMEM;
7852 file->private_data = data;
7853 data->abort = 0;
7854 data->type = QSEECOM_GENERIC;
7855 data->released = false;
7856 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7857 data->mode = INACTIVE;
7858 init_waitqueue_head(&data->abort_wq);
7859 atomic_set(&data->ioctl_count, 0);
7860 return ret;
7861}
7862
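/*
 * Release path: depending on what the fd was used for, unregister the
 * listener, unload the client app, or unmap the ION allocation, then drop
 * any outstanding bus/clock votes. For listener fds the private data is
 * intentionally not freed here.
 */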
7863static int qseecom_release(struct inode *inode, struct file *file)
7864{
7865 struct qseecom_dev_handle *data = file->private_data;
7866 int ret = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08007867 bool free_private_data = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007868
7869 if (data->released == false) {
7870 pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
7871 data->type, data->mode, data);
7872 switch (data->type) {
7873 case QSEECOM_LISTENER_SERVICE:
Zhen Kongbcdeda22018-11-16 13:50:51 -08007874 pr_debug("release lsnr svc %d\n", data->listener.id);
7875 free_private_data = false;
7876 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007877 ret = qseecom_unregister_listener(data);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08007878 data->listener.release_called = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08007879 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007880 break;
7881 case QSEECOM_CLIENT_APP:
7882 mutex_lock(&app_access_lock);
7883 ret = qseecom_unload_app(data, true);
7884 mutex_unlock(&app_access_lock);
7885 break;
7886 case QSEECOM_SECURE_SERVICE:
7887 case QSEECOM_GENERIC:
7888 ret = qseecom_unmap_ion_allocated_memory(data);
7889 if (ret)
7890 pr_err("Ion Unmap failed\n");
7891 break;
7892 case QSEECOM_UNAVAILABLE_CLIENT_APP:
7893 break;
7894 default:
7895 			pr_err("Unsupported clnt_handle_type %d\n",
7896 data->type);
7897 break;
7898 }
7899 }
7900
7901 if (qseecom.support_bus_scaling) {
7902 mutex_lock(&qsee_bw_mutex);
7903 if (data->mode != INACTIVE) {
7904 qseecom_unregister_bus_bandwidth_needs(data);
7905 if (qseecom.cumulative_mode == INACTIVE) {
7906 ret = __qseecom_set_msm_bus_request(INACTIVE);
7907 if (ret)
7908 pr_err("Fail to scale down bus\n");
7909 }
7910 }
7911 mutex_unlock(&qsee_bw_mutex);
7912 } else {
7913 if (data->fast_load_enabled == true)
7914 qsee_disable_clock_vote(data, CLK_SFPB);
7915 if (data->perf_enabled == true)
7916 qsee_disable_clock_vote(data, CLK_DFAB);
7917 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007918
Zhen Kongbcdeda22018-11-16 13:50:51 -08007919 if (free_private_data)
7920 kfree(data);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007921 return ret;
7922}
7923
7924#ifdef CONFIG_COMPAT
7925#include "compat_qseecom.c"
7926#else
7927#define compat_qseecom_ioctl NULL
7928#endif
7929
7930static const struct file_operations qseecom_fops = {
7931 .owner = THIS_MODULE,
7932 .unlocked_ioctl = qseecom_ioctl,
7933 .compat_ioctl = compat_qseecom_ioctl,
7934 .open = qseecom_open,
7935 .release = qseecom_release
7936};
7937
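/*
 * Look up the crypto-engine clocks (core src, core, iface, bus) for either
 * the QSEE or CE_DRV instance and set the core src clock to ce_opp_freq_hz.
 * When no_clock_support is set, all clock handles are left NULL.
 */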
7938static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7939{
7940 int rc = 0;
7941 struct device *pdev;
7942 struct qseecom_clk *qclk;
7943 char *core_clk_src = NULL;
7944 char *core_clk = NULL;
7945 char *iface_clk = NULL;
7946 char *bus_clk = NULL;
7947
7948 switch (ce) {
7949 case CLK_QSEE: {
7950 core_clk_src = "core_clk_src";
7951 core_clk = "core_clk";
7952 iface_clk = "iface_clk";
7953 bus_clk = "bus_clk";
7954 qclk = &qseecom.qsee;
7955 qclk->instance = CLK_QSEE;
7956 break;
7957 	}
7958 case CLK_CE_DRV: {
7959 core_clk_src = "ce_drv_core_clk_src";
7960 core_clk = "ce_drv_core_clk";
7961 iface_clk = "ce_drv_iface_clk";
7962 bus_clk = "ce_drv_bus_clk";
7963 qclk = &qseecom.ce_drv;
7964 qclk->instance = CLK_CE_DRV;
7965 break;
7966 	}
7967 default:
7968 pr_err("Invalid ce hw instance: %d!\n", ce);
7969 return -EIO;
7970 }
7971
7972 if (qseecom.no_clock_support) {
7973 qclk->ce_core_clk = NULL;
7974 qclk->ce_clk = NULL;
7975 qclk->ce_bus_clk = NULL;
7976 qclk->ce_core_src_clk = NULL;
7977 return 0;
7978 }
7979
7980 pdev = qseecom.pdev;
7981
7982 /* Get CE3 src core clk. */
7983 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7984 if (!IS_ERR(qclk->ce_core_src_clk)) {
7985 rc = clk_set_rate(qclk->ce_core_src_clk,
7986 qseecom.ce_opp_freq_hz);
7987 if (rc) {
7988 clk_put(qclk->ce_core_src_clk);
7989 qclk->ce_core_src_clk = NULL;
7990 			pr_err("Unable to set the core src clk @%uMHz.\n",
7991 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7992 return -EIO;
7993 }
7994 } else {
7995 pr_warn("Unable to get CE core src clk, set to NULL\n");
7996 qclk->ce_core_src_clk = NULL;
7997 }
7998
7999 /* Get CE core clk */
8000 qclk->ce_core_clk = clk_get(pdev, core_clk);
8001 if (IS_ERR(qclk->ce_core_clk)) {
8002 rc = PTR_ERR(qclk->ce_core_clk);
8003 pr_err("Unable to get CE core clk\n");
8004 if (qclk->ce_core_src_clk != NULL)
8005 clk_put(qclk->ce_core_src_clk);
8006 return -EIO;
8007 }
8008
8009 /* Get CE Interface clk */
8010 qclk->ce_clk = clk_get(pdev, iface_clk);
8011 if (IS_ERR(qclk->ce_clk)) {
8012 rc = PTR_ERR(qclk->ce_clk);
8013 pr_err("Unable to get CE interface clk\n");
8014 if (qclk->ce_core_src_clk != NULL)
8015 clk_put(qclk->ce_core_src_clk);
8016 clk_put(qclk->ce_core_clk);
8017 return -EIO;
8018 }
8019
8020 /* Get CE AXI clk */
8021 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
8022 if (IS_ERR(qclk->ce_bus_clk)) {
8023 rc = PTR_ERR(qclk->ce_bus_clk);
8024 pr_err("Unable to get CE BUS interface clk\n");
8025 if (qclk->ce_core_src_clk != NULL)
8026 clk_put(qclk->ce_core_src_clk);
8027 clk_put(qclk->ce_core_clk);
8028 clk_put(qclk->ce_clk);
8029 return -EIO;
8030 }
8031
8032 return rc;
8033}
8034
8035static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
8036{
8037 struct qseecom_clk *qclk;
8038
8039 if (ce == CLK_QSEE)
8040 qclk = &qseecom.qsee;
8041 else
8042 qclk = &qseecom.ce_drv;
8043
8044 if (qclk->ce_clk != NULL) {
8045 clk_put(qclk->ce_clk);
8046 qclk->ce_clk = NULL;
8047 }
8048 if (qclk->ce_core_clk != NULL) {
8049 clk_put(qclk->ce_core_clk);
8050 qclk->ce_core_clk = NULL;
8051 }
8052 if (qclk->ce_bus_clk != NULL) {
8053 clk_put(qclk->ce_bus_clk);
8054 qclk->ce_bus_clk = NULL;
8055 }
8056 if (qclk->ce_core_src_clk != NULL) {
8057 clk_put(qclk->ce_core_src_clk);
8058 qclk->ce_core_src_clk = NULL;
8059 }
8060 qclk->instance = CLK_INVALID;
8061}
8062
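/*
 * Parse the crypto-engine configuration from the device tree. Newer targets
 * provide per-unit tables (qcom,full-disk-encrypt-info and
 * qcom,per-file-encrypt-info); older ones fall back to the legacy
 * disk/file-encrypt-pipe-pair and hlos-ce-hw-instance properties. The result
 * is the qseecom.ce_info fde/pfe unit lists consumed by the get/free/query
 * CE info ioctls.
 */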
8063static int qseecom_retrieve_ce_data(struct platform_device *pdev)
8064{
8065 int rc = 0;
8066 uint32_t hlos_num_ce_hw_instances;
8067 uint32_t disk_encrypt_pipe;
8068 uint32_t file_encrypt_pipe;
Zhen Kongffec45c2017-10-18 14:05:53 -07008069 uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008070 int i;
8071 const int *tbl;
8072 int size;
8073 int entry;
8074 struct qseecom_crypto_info *pfde_tbl = NULL;
8075 struct qseecom_crypto_info *p;
8076 int tbl_size;
8077 int j;
8078 bool old_db = true;
8079 struct qseecom_ce_info_use *pce_info_use;
8080 uint32_t *unit_tbl = NULL;
8081 int total_units = 0;
8082 struct qseecom_ce_pipe_entry *pce_entry;
8083
8084 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
8085 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
8086
8087 if (of_property_read_u32((&pdev->dev)->of_node,
8088 "qcom,qsee-ce-hw-instance",
8089 &qseecom.ce_info.qsee_ce_hw_instance)) {
8090 pr_err("Fail to get qsee ce hw instance information.\n");
8091 rc = -EINVAL;
8092 goto out;
8093 } else {
8094 pr_debug("qsee-ce-hw-instance=0x%x\n",
8095 qseecom.ce_info.qsee_ce_hw_instance);
8096 }
8097
8098 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
8099 "qcom,support-fde");
8100 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
8101 "qcom,support-pfe");
8102
8103 if (!qseecom.support_pfe && !qseecom.support_fde) {
8104 		pr_warn("Device does not support PFE/FDE\n");
8105 goto out;
8106 }
8107
8108 if (qseecom.support_fde)
8109 tbl = of_get_property((&pdev->dev)->of_node,
8110 "qcom,full-disk-encrypt-info", &size);
8111 else
8112 tbl = NULL;
8113 if (tbl) {
8114 old_db = false;
8115 if (size % sizeof(struct qseecom_crypto_info)) {
8116 pr_err("full-disk-encrypt-info tbl size(%d)\n",
8117 size);
8118 rc = -EINVAL;
8119 goto out;
8120 }
8121 tbl_size = size / sizeof
8122 (struct qseecom_crypto_info);
8123
8124 pfde_tbl = kzalloc(size, GFP_KERNEL);
8125 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8126 total_units = 0;
8127
8128 if (!pfde_tbl || !unit_tbl) {
8129 pr_err("failed to alloc memory\n");
8130 rc = -ENOMEM;
8131 goto out;
8132 }
8133 if (of_property_read_u32_array((&pdev->dev)->of_node,
8134 "qcom,full-disk-encrypt-info",
8135 (u32 *)pfde_tbl, size/sizeof(u32))) {
8136 pr_err("failed to read full-disk-encrypt-info tbl\n");
8137 rc = -EINVAL;
8138 goto out;
8139 }
8140
8141 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8142 for (j = 0; j < total_units; j++) {
8143 if (p->unit_num == *(unit_tbl + j))
8144 break;
8145 }
8146 if (j == total_units) {
8147 *(unit_tbl + total_units) = p->unit_num;
8148 total_units++;
8149 }
8150 }
8151
8152 qseecom.ce_info.num_fde = total_units;
8153 pce_info_use = qseecom.ce_info.fde = kcalloc(
8154 total_units, sizeof(struct qseecom_ce_info_use),
8155 GFP_KERNEL);
8156 if (!pce_info_use) {
8157 pr_err("failed to alloc memory\n");
8158 rc = -ENOMEM;
8159 goto out;
8160 }
8161
8162 for (j = 0; j < total_units; j++, pce_info_use++) {
8163 pce_info_use->unit_num = *(unit_tbl + j);
8164 pce_info_use->alloc = false;
8165 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8166 pce_info_use->num_ce_pipe_entries = 0;
8167 pce_info_use->ce_pipe_entry = NULL;
8168 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8169 if (p->unit_num == pce_info_use->unit_num)
8170 pce_info_use->num_ce_pipe_entries++;
8171 }
8172
8173 entry = pce_info_use->num_ce_pipe_entries;
8174 pce_entry = pce_info_use->ce_pipe_entry =
8175 kcalloc(entry,
8176 sizeof(struct qseecom_ce_pipe_entry),
8177 GFP_KERNEL);
8178 if (pce_entry == NULL) {
8179 pr_err("failed to alloc memory\n");
8180 rc = -ENOMEM;
8181 goto out;
8182 }
8183
8184 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8185 if (p->unit_num == pce_info_use->unit_num) {
8186 pce_entry->ce_num = p->ce;
8187 pce_entry->ce_pipe_pair =
8188 p->pipe_pair;
8189 pce_entry->valid = true;
8190 pce_entry++;
8191 }
8192 }
8193 }
8194 kfree(unit_tbl);
8195 unit_tbl = NULL;
8196 kfree(pfde_tbl);
8197 pfde_tbl = NULL;
8198 }
8199
8200 if (qseecom.support_pfe)
8201 tbl = of_get_property((&pdev->dev)->of_node,
8202 "qcom,per-file-encrypt-info", &size);
8203 else
8204 tbl = NULL;
8205 if (tbl) {
8206 old_db = false;
8207 if (size % sizeof(struct qseecom_crypto_info)) {
8208 pr_err("per-file-encrypt-info tbl size(%d)\n",
8209 size);
8210 rc = -EINVAL;
8211 goto out;
8212 }
8213 tbl_size = size / sizeof
8214 (struct qseecom_crypto_info);
8215
8216 pfde_tbl = kzalloc(size, GFP_KERNEL);
8217 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8218 total_units = 0;
8219 if (!pfde_tbl || !unit_tbl) {
8220 pr_err("failed to alloc memory\n");
8221 rc = -ENOMEM;
8222 goto out;
8223 }
8224 if (of_property_read_u32_array((&pdev->dev)->of_node,
8225 "qcom,per-file-encrypt-info",
8226 (u32 *)pfde_tbl, size/sizeof(u32))) {
8227 pr_err("failed to read per-file-encrypt-info tbl\n");
8228 rc = -EINVAL;
8229 goto out;
8230 }
8231
8232 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8233 for (j = 0; j < total_units; j++) {
8234 if (p->unit_num == *(unit_tbl + j))
8235 break;
8236 }
8237 if (j == total_units) {
8238 *(unit_tbl + total_units) = p->unit_num;
8239 total_units++;
8240 }
8241 }
8242
8243 qseecom.ce_info.num_pfe = total_units;
8244 pce_info_use = qseecom.ce_info.pfe = kcalloc(
8245 total_units, sizeof(struct qseecom_ce_info_use),
8246 GFP_KERNEL);
8247 if (!pce_info_use) {
8248 pr_err("failed to alloc memory\n");
8249 rc = -ENOMEM;
8250 goto out;
8251 }
8252
8253 for (j = 0; j < total_units; j++, pce_info_use++) {
8254 pce_info_use->unit_num = *(unit_tbl + j);
8255 pce_info_use->alloc = false;
8256 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8257 pce_info_use->num_ce_pipe_entries = 0;
8258 pce_info_use->ce_pipe_entry = NULL;
8259 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8260 if (p->unit_num == pce_info_use->unit_num)
8261 pce_info_use->num_ce_pipe_entries++;
8262 }
8263
8264 entry = pce_info_use->num_ce_pipe_entries;
8265 pce_entry = pce_info_use->ce_pipe_entry =
8266 kcalloc(entry,
8267 sizeof(struct qseecom_ce_pipe_entry),
8268 GFP_KERNEL);
8269 if (pce_entry == NULL) {
8270 pr_err("failed to alloc memory\n");
8271 rc = -ENOMEM;
8272 goto out;
8273 }
8274
8275 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8276 if (p->unit_num == pce_info_use->unit_num) {
8277 pce_entry->ce_num = p->ce;
8278 pce_entry->ce_pipe_pair =
8279 p->pipe_pair;
8280 pce_entry->valid = true;
8281 pce_entry++;
8282 }
8283 }
8284 }
8285 kfree(unit_tbl);
8286 unit_tbl = NULL;
8287 kfree(pfde_tbl);
8288 pfde_tbl = NULL;
8289 }
8290
8291 if (!old_db)
8292 goto out1;
8293
8294 if (of_property_read_bool((&pdev->dev)->of_node,
8295 "qcom,support-multiple-ce-hw-instance")) {
8296 if (of_property_read_u32((&pdev->dev)->of_node,
8297 "qcom,hlos-num-ce-hw-instances",
8298 &hlos_num_ce_hw_instances)) {
8299 pr_err("Fail: get hlos number of ce hw instance\n");
8300 rc = -EINVAL;
8301 goto out;
8302 }
8303 } else {
8304 hlos_num_ce_hw_instances = 1;
8305 }
8306
8307 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8308 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8309 MAX_CE_PIPE_PAIR_PER_UNIT);
8310 rc = -EINVAL;
8311 goto out;
8312 }
8313
8314 if (of_property_read_u32_array((&pdev->dev)->of_node,
8315 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8316 hlos_num_ce_hw_instances)) {
8317 pr_err("Fail: get hlos ce hw instance info\n");
8318 rc = -EINVAL;
8319 goto out;
8320 }
8321
8322 if (qseecom.support_fde) {
8323 pce_info_use = qseecom.ce_info.fde =
8324 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8325 if (!pce_info_use) {
8326 pr_err("failed to alloc memory\n");
8327 rc = -ENOMEM;
8328 goto out;
8329 }
8330 /* by default for old db */
8331 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8332 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8333 pce_info_use->alloc = false;
8334 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8335 pce_info_use->ce_pipe_entry = NULL;
8336 if (of_property_read_u32((&pdev->dev)->of_node,
8337 "qcom,disk-encrypt-pipe-pair",
8338 &disk_encrypt_pipe)) {
8339 pr_err("Fail to get FDE pipe information.\n");
8340 rc = -EINVAL;
8341 goto out;
8342 } else {
8343 			pr_debug("disk-encrypt-pipe-pair=0x%x\n",
8344 disk_encrypt_pipe);
8345 }
8346 entry = pce_info_use->num_ce_pipe_entries =
8347 hlos_num_ce_hw_instances;
8348 pce_entry = pce_info_use->ce_pipe_entry =
8349 kcalloc(entry,
8350 sizeof(struct qseecom_ce_pipe_entry),
8351 GFP_KERNEL);
8352 if (pce_entry == NULL) {
8353 pr_err("failed to alloc memory\n");
8354 rc = -ENOMEM;
8355 goto out;
8356 }
8357 for (i = 0; i < entry; i++) {
8358 pce_entry->ce_num = hlos_ce_hw_instance[i];
8359 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8360 pce_entry->valid = 1;
8361 pce_entry++;
8362 }
8363 } else {
8364 		pr_warn("Device does not support FDE\n");
8365 disk_encrypt_pipe = 0xff;
8366 }
8367 if (qseecom.support_pfe) {
8368 pce_info_use = qseecom.ce_info.pfe =
8369 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8370 if (!pce_info_use) {
8371 pr_err("failed to alloc memory\n");
8372 rc = -ENOMEM;
8373 goto out;
8374 }
8375 /* by default for old db */
8376 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8377 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8378 pce_info_use->alloc = false;
8379 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8380 pce_info_use->ce_pipe_entry = NULL;
8381
8382 if (of_property_read_u32((&pdev->dev)->of_node,
8383 "qcom,file-encrypt-pipe-pair",
8384 &file_encrypt_pipe)) {
8385 pr_err("Fail to get PFE pipe information.\n");
8386 rc = -EINVAL;
8387 goto out;
8388 } else {
8389 			pr_debug("file-encrypt-pipe-pair=0x%x\n",
8390 file_encrypt_pipe);
8391 }
8392 entry = pce_info_use->num_ce_pipe_entries =
8393 hlos_num_ce_hw_instances;
8394 pce_entry = pce_info_use->ce_pipe_entry =
8395 kcalloc(entry,
8396 sizeof(struct qseecom_ce_pipe_entry),
8397 GFP_KERNEL);
8398 if (pce_entry == NULL) {
8399 pr_err("failed to alloc memory\n");
8400 rc = -ENOMEM;
8401 goto out;
8402 }
8403 for (i = 0; i < entry; i++) {
8404 pce_entry->ce_num = hlos_ce_hw_instance[i];
8405 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8406 pce_entry->valid = 1;
8407 pce_entry++;
8408 }
8409 } else {
8410 		pr_warn("Device does not support PFE\n");
8411 file_encrypt_pipe = 0xff;
8412 }
8413
8414out1:
8415 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8416 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8417out:
8418 if (rc) {
8419 if (qseecom.ce_info.fde) {
8420 pce_info_use = qseecom.ce_info.fde;
8421 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8422 pce_entry = pce_info_use->ce_pipe_entry;
8423 kfree(pce_entry);
8424 pce_info_use++;
8425 }
8426 }
8427 kfree(qseecom.ce_info.fde);
8428 qseecom.ce_info.fde = NULL;
8429 if (qseecom.ce_info.pfe) {
8430 pce_info_use = qseecom.ce_info.pfe;
8431 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8432 pce_entry = pce_info_use->ce_pipe_entry;
8433 kfree(pce_entry);
8434 pce_info_use++;
8435 }
8436 }
8437 kfree(qseecom.ce_info.pfe);
8438 qseecom.ce_info.pfe = NULL;
8439 }
8440 kfree(unit_tbl);
8441 kfree(pfde_tbl);
8442 return rc;
8443}
8444
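/*
 * Hand out a CE info unit for the requested usage: reuse the unit already
 * bound to the caller's handle if there is one, otherwise claim a free unit,
 * then copy its pipe entries back to userspace.
 */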
8445static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8446 void __user *argp)
8447{
8448 struct qseecom_ce_info_req req;
8449 struct qseecom_ce_info_req *pinfo = &req;
8450 int ret = 0;
8451 int i;
8452 unsigned int entries;
8453 struct qseecom_ce_info_use *pce_info_use, *p;
8454 int total = 0;
8455 bool found = false;
8456 struct qseecom_ce_pipe_entry *pce_entry;
8457
8458 ret = copy_from_user(pinfo, argp,
8459 sizeof(struct qseecom_ce_info_req));
8460 if (ret) {
8461 pr_err("copy_from_user failed\n");
8462 return ret;
8463 }
8464
8465 switch (pinfo->usage) {
8466 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8467 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8468 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8469 if (qseecom.support_fde) {
8470 p = qseecom.ce_info.fde;
8471 total = qseecom.ce_info.num_fde;
8472 } else {
8473 pr_err("system does not support fde\n");
8474 return -EINVAL;
8475 }
8476 break;
8477 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8478 if (qseecom.support_pfe) {
8479 p = qseecom.ce_info.pfe;
8480 total = qseecom.ce_info.num_pfe;
8481 } else {
8482 pr_err("system does not support pfe\n");
8483 return -EINVAL;
8484 }
8485 break;
8486 default:
8487 pr_err("unsupported usage %d\n", pinfo->usage);
8488 return -EINVAL;
8489 }
8490
8491 pce_info_use = NULL;
8492 for (i = 0; i < total; i++) {
8493 if (!p->alloc)
8494 pce_info_use = p;
8495 else if (!memcmp(p->handle, pinfo->handle,
8496 MAX_CE_INFO_HANDLE_SIZE)) {
8497 pce_info_use = p;
8498 found = true;
8499 break;
8500 }
8501 p++;
8502 }
8503
8504 if (pce_info_use == NULL)
8505 return -EBUSY;
8506
8507 pinfo->unit_num = pce_info_use->unit_num;
8508 if (!pce_info_use->alloc) {
8509 pce_info_use->alloc = true;
8510 memcpy(pce_info_use->handle,
8511 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8512 }
8513 if (pce_info_use->num_ce_pipe_entries >
8514 MAX_CE_PIPE_PAIR_PER_UNIT)
8515 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8516 else
8517 entries = pce_info_use->num_ce_pipe_entries;
8518 pinfo->num_ce_pipe_entries = entries;
8519 pce_entry = pce_info_use->ce_pipe_entry;
8520 for (i = 0; i < entries; i++, pce_entry++)
8521 pinfo->ce_pipe_entry[i] = *pce_entry;
8522 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8523 pinfo->ce_pipe_entry[i].valid = 0;
8524
8525 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8526 pr_err("copy_to_user failed\n");
8527 ret = -EFAULT;
8528 }
8529 return ret;
8530}
8531
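/* Release the CE info unit previously bound to the caller's handle. */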
8532static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8533 void __user *argp)
8534{
8535 struct qseecom_ce_info_req req;
8536 struct qseecom_ce_info_req *pinfo = &req;
8537 int ret = 0;
8538 struct qseecom_ce_info_use *p;
8539 int total = 0;
8540 int i;
8541 bool found = false;
8542
8543 ret = copy_from_user(pinfo, argp,
8544 sizeof(struct qseecom_ce_info_req));
8545 if (ret)
8546 return ret;
8547
8548 switch (pinfo->usage) {
8549 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8550 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8551 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8552 if (qseecom.support_fde) {
8553 p = qseecom.ce_info.fde;
8554 total = qseecom.ce_info.num_fde;
8555 } else {
8556 pr_err("system does not support fde\n");
8557 return -EINVAL;
8558 }
8559 break;
8560 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8561 if (qseecom.support_pfe) {
8562 p = qseecom.ce_info.pfe;
8563 total = qseecom.ce_info.num_pfe;
8564 } else {
8565 pr_err("system does not support pfe\n");
8566 return -EINVAL;
8567 }
8568 break;
8569 default:
8570 pr_err("unsupported usage %d\n", pinfo->usage);
8571 return -EINVAL;
8572 }
8573
8574 for (i = 0; i < total; i++) {
8575 if (p->alloc &&
8576 !memcmp(p->handle, pinfo->handle,
8577 MAX_CE_INFO_HANDLE_SIZE)) {
8578 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8579 p->alloc = false;
8580 found = true;
8581 break;
8582 }
8583 p++;
8584 }
8585 return ret;
8586}
8587
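/*
 * Read-only variant of qseecom_get_ce_info(): report the unit number and
 * pipe-pair entries bound to an already-claimed handle without claiming a
 * new unit.
 */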
8588static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8589 void __user *argp)
8590{
8591 struct qseecom_ce_info_req req;
8592 struct qseecom_ce_info_req *pinfo = &req;
8593 int ret = 0;
8594 int i;
8595 unsigned int entries;
8596 struct qseecom_ce_info_use *pce_info_use, *p;
8597 int total = 0;
8598 bool found = false;
8599 struct qseecom_ce_pipe_entry *pce_entry;
8600
8601 ret = copy_from_user(pinfo, argp,
8602 sizeof(struct qseecom_ce_info_req));
8603 if (ret)
8604		return -EFAULT;
8605
8606 switch (pinfo->usage) {
8607 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8608 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8609 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8610 if (qseecom.support_fde) {
8611 p = qseecom.ce_info.fde;
8612 total = qseecom.ce_info.num_fde;
8613 } else {
8614 pr_err("system does not support fde\n");
8615 return -EINVAL;
8616 }
8617 break;
8618 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8619 if (qseecom.support_pfe) {
8620 p = qseecom.ce_info.pfe;
8621 total = qseecom.ce_info.num_pfe;
8622 } else {
8623 pr_err("system does not support pfe\n");
8624 return -EINVAL;
8625 }
8626 break;
8627 default:
8628 pr_err("unsupported usage %d\n", pinfo->usage);
8629 return -EINVAL;
8630 }
8631
8632 pce_info_use = NULL;
8633 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8634 pinfo->num_ce_pipe_entries = 0;
8635 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8636 pinfo->ce_pipe_entry[i].valid = 0;
8637
8638 for (i = 0; i < total; i++) {
8639
8640 if (p->alloc && !memcmp(p->handle,
8641 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8642 pce_info_use = p;
8643 found = true;
8644 break;
8645 }
8646 p++;
8647 }
8648 if (!pce_info_use)
8649 goto out;
8650 pinfo->unit_num = pce_info_use->unit_num;
8651 if (pce_info_use->num_ce_pipe_entries >
8652 MAX_CE_PIPE_PAIR_PER_UNIT)
8653 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8654 else
8655 entries = pce_info_use->num_ce_pipe_entries;
8656 pinfo->num_ce_pipe_entries = entries;
8657 pce_entry = pce_info_use->ce_pipe_entry;
8658 for (i = 0; i < entries; i++, pce_entry++)
8659 pinfo->ce_pipe_entry[i] = *pce_entry;
8660 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8661 pinfo->ce_pipe_entry[i].valid = 0;
8662out:
8663 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8664 pr_err("copy_to_user failed\n");
8665 ret = -EFAULT;
8666 }
8667 return ret;
8668}
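/*
 * The three CE-info handlers above are invoked from qseecom_ioctl(); each
 * takes a struct qseecom_ce_info_req from userspace and operates on the
 * FDE/PFE tables built by qseecom_retrieve_ce_data() at probe time.
 */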
8669
8670/*
8671 * Check the TZ whitelist feature version; if it is below 1.0.0,
8672 * the whitelist feature is not supported.
8673 */
8674static int qseecom_check_whitelist_feature(void)
8675{
8676 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8677
8678 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8679}
8680
8681static int qseecom_probe(struct platform_device *pdev)
8682{
8683 int rc;
8684 int i;
8685 uint32_t feature = 10;
8686 struct device *class_dev;
8687 struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
8688 struct qseecom_command_scm_resp resp;
8689 struct qseecom_ce_info_use *pce_info_use = NULL;
8690
8691 qseecom.qsee_bw_count = 0;
8692 qseecom.qsee_perf_client = 0;
8693 qseecom.qsee_sfpb_bw_count = 0;
8694
8695 qseecom.qsee.ce_core_clk = NULL;
8696 qseecom.qsee.ce_clk = NULL;
8697 qseecom.qsee.ce_core_src_clk = NULL;
8698 qseecom.qsee.ce_bus_clk = NULL;
8699
8700 qseecom.cumulative_mode = 0;
8701 qseecom.current_mode = INACTIVE;
8702 qseecom.support_bus_scaling = false;
8703 qseecom.support_fde = false;
8704 qseecom.support_pfe = false;
8705
8706 qseecom.ce_drv.ce_core_clk = NULL;
8707 qseecom.ce_drv.ce_clk = NULL;
8708 qseecom.ce_drv.ce_core_src_clk = NULL;
8709 qseecom.ce_drv.ce_bus_clk = NULL;
8710 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8711
8712 qseecom.app_block_ref_cnt = 0;
8713 init_waitqueue_head(&qseecom.app_block_wq);
8714 qseecom.whitelist_support = true;
8715
8716 rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
8717 if (rc < 0) {
8718 pr_err("alloc_chrdev_region failed %d\n", rc);
8719 return rc;
8720 }
8721
8722 driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
8723 if (IS_ERR(driver_class)) {
8724 rc = -ENOMEM;
8725 pr_err("class_create failed %d\n", rc);
8726 goto exit_unreg_chrdev_region;
8727 }
8728
8729 class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
8730 QSEECOM_DEV);
8731 if (IS_ERR(class_dev)) {
8732		rc = -ENOMEM;
8733		pr_err("class_device_create failed %d\n", rc);
8734 goto exit_destroy_class;
8735 }
8736
8737 cdev_init(&qseecom.cdev, &qseecom_fops);
8738 qseecom.cdev.owner = THIS_MODULE;
8739
8740 rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
8741 if (rc < 0) {
8742 pr_err("cdev_add failed %d\n", rc);
8743 goto exit_destroy_device;
8744 }
8745
8746 INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
8747	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
8748	spin_lock_init(&qseecom.registered_app_list_lock);
8749	INIT_LIST_HEAD(&qseecom.unregister_lsnr_pending_list_head);
8750	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
8751	spin_lock_init(&qseecom.registered_kclient_list_lock);
8752	init_waitqueue_head(&qseecom.send_resp_wq);
8753	init_waitqueue_head(&qseecom.register_lsnr_pending_wq);
8754	init_waitqueue_head(&qseecom.unregister_lsnr_kthread_wq);
8755	qseecom.send_resp_flag = 0;
8756
8757	qseecom.qsee_version = QSEEE_VERSION_00;
8758	mutex_lock(&app_access_lock);
8759	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
8760			&resp, sizeof(resp));
8761	mutex_unlock(&app_access_lock);
8762	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
8763 if (rc) {
8764 pr_err("Failed to get QSEE version info %d\n", rc);
8765 goto exit_del_cdev;
8766 }
8767 qseecom.qsee_version = resp.result;
8768 qseecom.qseos_version = QSEOS_VERSION_14;
8769 qseecom.commonlib_loaded = false;
8770 qseecom.commonlib64_loaded = false;
8771 qseecom.pdev = class_dev;
8772 /* Create ION msm client */
8773 qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
8774 if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
8775 pr_err("Ion client cannot be created\n");
8776 rc = -ENOMEM;
8777 goto exit_del_cdev;
8778 }
8779
8780 /* register client for bus scaling */
8781 if (pdev->dev.of_node) {
8782 qseecom.pdev->of_node = pdev->dev.of_node;
8783 qseecom.support_bus_scaling =
8784 of_property_read_bool((&pdev->dev)->of_node,
8785 "qcom,support-bus-scaling");
8786 rc = qseecom_retrieve_ce_data(pdev);
8787 if (rc)
8788 goto exit_destroy_ion_client;
8789 qseecom.appsbl_qseecom_support =
8790 of_property_read_bool((&pdev->dev)->of_node,
8791 "qcom,appsbl-qseecom-support");
8792 pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
8793 qseecom.appsbl_qseecom_support);
8794
8795 qseecom.commonlib64_loaded =
8796 of_property_read_bool((&pdev->dev)->of_node,
8797 "qcom,commonlib64-loaded-by-uefi");
8798 pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
8799 qseecom.commonlib64_loaded);
8800 qseecom.fde_key_size =
8801 of_property_read_bool((&pdev->dev)->of_node,
8802 "qcom,fde-key-size");
8803 qseecom.no_clock_support =
8804 of_property_read_bool((&pdev->dev)->of_node,
8805 "qcom,no-clock-support");
8806		if (qseecom.no_clock_support) {
8807			pr_info("qseecom clocks handled by other subsystem\n");
8808		} else {
8809			pr_info("no-clock-support=0x%x",
8810				qseecom.no_clock_support);
8811		}
8812
8813 if (of_property_read_u32((&pdev->dev)->of_node,
8814 "qcom,qsee-reentrancy-support",
8815 &qseecom.qsee_reentrancy_support)) {
8816 pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
8817 qseecom.qsee_reentrancy_support = 0;
8818 } else {
8819 pr_warn("qseecom.qsee_reentrancy_support = %d\n",
8820 qseecom.qsee_reentrancy_support);
8821 }
8822
8823		qseecom.enable_key_wrap_in_ks =
8824 of_property_read_bool((&pdev->dev)->of_node,
8825 "qcom,enable-key-wrap-in-ks");
8826 if (qseecom.enable_key_wrap_in_ks) {
8827 pr_warn("qseecom.enable_key_wrap_in_ks = %d\n",
8828 qseecom.enable_key_wrap_in_ks);
8829 }
8830
8831		/*
8832		 * The qseecom bus scaling flag cannot be enabled when the
8833		 * crypto clock is not handled by HLOS.
8834		 */
8835 if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
8836 pr_err("support_bus_scaling flag can not be enabled.\n");
8837 rc = -EINVAL;
8838 goto exit_destroy_ion_client;
8839 }
8840
8841 if (of_property_read_u32((&pdev->dev)->of_node,
8842 "qcom,ce-opp-freq",
8843 &qseecom.ce_opp_freq_hz)) {
8844 pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
8845 qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
8846 }
8847 rc = __qseecom_init_clk(CLK_QSEE);
8848 if (rc)
8849 goto exit_destroy_ion_client;
8850
8851 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8852 (qseecom.support_pfe || qseecom.support_fde)) {
8853 rc = __qseecom_init_clk(CLK_CE_DRV);
8854 if (rc) {
8855 __qseecom_deinit_clk(CLK_QSEE);
8856 goto exit_destroy_ion_client;
8857 }
8858 } else {
8859 struct qseecom_clk *qclk;
8860
8861 qclk = &qseecom.qsee;
8862 qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
8863 qseecom.ce_drv.ce_clk = qclk->ce_clk;
8864 qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
8865 qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
8866 }
8867
8868 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8869 msm_bus_cl_get_pdata(pdev);
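		/*
		 * On QSEE 2.0+ targets where the secure app region is not
		 * already protected or handled by appsbl, notify QSEE of the
		 * region's address and size via an SCM call; the 32-bit or
		 * 64-bit request format is chosen by QSEE version.
		 */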
8870 if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
8871 (!qseecom.is_apps_region_protected &&
8872 !qseecom.appsbl_qseecom_support)) {
8873 struct resource *resource = NULL;
8874 struct qsee_apps_region_info_ireq req;
8875 struct qsee_apps_region_info_64bit_ireq req_64bit;
8876 struct qseecom_command_scm_resp resp;
8877 void *cmd_buf = NULL;
8878 size_t cmd_len;
8879
8880 resource = platform_get_resource_byname(pdev,
8881 IORESOURCE_MEM, "secapp-region");
8882 if (resource) {
8883 if (qseecom.qsee_version < QSEE_VERSION_40) {
8884 req.qsee_cmd_id =
8885 QSEOS_APP_REGION_NOTIFICATION;
8886 req.addr = (uint32_t)resource->start;
8887 req.size = resource_size(resource);
8888 cmd_buf = (void *)&req;
8889 cmd_len = sizeof(struct
8890 qsee_apps_region_info_ireq);
8891 pr_warn("secure app region addr=0x%x size=0x%x",
8892 req.addr, req.size);
8893 } else {
8894 req_64bit.qsee_cmd_id =
8895 QSEOS_APP_REGION_NOTIFICATION;
8896 req_64bit.addr = resource->start;
8897 req_64bit.size = resource_size(
8898 resource);
8899 cmd_buf = (void *)&req_64bit;
8900 cmd_len = sizeof(struct
8901 qsee_apps_region_info_64bit_ireq);
8902 pr_warn("secure app region addr=0x%llx size=0x%x",
8903 req_64bit.addr, req_64bit.size);
8904 }
8905 } else {
8906 pr_err("Fail to get secure app region info\n");
8907 rc = -EINVAL;
8908 goto exit_deinit_clock;
8909 }
8910 rc = __qseecom_enable_clk(CLK_QSEE);
8911 if (rc) {
8912 pr_err("CLK_QSEE enabling failed (%d)\n", rc);
8913 rc = -EIO;
8914 goto exit_deinit_clock;
8915 }
8916			mutex_lock(&app_access_lock);
8917			rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
8918					cmd_buf, cmd_len,
8919					&resp, sizeof(resp));
8920			mutex_unlock(&app_access_lock);
8921			__qseecom_disable_clk(CLK_QSEE);
8922 if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
8923 pr_err("send secapp reg fail %d resp.res %d\n",
8924 rc, resp.result);
8925 rc = -EINVAL;
8926 goto exit_deinit_clock;
8927 }
8928 }
8929		/*
8930		 * By default, appsbl loads only cmnlib. If an OEM changes appsbl to
8931		 * load cmnlib64 too while the cmnlib64 image is not present in
8932		 * non_hlos.bin, please set "qseecom.commonlib64_loaded = true" here too.
8933		 */
8934 if (qseecom.is_apps_region_protected ||
8935 qseecom.appsbl_qseecom_support)
8936 qseecom.commonlib_loaded = true;
8937 } else {
8938 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8939 pdev->dev.platform_data;
8940 }
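	/*
	 * With bus scaling enabled, a timer/work pair drops the bandwidth
	 * vote back to INACTIVE after a period of crypto inactivity.
	 */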
8941 if (qseecom.support_bus_scaling) {
8942 init_timer(&(qseecom.bw_scale_down_timer));
8943 INIT_WORK(&qseecom.bw_inactive_req_ws,
8944 qseecom_bw_inactive_req_work);
8945 qseecom.bw_scale_down_timer.function =
8946 qseecom_scale_bus_bandwidth_timer_callback;
8947 }
8948 qseecom.timer_running = false;
8949 qseecom.qsee_perf_client = msm_bus_scale_register_client(
8950 qseecom_platform_support);
8951
8952 qseecom.whitelist_support = qseecom_check_whitelist_feature();
8953 pr_warn("qseecom.whitelist_support = %d\n",
8954 qseecom.whitelist_support);
8955
8956 if (!qseecom.qsee_perf_client)
8957 pr_err("Unable to register bus client\n");
8958
8959	/* create a kthread to process pending listener unregister tasks */
8960 qseecom.unregister_lsnr_kthread_task = kthread_run(
8961 __qseecom_unregister_listener_kthread_func,
8962 NULL, "qseecom-unreg-lsnr");
8963 if (IS_ERR(qseecom.unregister_lsnr_kthread_task)) {
8964 pr_err("failed to create kthread to unregister listener\n");
8965 rc = -EINVAL;
8966 goto exit_deinit_clock;
8967 }
8968 atomic_set(&qseecom.unregister_lsnr_kthread_state,
8969 LSNR_UNREG_KT_SLEEP);
8970	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
8971 return 0;
8972
8973exit_deinit_clock:
8974 __qseecom_deinit_clk(CLK_QSEE);
8975 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8976 (qseecom.support_pfe || qseecom.support_fde))
8977 __qseecom_deinit_clk(CLK_CE_DRV);
8978exit_destroy_ion_client:
8979 if (qseecom.ce_info.fde) {
8980 pce_info_use = qseecom.ce_info.fde;
8981 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8982 kzfree(pce_info_use->ce_pipe_entry);
8983 pce_info_use++;
8984 }
8985 kfree(qseecom.ce_info.fde);
8986 }
8987 if (qseecom.ce_info.pfe) {
8988 pce_info_use = qseecom.ce_info.pfe;
8989 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8990 kzfree(pce_info_use->ce_pipe_entry);
8991 pce_info_use++;
8992 }
8993 kfree(qseecom.ce_info.pfe);
8994 }
8995 ion_client_destroy(qseecom.ion_clnt);
8996exit_del_cdev:
8997 cdev_del(&qseecom.cdev);
8998exit_destroy_device:
8999 device_destroy(driver_class, qseecom_device_no);
9000exit_destroy_class:
9001 class_destroy(driver_class);
9002exit_unreg_chrdev_region:
9003 unregister_chrdev_region(qseecom_device_no, 1);
9004 return rc;
9005}
9006
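/*
 * Platform remove: unload any remaining kernel clients and the common
 * library, drop the bus-scaling vote, free the CE info tables, release the
 * crypto clocks, stop the listener-unregister kthread, and tear down the
 * character device.
 */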
9007static int qseecom_remove(struct platform_device *pdev)
9008{
9009 struct qseecom_registered_kclient_list *kclient = NULL;
9010	struct qseecom_registered_kclient_list *kclient_tmp = NULL;
9011	unsigned long flags = 0;
9012 int ret = 0;
9013 int i;
9014 struct qseecom_ce_pipe_entry *pce_entry;
9015 struct qseecom_ce_info_use *pce_info_use;
9016
9017 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
9018 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
9019
9020	list_for_each_entry_safe(kclient, kclient_tmp,
9021			&qseecom.registered_kclient_list_head, list) {
9022
9023		/* Break the loop if client handle is NULL */
9024		if (!kclient->handle) {
9025			list_del(&kclient->list);
9026			kzfree(kclient);
9027			break;
9028		}
9029
9030 list_del(&kclient->list);
9031 mutex_lock(&app_access_lock);
9032 ret = qseecom_unload_app(kclient->handle->dev, false);
9033 mutex_unlock(&app_access_lock);
9034 if (!ret) {
9035 kzfree(kclient->handle->dev);
9036 kzfree(kclient->handle);
9037 kzfree(kclient);
9038 }
9039 }
9040
9041	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
9042
9043 if (qseecom.qseos_version > QSEEE_VERSION_00)
9044 qseecom_unload_commonlib_image();
9045
9046 if (qseecom.qsee_perf_client)
9047 msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
9048 0);
9049 if (pdev->dev.platform_data != NULL)
9050 msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
9051
9052 if (qseecom.support_bus_scaling) {
9053 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9054 del_timer_sync(&qseecom.bw_scale_down_timer);
9055 }
9056
9057 if (qseecom.ce_info.fde) {
9058 pce_info_use = qseecom.ce_info.fde;
9059 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
9060 pce_entry = pce_info_use->ce_pipe_entry;
9061 kfree(pce_entry);
9062 pce_info_use++;
9063 }
9064 }
9065 kfree(qseecom.ce_info.fde);
9066 if (qseecom.ce_info.pfe) {
9067 pce_info_use = qseecom.ce_info.pfe;
9068 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
9069 pce_entry = pce_info_use->ce_pipe_entry;
9070 kfree(pce_entry);
9071 pce_info_use++;
9072 }
9073 }
9074 kfree(qseecom.ce_info.pfe);
9075
9076	/* de-initialize crypto engine clocks acquired at probe time */
9077 if (pdev->dev.of_node) {
9078 __qseecom_deinit_clk(CLK_QSEE);
9079 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
9080 (qseecom.support_pfe || qseecom.support_fde))
9081 __qseecom_deinit_clk(CLK_CE_DRV);
9082 }
9083
9084 ion_client_destroy(qseecom.ion_clnt);
9085
9086	kthread_stop(qseecom.unregister_lsnr_kthread_task);
9087
9088	cdev_del(&qseecom.cdev);
9089
9090 device_destroy(driver_class, qseecom_device_no);
9091
9092 class_destroy(driver_class);
9093
9094 unregister_chrdev_region(qseecom_device_no, 1);
9095
9096 return ret;
9097}
9098
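/*
 * Suspend: mark the driver suspended, vote the crypto bus bandwidth down to
 * INACTIVE, and disable any CE clocks that are still held.
 */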
9099static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
9100{
9101 int ret = 0;
9102 struct qseecom_clk *qclk;
9103
9104 qclk = &qseecom.qsee;
9105 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
9106 if (qseecom.no_clock_support)
9107 return 0;
9108
9109 mutex_lock(&qsee_bw_mutex);
9110 mutex_lock(&clk_access_lock);
9111
9112 if (qseecom.current_mode != INACTIVE) {
9113 ret = msm_bus_scale_client_update_request(
9114 qseecom.qsee_perf_client, INACTIVE);
9115 if (ret)
9116 pr_err("Fail to scale down bus\n");
9117 else
9118 qseecom.current_mode = INACTIVE;
9119 }
9120
9121 if (qclk->clk_access_cnt) {
9122 if (qclk->ce_clk != NULL)
9123 clk_disable_unprepare(qclk->ce_clk);
9124 if (qclk->ce_core_clk != NULL)
9125 clk_disable_unprepare(qclk->ce_core_clk);
9126 if (qclk->ce_bus_clk != NULL)
9127 clk_disable_unprepare(qclk->ce_bus_clk);
9128 }
9129
9130 del_timer_sync(&(qseecom.bw_scale_down_timer));
9131 qseecom.timer_running = false;
9132
9133 mutex_unlock(&clk_access_lock);
9134 mutex_unlock(&qsee_bw_mutex);
9135 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9136
9137 return 0;
9138}
9139
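/*
 * Resume: restore the cumulative bus-bandwidth vote, re-enable any CE clocks
 * that were held at suspend time, restart the bandwidth scale-down timer,
 * and mark the driver ready again.
 */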
9140static int qseecom_resume(struct platform_device *pdev)
9141{
9142 int mode = 0;
9143 int ret = 0;
9144 struct qseecom_clk *qclk;
9145
9146 qclk = &qseecom.qsee;
9147 if (qseecom.no_clock_support)
9148 goto exit;
9149
9150 mutex_lock(&qsee_bw_mutex);
9151 mutex_lock(&clk_access_lock);
9152 if (qseecom.cumulative_mode >= HIGH)
9153 mode = HIGH;
9154 else
9155 mode = qseecom.cumulative_mode;
9156
9157 if (qseecom.cumulative_mode != INACTIVE) {
9158 ret = msm_bus_scale_client_update_request(
9159 qseecom.qsee_perf_client, mode);
9160 if (ret)
9161 pr_err("Fail to scale up bus to %d\n", mode);
9162 else
9163 qseecom.current_mode = mode;
9164 }
9165
9166 if (qclk->clk_access_cnt) {
9167 if (qclk->ce_core_clk != NULL) {
9168 ret = clk_prepare_enable(qclk->ce_core_clk);
9169 if (ret) {
9170 pr_err("Unable to enable/prep CE core clk\n");
9171 qclk->clk_access_cnt = 0;
9172 goto err;
9173 }
9174 }
9175 if (qclk->ce_clk != NULL) {
9176 ret = clk_prepare_enable(qclk->ce_clk);
9177 if (ret) {
9178 pr_err("Unable to enable/prep CE iface clk\n");
9179 qclk->clk_access_cnt = 0;
9180 goto ce_clk_err;
9181 }
9182 }
9183 if (qclk->ce_bus_clk != NULL) {
9184 ret = clk_prepare_enable(qclk->ce_bus_clk);
9185 if (ret) {
9186 pr_err("Unable to enable/prep CE bus clk\n");
9187 qclk->clk_access_cnt = 0;
9188 goto ce_bus_clk_err;
9189 }
9190 }
9191 }
9192
9193 if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
9194 qseecom.bw_scale_down_timer.expires = jiffies +
9195 msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
9196 mod_timer(&(qseecom.bw_scale_down_timer),
9197 qseecom.bw_scale_down_timer.expires);
9198 qseecom.timer_running = true;
9199 }
9200
9201 mutex_unlock(&clk_access_lock);
9202 mutex_unlock(&qsee_bw_mutex);
9203 goto exit;
9204
9205ce_bus_clk_err:
9206 if (qclk->ce_clk)
9207 clk_disable_unprepare(qclk->ce_clk);
9208ce_clk_err:
9209 if (qclk->ce_core_clk)
9210 clk_disable_unprepare(qclk->ce_core_clk);
9211err:
9212 mutex_unlock(&clk_access_lock);
9213 mutex_unlock(&qsee_bw_mutex);
9214 ret = -EIO;
9215exit:
9216 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
9217 return ret;
9218}
9219
9220static const struct of_device_id qseecom_match[] = {
9221 {
9222 .compatible = "qcom,qseecom",
9223 },
9224 {}
9225};
9226
9227static struct platform_driver qseecom_plat_driver = {
9228 .probe = qseecom_probe,
9229 .remove = qseecom_remove,
9230 .suspend = qseecom_suspend,
9231 .resume = qseecom_resume,
9232 .driver = {
9233 .name = "qseecom",
9234 .owner = THIS_MODULE,
9235 .of_match_table = qseecom_match,
9236 },
9237};
9238
9239static int qseecom_init(void)
9240{
9241 return platform_driver_register(&qseecom_plat_driver);
9242}
9243
9244static void qseecom_exit(void)
9245{
9246 platform_driver_unregister(&qseecom_plat_driver);
9247}
9248
9249MODULE_LICENSE("GPL v2");
9250MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
9251
9252module_init(qseecom_init);
9253module_exit(qseecom_exit);