blob: d9aa6e2abe3d5538fc30603df7af45de5c9524a5 [file] [log] [blame]
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
Zhen Kong3d1d92f2018-02-02 17:21:04 -08004 * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
53
54#define QSEECOM_DEV "qseecom"
55#define QSEOS_VERSION_14 0x14
56#define QSEEE_VERSION_00 0x400000
57#define QSEE_VERSION_01 0x401000
58#define QSEE_VERSION_02 0x402000
59#define QSEE_VERSION_03 0x403000
60#define QSEE_VERSION_04 0x404000
61#define QSEE_VERSION_05 0x405000
62#define QSEE_VERSION_20 0x800000
63#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
64
65#define QSEE_CE_CLK_100MHZ 100000000
66#define CE_CLK_DIV 1000000
67
Mohamed Sunfeer105a07b2018-08-29 13:52:40 +053068#define QSEECOM_MAX_SG_ENTRY 4096
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -070069#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
70 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
71
72#define QSEECOM_INVALID_KEY_ID 0xff
73
74/* Save partition image hash for authentication check */
75#define SCM_SAVE_PARTITION_HASH_ID 0x01
76
77/* Check if enterprise security is activate */
78#define SCM_IS_ACTIVATED_ID 0x02
79
80/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
81#define SCM_MDTP_CIPHER_DIP 0x01
82
83/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
84#define MAX_DIP 0x20000
85
86#define RPMB_SERVICE 0x2000
87#define SSD_SERVICE 0x3000
88
89#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
90#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
91#define TWO 2
92#define QSEECOM_UFS_ICE_CE_NUM 10
93#define QSEECOM_SDCC_ICE_CE_NUM 20
94#define QSEECOM_ICE_FDE_KEY_INDEX 0
95
96#define PHY_ADDR_4G (1ULL<<32)
97
98#define QSEECOM_STATE_NOT_READY 0
99#define QSEECOM_STATE_SUSPEND 1
100#define QSEECOM_STATE_READY 2
101#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
102
103/*
104 * default ce info unit to 0 for
105 * services which
106 * support only single instance.
107 * Most of services are in this category.
108 */
109#define DEFAULT_CE_INFO_UNIT 0
110#define DEFAULT_NUM_CE_INFO_UNIT 1
111
Jiten Patela7bb1d52018-05-11 12:34:26 +0530112#define FDE_FLAG_POS 4
113#define ENABLE_KEY_WRAP_IN_KS (1 << FDE_FLAG_POS)
114
/* Identifiers for the bus clocks this driver can vote on. */
enum qseecom_clk_definitions {
	CLK_DFAB = 0,
	CLK_SFPB,
};
119
120enum qseecom_ice_key_size_type {
121 QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
122 (0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
123 QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
124 (1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
125 QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
126 (0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
127};
128
/* Kind of client behind a /dev/qseecom file handle. */
enum qseecom_client_handle_type {
	QSEECOM_CLIENT_APP = 1,
	QSEECOM_LISTENER_SERVICE,
	QSEECOM_SECURE_SERVICE,
	QSEECOM_GENERIC,
	QSEECOM_UNAVAILABLE_CLIENT_APP,
};
136
/* Crypto-engine hardware clock instances. */
enum qseecom_ce_hw_instance {
	CLK_QSEE = 0,
	CLK_CE_DRV,
	CLK_INVALID,
};
142
143static struct class *driver_class;
144static dev_t qseecom_device_no;
145
146static DEFINE_MUTEX(qsee_bw_mutex);
147static DEFINE_MUTEX(app_access_lock);
148static DEFINE_MUTEX(clk_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -0800149static DEFINE_MUTEX(listener_access_lock);
150
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700151
/*
 * One scatter/gather descriptor handed to TZ.  The encoding of
 * indexAndFlags is documented in the comment that follows this struct.
 */
struct sglist_info {
	uint32_t indexAndFlags;
	uint32_t sizeOrCount;
};
156
157/*
158 * The 31th bit indicates only one or multiple physical address inside
159 * the request buffer. If it is set, the index locates a single physical addr
160 * inside the request buffer, and `sizeOrCount` is the size of the memory being
161 * shared at that physical address.
162 * Otherwise, the index locates an array of {start, len} pairs (a
163 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
164 * that array.
165 *
166 * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
167 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
168 *
169 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
170 */
171#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
172 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
173
174#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
175
176#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
177
178#define MAKE_WHITELIST_VERSION(major, minor, patch) \
179 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
180
181struct qseecom_registered_listener_list {
182 struct list_head list;
183 struct qseecom_register_listener_req svc;
184 void *user_virt_sb_base;
185 u8 *sb_virt;
186 phys_addr_t sb_phys;
187 size_t sb_length;
188 struct ion_handle *ihandle; /* Retrieve phy addr */
189 wait_queue_head_t rcv_req_wq;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800190 /* rcv_req_flag: 0: ready and empty; 1: received req */
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700191 int rcv_req_flag;
192 int send_resp_flag;
193 bool listener_in_use;
194 /* wq for thread blocked on this listener*/
195 wait_queue_head_t listener_block_app_wq;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800196 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
197 uint32_t sglist_cnt;
198 int abort;
199 bool unregister_pending;
200};
201
202struct qseecom_unregister_pending_list {
203 struct list_head list;
204 struct qseecom_dev_handle *data;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700205};
206
207struct qseecom_registered_app_list {
208 struct list_head list;
209 u32 app_id;
210 u32 ref_cnt;
211 char app_name[MAX_APP_NAME_SIZE];
212 u32 app_arch;
213 bool app_blocked;
Zhen Kongdea10592018-07-30 17:50:10 -0700214 u32 check_block;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700215 u32 blocked_on_listener_id;
216};
217
218struct qseecom_registered_kclient_list {
219 struct list_head list;
220 struct qseecom_handle *handle;
221};
222
223struct qseecom_ce_info_use {
224 unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
225 unsigned int unit_num;
226 unsigned int num_ce_pipe_entries;
227 struct qseecom_ce_pipe_entry *ce_pipe_entry;
228 bool alloc;
229 uint32_t type;
230};
231
232struct ce_hw_usage_info {
233 uint32_t qsee_ce_hw_instance;
234 uint32_t num_fde;
235 struct qseecom_ce_info_use *fde;
236 uint32_t num_pfe;
237 struct qseecom_ce_info_use *pfe;
238};
239
240struct qseecom_clk {
241 enum qseecom_ce_hw_instance instance;
242 struct clk *ce_core_clk;
243 struct clk *ce_clk;
244 struct clk *ce_core_src_clk;
245 struct clk *ce_bus_clk;
246 uint32_t clk_access_cnt;
247};
248
249struct qseecom_control {
250 struct ion_client *ion_clnt; /* Ion client */
251 struct list_head registered_listener_list_head;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700252
253 struct list_head registered_app_list_head;
254 spinlock_t registered_app_list_lock;
255
256 struct list_head registered_kclient_list_head;
257 spinlock_t registered_kclient_list_lock;
258
259 wait_queue_head_t send_resp_wq;
260 int send_resp_flag;
261
262 uint32_t qseos_version;
263 uint32_t qsee_version;
264 struct device *pdev;
265 bool whitelist_support;
266 bool commonlib_loaded;
267 bool commonlib64_loaded;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700268 struct ce_hw_usage_info ce_info;
269
270 int qsee_bw_count;
271 int qsee_sfpb_bw_count;
272
273 uint32_t qsee_perf_client;
274 struct qseecom_clk qsee;
275 struct qseecom_clk ce_drv;
276
277 bool support_bus_scaling;
278 bool support_fde;
279 bool support_pfe;
280 bool fde_key_size;
281 uint32_t cumulative_mode;
282 enum qseecom_bandwidth_request_mode current_mode;
283 struct timer_list bw_scale_down_timer;
284 struct work_struct bw_inactive_req_ws;
285 struct cdev cdev;
286 bool timer_running;
287 bool no_clock_support;
288 unsigned int ce_opp_freq_hz;
289 bool appsbl_qseecom_support;
290 uint32_t qsee_reentrancy_support;
Jiten Patela7bb1d52018-05-11 12:34:26 +0530291 bool enable_key_wrap_in_ks;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700292
293 uint32_t app_block_ref_cnt;
294 wait_queue_head_t app_block_wq;
295 atomic_t qseecom_state;
296 int is_apps_region_protected;
Zhen Kong2f60f492017-06-29 15:22:14 -0700297 bool smcinvoke_support;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800298
299 struct list_head unregister_lsnr_pending_list_head;
300 wait_queue_head_t register_lsnr_pending_wq;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700301};
302
303struct qseecom_sec_buf_fd_info {
304 bool is_sec_buf_fd;
305 size_t size;
306 void *vbase;
307 dma_addr_t pbase;
308};
309
/* 32-bit {buffer, size} memory reference parameter. */
struct qseecom_param_memref {
	uint32_t buffer;
	uint32_t size;
};
314
315struct qseecom_client_handle {
316 u32 app_id;
317 u8 *sb_virt;
318 phys_addr_t sb_phys;
319 unsigned long user_virt_sb_base;
320 size_t sb_length;
321 struct ion_handle *ihandle; /* Retrieve phy addr */
322 char app_name[MAX_APP_NAME_SIZE];
323 u32 app_arch;
324 struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
325};
326
327struct qseecom_listener_handle {
328 u32 id;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800329 bool unregister_pending;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700330};
331
332static struct qseecom_control qseecom;
333
334struct qseecom_dev_handle {
335 enum qseecom_client_handle_type type;
336 union {
337 struct qseecom_client_handle client;
338 struct qseecom_listener_handle listener;
339 };
340 bool released;
341 int abort;
342 wait_queue_head_t abort_wq;
343 atomic_t ioctl_count;
344 bool perf_enabled;
345 bool fast_load_enabled;
346 enum qseecom_bandwidth_request_mode mode;
347 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
348 uint32_t sglist_cnt;
349 bool use_legacy_cmd;
350};
351
352struct qseecom_key_id_usage_desc {
353 uint8_t desc[QSEECOM_KEY_ID_SIZE];
354};
355
/* Identifies one crypto engine / pipe-pair selection. */
struct qseecom_crypto_info {
	unsigned int unit_num;
	unsigned int ce;
	unsigned int pipe_pair;
};
361
362static struct qseecom_key_id_usage_desc key_id_array[] = {
363 {
364 .desc = "Undefined Usage Index",
365 },
366
367 {
368 .desc = "Full Disk Encryption",
369 },
370
371 {
372 .desc = "Per File Encryption",
373 },
374
375 {
376 .desc = "UFS ICE Full Disk Encryption",
377 },
378
379 {
380 .desc = "SDCC ICE Full Disk Encryption",
381 },
382};
383
384/* Function proto types */
385static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
386static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
387static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
388static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
389static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
390static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
391 char *cmnlib_name);
392static int qseecom_enable_ice_setup(int usage);
393static int qseecom_disable_ice_setup(int usage);
394static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
395static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
396 void __user *argp);
397static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
398 void __user *argp);
399static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
400 void __user *argp);
401
402static int get_qseecom_keymaster_status(char *str)
403{
404 get_option(&str, &qseecom.is_apps_region_protected);
405 return 1;
406}
407__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
408
409static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
410 const void *req_buf, void *resp_buf)
411{
412 int ret = 0;
413 uint32_t smc_id = 0;
414 uint32_t qseos_cmd_id = 0;
415 struct scm_desc desc = {0};
416 struct qseecom_command_scm_resp *scm_resp = NULL;
417
418 if (!req_buf || !resp_buf) {
419 pr_err("Invalid buffer pointer\n");
420 return -EINVAL;
421 }
422 qseos_cmd_id = *(uint32_t *)req_buf;
423 scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
424
425 switch (svc_id) {
426 case 6: {
427 if (tz_cmd_id == 3) {
428 smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
429 desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
430 desc.args[0] = *(uint32_t *)req_buf;
431 } else {
432 pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
433 svc_id, tz_cmd_id);
434 return -EINVAL;
435 }
436 ret = scm_call2(smc_id, &desc);
437 break;
438 }
439 case SCM_SVC_ES: {
440 switch (tz_cmd_id) {
441 case SCM_SAVE_PARTITION_HASH_ID: {
442 u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
443 struct qseecom_save_partition_hash_req *p_hash_req =
444 (struct qseecom_save_partition_hash_req *)
445 req_buf;
446 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
447
448 if (!tzbuf)
449 return -ENOMEM;
450 memset(tzbuf, 0, tzbuflen);
451 memcpy(tzbuf, p_hash_req->digest,
452 SHA256_DIGEST_LENGTH);
453 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
454 smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
455 desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
456 desc.args[0] = p_hash_req->partition_id;
457 desc.args[1] = virt_to_phys(tzbuf);
458 desc.args[2] = SHA256_DIGEST_LENGTH;
459 ret = scm_call2(smc_id, &desc);
460 kzfree(tzbuf);
461 break;
462 }
463 default: {
464 pr_err("tz_cmd_id %d is not supported by scm_call2\n",
465 tz_cmd_id);
466 ret = -EINVAL;
467 break;
468 }
469 } /* end of switch (tz_cmd_id) */
470 break;
471 } /* end of case SCM_SVC_ES */
472 case SCM_SVC_TZSCHEDULER: {
473 switch (qseos_cmd_id) {
474 case QSEOS_APP_START_COMMAND: {
475 struct qseecom_load_app_ireq *req;
476 struct qseecom_load_app_64bit_ireq *req_64bit;
477
478 smc_id = TZ_OS_APP_START_ID;
479 desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
480 if (qseecom.qsee_version < QSEE_VERSION_40) {
481 req = (struct qseecom_load_app_ireq *)req_buf;
482 desc.args[0] = req->mdt_len;
483 desc.args[1] = req->img_len;
484 desc.args[2] = req->phy_addr;
485 } else {
486 req_64bit =
487 (struct qseecom_load_app_64bit_ireq *)
488 req_buf;
489 desc.args[0] = req_64bit->mdt_len;
490 desc.args[1] = req_64bit->img_len;
491 desc.args[2] = req_64bit->phy_addr;
492 }
493 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
494 ret = scm_call2(smc_id, &desc);
495 break;
496 }
497 case QSEOS_APP_SHUTDOWN_COMMAND: {
498 struct qseecom_unload_app_ireq *req;
499
500 req = (struct qseecom_unload_app_ireq *)req_buf;
501 smc_id = TZ_OS_APP_SHUTDOWN_ID;
502 desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
503 desc.args[0] = req->app_id;
504 ret = scm_call2(smc_id, &desc);
505 break;
506 }
507 case QSEOS_APP_LOOKUP_COMMAND: {
508 struct qseecom_check_app_ireq *req;
509 u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
510 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
511
512 if (!tzbuf)
513 return -ENOMEM;
514 req = (struct qseecom_check_app_ireq *)req_buf;
515 pr_debug("Lookup app_name = %s\n", req->app_name);
516 strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
517 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
518 smc_id = TZ_OS_APP_LOOKUP_ID;
519 desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
520 desc.args[0] = virt_to_phys(tzbuf);
521 desc.args[1] = strlen(req->app_name);
522 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
523 ret = scm_call2(smc_id, &desc);
524 kzfree(tzbuf);
525 break;
526 }
527 case QSEOS_APP_REGION_NOTIFICATION: {
528 struct qsee_apps_region_info_ireq *req;
529 struct qsee_apps_region_info_64bit_ireq *req_64bit;
530
531 smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
532 desc.arginfo =
533 TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
534 if (qseecom.qsee_version < QSEE_VERSION_40) {
535 req = (struct qsee_apps_region_info_ireq *)
536 req_buf;
537 desc.args[0] = req->addr;
538 desc.args[1] = req->size;
539 } else {
540 req_64bit =
541 (struct qsee_apps_region_info_64bit_ireq *)
542 req_buf;
543 desc.args[0] = req_64bit->addr;
544 desc.args[1] = req_64bit->size;
545 }
546 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
547 ret = scm_call2(smc_id, &desc);
548 break;
549 }
550 case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
551 struct qseecom_load_lib_image_ireq *req;
552 struct qseecom_load_lib_image_64bit_ireq *req_64bit;
553
554 smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
555 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
556 if (qseecom.qsee_version < QSEE_VERSION_40) {
557 req = (struct qseecom_load_lib_image_ireq *)
558 req_buf;
559 desc.args[0] = req->mdt_len;
560 desc.args[1] = req->img_len;
561 desc.args[2] = req->phy_addr;
562 } else {
563 req_64bit =
564 (struct qseecom_load_lib_image_64bit_ireq *)
565 req_buf;
566 desc.args[0] = req_64bit->mdt_len;
567 desc.args[1] = req_64bit->img_len;
568 desc.args[2] = req_64bit->phy_addr;
569 }
570 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
571 ret = scm_call2(smc_id, &desc);
572 break;
573 }
574 case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
575 smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
576 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
577 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
578 ret = scm_call2(smc_id, &desc);
579 break;
580 }
581 case QSEOS_REGISTER_LISTENER: {
582 struct qseecom_register_listener_ireq *req;
583 struct qseecom_register_listener_64bit_ireq *req_64bit;
584
585 desc.arginfo =
586 TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
587 if (qseecom.qsee_version < QSEE_VERSION_40) {
588 req = (struct qseecom_register_listener_ireq *)
589 req_buf;
590 desc.args[0] = req->listener_id;
591 desc.args[1] = req->sb_ptr;
592 desc.args[2] = req->sb_len;
593 } else {
594 req_64bit =
595 (struct qseecom_register_listener_64bit_ireq *)
596 req_buf;
597 desc.args[0] = req_64bit->listener_id;
598 desc.args[1] = req_64bit->sb_ptr;
599 desc.args[2] = req_64bit->sb_len;
600 }
Zhen Kong2f60f492017-06-29 15:22:14 -0700601 qseecom.smcinvoke_support = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700602 smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700603 ret = scm_call2(smc_id, &desc);
Zhen Kongbcdeda22018-11-16 13:50:51 -0800604 if (ret && ret != -EBUSY) {
Zhen Kong2f60f492017-06-29 15:22:14 -0700605 qseecom.smcinvoke_support = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700606 smc_id = TZ_OS_REGISTER_LISTENER_ID;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700607 ret = scm_call2(smc_id, &desc);
608 }
609 break;
610 }
611 case QSEOS_DEREGISTER_LISTENER: {
612 struct qseecom_unregister_listener_ireq *req;
613
614 req = (struct qseecom_unregister_listener_ireq *)
615 req_buf;
616 smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
617 desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
618 desc.args[0] = req->listener_id;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700619 ret = scm_call2(smc_id, &desc);
620 break;
621 }
622 case QSEOS_LISTENER_DATA_RSP_COMMAND: {
623 struct qseecom_client_listener_data_irsp *req;
624
625 req = (struct qseecom_client_listener_data_irsp *)
626 req_buf;
627 smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
628 desc.arginfo =
629 TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
630 desc.args[0] = req->listener_id;
631 desc.args[1] = req->status;
632 ret = scm_call2(smc_id, &desc);
633 break;
634 }
635 case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
636 struct qseecom_client_listener_data_irsp *req;
637 struct qseecom_client_listener_data_64bit_irsp *req_64;
638
639 smc_id =
640 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
641 desc.arginfo =
642 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
643 if (qseecom.qsee_version < QSEE_VERSION_40) {
644 req =
645 (struct qseecom_client_listener_data_irsp *)
646 req_buf;
647 desc.args[0] = req->listener_id;
648 desc.args[1] = req->status;
649 desc.args[2] = req->sglistinfo_ptr;
650 desc.args[3] = req->sglistinfo_len;
651 } else {
652 req_64 =
653 (struct qseecom_client_listener_data_64bit_irsp *)
654 req_buf;
655 desc.args[0] = req_64->listener_id;
656 desc.args[1] = req_64->status;
657 desc.args[2] = req_64->sglistinfo_ptr;
658 desc.args[3] = req_64->sglistinfo_len;
659 }
660 ret = scm_call2(smc_id, &desc);
661 break;
662 }
663 case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
664 struct qseecom_load_app_ireq *req;
665 struct qseecom_load_app_64bit_ireq *req_64bit;
666
667 smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
668 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
669 if (qseecom.qsee_version < QSEE_VERSION_40) {
670 req = (struct qseecom_load_app_ireq *)req_buf;
671 desc.args[0] = req->mdt_len;
672 desc.args[1] = req->img_len;
673 desc.args[2] = req->phy_addr;
674 } else {
675 req_64bit =
676 (struct qseecom_load_app_64bit_ireq *)req_buf;
677 desc.args[0] = req_64bit->mdt_len;
678 desc.args[1] = req_64bit->img_len;
679 desc.args[2] = req_64bit->phy_addr;
680 }
681 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
682 ret = scm_call2(smc_id, &desc);
683 break;
684 }
685 case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
686 smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
687 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
688 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
689 ret = scm_call2(smc_id, &desc);
690 break;
691 }
692
693 case QSEOS_CLIENT_SEND_DATA_COMMAND: {
694 struct qseecom_client_send_data_ireq *req;
695 struct qseecom_client_send_data_64bit_ireq *req_64bit;
696
697 smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
698 desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
699 if (qseecom.qsee_version < QSEE_VERSION_40) {
700 req = (struct qseecom_client_send_data_ireq *)
701 req_buf;
702 desc.args[0] = req->app_id;
703 desc.args[1] = req->req_ptr;
704 desc.args[2] = req->req_len;
705 desc.args[3] = req->rsp_ptr;
706 desc.args[4] = req->rsp_len;
707 } else {
708 req_64bit =
709 (struct qseecom_client_send_data_64bit_ireq *)
710 req_buf;
711 desc.args[0] = req_64bit->app_id;
712 desc.args[1] = req_64bit->req_ptr;
713 desc.args[2] = req_64bit->req_len;
714 desc.args[3] = req_64bit->rsp_ptr;
715 desc.args[4] = req_64bit->rsp_len;
716 }
717 ret = scm_call2(smc_id, &desc);
718 break;
719 }
720 case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
721 struct qseecom_client_send_data_ireq *req;
722 struct qseecom_client_send_data_64bit_ireq *req_64bit;
723
724 smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
725 desc.arginfo =
726 TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
727 if (qseecom.qsee_version < QSEE_VERSION_40) {
728 req = (struct qseecom_client_send_data_ireq *)
729 req_buf;
730 desc.args[0] = req->app_id;
731 desc.args[1] = req->req_ptr;
732 desc.args[2] = req->req_len;
733 desc.args[3] = req->rsp_ptr;
734 desc.args[4] = req->rsp_len;
735 desc.args[5] = req->sglistinfo_ptr;
736 desc.args[6] = req->sglistinfo_len;
737 } else {
738 req_64bit =
739 (struct qseecom_client_send_data_64bit_ireq *)
740 req_buf;
741 desc.args[0] = req_64bit->app_id;
742 desc.args[1] = req_64bit->req_ptr;
743 desc.args[2] = req_64bit->req_len;
744 desc.args[3] = req_64bit->rsp_ptr;
745 desc.args[4] = req_64bit->rsp_len;
746 desc.args[5] = req_64bit->sglistinfo_ptr;
747 desc.args[6] = req_64bit->sglistinfo_len;
748 }
749 ret = scm_call2(smc_id, &desc);
750 break;
751 }
752 case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
753 struct qseecom_client_send_service_ireq *req;
754
755 req = (struct qseecom_client_send_service_ireq *)
756 req_buf;
757 smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
758 desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
759 desc.args[0] = req->key_type;
760 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
761 ret = scm_call2(smc_id, &desc);
762 break;
763 }
764 case QSEOS_RPMB_ERASE_COMMAND: {
765 smc_id = TZ_OS_RPMB_ERASE_ID;
766 desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
767 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
768 ret = scm_call2(smc_id, &desc);
769 break;
770 }
771 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
772 smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
773 desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
774 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
775 ret = scm_call2(smc_id, &desc);
776 break;
777 }
778 case QSEOS_GENERATE_KEY: {
779 u32 tzbuflen = PAGE_ALIGN(sizeof
780 (struct qseecom_key_generate_ireq) -
781 sizeof(uint32_t));
782 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
783
784 if (!tzbuf)
785 return -ENOMEM;
786 memset(tzbuf, 0, tzbuflen);
787 memcpy(tzbuf, req_buf + sizeof(uint32_t),
788 (sizeof(struct qseecom_key_generate_ireq) -
789 sizeof(uint32_t)));
790 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
791 smc_id = TZ_OS_KS_GEN_KEY_ID;
792 desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
793 desc.args[0] = virt_to_phys(tzbuf);
794 desc.args[1] = tzbuflen;
795 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
796 ret = scm_call2(smc_id, &desc);
797 kzfree(tzbuf);
798 break;
799 }
800 case QSEOS_DELETE_KEY: {
801 u32 tzbuflen = PAGE_ALIGN(sizeof
802 (struct qseecom_key_delete_ireq) -
803 sizeof(uint32_t));
804 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
805
806 if (!tzbuf)
807 return -ENOMEM;
808 memset(tzbuf, 0, tzbuflen);
809 memcpy(tzbuf, req_buf + sizeof(uint32_t),
810 (sizeof(struct qseecom_key_delete_ireq) -
811 sizeof(uint32_t)));
812 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
813 smc_id = TZ_OS_KS_DEL_KEY_ID;
814 desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
815 desc.args[0] = virt_to_phys(tzbuf);
816 desc.args[1] = tzbuflen;
817 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
818 ret = scm_call2(smc_id, &desc);
819 kzfree(tzbuf);
820 break;
821 }
822 case QSEOS_SET_KEY: {
823 u32 tzbuflen = PAGE_ALIGN(sizeof
824 (struct qseecom_key_select_ireq) -
825 sizeof(uint32_t));
826 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
827
828 if (!tzbuf)
829 return -ENOMEM;
830 memset(tzbuf, 0, tzbuflen);
831 memcpy(tzbuf, req_buf + sizeof(uint32_t),
832 (sizeof(struct qseecom_key_select_ireq) -
833 sizeof(uint32_t)));
834 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
835 smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
836 desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
837 desc.args[0] = virt_to_phys(tzbuf);
838 desc.args[1] = tzbuflen;
839 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
840 ret = scm_call2(smc_id, &desc);
841 kzfree(tzbuf);
842 break;
843 }
844 case QSEOS_UPDATE_KEY_USERINFO: {
845 u32 tzbuflen = PAGE_ALIGN(sizeof
846 (struct qseecom_key_userinfo_update_ireq) -
847 sizeof(uint32_t));
848 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
849
850 if (!tzbuf)
851 return -ENOMEM;
852 memset(tzbuf, 0, tzbuflen);
853 memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
854 (struct qseecom_key_userinfo_update_ireq) -
855 sizeof(uint32_t)));
856 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
857 smc_id = TZ_OS_KS_UPDATE_KEY_ID;
858 desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
859 desc.args[0] = virt_to_phys(tzbuf);
860 desc.args[1] = tzbuflen;
861 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
862 ret = scm_call2(smc_id, &desc);
863 kzfree(tzbuf);
864 break;
865 }
866 case QSEOS_TEE_OPEN_SESSION: {
867 struct qseecom_qteec_ireq *req;
868 struct qseecom_qteec_64bit_ireq *req_64bit;
869
870 smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
871 desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
872 if (qseecom.qsee_version < QSEE_VERSION_40) {
873 req = (struct qseecom_qteec_ireq *)req_buf;
874 desc.args[0] = req->app_id;
875 desc.args[1] = req->req_ptr;
876 desc.args[2] = req->req_len;
877 desc.args[3] = req->resp_ptr;
878 desc.args[4] = req->resp_len;
879 } else {
880 req_64bit = (struct qseecom_qteec_64bit_ireq *)
881 req_buf;
882 desc.args[0] = req_64bit->app_id;
883 desc.args[1] = req_64bit->req_ptr;
884 desc.args[2] = req_64bit->req_len;
885 desc.args[3] = req_64bit->resp_ptr;
886 desc.args[4] = req_64bit->resp_len;
887 }
888 ret = scm_call2(smc_id, &desc);
889 break;
890 }
891 case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
892 struct qseecom_qteec_ireq *req;
893 struct qseecom_qteec_64bit_ireq *req_64bit;
894
895 smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
896 desc.arginfo =
897 TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
898 if (qseecom.qsee_version < QSEE_VERSION_40) {
899 req = (struct qseecom_qteec_ireq *)req_buf;
900 desc.args[0] = req->app_id;
901 desc.args[1] = req->req_ptr;
902 desc.args[2] = req->req_len;
903 desc.args[3] = req->resp_ptr;
904 desc.args[4] = req->resp_len;
905 desc.args[5] = req->sglistinfo_ptr;
906 desc.args[6] = req->sglistinfo_len;
907 } else {
908 req_64bit = (struct qseecom_qteec_64bit_ireq *)
909 req_buf;
910 desc.args[0] = req_64bit->app_id;
911 desc.args[1] = req_64bit->req_ptr;
912 desc.args[2] = req_64bit->req_len;
913 desc.args[3] = req_64bit->resp_ptr;
914 desc.args[4] = req_64bit->resp_len;
915 desc.args[5] = req_64bit->sglistinfo_ptr;
916 desc.args[6] = req_64bit->sglistinfo_len;
917 }
918 ret = scm_call2(smc_id, &desc);
919 break;
920 }
921 case QSEOS_TEE_INVOKE_COMMAND: {
922 struct qseecom_qteec_ireq *req;
923 struct qseecom_qteec_64bit_ireq *req_64bit;
924
925 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
926 desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
927 if (qseecom.qsee_version < QSEE_VERSION_40) {
928 req = (struct qseecom_qteec_ireq *)req_buf;
929 desc.args[0] = req->app_id;
930 desc.args[1] = req->req_ptr;
931 desc.args[2] = req->req_len;
932 desc.args[3] = req->resp_ptr;
933 desc.args[4] = req->resp_len;
934 } else {
935 req_64bit = (struct qseecom_qteec_64bit_ireq *)
936 req_buf;
937 desc.args[0] = req_64bit->app_id;
938 desc.args[1] = req_64bit->req_ptr;
939 desc.args[2] = req_64bit->req_len;
940 desc.args[3] = req_64bit->resp_ptr;
941 desc.args[4] = req_64bit->resp_len;
942 }
943 ret = scm_call2(smc_id, &desc);
944 break;
945 }
946 case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
947 struct qseecom_qteec_ireq *req;
948 struct qseecom_qteec_64bit_ireq *req_64bit;
949
950 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
951 desc.arginfo =
952 TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
953 if (qseecom.qsee_version < QSEE_VERSION_40) {
954 req = (struct qseecom_qteec_ireq *)req_buf;
955 desc.args[0] = req->app_id;
956 desc.args[1] = req->req_ptr;
957 desc.args[2] = req->req_len;
958 desc.args[3] = req->resp_ptr;
959 desc.args[4] = req->resp_len;
960 desc.args[5] = req->sglistinfo_ptr;
961 desc.args[6] = req->sglistinfo_len;
962 } else {
963 req_64bit = (struct qseecom_qteec_64bit_ireq *)
964 req_buf;
965 desc.args[0] = req_64bit->app_id;
966 desc.args[1] = req_64bit->req_ptr;
967 desc.args[2] = req_64bit->req_len;
968 desc.args[3] = req_64bit->resp_ptr;
969 desc.args[4] = req_64bit->resp_len;
970 desc.args[5] = req_64bit->sglistinfo_ptr;
971 desc.args[6] = req_64bit->sglistinfo_len;
972 }
973 ret = scm_call2(smc_id, &desc);
974 break;
975 }
976 case QSEOS_TEE_CLOSE_SESSION: {
977 struct qseecom_qteec_ireq *req;
978 struct qseecom_qteec_64bit_ireq *req_64bit;
979
980 smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
981 desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
982 if (qseecom.qsee_version < QSEE_VERSION_40) {
983 req = (struct qseecom_qteec_ireq *)req_buf;
984 desc.args[0] = req->app_id;
985 desc.args[1] = req->req_ptr;
986 desc.args[2] = req->req_len;
987 desc.args[3] = req->resp_ptr;
988 desc.args[4] = req->resp_len;
989 } else {
990 req_64bit = (struct qseecom_qteec_64bit_ireq *)
991 req_buf;
992 desc.args[0] = req_64bit->app_id;
993 desc.args[1] = req_64bit->req_ptr;
994 desc.args[2] = req_64bit->req_len;
995 desc.args[3] = req_64bit->resp_ptr;
996 desc.args[4] = req_64bit->resp_len;
997 }
998 ret = scm_call2(smc_id, &desc);
999 break;
1000 }
1001 case QSEOS_TEE_REQUEST_CANCELLATION: {
1002 struct qseecom_qteec_ireq *req;
1003 struct qseecom_qteec_64bit_ireq *req_64bit;
1004
1005 smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
1006 desc.arginfo =
1007 TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
1008 if (qseecom.qsee_version < QSEE_VERSION_40) {
1009 req = (struct qseecom_qteec_ireq *)req_buf;
1010 desc.args[0] = req->app_id;
1011 desc.args[1] = req->req_ptr;
1012 desc.args[2] = req->req_len;
1013 desc.args[3] = req->resp_ptr;
1014 desc.args[4] = req->resp_len;
1015 } else {
1016 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1017 req_buf;
1018 desc.args[0] = req_64bit->app_id;
1019 desc.args[1] = req_64bit->req_ptr;
1020 desc.args[2] = req_64bit->req_len;
1021 desc.args[3] = req_64bit->resp_ptr;
1022 desc.args[4] = req_64bit->resp_len;
1023 }
1024 ret = scm_call2(smc_id, &desc);
1025 break;
1026 }
1027 case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
1028 struct qseecom_continue_blocked_request_ireq *req =
1029 (struct qseecom_continue_blocked_request_ireq *)
1030 req_buf;
Zhen Kong2f60f492017-06-29 15:22:14 -07001031 if (qseecom.smcinvoke_support)
1032 smc_id =
1033 TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
1034 else
1035 smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001036 desc.arginfo =
1037 TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
Zhen Kong2f60f492017-06-29 15:22:14 -07001038 desc.args[0] = req->app_or_session_id;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001039 ret = scm_call2(smc_id, &desc);
1040 break;
1041 }
1042 default: {
1043 pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
1044 qseos_cmd_id);
1045 ret = -EINVAL;
1046 break;
1047 }
1048 } /*end of switch (qsee_cmd_id) */
1049 break;
1050 } /*end of case SCM_SVC_TZSCHEDULER*/
1051 default: {
1052 pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
1053 svc_id);
1054 ret = -EINVAL;
1055 break;
1056 }
1057 } /*end of switch svc_id */
1058 scm_resp->result = desc.ret[0];
1059 scm_resp->resp_type = desc.ret[1];
1060 scm_resp->data = desc.ret[2];
1061 pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
1062 svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
1063 pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
1064 scm_resp->result, scm_resp->resp_type, scm_resp->data);
1065 return ret;
1066}
1067
1068
1069static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1070 size_t cmd_len, void *resp_buf, size_t resp_len)
1071{
1072 if (!is_scm_armv8())
1073 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1074 resp_buf, resp_len);
1075 else
1076 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1077}
1078
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001079static struct qseecom_registered_listener_list *__qseecom_find_svc(
1080 int32_t listener_id)
1081{
1082 struct qseecom_registered_listener_list *entry = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001083
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001084 list_for_each_entry(entry,
1085 &qseecom.registered_listener_list_head, list) {
1086 if (entry->svc.listener_id == listener_id)
1087 break;
1088 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001089 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08001090 pr_debug("Service id: %u is not found\n", listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001091 return NULL;
1092 }
1093
1094 return entry;
1095}
1096
1097static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1098 struct qseecom_dev_handle *handle,
1099 struct qseecom_register_listener_req *listener)
1100{
1101 int ret = 0;
1102 struct qseecom_register_listener_ireq req;
1103 struct qseecom_register_listener_64bit_ireq req_64bit;
1104 struct qseecom_command_scm_resp resp;
1105 ion_phys_addr_t pa;
1106 void *cmd_buf = NULL;
1107 size_t cmd_len;
1108
1109 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001110 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001111 listener->ifd_data_fd);
1112 if (IS_ERR_OR_NULL(svc->ihandle)) {
1113 pr_err("Ion client could not retrieve the handle\n");
1114 return -ENOMEM;
1115 }
1116
1117 /* Get the physical address of the ION BUF */
1118 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1119 if (ret) {
1120 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1121 ret);
1122 return ret;
1123 }
1124 /* Populate the structure for sending scm call to load image */
1125 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1126 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1127 pr_err("ION memory mapping for listener shared buffer failed\n");
1128 return -ENOMEM;
1129 }
1130 svc->sb_phys = (phys_addr_t)pa;
1131
1132 if (qseecom.qsee_version < QSEE_VERSION_40) {
1133 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1134 req.listener_id = svc->svc.listener_id;
1135 req.sb_len = svc->sb_length;
1136 req.sb_ptr = (uint32_t)svc->sb_phys;
1137 cmd_buf = (void *)&req;
1138 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1139 } else {
1140 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1141 req_64bit.listener_id = svc->svc.listener_id;
1142 req_64bit.sb_len = svc->sb_length;
1143 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1144 cmd_buf = (void *)&req_64bit;
1145 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1146 }
1147
1148 resp.result = QSEOS_RESULT_INCOMPLETE;
1149
1150 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1151 &resp, sizeof(resp));
1152 if (ret) {
1153 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1154 return -EINVAL;
1155 }
1156
1157 if (resp.result != QSEOS_RESULT_SUCCESS) {
1158 pr_err("Error SB registration req: resp.result = %d\n",
1159 resp.result);
1160 return -EPERM;
1161 }
1162 return 0;
1163}
1164
1165static int qseecom_register_listener(struct qseecom_dev_handle *data,
1166 void __user *argp)
1167{
1168 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001169 struct qseecom_register_listener_req rcvd_lstnr;
1170 struct qseecom_registered_listener_list *new_entry;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001171 struct qseecom_registered_listener_list *ptr_svc;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001172
1173 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1174 if (ret) {
1175 pr_err("copy_from_user failed\n");
1176 return ret;
1177 }
1178 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1179 rcvd_lstnr.sb_size))
1180 return -EFAULT;
1181
Zhen Kong3c674612018-09-06 22:51:27 -07001182 data->listener.id = rcvd_lstnr.listener_id;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001183
1184 ptr_svc = __qseecom_find_svc(rcvd_lstnr.listener_id);
1185 if (ptr_svc) {
1186 if (ptr_svc->unregister_pending == false) {
1187 pr_err("Service %d is not unique\n",
Zhen Kong3c674612018-09-06 22:51:27 -07001188 rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001189 data->released = true;
1190 return -EBUSY;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001191 } else {
1192 /*wait until listener is unregistered*/
1193 pr_debug("register %d has to wait\n",
1194 rcvd_lstnr.listener_id);
1195 mutex_unlock(&listener_access_lock);
1196 ret = wait_event_freezable(
1197 qseecom.register_lsnr_pending_wq,
1198 list_empty(
1199 &qseecom.unregister_lsnr_pending_list_head));
1200 if (ret) {
1201 pr_err("interrupted register_pending_wq %d\n",
1202 rcvd_lstnr.listener_id);
1203 mutex_lock(&listener_access_lock);
1204 return -ERESTARTSYS;
1205 }
1206 mutex_lock(&listener_access_lock);
1207 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001208 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001209 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1210 if (!new_entry)
1211 return -ENOMEM;
1212 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
Zhen Kongbcdeda22018-11-16 13:50:51 -08001213 new_entry->rcv_req_flag = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001214
1215 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1216 new_entry->sb_length = rcvd_lstnr.sb_size;
1217 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1218 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
Zhen Kong3c674612018-09-06 22:51:27 -07001219 pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
1220 rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001221 kzfree(new_entry);
1222 return -ENOMEM;
1223 }
1224
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001225 init_waitqueue_head(&new_entry->rcv_req_wq);
1226 init_waitqueue_head(&new_entry->listener_block_app_wq);
1227 new_entry->send_resp_flag = 0;
1228 new_entry->listener_in_use = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001229 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001230
Zhen Kong3c674612018-09-06 22:51:27 -07001231 pr_warn("Service %d is registered\n", rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001232 return ret;
1233}
1234
/*
 * Deregister @ptr_svc's listener from QSEE and release its resources.
 *
 * If TZ reports -EBUSY the listener is still in use: return immediately
 * WITHOUT tearing anything down, so the caller can retry later from the
 * pending-unregister list.  Any other SCM or response failure still
 * falls through to the cleanup path ("exit:"), so the local entry never
 * outlives a failed deregistration.
 *
 * Called with listener_access_lock held (see
 * __qseecom_processing_pending_lsnr_unregister()).
 *
 * Returns 0 on success, -EBUSY for a TZ retry, or a negative errno.
 */
static int __qseecom_unregister_listener(struct qseecom_dev_handle *data,
			struct qseecom_registered_listener_list *ptr_svc)
{
	int ret = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_command_scm_resp resp;
	struct ion_handle *ihandle = NULL; /* Retrieve phy addr */

	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
	req.listener_id = data->listener.id;
	resp.result = QSEOS_RESULT_INCOMPLETE;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
					sizeof(req), &resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
				ret, data->listener.id);
		/* TZ still busy with this listener: caller retries later */
		if (ret == -EBUSY)
			return ret;
		goto exit;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
				resp.result, data->listener.id);
		ret = -EPERM;
		goto exit;
	}

	/* Abort the handle and kick any thread blocked on a request */
	data->abort = 1;
	wake_up_all(&ptr_svc->rcv_req_wq);

	/* Wait until all other ioctl threads on this handle drain out */
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_freezable(data->abort_wq,
				atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
		}
	}

exit:
	/* Release the listener's shared-buffer mapping and ion handle */
	if (ptr_svc->sb_virt) {
		ihandle = ptr_svc->ihandle;
		if (!IS_ERR_OR_NULL(ihandle)) {
			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
			ion_free(qseecom.ion_clnt, ihandle);
		}
	}
	list_del(&ptr_svc->list);
	kzfree(ptr_svc);

	data->released = true;
	pr_warn("Service %d is unregistered\n", data->listener.id);
	return ret;
}
1290
/*
 * Request unregistration of the listener bound to @data.
 *
 * The actual TZ-side deregistration is deferred to
 * __qseecom_processing_pending_lsnr_unregister(): here the listener is
 * only marked aborted (waking any CA blocked on its response) and the
 * handle queued on the pending-unregister list.
 *
 * NOTE(review): the registered/pending lists are touched without taking
 * listener_access_lock here -- presumably the caller holds it; confirm
 * against the ioctl path.
 *
 * Returns 0 on success (or if already pending), -ENODATA for an unknown
 * listener id, or -ENOMEM.
 */
static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
{
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct qseecom_unregister_pending_list *entry = NULL;

	ptr_svc = __qseecom_find_svc(data->listener.id);
	if (!ptr_svc) {
		pr_err("Unregiser invalid listener ID %d\n", data->listener.id);
		return -ENODATA;
	}
	/* stop CA thread waiting for listener response */
	ptr_svc->abort = 1;
	wake_up_interruptible_all(&qseecom.send_resp_wq);

	/* return directly if pending*/
	if (ptr_svc->unregister_pending)
		return 0;

	/*add unregistration into pending list*/
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;
	entry->data = data;
	list_add_tail(&entry->list,
			&qseecom.unregister_lsnr_pending_list_head);
	ptr_svc->unregister_pending = true;
	pr_debug("unregister %d pending\n", data->listener.id);
	return 0;
}
1320
/*
 * Drain the pending-unregister list.
 *
 * For each queued handle, look up its listener and attempt the real TZ
 * deregistration.  If TZ answers -EBUSY the entry is left queued and
 * draining stops (retried on the next call); otherwise the entry and
 * its dev handle are freed.  When the list empties, wake any register
 * requests that were waiting for the same listener id to go away.
 *
 * Takes and releases listener_access_lock internally.
 */
static void __qseecom_processing_pending_lsnr_unregister(void)
{
	struct qseecom_unregister_pending_list *entry = NULL;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct list_head *pos;
	int ret = 0;

	mutex_lock(&listener_access_lock);
	while (!list_empty(&qseecom.unregister_lsnr_pending_list_head)) {
		pos = qseecom.unregister_lsnr_pending_list_head.next;
		entry = list_entry(pos,
				struct qseecom_unregister_pending_list, list);
		if (entry && entry->data) {
			pr_debug("process pending unregister %d\n",
					entry->data->listener.id);
			ptr_svc = __qseecom_find_svc(
					entry->data->listener.id);
			if (ptr_svc) {
				ret = __qseecom_unregister_listener(
						entry->data, ptr_svc);
				if (ret == -EBUSY) {
					/* TZ busy: keep queued, retry later */
					pr_debug("unregister %d pending again\n",
						entry->data->listener.id);
					mutex_unlock(&listener_access_lock);
					return;
				}
			} else
				pr_err("invalid listener %d\n",
					entry->data->listener.id);
			kzfree(entry->data);
		}
		list_del(pos);
		kzfree(entry);
	}
	mutex_unlock(&listener_access_lock);
	wake_up_interruptible(&qseecom.register_lsnr_pending_wq);
}
1358
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001359static int __qseecom_set_msm_bus_request(uint32_t mode)
1360{
1361 int ret = 0;
1362 struct qseecom_clk *qclk;
1363
1364 qclk = &qseecom.qsee;
1365 if (qclk->ce_core_src_clk != NULL) {
1366 if (mode == INACTIVE) {
1367 __qseecom_disable_clk(CLK_QSEE);
1368 } else {
1369 ret = __qseecom_enable_clk(CLK_QSEE);
1370 if (ret)
1371 pr_err("CLK enabling failed (%d) MODE (%d)\n",
1372 ret, mode);
1373 }
1374 }
1375
1376 if ((!ret) && (qseecom.current_mode != mode)) {
1377 ret = msm_bus_scale_client_update_request(
1378 qseecom.qsee_perf_client, mode);
1379 if (ret) {
1380 pr_err("Bandwidth req failed(%d) MODE (%d)\n",
1381 ret, mode);
1382 if (qclk->ce_core_src_clk != NULL) {
1383 if (mode == INACTIVE) {
1384 ret = __qseecom_enable_clk(CLK_QSEE);
1385 if (ret)
1386 pr_err("CLK enable failed\n");
1387 } else
1388 __qseecom_disable_clk(CLK_QSEE);
1389 }
1390 }
1391 qseecom.current_mode = mode;
1392 }
1393 return ret;
1394}
1395
/*
 * Deferred work scheduled by the bw-scale-down timer: if the timer is
 * still marked running, drop the bus/clock request to INACTIVE and clear
 * the flag.  Takes app_access_lock then qsee_bw_mutex (mutexes cannot be
 * taken from the timer callback itself, hence the workqueue hop).
 */
static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
	mutex_lock(&app_access_lock);
	mutex_lock(&qsee_bw_mutex);
	if (qseecom.timer_running)
		__qseecom_set_msm_bus_request(INACTIVE);
	pr_debug("current_mode = %d, cumulative_mode = %d\n",
				qseecom.current_mode, qseecom.cumulative_mode);
	qseecom.timer_running = false;
	mutex_unlock(&qsee_bw_mutex);
	mutex_unlock(&app_access_lock);
}
1408
/*
 * bw_scale_down_timer callback: runs in timer (atomic) context, so defer
 * the actual scale-down to process context via bw_inactive_req_ws.
 */
static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}
1413
1414static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1415{
1416 struct qseecom_clk *qclk;
1417 int ret = 0;
1418
1419 mutex_lock(&clk_access_lock);
1420 if (ce == CLK_QSEE)
1421 qclk = &qseecom.qsee;
1422 else
1423 qclk = &qseecom.ce_drv;
1424
1425 if (qclk->clk_access_cnt > 2) {
1426 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1427 ret = -EINVAL;
1428 goto err_dec_ref_cnt;
1429 }
1430 if (qclk->clk_access_cnt == 2)
1431 qclk->clk_access_cnt--;
1432
1433err_dec_ref_cnt:
1434 mutex_unlock(&clk_access_lock);
1435 return ret;
1436}
1437
1438
1439static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1440{
1441 int32_t ret = 0;
1442 int32_t request_mode = INACTIVE;
1443
1444 mutex_lock(&qsee_bw_mutex);
1445 if (mode == 0) {
1446 if (qseecom.cumulative_mode > MEDIUM)
1447 request_mode = HIGH;
1448 else
1449 request_mode = qseecom.cumulative_mode;
1450 } else {
1451 request_mode = mode;
1452 }
1453
1454 ret = __qseecom_set_msm_bus_request(request_mode);
1455 if (ret) {
1456 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1457 ret, request_mode);
1458 goto err_scale_timer;
1459 }
1460
1461 if (qseecom.timer_running) {
1462 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1463 if (ret) {
1464 pr_err("Failed to decrease clk ref count.\n");
1465 goto err_scale_timer;
1466 }
1467 del_timer_sync(&(qseecom.bw_scale_down_timer));
1468 qseecom.timer_running = false;
1469 }
1470err_scale_timer:
1471 mutex_unlock(&qsee_bw_mutex);
1472 return ret;
1473}
1474
1475
1476static int qseecom_unregister_bus_bandwidth_needs(
1477 struct qseecom_dev_handle *data)
1478{
1479 int32_t ret = 0;
1480
1481 qseecom.cumulative_mode -= data->mode;
1482 data->mode = INACTIVE;
1483
1484 return ret;
1485}
1486
1487static int __qseecom_register_bus_bandwidth_needs(
1488 struct qseecom_dev_handle *data, uint32_t request_mode)
1489{
1490 int32_t ret = 0;
1491
1492 if (data->mode == INACTIVE) {
1493 qseecom.cumulative_mode += request_mode;
1494 data->mode = request_mode;
1495 } else {
1496 if (data->mode != request_mode) {
1497 qseecom.cumulative_mode -= data->mode;
1498 qseecom.cumulative_mode += request_mode;
1499 data->mode = request_mode;
1500 }
1501 }
1502 return ret;
1503}
1504
1505static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1506{
1507 int ret = 0;
1508
1509 ret = qsee_vote_for_clock(data, CLK_DFAB);
1510 if (ret) {
1511 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1512 goto perf_enable_exit;
1513 }
1514 ret = qsee_vote_for_clock(data, CLK_SFPB);
1515 if (ret) {
1516 qsee_disable_clock_vote(data, CLK_DFAB);
1517 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1518 goto perf_enable_exit;
1519 }
1520
1521perf_enable_exit:
1522 return ret;
1523}
1524
1525static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1526 void __user *argp)
1527{
1528 int32_t ret = 0;
1529 int32_t req_mode;
1530
1531 if (qseecom.no_clock_support)
1532 return 0;
1533
1534 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1535 if (ret) {
1536 pr_err("copy_from_user failed\n");
1537 return ret;
1538 }
1539 if (req_mode > HIGH) {
1540 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1541 return -EINVAL;
1542 }
1543
1544 /*
1545 * Register bus bandwidth needs if bus scaling feature is enabled;
1546 * otherwise, qseecom enable/disable clocks for the client directly.
1547 */
1548 if (qseecom.support_bus_scaling) {
1549 mutex_lock(&qsee_bw_mutex);
1550 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1551 mutex_unlock(&qsee_bw_mutex);
1552 } else {
1553 pr_debug("Bus scaling feature is NOT enabled\n");
1554 pr_debug("request bandwidth mode %d for the client\n",
1555 req_mode);
1556 if (req_mode != INACTIVE) {
1557 ret = qseecom_perf_enable(data);
1558 if (ret)
1559 pr_err("Failed to vote for clock with err %d\n",
1560 ret);
1561 } else {
1562 qsee_disable_clock_vote(data, CLK_DFAB);
1563 qsee_disable_clock_vote(data, CLK_SFPB);
1564 }
1565 }
1566 return ret;
1567}
1568
1569static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1570{
1571 if (qseecom.no_clock_support)
1572 return;
1573
1574 mutex_lock(&qsee_bw_mutex);
1575 qseecom.bw_scale_down_timer.expires = jiffies +
1576 msecs_to_jiffies(duration);
1577 mod_timer(&(qseecom.bw_scale_down_timer),
1578 qseecom.bw_scale_down_timer.expires);
1579 qseecom.timer_running = true;
1580 mutex_unlock(&qsee_bw_mutex);
1581}
1582
1583static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1584{
1585 if (!qseecom.support_bus_scaling)
1586 qsee_disable_clock_vote(data, CLK_SFPB);
1587 else
1588 __qseecom_add_bw_scale_down_timer(
1589 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1590}
1591
1592static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1593{
1594 int ret = 0;
1595
1596 if (qseecom.support_bus_scaling) {
1597 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1598 if (ret)
1599 pr_err("Failed to set bw MEDIUM.\n");
1600 } else {
1601 ret = qsee_vote_for_clock(data, CLK_SFPB);
1602 if (ret)
1603 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1604 }
1605 return ret;
1606}
1607
1608static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1609 void __user *argp)
1610{
1611 ion_phys_addr_t pa;
1612 int32_t ret;
1613 struct qseecom_set_sb_mem_param_req req;
1614 size_t len;
1615
1616 /* Copy the relevant information needed for loading the image */
1617 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1618 return -EFAULT;
1619
1620 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1621 (req.sb_len == 0)) {
1622 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1623 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1624 return -EFAULT;
1625 }
1626 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1627 req.sb_len))
1628 return -EFAULT;
1629
1630 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001631 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001632 req.ifd_data_fd);
1633 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1634 pr_err("Ion client could not retrieve the handle\n");
1635 return -ENOMEM;
1636 }
1637 /* Get the physical address of the ION BUF */
1638 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1639 if (ret) {
1640
1641 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1642 ret);
1643 return ret;
1644 }
1645
1646 if (len < req.sb_len) {
1647 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1648 req.sb_len, len);
1649 return -EINVAL;
1650 }
1651 /* Populate the structure for sending scm call to load image */
1652 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1653 data->client.ihandle);
1654 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1655 pr_err("ION memory mapping for client shared buf failed\n");
1656 return -ENOMEM;
1657 }
1658 data->client.sb_phys = (phys_addr_t)pa;
1659 data->client.sb_length = req.sb_len;
1660 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1661 return 0;
1662}
1663
Zhen Kong26e62742018-05-04 17:19:06 -07001664static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
1665 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001666{
1667 int ret;
1668
1669 ret = (qseecom.send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001670 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001671}
1672
1673static int __qseecom_reentrancy_listener_has_sent_rsp(
1674 struct qseecom_dev_handle *data,
1675 struct qseecom_registered_listener_list *ptr_svc)
1676{
1677 int ret;
1678
1679 ret = (ptr_svc->send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001680 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001681}
1682
1683static void __qseecom_clean_listener_sglistinfo(
1684 struct qseecom_registered_listener_list *ptr_svc)
1685{
1686 if (ptr_svc->sglist_cnt) {
1687 memset(ptr_svc->sglistinfo_ptr, 0,
1688 SGLISTINFO_TABLE_SIZE);
1689 ptr_svc->sglist_cnt = 0;
1690 }
1691}
1692
/*
 * Service QSEOS_RESULT_INCOMPLETE responses from TZ.
 *
 * Each INCOMPLETE response names a listener (resp->data) that must
 * handle a request before the original command can finish: wake that
 * listener, sleep until it posts a response (or an abort), send the
 * response (plus its sglist table, if any) back down to TZ, and loop
 * until TZ returns a final result.
 *
 * listener_access_lock is taken for the listener bookkeeping and
 * dropped while sleeping on the response.  app_block_ref_cnt brackets
 * the whole loop.
 *
 * Returns 0 on success; rc records listener lookup/abort failures and
 * takes precedence over ret (SCM/cache-op failures) at return.
 */
static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	struct qseecom_client_listener_data_irsp send_data_rsp = {0};
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
									= {0};
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	qseecom.app_block_ref_cnt++;
	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up blocking lsitener service with the lstnr id
		 */
		mutex_lock(&listener_access_lock);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}

		/*
		 * Validate the lookup; any failure still reports
		 * QSEOS_RESULT_FAILURE back to TZ via err_resp.
		 */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_err("Service %d does not exist\n",
						lstnr);
			rc = -ERESTARTSYS;
			ptr_svc = NULL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->abort == 1) {
			pr_debug("Service %d abort %d\n",
						lstnr, ptr_svc->abort);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);
		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		/* drop the lock while sleeping on the listener's response */
		mutex_unlock(&listener_access_lock);
		do {
			/*
			 * When reentrancy is not supported, check global
			 * send_resp_flag; otherwise, check this listener's
			 * send_resp_flag.
			 */
			if (!qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
					__qseecom_listener_has_sent_rsp(
							data, ptr_svc))) {
				break;
			}

			if (qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);
		mutex_lock(&listener_access_lock);
		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		if (data->abort || ptr_svc->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}
err_resp:
		qseecom.send_resp_flag = 0;
		if (ptr_svc) {
			ptr_svc->send_resp_flag = 0;
			table = ptr_svc->sglistinfo_ptr;
		}
		/* build the listener response for TZ (32- or 64-bit layout) */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id = lstnr;
			send_data_rsp.status = status;
			if (table) {
				send_data_rsp.sglistinfo_ptr =
					(uint32_t)virt_to_phys(table);
				send_data_rsp.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				dmac_flush_range((void *)table,
					(void *)table + SGLISTINFO_TABLE_SIZE);
			}
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id = lstnr;
			send_data_rsp_64bit.status = status;
			if (table) {
				send_data_rsp_64bit.sglistinfo_ptr =
					virt_to_phys(table);
				send_data_rsp_64bit.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				dmac_flush_range((void *)table,
					(void *)table + SGLISTINFO_TABLE_SIZE);
			}
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		if (qseecom.whitelist_support == false || table == NULL)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		/* flush the listener's shared buffer before TZ reads it */
		if (ptr_svc && ptr_svc->ihandle) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto exit;
			}
		}

		/* RPMB/SSD listeners need the QSEE clock during the call */
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				goto exit;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		if (ptr_svc) {
			ptr_svc->listener_in_use = false;
			__qseecom_clean_listener_sglistinfo(ptr_svc);
		}
		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
				__qseecom_disable_clk(CLK_QSEE);
			goto exit;
		}
		pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
			status, resp->result, data->client.app_id, lstnr);
		if ((resp->result != QSEOS_RESULT_SUCCESS) &&
			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
			goto exit;
		}
exit:
		mutex_unlock(&listener_access_lock);
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
			__qseecom_disable_clk(CLK_QSEE);

	}
	qseecom.app_block_ref_cnt--;
	if (rc)
		return rc;

	return ret;
}
1886
/*
 * __qseecom_process_reentrancy_blocked_on_listener() - park a TZ app/session
 * that QSEE reported as blocked on a listener, until that listener frees up,
 * then tell TZ to resume the blocked app/session.
 *
 * @resp:    SCM response carrying the blocking listener id (resp->data) and
 *           the blocked session id (resp->resp_type); overwritten with the
 *           response of the CONTINUE_BLOCKED_REQ command on success.
 * @ptr_app: registered-app entry for the blocked app, or NULL to look it up
 *           from @data (app_id + app_name) under registered_app_list_lock.
 * @data:    device handle of the calling client.
 *
 * Called with app_access_lock held; temporarily drops it (and
 * listener_access_lock) while sleeping so other threads can service the
 * listener. Returns 0 when TZ answers QSEOS_RESULT_INCOMPLETE, negative errno
 * otherwise.
 */
static int __qseecom_process_reentrancy_blocked_on_listener(
				struct qseecom_command_scm_resp *resp,
				struct qseecom_registered_app_list *ptr_app,
				struct qseecom_dev_handle *data)
{
	struct qseecom_registered_listener_list *list_ptr;
	int ret = 0;
	struct qseecom_continue_blocked_request_ireq ireq;
	struct qseecom_command_scm_resp continue_resp;
	unsigned int session_id;
	sigset_t new_sigset;
	sigset_t old_sigset;
	unsigned long flags;
	bool found_app = false;

	if (!resp || !data) {
		pr_err("invalid resp or data pointer\n");
		ret = -EINVAL;
		goto exit;
	}

	/* find app_id & img_name from list */
	if (!ptr_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
					list) {
			if ((ptr_app->app_id == data->client.app_id) &&
				(!strcmp(ptr_app->app_name,
						data->client.app_name))) {
				found_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
					flags);
		if (!found_app) {
			pr_err("app_id %d (%s) is not found\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -ENOENT;
			goto exit;
		}
	}

	/* repeat as long as TZ keeps reporting the app blocked on a listener */
	do {
		session_id = resp->resp_type;
		mutex_lock(&listener_access_lock);
		list_ptr = __qseecom_find_svc(resp->data);
		if (!list_ptr) {
			pr_err("Invalid listener ID %d\n", resp->data);
			ret = -ENODATA;
			mutex_unlock(&listener_access_lock);
			goto exit;
		}
		ptr_app->blocked_on_listener_id = resp->data;

		pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
			resp->data, list_ptr->listener_in_use,
			session_id, data->client.app_id);

		/* sleep until listener is available */
		sigfillset(&new_sigset);
		/* block all signals so the freezable wait is not interrupted */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		do {
			qseecom.app_block_ref_cnt++;
			ptr_app->app_blocked = true;
			/*
			 * Drop both locks while sleeping so the listener
			 * thread can make progress and wake us.
			 */
			mutex_unlock(&listener_access_lock);
			mutex_unlock(&app_access_lock);
			wait_event_freezable(
				list_ptr->listener_block_app_wq,
				!list_ptr->listener_in_use);
			mutex_lock(&app_access_lock);
			mutex_lock(&listener_access_lock);
			ptr_app->app_blocked = false;
			qseecom.app_block_ref_cnt--;
		} while (list_ptr->listener_in_use);

		/* restore the caller's signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);

		ptr_app->blocked_on_listener_id = 0;
		pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
			resp->data, session_id, data->client.app_id);

		/* notify TZ that listener is available */
		ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;

		/* smcinvoke firmware addresses a session, legacy an app id */
		if (qseecom.smcinvoke_support)
			ireq.app_or_session_id = session_id;
		else
			ireq.app_or_session_id = data->client.app_id;

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					&ireq, sizeof(ireq),
					&continue_resp, sizeof(continue_resp));
		if (ret && qseecom.smcinvoke_support) {
			/* retry with legacy cmd */
			qseecom.smcinvoke_support = false;
			ireq.app_or_session_id = data->client.app_id;
			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				&ireq, sizeof(ireq),
				&continue_resp, sizeof(continue_resp));
			qseecom.smcinvoke_support = true;
			if (ret) {
				pr_err("unblock app %d or session %d fail\n",
					data->client.app_id, session_id);
				mutex_unlock(&listener_access_lock);
				goto exit;
			}
		}
		mutex_unlock(&listener_access_lock);
		resp->result = continue_resp.result;
		resp->resp_type = continue_resp.resp_type;
		resp->data = continue_resp.data;
		pr_debug("unblock resp = %d\n", resp->result);
	} while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);

	/* caller expects INCOMPLETE here; anything else is a protocol error */
	if (resp->result != QSEOS_RESULT_INCOMPLETE) {
		pr_err("Unexpected unblock resp %d\n", resp->result);
		ret = -EINVAL;
	}
exit:
	return ret;
}
2011
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002012static int __qseecom_reentrancy_process_incomplete_cmd(
2013 struct qseecom_dev_handle *data,
2014 struct qseecom_command_scm_resp *resp)
2015{
2016 int ret = 0;
2017 int rc = 0;
2018 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07002019 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
2020 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
2021 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002022 struct qseecom_registered_listener_list *ptr_svc = NULL;
2023 sigset_t new_sigset;
2024 sigset_t old_sigset;
2025 uint32_t status;
2026 void *cmd_buf = NULL;
2027 size_t cmd_len;
2028 struct sglist_info *table = NULL;
2029
Zhen Kong26e62742018-05-04 17:19:06 -07002030 while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002031 lstnr = resp->data;
2032 /*
2033 * Wake up blocking lsitener service with the lstnr id
2034 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002035 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002036 list_for_each_entry(ptr_svc,
2037 &qseecom.registered_listener_list_head, list) {
2038 if (ptr_svc->svc.listener_id == lstnr) {
2039 ptr_svc->listener_in_use = true;
2040 ptr_svc->rcv_req_flag = 1;
2041 wake_up_interruptible(&ptr_svc->rcv_req_wq);
2042 break;
2043 }
2044 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002045
2046 if (ptr_svc == NULL) {
2047 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07002048 rc = -EINVAL;
2049 status = QSEOS_RESULT_FAILURE;
2050 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002051 }
2052
2053 if (!ptr_svc->ihandle) {
2054 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07002055 rc = -EINVAL;
2056 status = QSEOS_RESULT_FAILURE;
2057 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002058 }
2059
2060 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07002061 pr_err("Service %d does not exist\n",
2062 lstnr);
2063 rc = -ERESTARTSYS;
2064 ptr_svc = NULL;
2065 status = QSEOS_RESULT_FAILURE;
2066 goto err_resp;
2067 }
2068
2069 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08002070 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07002071 lstnr, ptr_svc->abort);
2072 rc = -ENODEV;
2073 status = QSEOS_RESULT_FAILURE;
2074 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002075 }
Zhen Kong25731112018-09-20 13:10:03 -07002076
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002077 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
2078
2079 /* initialize the new signal mask with all signals*/
2080 sigfillset(&new_sigset);
2081
2082 /* block all signals */
2083 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2084
2085 /* unlock mutex btw waking listener and sleep-wait */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002086 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002087 mutex_unlock(&app_access_lock);
2088 do {
2089 if (!wait_event_freezable(qseecom.send_resp_wq,
2090 __qseecom_reentrancy_listener_has_sent_rsp(
2091 data, ptr_svc))) {
2092 break;
2093 }
2094 } while (1);
2095 /* lock mutex again after resp sent */
2096 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002097 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002098 ptr_svc->send_resp_flag = 0;
2099 qseecom.send_resp_flag = 0;
2100
2101 /* restore signal mask */
2102 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07002103 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002104 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
2105 data->client.app_id, lstnr, ret);
2106 rc = -ENODEV;
2107 status = QSEOS_RESULT_FAILURE;
2108 } else {
2109 status = QSEOS_RESULT_SUCCESS;
2110 }
Zhen Kong26e62742018-05-04 17:19:06 -07002111err_resp:
Zhen Kong7d500032018-08-06 16:58:31 -07002112 if (ptr_svc)
2113 table = ptr_svc->sglistinfo_ptr;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002114 if (qseecom.qsee_version < QSEE_VERSION_40) {
2115 send_data_rsp.listener_id = lstnr;
2116 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002117 if (table) {
2118 send_data_rsp.sglistinfo_ptr =
2119 (uint32_t)virt_to_phys(table);
2120 send_data_rsp.sglistinfo_len =
2121 SGLISTINFO_TABLE_SIZE;
2122 dmac_flush_range((void *)table,
2123 (void *)table + SGLISTINFO_TABLE_SIZE);
2124 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002125 cmd_buf = (void *)&send_data_rsp;
2126 cmd_len = sizeof(send_data_rsp);
2127 } else {
2128 send_data_rsp_64bit.listener_id = lstnr;
2129 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002130 if (table) {
2131 send_data_rsp_64bit.sglistinfo_ptr =
2132 virt_to_phys(table);
2133 send_data_rsp_64bit.sglistinfo_len =
2134 SGLISTINFO_TABLE_SIZE;
2135 dmac_flush_range((void *)table,
2136 (void *)table + SGLISTINFO_TABLE_SIZE);
2137 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002138 cmd_buf = (void *)&send_data_rsp_64bit;
2139 cmd_len = sizeof(send_data_rsp_64bit);
2140 }
Zhen Kong7d500032018-08-06 16:58:31 -07002141 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002142 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
2143 else
2144 *(uint32_t *)cmd_buf =
2145 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07002146 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002147 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
2148 ptr_svc->ihandle,
2149 ptr_svc->sb_virt, ptr_svc->sb_length,
2150 ION_IOC_CLEAN_INV_CACHES);
2151 if (ret) {
2152 pr_err("cache operation failed %d\n", ret);
2153 return ret;
2154 }
2155 }
2156 if (lstnr == RPMB_SERVICE) {
2157 ret = __qseecom_enable_clk(CLK_QSEE);
2158 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08002159 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002160 }
2161
2162 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2163 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07002164 if (ptr_svc) {
2165 ptr_svc->listener_in_use = false;
2166 __qseecom_clean_listener_sglistinfo(ptr_svc);
2167 wake_up_interruptible(&ptr_svc->listener_block_app_wq);
2168 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002169
2170 if (ret) {
2171 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
2172 ret, data->client.app_id);
2173 goto exit;
2174 }
2175
2176 switch (resp->result) {
2177 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
2178 pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
2179 lstnr, data->client.app_id, resp->data);
2180 if (lstnr == resp->data) {
2181 pr_err("lstnr %d should not be blocked!\n",
2182 lstnr);
2183 ret = -EINVAL;
2184 goto exit;
2185 }
2186 ret = __qseecom_process_reentrancy_blocked_on_listener(
2187 resp, NULL, data);
2188 if (ret) {
2189 pr_err("failed to process App(%d) %s blocked on listener %d\n",
2190 data->client.app_id,
2191 data->client.app_name, resp->data);
2192 goto exit;
2193 }
2194 case QSEOS_RESULT_SUCCESS:
2195 case QSEOS_RESULT_INCOMPLETE:
2196 break;
2197 default:
2198 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
2199 resp->result, data->client.app_id, lstnr);
2200 ret = -EINVAL;
2201 goto exit;
2202 }
2203exit:
Zhen Kongbcdeda22018-11-16 13:50:51 -08002204 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002205 if (lstnr == RPMB_SERVICE)
2206 __qseecom_disable_clk(CLK_QSEE);
2207
2208 }
2209 if (rc)
2210 return rc;
2211
2212 return ret;
2213}
2214
2215/*
2216 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2217 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2218 * So, needs to first check if no app blocked before sending OS level scm call,
2219 * then wait until all apps are unblocked.
2220 */
/*
 * __qseecom_reentrancy_check_if_no_app_blocked() - before issuing an OS-level
 * (Trusted-OS-owned) scm call on reentrancy phase 1/2 firmware, sleep until
 * no TZ app is blocked (qseecom.app_block_ref_cnt == 0).
 *
 * @smc_id: smc id of the call about to be made; only Trusted-OS-owned ids
 *          are gated.
 *
 * Called with app_access_lock held; drops it while sleeping so the blocked
 * app can be unblocked by another thread. All signals are masked around the
 * freezable wait.
 */
static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
{
	sigset_t new_sigset, old_sigset;

	if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
		qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
		IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
		/* thread sleep until this app unblocked */
		while (qseecom.app_block_ref_cnt > 0) {
			sigfillset(&new_sigset);
			sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
			mutex_unlock(&app_access_lock);
			do {
				if (!wait_event_freezable(qseecom.app_block_wq,
					(qseecom.app_block_ref_cnt == 0)))
					break;
			} while (1);
			mutex_lock(&app_access_lock);
			sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		}
	}
}
2243
2244/*
2245 * scm_call of send data will fail if this TA is blocked or there are more
2246 * than one TA requesting listener services; So, first check to see if need
2247 * to wait.
2248 */
/*
 * __qseecom_reentrancy_check_if_this_app_blocked() - before a send-data scm
 * call, sleep while this app is blocked or while more than one app holds a
 * listener block (app_block_ref_cnt > 1).
 *
 * @ptr_app: registered-app entry for the calling app.
 *
 * Called with app_access_lock held; drops it while sleeping. check_block is
 * incremented for the duration so unload logic can tell the app is parked
 * here (see qseecom_unload_app).
 */
static void __qseecom_reentrancy_check_if_this_app_blocked(
			struct qseecom_registered_app_list *ptr_app)
{
	sigset_t new_sigset, old_sigset;

	if (qseecom.qsee_reentrancy_support) {
		ptr_app->check_block++;
		while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
			/* thread sleep until this app unblocked */
			sigfillset(&new_sigset);
			sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
			mutex_unlock(&app_access_lock);
			do {
				if (!wait_event_freezable(qseecom.app_block_wq,
					(!ptr_app->app_blocked &&
					qseecom.app_block_ref_cnt <= 1)))
					break;
			} while (1);
			mutex_lock(&app_access_lock);
			sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		}
		ptr_app->check_block--;
	}
}
2273
2274static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2275 uint32_t *app_id)
2276{
2277 int32_t ret;
2278 struct qseecom_command_scm_resp resp;
2279 bool found_app = false;
2280 struct qseecom_registered_app_list *entry = NULL;
2281 unsigned long flags = 0;
2282
2283 if (!app_id) {
2284 pr_err("Null pointer to app_id\n");
2285 return -EINVAL;
2286 }
2287 *app_id = 0;
2288
2289 /* check if app exists and has been registered locally */
2290 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2291 list_for_each_entry(entry,
2292 &qseecom.registered_app_list_head, list) {
2293 if (!strcmp(entry->app_name, req.app_name)) {
2294 found_app = true;
2295 break;
2296 }
2297 }
2298 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2299 if (found_app) {
2300 pr_debug("Found app with id %d\n", entry->app_id);
2301 *app_id = entry->app_id;
2302 return 0;
2303 }
2304
2305 memset((void *)&resp, 0, sizeof(resp));
2306
2307 /* SCM_CALL to check if app_id for the mentioned app exists */
2308 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2309 sizeof(struct qseecom_check_app_ireq),
2310 &resp, sizeof(resp));
2311 if (ret) {
2312 pr_err("scm_call to check if app is already loaded failed\n");
2313 return -EINVAL;
2314 }
2315
2316 if (resp.result == QSEOS_RESULT_FAILURE)
2317 return 0;
2318
2319 switch (resp.resp_type) {
2320 /*qsee returned listener type response */
2321 case QSEOS_LISTENER_ID:
2322 pr_err("resp type is of listener type instead of app");
2323 return -EINVAL;
2324 case QSEOS_APP_ID:
2325 *app_id = resp.data;
2326 return 0;
2327 default:
2328 pr_err("invalid resp type (%d) from qsee",
2329 resp.resp_type);
2330 return -ENODEV;
2331 }
2332}
2333
/*
 * qseecom_load_app() - ioctl backend that loads a trusted app into QSEE.
 *
 * Copies a qseecom_load_img_req from userspace, lazily loads the 32/64-bit
 * common library, votes bus bandwidth and the SFPB clock, and either bumps
 * the ref count of an already-loaded app or performs an APP_START scm call
 * with the app image supplied through an ion fd. On success the resulting
 * app id is recorded in @data and copied back to userspace.
 *
 * @data: per-client device handle (app id/name/arch are filled in here).
 * @argp: userspace pointer to a struct qseecom_load_img_req.
 *
 * Returns 0 on success, negative errno on failure. Clock/bus votes are
 * released on every exit path via the loadapp_err/enable_clk_err labels.
 */
static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
{
	struct qseecom_registered_app_list *entry = NULL;
	unsigned long flags = 0;
	u32 app_id = 0;
	struct ion_handle *ihandle;	/* Ion handle */
	struct qseecom_load_img_req load_img_req;
	int32_t ret = 0;
	ion_phys_addr_t pa = 0;
	size_t len;
	struct qseecom_command_scm_resp resp;
	struct qseecom_check_app_ireq req;
	struct qseecom_load_app_ireq load_req;
	struct qseecom_load_app_64bit_ireq load_req_64bit;
	void *cmd_buf = NULL;
	size_t cmd_len;
	bool first_time = false;

	/* Copy the relevant information needed for loading the image */
	if (copy_from_user(&load_img_req,
				(void __user *)argp,
				sizeof(struct qseecom_load_img_req))) {
		pr_err("copy_from_user failed\n");
		return -EFAULT;
	}

	/* Check and load cmnlib */
	if (qseecom.qsee_version > QSEEE_VERSION_00) {
		if (!qseecom.commonlib_loaded &&
				load_img_req.app_arch == ELFCLASS32) {
			ret = qseecom_load_commonlib_image(data, "cmnlib");
			if (ret) {
				pr_err("failed to load cmnlib\n");
				return -EIO;
			}
			qseecom.commonlib_loaded = true;
			pr_debug("cmnlib is loaded\n");
		}

		if (!qseecom.commonlib64_loaded &&
				load_img_req.app_arch == ELFCLASS64) {
			ret = qseecom_load_commonlib_image(data, "cmnlib64");
			if (ret) {
				pr_err("failed to load cmnlib64\n");
				return -EIO;
			}
			qseecom.commonlib64_loaded = true;
			pr_debug("cmnlib64 is loaded\n");
		}
	}

	/* vote bus bandwidth before touching the clock */
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret)
			return ret;
	}

	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret)
		goto enable_clk_err;

	req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
	load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
	strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);

	ret = __qseecom_check_app_exists(req, &app_id);
	if (ret < 0)
		goto loadapp_err;

	if (app_id) {
		/* already loaded: just take another reference */
		pr_debug("App id %d (%s) already exists\n", app_id,
			(char *)(req.app_name));
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(entry,
		&qseecom.registered_app_list_head, list){
			if (entry->app_id == app_id) {
				entry->ref_cnt++;
				break;
			}
		}
		spin_unlock_irqrestore(
			&qseecom.registered_app_list_lock, flags);
		ret = 0;
	} else {
		first_time = true;
		pr_warn("App (%s) does'nt exist, loading apps for first time\n",
			(char *)(load_img_req.img_name));
		/* Get the handle of the shared fd */
		ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					load_img_req.ifd_data_fd);
		if (IS_ERR_OR_NULL(ihandle)) {
			pr_err("Ion client could not retrieve the handle\n");
			ret = -ENOMEM;
			goto loadapp_err;
		}

		/* Get the physical address of the ION BUF */
		ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
		if (ret) {
			pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
				ret);
			goto loadapp_err;
		}
		/* image must fit inside the ion buffer */
		if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
			pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
				len, load_img_req.mdt_len,
				load_img_req.img_len);
			ret = -EINVAL;
			goto loadapp_err;
		}
		/* Populate the structure for sending scm call to load image */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
			load_req.mdt_len = load_img_req.mdt_len;
			load_req.img_len = load_img_req.img_len;
			strlcpy(load_req.app_name, load_img_req.img_name,
				MAX_APP_NAME_SIZE);
			load_req.phy_addr = (uint32_t)pa;
			cmd_buf = (void *)&load_req;
			cmd_len = sizeof(struct qseecom_load_app_ireq);
		} else {
			load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
			load_req_64bit.mdt_len = load_img_req.mdt_len;
			load_req_64bit.img_len = load_img_req.img_len;
			strlcpy(load_req_64bit.app_name, load_img_req.img_name,
				MAX_APP_NAME_SIZE);
			load_req_64bit.phy_addr = (uint64_t)pa;
			cmd_buf = (void *)&load_req_64bit;
			cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
		}

		/* make the image visible to TZ before the scm call */
		ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
		if (ret) {
			pr_err("cache operation failed %d\n", ret);
			goto loadapp_err;
		}

		/* SCM_CALL to load the app and get the app_id back */
		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
			cmd_len, &resp, sizeof(resp));
		if (ret) {
			pr_err("scm_call to load app failed\n");
			if (!IS_ERR_OR_NULL(ihandle))
				ion_free(qseecom.ion_clnt, ihandle);
			ret = -EINVAL;
			goto loadapp_err;
		}

		if (resp.result == QSEOS_RESULT_FAILURE) {
			pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
			if (!IS_ERR_OR_NULL(ihandle))
				ion_free(qseecom.ion_clnt, ihandle);
			ret = -EFAULT;
			goto loadapp_err;
		}

		/* INCOMPLETE means a listener must be serviced first */
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
					ret);
				if (!IS_ERR_OR_NULL(ihandle))
					ion_free(qseecom.ion_clnt, ihandle);
				ret = -EFAULT;
				goto loadapp_err;
			}
		}

		if (resp.result != QSEOS_RESULT_SUCCESS) {
			pr_err("scm_call failed resp.result unknown, %d\n",
				resp.result);
			if (!IS_ERR_OR_NULL(ihandle))
				ion_free(qseecom.ion_clnt, ihandle);
			ret = -EFAULT;
			goto loadapp_err;
		}

		app_id = resp.data;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			ret = -ENOMEM;
			goto loadapp_err;
		}
		entry->app_id = app_id;
		entry->ref_cnt = 1;
		entry->app_arch = load_img_req.app_arch;
		/*
		 * keymaster app may be first loaded as "keymaste" by qseecomd,
		 * and then used as "keymaster" on some targets. To avoid app
		 * name checking error, register "keymaster" into app_list and
		 * thread private data.
		 */
		if (!strcmp(load_img_req.img_name, "keymaste"))
			strlcpy(entry->app_name, "keymaster",
					MAX_APP_NAME_SIZE);
		else
			strlcpy(entry->app_name, load_img_req.img_name,
					MAX_APP_NAME_SIZE);
		entry->app_blocked = false;
		entry->blocked_on_listener_id = 0;
		entry->check_block = 0;

		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);

		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_add_tail(&entry->list, &qseecom.registered_app_list_head);
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
			flags);

		pr_warn("App with id %u (%s) now loaded\n", app_id,
		(char *)(load_img_req.img_name));
	}
	data->client.app_id = app_id;
	data->client.app_arch = load_img_req.app_arch;
	if (!strcmp(load_img_req.img_name, "keymaste"))
		strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
	else
		strlcpy(data->client.app_name, load_img_req.img_name,
			MAX_APP_NAME_SIZE);
	load_img_req.app_id = app_id;
	if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
		pr_err("copy_to_user failed\n");
		ret = -EFAULT;
		/* undo the registration made above for a first-time load */
		if (first_time == true) {
			spin_lock_irqsave(
				&qseecom.registered_app_list_lock, flags);
			list_del(&entry->list);
			spin_unlock_irqrestore(
				&qseecom.registered_app_list_lock, flags);
			kzfree(entry);
		}
	}

loadapp_err:
	__qseecom_disable_clk_scale_down(data);
enable_clk_err:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}
	return ret;
}
2584
2585static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2586{
2587 int ret = 1; /* Set unload app */
2588
2589 wake_up_all(&qseecom.send_resp_wq);
2590 if (qseecom.qsee_reentrancy_support)
2591 mutex_unlock(&app_access_lock);
2592 while (atomic_read(&data->ioctl_count) > 1) {
2593 if (wait_event_freezable(data->abort_wq,
2594 atomic_read(&data->ioctl_count) <= 1)) {
2595 pr_err("Interrupted from abort\n");
2596 ret = -ERESTARTSYS;
2597 break;
2598 }
2599 }
2600 if (qseecom.qsee_reentrancy_support)
2601 mutex_lock(&app_access_lock);
2602 return ret;
2603}
2604
2605static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2606{
2607 int ret = 0;
2608
2609 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2610 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2611 ion_free(qseecom.ion_clnt, data->client.ihandle);
2612 data->client.ihandle = NULL;
2613 }
2614 return ret;
2615}
2616
/*
 * qseecom_unload_app() - unload a trusted app (or drop one reference to it).
 *
 * Drains in-flight ioctls, waits for reentrancy quiescence, then decides —
 * under registered_app_list_lock — whether to actually shut the app down in
 * TZ: on the last reference, or on @app_crash (unless the app is currently
 * blocked/parked, in which case crash handling is downgraded). Refcount
 * bookkeeping and list removal happen at unload_exit regardless of whether
 * the scm call succeeded. The keymaster app is never unloaded from TZ.
 *
 * @data:      per-client device handle; its ion memory is unmapped and
 *             'released' is set before returning.
 * @app_crash: true when called from a crash/cleanup path.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int qseecom_unload_app(struct qseecom_dev_handle *data,
				bool app_crash)
{
	unsigned long flags;
	unsigned long flags1;
	int ret = 0;
	struct qseecom_command_scm_resp resp;
	struct qseecom_registered_app_list *ptr_app = NULL;
	bool unload = false;
	bool found_app = false;
	bool found_dead_app = false;

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	/* keymaster must stay resident in TZ */
	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
		pr_debug("Do not unload keymaster app from tz\n");
		goto unload_exit;
	}

	__qseecom_cleanup_app(data);
	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);

	if (data->client.app_id > 0) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
									list) {
			if (ptr_app->app_id == data->client.app_id) {
				if (!strcmp((void *)ptr_app->app_name,
					(void *)data->client.app_name)) {
					found_app = true;
					/*
					 * Don't force-unload an app that is
					 * blocked on a listener or parked in
					 * the check_block wait.
					 */
					if (ptr_app->app_blocked ||
							ptr_app->check_block)
						app_crash = false;
					if (app_crash || ptr_app->ref_cnt == 1)
						unload = true;
					break;
				}
				/* same id but different name */
				found_dead_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags);
		if (found_app == false && found_dead_app == false) {
			pr_err("Cannot find app with id = %d (%s)\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -EINVAL;
			goto unload_exit;
		}
	}

	if (found_dead_app)
		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
			(char *)data->client.app_name);

	if (unload) {
		struct qseecom_unload_app_ireq req;
		/* Populate the structure for sending scm call to load image */
		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
		req.app_id = data->client.app_id;

		/* SCM_CALL to unload the app */
		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(struct qseecom_unload_app_ireq),
				&resp, sizeof(resp));
		if (ret) {
			pr_err("scm_call to unload app (id = %d) failed\n",
								req.app_id);
			ret = -EFAULT;
			goto unload_exit;
		} else {
			pr_warn("App id %d now unloaded\n", req.app_id);
		}
		if (resp.result == QSEOS_RESULT_FAILURE) {
			pr_err("app (%d) unload_failed!!\n",
					data->client.app_id);
			ret = -EFAULT;
			goto unload_exit;
		}
		if (resp.result == QSEOS_RESULT_SUCCESS)
			pr_debug("App (%d) is unloaded!!\n",
					data->client.app_id);
		/* INCOMPLETE: a listener must be serviced to finish unload */
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd fail err: %d\n",
									ret);
				goto unload_exit;
			}
		}
	}

unload_exit:
	/* refcount bookkeeping runs even when the scm call failed above */
	if (found_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
		if (app_crash) {
			ptr_app->ref_cnt = 0;
			pr_debug("app_crash: ref_count = 0\n");
		} else {
			if (ptr_app->ref_cnt == 1) {
				ptr_app->ref_cnt = 0;
				pr_debug("ref_count set to 0\n");
			} else {
				ptr_app->ref_cnt--;
				pr_debug("Can't unload app(%d) inuse\n",
					ptr_app->app_id);
			}
		}
		if (unload) {
			list_del(&ptr_app->list);
			kzfree(ptr_app);
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags1);
	}
	qseecom_unmap_ion_allocated_memory(data);
	data->released = true;
	return ret;
}
2740
2741static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2742 unsigned long virt)
2743{
2744 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2745}
2746
2747static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2748 unsigned long virt)
2749{
2750 return (uintptr_t)data->client.sb_virt +
2751 (virt - data->client.user_virt_sb_base);
2752}
2753
2754int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2755 struct qseecom_send_svc_cmd_req *req_ptr,
2756 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2757{
2758 int ret = 0;
2759 void *req_buf = NULL;
2760
2761 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2762 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2763 req_ptr, send_svc_ireq_ptr);
2764 return -EINVAL;
2765 }
2766
2767 /* Clients need to ensure req_buf is at base offset of shared buffer */
2768 if ((uintptr_t)req_ptr->cmd_req_buf !=
2769 data_ptr->client.user_virt_sb_base) {
2770 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2771 return -EINVAL;
2772 }
2773
2774 if (data_ptr->client.sb_length <
2775 sizeof(struct qseecom_rpmb_provision_key)) {
2776 pr_err("shared buffer is too small to hold key type\n");
2777 return -EINVAL;
2778 }
2779 req_buf = data_ptr->client.sb_virt;
2780
2781 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2782 send_svc_ireq_ptr->key_type =
2783 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2784 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2785 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2786 data_ptr, (uintptr_t)req_ptr->resp_buf));
2787 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2788
2789 return ret;
2790}
2791
/*
 * Marshal an FSM key-service request into the TZ ireq format.
 *
 * Unlike the RPMB path, both the command buffer and the response buffer
 * are translated to physical addresses, and the only extra check is that
 * both fit inside the client's shared buffer together.
 *
 * Returns 0 on success, -EINVAL on bad pointers, -ENOMEM if the shared
 * buffer cannot hold both request and response.
 */
int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
		struct qseecom_send_svc_cmd_req *req_ptr,
		struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
{
	int ret = 0;
	uint32_t reqd_len_sb_in = 0;

	if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
		pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
			req_ptr, send_svc_ireq_ptr);
		return -EINVAL;
	}

	/* Request and response share the buffer; both must fit at once. */
	reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
	if (reqd_len_sb_in > data_ptr->client.sb_length) {
		pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
		pr_err("Required: %u, Available: %zu\n",
			reqd_len_sb_in, data_ptr->client.sb_length);
		return -ENOMEM;
	}

	send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
	send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
	/* TZ consumes physical addresses for both buffers. */
	send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
			data_ptr, (uintptr_t)req_ptr->resp_buf));
	send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;

	send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
			data_ptr, (uintptr_t)req_ptr->cmd_req_buf));


	return ret;
}
2825
/*
 * Validate a userspace service command against the client's shared buffer.
 *
 * Checks (in order): non-NULL request/buffers, initialized client state,
 * both buffers lie inside the shared buffer, lengths are non-zero and
 * bounded, no integer overflow in any length/pointer sum, and finally
 * that buffer + length stays within the shared buffer region.
 *
 * Check ordering matters: the overflow checks must precede the sums they
 * protect, so the code is kept exactly as-is.
 *
 * Returns 0 if the request is safe to marshal, -EINVAL/-ENOMEM otherwise.
 */
static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_svc_cmd_req *req)
{
	if (!req || !req->resp_buf || !req->cmd_req_buf) {
		pr_err("req or cmd buffer or response buffer is null\n");
		return -EINVAL;
	}

	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	if (data->client.sb_virt == NULL) {
		pr_err("sb_virt null\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base == 0) {
		pr_err("user_virt_sb_base is null\n");
		return -EINVAL;
	}

	if (data->client.sb_length == 0) {
		pr_err("sb_length is 0\n");
		return -EINVAL;
	}

	/* Both buffers must start inside the shared buffer. */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* Guard the sum below against 32-bit wraparound. */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	/* Guard the pointer + length sums in the final region check. */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
				(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* Both (buffer + length) ranges must end inside the shared buffer. */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
2910
2911static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
2912 void __user *argp)
2913{
2914 int ret = 0;
2915 struct qseecom_client_send_service_ireq send_svc_ireq;
2916 struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
2917 struct qseecom_command_scm_resp resp;
2918 struct qseecom_send_svc_cmd_req req;
2919 void *send_req_ptr;
2920 size_t req_buf_size;
2921
2922 /*struct qseecom_command_scm_resp resp;*/
2923
2924 if (copy_from_user(&req,
2925 (void __user *)argp,
2926 sizeof(req))) {
2927 pr_err("copy_from_user failed\n");
2928 return -EFAULT;
2929 }
2930
2931 if (__validate_send_service_cmd_inputs(data, &req))
2932 return -EINVAL;
2933
2934 data->type = QSEECOM_SECURE_SERVICE;
2935
2936 switch (req.cmd_id) {
2937 case QSEOS_RPMB_PROVISION_KEY_COMMAND:
2938 case QSEOS_RPMB_ERASE_COMMAND:
2939 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
2940 send_req_ptr = &send_svc_ireq;
2941 req_buf_size = sizeof(send_svc_ireq);
2942 if (__qseecom_process_rpmb_svc_cmd(data, &req,
2943 send_req_ptr))
2944 return -EINVAL;
2945 break;
2946 case QSEOS_FSM_LTEOTA_REQ_CMD:
2947 case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
2948 case QSEOS_FSM_IKE_REQ_CMD:
2949 case QSEOS_FSM_IKE_REQ_RSP_CMD:
2950 case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
2951 case QSEOS_FSM_OEM_FUSE_READ_ROW:
2952 case QSEOS_FSM_ENCFS_REQ_CMD:
2953 case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
2954 send_req_ptr = &send_fsm_key_svc_ireq;
2955 req_buf_size = sizeof(send_fsm_key_svc_ireq);
2956 if (__qseecom_process_fsm_key_svc_cmd(data, &req,
2957 send_req_ptr))
2958 return -EINVAL;
2959 break;
2960 default:
2961 pr_err("Unsupported cmd_id %d\n", req.cmd_id);
2962 return -EINVAL;
2963 }
2964
2965 if (qseecom.support_bus_scaling) {
2966 ret = qseecom_scale_bus_bandwidth_timer(HIGH);
2967 if (ret) {
2968 pr_err("Fail to set bw HIGH\n");
2969 return ret;
2970 }
2971 } else {
2972 ret = qseecom_perf_enable(data);
2973 if (ret) {
2974 pr_err("Failed to vote for clocks with err %d\n", ret);
2975 goto exit;
2976 }
2977 }
2978
2979 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
2980 data->client.sb_virt, data->client.sb_length,
2981 ION_IOC_CLEAN_INV_CACHES);
2982 if (ret) {
2983 pr_err("cache operation failed %d\n", ret);
2984 goto exit;
2985 }
2986 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2987 (const void *)send_req_ptr,
2988 req_buf_size, &resp, sizeof(resp));
2989 if (ret) {
2990 pr_err("qseecom_scm_call failed with err: %d\n", ret);
2991 if (!qseecom.support_bus_scaling) {
2992 qsee_disable_clock_vote(data, CLK_DFAB);
2993 qsee_disable_clock_vote(data, CLK_SFPB);
2994 } else {
2995 __qseecom_add_bw_scale_down_timer(
2996 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
2997 }
2998 goto exit;
2999 }
3000 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3001 data->client.sb_virt, data->client.sb_length,
3002 ION_IOC_INV_CACHES);
3003 if (ret) {
3004 pr_err("cache operation failed %d\n", ret);
3005 goto exit;
3006 }
3007 switch (resp.result) {
3008 case QSEOS_RESULT_SUCCESS:
3009 break;
3010 case QSEOS_RESULT_INCOMPLETE:
3011 pr_debug("qseos_result_incomplete\n");
3012 ret = __qseecom_process_incomplete_cmd(data, &resp);
3013 if (ret) {
3014 pr_err("process_incomplete_cmd fail with result: %d\n",
3015 resp.result);
3016 }
3017 if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
3018 pr_warn("RPMB key status is 0x%x\n", resp.result);
Brahmaji Kb33e26e2017-06-01 17:20:10 +05303019 if (put_user(resp.result,
3020 (uint32_t __user *)req.resp_buf)) {
3021 ret = -EINVAL;
3022 goto exit;
3023 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003024 ret = 0;
3025 }
3026 break;
3027 case QSEOS_RESULT_FAILURE:
3028 pr_err("scm call failed with resp.result: %d\n", resp.result);
3029 ret = -EINVAL;
3030 break;
3031 default:
3032 pr_err("Response result %d not supported\n",
3033 resp.result);
3034 ret = -EINVAL;
3035 break;
3036 }
3037 if (!qseecom.support_bus_scaling) {
3038 qsee_disable_clock_vote(data, CLK_DFAB);
3039 qsee_disable_clock_vote(data, CLK_SFPB);
3040 } else {
3041 __qseecom_add_bw_scale_down_timer(
3042 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3043 }
3044
3045exit:
3046 return ret;
3047}
3048
/*
 * Validate a userspace send-command request against the client's shared
 * buffer.  Mirrors __validate_send_service_cmd_inputs() except that a
 * zero-length response is permitted here (resp_len may be 0 when
 * resp_buf is NULL).
 *
 * NOTE(review): the first check allows resp_buf == NULL when resp_len
 * is 0, but the later range check rejects a NULL resp_buf anyway since
 * user_virt_sb_base is non-zero for mapped clients — confirm whether a
 * NULL resp_buf is ever expected to pass.
 *
 * Returns 0 if safe, -EINVAL/-ENOMEM otherwise.
 */
static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)

{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}
	if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
					(req->cmd_req_buf == NULL)) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* Both buffers must start inside the shared buffer. */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* Guard the sum below against 32-bit wraparound. */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	/* Guard the pointer + length sums in the final region check. */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* Both (buffer + length) ranges must end inside the shared buffer. */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
3118
/*
 * Post-process an SCM response when QSEE reentrancy is supported.
 *
 * BLOCKED_ON_LISTENER: the TA is waiting on a listener; unblock it and
 * then fall through to finish the still-pending command via the
 * INCOMPLETE path.  INCOMPLETE: bump the global block refcount, mark the
 * app blocked for the duration of the continuation, then wake any waiters.
 *
 * Returns 0 on success, negative errno on failure.
 */
int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
			struct qseecom_registered_app_list *ptr_app,
			struct qseecom_dev_handle *data)
{
	int ret = 0;

	switch (resp->result) {
	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
		pr_warn("App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name,
			resp->data);
		ret = __qseecom_process_reentrancy_blocked_on_listener(
			resp, ptr_app, data);
		if (ret) {
			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name, resp->data);
			return ret;
		}
		/* fall through: on success, continue processing the
		 * command as incomplete in the next case.
		 */

	case QSEOS_RESULT_INCOMPLETE:
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
		/* Let any app waiting on this block proceed. */
		wake_up_interruptible(&qseecom.app_block_wq);
		if (ret)
			pr_err("process_incomplete_cmd failed err: %d\n",
				ret);
		return ret;
	case QSEOS_RESULT_SUCCESS:
		return ret;
	default:
		pr_err("Response result %d not supported\n",
						resp->result);
		return -EINVAL;
	}
}
3157
/*
 * Send a validated client command to its trusted app via SCM.
 *
 * Locates the registered app for @data, builds the 32- or 64-bit ireq
 * depending on the QSEE version (translating the userspace buffers to
 * physical addresses), performs the cache maintenance around the SCM
 * call, and post-processes the response (reentrancy-aware if supported).
 *
 * Inputs must already have passed __validate_send_cmd_inputs().
 * Returns 0 on success or a negative errno.
 */
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)
{
	int ret = 0;
	int ret2 = 0;
	u32 reqd_len_sb_in = 0;
	struct qseecom_client_send_data_ireq send_data_req = {0};
	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
	struct qseecom_command_scm_resp resp;
	unsigned long flags;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;

	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
								list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);

	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* Pre-QSEE-4.0 uses the 32-bit ireq layout. */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		send_data_req.app_id = data->client.app_id;
		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->cmd_req_buf));
		send_data_req.req_len = req->cmd_req_len;
		send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->resp_buf));
		send_data_req.rsp_len = req->resp_len;
		send_data_req.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* Flush the sglist table so TZ sees a coherent copy. */
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req;
		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
	} else {
		send_data_req_64bit.app_id = data->client.app_id;
		send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->cmd_req_buf);
		send_data_req_64bit.req_len = req->cmd_req_len;
		send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->resp_buf);
		send_data_req_64bit.rsp_len = req->resp_len;
		/* check if 32bit app's phys_addr region is under 4GB.*/
		if ((data->client.app_arch == ELFCLASS32) &&
			((send_data_req_64bit.req_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
			(send_data_req_64bit.rsp_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
				data->client.app_name,
				send_data_req_64bit.req_ptr,
				send_data_req_64bit.req_len,
				send_data_req_64bit.rsp_ptr,
				send_data_req_64bit.rsp_len);
			return -EFAULT;
		}
		send_data_req_64bit.sglistinfo_ptr =
				(uint64_t)virt_to_phys(table);
		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req_64bit;
		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
	}

	/* First word of either ireq layout is the TZ command id. */
	if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
	else
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;

	/* Flush only the region TZ will actually read (req + resp). */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/*
	 * Invalidate the shared buffer on every path (including errors) so
	 * the CPU does not keep stale lines; preserve the original error
	 * code in ret unless the cache op itself fails.
	 */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret2) {
		pr_err("cache operation failed %d\n", ret2);
		return ret2;
	}
	__qseecom_processing_pending_lsnr_unregister();
	return ret;
}
3296
3297static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3298{
3299 int ret = 0;
3300 struct qseecom_send_cmd_req req;
3301
3302 ret = copy_from_user(&req, argp, sizeof(req));
3303 if (ret) {
3304 pr_err("copy_from_user failed\n");
3305 return ret;
3306 }
3307
3308 if (__validate_send_cmd_inputs(data, &req))
3309 return -EINVAL;
3310
3311 ret = __qseecom_send_cmd(data, &req);
3312
3313 if (ret)
3314 return ret;
3315
3316 return ret;
3317}
3318
3319int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3320 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3321 struct qseecom_dev_handle *data, int i) {
3322
3323 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3324 (req->ifd_data[i].fd > 0)) {
3325 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3326 (req->ifd_data[i].cmd_buf_offset >
3327 req->cmd_req_len - sizeof(uint32_t))) {
3328 pr_err("Invalid offset (req len) 0x%x\n",
3329 req->ifd_data[i].cmd_buf_offset);
3330 return -EINVAL;
3331 }
3332 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3333 (lstnr_resp->ifd_data[i].fd > 0)) {
3334 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3335 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3336 lstnr_resp->resp_len - sizeof(uint32_t))) {
3337 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3338 lstnr_resp->ifd_data[i].cmd_buf_offset);
3339 return -EINVAL;
3340 }
3341 }
3342 return 0;
3343}
3344
/*
 * Patch physical addresses of ion buffers into a modfd command (32-bit
 * address format), or undo the patching when @cleanup is true.
 *
 * For each populated ifd_data[] slot: import the ion fd, locate the
 * patch position @field inside the command/response buffer, and write
 * either a single 32-bit physical address (one sg entry) or an array of
 * qseecom_sg_entry records (multiple entries).  Cache maintenance is
 * performed on the imported buffer (invalidate on cleanup, clean+inv
 * before a send), and on the send path the per-fd sglist bookkeeping is
 * recorded for the whitelist ireq.
 *
 * The statement order (boundary checks before writes, cache ops after
 * patching, ion_free on every path) is intentional; code left byte-
 * identical, comments only.
 *
 * Returns 0 on success, -EFAULT/-EINVAL/-ENOMEM on failure.
 */
static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* msg is a listener response or a client modfd request. */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
				(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			/* field = patch position inside the cmd buffer. */
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_err("Num of scattered entries");
			pr_err(" (%d) is greater than max supported %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: patch a bare 32-bit address. */
			uint32_t *update;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				/*
				 * Check if sg list phy add region is under 4GB
				 */
				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
					(!cleanup) &&
					((uint64_t)sg_dma_address(sg_ptr->sgl)
					>= PHY_ADDR_4G - sg->length)) {
					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
						data->client.app_name,
						&(sg_dma_address(sg_ptr->sgl)),
						sg->length);
					goto err;
				}
				update = (uint32_t *) field;
				*update = cleanup ? 0 :
					(uint32_t)sg_dma_address(sg_ptr->sgl);
			} else {
				pr_err("QSEE app arch %u is not supported\n",
							data->client.app_arch);
				goto err;
			}
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: patch an sg-entry array. */
			struct qseecom_sg_entry *update;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
						(req->cmd_req_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
						SG_ENTRY_SZ * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					goto err;
				}
			}
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				update = (struct qseecom_sg_entry *)field;
				for (j = 0; j < sg_ptr->nents; j++) {
					/*
					 * Check if sg list PA is under 4GB
					 */
					if ((qseecom.qsee_version >=
						QSEE_VERSION_40) &&
						(!cleanup) &&
						((uint64_t)(sg_dma_address(sg))
						>= PHY_ADDR_4G - sg->length)) {
						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
							data->client.app_name,
							&(sg_dma_address(sg)),
							sg->length);
						goto err;
					}
					update->phys_addr = cleanup ? 0 :
						(uint32_t)sg_dma_address(sg);
					update->len = cleanup ? 0 : sg->length;
					update++;
					len += sg->length;
					sg = sg_next(sg);
				}
			} else {
				pr_err("QSEE app arch %u is not supported\n",
							data->client.app_arch);
				goto err;
			}
		}

		if (cleanup) {
			/* Post-TZ: invalidate so CPU sees TZ's writes. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Pre-TZ: flush, then record sglist bookkeeping. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3561
3562static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3563 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3564{
3565 struct scatterlist *sg = sg_ptr->sgl;
3566 struct qseecom_sg_entry_64bit *sg_entry;
3567 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3568 void *buf;
3569 uint i;
3570 size_t size;
3571 dma_addr_t coh_pmem;
3572
3573 if (fd_idx >= MAX_ION_FD) {
3574 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3575 return -ENOMEM;
3576 }
3577 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3578 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3579 /* Allocate a contiguous kernel buffer */
3580 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3581 size = (size + PAGE_SIZE) & PAGE_MASK;
3582 buf = dma_alloc_coherent(qseecom.pdev,
3583 size, &coh_pmem, GFP_KERNEL);
3584 if (buf == NULL) {
3585 pr_err("failed to alloc memory for sg buf\n");
3586 return -ENOMEM;
3587 }
3588 /* update qseecom_sg_list_buf_hdr_64bit */
3589 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3590 buf_hdr->new_buf_phys_addr = coh_pmem;
3591 buf_hdr->nents_total = sg_ptr->nents;
3592 /* save the left sg entries into new allocated buf */
3593 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3594 for (i = 0; i < sg_ptr->nents; i++) {
3595 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3596 sg_entry->len = sg->length;
3597 sg_entry++;
3598 sg = sg_next(sg);
3599 }
3600
3601 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3602 data->client.sec_buf_fd[fd_idx].vbase = buf;
3603 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3604 data->client.sec_buf_fd[fd_idx].size = size;
3605
3606 return 0;
3607}
3608
/*
 * 64-bit variant of __qseecom_update_cmd_buf(): patch 64-bit physical
 * addresses / qseecom_sg_entry_64bit arrays into a modfd command, or
 * undo the patching when @cleanup is true.
 *
 * Differs from the 32-bit path in two ways: addresses are written as
 * 64-bit values (no 4G limit check needed), and an sg list larger than
 * QSEECOM_MAX_SG_ENTRY is spilled into a DMA-coherent side buffer via
 * __qseecom_allocate_sg_list_buffer() (freed again on cleanup).
 *
 * Statement order is intentional; code left byte-identical, comments
 * only.
 *
 * Returns 0 on success, -EFAULT/-EINVAL/-ENOMEM on failure.
 */
static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* msg is a listener response or a client modfd request. */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
				(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			/* field = patch position inside the cmd buffer. */
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			/* Oversized list: spill entries to a side buffer. */
			pr_warn("Num of scattered entries");
			pr_warn(" (%d) is greater than %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			if (cleanup) {
				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
					data->client.sec_buf_fd[i].vbase)
					dma_free_coherent(qseecom.pdev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			} else {
				ret = __qseecom_allocate_sg_list_buffer(data,
						field, i, sg_ptr);
				if (ret) {
					pr_err("Failed to allocate sg list buffer\n");
					goto err;
				}
			}
			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
			sg = sg_ptr->sgl;
			goto cleanup;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: patch a bare 64-bit address. */
			uint64_t *update_64bit;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			/* 64bit app uses 64bit address */
			update_64bit = (uint64_t *) field;
			*update_64bit = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg_ptr->sgl);
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: patch a 64-bit sg-entry array. */
			struct qseecom_sg_entry_64bit *update_64bit;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
					(req->cmd_req_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
					SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
				(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					goto err;
				}
			}
			/* 64bit app uses 64bit address */
			update_64bit = (struct qseecom_sg_entry_64bit *)field;
			for (j = 0; j < sg_ptr->nents; j++) {
				update_64bit->phys_addr = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg);
				update_64bit->len = cleanup ? 0 :
						(uint32_t)sg->length;
				update_64bit++;
				len += sg->length;
				sg = sg_next(sg);
			}
		}
cleanup:
		if (cleanup) {
			/* Post-TZ: invalidate so CPU sees TZ's writes. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Pre-TZ: flush, then record sglist bookkeeping. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	/* Free any side buffers allocated for oversized sg lists. */
	for (i = 0; i < MAX_ION_FD; i++)
		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
			data->client.sec_buf_fd[i].vbase)
			dma_free_coherent(qseecom.pdev,
				data->client.sec_buf_fd[i].size,
				data->client.sec_buf_fd[i].vbase,
				data->client.sec_buf_fd[i].pbase);
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3807
3808static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3809 void __user *argp,
3810 bool is_64bit_addr)
3811{
3812 int ret = 0;
3813 int i;
3814 struct qseecom_send_modfd_cmd_req req;
3815 struct qseecom_send_cmd_req send_cmd_req;
3816
3817 ret = copy_from_user(&req, argp, sizeof(req));
3818 if (ret) {
3819 pr_err("copy_from_user failed\n");
3820 return ret;
3821 }
3822
3823 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3824 send_cmd_req.cmd_req_len = req.cmd_req_len;
3825 send_cmd_req.resp_buf = req.resp_buf;
3826 send_cmd_req.resp_len = req.resp_len;
3827
3828 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3829 return -EINVAL;
3830
3831 /* validate offsets */
3832 for (i = 0; i < MAX_ION_FD; i++) {
3833 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3834 pr_err("Invalid offset %d = 0x%x\n",
3835 i, req.ifd_data[i].cmd_buf_offset);
3836 return -EINVAL;
3837 }
3838 }
3839 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3840 (uintptr_t)req.cmd_req_buf);
3841 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3842 (uintptr_t)req.resp_buf);
3843
3844 if (!is_64bit_addr) {
3845 ret = __qseecom_update_cmd_buf(&req, false, data);
3846 if (ret)
3847 return ret;
3848 ret = __qseecom_send_cmd(data, &send_cmd_req);
3849 if (ret)
3850 return ret;
3851 ret = __qseecom_update_cmd_buf(&req, true, data);
3852 if (ret)
3853 return ret;
3854 } else {
3855 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3856 if (ret)
3857 return ret;
3858 ret = __qseecom_send_cmd(data, &send_cmd_req);
3859 if (ret)
3860 return ret;
3861 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3862 if (ret)
3863 return ret;
3864 }
3865
3866 return ret;
3867}
3868
/* Send a modified-fd command; TA expects 32bit addresses in the buffer. */
static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, false);
}
3874
/* Send a modified-fd command; TA expects 64bit addresses in the buffer. */
static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, true);
}
3880
3881
3882
3883static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3884 struct qseecom_registered_listener_list *svc)
3885{
3886 int ret;
3887
Zhen Kongf5087172018-10-11 17:22:05 -07003888 ret = (svc->rcv_req_flag == 1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08003889 return ret || data->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003890}
3891
/*
 * Block the calling listener until a request from TZ arrives.
 *
 * Looks up the listener registered under data->listener.id, then sleeps
 * (freezable) on its receive waitqueue until either rcv_req_flag is set
 * or data->abort is raised.
 *
 * Return: 0 when a request is pending, -ENODATA for an unknown listener
 * id, -ERESTARTSYS if the wait was interrupted by a signal, or -ENODEV
 * when the handle is being aborted.
 */
static int qseecom_receive_req(struct qseecom_dev_handle *data)
{
	int ret = 0;
	struct qseecom_registered_listener_list *this_lstnr;

	mutex_lock(&listener_access_lock);
	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (!this_lstnr) {
		pr_err("Invalid listener ID\n");
		mutex_unlock(&listener_access_lock);
		return -ENODATA;
	}
	mutex_unlock(&listener_access_lock);
	/*
	 * NOTE(review): this_lstnr is dereferenced below after the lock
	 * is dropped; presumably the listener cannot be unregistered
	 * while this fd is open - confirm against the unregister path.
	 */

	while (1) {
		if (wait_event_freezable(this_lstnr->rcv_req_wq,
				__qseecom_listener_has_rcvd_req(data,
				this_lstnr))) {
			pr_warn("Interrupted: exiting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			/* woken up for different reason */
			return -ERESTARTSYS;
		}

		if (data->abort) {
			pr_err("Aborting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			return -ENODEV;
		}
		/* consume the pending-request flag under the lock */
		mutex_lock(&listener_access_lock);
		this_lstnr->rcv_req_flag = 0;
		mutex_unlock(&listener_access_lock);
		break;
	}
	return ret;
}
3928
3929static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
3930{
3931 unsigned char app_arch = 0;
3932 struct elf32_hdr *ehdr;
3933 struct elf64_hdr *ehdr64;
3934
3935 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3936
3937 switch (app_arch) {
3938 case ELFCLASS32: {
3939 ehdr = (struct elf32_hdr *)fw_entry->data;
3940 if (fw_entry->size < sizeof(*ehdr)) {
3941 pr_err("%s: Not big enough to be an elf32 header\n",
3942 qseecom.pdev->init_name);
3943 return false;
3944 }
3945 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
3946 pr_err("%s: Not an elf32 header\n",
3947 qseecom.pdev->init_name);
3948 return false;
3949 }
3950 if (ehdr->e_phnum == 0) {
3951 pr_err("%s: No loadable segments\n",
3952 qseecom.pdev->init_name);
3953 return false;
3954 }
3955 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
3956 sizeof(struct elf32_hdr) > fw_entry->size) {
3957 pr_err("%s: Program headers not within mdt\n",
3958 qseecom.pdev->init_name);
3959 return false;
3960 }
3961 break;
3962 }
3963 case ELFCLASS64: {
3964 ehdr64 = (struct elf64_hdr *)fw_entry->data;
3965 if (fw_entry->size < sizeof(*ehdr64)) {
3966 pr_err("%s: Not big enough to be an elf64 header\n",
3967 qseecom.pdev->init_name);
3968 return false;
3969 }
3970 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
3971 pr_err("%s: Not an elf64 header\n",
3972 qseecom.pdev->init_name);
3973 return false;
3974 }
3975 if (ehdr64->e_phnum == 0) {
3976 pr_err("%s: No loadable segments\n",
3977 qseecom.pdev->init_name);
3978 return false;
3979 }
3980 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
3981 sizeof(struct elf64_hdr) > fw_entry->size) {
3982 pr_err("%s: Program headers not within mdt\n",
3983 qseecom.pdev->init_name);
3984 return false;
3985 }
3986 break;
3987 }
3988 default: {
3989 pr_err("QSEE app arch %u is not supported\n", app_arch);
3990 return false;
3991 }
3992 }
3993 return true;
3994}
3995
/*
 * Compute the total size of a split firmware image and report its
 * ELF class.
 *
 * The image consists of <appname>.mdt plus one <appname>.bNN blob per
 * program header; the sizes of all pieces are summed into @fw_size.
 *
 * @appname:  TA/image base name
 * @fw_size:  out; total byte count of mdt + all blobs (0 on failure)
 * @app_arch: out; ELFCLASS32 or ELFCLASS64 read from the mdt ident
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
					uint32_t *app_arch)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		pr_err("error with request_firmware\n");
		ret = -EIO;
		goto err;
	}
	if (!__qseecom_is_fw_image_valid(fw_entry)) {
		ret = -EIO;
		goto err;
	}
	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	*fw_size = fw_entry->size;
	/* number of segment blobs equals the program header count */
	if (*app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (*app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
					appname, *app_arch);
		ret = -EIO;
		goto err;
	}
	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
	release_firmware(fw_entry);
	fw_entry = NULL;
	for (i = 0; i < num_images; i++) {
		memset(fw_name, 0, sizeof(fw_name));
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret)
			goto err;
		/* guard the 32bit running total against wraparound */
		if (*fw_size > U32_MAX - fw_entry->size) {
			pr_err("QSEE %s app file size overflow\n", appname);
			ret = -EINVAL;
			goto err;
		}
		*fw_size += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}

	return ret;
err:
	if (fw_entry)
		release_firmware(fw_entry);
	*fw_size = 0;
	return ret;
}
4058
/*
 * Assemble a split firmware image (<appname>.mdt followed by all
 * <appname>.bNN blobs) contiguously into @img_data.
 *
 * @appname:  TA/image base name
 * @img_data: destination buffer, at least @fw_size bytes
 * @fw_size:  capacity of @img_data, as computed by
 *            __qseecom_get_fw_size()
 * @load_req: out; mdt_len and img_len are filled in for the scm call
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
				uint32_t fw_size,
				struct qseecom_load_app_ireq *load_req)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	u8 *img_data_ptr = img_data;
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;
	unsigned char app_arch = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		ret = -EIO;
		goto err;
	}

	load_req->img_len = fw_entry->size;
	if (load_req->img_len > fw_size) {
		pr_err("app %s size %zu is larger than buf size %u\n",
			appname, fw_entry->size, fw_size);
		ret = -EINVAL;
		goto err;
	}
	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
	img_data_ptr = img_data_ptr + fw_entry->size;
	load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/

	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	/* number of segment blobs equals the program header count */
	if (app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
					appname, app_arch);
		ret = -EIO;
		goto err;
	}
	release_firmware(fw_entry);
	fw_entry = NULL;
	for (i = 0; i < num_images; i++) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret) {
			pr_err("Failed to locate blob %s\n", fw_name);
			goto err;
		}
		/* reject overflow of img_len and overrun of the buffer */
		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
			(fw_entry->size + load_req->img_len > fw_size)) {
			pr_err("Invalid file size for %s\n", fw_name);
			ret = -EINVAL;
			goto err;
		}
		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
		img_data_ptr = img_data_ptr + fw_entry->size;
		load_req->img_len += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	return ret;
err:
	release_firmware(fw_entry);
	return ret;
}
4130
/*
 * Allocate an ION buffer from the QSECOM TA heap for a firmware image,
 * map it into the kernel, and look up its physical address.
 *
 * @pihandle: out; ION handle of the allocation
 * @data:     out; kernel virtual address of the buffer
 * @fw_size:  requested size in bytes
 * @paddr:    out; physical address of the buffer
 *
 * The allocation is retried up to QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP
 * times with a sleep between attempts.
 *
 * NOTE(review): the retry path unlocks and re-locks app_access_lock
 * around the sleep, which assumes every caller holds app_access_lock -
 * confirm at the call sites.
 *
 * Return: 0 on success, -ENOMEM/-EIO on failure.
 */
static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
			u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
{
	size_t len = 0;
	int ret = 0;
	ion_phys_addr_t pa;
	struct ion_handle *ihandle = NULL;
	u8 *img_data = NULL;
	int retry = 0;

	do {
		if (retry++) {
			/* drop the lock so others can run while we sleep */
			mutex_unlock(&app_access_lock);
			msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
			mutex_lock(&app_access_lock);
		}
		ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
			SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), 0);
	} while (IS_ERR_OR_NULL(ihandle) &&
		(retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));

	if (IS_ERR_OR_NULL(ihandle)) {
		pr_err("ION alloc failed\n");
		return -ENOMEM;
	}
	img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
					ihandle);

	if (IS_ERR_OR_NULL(img_data)) {
		pr_err("ION memory mapping for image loading failed\n");
		ret = -ENOMEM;
		goto exit_ion_free;
	}
	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
	if (ret) {
		pr_err("physical memory retrieval failure\n");
		ret = -EIO;
		goto exit_ion_unmap_kernel;
	}

	*pihandle = ihandle;
	*data = img_data;
	*paddr = pa;
	return ret;

exit_ion_unmap_kernel:
	ion_unmap_kernel(qseecom.ion_clnt, ihandle);
exit_ion_free:
	ion_free(qseecom.ion_clnt, ihandle);
	ihandle = NULL;
	return ret;
}
4184
4185static void __qseecom_free_img_data(struct ion_handle **ihandle)
4186{
4187 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4188 ion_free(qseecom.ion_clnt, *ihandle);
4189 *ihandle = NULL;
4190}
4191
4192static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4193 uint32_t *app_id)
4194{
4195 int ret = -1;
4196 uint32_t fw_size = 0;
4197 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4198 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4199 struct qseecom_command_scm_resp resp;
4200 u8 *img_data = NULL;
4201 ion_phys_addr_t pa = 0;
4202 struct ion_handle *ihandle = NULL;
4203 void *cmd_buf = NULL;
4204 size_t cmd_len;
4205 uint32_t app_arch = 0;
4206
4207 if (!data || !appname || !app_id) {
4208 pr_err("Null pointer to data or appname or appid\n");
4209 return -EINVAL;
4210 }
4211 *app_id = 0;
4212 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4213 return -EIO;
4214 data->client.app_arch = app_arch;
4215
4216 /* Check and load cmnlib */
4217 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4218 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4219 ret = qseecom_load_commonlib_image(data, "cmnlib");
4220 if (ret) {
4221 pr_err("failed to load cmnlib\n");
4222 return -EIO;
4223 }
4224 qseecom.commonlib_loaded = true;
4225 pr_debug("cmnlib is loaded\n");
4226 }
4227
4228 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4229 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4230 if (ret) {
4231 pr_err("failed to load cmnlib64\n");
4232 return -EIO;
4233 }
4234 qseecom.commonlib64_loaded = true;
4235 pr_debug("cmnlib64 is loaded\n");
4236 }
4237 }
4238
4239 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4240 if (ret)
4241 return ret;
4242
4243 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4244 if (ret) {
4245 ret = -EIO;
4246 goto exit_free_img_data;
4247 }
4248
4249 /* Populate the load_req parameters */
4250 if (qseecom.qsee_version < QSEE_VERSION_40) {
4251 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4252 load_req.mdt_len = load_req.mdt_len;
4253 load_req.img_len = load_req.img_len;
4254 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4255 load_req.phy_addr = (uint32_t)pa;
4256 cmd_buf = (void *)&load_req;
4257 cmd_len = sizeof(struct qseecom_load_app_ireq);
4258 } else {
4259 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4260 load_req_64bit.mdt_len = load_req.mdt_len;
4261 load_req_64bit.img_len = load_req.img_len;
4262 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4263 load_req_64bit.phy_addr = (uint64_t)pa;
4264 cmd_buf = (void *)&load_req_64bit;
4265 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4266 }
4267
4268 if (qseecom.support_bus_scaling) {
4269 mutex_lock(&qsee_bw_mutex);
4270 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4271 mutex_unlock(&qsee_bw_mutex);
4272 if (ret) {
4273 ret = -EIO;
4274 goto exit_free_img_data;
4275 }
4276 }
4277
4278 ret = __qseecom_enable_clk_scale_up(data);
4279 if (ret) {
4280 ret = -EIO;
4281 goto exit_unregister_bus_bw_need;
4282 }
4283
4284 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4285 img_data, fw_size,
4286 ION_IOC_CLEAN_INV_CACHES);
4287 if (ret) {
4288 pr_err("cache operation failed %d\n", ret);
4289 goto exit_disable_clk_vote;
4290 }
4291
4292 /* SCM_CALL to load the image */
4293 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4294 &resp, sizeof(resp));
4295 if (ret) {
Zhen Kong5d02be92018-05-29 16:17:29 -07004296 pr_err("scm_call to load failed : ret %d, result %x\n",
4297 ret, resp.result);
4298 if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
4299 ret = -EEXIST;
4300 else
4301 ret = -EIO;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004302 goto exit_disable_clk_vote;
4303 }
4304
4305 switch (resp.result) {
4306 case QSEOS_RESULT_SUCCESS:
4307 *app_id = resp.data;
4308 break;
4309 case QSEOS_RESULT_INCOMPLETE:
4310 ret = __qseecom_process_incomplete_cmd(data, &resp);
4311 if (ret)
4312 pr_err("process_incomplete_cmd FAILED\n");
4313 else
4314 *app_id = resp.data;
4315 break;
4316 case QSEOS_RESULT_FAILURE:
4317 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4318 break;
4319 default:
4320 pr_err("scm call return unknown response %d\n", resp.result);
4321 ret = -EINVAL;
4322 break;
4323 }
4324
4325exit_disable_clk_vote:
4326 __qseecom_disable_clk_scale_down(data);
4327
4328exit_unregister_bus_bw_need:
4329 if (qseecom.support_bus_scaling) {
4330 mutex_lock(&qsee_bw_mutex);
4331 qseecom_unregister_bus_bandwidth_needs(data);
4332 mutex_unlock(&qsee_bw_mutex);
4333 }
4334
4335exit_free_img_data:
4336 __qseecom_free_img_data(&ihandle);
4337 return ret;
4338}
4339
/*
 * Load the QSEE common library image (@cmnlib_name, e.g. "cmnlib" or
 * "cmnlib64") into an ION buffer and ask QSEE to install it via the
 * QSEOS_LOAD_SERV_IMAGE_COMMAND scm call.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name)
{
	int ret = 0;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	ion_phys_addr_t pa = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	uint32_t app_arch = 0;
	struct ion_handle *cmnlib_ion_handle = NULL;

	if (!cmnlib_name) {
		pr_err("cmnlib_name is NULL\n");
		return -EINVAL;
	}
	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
			cmnlib_name, strlen(cmnlib_name));
		return -EINVAL;
	}

	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
		return -EIO;

	ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
						&img_data, fw_size, &pa);
	if (ret)
		return -EIO;

	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}
	/* pick the 32bit or 64bit request layout based on QSEE version */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		load_req.phy_addr = (uint32_t)pa;
		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		cmd_buf = (void *)&load_req;
		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
	} else {
		load_req_64bit.phy_addr = (uint64_t)pa;
		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		load_req_64bit.img_len = load_req.img_len;
		load_req_64bit.mdt_len = load_req.mdt_len;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	/* TZ reads the image from memory; flush CPU caches first */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
				img_data, fw_size,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clk_vote;
	}

	/* SCM_CALL to load the image */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
							&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n", ret);
		ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed w/response result%d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd failed err: %d\n", ret);
			goto exit_disable_clk_vote;
		}
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	}

exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	__qseecom_free_img_data(&cmnlib_ion_handle);
	return ret;
}
4460
4461static int qseecom_unload_commonlib_image(void)
4462{
4463 int ret = -EINVAL;
4464 struct qseecom_unload_lib_image_ireq unload_req = {0};
4465 struct qseecom_command_scm_resp resp;
4466
4467 /* Populate the remaining parameters */
4468 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4469
4470 /* SCM_CALL to load the image */
4471 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4472 sizeof(struct qseecom_unload_lib_image_ireq),
4473 &resp, sizeof(resp));
4474 if (ret) {
4475 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4476 ret = -EIO;
4477 } else {
4478 switch (resp.result) {
4479 case QSEOS_RESULT_SUCCESS:
4480 break;
4481 case QSEOS_RESULT_FAILURE:
4482 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4483 break;
4484 default:
4485 pr_err("scm call return unknown response %d\n",
4486 resp.result);
4487 ret = -EINVAL;
4488 break;
4489 }
4490 }
4491
4492 return ret;
4493}
4494
4495int qseecom_start_app(struct qseecom_handle **handle,
4496 char *app_name, uint32_t size)
4497{
4498 int32_t ret = 0;
4499 unsigned long flags = 0;
4500 struct qseecom_dev_handle *data = NULL;
4501 struct qseecom_check_app_ireq app_ireq;
4502 struct qseecom_registered_app_list *entry = NULL;
4503 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4504 bool found_app = false;
4505 size_t len;
4506 ion_phys_addr_t pa;
4507 uint32_t fw_size, app_arch;
4508 uint32_t app_id = 0;
4509
Zhen Kongbcdeda22018-11-16 13:50:51 -08004510 __qseecom_processing_pending_lsnr_unregister();
4511
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004512 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4513 pr_err("Not allowed to be called in %d state\n",
4514 atomic_read(&qseecom.qseecom_state));
4515 return -EPERM;
4516 }
4517 if (!app_name) {
4518 pr_err("failed to get the app name\n");
4519 return -EINVAL;
4520 }
4521
Zhen Kong64a6d7282017-06-16 11:55:07 -07004522 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004523 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004524 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004525 return -EINVAL;
4526 }
4527
4528 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4529 if (!(*handle))
4530 return -ENOMEM;
4531
4532 data = kzalloc(sizeof(*data), GFP_KERNEL);
4533 if (!data) {
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304534 ret = -ENOMEM;
4535 goto exit_handle_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004536 }
4537 data->abort = 0;
4538 data->type = QSEECOM_CLIENT_APP;
4539 data->released = false;
4540 data->client.sb_length = size;
4541 data->client.user_virt_sb_base = 0;
4542 data->client.ihandle = NULL;
4543
4544 init_waitqueue_head(&data->abort_wq);
4545
4546 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4547 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4548 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4549 pr_err("Ion client could not retrieve the handle\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304550 ret = -ENOMEM;
4551 goto exit_data_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004552 }
4553 mutex_lock(&app_access_lock);
4554
Zhen Kong5d02be92018-05-29 16:17:29 -07004555recheck:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004556 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4557 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4558 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4559 if (ret)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304560 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004561
4562 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4563 if (app_id) {
4564 pr_warn("App id %d for [%s] app exists\n", app_id,
4565 (char *)app_ireq.app_name);
4566 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4567 list_for_each_entry(entry,
4568 &qseecom.registered_app_list_head, list){
4569 if (entry->app_id == app_id) {
4570 entry->ref_cnt++;
4571 found_app = true;
4572 break;
4573 }
4574 }
4575 spin_unlock_irqrestore(
4576 &qseecom.registered_app_list_lock, flags);
4577 if (!found_app)
4578 pr_warn("App_id %d [%s] was loaded but not registered\n",
4579 ret, (char *)app_ireq.app_name);
4580 } else {
4581 /* load the app and get the app_id */
4582 pr_debug("%s: Loading app for the first time'\n",
4583 qseecom.pdev->init_name);
4584 ret = __qseecom_load_fw(data, app_name, &app_id);
Zhen Kong5d02be92018-05-29 16:17:29 -07004585 if (ret == -EEXIST) {
4586 pr_err("recheck if TA %s is loaded\n", app_name);
4587 goto recheck;
4588 } else if (ret < 0)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304589 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004590 }
4591 data->client.app_id = app_id;
4592 if (!found_app) {
4593 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4594 if (!entry) {
4595 pr_err("kmalloc for app entry failed\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304596 ret = -ENOMEM;
4597 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004598 }
4599 entry->app_id = app_id;
4600 entry->ref_cnt = 1;
4601 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4602 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4603 ret = -EIO;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304604 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004605 }
4606 entry->app_arch = app_arch;
4607 entry->app_blocked = false;
4608 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07004609 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004610 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4611 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4612 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4613 flags);
4614 }
4615
4616 /* Get the physical address of the ION BUF */
4617 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4618 if (ret) {
4619 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4620 ret);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304621 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004622 }
4623
4624 /* Populate the structure for sending scm call to load image */
4625 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4626 data->client.ihandle);
4627 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4628 pr_err("ION memory mapping for client shared buf failed\n");
4629 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304630 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004631 }
4632 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4633 data->client.sb_phys = (phys_addr_t)pa;
4634 (*handle)->dev = (void *)data;
4635 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4636 (*handle)->sbuf_len = data->client.sb_length;
4637
4638 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4639 if (!kclient_entry) {
4640 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304641 goto exit_ion_unmap_kernel;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004642 }
4643 kclient_entry->handle = *handle;
4644
4645 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4646 list_add_tail(&kclient_entry->list,
4647 &qseecom.registered_kclient_list_head);
4648 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4649
4650 mutex_unlock(&app_access_lock);
4651 return 0;
4652
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304653exit_ion_unmap_kernel:
4654 if (!IS_ERR_OR_NULL(data->client.ihandle))
4655 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
4656exit_entry_free:
4657 kfree(entry);
4658exit_ion_free:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004659 mutex_unlock(&app_access_lock);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304660 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
4661 ion_free(qseecom.ion_clnt, data->client.ihandle);
4662 data->client.ihandle = NULL;
4663 }
4664exit_data_free:
4665 kfree(data);
4666exit_handle_free:
4667 if (*handle) {
4668 kfree(*handle);
4669 *handle = NULL;
4670 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004671 return ret;
4672}
4673EXPORT_SYMBOL(qseecom_start_app);
4674
/*
 * Close a kernel-client handle opened by qseecom_start_app() and unload
 * the TA reference it holds.
 *
 * Return: 0 on success; -EPERM if the driver is not ready; -EINVAL if
 * the handle is NULL or not found in the kernel-client list; otherwise
 * the error from qseecom_unload_app().
 */
int qseecom_shutdown_app(struct qseecom_handle **handle)
{
	int ret = -EINVAL;
	struct qseecom_dev_handle *data;

	struct qseecom_registered_kclient_list *kclient = NULL;
	unsigned long flags = 0;
	bool found_handle = false;

	__qseecom_processing_pending_lsnr_unregister();

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
				atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if ((handle == NULL) || (*handle == NULL)) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = (struct qseecom_dev_handle *) ((*handle)->dev);
	mutex_lock(&app_access_lock);

	/* unlink this handle from the kernel-client list */
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
				list) {
		if (kclient->handle == (*handle)) {
			list_del(&kclient->list);
			found_handle = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
	if (!found_handle)
		pr_err("Unable to find the handle, exiting\n");
	else
		ret = qseecom_unload_app(data, false);

	mutex_unlock(&app_access_lock);
	/* free only on success; kclient was already unlinked above */
	if (ret == 0) {
		kzfree(data);
		kzfree(*handle);
		kzfree(kclient);
		*handle = NULL;
	}

	return ret;
}
4724EXPORT_SYMBOL(qseecom_shutdown_app);
4725
4726int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
4727 uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
4728{
4729 int ret = 0;
4730 struct qseecom_send_cmd_req req = {0, 0, 0, 0};
4731 struct qseecom_dev_handle *data;
4732 bool perf_enabled = false;
4733
Zhen Kongbcdeda22018-11-16 13:50:51 -08004734 __qseecom_processing_pending_lsnr_unregister();
4735
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004736 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4737 pr_err("Not allowed to be called in %d state\n",
4738 atomic_read(&qseecom.qseecom_state));
4739 return -EPERM;
4740 }
4741
4742 if (handle == NULL) {
4743 pr_err("Handle is not initialized\n");
4744 return -EINVAL;
4745 }
4746 data = handle->dev;
4747
4748 req.cmd_req_len = sbuf_len;
4749 req.resp_len = rbuf_len;
4750 req.cmd_req_buf = send_buf;
4751 req.resp_buf = resp_buf;
4752
4753 if (__validate_send_cmd_inputs(data, &req))
4754 return -EINVAL;
4755
4756 mutex_lock(&app_access_lock);
4757 if (qseecom.support_bus_scaling) {
4758 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
4759 if (ret) {
4760 pr_err("Failed to set bw.\n");
4761 mutex_unlock(&app_access_lock);
4762 return ret;
4763 }
4764 }
4765 /*
4766 * On targets where crypto clock is handled by HLOS,
4767 * if clk_access_cnt is zero and perf_enabled is false,
4768 * then the crypto clock was not enabled before sending cmd
4769 * to tz, qseecom will enable the clock to avoid service failure.
4770 */
4771 if (!qseecom.no_clock_support &&
4772 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
4773 pr_debug("ce clock is not enabled!\n");
4774 ret = qseecom_perf_enable(data);
4775 if (ret) {
4776 pr_err("Failed to vote for clock with err %d\n",
4777 ret);
4778 mutex_unlock(&app_access_lock);
4779 return -EINVAL;
4780 }
4781 perf_enabled = true;
4782 }
4783 if (!strcmp(data->client.app_name, "securemm"))
4784 data->use_legacy_cmd = true;
4785
4786 ret = __qseecom_send_cmd(data, &req);
4787 data->use_legacy_cmd = false;
4788 if (qseecom.support_bus_scaling)
4789 __qseecom_add_bw_scale_down_timer(
4790 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
4791
4792 if (perf_enabled) {
4793 qsee_disable_clock_vote(data, CLK_DFAB);
4794 qsee_disable_clock_vote(data, CLK_SFPB);
4795 }
4796
4797 mutex_unlock(&app_access_lock);
4798
4799 if (ret)
4800 return ret;
4801
4802 pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
4803 req.resp_len, req.resp_buf);
4804 return ret;
4805}
4806EXPORT_SYMBOL(qseecom_send_command);
4807
4808int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4809{
4810 int ret = 0;
4811
4812 if ((handle == NULL) || (handle->dev == NULL)) {
4813 pr_err("No valid kernel client\n");
4814 return -EINVAL;
4815 }
4816 if (high) {
4817 if (qseecom.support_bus_scaling) {
4818 mutex_lock(&qsee_bw_mutex);
4819 __qseecom_register_bus_bandwidth_needs(handle->dev,
4820 HIGH);
4821 mutex_unlock(&qsee_bw_mutex);
4822 } else {
4823 ret = qseecom_perf_enable(handle->dev);
4824 if (ret)
4825 pr_err("Failed to vote for clock with err %d\n",
4826 ret);
4827 }
4828 } else {
4829 if (!qseecom.support_bus_scaling) {
4830 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4831 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4832 } else {
4833 mutex_lock(&qsee_bw_mutex);
4834 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4835 mutex_unlock(&qsee_bw_mutex);
4836 }
4837 }
4838 return ret;
4839}
4840EXPORT_SYMBOL(qseecom_set_bandwidth);
4841
4842int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
4843{
4844 struct qseecom_registered_app_list dummy_app_entry = { {0} };
4845 struct qseecom_dev_handle dummy_private_data = {0};
4846 struct qseecom_command_scm_resp resp;
4847 int ret = 0;
4848
4849 if (!desc) {
4850 pr_err("desc is NULL\n");
4851 return -EINVAL;
4852 }
4853
4854 resp.result = desc->ret[0]; /*req_cmd*/
Zhen Kong2f60f492017-06-29 15:22:14 -07004855 resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004856 resp.data = desc->ret[2]; /*listener_id*/
4857
Zhen Konge7f525f2017-12-01 18:26:25 -08004858 dummy_private_data.client.app_id = desc->ret[1];
4859 dummy_app_entry.app_id = desc->ret[1];
4860
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004861 mutex_lock(&app_access_lock);
Zhen Kong7458c2e2017-10-19 12:32:07 -07004862 if (qseecom.qsee_reentrancy_support)
4863 ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004864 &dummy_private_data);
Zhen Kong7458c2e2017-10-19 12:32:07 -07004865 else
4866 ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
4867 &resp);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004868 mutex_unlock(&app_access_lock);
4869 if (ret)
Zhen Kong2f60f492017-06-29 15:22:14 -07004870 pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004871 (int)desc->ret[0], (int)desc->ret[2],
4872 (int)desc->ret[1], ret);
4873 desc->ret[0] = resp.result;
4874 desc->ret[1] = resp.resp_type;
4875 desc->ret[2] = resp.data;
4876 return ret;
4877}
4878EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4879
4880static int qseecom_send_resp(void)
4881{
4882 qseecom.send_resp_flag = 1;
4883 wake_up_interruptible(&qseecom.send_resp_wq);
4884 return 0;
4885}
4886
4887static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4888{
4889 struct qseecom_registered_listener_list *this_lstnr = NULL;
4890
4891 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4892 this_lstnr = __qseecom_find_svc(data->listener.id);
4893 if (this_lstnr == NULL)
4894 return -EINVAL;
4895 qseecom.send_resp_flag = 1;
4896 this_lstnr->send_resp_flag = 1;
4897 wake_up_interruptible(&qseecom.send_resp_wq);
4898 return 0;
4899}
4900
4901static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
4902 struct qseecom_send_modfd_listener_resp *resp,
4903 struct qseecom_registered_listener_list *this_lstnr)
4904{
4905 int i;
4906
4907 if (!data || !resp || !this_lstnr) {
4908 pr_err("listener handle or resp msg is null\n");
4909 return -EINVAL;
4910 }
4911
4912 if (resp->resp_buf_ptr == NULL) {
4913 pr_err("resp buffer is null\n");
4914 return -EINVAL;
4915 }
4916 /* validate resp buf length */
4917 if ((resp->resp_len == 0) ||
4918 (resp->resp_len > this_lstnr->sb_length)) {
4919 pr_err("resp buf length %d not valid\n", resp->resp_len);
4920 return -EINVAL;
4921 }
4922
4923 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
4924 pr_err("Integer overflow in resp_len & resp_buf\n");
4925 return -EINVAL;
4926 }
4927 if ((uintptr_t)this_lstnr->user_virt_sb_base >
4928 (ULONG_MAX - this_lstnr->sb_length)) {
4929 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
4930 return -EINVAL;
4931 }
4932 /* validate resp buf */
4933 if (((uintptr_t)resp->resp_buf_ptr <
4934 (uintptr_t)this_lstnr->user_virt_sb_base) ||
4935 ((uintptr_t)resp->resp_buf_ptr >=
4936 ((uintptr_t)this_lstnr->user_virt_sb_base +
4937 this_lstnr->sb_length)) ||
4938 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
4939 ((uintptr_t)this_lstnr->user_virt_sb_base +
4940 this_lstnr->sb_length))) {
4941 pr_err("resp buf is out of shared buffer region\n");
4942 return -EINVAL;
4943 }
4944
4945 /* validate offsets */
4946 for (i = 0; i < MAX_ION_FD; i++) {
4947 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
4948 pr_err("Invalid offset %d = 0x%x\n",
4949 i, resp->ifd_data[i].cmd_buf_offset);
4950 return -EINVAL;
4951 }
4952 }
4953
4954 return 0;
4955}
4956
4957static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
4958 void __user *argp, bool is_64bit_addr)
4959{
4960 struct qseecom_send_modfd_listener_resp resp;
4961 struct qseecom_registered_listener_list *this_lstnr = NULL;
4962
4963 if (copy_from_user(&resp, argp, sizeof(resp))) {
4964 pr_err("copy_from_user failed");
4965 return -EINVAL;
4966 }
4967
4968 this_lstnr = __qseecom_find_svc(data->listener.id);
4969 if (this_lstnr == NULL)
4970 return -EINVAL;
4971
4972 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
4973 return -EINVAL;
4974
4975 resp.resp_buf_ptr = this_lstnr->sb_virt +
4976 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
4977
4978 if (!is_64bit_addr)
4979 __qseecom_update_cmd_buf(&resp, false, data);
4980 else
4981 __qseecom_update_cmd_buf_64(&resp, false, data);
4982 qseecom.send_resp_flag = 1;
4983 this_lstnr->send_resp_flag = 1;
4984 wake_up_interruptible(&qseecom.send_resp_wq);
4985 return 0;
4986}
4987
4988static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
4989 void __user *argp)
4990{
4991 return __qseecom_send_modfd_resp(data, argp, false);
4992}
4993
4994static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
4995 void __user *argp)
4996{
4997 return __qseecom_send_modfd_resp(data, argp, true);
4998}
4999
5000static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
5001 void __user *argp)
5002{
5003 struct qseecom_qseos_version_req req;
5004
5005 if (copy_from_user(&req, argp, sizeof(req))) {
5006 pr_err("copy_from_user failed");
5007 return -EINVAL;
5008 }
5009 req.qseos_version = qseecom.qseos_version;
5010 if (copy_to_user(argp, &req, sizeof(req))) {
5011 pr_err("copy_to_user failed");
5012 return -EINVAL;
5013 }
5014 return 0;
5015}
5016
5017static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
5018{
5019 int rc = 0;
5020 struct qseecom_clk *qclk = NULL;
5021
5022 if (qseecom.no_clock_support)
5023 return 0;
5024
5025 if (ce == CLK_QSEE)
5026 qclk = &qseecom.qsee;
5027 if (ce == CLK_CE_DRV)
5028 qclk = &qseecom.ce_drv;
5029
5030 if (qclk == NULL) {
5031 pr_err("CLK type not supported\n");
5032 return -EINVAL;
5033 }
5034 mutex_lock(&clk_access_lock);
5035
5036 if (qclk->clk_access_cnt == ULONG_MAX) {
5037 pr_err("clk_access_cnt beyond limitation\n");
5038 goto err;
5039 }
5040 if (qclk->clk_access_cnt > 0) {
5041 qclk->clk_access_cnt++;
5042 mutex_unlock(&clk_access_lock);
5043 return rc;
5044 }
5045
5046 /* Enable CE core clk */
5047 if (qclk->ce_core_clk != NULL) {
5048 rc = clk_prepare_enable(qclk->ce_core_clk);
5049 if (rc) {
5050 pr_err("Unable to enable/prepare CE core clk\n");
5051 goto err;
5052 }
5053 }
5054 /* Enable CE clk */
5055 if (qclk->ce_clk != NULL) {
5056 rc = clk_prepare_enable(qclk->ce_clk);
5057 if (rc) {
5058 pr_err("Unable to enable/prepare CE iface clk\n");
5059 goto ce_clk_err;
5060 }
5061 }
5062 /* Enable AXI clk */
5063 if (qclk->ce_bus_clk != NULL) {
5064 rc = clk_prepare_enable(qclk->ce_bus_clk);
5065 if (rc) {
5066 pr_err("Unable to enable/prepare CE bus clk\n");
5067 goto ce_bus_clk_err;
5068 }
5069 }
5070 qclk->clk_access_cnt++;
5071 mutex_unlock(&clk_access_lock);
5072 return 0;
5073
5074ce_bus_clk_err:
5075 if (qclk->ce_clk != NULL)
5076 clk_disable_unprepare(qclk->ce_clk);
5077ce_clk_err:
5078 if (qclk->ce_core_clk != NULL)
5079 clk_disable_unprepare(qclk->ce_core_clk);
5080err:
5081 mutex_unlock(&clk_access_lock);
5082 return -EIO;
5083}
5084
5085static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5086{
5087 struct qseecom_clk *qclk;
5088
5089 if (qseecom.no_clock_support)
5090 return;
5091
5092 if (ce == CLK_QSEE)
5093 qclk = &qseecom.qsee;
5094 else
5095 qclk = &qseecom.ce_drv;
5096
5097 mutex_lock(&clk_access_lock);
5098
5099 if (qclk->clk_access_cnt == 0) {
5100 mutex_unlock(&clk_access_lock);
5101 return;
5102 }
5103
5104 if (qclk->clk_access_cnt == 1) {
5105 if (qclk->ce_clk != NULL)
5106 clk_disable_unprepare(qclk->ce_clk);
5107 if (qclk->ce_core_clk != NULL)
5108 clk_disable_unprepare(qclk->ce_core_clk);
5109 if (qclk->ce_bus_clk != NULL)
5110 clk_disable_unprepare(qclk->ce_bus_clk);
5111 }
5112 qclk->clk_access_cnt--;
5113 mutex_unlock(&clk_access_lock);
5114}
5115
5116static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
5117 int32_t clk_type)
5118{
5119 int ret = 0;
5120 struct qseecom_clk *qclk;
5121
5122 if (qseecom.no_clock_support)
5123 return 0;
5124
5125 qclk = &qseecom.qsee;
5126 if (!qseecom.qsee_perf_client)
5127 return ret;
5128
5129 switch (clk_type) {
5130 case CLK_DFAB:
5131 mutex_lock(&qsee_bw_mutex);
5132 if (!qseecom.qsee_bw_count) {
5133 if (qseecom.qsee_sfpb_bw_count > 0)
5134 ret = msm_bus_scale_client_update_request(
5135 qseecom.qsee_perf_client, 3);
5136 else {
5137 if (qclk->ce_core_src_clk != NULL)
5138 ret = __qseecom_enable_clk(CLK_QSEE);
5139 if (!ret) {
5140 ret =
5141 msm_bus_scale_client_update_request(
5142 qseecom.qsee_perf_client, 1);
5143 if ((ret) &&
5144 (qclk->ce_core_src_clk != NULL))
5145 __qseecom_disable_clk(CLK_QSEE);
5146 }
5147 }
5148 if (ret)
5149 pr_err("DFAB Bandwidth req failed (%d)\n",
5150 ret);
5151 else {
5152 qseecom.qsee_bw_count++;
5153 data->perf_enabled = true;
5154 }
5155 } else {
5156 qseecom.qsee_bw_count++;
5157 data->perf_enabled = true;
5158 }
5159 mutex_unlock(&qsee_bw_mutex);
5160 break;
5161 case CLK_SFPB:
5162 mutex_lock(&qsee_bw_mutex);
5163 if (!qseecom.qsee_sfpb_bw_count) {
5164 if (qseecom.qsee_bw_count > 0)
5165 ret = msm_bus_scale_client_update_request(
5166 qseecom.qsee_perf_client, 3);
5167 else {
5168 if (qclk->ce_core_src_clk != NULL)
5169 ret = __qseecom_enable_clk(CLK_QSEE);
5170 if (!ret) {
5171 ret =
5172 msm_bus_scale_client_update_request(
5173 qseecom.qsee_perf_client, 2);
5174 if ((ret) &&
5175 (qclk->ce_core_src_clk != NULL))
5176 __qseecom_disable_clk(CLK_QSEE);
5177 }
5178 }
5179
5180 if (ret)
5181 pr_err("SFPB Bandwidth req failed (%d)\n",
5182 ret);
5183 else {
5184 qseecom.qsee_sfpb_bw_count++;
5185 data->fast_load_enabled = true;
5186 }
5187 } else {
5188 qseecom.qsee_sfpb_bw_count++;
5189 data->fast_load_enabled = true;
5190 }
5191 mutex_unlock(&qsee_bw_mutex);
5192 break;
5193 default:
5194 pr_err("Clock type not defined\n");
5195 break;
5196 }
5197 return ret;
5198}
5199
5200static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5201 int32_t clk_type)
5202{
5203 int32_t ret = 0;
5204 struct qseecom_clk *qclk;
5205
5206 qclk = &qseecom.qsee;
5207
5208 if (qseecom.no_clock_support)
5209 return;
5210 if (!qseecom.qsee_perf_client)
5211 return;
5212
5213 switch (clk_type) {
5214 case CLK_DFAB:
5215 mutex_lock(&qsee_bw_mutex);
5216 if (qseecom.qsee_bw_count == 0) {
5217 pr_err("Client error.Extra call to disable DFAB clk\n");
5218 mutex_unlock(&qsee_bw_mutex);
5219 return;
5220 }
5221
5222 if (qseecom.qsee_bw_count == 1) {
5223 if (qseecom.qsee_sfpb_bw_count > 0)
5224 ret = msm_bus_scale_client_update_request(
5225 qseecom.qsee_perf_client, 2);
5226 else {
5227 ret = msm_bus_scale_client_update_request(
5228 qseecom.qsee_perf_client, 0);
5229 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5230 __qseecom_disable_clk(CLK_QSEE);
5231 }
5232 if (ret)
5233 pr_err("SFPB Bandwidth req fail (%d)\n",
5234 ret);
5235 else {
5236 qseecom.qsee_bw_count--;
5237 data->perf_enabled = false;
5238 }
5239 } else {
5240 qseecom.qsee_bw_count--;
5241 data->perf_enabled = false;
5242 }
5243 mutex_unlock(&qsee_bw_mutex);
5244 break;
5245 case CLK_SFPB:
5246 mutex_lock(&qsee_bw_mutex);
5247 if (qseecom.qsee_sfpb_bw_count == 0) {
5248 pr_err("Client error.Extra call to disable SFPB clk\n");
5249 mutex_unlock(&qsee_bw_mutex);
5250 return;
5251 }
5252 if (qseecom.qsee_sfpb_bw_count == 1) {
5253 if (qseecom.qsee_bw_count > 0)
5254 ret = msm_bus_scale_client_update_request(
5255 qseecom.qsee_perf_client, 1);
5256 else {
5257 ret = msm_bus_scale_client_update_request(
5258 qseecom.qsee_perf_client, 0);
5259 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5260 __qseecom_disable_clk(CLK_QSEE);
5261 }
5262 if (ret)
5263 pr_err("SFPB Bandwidth req fail (%d)\n",
5264 ret);
5265 else {
5266 qseecom.qsee_sfpb_bw_count--;
5267 data->fast_load_enabled = false;
5268 }
5269 } else {
5270 qseecom.qsee_sfpb_bw_count--;
5271 data->fast_load_enabled = false;
5272 }
5273 mutex_unlock(&qsee_bw_mutex);
5274 break;
5275 default:
5276 pr_err("Clock type not defined\n");
5277 break;
5278 }
5279
5280}
5281
5282static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5283 void __user *argp)
5284{
5285 struct ion_handle *ihandle; /* Ion handle */
5286 struct qseecom_load_img_req load_img_req;
5287 int uret = 0;
5288 int ret;
5289 ion_phys_addr_t pa = 0;
5290 size_t len;
5291 struct qseecom_load_app_ireq load_req;
5292 struct qseecom_load_app_64bit_ireq load_req_64bit;
5293 struct qseecom_command_scm_resp resp;
5294 void *cmd_buf = NULL;
5295 size_t cmd_len;
5296 /* Copy the relevant information needed for loading the image */
5297 if (copy_from_user(&load_img_req,
5298 (void __user *)argp,
5299 sizeof(struct qseecom_load_img_req))) {
5300 pr_err("copy_from_user failed\n");
5301 return -EFAULT;
5302 }
5303
5304 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005305 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005306 load_img_req.ifd_data_fd);
5307 if (IS_ERR_OR_NULL(ihandle)) {
5308 pr_err("Ion client could not retrieve the handle\n");
5309 return -ENOMEM;
5310 }
5311
5312 /* Get the physical address of the ION BUF */
5313 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5314 if (ret) {
5315 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5316 ret);
5317 return ret;
5318 }
5319 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5320 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5321 len, load_img_req.mdt_len,
5322 load_img_req.img_len);
5323 return ret;
5324 }
5325 /* Populate the structure for sending scm call to load image */
5326 if (qseecom.qsee_version < QSEE_VERSION_40) {
5327 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5328 load_req.mdt_len = load_img_req.mdt_len;
5329 load_req.img_len = load_img_req.img_len;
5330 load_req.phy_addr = (uint32_t)pa;
5331 cmd_buf = (void *)&load_req;
5332 cmd_len = sizeof(struct qseecom_load_app_ireq);
5333 } else {
5334 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5335 load_req_64bit.mdt_len = load_img_req.mdt_len;
5336 load_req_64bit.img_len = load_img_req.img_len;
5337 load_req_64bit.phy_addr = (uint64_t)pa;
5338 cmd_buf = (void *)&load_req_64bit;
5339 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5340 }
5341
5342 if (qseecom.support_bus_scaling) {
5343 mutex_lock(&qsee_bw_mutex);
5344 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5345 mutex_unlock(&qsee_bw_mutex);
5346 if (ret) {
5347 ret = -EIO;
5348 goto exit_cpu_restore;
5349 }
5350 }
5351
5352 /* Vote for the SFPB clock */
5353 ret = __qseecom_enable_clk_scale_up(data);
5354 if (ret) {
5355 ret = -EIO;
5356 goto exit_register_bus_bandwidth_needs;
5357 }
5358 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5359 ION_IOC_CLEAN_INV_CACHES);
5360 if (ret) {
5361 pr_err("cache operation failed %d\n", ret);
5362 goto exit_disable_clock;
5363 }
5364 /* SCM_CALL to load the external elf */
5365 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5366 &resp, sizeof(resp));
5367 if (ret) {
5368 pr_err("scm_call to load failed : ret %d\n",
5369 ret);
5370 ret = -EFAULT;
5371 goto exit_disable_clock;
5372 }
5373
5374 switch (resp.result) {
5375 case QSEOS_RESULT_SUCCESS:
5376 break;
5377 case QSEOS_RESULT_INCOMPLETE:
5378 pr_err("%s: qseos result incomplete\n", __func__);
5379 ret = __qseecom_process_incomplete_cmd(data, &resp);
5380 if (ret)
5381 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5382 break;
5383 case QSEOS_RESULT_FAILURE:
5384 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5385 ret = -EFAULT;
5386 break;
5387 default:
5388 pr_err("scm_call response result %d not supported\n",
5389 resp.result);
5390 ret = -EFAULT;
5391 break;
5392 }
5393
5394exit_disable_clock:
5395 __qseecom_disable_clk_scale_down(data);
5396
5397exit_register_bus_bandwidth_needs:
5398 if (qseecom.support_bus_scaling) {
5399 mutex_lock(&qsee_bw_mutex);
5400 uret = qseecom_unregister_bus_bandwidth_needs(data);
5401 mutex_unlock(&qsee_bw_mutex);
5402 if (uret)
5403 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5404 uret, ret);
5405 }
5406
5407exit_cpu_restore:
5408 /* Deallocate the handle */
5409 if (!IS_ERR_OR_NULL(ihandle))
5410 ion_free(qseecom.ion_clnt, ihandle);
5411 return ret;
5412}
5413
5414static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5415{
5416 int ret = 0;
5417 struct qseecom_command_scm_resp resp;
5418 struct qseecom_unload_app_ireq req;
5419
5420 /* unavailable client app */
5421 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5422
5423 /* Populate the structure for sending scm call to unload image */
5424 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5425
5426 /* SCM_CALL to unload the external elf */
5427 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5428 sizeof(struct qseecom_unload_app_ireq),
5429 &resp, sizeof(resp));
5430 if (ret) {
5431 pr_err("scm_call to unload failed : ret %d\n",
5432 ret);
5433 ret = -EFAULT;
5434 goto qseecom_unload_external_elf_scm_err;
5435 }
5436 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5437 ret = __qseecom_process_incomplete_cmd(data, &resp);
5438 if (ret)
5439 pr_err("process_incomplete_cmd fail err: %d\n",
5440 ret);
5441 } else {
5442 if (resp.result != QSEOS_RESULT_SUCCESS) {
5443 pr_err("scm_call to unload image failed resp.result =%d\n",
5444 resp.result);
5445 ret = -EFAULT;
5446 }
5447 }
5448
5449qseecom_unload_external_elf_scm_err:
5450
5451 return ret;
5452}
5453
5454static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5455 void __user *argp)
5456{
5457
5458 int32_t ret;
5459 struct qseecom_qseos_app_load_query query_req;
5460 struct qseecom_check_app_ireq req;
5461 struct qseecom_registered_app_list *entry = NULL;
5462 unsigned long flags = 0;
5463 uint32_t app_arch = 0, app_id = 0;
5464 bool found_app = false;
5465
5466 /* Copy the relevant information needed for loading the image */
5467 if (copy_from_user(&query_req,
5468 (void __user *)argp,
5469 sizeof(struct qseecom_qseos_app_load_query))) {
5470 pr_err("copy_from_user failed\n");
5471 return -EFAULT;
5472 }
5473
5474 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5475 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5476 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5477
5478 ret = __qseecom_check_app_exists(req, &app_id);
5479 if (ret) {
5480 pr_err(" scm call to check if app is loaded failed");
5481 return ret; /* scm call failed */
5482 }
5483 if (app_id) {
5484 pr_debug("App id %d (%s) already exists\n", app_id,
5485 (char *)(req.app_name));
5486 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5487 list_for_each_entry(entry,
5488 &qseecom.registered_app_list_head, list){
5489 if (entry->app_id == app_id) {
5490 app_arch = entry->app_arch;
5491 entry->ref_cnt++;
5492 found_app = true;
5493 break;
5494 }
5495 }
5496 spin_unlock_irqrestore(
5497 &qseecom.registered_app_list_lock, flags);
5498 data->client.app_id = app_id;
5499 query_req.app_id = app_id;
5500 if (app_arch) {
5501 data->client.app_arch = app_arch;
5502 query_req.app_arch = app_arch;
5503 } else {
5504 data->client.app_arch = 0;
5505 query_req.app_arch = 0;
5506 }
5507 strlcpy(data->client.app_name, query_req.app_name,
5508 MAX_APP_NAME_SIZE);
5509 /*
5510 * If app was loaded by appsbl before and was not registered,
5511 * regiser this app now.
5512 */
5513 if (!found_app) {
5514 pr_debug("Register app %d [%s] which was loaded before\n",
5515 ret, (char *)query_req.app_name);
5516 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5517 if (!entry) {
5518 pr_err("kmalloc for app entry failed\n");
5519 return -ENOMEM;
5520 }
5521 entry->app_id = app_id;
5522 entry->ref_cnt = 1;
5523 entry->app_arch = data->client.app_arch;
5524 strlcpy(entry->app_name, data->client.app_name,
5525 MAX_APP_NAME_SIZE);
5526 entry->app_blocked = false;
5527 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07005528 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005529 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5530 flags);
5531 list_add_tail(&entry->list,
5532 &qseecom.registered_app_list_head);
5533 spin_unlock_irqrestore(
5534 &qseecom.registered_app_list_lock, flags);
5535 }
5536 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5537 pr_err("copy_to_user failed\n");
5538 return -EFAULT;
5539 }
5540 return -EEXIST; /* app already loaded */
5541 } else {
5542 return 0; /* app not loaded */
5543 }
5544}
5545
5546static int __qseecom_get_ce_pipe_info(
5547 enum qseecom_key_management_usage_type usage,
5548 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5549{
5550 int ret = -EINVAL;
5551 int i, j;
5552 struct qseecom_ce_info_use *p = NULL;
5553 int total = 0;
5554 struct qseecom_ce_pipe_entry *pcepipe;
5555
5556 switch (usage) {
5557 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5558 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5559 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5560 if (qseecom.support_fde) {
5561 p = qseecom.ce_info.fde;
5562 total = qseecom.ce_info.num_fde;
5563 } else {
5564 pr_err("system does not support fde\n");
5565 return -EINVAL;
5566 }
5567 break;
5568 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5569 if (qseecom.support_pfe) {
5570 p = qseecom.ce_info.pfe;
5571 total = qseecom.ce_info.num_pfe;
5572 } else {
5573 pr_err("system does not support pfe\n");
5574 return -EINVAL;
5575 }
5576 break;
5577 default:
5578 pr_err("unsupported usage %d\n", usage);
5579 return -EINVAL;
5580 }
5581
5582 for (j = 0; j < total; j++) {
5583 if (p->unit_num == unit) {
5584 pcepipe = p->ce_pipe_entry;
5585 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5586 (*ce_hw)[i] = pcepipe->ce_num;
5587 *pipe = pcepipe->ce_pipe_pair;
5588 pcepipe++;
5589 }
5590 ret = 0;
5591 break;
5592 }
5593 p++;
5594 }
5595 return ret;
5596}
5597
5598static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
5599 enum qseecom_key_management_usage_type usage,
5600 struct qseecom_key_generate_ireq *ireq)
5601{
5602 struct qseecom_command_scm_resp resp;
5603 int ret;
5604
5605 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5606 usage >= QSEOS_KM_USAGE_MAX) {
5607 pr_err("Error:: unsupported usage %d\n", usage);
5608 return -EFAULT;
5609 }
5610 ret = __qseecom_enable_clk(CLK_QSEE);
5611 if (ret)
5612 return ret;
5613
5614 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5615 ireq, sizeof(struct qseecom_key_generate_ireq),
5616 &resp, sizeof(resp));
5617 if (ret) {
5618 if (ret == -EINVAL &&
5619 resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5620 pr_debug("Key ID exists.\n");
5621 ret = 0;
5622 } else {
5623 pr_err("scm call to generate key failed : %d\n", ret);
5624 ret = -EFAULT;
5625 }
5626 goto generate_key_exit;
5627 }
5628
5629 switch (resp.result) {
5630 case QSEOS_RESULT_SUCCESS:
5631 break;
5632 case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
5633 pr_debug("Key ID exists.\n");
5634 break;
5635 case QSEOS_RESULT_INCOMPLETE:
5636 ret = __qseecom_process_incomplete_cmd(data, &resp);
5637 if (ret) {
5638 if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5639 pr_debug("Key ID exists.\n");
5640 ret = 0;
5641 } else {
5642 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5643 resp.result);
5644 }
5645 }
5646 break;
5647 case QSEOS_RESULT_FAILURE:
5648 default:
5649 pr_err("gen key scm call failed resp.result %d\n", resp.result);
5650 ret = -EINVAL;
5651 break;
5652 }
5653generate_key_exit:
5654 __qseecom_disable_clk(CLK_QSEE);
5655 return ret;
5656}
5657
5658static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
5659 enum qseecom_key_management_usage_type usage,
5660 struct qseecom_key_delete_ireq *ireq)
5661{
5662 struct qseecom_command_scm_resp resp;
5663 int ret;
5664
5665 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5666 usage >= QSEOS_KM_USAGE_MAX) {
5667 pr_err("Error:: unsupported usage %d\n", usage);
5668 return -EFAULT;
5669 }
5670 ret = __qseecom_enable_clk(CLK_QSEE);
5671 if (ret)
5672 return ret;
5673
5674 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5675 ireq, sizeof(struct qseecom_key_delete_ireq),
5676 &resp, sizeof(struct qseecom_command_scm_resp));
5677 if (ret) {
5678 if (ret == -EINVAL &&
5679 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5680 pr_debug("Max attempts to input password reached.\n");
5681 ret = -ERANGE;
5682 } else {
5683 pr_err("scm call to delete key failed : %d\n", ret);
5684 ret = -EFAULT;
5685 }
5686 goto del_key_exit;
5687 }
5688
5689 switch (resp.result) {
5690 case QSEOS_RESULT_SUCCESS:
5691 break;
5692 case QSEOS_RESULT_INCOMPLETE:
5693 ret = __qseecom_process_incomplete_cmd(data, &resp);
5694 if (ret) {
5695 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5696 resp.result);
5697 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5698 pr_debug("Max attempts to input password reached.\n");
5699 ret = -ERANGE;
5700 }
5701 }
5702 break;
5703 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5704 pr_debug("Max attempts to input password reached.\n");
5705 ret = -ERANGE;
5706 break;
5707 case QSEOS_RESULT_FAILURE:
5708 default:
5709 pr_err("Delete key scm call failed resp.result %d\n",
5710 resp.result);
5711 ret = -EINVAL;
5712 break;
5713 }
5714del_key_exit:
5715 __qseecom_disable_clk(CLK_QSEE);
5716 return ret;
5717}
5718
5719static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5720 enum qseecom_key_management_usage_type usage,
5721 struct qseecom_key_select_ireq *ireq)
5722{
5723 struct qseecom_command_scm_resp resp;
5724 int ret;
5725
5726 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5727 usage >= QSEOS_KM_USAGE_MAX) {
5728 pr_err("Error:: unsupported usage %d\n", usage);
5729 return -EFAULT;
5730 }
5731 ret = __qseecom_enable_clk(CLK_QSEE);
5732 if (ret)
5733 return ret;
5734
5735 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5736 ret = __qseecom_enable_clk(CLK_CE_DRV);
5737 if (ret)
5738 return ret;
5739 }
5740
5741 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5742 ireq, sizeof(struct qseecom_key_select_ireq),
5743 &resp, sizeof(struct qseecom_command_scm_resp));
5744 if (ret) {
5745 if (ret == -EINVAL &&
5746 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5747 pr_debug("Max attempts to input password reached.\n");
5748 ret = -ERANGE;
5749 } else if (ret == -EINVAL &&
5750 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5751 pr_debug("Set Key operation under processing...\n");
5752 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5753 } else {
5754 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5755 ret);
5756 ret = -EFAULT;
5757 }
5758 goto set_key_exit;
5759 }
5760
5761 switch (resp.result) {
5762 case QSEOS_RESULT_SUCCESS:
5763 break;
5764 case QSEOS_RESULT_INCOMPLETE:
5765 ret = __qseecom_process_incomplete_cmd(data, &resp);
5766 if (ret) {
5767 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5768 resp.result);
5769 if (resp.result ==
5770 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5771 pr_debug("Set Key operation under processing...\n");
5772 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5773 }
5774 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5775 pr_debug("Max attempts to input password reached.\n");
5776 ret = -ERANGE;
5777 }
5778 }
5779 break;
5780 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5781 pr_debug("Max attempts to input password reached.\n");
5782 ret = -ERANGE;
5783 break;
5784 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5785 pr_debug("Set Key operation under processing...\n");
5786 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5787 break;
5788 case QSEOS_RESULT_FAILURE:
5789 default:
5790 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5791 ret = -EINVAL;
5792 break;
5793 }
5794set_key_exit:
5795 __qseecom_disable_clk(CLK_QSEE);
5796 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5797 __qseecom_disable_clk(CLK_CE_DRV);
5798 return ret;
5799}
5800
/*
 * Send a QSEOS_UPDATE_KEY_USERINFO request to TZ to rebind the key
 * identified by @ireq to new user info (password hash).
 *
 * @data:  client handle, used to drive incomplete-command processing.
 * @usage: key-management usage; must lie in
 *         [QSEOS_KM_USAGE_DISK_ENCRYPTION, QSEOS_KM_USAGE_MAX).
 * @ireq:  fully populated userinfo-update request (command id, key id,
 *         current/new hash) built by the caller.
 *
 * Returns 0 on success, a negative errno on failure, or the positive
 * value QSEOS_RESULT_FAIL_PENDING_OPERATION when TZ reports the key
 * operation is still in progress (callers poll and retry on this value).
 * QSEE clock is enabled for the duration of the call and always released.
 */
static int __qseecom_update_current_key_user_info(
		struct qseecom_dev_handle *data,
		enum qseecom_key_management_usage_type usage,
		struct qseecom_key_userinfo_update_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/*
		 * -EINVAL with a PENDING result is not a hard failure: TZ
		 * is still busy with a previous key operation, so fall
		 * through to the switch below (which re-derives the same
		 * pending status from resp.result).
		 */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		} else {
			pr_err("scm call to update key userinfo failed: %d\n",
					ret);
			__qseecom_disable_clk(CLK_QSEE);
			return -EFAULT;
		}
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener servicing before it can finish. */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		/*
		 * resp.result may have been rewritten by the incomplete-cmd
		 * processing; a PENDING result here again means "retry
		 * later", overriding whatever ret was.
		 */
		if (resp.result ==
			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		}
		if (ret)
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
		break;
	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
		pr_debug("Update Key operation under processing...\n");
		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Set key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}

	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5862
5863
5864static int qseecom_enable_ice_setup(int usage)
5865{
5866 int ret = 0;
5867
5868 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5869 ret = qcom_ice_setup_ice_hw("ufs", true);
5870 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5871 ret = qcom_ice_setup_ice_hw("sdcc", true);
5872
5873 return ret;
5874}
5875
5876static int qseecom_disable_ice_setup(int usage)
5877{
5878 int ret = 0;
5879
5880 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5881 ret = qcom_ice_setup_ice_hw("ufs", false);
5882 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5883 ret = qcom_ice_setup_ice_hw("sdcc", false);
5884
5885 return ret;
5886}
5887
5888static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5889{
5890 struct qseecom_ce_info_use *pce_info_use, *p;
5891 int total = 0;
5892 int i;
5893
5894 switch (usage) {
5895 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5896 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5897 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5898 p = qseecom.ce_info.fde;
5899 total = qseecom.ce_info.num_fde;
5900 break;
5901 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5902 p = qseecom.ce_info.pfe;
5903 total = qseecom.ce_info.num_pfe;
5904 break;
5905 default:
5906 pr_err("unsupported usage %d\n", usage);
5907 return -EINVAL;
5908 }
5909
5910 pce_info_use = NULL;
5911
5912 for (i = 0; i < total; i++) {
5913 if (p->unit_num == unit) {
5914 pce_info_use = p;
5915 break;
5916 }
5917 p++;
5918 }
5919 if (!pce_info_use) {
5920 pr_err("can not find %d\n", unit);
5921 return -EINVAL;
5922 }
5923 return pce_info_use->num_ce_pipe_entries;
5924}
5925
5926static int qseecom_create_key(struct qseecom_dev_handle *data,
5927 void __user *argp)
5928{
5929 int i;
5930 uint32_t *ce_hw = NULL;
5931 uint32_t pipe = 0;
5932 int ret = 0;
5933 uint32_t flags = 0;
5934 struct qseecom_create_key_req create_key_req;
5935 struct qseecom_key_generate_ireq generate_key_ireq;
5936 struct qseecom_key_select_ireq set_key_ireq;
5937 uint32_t entries = 0;
5938
5939 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
5940 if (ret) {
5941 pr_err("copy_from_user failed\n");
5942 return ret;
5943 }
5944
5945 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5946 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5947 pr_err("unsupported usage %d\n", create_key_req.usage);
5948 ret = -EFAULT;
5949 return ret;
5950 }
5951 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5952 create_key_req.usage);
5953 if (entries <= 0) {
5954 pr_err("no ce instance for usage %d instance %d\n",
5955 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
5956 ret = -EINVAL;
5957 return ret;
5958 }
5959
5960 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5961 if (!ce_hw) {
5962 ret = -ENOMEM;
5963 return ret;
5964 }
5965 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
5966 DEFAULT_CE_INFO_UNIT);
5967 if (ret) {
5968 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5969 ret = -EINVAL;
5970 goto free_buf;
5971 }
5972
5973 if (qseecom.fde_key_size)
5974 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
5975 else
5976 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
5977
Jiten Patela7bb1d52018-05-11 12:34:26 +05305978 if (qseecom.enable_key_wrap_in_ks == true)
5979 flags |= ENABLE_KEY_WRAP_IN_KS;
5980
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005981 generate_key_ireq.flags = flags;
5982 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
5983 memset((void *)generate_key_ireq.key_id,
5984 0, QSEECOM_KEY_ID_SIZE);
5985 memset((void *)generate_key_ireq.hash32,
5986 0, QSEECOM_HASH_SIZE);
5987 memcpy((void *)generate_key_ireq.key_id,
5988 (void *)key_id_array[create_key_req.usage].desc,
5989 QSEECOM_KEY_ID_SIZE);
5990 memcpy((void *)generate_key_ireq.hash32,
5991 (void *)create_key_req.hash32,
5992 QSEECOM_HASH_SIZE);
5993
5994 ret = __qseecom_generate_and_save_key(data,
5995 create_key_req.usage, &generate_key_ireq);
5996 if (ret) {
5997 pr_err("Failed to generate key on storage: %d\n", ret);
5998 goto free_buf;
5999 }
6000
6001 for (i = 0; i < entries; i++) {
6002 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6003 if (create_key_req.usage ==
6004 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6005 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6006 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6007
6008 } else if (create_key_req.usage ==
6009 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6010 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6011 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6012
6013 } else {
6014 set_key_ireq.ce = ce_hw[i];
6015 set_key_ireq.pipe = pipe;
6016 }
6017 set_key_ireq.flags = flags;
6018
6019 /* set both PIPE_ENC and PIPE_ENC_XTS*/
6020 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6021 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6022 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6023 memcpy((void *)set_key_ireq.key_id,
6024 (void *)key_id_array[create_key_req.usage].desc,
6025 QSEECOM_KEY_ID_SIZE);
6026 memcpy((void *)set_key_ireq.hash32,
6027 (void *)create_key_req.hash32,
6028 QSEECOM_HASH_SIZE);
6029 /*
6030 * It will return false if it is GPCE based crypto instance or
6031 * ICE is setup properly
6032 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006033 ret = qseecom_enable_ice_setup(create_key_req.usage);
6034 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006035 goto free_buf;
6036
6037 do {
6038 ret = __qseecom_set_clear_ce_key(data,
6039 create_key_req.usage,
6040 &set_key_ireq);
6041 /*
6042 * wait a little before calling scm again to let other
6043 * processes run
6044 */
6045 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6046 msleep(50);
6047
6048 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6049
6050 qseecom_disable_ice_setup(create_key_req.usage);
6051
6052 if (ret) {
6053 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
6054 pipe, ce_hw[i], ret);
6055 goto free_buf;
6056 } else {
6057 pr_err("Set the key successfully\n");
6058 if ((create_key_req.usage ==
6059 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
6060 (create_key_req.usage ==
6061 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
6062 goto free_buf;
6063 }
6064 }
6065
6066free_buf:
6067 kzfree(ce_hw);
6068 return ret;
6069}
6070
6071static int qseecom_wipe_key(struct qseecom_dev_handle *data,
6072 void __user *argp)
6073{
6074 uint32_t *ce_hw = NULL;
6075 uint32_t pipe = 0;
6076 int ret = 0;
6077 uint32_t flags = 0;
6078 int i, j;
6079 struct qseecom_wipe_key_req wipe_key_req;
6080 struct qseecom_key_delete_ireq delete_key_ireq;
6081 struct qseecom_key_select_ireq clear_key_ireq;
6082 uint32_t entries = 0;
6083
6084 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6085 if (ret) {
6086 pr_err("copy_from_user failed\n");
6087 return ret;
6088 }
6089
6090 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6091 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6092 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6093 ret = -EFAULT;
6094 return ret;
6095 }
6096
6097 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6098 wipe_key_req.usage);
6099 if (entries <= 0) {
6100 pr_err("no ce instance for usage %d instance %d\n",
6101 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6102 ret = -EINVAL;
6103 return ret;
6104 }
6105
6106 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6107 if (!ce_hw) {
6108 ret = -ENOMEM;
6109 return ret;
6110 }
6111
6112 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6113 DEFAULT_CE_INFO_UNIT);
6114 if (ret) {
6115 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6116 ret = -EINVAL;
6117 goto free_buf;
6118 }
6119
6120 if (wipe_key_req.wipe_key_flag) {
6121 delete_key_ireq.flags = flags;
6122 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6123 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6124 memcpy((void *)delete_key_ireq.key_id,
6125 (void *)key_id_array[wipe_key_req.usage].desc,
6126 QSEECOM_KEY_ID_SIZE);
6127 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6128
6129 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6130 &delete_key_ireq);
6131 if (ret) {
6132 pr_err("Failed to delete key from ssd storage: %d\n",
6133 ret);
6134 ret = -EFAULT;
6135 goto free_buf;
6136 }
6137 }
6138
6139 for (j = 0; j < entries; j++) {
6140 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6141 if (wipe_key_req.usage ==
6142 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6143 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6144 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6145 } else if (wipe_key_req.usage ==
6146 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6147 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6148 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6149 } else {
6150 clear_key_ireq.ce = ce_hw[j];
6151 clear_key_ireq.pipe = pipe;
6152 }
6153 clear_key_ireq.flags = flags;
6154 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6155 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6156 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6157 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6158
6159 /*
6160 * It will return false if it is GPCE based crypto instance or
6161 * ICE is setup properly
6162 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006163 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6164 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006165 goto free_buf;
6166
6167 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6168 &clear_key_ireq);
6169
6170 qseecom_disable_ice_setup(wipe_key_req.usage);
6171
6172 if (ret) {
6173 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6174 pipe, ce_hw[j], ret);
6175 ret = -EFAULT;
6176 goto free_buf;
6177 }
6178 }
6179
6180free_buf:
6181 kzfree(ce_hw);
6182 return ret;
6183}
6184
6185static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6186 void __user *argp)
6187{
6188 int ret = 0;
6189 uint32_t flags = 0;
6190 struct qseecom_update_key_userinfo_req update_key_req;
6191 struct qseecom_key_userinfo_update_ireq ireq;
6192
6193 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6194 if (ret) {
6195 pr_err("copy_from_user failed\n");
6196 return ret;
6197 }
6198
6199 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6200 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6201 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6202 return -EFAULT;
6203 }
6204
6205 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6206
6207 if (qseecom.fde_key_size)
6208 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6209 else
6210 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6211
6212 ireq.flags = flags;
6213 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6214 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6215 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6216 memcpy((void *)ireq.key_id,
6217 (void *)key_id_array[update_key_req.usage].desc,
6218 QSEECOM_KEY_ID_SIZE);
6219 memcpy((void *)ireq.current_hash32,
6220 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6221 memcpy((void *)ireq.new_hash32,
6222 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6223
6224 do {
6225 ret = __qseecom_update_current_key_user_info(data,
6226 update_key_req.usage,
6227 &ireq);
6228 /*
6229 * wait a little before calling scm again to let other
6230 * processes run
6231 */
6232 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6233 msleep(50);
6234
6235 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6236 if (ret) {
6237 pr_err("Failed to update key info: %d\n", ret);
6238 return ret;
6239 }
6240 return ret;
6241
6242}
6243static int qseecom_is_es_activated(void __user *argp)
6244{
Zhen Kong26e62742018-05-04 17:19:06 -07006245 struct qseecom_is_es_activated_req req = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006246 struct qseecom_command_scm_resp resp;
6247 int ret;
6248
6249 if (qseecom.qsee_version < QSEE_VERSION_04) {
6250 pr_err("invalid qsee version\n");
6251 return -ENODEV;
6252 }
6253
6254 if (argp == NULL) {
6255 pr_err("arg is null\n");
6256 return -EINVAL;
6257 }
6258
6259 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6260 &req, sizeof(req), &resp, sizeof(resp));
6261 if (ret) {
6262 pr_err("scm_call failed\n");
6263 return ret;
6264 }
6265
6266 req.is_activated = resp.result;
6267 ret = copy_to_user(argp, &req, sizeof(req));
6268 if (ret) {
6269 pr_err("copy_to_user failed\n");
6270 return ret;
6271 }
6272
6273 return 0;
6274}
6275
6276static int qseecom_save_partition_hash(void __user *argp)
6277{
6278 struct qseecom_save_partition_hash_req req;
6279 struct qseecom_command_scm_resp resp;
6280 int ret;
6281
6282 memset(&resp, 0x00, sizeof(resp));
6283
6284 if (qseecom.qsee_version < QSEE_VERSION_04) {
6285 pr_err("invalid qsee version\n");
6286 return -ENODEV;
6287 }
6288
6289 if (argp == NULL) {
6290 pr_err("arg is null\n");
6291 return -EINVAL;
6292 }
6293
6294 ret = copy_from_user(&req, argp, sizeof(req));
6295 if (ret) {
6296 pr_err("copy_from_user failed\n");
6297 return ret;
6298 }
6299
6300 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6301 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6302 if (ret) {
6303 pr_err("qseecom_scm_call failed\n");
6304 return ret;
6305 }
6306
6307 return 0;
6308}
6309
/*
 * Cipher (encrypt or decrypt, per req.direction) a Mobile Device Theft
 * Protection DIP buffer through TZ.
 *
 * Userspace supplies input/output buffer pointers and sizes in a
 * struct qseecom_mdtp_cipher_dip_req at @argp; both sizes must be in
 * (0, MAX_DIP] and direction must be 0 or 1. The buffers are bounced
 * through page-aligned kernel allocations which are handed to TZ by
 * physical address, with cache maintenance around the SMC call.
 *
 * Returns 0 on success or a negative errno. The do/while(0) block is a
 * single-exit idiom: every 'break' falls through to the common cleanup
 * that frees (and zeroizes, via kzfree) both bounce buffers.
 */
static int qseecom_mdtp_cipher_dip(void __user *argp)
{
	struct qseecom_mdtp_cipher_dip_req req;
	u32 tzbuflenin, tzbuflenout;
	char *tzbufin = NULL, *tzbufout = NULL;
	struct scm_desc desc = {0};
	int ret;

	do {
		/* Copy the parameters from userspace */
		if (argp == NULL) {
			pr_err("arg is null\n");
			ret = -EINVAL;
			break;
		}

		ret = copy_from_user(&req, argp, sizeof(req));
		if (ret) {
			pr_err("copy_from_user failed, ret= %d\n", ret);
			break;
		}

		/* Validate sizes/direction before any allocation. */
		if (req.in_buf == NULL || req.out_buf == NULL ||
			req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
			req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
				req.direction > 1) {
			pr_err("invalid parameters\n");
			ret = -EINVAL;
			break;
		}

		/* Copy the input buffer from userspace to kernel space */
		tzbuflenin = PAGE_ALIGN(req.in_buf_size);
		tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
		if (!tzbufin) {
			pr_err("error allocating in buffer\n");
			ret = -ENOMEM;
			break;
		}

		ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
		if (ret) {
			pr_err("copy_from_user failed, ret=%d\n", ret);
			break;
		}

		/* Push input to RAM so TZ sees it. */
		dmac_flush_range(tzbufin, tzbufin + tzbuflenin);

		/* Prepare the output buffer in kernel space */
		tzbuflenout = PAGE_ALIGN(req.out_buf_size);
		tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
		if (!tzbufout) {
			pr_err("error allocating out buffer\n");
			ret = -ENOMEM;
			break;
		}

		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);

		/* Send the command to TZ */
		desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
		desc.args[0] = virt_to_phys(tzbufin);
		desc.args[1] = req.in_buf_size;
		desc.args[2] = virt_to_phys(tzbufout);
		desc.args[3] = req.out_buf_size;
		desc.args[4] = req.direction;

		ret = __qseecom_enable_clk(CLK_QSEE);
		if (ret)
			break;

		ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);

		__qseecom_disable_clk(CLK_QSEE);

		if (ret) {
			pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
				ret);
			break;
		}

		/* Copy the output buffer from kernel space to userspace */
		/* Invalidate first so we read what TZ wrote, not stale cache. */
		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
		ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
		if (ret) {
			pr_err("copy_to_user failed, ret=%d\n", ret);
			break;
		}
	} while (0);

	/* kzfree zeroizes before freeing; kzfree(NULL) is a no-op. */
	kzfree(tzbufin);
	kzfree(tzbufout);

	return ret;
}
6405
/*
 * Validate a QTEEC request before it is sent to TZ: both the command
 * buffer (req_ptr/req_len) and response buffer (resp_ptr/resp_len) must
 * be non-NULL, non-empty, free of integer overflow, and lie entirely
 * within the client's shared buffer [user_virt_sb_base,
 * user_virt_sb_base + sb_length). @data must be a registered
 * QSEECOM_CLIENT_APP with an initialized ion handle.
 *
 * Returns 0 when valid; -EINVAL/-EFAULT/-ENOMEM otherwise. The pointers
 * are userspace virtual addresses at this stage; the overflow checks
 * deliberately precede the range checks so the arithmetic below them
 * cannot wrap.
 */
static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req)
{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	if (data->type != QSEECOM_CLIENT_APP)
		return -EFAULT;

	/* req_len + resp_len must not wrap before the size check below. */
	if (req->req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if (req->req_len + req->resp_len > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
			(req->req_len + req->resp_len), data->client.sb_length);
		return -ENOMEM;
	}

	if (req->req_ptr == NULL || req->resp_ptr == NULL) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* Both buffer start addresses must lie inside the shared buffer. */
	if (((uintptr_t)req->req_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->req_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if (((uintptr_t)req->resp_ptr <
			data->client.user_virt_sb_base)  ||
		((uintptr_t)req->resp_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if ((req->req_len == 0) || (req->resp_len == 0)) {
		pr_err("cmd buf lengtgh/response buf length not valid\n");
		return -EINVAL;
	}

	/* ptr + len must not wrap before the end-range checks below. */
	if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
		pr_err("Integer overflow in req_len & req_ptr\n");
		return -EINVAL;
	}

	if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_ptr\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* Both buffer ends must also lie inside the shared buffer. */
	if ((((uintptr_t)req->req_ptr + req->req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length)) ||
		(((uintptr_t)req->resp_ptr + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
6480
6481static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6482 uint32_t fd_idx, struct sg_table *sg_ptr)
6483{
6484 struct scatterlist *sg = sg_ptr->sgl;
6485 struct qseecom_sg_entry *sg_entry;
6486 void *buf;
6487 uint i;
6488 size_t size;
6489 dma_addr_t coh_pmem;
6490
6491 if (fd_idx >= MAX_ION_FD) {
6492 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6493 return -ENOMEM;
6494 }
6495 /*
6496 * Allocate a buffer, populate it with number of entry plus
6497 * each sg entry's phy addr and length; then return the
6498 * phy_addr of the buffer.
6499 */
6500 size = sizeof(uint32_t) +
6501 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6502 size = (size + PAGE_SIZE) & PAGE_MASK;
6503 buf = dma_alloc_coherent(qseecom.pdev,
6504 size, &coh_pmem, GFP_KERNEL);
6505 if (buf == NULL) {
6506 pr_err("failed to alloc memory for sg buf\n");
6507 return -ENOMEM;
6508 }
6509 *(uint32_t *)buf = sg_ptr->nents;
6510 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6511 for (i = 0; i < sg_ptr->nents; i++) {
6512 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6513 sg_entry->len = sg->length;
6514 sg_entry++;
6515 sg = sg_next(sg);
6516 }
6517 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6518 data->client.sec_buf_fd[fd_idx].vbase = buf;
6519 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6520 data->client.sec_buf_fd[fd_idx].size = size;
6521 return 0;
6522}
6523
6524static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6525 struct qseecom_dev_handle *data, bool cleanup)
6526{
6527 struct ion_handle *ihandle;
6528 int ret = 0;
6529 int i = 0;
6530 uint32_t *update;
6531 struct sg_table *sg_ptr = NULL;
6532 struct scatterlist *sg;
6533 struct qseecom_param_memref *memref;
6534
6535 if (req == NULL) {
6536 pr_err("Invalid address\n");
6537 return -EINVAL;
6538 }
6539 for (i = 0; i < MAX_ION_FD; i++) {
6540 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006541 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006542 req->ifd_data[i].fd);
6543 if (IS_ERR_OR_NULL(ihandle)) {
6544 pr_err("Ion client can't retrieve the handle\n");
6545 return -ENOMEM;
6546 }
6547 if ((req->req_len < sizeof(uint32_t)) ||
6548 (req->ifd_data[i].cmd_buf_offset >
6549 req->req_len - sizeof(uint32_t))) {
6550 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6551 req->req_len,
6552 req->ifd_data[i].cmd_buf_offset);
6553 return -EINVAL;
6554 }
6555 update = (uint32_t *)((char *) req->req_ptr +
6556 req->ifd_data[i].cmd_buf_offset);
6557 if (!update) {
6558 pr_err("update pointer is NULL\n");
6559 return -EINVAL;
6560 }
6561 } else {
6562 continue;
6563 }
6564 /* Populate the cmd data structure with the phys_addr */
6565 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6566 if (IS_ERR_OR_NULL(sg_ptr)) {
6567 pr_err("IOn client could not retrieve sg table\n");
6568 goto err;
6569 }
6570 sg = sg_ptr->sgl;
6571 if (sg == NULL) {
6572 pr_err("sg is NULL\n");
6573 goto err;
6574 }
6575 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6576 pr_err("Num of scat entr (%d)or length(%d) invalid\n",
6577 sg_ptr->nents, sg->length);
6578 goto err;
6579 }
6580 /* clean up buf for pre-allocated fd */
6581 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6582 (*update)) {
6583 if (data->client.sec_buf_fd[i].vbase)
6584 dma_free_coherent(qseecom.pdev,
6585 data->client.sec_buf_fd[i].size,
6586 data->client.sec_buf_fd[i].vbase,
6587 data->client.sec_buf_fd[i].pbase);
6588 memset((void *)update, 0,
6589 sizeof(struct qseecom_param_memref));
6590 memset(&(data->client.sec_buf_fd[i]), 0,
6591 sizeof(struct qseecom_sec_buf_fd_info));
6592 goto clean;
6593 }
6594
6595 if (*update == 0) {
6596 /* update buf for pre-allocated fd from secure heap*/
6597 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6598 sg_ptr);
6599 if (ret) {
6600 pr_err("Failed to handle buf for fd[%d]\n", i);
6601 goto err;
6602 }
6603 memref = (struct qseecom_param_memref *)update;
6604 memref->buffer =
6605 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6606 memref->size =
6607 (uint32_t)(data->client.sec_buf_fd[i].size);
6608 } else {
6609 /* update buf for fd from non-secure qseecom heap */
6610 if (sg_ptr->nents != 1) {
6611 pr_err("Num of scat entr (%d) invalid\n",
6612 sg_ptr->nents);
6613 goto err;
6614 }
6615 if (cleanup)
6616 *update = 0;
6617 else
6618 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6619 }
6620clean:
6621 if (cleanup) {
6622 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6623 ihandle, NULL, sg->length,
6624 ION_IOC_INV_CACHES);
6625 if (ret) {
6626 pr_err("cache operation failed %d\n", ret);
6627 goto err;
6628 }
6629 } else {
6630 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6631 ihandle, NULL, sg->length,
6632 ION_IOC_CLEAN_INV_CACHES);
6633 if (ret) {
6634 pr_err("cache operation failed %d\n", ret);
6635 goto err;
6636 }
6637 data->sglistinfo_ptr[i].indexAndFlags =
6638 SGLISTINFO_SET_INDEX_FLAG(
6639 (sg_ptr->nents == 1), 0,
6640 req->ifd_data[i].cmd_buf_offset);
6641 data->sglistinfo_ptr[i].sizeOrCount =
6642 (sg_ptr->nents == 1) ?
6643 sg->length : sg_ptr->nents;
6644 data->sglist_cnt = i + 1;
6645 }
6646 /* Deallocate the handle */
6647 if (!IS_ERR_OR_NULL(ihandle))
6648 ion_free(qseecom.ion_clnt, ihandle);
6649 }
6650 return ret;
6651err:
6652 if (!IS_ERR_OR_NULL(ihandle))
6653 ion_free(qseecom.ion_clnt, ihandle);
6654 return -ENOMEM;
6655}
6656
/*
 * Core QTEEC command path: validate the request, translate the client's
 * user-space request/response pointers into kernel/physical addresses
 * within its shared buffer, build the 32-bit or 64-bit ireq depending on
 * the QSEE version, and issue @cmd_id to TZ, handling reentrancy or
 * incomplete-command completion as the target supports.
 *
 * @data:   registered QSEECOM_CLIENT_APP handle.
 * @req:    QTEEC request; for OPEN_SESSION / REQUEST_CANCELLATION it is
 *          actually a qseecom_qteec_modfd_req and its fd memrefs are
 *          patched in before the call and restored after.
 * @cmd_id: QSEOS_TEE_* command.
 *
 * Returns 0 on success or a negative errno.
 */
static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req, uint32_t cmd_id)
{
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	int ret2 = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = __qseecom_qteec_validate_msg(data, req);
	if (ret)
		return ret;

	/*
	 * Keep the original user-space pointers: req->req_ptr/resp_ptr are
	 * rewritten to kernel vaddrs below, but the phys translation takes
	 * the user vaddrs.
	 */
	req_ptr = req->req_ptr;
	resp_ptr = req->resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* Kernel-virtual views are needed for in-place fd patching. */
	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->req_ptr);
	req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->resp_ptr);

	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, false);
		if (ret)
			return ret;
	}

	/* Build the version-appropriate ireq (32-bit pre-QSEE 4.0). */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req->req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req->resp_len;
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req->req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req->resp_len;
		/* 32-bit TAs cannot address buffers at/above 4 GB. */
		if ((data->client.app_arch == ELFCLASS32) &&
			((ireq_64bit.req_ptr >=
				PHY_ADDR_4G - ireq_64bit.req_len) ||
			(ireq_64bit.resp_ptr >=
				PHY_ADDR_4G - ireq_64bit.resp_len))){
			pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
				data->client.app_name, data->client.app_id);
			pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
				ireq_64bit.req_ptr, ireq_64bit.req_len,
				ireq_64bit.resp_ptr, ireq_64bit.resp_len);
			return -EFAULT;
		}
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
	}
	/* First word of either ireq layout is the command id. */
	if (qseecom.whitelist_support == true
		&& cmd_id == QSEOS_TEE_OPEN_SESSION)
		*(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
	else
		*(uint32_t *)cmd_buf = cmd_id;

	reqd_len_sb_in = req->req_len + req->resp_len;
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
						resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/* Always invalidate and restore fd memrefs, even on error. */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret2) {
		/* NOTE(review): prints 'ret' but the failing code is 'ret2'. */
		pr_err("cache operation failed %d\n", ret);
		return ret2;
	}

	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret2 = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, true);
		if (ret2)
			return ret2;
	}
	return ret;
}
6819
6820static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6821 void __user *argp)
6822{
6823 struct qseecom_qteec_modfd_req req;
6824 int ret = 0;
6825
6826 ret = copy_from_user(&req, argp,
6827 sizeof(struct qseecom_qteec_modfd_req));
6828 if (ret) {
6829 pr_err("copy_from_user failed\n");
6830 return ret;
6831 }
6832 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6833 QSEOS_TEE_OPEN_SESSION);
6834
6835 return ret;
6836}
6837
6838static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6839 void __user *argp)
6840{
6841 struct qseecom_qteec_req req;
6842 int ret = 0;
6843
6844 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6845 if (ret) {
6846 pr_err("copy_from_user failed\n");
6847 return ret;
6848 }
6849 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6850 return ret;
6851}
6852
6853static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
6854 void __user *argp)
6855{
6856 struct qseecom_qteec_modfd_req req;
6857 struct qseecom_command_scm_resp resp;
6858 struct qseecom_qteec_ireq ireq;
6859 struct qseecom_qteec_64bit_ireq ireq_64bit;
6860 struct qseecom_registered_app_list *ptr_app;
6861 bool found_app = false;
6862 unsigned long flags;
6863 int ret = 0;
6864 int i = 0;
6865 uint32_t reqd_len_sb_in = 0;
6866 void *cmd_buf = NULL;
6867 size_t cmd_len;
6868 struct sglist_info *table = data->sglistinfo_ptr;
6869 void *req_ptr = NULL;
6870 void *resp_ptr = NULL;
6871
6872 ret = copy_from_user(&req, argp,
6873 sizeof(struct qseecom_qteec_modfd_req));
6874 if (ret) {
6875 pr_err("copy_from_user failed\n");
6876 return ret;
6877 }
6878 ret = __qseecom_qteec_validate_msg(data,
6879 (struct qseecom_qteec_req *)(&req));
6880 if (ret)
6881 return ret;
6882 req_ptr = req.req_ptr;
6883 resp_ptr = req.resp_ptr;
6884
6885 /* find app_id & img_name from list */
6886 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6887 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6888 list) {
6889 if ((ptr_app->app_id == data->client.app_id) &&
6890 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6891 found_app = true;
6892 break;
6893 }
6894 }
6895 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6896 if (!found_app) {
6897 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6898 (char *)data->client.app_name);
6899 return -ENOENT;
6900 }
6901
6902 /* validate offsets */
6903 for (i = 0; i < MAX_ION_FD; i++) {
6904 if (req.ifd_data[i].fd) {
6905 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
6906 return -EINVAL;
6907 }
6908 }
6909 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6910 (uintptr_t)req.req_ptr);
6911 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6912 (uintptr_t)req.resp_ptr);
6913 ret = __qseecom_update_qteec_req_buf(&req, data, false);
6914 if (ret)
6915 return ret;
6916
6917 if (qseecom.qsee_version < QSEE_VERSION_40) {
6918 ireq.app_id = data->client.app_id;
6919 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6920 (uintptr_t)req_ptr);
6921 ireq.req_len = req.req_len;
6922 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6923 (uintptr_t)resp_ptr);
6924 ireq.resp_len = req.resp_len;
6925 cmd_buf = (void *)&ireq;
6926 cmd_len = sizeof(struct qseecom_qteec_ireq);
6927 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6928 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6929 dmac_flush_range((void *)table,
6930 (void *)table + SGLISTINFO_TABLE_SIZE);
6931 } else {
6932 ireq_64bit.app_id = data->client.app_id;
6933 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6934 (uintptr_t)req_ptr);
6935 ireq_64bit.req_len = req.req_len;
6936 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6937 (uintptr_t)resp_ptr);
6938 ireq_64bit.resp_len = req.resp_len;
6939 cmd_buf = (void *)&ireq_64bit;
6940 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6941 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6942 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6943 dmac_flush_range((void *)table,
6944 (void *)table + SGLISTINFO_TABLE_SIZE);
6945 }
6946 reqd_len_sb_in = req.req_len + req.resp_len;
6947 if (qseecom.whitelist_support == true)
6948 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
6949 else
6950 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
6951
6952 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6953 data->client.sb_virt,
6954 reqd_len_sb_in,
6955 ION_IOC_CLEAN_INV_CACHES);
6956 if (ret) {
6957 pr_err("cache operation failed %d\n", ret);
6958 return ret;
6959 }
6960
6961 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6962
6963 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6964 cmd_buf, cmd_len,
6965 &resp, sizeof(resp));
6966 if (ret) {
6967 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6968 ret, data->client.app_id);
6969 return ret;
6970 }
6971
6972 if (qseecom.qsee_reentrancy_support) {
6973 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
6974 } else {
6975 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6976 ret = __qseecom_process_incomplete_cmd(data, &resp);
6977 if (ret) {
6978 pr_err("process_incomplete_cmd failed err: %d\n",
6979 ret);
6980 return ret;
6981 }
6982 } else {
6983 if (resp.result != QSEOS_RESULT_SUCCESS) {
6984 pr_err("Response result %d not supported\n",
6985 resp.result);
6986 ret = -EINVAL;
6987 }
6988 }
6989 }
6990 ret = __qseecom_update_qteec_req_buf(&req, data, true);
6991 if (ret)
6992 return ret;
6993
6994 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6995 data->client.sb_virt, data->client.sb_length,
6996 ION_IOC_INV_CACHES);
6997 if (ret) {
6998 pr_err("cache operation failed %d\n", ret);
6999 return ret;
7000 }
7001 return 0;
7002}
7003
7004static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
7005 void __user *argp)
7006{
7007 struct qseecom_qteec_modfd_req req;
7008 int ret = 0;
7009
7010 ret = copy_from_user(&req, argp,
7011 sizeof(struct qseecom_qteec_modfd_req));
7012 if (ret) {
7013 pr_err("copy_from_user failed\n");
7014 return ret;
7015 }
7016 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
7017 QSEOS_TEE_REQUEST_CANCELLATION);
7018
7019 return ret;
7020}
7021
7022static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
7023{
7024 if (data->sglist_cnt) {
7025 memset(data->sglistinfo_ptr, 0,
7026 SGLISTINFO_TABLE_SIZE);
7027 data->sglist_cnt = 0;
7028 }
7029}
7030
/*
 * Main ioctl dispatcher for /dev/qseecom.
 *
 * Locking model (per case, as visible below):
 *  - app_access_lock serializes all TZ app traffic (load/unload/send-cmd,
 *    key ops, GP session ops) -- only one client at a time.
 *  - listener_access_lock serializes listener register/unregister and
 *    listener responses.
 *  - qsee_bw_mutex protects bus-bandwidth vote bookkeeping.
 *  - data->ioctl_count is incremented around each operation so release/
 *    abort paths can wait (abort_wq) for in-flight ioctls to drain.
 *
 * Returns 0 or a per-case error code. NOTE(review): a few version-check
 * failures "return -EINVAL" directly instead of "break" -- safe here
 * because no lock is held at those points, but they bypass the common
 * exit path.
 */
static inline long qseecom_ioctl(struct file *file,
			unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	struct qseecom_dev_handle *data = file->private_data;
	void __user *argp = (void __user *) arg;
	bool perf_enabled = false;

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	if (data->abort) {
		pr_err("Aborting qseecom driver\n");
		return -ENODEV;
	}
	/*
	 * Flush any deferred listener unregisters before commands that may
	 * talk to TZ; skip for listener-side calls which must not contend
	 * with that processing.
	 */
	if (cmd != QSEECOM_IOCTL_RECEIVE_REQ &&
		cmd != QSEECOM_IOCTL_SEND_RESP_REQ &&
		cmd != QSEECOM_IOCTL_SEND_MODFD_RESP &&
		cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64)
		__qseecom_processing_pending_lsnr_unregister();

	switch (cmd) {
	case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
		/* Only a fresh (generic) handle may become a listener. */
		if (data->type != QSEECOM_GENERIC) {
			pr_err("reg lstnr req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		pr_debug("ioctl register_listener_req()\n");
		mutex_lock(&listener_access_lock);
		atomic_inc(&data->ioctl_count);
		data->type = QSEECOM_LISTENER_SERVICE;
		ret = qseecom_register_listener(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&listener_access_lock);
		if (ret)
			pr_err("failed qseecom_register_listener: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SET_ICE_INFO: {
		struct qseecom_ice_data_t ice_data;

		ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
		if (ret) {
			pr_err("copy_from_user failed\n");
			return -EFAULT;
		}
		/* Pass the FDE flag straight to the ICE driver. */
		qcom_ice_set_fde_flag(ice_data.flag);
		break;
	}

	case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
						data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		pr_debug("ioctl unregister_listener_req()\n");
		mutex_lock(&listener_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_unregister_listener(data);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&listener_access_lock);
		if (ret)
			pr_err("failed qseecom_unregister_listener: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SEND_CMD_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		if (qseecom.support_bus_scaling) {
			/* register bus bw in case the client doesn't do it */
			if (!data->mode) {
				mutex_lock(&qsee_bw_mutex);
				__qseecom_register_bus_bandwidth_needs(
								data, HIGH);
				mutex_unlock(&qsee_bw_mutex);
			}
			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
			if (ret) {
				pr_err("Failed to set bw.\n");
				ret = -EINVAL;
				mutex_unlock(&app_access_lock);
				break;
			}
		}
		/*
		 * On targets where crypto clock is handled by HLOS,
		 * if clk_access_cnt is zero and perf_enabled is false,
		 * then the crypto clock was not enabled before sending cmd to
		 * tz, qseecom will enable the clock to avoid service failure.
		 */
		if (!qseecom.no_clock_support &&
			!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
			pr_debug("ce clock is not enabled!\n");
			ret = qseecom_perf_enable(data);
			if (ret) {
				pr_err("Failed to vote for clock with err %d\n",
						ret);
				mutex_unlock(&app_access_lock);
				ret = -EINVAL;
				break;
			}
			perf_enabled = true;
		}
		atomic_inc(&data->ioctl_count);
		ret = qseecom_send_cmd(data, argp);
		if (qseecom.support_bus_scaling)
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		if (perf_enabled) {
			/* Drop the clock vote we took above. */
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		}
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed qseecom_send_cmd: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
	case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		if (qseecom.support_bus_scaling) {
			if (!data->mode) {
				mutex_lock(&qsee_bw_mutex);
				__qseecom_register_bus_bandwidth_needs(
								data, HIGH);
				mutex_unlock(&qsee_bw_mutex);
			}
			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
			if (ret) {
				pr_err("Failed to set bw.\n");
				mutex_unlock(&app_access_lock);
				ret = -EINVAL;
				break;
			}
		}
		/*
		 * On targets where crypto clock is handled by HLOS,
		 * if clk_access_cnt is zero and perf_enabled is false,
		 * then the crypto clock was not enabled before sending cmd to
		 * tz, qseecom will enable the clock to avoid service failure.
		 */
		if (!qseecom.no_clock_support &&
			!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
			pr_debug("ce clock is not enabled!\n");
			ret = qseecom_perf_enable(data);
			if (ret) {
				pr_err("Failed to vote for clock with err %d\n",
						ret);
				mutex_unlock(&app_access_lock);
				ret = -EINVAL;
				break;
			}
			perf_enabled = true;
		}
		atomic_inc(&data->ioctl_count);
		/* 32-bit vs 64-bit userspace layout of the fd array. */
		if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
			ret = qseecom_send_modfd_cmd(data, argp);
		else
			ret = qseecom_send_modfd_cmd_64(data, argp);
		if (qseecom.support_bus_scaling)
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		if (perf_enabled) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		}
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed qseecom_send_cmd: %d\n", ret);
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_IOCTL_RECEIVE_REQ: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("receive req: invalid handle (%d), lid(%d)\n",
						data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		/* Blocks until a TZ request for this listener arrives. */
		ret = qseecom_receive_req(data);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		if (ret && (ret != -ERESTARTSYS))
			pr_err("failed qseecom_receive_req: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SEND_RESP_REQ: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("send resp req: invalid handle (%d), lid(%d)\n",
						data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		mutex_lock(&listener_access_lock);
		atomic_inc(&data->ioctl_count);
		if (!qseecom.qsee_reentrancy_support)
			ret = qseecom_send_resp();
		else
			ret = qseecom_reentrancy_send_resp(data);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&listener_access_lock);
		if (ret)
			pr_err("failed qseecom_send_resp: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
		if ((data->type != QSEECOM_CLIENT_APP) &&
			(data->type != QSEECOM_GENERIC) &&
			(data->type != QSEECOM_SECURE_SERVICE)) {
			pr_err("set mem param req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_set_client_mem_param(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed Qqseecom_set_mem_param request: %d\n",
								ret);
		break;
	}
	case QSEECOM_IOCTL_LOAD_APP_REQ: {
		if ((data->type != QSEECOM_GENERIC) &&
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("load app req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		/* Handle becomes a client-app handle from here on. */
		data->type = QSEECOM_CLIENT_APP;
		pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_load_app(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed load_app request: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_unload_app(data, false);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed unload_app request: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_get_qseos_version(data, argp);
		if (ret)
			pr_err("qseecom_get_qseos_version: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
		if ((data->type != QSEECOM_GENERIC) &&
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("perf enable req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if ((data->type == QSEECOM_CLIENT_APP) &&
			(data->client.app_id == 0)) {
			pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		/* Either register a bus-bw vote or directly enable clocks. */
		if (qseecom.support_bus_scaling) {
			mutex_lock(&qsee_bw_mutex);
			__qseecom_register_bus_bandwidth_needs(data, HIGH);
			mutex_unlock(&qsee_bw_mutex);
		} else {
			ret = qseecom_perf_enable(data);
			if (ret)
				pr_err("Fail to vote for clocks %d\n", ret);
		}
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
		if ((data->type != QSEECOM_SECURE_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("perf disable req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if ((data->type == QSEECOM_CLIENT_APP) &&
			(data->client.app_id == 0)) {
			pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		if (!qseecom.support_bus_scaling) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		} else {
			mutex_lock(&qsee_bw_mutex);
			qseecom_unregister_bus_bandwidth_needs(data);
			mutex_unlock(&qsee_bw_mutex);
		}
		atomic_dec(&data->ioctl_count);
		break;
	}

	case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
		/* If crypto clock is not handled by HLOS, return directly. */
		if (qseecom.no_clock_support) {
			pr_debug("crypto clock is not handled by HLOS\n");
			break;
		}
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		ret = qseecom_scale_bus_bandwidth(data, argp);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("load ext elf req: invalid client handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
		/* released=true: release() won't tear this type down again. */
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_load_external_elf(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed load_external_elf request: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
		if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
			pr_err("unload ext elf req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_unload_external_elf(data);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed unload_app request: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
		data->type = QSEECOM_CLIENT_APP;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
		ret = qseecom_query_app_loaded(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("send cmd svc req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->type = QSEECOM_SECURE_SERVICE;
		if (qseecom.qsee_version < QSEE_VERSION_03) {
			pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_send_service_cmd(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_CREATE_KEY_REQ: {
		if (!(qseecom.support_pfe || qseecom.support_fde))
			pr_err("Features requiring key init not supported\n");
		if (data->type != QSEECOM_GENERIC) {
			pr_err("create key req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_05) {
			pr_err("Create Key feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_create_key(data, argp);
		if (ret)
			pr_err("failed to create encryption key: %d\n", ret);

		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_WIPE_KEY_REQ: {
		if (!(qseecom.support_pfe || qseecom.support_fde))
			pr_err("Features requiring key init not supported\n");
		if (data->type != QSEECOM_GENERIC) {
			pr_err("wipe key req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_05) {
			pr_err("Wipe Key feature unsupported in qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_wipe_key(data, argp);
		if (ret)
			pr_err("failed to wipe encryption key: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
		if (!(qseecom.support_pfe || qseecom.support_fde))
			pr_err("Features requiring key init not supported\n");
		if (data->type != QSEECOM_GENERIC) {
			pr_err("update key req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_05) {
			pr_err("Update Key feature unsupported in qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_update_key_user_info(data, argp);
		if (ret)
			pr_err("failed to update key user info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("save part hash req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_save_partition_hash(argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("ES activated req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_is_es_activated(argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_mdtp_cipher_dip(argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_SEND_MODFD_RESP:
	case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("receive req: invalid handle (%d), lid(%d)\n",
						data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		mutex_lock(&listener_access_lock);
		atomic_inc(&data->ioctl_count);
		if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
			ret = qseecom_send_modfd_resp(data, argp);
		else
			ret = qseecom_send_modfd_resp_64(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&listener_access_lock);
		if (ret)
			pr_err("failed qseecom_send_mod_resp: %d\n", ret);
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Open session: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		/* GP (GlobalPlatform TEE) ops need QSEE >= 4.0. */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_open_session(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed open_session_cmd: %d\n", ret);
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Close session: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_close_session(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed close_session_cmd: %d\n", ret);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed Invoke cmd: %d\n", ret);
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_request_cancellation(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed request_cancellation: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_get_ce_info(data, argp);
		if (ret)
			pr_err("failed get fde ce pipe info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_free_ce_info(data, argp);
		if (ret)
			pr_err("failed get fde ce pipe info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_query_ce_info(data, argp);
		if (ret)
			pr_err("failed get fde ce pipe info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	default:
		pr_err("Invalid IOCTL: 0x%x\n", cmd);
		return -EINVAL;
	}
	return ret;
}
7741
7742static int qseecom_open(struct inode *inode, struct file *file)
7743{
7744 int ret = 0;
7745 struct qseecom_dev_handle *data;
7746
7747 data = kzalloc(sizeof(*data), GFP_KERNEL);
7748 if (!data)
7749 return -ENOMEM;
7750 file->private_data = data;
7751 data->abort = 0;
7752 data->type = QSEECOM_GENERIC;
7753 data->released = false;
7754 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7755 data->mode = INACTIVE;
7756 init_waitqueue_head(&data->abort_wq);
7757 atomic_set(&data->ioctl_count, 0);
7758 return ret;
7759}
7760
/*
 * release() handler for /dev/qseecom: tear down whatever role this fd
 * took on during its lifetime, then drop any clock/bandwidth votes.
 *
 * For listener handles the private data is NOT freed here: deferred
 * listener unregistration may still reference it, so ownership passes to
 * the listener teardown path (free_private_data = false).
 */
static int qseecom_release(struct inode *inode, struct file *file)
{
	struct qseecom_dev_handle *data = file->private_data;
	int ret = 0;
	bool free_private_data = true;

	/* released==true means an ioctl already did the type teardown. */
	if (data->released == false) {
		pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
			data->type, data->mode, data);
		switch (data->type) {
		case QSEECOM_LISTENER_SERVICE:
			pr_debug("release lsnr svc %d\n", data->listener.id);
			free_private_data = false;
			mutex_lock(&listener_access_lock);
			ret = qseecom_unregister_listener(data);
			mutex_unlock(&listener_access_lock);
			break;
		case QSEECOM_CLIENT_APP:
			mutex_lock(&app_access_lock);
			ret = qseecom_unload_app(data, true);
			mutex_unlock(&app_access_lock);
			break;
		case QSEECOM_SECURE_SERVICE:
		case QSEECOM_GENERIC:
			ret = qseecom_unmap_ion_allocated_memory(data);
			if (ret)
				pr_err("Ion Unmap failed\n");
			break;
		case QSEECOM_UNAVAILABLE_CLIENT_APP:
			break;
		default:
			pr_err("Unsupported clnt_handle_type %d",
				data->type);
			break;
		}
	}

	/* Drop this fd's bus-bandwidth or direct clock votes. */
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		if (data->mode != INACTIVE) {
			qseecom_unregister_bus_bandwidth_needs(data);
			if (qseecom.cumulative_mode == INACTIVE) {
				ret = __qseecom_set_msm_bus_request(INACTIVE);
				if (ret)
					pr_err("Fail to scale down bus\n");
			}
		}
		mutex_unlock(&qsee_bw_mutex);
	} else {
		if (data->fast_load_enabled == true)
			qsee_disable_clock_vote(data, CLK_SFPB);
		if (data->perf_enabled == true)
			qsee_disable_clock_vote(data, CLK_DFAB);
	}

	if (free_private_data)
		kfree(data);
	return ret;
}
7820
7821#ifdef CONFIG_COMPAT
7822#include "compat_qseecom.c"
7823#else
7824#define compat_qseecom_ioctl NULL
7825#endif
7826
/* Character-device callbacks for the qseecom device node. */
static const struct file_operations qseecom_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qseecom_ioctl,
	.compat_ioctl = compat_qseecom_ioctl,
	.open = qseecom_open,
	.release = qseecom_release
};
7834
7835static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7836{
7837 int rc = 0;
7838 struct device *pdev;
7839 struct qseecom_clk *qclk;
7840 char *core_clk_src = NULL;
7841 char *core_clk = NULL;
7842 char *iface_clk = NULL;
7843 char *bus_clk = NULL;
7844
7845 switch (ce) {
7846 case CLK_QSEE: {
7847 core_clk_src = "core_clk_src";
7848 core_clk = "core_clk";
7849 iface_clk = "iface_clk";
7850 bus_clk = "bus_clk";
7851 qclk = &qseecom.qsee;
7852 qclk->instance = CLK_QSEE;
7853 break;
7854 };
7855 case CLK_CE_DRV: {
7856 core_clk_src = "ce_drv_core_clk_src";
7857 core_clk = "ce_drv_core_clk";
7858 iface_clk = "ce_drv_iface_clk";
7859 bus_clk = "ce_drv_bus_clk";
7860 qclk = &qseecom.ce_drv;
7861 qclk->instance = CLK_CE_DRV;
7862 break;
7863 };
7864 default:
7865 pr_err("Invalid ce hw instance: %d!\n", ce);
7866 return -EIO;
7867 }
7868
7869 if (qseecom.no_clock_support) {
7870 qclk->ce_core_clk = NULL;
7871 qclk->ce_clk = NULL;
7872 qclk->ce_bus_clk = NULL;
7873 qclk->ce_core_src_clk = NULL;
7874 return 0;
7875 }
7876
7877 pdev = qseecom.pdev;
7878
7879 /* Get CE3 src core clk. */
7880 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7881 if (!IS_ERR(qclk->ce_core_src_clk)) {
7882 rc = clk_set_rate(qclk->ce_core_src_clk,
7883 qseecom.ce_opp_freq_hz);
7884 if (rc) {
7885 clk_put(qclk->ce_core_src_clk);
7886 qclk->ce_core_src_clk = NULL;
7887 pr_err("Unable to set the core src clk @%uMhz.\n",
7888 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7889 return -EIO;
7890 }
7891 } else {
7892 pr_warn("Unable to get CE core src clk, set to NULL\n");
7893 qclk->ce_core_src_clk = NULL;
7894 }
7895
7896 /* Get CE core clk */
7897 qclk->ce_core_clk = clk_get(pdev, core_clk);
7898 if (IS_ERR(qclk->ce_core_clk)) {
7899 rc = PTR_ERR(qclk->ce_core_clk);
7900 pr_err("Unable to get CE core clk\n");
7901 if (qclk->ce_core_src_clk != NULL)
7902 clk_put(qclk->ce_core_src_clk);
7903 return -EIO;
7904 }
7905
7906 /* Get CE Interface clk */
7907 qclk->ce_clk = clk_get(pdev, iface_clk);
7908 if (IS_ERR(qclk->ce_clk)) {
7909 rc = PTR_ERR(qclk->ce_clk);
7910 pr_err("Unable to get CE interface clk\n");
7911 if (qclk->ce_core_src_clk != NULL)
7912 clk_put(qclk->ce_core_src_clk);
7913 clk_put(qclk->ce_core_clk);
7914 return -EIO;
7915 }
7916
7917 /* Get CE AXI clk */
7918 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
7919 if (IS_ERR(qclk->ce_bus_clk)) {
7920 rc = PTR_ERR(qclk->ce_bus_clk);
7921 pr_err("Unable to get CE BUS interface clk\n");
7922 if (qclk->ce_core_src_clk != NULL)
7923 clk_put(qclk->ce_core_src_clk);
7924 clk_put(qclk->ce_core_clk);
7925 clk_put(qclk->ce_clk);
7926 return -EIO;
7927 }
7928
7929 return rc;
7930}
7931
7932static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
7933{
7934 struct qseecom_clk *qclk;
7935
7936 if (ce == CLK_QSEE)
7937 qclk = &qseecom.qsee;
7938 else
7939 qclk = &qseecom.ce_drv;
7940
7941 if (qclk->ce_clk != NULL) {
7942 clk_put(qclk->ce_clk);
7943 qclk->ce_clk = NULL;
7944 }
7945 if (qclk->ce_core_clk != NULL) {
7946 clk_put(qclk->ce_core_clk);
7947 qclk->ce_core_clk = NULL;
7948 }
7949 if (qclk->ce_bus_clk != NULL) {
7950 clk_put(qclk->ce_bus_clk);
7951 qclk->ce_bus_clk = NULL;
7952 }
7953 if (qclk->ce_core_src_clk != NULL) {
7954 clk_put(qclk->ce_core_src_clk);
7955 qclk->ce_core_src_clk = NULL;
7956 }
7957 qclk->instance = CLK_INVALID;
7958}
7959
/*
 * qseecom_retrieve_ce_data() - parse the crypto-engine (CE) pipe-pair
 * configuration for FDE/PFE from the device tree.
 *
 * Two DT formats are supported:
 *  - new style: "qcom,full-disk-encrypt-info" / "qcom,per-file-encrypt-info"
 *    tables of qseecom_crypto_info entries, grouped into per-unit
 *    qseecom_ce_info_use records;
 *  - old style (taken only when neither table exists): single pipe-pair
 *    properties plus "qcom,hlos-ce-hw-instance".
 *
 * On success fills qseecom.ce_info.{fde,pfe,num_fde,num_pfe} and the
 * qsee/ce_drv instance numbers.  On failure all partially built tables
 * are freed at the "out" label.  Returns 0 or a negative errno.
 */
static int qseecom_retrieve_ce_data(struct platform_device *pdev)
{
	int rc = 0;
	uint32_t hlos_num_ce_hw_instances;
	uint32_t disk_encrypt_pipe;
	uint32_t file_encrypt_pipe;
	uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
	int i;
	const int *tbl;
	int size;
	int entry;
	struct qseecom_crypto_info *pfde_tbl = NULL;
	struct qseecom_crypto_info *p;
	int tbl_size;
	int j;
	bool old_db = true;	/* stays true only if no new-style table found */
	struct qseecom_ce_info_use *pce_info_use;
	uint32_t *unit_tbl = NULL;
	int total_units = 0;
	struct qseecom_ce_pipe_entry *pce_entry;

	qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
	qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;

	/* The QSEE-side CE instance is mandatory. */
	if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,qsee-ce-hw-instance",
				&qseecom.ce_info.qsee_ce_hw_instance)) {
		pr_err("Fail to get qsee ce hw instance information.\n");
		rc = -EINVAL;
		goto out;
	} else {
		pr_debug("qsee-ce-hw-instance=0x%x\n",
			qseecom.ce_info.qsee_ce_hw_instance);
	}

	qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
						"qcom,support-fde");
	qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
						"qcom,support-pfe");

	if (!qseecom.support_pfe && !qseecom.support_fde) {
		pr_warn("Device does not support PFE/FDE");
		goto out;
	}

	/* --- New-style FDE table ------------------------------------- */
	if (qseecom.support_fde)
		tbl = of_get_property((&pdev->dev)->of_node,
			"qcom,full-disk-encrypt-info", &size);
	else
		tbl = NULL;
	if (tbl) {
		old_db = false;
		if (size % sizeof(struct qseecom_crypto_info)) {
			pr_err("full-disk-encrypt-info tbl size(%d)\n",
				size);
			rc = -EINVAL;
			goto out;
		}
		tbl_size = size / sizeof
				(struct qseecom_crypto_info);

		pfde_tbl = kzalloc(size, GFP_KERNEL);
		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
		total_units = 0;

		if (!pfde_tbl || !unit_tbl) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		if (of_property_read_u32_array((&pdev->dev)->of_node,
			"qcom,full-disk-encrypt-info",
			(u32 *)pfde_tbl, size/sizeof(u32))) {
			pr_err("failed to read full-disk-encrypt-info tbl\n");
			rc = -EINVAL;
			goto out;
		}

		/* Collect the set of distinct unit numbers. */
		for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
			for (j = 0; j < total_units; j++) {
				if (p->unit_num == *(unit_tbl + j))
					break;
			}
			if (j == total_units) {
				*(unit_tbl + total_units) = p->unit_num;
				total_units++;
			}
		}

		qseecom.ce_info.num_fde = total_units;
		pce_info_use = qseecom.ce_info.fde = kcalloc(
			total_units, sizeof(struct qseecom_ce_info_use),
				GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}

		/* One qseecom_ce_info_use per unit; gather its pipe pairs. */
		for (j = 0; j < total_units; j++, pce_info_use++) {
			pce_info_use->unit_num = *(unit_tbl + j);
			pce_info_use->alloc = false;
			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
			pce_info_use->num_ce_pipe_entries = 0;
			pce_info_use->ce_pipe_entry = NULL;
			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num)
					pce_info_use->num_ce_pipe_entries++;
			}

			entry = pce_info_use->num_ce_pipe_entries;
			pce_entry = pce_info_use->ce_pipe_entry =
				kcalloc(entry,
					sizeof(struct qseecom_ce_pipe_entry),
					GFP_KERNEL);
			if (pce_entry == NULL) {
				pr_err("failed to alloc memory\n");
				rc = -ENOMEM;
				goto out;
			}

			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num) {
					pce_entry->ce_num = p->ce;
					pce_entry->ce_pipe_pair =
							p->pipe_pair;
					pce_entry->valid = true;
					pce_entry++;
				}
			}
		}
		kfree(unit_tbl);
		unit_tbl = NULL;
		kfree(pfde_tbl);
		pfde_tbl = NULL;
	}

	/* --- New-style PFE table (same shape as the FDE pass) --------- */
	if (qseecom.support_pfe)
		tbl = of_get_property((&pdev->dev)->of_node,
			"qcom,per-file-encrypt-info", &size);
	else
		tbl = NULL;
	if (tbl) {
		old_db = false;
		if (size % sizeof(struct qseecom_crypto_info)) {
			pr_err("per-file-encrypt-info tbl size(%d)\n",
				size);
			rc = -EINVAL;
			goto out;
		}
		tbl_size = size / sizeof
				(struct qseecom_crypto_info);

		pfde_tbl = kzalloc(size, GFP_KERNEL);
		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
		total_units = 0;
		if (!pfde_tbl || !unit_tbl) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		if (of_property_read_u32_array((&pdev->dev)->of_node,
			"qcom,per-file-encrypt-info",
			(u32 *)pfde_tbl, size/sizeof(u32))) {
			pr_err("failed to read per-file-encrypt-info tbl\n");
			rc = -EINVAL;
			goto out;
		}

		/* Collect the set of distinct unit numbers. */
		for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
			for (j = 0; j < total_units; j++) {
				if (p->unit_num == *(unit_tbl + j))
					break;
			}
			if (j == total_units) {
				*(unit_tbl + total_units) = p->unit_num;
				total_units++;
			}
		}

		qseecom.ce_info.num_pfe = total_units;
		pce_info_use = qseecom.ce_info.pfe = kcalloc(
			total_units, sizeof(struct qseecom_ce_info_use),
				GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}

		for (j = 0; j < total_units; j++, pce_info_use++) {
			pce_info_use->unit_num = *(unit_tbl + j);
			pce_info_use->alloc = false;
			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
			pce_info_use->num_ce_pipe_entries = 0;
			pce_info_use->ce_pipe_entry = NULL;
			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num)
					pce_info_use->num_ce_pipe_entries++;
			}

			entry = pce_info_use->num_ce_pipe_entries;
			pce_entry = pce_info_use->ce_pipe_entry =
				kcalloc(entry,
					sizeof(struct qseecom_ce_pipe_entry),
					GFP_KERNEL);
			if (pce_entry == NULL) {
				pr_err("failed to alloc memory\n");
				rc = -ENOMEM;
				goto out;
			}

			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num) {
					pce_entry->ce_num = p->ce;
					pce_entry->ce_pipe_pair =
							p->pipe_pair;
					pce_entry->valid = true;
					pce_entry++;
				}
			}
		}
		kfree(unit_tbl);
		unit_tbl = NULL;
		kfree(pfde_tbl);
		pfde_tbl = NULL;
	}

	/* New-style tables found: skip the legacy parsing entirely. */
	if (!old_db)
		goto out1;

	/* --- Legacy ("old db") device-tree format --------------------- */
	if (of_property_read_bool((&pdev->dev)->of_node,
			"qcom,support-multiple-ce-hw-instance")) {
		if (of_property_read_u32((&pdev->dev)->of_node,
			"qcom,hlos-num-ce-hw-instances",
				&hlos_num_ce_hw_instances)) {
			pr_err("Fail: get hlos number of ce hw instance\n");
			rc = -EINVAL;
			goto out;
		}
	} else {
		hlos_num_ce_hw_instances = 1;
	}

	if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
		pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
			MAX_CE_PIPE_PAIR_PER_UNIT);
		rc = -EINVAL;
		goto out;
	}

	if (of_property_read_u32_array((&pdev->dev)->of_node,
			"qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
			hlos_num_ce_hw_instances)) {
		pr_err("Fail: get hlos ce hw instance info\n");
		rc = -EINVAL;
		goto out;
	}

	if (qseecom.support_fde) {
		/* Legacy FDE: one default unit covering every HLOS CE. */
		pce_info_use = qseecom.ce_info.fde =
			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		/* by default for old db */
		qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
		pce_info_use->alloc = false;
		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
		pce_info_use->ce_pipe_entry = NULL;
		if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,disk-encrypt-pipe-pair",
				&disk_encrypt_pipe)) {
			pr_err("Fail to get FDE pipe information.\n");
			rc = -EINVAL;
			goto out;
		} else {
			pr_debug("disk-encrypt-pipe-pair=0x%x",
				disk_encrypt_pipe);
		}
		entry = pce_info_use->num_ce_pipe_entries =
				hlos_num_ce_hw_instances;
		pce_entry = pce_info_use->ce_pipe_entry =
			kcalloc(entry,
				sizeof(struct qseecom_ce_pipe_entry),
				GFP_KERNEL);
		if (pce_entry == NULL) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		for (i = 0; i < entry; i++) {
			pce_entry->ce_num = hlos_ce_hw_instance[i];
			pce_entry->ce_pipe_pair = disk_encrypt_pipe;
			pce_entry->valid = 1;
			pce_entry++;
		}
	} else {
		pr_warn("Device does not support FDE");
		disk_encrypt_pipe = 0xff;
	}
	if (qseecom.support_pfe) {
		/* Legacy PFE: same single-unit layout as legacy FDE. */
		pce_info_use = qseecom.ce_info.pfe =
			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		/* by default for old db */
		qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
		pce_info_use->alloc = false;
		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
		pce_info_use->ce_pipe_entry = NULL;

		if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,file-encrypt-pipe-pair",
				&file_encrypt_pipe)) {
			pr_err("Fail to get PFE pipe information.\n");
			rc = -EINVAL;
			goto out;
		} else {
			pr_debug("file-encrypt-pipe-pair=0x%x",
				file_encrypt_pipe);
		}
		entry = pce_info_use->num_ce_pipe_entries =
				hlos_num_ce_hw_instances;
		pce_entry = pce_info_use->ce_pipe_entry =
			kcalloc(entry,
				sizeof(struct qseecom_ce_pipe_entry),
				GFP_KERNEL);
		if (pce_entry == NULL) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		for (i = 0; i < entry; i++) {
			pce_entry->ce_num = hlos_ce_hw_instance[i];
			pce_entry->ce_pipe_pair = file_encrypt_pipe;
			pce_entry->valid = 1;
			pce_entry++;
		}
	} else {
		pr_warn("Device does not support PFE");
		file_encrypt_pipe = 0xff;
	}

out1:
	/*
	 * NOTE(review): on the new-style path hlos_ce_hw_instance[] is
	 * never filled, so ce_drv.instance gets the zero-initialized
	 * element — confirm this is intentional.
	 */
	qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
	qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
out:
	if (rc) {
		/* Unwind any partially built FDE/PFE tables. */
		if (qseecom.ce_info.fde) {
			pce_info_use = qseecom.ce_info.fde;
			for (i = 0; i < qseecom.ce_info.num_fde; i++) {
				pce_entry = pce_info_use->ce_pipe_entry;
				kfree(pce_entry);
				pce_info_use++;
			}
		}
		kfree(qseecom.ce_info.fde);
		qseecom.ce_info.fde = NULL;
		if (qseecom.ce_info.pfe) {
			pce_info_use = qseecom.ce_info.pfe;
			for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
				pce_entry = pce_info_use->ce_pipe_entry;
				kfree(pce_entry);
				pce_info_use++;
			}
		}
		kfree(qseecom.ce_info.pfe);
		qseecom.ce_info.pfe = NULL;
	}
	kfree(unit_tbl);
	kfree(pfde_tbl);
	return rc;
}
8341
8342static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8343 void __user *argp)
8344{
8345 struct qseecom_ce_info_req req;
8346 struct qseecom_ce_info_req *pinfo = &req;
8347 int ret = 0;
8348 int i;
8349 unsigned int entries;
8350 struct qseecom_ce_info_use *pce_info_use, *p;
8351 int total = 0;
8352 bool found = false;
8353 struct qseecom_ce_pipe_entry *pce_entry;
8354
8355 ret = copy_from_user(pinfo, argp,
8356 sizeof(struct qseecom_ce_info_req));
8357 if (ret) {
8358 pr_err("copy_from_user failed\n");
8359 return ret;
8360 }
8361
8362 switch (pinfo->usage) {
8363 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8364 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8365 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8366 if (qseecom.support_fde) {
8367 p = qseecom.ce_info.fde;
8368 total = qseecom.ce_info.num_fde;
8369 } else {
8370 pr_err("system does not support fde\n");
8371 return -EINVAL;
8372 }
8373 break;
8374 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8375 if (qseecom.support_pfe) {
8376 p = qseecom.ce_info.pfe;
8377 total = qseecom.ce_info.num_pfe;
8378 } else {
8379 pr_err("system does not support pfe\n");
8380 return -EINVAL;
8381 }
8382 break;
8383 default:
8384 pr_err("unsupported usage %d\n", pinfo->usage);
8385 return -EINVAL;
8386 }
8387
8388 pce_info_use = NULL;
8389 for (i = 0; i < total; i++) {
8390 if (!p->alloc)
8391 pce_info_use = p;
8392 else if (!memcmp(p->handle, pinfo->handle,
8393 MAX_CE_INFO_HANDLE_SIZE)) {
8394 pce_info_use = p;
8395 found = true;
8396 break;
8397 }
8398 p++;
8399 }
8400
8401 if (pce_info_use == NULL)
8402 return -EBUSY;
8403
8404 pinfo->unit_num = pce_info_use->unit_num;
8405 if (!pce_info_use->alloc) {
8406 pce_info_use->alloc = true;
8407 memcpy(pce_info_use->handle,
8408 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8409 }
8410 if (pce_info_use->num_ce_pipe_entries >
8411 MAX_CE_PIPE_PAIR_PER_UNIT)
8412 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8413 else
8414 entries = pce_info_use->num_ce_pipe_entries;
8415 pinfo->num_ce_pipe_entries = entries;
8416 pce_entry = pce_info_use->ce_pipe_entry;
8417 for (i = 0; i < entries; i++, pce_entry++)
8418 pinfo->ce_pipe_entry[i] = *pce_entry;
8419 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8420 pinfo->ce_pipe_entry[i].valid = 0;
8421
8422 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8423 pr_err("copy_to_user failed\n");
8424 ret = -EFAULT;
8425 }
8426 return ret;
8427}
8428
8429static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8430 void __user *argp)
8431{
8432 struct qseecom_ce_info_req req;
8433 struct qseecom_ce_info_req *pinfo = &req;
8434 int ret = 0;
8435 struct qseecom_ce_info_use *p;
8436 int total = 0;
8437 int i;
8438 bool found = false;
8439
8440 ret = copy_from_user(pinfo, argp,
8441 sizeof(struct qseecom_ce_info_req));
8442 if (ret)
8443 return ret;
8444
8445 switch (pinfo->usage) {
8446 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8447 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8448 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8449 if (qseecom.support_fde) {
8450 p = qseecom.ce_info.fde;
8451 total = qseecom.ce_info.num_fde;
8452 } else {
8453 pr_err("system does not support fde\n");
8454 return -EINVAL;
8455 }
8456 break;
8457 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8458 if (qseecom.support_pfe) {
8459 p = qseecom.ce_info.pfe;
8460 total = qseecom.ce_info.num_pfe;
8461 } else {
8462 pr_err("system does not support pfe\n");
8463 return -EINVAL;
8464 }
8465 break;
8466 default:
8467 pr_err("unsupported usage %d\n", pinfo->usage);
8468 return -EINVAL;
8469 }
8470
8471 for (i = 0; i < total; i++) {
8472 if (p->alloc &&
8473 !memcmp(p->handle, pinfo->handle,
8474 MAX_CE_INFO_HANDLE_SIZE)) {
8475 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8476 p->alloc = false;
8477 found = true;
8478 break;
8479 }
8480 p++;
8481 }
8482 return ret;
8483}
8484
8485static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8486 void __user *argp)
8487{
8488 struct qseecom_ce_info_req req;
8489 struct qseecom_ce_info_req *pinfo = &req;
8490 int ret = 0;
8491 int i;
8492 unsigned int entries;
8493 struct qseecom_ce_info_use *pce_info_use, *p;
8494 int total = 0;
8495 bool found = false;
8496 struct qseecom_ce_pipe_entry *pce_entry;
8497
8498 ret = copy_from_user(pinfo, argp,
8499 sizeof(struct qseecom_ce_info_req));
8500 if (ret)
8501 return ret;
8502
8503 switch (pinfo->usage) {
8504 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8505 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8506 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8507 if (qseecom.support_fde) {
8508 p = qseecom.ce_info.fde;
8509 total = qseecom.ce_info.num_fde;
8510 } else {
8511 pr_err("system does not support fde\n");
8512 return -EINVAL;
8513 }
8514 break;
8515 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8516 if (qseecom.support_pfe) {
8517 p = qseecom.ce_info.pfe;
8518 total = qseecom.ce_info.num_pfe;
8519 } else {
8520 pr_err("system does not support pfe\n");
8521 return -EINVAL;
8522 }
8523 break;
8524 default:
8525 pr_err("unsupported usage %d\n", pinfo->usage);
8526 return -EINVAL;
8527 }
8528
8529 pce_info_use = NULL;
8530 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8531 pinfo->num_ce_pipe_entries = 0;
8532 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8533 pinfo->ce_pipe_entry[i].valid = 0;
8534
8535 for (i = 0; i < total; i++) {
8536
8537 if (p->alloc && !memcmp(p->handle,
8538 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8539 pce_info_use = p;
8540 found = true;
8541 break;
8542 }
8543 p++;
8544 }
8545 if (!pce_info_use)
8546 goto out;
8547 pinfo->unit_num = pce_info_use->unit_num;
8548 if (pce_info_use->num_ce_pipe_entries >
8549 MAX_CE_PIPE_PAIR_PER_UNIT)
8550 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8551 else
8552 entries = pce_info_use->num_ce_pipe_entries;
8553 pinfo->num_ce_pipe_entries = entries;
8554 pce_entry = pce_info_use->ce_pipe_entry;
8555 for (i = 0; i < entries; i++, pce_entry++)
8556 pinfo->ce_pipe_entry[i] = *pce_entry;
8557 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8558 pinfo->ce_pipe_entry[i].valid = 0;
8559out:
8560 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8561 pr_err("copy_to_user failed\n");
8562 ret = -EFAULT;
8563 }
8564 return ret;
8565}
8566
8567/*
8568 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8569 * then whitelist feature is not supported.
8570 */
8571static int qseecom_check_whitelist_feature(void)
8572{
8573 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8574
8575 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8576}
8577
/*
 * qseecom_probe() - platform driver probe.
 *
 * Brings the driver up in order: reset global state, create the char
 * device (/dev/qseecom), query the QSEE version via SCM, create the
 * ION client, parse the device tree (CE data, clocks, bus scaling),
 * notify TZ of the secure-app region when required, and finally
 * register the bus-scaling client and mark the driver READY.
 *
 * Each failure point unwinds everything set up before it through the
 * ordered labels at the bottom.  Returns 0 or a negative errno.
 */
static int qseecom_probe(struct platform_device *pdev)
{
	int rc;
	int i;
	uint32_t feature = 10;	/* argument for the QSEE version SCM query */
	struct device *class_dev;
	struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
	struct qseecom_command_scm_resp resp;
	struct qseecom_ce_info_use *pce_info_use = NULL;

	/* Reset all global driver state before any setup. */
	qseecom.qsee_bw_count = 0;
	qseecom.qsee_perf_client = 0;
	qseecom.qsee_sfpb_bw_count = 0;

	qseecom.qsee.ce_core_clk = NULL;
	qseecom.qsee.ce_clk = NULL;
	qseecom.qsee.ce_core_src_clk = NULL;
	qseecom.qsee.ce_bus_clk = NULL;

	qseecom.cumulative_mode = 0;
	qseecom.current_mode = INACTIVE;
	qseecom.support_bus_scaling = false;
	qseecom.support_fde = false;
	qseecom.support_pfe = false;

	qseecom.ce_drv.ce_core_clk = NULL;
	qseecom.ce_drv.ce_clk = NULL;
	qseecom.ce_drv.ce_core_src_clk = NULL;
	qseecom.ce_drv.ce_bus_clk = NULL;
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);

	qseecom.app_block_ref_cnt = 0;
	init_waitqueue_head(&qseecom.app_block_wq);
	qseecom.whitelist_support = true;

	/* Create the qseecom character device. */
	rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
	if (rc < 0) {
		pr_err("alloc_chrdev_region failed %d\n", rc);
		return rc;
	}

	driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
	if (IS_ERR(driver_class)) {
		rc = -ENOMEM;
		pr_err("class_create failed %d\n", rc);
		goto exit_unreg_chrdev_region;
	}

	class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
			QSEECOM_DEV);
	if (IS_ERR(class_dev)) {
		pr_err("class_device_create failed %d\n", rc);
		rc = -ENOMEM;
		goto exit_destroy_class;
	}

	cdev_init(&qseecom.cdev, &qseecom_fops);
	qseecom.cdev.owner = THIS_MODULE;

	rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
	if (rc < 0) {
		pr_err("cdev_add failed %d\n", rc);
		goto exit_destroy_device;
	}

	/* Init listener/app/kclient bookkeeping and wait queues. */
	INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
	spin_lock_init(&qseecom.registered_app_list_lock);
	INIT_LIST_HEAD(&qseecom.unregister_lsnr_pending_list_head);
	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
	spin_lock_init(&qseecom.registered_kclient_list_lock);
	init_waitqueue_head(&qseecom.send_resp_wq);
	init_waitqueue_head(&qseecom.register_lsnr_pending_wq);
	qseecom.send_resp_flag = 0;

	/* Ask TZ for the QSEE version. */
	qseecom.qsee_version = QSEEE_VERSION_00;
	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
		&resp, sizeof(resp));
	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
	if (rc) {
		pr_err("Failed to get QSEE version info %d\n", rc);
		goto exit_del_cdev;
	}
	qseecom.qsee_version = resp.result;
	qseecom.qseos_version = QSEOS_VERSION_14;
	qseecom.commonlib_loaded = false;
	qseecom.commonlib64_loaded = false;
	qseecom.pdev = class_dev;
	/* Create ION msm client */
	qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
	if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
		pr_err("Ion client cannot be created\n");
		rc = -ENOMEM;
		goto exit_del_cdev;
	}

	/* register client for bus scaling */
	if (pdev->dev.of_node) {
		qseecom.pdev->of_node = pdev->dev.of_node;
		qseecom.support_bus_scaling =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,support-bus-scaling");
		rc = qseecom_retrieve_ce_data(pdev);
		if (rc)
			goto exit_destroy_ion_client;
		qseecom.appsbl_qseecom_support =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,appsbl-qseecom-support");
		pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
				qseecom.appsbl_qseecom_support);

		qseecom.commonlib64_loaded =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,commonlib64-loaded-by-uefi");
		pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
				qseecom.commonlib64_loaded);
		qseecom.fde_key_size =
			of_property_read_bool((&pdev->dev)->of_node,
						"qcom,fde-key-size");
		qseecom.no_clock_support =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,no-clock-support");
		/*
		 * NOTE(review): the log text below looks inverted with
		 * respect to the condition (the "handled by other
		 * subsystem" message prints when no_clock_support is
		 * false) — confirm which branch is intended.
		 */
		if (!qseecom.no_clock_support) {
			pr_info("qseecom clocks handled by other subsystem\n");
		} else {
			pr_info("no-clock-support=0x%x",
					qseecom.no_clock_support);
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,qsee-reentrancy-support",
					&qseecom.qsee_reentrancy_support)) {
			pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
			qseecom.qsee_reentrancy_support = 0;
		} else {
			pr_warn("qseecom.qsee_reentrancy_support = %d\n",
				qseecom.qsee_reentrancy_support);
		}

		qseecom.enable_key_wrap_in_ks =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,enable-key-wrap-in-ks");
		if (qseecom.enable_key_wrap_in_ks) {
			pr_warn("qseecom.enable_key_wrap_in_ks = %d\n",
					qseecom.enable_key_wrap_in_ks);
		}

		/*
		 * The qseecom bus scaling flag can not be enabled when
		 * crypto clock is not handled by HLOS.
		 */
		if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
			pr_err("support_bus_scaling flag can not be enabled.\n");
			rc = -EINVAL;
			goto exit_destroy_ion_client;
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,ce-opp-freq",
					&qseecom.ce_opp_freq_hz)) {
			pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
			qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
		}
		rc = __qseecom_init_clk(CLK_QSEE);
		if (rc)
			goto exit_destroy_ion_client;

		/*
		 * Only grab a separate CE_DRV clock set when the driver
		 * CE instance differs from QSEE's; otherwise share the
		 * QSEE clock handles.
		 */
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
				(qseecom.support_pfe || qseecom.support_fde)) {
			rc = __qseecom_init_clk(CLK_CE_DRV);
			if (rc) {
				__qseecom_deinit_clk(CLK_QSEE);
				goto exit_destroy_ion_client;
			}
		} else {
			struct qseecom_clk *qclk;

			qclk = &qseecom.qsee;
			qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
			qseecom.ce_drv.ce_clk = qclk->ce_clk;
			qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
			qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
		}

		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						msm_bus_cl_get_pdata(pdev);
		/*
		 * QSEE >= v2 without a pre-protected apps region needs an
		 * explicit secure-app-region notification to TZ.
		 */
		if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
			(!qseecom.is_apps_region_protected &&
			!qseecom.appsbl_qseecom_support)) {
			struct resource *resource = NULL;
			struct qsee_apps_region_info_ireq req;
			struct qsee_apps_region_info_64bit_ireq req_64bit;
			struct qseecom_command_scm_resp resp;
			void *cmd_buf = NULL;
			size_t cmd_len;

			resource = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "secapp-region");
			if (resource) {
				/* 32-bit vs 64-bit request by QSEE version. */
				if (qseecom.qsee_version < QSEE_VERSION_40) {
					req.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req.addr = (uint32_t)resource->start;
					req.size = resource_size(resource);
					cmd_buf = (void *)&req;
					cmd_len = sizeof(struct
						qsee_apps_region_info_ireq);
					pr_warn("secure app region addr=0x%x size=0x%x",
							req.addr, req.size);
				} else {
					req_64bit.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req_64bit.addr = resource->start;
					req_64bit.size = resource_size(
							resource);
					cmd_buf = (void *)&req_64bit;
					cmd_len = sizeof(struct
						qsee_apps_region_info_64bit_ireq);
					pr_warn("secure app region addr=0x%llx size=0x%x",
						req_64bit.addr, req_64bit.size);
				}
			} else {
				pr_err("Fail to get secure app region info\n");
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
			rc = __qseecom_enable_clk(CLK_QSEE);
			if (rc) {
				pr_err("CLK_QSEE enabling failed (%d)\n", rc);
				rc = -EIO;
				goto exit_deinit_clock;
			}
			rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len,
					&resp, sizeof(resp));
			__qseecom_disable_clk(CLK_QSEE);
			if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
				pr_err("send secapp reg fail %d resp.res %d\n",
							rc, resp.result);
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
		}
		/*
		 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
		 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
		 * Pls add "qseecom.commonlib64_loaded = true" here too.
		 */
		if (qseecom.is_apps_region_protected ||
					qseecom.appsbl_qseecom_support)
			qseecom.commonlib_loaded = true;
	} else {
		/* No device tree: fall back to legacy platform data. */
		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						pdev->dev.platform_data;
	}
	if (qseecom.support_bus_scaling) {
		init_timer(&(qseecom.bw_scale_down_timer));
		INIT_WORK(&qseecom.bw_inactive_req_ws,
			qseecom_bw_inactive_req_work);
		qseecom.bw_scale_down_timer.function =
			qseecom_scale_bus_bandwidth_timer_callback;
	}
	qseecom.timer_running = false;
	qseecom.qsee_perf_client = msm_bus_scale_register_client(
					qseecom_platform_support);

	qseecom.whitelist_support = qseecom_check_whitelist_feature();
	pr_warn("qseecom.whitelist_support = %d\n",
				qseecom.whitelist_support);

	/* A missing perf client is logged but not fatal. */
	if (!qseecom.qsee_perf_client)
		pr_err("Unable to register bus client\n");

	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return 0;

/* Error unwind: each label tears down one stage, in reverse order. */
exit_deinit_clock:
	__qseecom_deinit_clk(CLK_QSEE);
	if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
		(qseecom.support_pfe || qseecom.support_fde))
		__qseecom_deinit_clk(CLK_CE_DRV);
exit_destroy_ion_client:
	if (qseecom.ce_info.fde) {
		pce_info_use = qseecom.ce_info.fde;
		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.fde);
	}
	if (qseecom.ce_info.pfe) {
		pce_info_use = qseecom.ce_info.pfe;
		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.pfe);
	}
	ion_client_destroy(qseecom.ion_clnt);
exit_del_cdev:
	cdev_del(&qseecom.cdev);
exit_destroy_device:
	device_destroy(driver_class, qseecom_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(qseecom_device_no, 1);
	return rc;
}
8887
8888static int qseecom_remove(struct platform_device *pdev)
8889{
8890 struct qseecom_registered_kclient_list *kclient = NULL;
Monika Singhe711b162018-04-24 09:54:50 +05308891 struct qseecom_registered_kclient_list *kclient_tmp = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008892 unsigned long flags = 0;
8893 int ret = 0;
8894 int i;
8895 struct qseecom_ce_pipe_entry *pce_entry;
8896 struct qseecom_ce_info_use *pce_info_use;
8897
8898 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8899 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
8900
Monika Singhe711b162018-04-24 09:54:50 +05308901 list_for_each_entry_safe(kclient, kclient_tmp,
8902 &qseecom.registered_kclient_list_head, list) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008903
8904 /* Break the loop if client handle is NULL */
Zhen Kong9131def2018-07-13 12:02:32 -07008905 if (!kclient->handle) {
8906 list_del(&kclient->list);
8907 kzfree(kclient);
8908 break;
8909 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008910
8911 list_del(&kclient->list);
8912 mutex_lock(&app_access_lock);
8913 ret = qseecom_unload_app(kclient->handle->dev, false);
8914 mutex_unlock(&app_access_lock);
8915 if (!ret) {
8916 kzfree(kclient->handle->dev);
8917 kzfree(kclient->handle);
8918 kzfree(kclient);
8919 }
8920 }
8921
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008922 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
8923
8924 if (qseecom.qseos_version > QSEEE_VERSION_00)
8925 qseecom_unload_commonlib_image();
8926
8927 if (qseecom.qsee_perf_client)
8928 msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
8929 0);
8930 if (pdev->dev.platform_data != NULL)
8931 msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
8932
8933 if (qseecom.support_bus_scaling) {
8934 cancel_work_sync(&qseecom.bw_inactive_req_ws);
8935 del_timer_sync(&qseecom.bw_scale_down_timer);
8936 }
8937
8938 if (qseecom.ce_info.fde) {
8939 pce_info_use = qseecom.ce_info.fde;
8940 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8941 pce_entry = pce_info_use->ce_pipe_entry;
8942 kfree(pce_entry);
8943 pce_info_use++;
8944 }
8945 }
8946 kfree(qseecom.ce_info.fde);
8947 if (qseecom.ce_info.pfe) {
8948 pce_info_use = qseecom.ce_info.pfe;
8949 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8950 pce_entry = pce_info_use->ce_pipe_entry;
8951 kfree(pce_entry);
8952 pce_info_use++;
8953 }
8954 }
8955 kfree(qseecom.ce_info.pfe);
8956
8957 /* register client for bus scaling */
8958 if (pdev->dev.of_node) {
8959 __qseecom_deinit_clk(CLK_QSEE);
8960 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8961 (qseecom.support_pfe || qseecom.support_fde))
8962 __qseecom_deinit_clk(CLK_CE_DRV);
8963 }
8964
8965 ion_client_destroy(qseecom.ion_clnt);
8966
8967 cdev_del(&qseecom.cdev);
8968
8969 device_destroy(driver_class, qseecom_device_no);
8970
8971 class_destroy(driver_class);
8972
8973 unregister_chrdev_region(qseecom_device_no, 1);
8974
8975 return ret;
8976}
8977
/*
 * Power-management suspend hook: vote the bus down to INACTIVE, gate any
 * CE clocks currently held, and stop the bandwidth scale-down timer and
 * its work item. Always returns 0 (suspend is never refused).
 */
static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	/* Mark the driver suspended before touching bus/clock state. */
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
	if (qseecom.no_clock_support)
		return 0;

	/* Lock order (qsee_bw_mutex, then clk_access_lock) matters. */
	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);

	/* Drop the bus vote; keep current_mode accurate only on success. */
	if (qseecom.current_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, INACTIVE);
		if (ret)
			pr_err("Fail to scale down bus\n");
		else
			qseecom.current_mode = INACTIVE;
	}

	/* Gate whichever CE clocks are non-NULL while a holder exists. */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_clk != NULL)
			clk_disable_unprepare(qclk->ce_clk);
		if (qclk->ce_core_clk != NULL)
			clk_disable_unprepare(qclk->ce_core_clk);
		if (qclk->ce_bus_clk != NULL)
			clk_disable_unprepare(qclk->ce_bus_clk);
	}

	/* Stop the scale-down timer; resume re-arms it if needed. */
	del_timer_sync(&(qseecom.bw_scale_down_timer));
	qseecom.timer_running = false;

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	/* Flush any inactive-request work queued before the timer stopped. */
	cancel_work_sync(&qseecom.bw_inactive_req_ws);

	return 0;
}
9018
/*
 * Power-management resume hook: restore the bus vote to the cumulative
 * mode (capped at HIGH), re-enable any CE clocks that were held across
 * suspend, and re-arm the bandwidth scale-down timer. On a clock enable
 * failure, clocks already enabled here are unwound in reverse order and
 * -EIO is returned; the driver state is set back to READY in all cases.
 */
static int qseecom_resume(struct platform_device *pdev)
{
	int mode = 0;
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qseecom.no_clock_support)
		goto exit;

	/* Same lock order as suspend: bw mutex, then clk mutex. */
	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);
	/* Cap the restored bus vote at HIGH. */
	if (qseecom.cumulative_mode >= HIGH)
		mode = HIGH;
	else
		mode = qseecom.cumulative_mode;

	if (qseecom.cumulative_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, mode);
		if (ret)
			pr_err("Fail to scale up bus to %d\n", mode);
		else
			qseecom.current_mode = mode;
	}

	/*
	 * Re-enable clocks in core -> iface -> bus order; on failure,
	 * unwind via the error labels below (reverse order).
	 */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_core_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_core_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE core clk\n");
				qclk->clk_access_cnt = 0;
				goto err;
			}
		}
		if (qclk->ce_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE iface clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_clk_err;
			}
		}
		if (qclk->ce_bus_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_bus_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE bus clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_bus_clk_err;
			}
		}
	}

	/* Re-arm the scale-down timer if clocks or a bus vote are active. */
	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
		qseecom.bw_scale_down_timer.expires = jiffies +
			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		mod_timer(&(qseecom.bw_scale_down_timer),
				qseecom.bw_scale_down_timer.expires);
		qseecom.timer_running = true;
	}

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	goto exit;

ce_bus_clk_err:
	if (qclk->ce_clk)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	ret = -EIO;
exit:
	/* Driver is usable again regardless of clock/bus outcome. */
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return ret;
}
9098
9099static const struct of_device_id qseecom_match[] = {
9100 {
9101 .compatible = "qcom,qseecom",
9102 },
9103 {}
9104};
9105
9106static struct platform_driver qseecom_plat_driver = {
9107 .probe = qseecom_probe,
9108 .remove = qseecom_remove,
9109 .suspend = qseecom_suspend,
9110 .resume = qseecom_resume,
9111 .driver = {
9112 .name = "qseecom",
9113 .owner = THIS_MODULE,
9114 .of_match_table = qseecom_match,
9115 },
9116};
9117
9118static int qseecom_init(void)
9119{
9120 return platform_driver_register(&qseecom_plat_driver);
9121}
9122
9123static void qseecom_exit(void)
9124{
9125 platform_driver_unregister(&qseecom_plat_driver);
9126}
9127
9128MODULE_LICENSE("GPL v2");
9129MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
9130
9131module_init(qseecom_init);
9132module_exit(qseecom_exit);