blob: fbccd93117e2c2f8ef1335e4d70378e21f4419a6 [file] [log] [blame]
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
Zhen Kong87dcf0e2019-01-04 12:34:50 -08004 * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
53
54#define QSEECOM_DEV "qseecom"
55#define QSEOS_VERSION_14 0x14
56#define QSEEE_VERSION_00 0x400000
57#define QSEE_VERSION_01 0x401000
58#define QSEE_VERSION_02 0x402000
59#define QSEE_VERSION_03 0x403000
60#define QSEE_VERSION_04 0x404000
61#define QSEE_VERSION_05 0x405000
62#define QSEE_VERSION_20 0x800000
63#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
64
65#define QSEE_CE_CLK_100MHZ 100000000
66#define CE_CLK_DIV 1000000
67
Mohamed Sunfeer105a07b2018-08-29 13:52:40 +053068#define QSEECOM_MAX_SG_ENTRY 4096
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -070069#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
70 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
71
72#define QSEECOM_INVALID_KEY_ID 0xff
73
74/* Save partition image hash for authentication check */
75#define SCM_SAVE_PARTITION_HASH_ID 0x01
76
77/* Check if enterprise security is activate */
78#define SCM_IS_ACTIVATED_ID 0x02
79
80/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
81#define SCM_MDTP_CIPHER_DIP 0x01
82
83/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
84#define MAX_DIP 0x20000
85
86#define RPMB_SERVICE 0x2000
87#define SSD_SERVICE 0x3000
88
89#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
90#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
91#define TWO 2
92#define QSEECOM_UFS_ICE_CE_NUM 10
93#define QSEECOM_SDCC_ICE_CE_NUM 20
94#define QSEECOM_ICE_FDE_KEY_INDEX 0
95
96#define PHY_ADDR_4G (1ULL<<32)
97
98#define QSEECOM_STATE_NOT_READY 0
99#define QSEECOM_STATE_SUSPEND 1
100#define QSEECOM_STATE_READY 2
101#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
102
103/*
104 * default ce info unit to 0 for
105 * services which
106 * support only single instance.
107 * Most of services are in this category.
108 */
109#define DEFAULT_CE_INFO_UNIT 0
110#define DEFAULT_NUM_CE_INFO_UNIT 1
111
Jiten Patela7bb1d52018-05-11 12:34:26 +0530112#define FDE_FLAG_POS 4
113#define ENABLE_KEY_WRAP_IN_KS (1 << FDE_FLAG_POS)
114
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700115enum qseecom_clk_definitions {
116 CLK_DFAB = 0,
117 CLK_SFPB,
118};
119
120enum qseecom_ice_key_size_type {
121 QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
122 (0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
123 QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
124 (1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
125 QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
126 (0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
127};
128
129enum qseecom_client_handle_type {
130 QSEECOM_CLIENT_APP = 1,
131 QSEECOM_LISTENER_SERVICE,
132 QSEECOM_SECURE_SERVICE,
133 QSEECOM_GENERIC,
134 QSEECOM_UNAVAILABLE_CLIENT_APP,
135};
136
137enum qseecom_ce_hw_instance {
138 CLK_QSEE = 0,
139 CLK_CE_DRV,
140 CLK_INVALID,
141};
142
143static struct class *driver_class;
144static dev_t qseecom_device_no;
145
146static DEFINE_MUTEX(qsee_bw_mutex);
147static DEFINE_MUTEX(app_access_lock);
148static DEFINE_MUTEX(clk_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -0800149static DEFINE_MUTEX(listener_access_lock);
150
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700151
152struct sglist_info {
153 uint32_t indexAndFlags;
154 uint32_t sizeOrCount;
155};
156
157/*
158 * The 31th bit indicates only one or multiple physical address inside
159 * the request buffer. If it is set, the index locates a single physical addr
160 * inside the request buffer, and `sizeOrCount` is the size of the memory being
161 * shared at that physical address.
162 * Otherwise, the index locates an array of {start, len} pairs (a
163 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
164 * that array.
165 *
166 * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
167 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
168 *
169 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
170 */
171#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
172 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
173
174#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
175
176#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
177
178#define MAKE_WHITELIST_VERSION(major, minor, patch) \
179 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
180
181struct qseecom_registered_listener_list {
182 struct list_head list;
183 struct qseecom_register_listener_req svc;
184 void *user_virt_sb_base;
185 u8 *sb_virt;
186 phys_addr_t sb_phys;
187 size_t sb_length;
188 struct ion_handle *ihandle; /* Retrieve phy addr */
189 wait_queue_head_t rcv_req_wq;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800190 /* rcv_req_flag: 0: ready and empty; 1: received req */
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700191 int rcv_req_flag;
192 int send_resp_flag;
193 bool listener_in_use;
194 /* wq for thread blocked on this listener*/
195 wait_queue_head_t listener_block_app_wq;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800196 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
197 uint32_t sglist_cnt;
198 int abort;
199 bool unregister_pending;
200};
201
202struct qseecom_unregister_pending_list {
203 struct list_head list;
204 struct qseecom_dev_handle *data;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700205};
206
207struct qseecom_registered_app_list {
208 struct list_head list;
209 u32 app_id;
210 u32 ref_cnt;
211 char app_name[MAX_APP_NAME_SIZE];
212 u32 app_arch;
213 bool app_blocked;
Zhen Kongdea10592018-07-30 17:50:10 -0700214 u32 check_block;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700215 u32 blocked_on_listener_id;
216};
217
218struct qseecom_registered_kclient_list {
219 struct list_head list;
220 struct qseecom_handle *handle;
221};
222
223struct qseecom_ce_info_use {
224 unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
225 unsigned int unit_num;
226 unsigned int num_ce_pipe_entries;
227 struct qseecom_ce_pipe_entry *ce_pipe_entry;
228 bool alloc;
229 uint32_t type;
230};
231
232struct ce_hw_usage_info {
233 uint32_t qsee_ce_hw_instance;
234 uint32_t num_fde;
235 struct qseecom_ce_info_use *fde;
236 uint32_t num_pfe;
237 struct qseecom_ce_info_use *pfe;
238};
239
240struct qseecom_clk {
241 enum qseecom_ce_hw_instance instance;
242 struct clk *ce_core_clk;
243 struct clk *ce_clk;
244 struct clk *ce_core_src_clk;
245 struct clk *ce_bus_clk;
246 uint32_t clk_access_cnt;
247};
248
249struct qseecom_control {
250 struct ion_client *ion_clnt; /* Ion client */
251 struct list_head registered_listener_list_head;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700252
253 struct list_head registered_app_list_head;
254 spinlock_t registered_app_list_lock;
255
256 struct list_head registered_kclient_list_head;
257 spinlock_t registered_kclient_list_lock;
258
259 wait_queue_head_t send_resp_wq;
260 int send_resp_flag;
261
262 uint32_t qseos_version;
263 uint32_t qsee_version;
264 struct device *pdev;
265 bool whitelist_support;
266 bool commonlib_loaded;
267 bool commonlib64_loaded;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700268 struct ce_hw_usage_info ce_info;
269
270 int qsee_bw_count;
271 int qsee_sfpb_bw_count;
272
273 uint32_t qsee_perf_client;
274 struct qseecom_clk qsee;
275 struct qseecom_clk ce_drv;
276
277 bool support_bus_scaling;
278 bool support_fde;
279 bool support_pfe;
280 bool fde_key_size;
281 uint32_t cumulative_mode;
282 enum qseecom_bandwidth_request_mode current_mode;
283 struct timer_list bw_scale_down_timer;
284 struct work_struct bw_inactive_req_ws;
285 struct cdev cdev;
286 bool timer_running;
287 bool no_clock_support;
288 unsigned int ce_opp_freq_hz;
289 bool appsbl_qseecom_support;
290 uint32_t qsee_reentrancy_support;
Jiten Patela7bb1d52018-05-11 12:34:26 +0530291 bool enable_key_wrap_in_ks;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700292
293 uint32_t app_block_ref_cnt;
294 wait_queue_head_t app_block_wq;
295 atomic_t qseecom_state;
296 int is_apps_region_protected;
Zhen Kong2f60f492017-06-29 15:22:14 -0700297 bool smcinvoke_support;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800298
299 struct list_head unregister_lsnr_pending_list_head;
300 wait_queue_head_t register_lsnr_pending_wq;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700301};
302
303struct qseecom_sec_buf_fd_info {
304 bool is_sec_buf_fd;
305 size_t size;
306 void *vbase;
307 dma_addr_t pbase;
308};
309
310struct qseecom_param_memref {
311 uint32_t buffer;
312 uint32_t size;
313};
314
315struct qseecom_client_handle {
316 u32 app_id;
317 u8 *sb_virt;
318 phys_addr_t sb_phys;
319 unsigned long user_virt_sb_base;
320 size_t sb_length;
321 struct ion_handle *ihandle; /* Retrieve phy addr */
322 char app_name[MAX_APP_NAME_SIZE];
323 u32 app_arch;
324 struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
325};
326
327struct qseecom_listener_handle {
328 u32 id;
Zhen Kongbcdeda22018-11-16 13:50:51 -0800329 bool unregister_pending;
Zhen Kong87dcf0e2019-01-04 12:34:50 -0800330 bool release_called;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700331};
332
333static struct qseecom_control qseecom;
334
335struct qseecom_dev_handle {
336 enum qseecom_client_handle_type type;
337 union {
338 struct qseecom_client_handle client;
339 struct qseecom_listener_handle listener;
340 };
341 bool released;
342 int abort;
343 wait_queue_head_t abort_wq;
344 atomic_t ioctl_count;
345 bool perf_enabled;
346 bool fast_load_enabled;
347 enum qseecom_bandwidth_request_mode mode;
348 struct sglist_info sglistinfo_ptr[MAX_ION_FD];
349 uint32_t sglist_cnt;
350 bool use_legacy_cmd;
351};
352
353struct qseecom_key_id_usage_desc {
354 uint8_t desc[QSEECOM_KEY_ID_SIZE];
355};
356
357struct qseecom_crypto_info {
358 unsigned int unit_num;
359 unsigned int ce;
360 unsigned int pipe_pair;
361};
362
363static struct qseecom_key_id_usage_desc key_id_array[] = {
364 {
365 .desc = "Undefined Usage Index",
366 },
367
368 {
369 .desc = "Full Disk Encryption",
370 },
371
372 {
373 .desc = "Per File Encryption",
374 },
375
376 {
377 .desc = "UFS ICE Full Disk Encryption",
378 },
379
380 {
381 .desc = "SDCC ICE Full Disk Encryption",
382 },
383};
384
385/* Function proto types */
386static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
387static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
388static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
389static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
390static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
391static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
392 char *cmnlib_name);
393static int qseecom_enable_ice_setup(int usage);
394static int qseecom_disable_ice_setup(int usage);
395static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
396static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
397 void __user *argp);
398static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
399 void __user *argp);
400static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
401 void __user *argp);
402
403static int get_qseecom_keymaster_status(char *str)
404{
405 get_option(&str, &qseecom.is_apps_region_protected);
406 return 1;
407}
408__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
409
410static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
411 const void *req_buf, void *resp_buf)
412{
413 int ret = 0;
414 uint32_t smc_id = 0;
415 uint32_t qseos_cmd_id = 0;
416 struct scm_desc desc = {0};
417 struct qseecom_command_scm_resp *scm_resp = NULL;
418
419 if (!req_buf || !resp_buf) {
420 pr_err("Invalid buffer pointer\n");
421 return -EINVAL;
422 }
423 qseos_cmd_id = *(uint32_t *)req_buf;
424 scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
425
426 switch (svc_id) {
427 case 6: {
428 if (tz_cmd_id == 3) {
429 smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
430 desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
431 desc.args[0] = *(uint32_t *)req_buf;
432 } else {
433 pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
434 svc_id, tz_cmd_id);
435 return -EINVAL;
436 }
437 ret = scm_call2(smc_id, &desc);
438 break;
439 }
440 case SCM_SVC_ES: {
441 switch (tz_cmd_id) {
442 case SCM_SAVE_PARTITION_HASH_ID: {
443 u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
444 struct qseecom_save_partition_hash_req *p_hash_req =
445 (struct qseecom_save_partition_hash_req *)
446 req_buf;
447 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
448
449 if (!tzbuf)
450 return -ENOMEM;
451 memset(tzbuf, 0, tzbuflen);
452 memcpy(tzbuf, p_hash_req->digest,
453 SHA256_DIGEST_LENGTH);
454 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
455 smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
456 desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
457 desc.args[0] = p_hash_req->partition_id;
458 desc.args[1] = virt_to_phys(tzbuf);
459 desc.args[2] = SHA256_DIGEST_LENGTH;
460 ret = scm_call2(smc_id, &desc);
461 kzfree(tzbuf);
462 break;
463 }
464 default: {
465 pr_err("tz_cmd_id %d is not supported by scm_call2\n",
466 tz_cmd_id);
467 ret = -EINVAL;
468 break;
469 }
470 } /* end of switch (tz_cmd_id) */
471 break;
472 } /* end of case SCM_SVC_ES */
473 case SCM_SVC_TZSCHEDULER: {
474 switch (qseos_cmd_id) {
475 case QSEOS_APP_START_COMMAND: {
476 struct qseecom_load_app_ireq *req;
477 struct qseecom_load_app_64bit_ireq *req_64bit;
478
479 smc_id = TZ_OS_APP_START_ID;
480 desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
481 if (qseecom.qsee_version < QSEE_VERSION_40) {
482 req = (struct qseecom_load_app_ireq *)req_buf;
483 desc.args[0] = req->mdt_len;
484 desc.args[1] = req->img_len;
485 desc.args[2] = req->phy_addr;
486 } else {
487 req_64bit =
488 (struct qseecom_load_app_64bit_ireq *)
489 req_buf;
490 desc.args[0] = req_64bit->mdt_len;
491 desc.args[1] = req_64bit->img_len;
492 desc.args[2] = req_64bit->phy_addr;
493 }
494 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
495 ret = scm_call2(smc_id, &desc);
496 break;
497 }
498 case QSEOS_APP_SHUTDOWN_COMMAND: {
499 struct qseecom_unload_app_ireq *req;
500
501 req = (struct qseecom_unload_app_ireq *)req_buf;
502 smc_id = TZ_OS_APP_SHUTDOWN_ID;
503 desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
504 desc.args[0] = req->app_id;
505 ret = scm_call2(smc_id, &desc);
506 break;
507 }
508 case QSEOS_APP_LOOKUP_COMMAND: {
509 struct qseecom_check_app_ireq *req;
510 u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
511 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
512
513 if (!tzbuf)
514 return -ENOMEM;
515 req = (struct qseecom_check_app_ireq *)req_buf;
516 pr_debug("Lookup app_name = %s\n", req->app_name);
517 strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
518 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
519 smc_id = TZ_OS_APP_LOOKUP_ID;
520 desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
521 desc.args[0] = virt_to_phys(tzbuf);
522 desc.args[1] = strlen(req->app_name);
523 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
524 ret = scm_call2(smc_id, &desc);
525 kzfree(tzbuf);
526 break;
527 }
528 case QSEOS_APP_REGION_NOTIFICATION: {
529 struct qsee_apps_region_info_ireq *req;
530 struct qsee_apps_region_info_64bit_ireq *req_64bit;
531
532 smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
533 desc.arginfo =
534 TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
535 if (qseecom.qsee_version < QSEE_VERSION_40) {
536 req = (struct qsee_apps_region_info_ireq *)
537 req_buf;
538 desc.args[0] = req->addr;
539 desc.args[1] = req->size;
540 } else {
541 req_64bit =
542 (struct qsee_apps_region_info_64bit_ireq *)
543 req_buf;
544 desc.args[0] = req_64bit->addr;
545 desc.args[1] = req_64bit->size;
546 }
547 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
548 ret = scm_call2(smc_id, &desc);
549 break;
550 }
551 case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
552 struct qseecom_load_lib_image_ireq *req;
553 struct qseecom_load_lib_image_64bit_ireq *req_64bit;
554
555 smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
556 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
557 if (qseecom.qsee_version < QSEE_VERSION_40) {
558 req = (struct qseecom_load_lib_image_ireq *)
559 req_buf;
560 desc.args[0] = req->mdt_len;
561 desc.args[1] = req->img_len;
562 desc.args[2] = req->phy_addr;
563 } else {
564 req_64bit =
565 (struct qseecom_load_lib_image_64bit_ireq *)
566 req_buf;
567 desc.args[0] = req_64bit->mdt_len;
568 desc.args[1] = req_64bit->img_len;
569 desc.args[2] = req_64bit->phy_addr;
570 }
571 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
572 ret = scm_call2(smc_id, &desc);
573 break;
574 }
575 case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
576 smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
577 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
578 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
579 ret = scm_call2(smc_id, &desc);
580 break;
581 }
582 case QSEOS_REGISTER_LISTENER: {
583 struct qseecom_register_listener_ireq *req;
584 struct qseecom_register_listener_64bit_ireq *req_64bit;
585
586 desc.arginfo =
587 TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
588 if (qseecom.qsee_version < QSEE_VERSION_40) {
589 req = (struct qseecom_register_listener_ireq *)
590 req_buf;
591 desc.args[0] = req->listener_id;
592 desc.args[1] = req->sb_ptr;
593 desc.args[2] = req->sb_len;
594 } else {
595 req_64bit =
596 (struct qseecom_register_listener_64bit_ireq *)
597 req_buf;
598 desc.args[0] = req_64bit->listener_id;
599 desc.args[1] = req_64bit->sb_ptr;
600 desc.args[2] = req_64bit->sb_len;
601 }
Zhen Kong2f60f492017-06-29 15:22:14 -0700602 qseecom.smcinvoke_support = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700603 smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700604 ret = scm_call2(smc_id, &desc);
Zhen Kong50a15202019-01-29 14:16:00 -0800605 if (ret == -EIO) {
606 /* smcinvoke is not supported */
Zhen Kong2f60f492017-06-29 15:22:14 -0700607 qseecom.smcinvoke_support = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700608 smc_id = TZ_OS_REGISTER_LISTENER_ID;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700609 ret = scm_call2(smc_id, &desc);
610 }
611 break;
612 }
613 case QSEOS_DEREGISTER_LISTENER: {
614 struct qseecom_unregister_listener_ireq *req;
615
616 req = (struct qseecom_unregister_listener_ireq *)
617 req_buf;
618 smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
619 desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
620 desc.args[0] = req->listener_id;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -0700621 ret = scm_call2(smc_id, &desc);
622 break;
623 }
624 case QSEOS_LISTENER_DATA_RSP_COMMAND: {
625 struct qseecom_client_listener_data_irsp *req;
626
627 req = (struct qseecom_client_listener_data_irsp *)
628 req_buf;
629 smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
630 desc.arginfo =
631 TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
632 desc.args[0] = req->listener_id;
633 desc.args[1] = req->status;
634 ret = scm_call2(smc_id, &desc);
635 break;
636 }
637 case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
638 struct qseecom_client_listener_data_irsp *req;
639 struct qseecom_client_listener_data_64bit_irsp *req_64;
640
641 smc_id =
642 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
643 desc.arginfo =
644 TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
645 if (qseecom.qsee_version < QSEE_VERSION_40) {
646 req =
647 (struct qseecom_client_listener_data_irsp *)
648 req_buf;
649 desc.args[0] = req->listener_id;
650 desc.args[1] = req->status;
651 desc.args[2] = req->sglistinfo_ptr;
652 desc.args[3] = req->sglistinfo_len;
653 } else {
654 req_64 =
655 (struct qseecom_client_listener_data_64bit_irsp *)
656 req_buf;
657 desc.args[0] = req_64->listener_id;
658 desc.args[1] = req_64->status;
659 desc.args[2] = req_64->sglistinfo_ptr;
660 desc.args[3] = req_64->sglistinfo_len;
661 }
662 ret = scm_call2(smc_id, &desc);
663 break;
664 }
665 case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
666 struct qseecom_load_app_ireq *req;
667 struct qseecom_load_app_64bit_ireq *req_64bit;
668
669 smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
670 desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
671 if (qseecom.qsee_version < QSEE_VERSION_40) {
672 req = (struct qseecom_load_app_ireq *)req_buf;
673 desc.args[0] = req->mdt_len;
674 desc.args[1] = req->img_len;
675 desc.args[2] = req->phy_addr;
676 } else {
677 req_64bit =
678 (struct qseecom_load_app_64bit_ireq *)req_buf;
679 desc.args[0] = req_64bit->mdt_len;
680 desc.args[1] = req_64bit->img_len;
681 desc.args[2] = req_64bit->phy_addr;
682 }
683 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
684 ret = scm_call2(smc_id, &desc);
685 break;
686 }
687 case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
688 smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
689 desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
690 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
691 ret = scm_call2(smc_id, &desc);
692 break;
693 }
694
695 case QSEOS_CLIENT_SEND_DATA_COMMAND: {
696 struct qseecom_client_send_data_ireq *req;
697 struct qseecom_client_send_data_64bit_ireq *req_64bit;
698
699 smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
700 desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
701 if (qseecom.qsee_version < QSEE_VERSION_40) {
702 req = (struct qseecom_client_send_data_ireq *)
703 req_buf;
704 desc.args[0] = req->app_id;
705 desc.args[1] = req->req_ptr;
706 desc.args[2] = req->req_len;
707 desc.args[3] = req->rsp_ptr;
708 desc.args[4] = req->rsp_len;
709 } else {
710 req_64bit =
711 (struct qseecom_client_send_data_64bit_ireq *)
712 req_buf;
713 desc.args[0] = req_64bit->app_id;
714 desc.args[1] = req_64bit->req_ptr;
715 desc.args[2] = req_64bit->req_len;
716 desc.args[3] = req_64bit->rsp_ptr;
717 desc.args[4] = req_64bit->rsp_len;
718 }
719 ret = scm_call2(smc_id, &desc);
720 break;
721 }
722 case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
723 struct qseecom_client_send_data_ireq *req;
724 struct qseecom_client_send_data_64bit_ireq *req_64bit;
725
726 smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
727 desc.arginfo =
728 TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
729 if (qseecom.qsee_version < QSEE_VERSION_40) {
730 req = (struct qseecom_client_send_data_ireq *)
731 req_buf;
732 desc.args[0] = req->app_id;
733 desc.args[1] = req->req_ptr;
734 desc.args[2] = req->req_len;
735 desc.args[3] = req->rsp_ptr;
736 desc.args[4] = req->rsp_len;
737 desc.args[5] = req->sglistinfo_ptr;
738 desc.args[6] = req->sglistinfo_len;
739 } else {
740 req_64bit =
741 (struct qseecom_client_send_data_64bit_ireq *)
742 req_buf;
743 desc.args[0] = req_64bit->app_id;
744 desc.args[1] = req_64bit->req_ptr;
745 desc.args[2] = req_64bit->req_len;
746 desc.args[3] = req_64bit->rsp_ptr;
747 desc.args[4] = req_64bit->rsp_len;
748 desc.args[5] = req_64bit->sglistinfo_ptr;
749 desc.args[6] = req_64bit->sglistinfo_len;
750 }
751 ret = scm_call2(smc_id, &desc);
752 break;
753 }
754 case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
755 struct qseecom_client_send_service_ireq *req;
756
757 req = (struct qseecom_client_send_service_ireq *)
758 req_buf;
759 smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
760 desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
761 desc.args[0] = req->key_type;
762 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
763 ret = scm_call2(smc_id, &desc);
764 break;
765 }
766 case QSEOS_RPMB_ERASE_COMMAND: {
767 smc_id = TZ_OS_RPMB_ERASE_ID;
768 desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
769 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
770 ret = scm_call2(smc_id, &desc);
771 break;
772 }
773 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
774 smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
775 desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
776 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
777 ret = scm_call2(smc_id, &desc);
778 break;
779 }
780 case QSEOS_GENERATE_KEY: {
781 u32 tzbuflen = PAGE_ALIGN(sizeof
782 (struct qseecom_key_generate_ireq) -
783 sizeof(uint32_t));
784 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
785
786 if (!tzbuf)
787 return -ENOMEM;
788 memset(tzbuf, 0, tzbuflen);
789 memcpy(tzbuf, req_buf + sizeof(uint32_t),
790 (sizeof(struct qseecom_key_generate_ireq) -
791 sizeof(uint32_t)));
792 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
793 smc_id = TZ_OS_KS_GEN_KEY_ID;
794 desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
795 desc.args[0] = virt_to_phys(tzbuf);
796 desc.args[1] = tzbuflen;
797 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
798 ret = scm_call2(smc_id, &desc);
799 kzfree(tzbuf);
800 break;
801 }
802 case QSEOS_DELETE_KEY: {
803 u32 tzbuflen = PAGE_ALIGN(sizeof
804 (struct qseecom_key_delete_ireq) -
805 sizeof(uint32_t));
806 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
807
808 if (!tzbuf)
809 return -ENOMEM;
810 memset(tzbuf, 0, tzbuflen);
811 memcpy(tzbuf, req_buf + sizeof(uint32_t),
812 (sizeof(struct qseecom_key_delete_ireq) -
813 sizeof(uint32_t)));
814 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
815 smc_id = TZ_OS_KS_DEL_KEY_ID;
816 desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
817 desc.args[0] = virt_to_phys(tzbuf);
818 desc.args[1] = tzbuflen;
819 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
820 ret = scm_call2(smc_id, &desc);
821 kzfree(tzbuf);
822 break;
823 }
824 case QSEOS_SET_KEY: {
825 u32 tzbuflen = PAGE_ALIGN(sizeof
826 (struct qseecom_key_select_ireq) -
827 sizeof(uint32_t));
828 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
829
830 if (!tzbuf)
831 return -ENOMEM;
832 memset(tzbuf, 0, tzbuflen);
833 memcpy(tzbuf, req_buf + sizeof(uint32_t),
834 (sizeof(struct qseecom_key_select_ireq) -
835 sizeof(uint32_t)));
836 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
837 smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
838 desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
839 desc.args[0] = virt_to_phys(tzbuf);
840 desc.args[1] = tzbuflen;
841 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
842 ret = scm_call2(smc_id, &desc);
843 kzfree(tzbuf);
844 break;
845 }
846 case QSEOS_UPDATE_KEY_USERINFO: {
847 u32 tzbuflen = PAGE_ALIGN(sizeof
848 (struct qseecom_key_userinfo_update_ireq) -
849 sizeof(uint32_t));
850 char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
851
852 if (!tzbuf)
853 return -ENOMEM;
854 memset(tzbuf, 0, tzbuflen);
855 memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
856 (struct qseecom_key_userinfo_update_ireq) -
857 sizeof(uint32_t)));
858 dmac_flush_range(tzbuf, tzbuf + tzbuflen);
859 smc_id = TZ_OS_KS_UPDATE_KEY_ID;
860 desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
861 desc.args[0] = virt_to_phys(tzbuf);
862 desc.args[1] = tzbuflen;
863 __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
864 ret = scm_call2(smc_id, &desc);
865 kzfree(tzbuf);
866 break;
867 }
868 case QSEOS_TEE_OPEN_SESSION: {
869 struct qseecom_qteec_ireq *req;
870 struct qseecom_qteec_64bit_ireq *req_64bit;
871
872 smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
873 desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
874 if (qseecom.qsee_version < QSEE_VERSION_40) {
875 req = (struct qseecom_qteec_ireq *)req_buf;
876 desc.args[0] = req->app_id;
877 desc.args[1] = req->req_ptr;
878 desc.args[2] = req->req_len;
879 desc.args[3] = req->resp_ptr;
880 desc.args[4] = req->resp_len;
881 } else {
882 req_64bit = (struct qseecom_qteec_64bit_ireq *)
883 req_buf;
884 desc.args[0] = req_64bit->app_id;
885 desc.args[1] = req_64bit->req_ptr;
886 desc.args[2] = req_64bit->req_len;
887 desc.args[3] = req_64bit->resp_ptr;
888 desc.args[4] = req_64bit->resp_len;
889 }
890 ret = scm_call2(smc_id, &desc);
891 break;
892 }
893 case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
894 struct qseecom_qteec_ireq *req;
895 struct qseecom_qteec_64bit_ireq *req_64bit;
896
897 smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
898 desc.arginfo =
899 TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
900 if (qseecom.qsee_version < QSEE_VERSION_40) {
901 req = (struct qseecom_qteec_ireq *)req_buf;
902 desc.args[0] = req->app_id;
903 desc.args[1] = req->req_ptr;
904 desc.args[2] = req->req_len;
905 desc.args[3] = req->resp_ptr;
906 desc.args[4] = req->resp_len;
907 desc.args[5] = req->sglistinfo_ptr;
908 desc.args[6] = req->sglistinfo_len;
909 } else {
910 req_64bit = (struct qseecom_qteec_64bit_ireq *)
911 req_buf;
912 desc.args[0] = req_64bit->app_id;
913 desc.args[1] = req_64bit->req_ptr;
914 desc.args[2] = req_64bit->req_len;
915 desc.args[3] = req_64bit->resp_ptr;
916 desc.args[4] = req_64bit->resp_len;
917 desc.args[5] = req_64bit->sglistinfo_ptr;
918 desc.args[6] = req_64bit->sglistinfo_len;
919 }
920 ret = scm_call2(smc_id, &desc);
921 break;
922 }
923 case QSEOS_TEE_INVOKE_COMMAND: {
924 struct qseecom_qteec_ireq *req;
925 struct qseecom_qteec_64bit_ireq *req_64bit;
926
927 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
928 desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
929 if (qseecom.qsee_version < QSEE_VERSION_40) {
930 req = (struct qseecom_qteec_ireq *)req_buf;
931 desc.args[0] = req->app_id;
932 desc.args[1] = req->req_ptr;
933 desc.args[2] = req->req_len;
934 desc.args[3] = req->resp_ptr;
935 desc.args[4] = req->resp_len;
936 } else {
937 req_64bit = (struct qseecom_qteec_64bit_ireq *)
938 req_buf;
939 desc.args[0] = req_64bit->app_id;
940 desc.args[1] = req_64bit->req_ptr;
941 desc.args[2] = req_64bit->req_len;
942 desc.args[3] = req_64bit->resp_ptr;
943 desc.args[4] = req_64bit->resp_len;
944 }
945 ret = scm_call2(smc_id, &desc);
946 break;
947 }
948 case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
949 struct qseecom_qteec_ireq *req;
950 struct qseecom_qteec_64bit_ireq *req_64bit;
951
952 smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
953 desc.arginfo =
954 TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
955 if (qseecom.qsee_version < QSEE_VERSION_40) {
956 req = (struct qseecom_qteec_ireq *)req_buf;
957 desc.args[0] = req->app_id;
958 desc.args[1] = req->req_ptr;
959 desc.args[2] = req->req_len;
960 desc.args[3] = req->resp_ptr;
961 desc.args[4] = req->resp_len;
962 desc.args[5] = req->sglistinfo_ptr;
963 desc.args[6] = req->sglistinfo_len;
964 } else {
965 req_64bit = (struct qseecom_qteec_64bit_ireq *)
966 req_buf;
967 desc.args[0] = req_64bit->app_id;
968 desc.args[1] = req_64bit->req_ptr;
969 desc.args[2] = req_64bit->req_len;
970 desc.args[3] = req_64bit->resp_ptr;
971 desc.args[4] = req_64bit->resp_len;
972 desc.args[5] = req_64bit->sglistinfo_ptr;
973 desc.args[6] = req_64bit->sglistinfo_len;
974 }
975 ret = scm_call2(smc_id, &desc);
976 break;
977 }
978 case QSEOS_TEE_CLOSE_SESSION: {
979 struct qseecom_qteec_ireq *req;
980 struct qseecom_qteec_64bit_ireq *req_64bit;
981
982 smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
983 desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
984 if (qseecom.qsee_version < QSEE_VERSION_40) {
985 req = (struct qseecom_qteec_ireq *)req_buf;
986 desc.args[0] = req->app_id;
987 desc.args[1] = req->req_ptr;
988 desc.args[2] = req->req_len;
989 desc.args[3] = req->resp_ptr;
990 desc.args[4] = req->resp_len;
991 } else {
992 req_64bit = (struct qseecom_qteec_64bit_ireq *)
993 req_buf;
994 desc.args[0] = req_64bit->app_id;
995 desc.args[1] = req_64bit->req_ptr;
996 desc.args[2] = req_64bit->req_len;
997 desc.args[3] = req_64bit->resp_ptr;
998 desc.args[4] = req_64bit->resp_len;
999 }
1000 ret = scm_call2(smc_id, &desc);
1001 break;
1002 }
1003 case QSEOS_TEE_REQUEST_CANCELLATION: {
1004 struct qseecom_qteec_ireq *req;
1005 struct qseecom_qteec_64bit_ireq *req_64bit;
1006
1007 smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
1008 desc.arginfo =
1009 TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
1010 if (qseecom.qsee_version < QSEE_VERSION_40) {
1011 req = (struct qseecom_qteec_ireq *)req_buf;
1012 desc.args[0] = req->app_id;
1013 desc.args[1] = req->req_ptr;
1014 desc.args[2] = req->req_len;
1015 desc.args[3] = req->resp_ptr;
1016 desc.args[4] = req->resp_len;
1017 } else {
1018 req_64bit = (struct qseecom_qteec_64bit_ireq *)
1019 req_buf;
1020 desc.args[0] = req_64bit->app_id;
1021 desc.args[1] = req_64bit->req_ptr;
1022 desc.args[2] = req_64bit->req_len;
1023 desc.args[3] = req_64bit->resp_ptr;
1024 desc.args[4] = req_64bit->resp_len;
1025 }
1026 ret = scm_call2(smc_id, &desc);
1027 break;
1028 }
1029 case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
1030 struct qseecom_continue_blocked_request_ireq *req =
1031 (struct qseecom_continue_blocked_request_ireq *)
1032 req_buf;
Zhen Kong2f60f492017-06-29 15:22:14 -07001033 if (qseecom.smcinvoke_support)
1034 smc_id =
1035 TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
1036 else
1037 smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001038 desc.arginfo =
1039 TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
Zhen Kong2f60f492017-06-29 15:22:14 -07001040 desc.args[0] = req->app_or_session_id;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001041 ret = scm_call2(smc_id, &desc);
1042 break;
1043 }
1044 default: {
1045 pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
1046 qseos_cmd_id);
1047 ret = -EINVAL;
1048 break;
1049 }
1050 } /*end of switch (qsee_cmd_id) */
1051 break;
1052 } /*end of case SCM_SVC_TZSCHEDULER*/
1053 default: {
1054 pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
1055 svc_id);
1056 ret = -EINVAL;
1057 break;
1058 }
1059 } /*end of switch svc_id */
1060 scm_resp->result = desc.ret[0];
1061 scm_resp->resp_type = desc.ret[1];
1062 scm_resp->data = desc.ret[2];
1063 pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
1064 svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
1065 pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
1066 scm_resp->result, scm_resp->resp_type, scm_resp->data);
1067 return ret;
1068}
1069
1070
1071static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1072 size_t cmd_len, void *resp_buf, size_t resp_len)
1073{
1074 if (!is_scm_armv8())
1075 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1076 resp_buf, resp_len);
1077 else
1078 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1079}
1080
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001081static struct qseecom_registered_listener_list *__qseecom_find_svc(
1082 int32_t listener_id)
1083{
1084 struct qseecom_registered_listener_list *entry = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001085
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001086 list_for_each_entry(entry,
1087 &qseecom.registered_listener_list_head, list) {
1088 if (entry->svc.listener_id == listener_id)
1089 break;
1090 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001091 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08001092 pr_debug("Service id: %u is not found\n", listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001093 return NULL;
1094 }
1095
1096 return entry;
1097}
1098
1099static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1100 struct qseecom_dev_handle *handle,
1101 struct qseecom_register_listener_req *listener)
1102{
1103 int ret = 0;
1104 struct qseecom_register_listener_ireq req;
1105 struct qseecom_register_listener_64bit_ireq req_64bit;
1106 struct qseecom_command_scm_resp resp;
1107 ion_phys_addr_t pa;
1108 void *cmd_buf = NULL;
1109 size_t cmd_len;
1110
1111 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001112 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001113 listener->ifd_data_fd);
1114 if (IS_ERR_OR_NULL(svc->ihandle)) {
1115 pr_err("Ion client could not retrieve the handle\n");
1116 return -ENOMEM;
1117 }
1118
1119 /* Get the physical address of the ION BUF */
1120 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1121 if (ret) {
1122 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1123 ret);
1124 return ret;
1125 }
1126 /* Populate the structure for sending scm call to load image */
1127 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1128 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1129 pr_err("ION memory mapping for listener shared buffer failed\n");
1130 return -ENOMEM;
1131 }
1132 svc->sb_phys = (phys_addr_t)pa;
1133
1134 if (qseecom.qsee_version < QSEE_VERSION_40) {
1135 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1136 req.listener_id = svc->svc.listener_id;
1137 req.sb_len = svc->sb_length;
1138 req.sb_ptr = (uint32_t)svc->sb_phys;
1139 cmd_buf = (void *)&req;
1140 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1141 } else {
1142 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1143 req_64bit.listener_id = svc->svc.listener_id;
1144 req_64bit.sb_len = svc->sb_length;
1145 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1146 cmd_buf = (void *)&req_64bit;
1147 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1148 }
1149
1150 resp.result = QSEOS_RESULT_INCOMPLETE;
1151
1152 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1153 &resp, sizeof(resp));
1154 if (ret) {
1155 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1156 return -EINVAL;
1157 }
1158
1159 if (resp.result != QSEOS_RESULT_SUCCESS) {
1160 pr_err("Error SB registration req: resp.result = %d\n",
1161 resp.result);
1162 return -EPERM;
1163 }
1164 return 0;
1165}
1166
1167static int qseecom_register_listener(struct qseecom_dev_handle *data,
1168 void __user *argp)
1169{
1170 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001171 struct qseecom_register_listener_req rcvd_lstnr;
1172 struct qseecom_registered_listener_list *new_entry;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001173 struct qseecom_registered_listener_list *ptr_svc;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001174
1175 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1176 if (ret) {
1177 pr_err("copy_from_user failed\n");
1178 return ret;
1179 }
1180 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1181 rcvd_lstnr.sb_size))
1182 return -EFAULT;
1183
Zhen Kong3c674612018-09-06 22:51:27 -07001184 data->listener.id = rcvd_lstnr.listener_id;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001185
1186 ptr_svc = __qseecom_find_svc(rcvd_lstnr.listener_id);
1187 if (ptr_svc) {
1188 if (ptr_svc->unregister_pending == false) {
1189 pr_err("Service %d is not unique\n",
Zhen Kong3c674612018-09-06 22:51:27 -07001190 rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001191 data->released = true;
1192 return -EBUSY;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001193 } else {
1194 /*wait until listener is unregistered*/
1195 pr_debug("register %d has to wait\n",
1196 rcvd_lstnr.listener_id);
1197 mutex_unlock(&listener_access_lock);
1198 ret = wait_event_freezable(
1199 qseecom.register_lsnr_pending_wq,
1200 list_empty(
1201 &qseecom.unregister_lsnr_pending_list_head));
1202 if (ret) {
1203 pr_err("interrupted register_pending_wq %d\n",
1204 rcvd_lstnr.listener_id);
1205 mutex_lock(&listener_access_lock);
1206 return -ERESTARTSYS;
1207 }
1208 mutex_lock(&listener_access_lock);
1209 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001210 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001211 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1212 if (!new_entry)
1213 return -ENOMEM;
1214 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
Zhen Kongbcdeda22018-11-16 13:50:51 -08001215 new_entry->rcv_req_flag = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001216
1217 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1218 new_entry->sb_length = rcvd_lstnr.sb_size;
1219 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1220 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
Zhen Kong3c674612018-09-06 22:51:27 -07001221 pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
1222 rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001223 kzfree(new_entry);
1224 return -ENOMEM;
1225 }
1226
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001227 init_waitqueue_head(&new_entry->rcv_req_wq);
1228 init_waitqueue_head(&new_entry->listener_block_app_wq);
1229 new_entry->send_resp_flag = 0;
1230 new_entry->listener_in_use = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001231 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001232
Zhen Kong3c674612018-09-06 22:51:27 -07001233 pr_warn("Service %d is registered\n", rcvd_lstnr.listener_id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001234 return ret;
1235}
1236
Zhen Kongbcdeda22018-11-16 13:50:51 -08001237static int __qseecom_unregister_listener(struct qseecom_dev_handle *data,
1238 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001239{
1240 int ret = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001241 struct qseecom_register_listener_ireq req;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001242 struct qseecom_command_scm_resp resp;
1243 struct ion_handle *ihandle = NULL; /* Retrieve phy addr */
1244
1245 req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
1246 req.listener_id = data->listener.id;
1247 resp.result = QSEOS_RESULT_INCOMPLETE;
1248
1249 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
1250 sizeof(req), &resp, sizeof(resp));
1251 if (ret) {
1252 pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
1253 ret, data->listener.id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001254 if (ret == -EBUSY)
1255 return ret;
Zhen Kong3c674612018-09-06 22:51:27 -07001256 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001257 }
1258
1259 if (resp.result != QSEOS_RESULT_SUCCESS) {
1260 pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
1261 resp.result, data->listener.id);
Zhen Kong3c674612018-09-06 22:51:27 -07001262 ret = -EPERM;
1263 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001264 }
1265
1266 data->abort = 1;
Zhen Kong3c674612018-09-06 22:51:27 -07001267 wake_up_all(&ptr_svc->rcv_req_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001268
1269 while (atomic_read(&data->ioctl_count) > 1) {
1270 if (wait_event_freezable(data->abort_wq,
1271 atomic_read(&data->ioctl_count) <= 1)) {
1272 pr_err("Interrupted from abort\n");
1273 ret = -ERESTARTSYS;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001274 }
1275 }
1276
Zhen Kong3c674612018-09-06 22:51:27 -07001277exit:
1278 if (ptr_svc->sb_virt) {
1279 ihandle = ptr_svc->ihandle;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001280 if (!IS_ERR_OR_NULL(ihandle)) {
1281 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
1282 ion_free(qseecom.ion_clnt, ihandle);
1283 }
1284 }
Zhen Kong3c674612018-09-06 22:51:27 -07001285 list_del(&ptr_svc->list);
1286 kzfree(ptr_svc);
1287
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001288 data->released = true;
Zhen Kong3c674612018-09-06 22:51:27 -07001289 pr_warn("Service %d is unregistered\n", data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001290 return ret;
1291}
1292
Zhen Kongbcdeda22018-11-16 13:50:51 -08001293static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
1294{
1295 struct qseecom_registered_listener_list *ptr_svc = NULL;
1296 struct qseecom_unregister_pending_list *entry = NULL;
1297
1298 ptr_svc = __qseecom_find_svc(data->listener.id);
1299 if (!ptr_svc) {
1300 pr_err("Unregiser invalid listener ID %d\n", data->listener.id);
1301 return -ENODATA;
1302 }
1303 /* stop CA thread waiting for listener response */
1304 ptr_svc->abort = 1;
1305 wake_up_interruptible_all(&qseecom.send_resp_wq);
1306
1307 /* return directly if pending*/
1308 if (ptr_svc->unregister_pending)
1309 return 0;
1310
1311 /*add unregistration into pending list*/
1312 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1313 if (!entry)
1314 return -ENOMEM;
1315 entry->data = data;
1316 list_add_tail(&entry->list,
1317 &qseecom.unregister_lsnr_pending_list_head);
1318 ptr_svc->unregister_pending = true;
1319 pr_debug("unregister %d pending\n", data->listener.id);
1320 return 0;
1321}
1322
1323static void __qseecom_processing_pending_lsnr_unregister(void)
1324{
1325 struct qseecom_unregister_pending_list *entry = NULL;
1326 struct qseecom_registered_listener_list *ptr_svc = NULL;
1327 struct list_head *pos;
1328 int ret = 0;
1329
1330 mutex_lock(&listener_access_lock);
1331 while (!list_empty(&qseecom.unregister_lsnr_pending_list_head)) {
1332 pos = qseecom.unregister_lsnr_pending_list_head.next;
1333 entry = list_entry(pos,
1334 struct qseecom_unregister_pending_list, list);
1335 if (entry && entry->data) {
1336 pr_debug("process pending unregister %d\n",
1337 entry->data->listener.id);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08001338 /* don't process if qseecom_release is not called*/
1339 if (!entry->data->listener.release_called)
1340 break;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001341 ptr_svc = __qseecom_find_svc(
1342 entry->data->listener.id);
1343 if (ptr_svc) {
1344 ret = __qseecom_unregister_listener(
1345 entry->data, ptr_svc);
1346 if (ret == -EBUSY) {
1347 pr_debug("unregister %d pending again\n",
1348 entry->data->listener.id);
1349 mutex_unlock(&listener_access_lock);
1350 return;
1351 }
1352 } else
1353 pr_err("invalid listener %d\n",
1354 entry->data->listener.id);
1355 kzfree(entry->data);
1356 }
1357 list_del(pos);
1358 kzfree(entry);
1359 }
1360 mutex_unlock(&listener_access_lock);
1361 wake_up_interruptible(&qseecom.register_lsnr_pending_wq);
1362}
1363
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001364static int __qseecom_set_msm_bus_request(uint32_t mode)
1365{
1366 int ret = 0;
1367 struct qseecom_clk *qclk;
1368
1369 qclk = &qseecom.qsee;
1370 if (qclk->ce_core_src_clk != NULL) {
1371 if (mode == INACTIVE) {
1372 __qseecom_disable_clk(CLK_QSEE);
1373 } else {
1374 ret = __qseecom_enable_clk(CLK_QSEE);
1375 if (ret)
1376 pr_err("CLK enabling failed (%d) MODE (%d)\n",
1377 ret, mode);
1378 }
1379 }
1380
1381 if ((!ret) && (qseecom.current_mode != mode)) {
1382 ret = msm_bus_scale_client_update_request(
1383 qseecom.qsee_perf_client, mode);
1384 if (ret) {
1385 pr_err("Bandwidth req failed(%d) MODE (%d)\n",
1386 ret, mode);
1387 if (qclk->ce_core_src_clk != NULL) {
1388 if (mode == INACTIVE) {
1389 ret = __qseecom_enable_clk(CLK_QSEE);
1390 if (ret)
1391 pr_err("CLK enable failed\n");
1392 } else
1393 __qseecom_disable_clk(CLK_QSEE);
1394 }
1395 }
1396 qseecom.current_mode = mode;
1397 }
1398 return ret;
1399}
1400
1401static void qseecom_bw_inactive_req_work(struct work_struct *work)
1402{
1403 mutex_lock(&app_access_lock);
1404 mutex_lock(&qsee_bw_mutex);
1405 if (qseecom.timer_running)
1406 __qseecom_set_msm_bus_request(INACTIVE);
1407 pr_debug("current_mode = %d, cumulative_mode = %d\n",
1408 qseecom.current_mode, qseecom.cumulative_mode);
1409 qseecom.timer_running = false;
1410 mutex_unlock(&qsee_bw_mutex);
1411 mutex_unlock(&app_access_lock);
1412}
1413
1414static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
1415{
1416 schedule_work(&qseecom.bw_inactive_req_ws);
1417}
1418
1419static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1420{
1421 struct qseecom_clk *qclk;
1422 int ret = 0;
1423
1424 mutex_lock(&clk_access_lock);
1425 if (ce == CLK_QSEE)
1426 qclk = &qseecom.qsee;
1427 else
1428 qclk = &qseecom.ce_drv;
1429
1430 if (qclk->clk_access_cnt > 2) {
1431 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1432 ret = -EINVAL;
1433 goto err_dec_ref_cnt;
1434 }
1435 if (qclk->clk_access_cnt == 2)
1436 qclk->clk_access_cnt--;
1437
1438err_dec_ref_cnt:
1439 mutex_unlock(&clk_access_lock);
1440 return ret;
1441}
1442
1443
1444static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1445{
1446 int32_t ret = 0;
1447 int32_t request_mode = INACTIVE;
1448
1449 mutex_lock(&qsee_bw_mutex);
1450 if (mode == 0) {
1451 if (qseecom.cumulative_mode > MEDIUM)
1452 request_mode = HIGH;
1453 else
1454 request_mode = qseecom.cumulative_mode;
1455 } else {
1456 request_mode = mode;
1457 }
1458
1459 ret = __qseecom_set_msm_bus_request(request_mode);
1460 if (ret) {
1461 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1462 ret, request_mode);
1463 goto err_scale_timer;
1464 }
1465
1466 if (qseecom.timer_running) {
1467 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1468 if (ret) {
1469 pr_err("Failed to decrease clk ref count.\n");
1470 goto err_scale_timer;
1471 }
1472 del_timer_sync(&(qseecom.bw_scale_down_timer));
1473 qseecom.timer_running = false;
1474 }
1475err_scale_timer:
1476 mutex_unlock(&qsee_bw_mutex);
1477 return ret;
1478}
1479
1480
1481static int qseecom_unregister_bus_bandwidth_needs(
1482 struct qseecom_dev_handle *data)
1483{
1484 int32_t ret = 0;
1485
1486 qseecom.cumulative_mode -= data->mode;
1487 data->mode = INACTIVE;
1488
1489 return ret;
1490}
1491
1492static int __qseecom_register_bus_bandwidth_needs(
1493 struct qseecom_dev_handle *data, uint32_t request_mode)
1494{
1495 int32_t ret = 0;
1496
1497 if (data->mode == INACTIVE) {
1498 qseecom.cumulative_mode += request_mode;
1499 data->mode = request_mode;
1500 } else {
1501 if (data->mode != request_mode) {
1502 qseecom.cumulative_mode -= data->mode;
1503 qseecom.cumulative_mode += request_mode;
1504 data->mode = request_mode;
1505 }
1506 }
1507 return ret;
1508}
1509
1510static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1511{
1512 int ret = 0;
1513
1514 ret = qsee_vote_for_clock(data, CLK_DFAB);
1515 if (ret) {
1516 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1517 goto perf_enable_exit;
1518 }
1519 ret = qsee_vote_for_clock(data, CLK_SFPB);
1520 if (ret) {
1521 qsee_disable_clock_vote(data, CLK_DFAB);
1522 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1523 goto perf_enable_exit;
1524 }
1525
1526perf_enable_exit:
1527 return ret;
1528}
1529
1530static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1531 void __user *argp)
1532{
1533 int32_t ret = 0;
1534 int32_t req_mode;
1535
1536 if (qseecom.no_clock_support)
1537 return 0;
1538
1539 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1540 if (ret) {
1541 pr_err("copy_from_user failed\n");
1542 return ret;
1543 }
1544 if (req_mode > HIGH) {
1545 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1546 return -EINVAL;
1547 }
1548
1549 /*
1550 * Register bus bandwidth needs if bus scaling feature is enabled;
1551 * otherwise, qseecom enable/disable clocks for the client directly.
1552 */
1553 if (qseecom.support_bus_scaling) {
1554 mutex_lock(&qsee_bw_mutex);
1555 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1556 mutex_unlock(&qsee_bw_mutex);
1557 } else {
1558 pr_debug("Bus scaling feature is NOT enabled\n");
1559 pr_debug("request bandwidth mode %d for the client\n",
1560 req_mode);
1561 if (req_mode != INACTIVE) {
1562 ret = qseecom_perf_enable(data);
1563 if (ret)
1564 pr_err("Failed to vote for clock with err %d\n",
1565 ret);
1566 } else {
1567 qsee_disable_clock_vote(data, CLK_DFAB);
1568 qsee_disable_clock_vote(data, CLK_SFPB);
1569 }
1570 }
1571 return ret;
1572}
1573
1574static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1575{
1576 if (qseecom.no_clock_support)
1577 return;
1578
1579 mutex_lock(&qsee_bw_mutex);
1580 qseecom.bw_scale_down_timer.expires = jiffies +
1581 msecs_to_jiffies(duration);
1582 mod_timer(&(qseecom.bw_scale_down_timer),
1583 qseecom.bw_scale_down_timer.expires);
1584 qseecom.timer_running = true;
1585 mutex_unlock(&qsee_bw_mutex);
1586}
1587
1588static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1589{
1590 if (!qseecom.support_bus_scaling)
1591 qsee_disable_clock_vote(data, CLK_SFPB);
1592 else
1593 __qseecom_add_bw_scale_down_timer(
1594 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1595}
1596
1597static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1598{
1599 int ret = 0;
1600
1601 if (qseecom.support_bus_scaling) {
1602 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1603 if (ret)
1604 pr_err("Failed to set bw MEDIUM.\n");
1605 } else {
1606 ret = qsee_vote_for_clock(data, CLK_SFPB);
1607 if (ret)
1608 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1609 }
1610 return ret;
1611}
1612
1613static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1614 void __user *argp)
1615{
1616 ion_phys_addr_t pa;
1617 int32_t ret;
1618 struct qseecom_set_sb_mem_param_req req;
1619 size_t len;
1620
1621 /* Copy the relevant information needed for loading the image */
1622 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1623 return -EFAULT;
1624
1625 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1626 (req.sb_len == 0)) {
1627 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1628 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1629 return -EFAULT;
1630 }
1631 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1632 req.sb_len))
1633 return -EFAULT;
1634
1635 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001636 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001637 req.ifd_data_fd);
1638 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1639 pr_err("Ion client could not retrieve the handle\n");
1640 return -ENOMEM;
1641 }
1642 /* Get the physical address of the ION BUF */
1643 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1644 if (ret) {
1645
1646 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1647 ret);
1648 return ret;
1649 }
1650
1651 if (len < req.sb_len) {
1652 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1653 req.sb_len, len);
1654 return -EINVAL;
1655 }
1656 /* Populate the structure for sending scm call to load image */
1657 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1658 data->client.ihandle);
1659 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1660 pr_err("ION memory mapping for client shared buf failed\n");
1661 return -ENOMEM;
1662 }
1663 data->client.sb_phys = (phys_addr_t)pa;
1664 data->client.sb_length = req.sb_len;
1665 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1666 return 0;
1667}
1668
Zhen Kong26e62742018-05-04 17:19:06 -07001669static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
1670 struct qseecom_registered_listener_list *ptr_svc)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001671{
1672 int ret;
1673
1674 ret = (qseecom.send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001675 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001676}
1677
1678static int __qseecom_reentrancy_listener_has_sent_rsp(
1679 struct qseecom_dev_handle *data,
1680 struct qseecom_registered_listener_list *ptr_svc)
1681{
1682 int ret;
1683
1684 ret = (ptr_svc->send_resp_flag != 0);
Zhen Kong26e62742018-05-04 17:19:06 -07001685 return ret || data->abort || ptr_svc->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001686}
1687
1688static void __qseecom_clean_listener_sglistinfo(
1689 struct qseecom_registered_listener_list *ptr_svc)
1690{
1691 if (ptr_svc->sglist_cnt) {
1692 memset(ptr_svc->sglistinfo_ptr, 0,
1693 SGLISTINFO_TABLE_SIZE);
1694 ptr_svc->sglist_cnt = 0;
1695 }
1696}
1697
1698static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
1699 struct qseecom_command_scm_resp *resp)
1700{
1701 int ret = 0;
1702 int rc = 0;
1703 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07001704 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
1705 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
1706 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001707 struct qseecom_registered_listener_list *ptr_svc = NULL;
1708 sigset_t new_sigset;
1709 sigset_t old_sigset;
1710 uint32_t status;
1711 void *cmd_buf = NULL;
1712 size_t cmd_len;
1713 struct sglist_info *table = NULL;
1714
Zhen Kongbcdeda22018-11-16 13:50:51 -08001715 qseecom.app_block_ref_cnt++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001716 while (resp->result == QSEOS_RESULT_INCOMPLETE) {
1717 lstnr = resp->data;
1718 /*
1719 * Wake up blocking lsitener service with the lstnr id
1720 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08001721 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001722 list_for_each_entry(ptr_svc,
1723 &qseecom.registered_listener_list_head, list) {
1724 if (ptr_svc->svc.listener_id == lstnr) {
1725 ptr_svc->listener_in_use = true;
1726 ptr_svc->rcv_req_flag = 1;
1727 wake_up_interruptible(&ptr_svc->rcv_req_wq);
1728 break;
1729 }
1730 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001731
1732 if (ptr_svc == NULL) {
1733 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07001734 rc = -EINVAL;
1735 status = QSEOS_RESULT_FAILURE;
1736 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001737 }
1738
1739 if (!ptr_svc->ihandle) {
1740 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07001741 rc = -EINVAL;
1742 status = QSEOS_RESULT_FAILURE;
1743 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001744 }
1745
1746 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07001747 pr_err("Service %d does not exist\n",
1748 lstnr);
1749 rc = -ERESTARTSYS;
1750 ptr_svc = NULL;
1751 status = QSEOS_RESULT_FAILURE;
1752 goto err_resp;
1753 }
1754
1755 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08001756 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07001757 lstnr, ptr_svc->abort);
1758 rc = -ENODEV;
1759 status = QSEOS_RESULT_FAILURE;
1760 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001761 }
Zhen Kong25731112018-09-20 13:10:03 -07001762
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001763 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
1764
1765 /* initialize the new signal mask with all signals*/
1766 sigfillset(&new_sigset);
1767 /* block all signals */
1768 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
1769
Zhen Kongbcdeda22018-11-16 13:50:51 -08001770 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001771 do {
1772 /*
1773 * When reentrancy is not supported, check global
1774 * send_resp_flag; otherwise, check this listener's
1775 * send_resp_flag.
1776 */
1777 if (!qseecom.qsee_reentrancy_support &&
1778 !wait_event_freezable(qseecom.send_resp_wq,
Zhen Kong26e62742018-05-04 17:19:06 -07001779 __qseecom_listener_has_sent_rsp(
1780 data, ptr_svc))) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001781 break;
1782 }
1783
1784 if (qseecom.qsee_reentrancy_support &&
1785 !wait_event_freezable(qseecom.send_resp_wq,
1786 __qseecom_reentrancy_listener_has_sent_rsp(
1787 data, ptr_svc))) {
1788 break;
1789 }
1790 } while (1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001791 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001792 /* restore signal mask */
1793 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07001794 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001795 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
1796 data->client.app_id, lstnr, ret);
1797 rc = -ENODEV;
1798 status = QSEOS_RESULT_FAILURE;
1799 } else {
1800 status = QSEOS_RESULT_SUCCESS;
1801 }
Zhen Kong26e62742018-05-04 17:19:06 -07001802err_resp:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001803 qseecom.send_resp_flag = 0;
Zhen Kong7d500032018-08-06 16:58:31 -07001804 if (ptr_svc) {
1805 ptr_svc->send_resp_flag = 0;
1806 table = ptr_svc->sglistinfo_ptr;
1807 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001808 if (qseecom.qsee_version < QSEE_VERSION_40) {
1809 send_data_rsp.listener_id = lstnr;
1810 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001811 if (table) {
1812 send_data_rsp.sglistinfo_ptr =
1813 (uint32_t)virt_to_phys(table);
1814 send_data_rsp.sglistinfo_len =
1815 SGLISTINFO_TABLE_SIZE;
1816 dmac_flush_range((void *)table,
1817 (void *)table + SGLISTINFO_TABLE_SIZE);
1818 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001819 cmd_buf = (void *)&send_data_rsp;
1820 cmd_len = sizeof(send_data_rsp);
1821 } else {
1822 send_data_rsp_64bit.listener_id = lstnr;
1823 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07001824 if (table) {
1825 send_data_rsp_64bit.sglistinfo_ptr =
1826 virt_to_phys(table);
1827 send_data_rsp_64bit.sglistinfo_len =
1828 SGLISTINFO_TABLE_SIZE;
1829 dmac_flush_range((void *)table,
1830 (void *)table + SGLISTINFO_TABLE_SIZE);
1831 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001832 cmd_buf = (void *)&send_data_rsp_64bit;
1833 cmd_len = sizeof(send_data_rsp_64bit);
1834 }
Zhen Kong7d500032018-08-06 16:58:31 -07001835 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001836 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
1837 else
1838 *(uint32_t *)cmd_buf =
1839 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07001840 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001841 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
1842 ptr_svc->ihandle,
1843 ptr_svc->sb_virt, ptr_svc->sb_length,
1844 ION_IOC_CLEAN_INV_CACHES);
1845 if (ret) {
1846 pr_err("cache operation failed %d\n", ret);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001847 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001848 }
1849 }
1850
1851 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
1852 ret = __qseecom_enable_clk(CLK_QSEE);
1853 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08001854 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001855 }
1856
1857 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1858 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07001859 if (ptr_svc) {
1860 ptr_svc->listener_in_use = false;
1861 __qseecom_clean_listener_sglistinfo(ptr_svc);
1862 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001863 if (ret) {
1864 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1865 ret, data->client.app_id);
1866 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1867 __qseecom_disable_clk(CLK_QSEE);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001868 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001869 }
Zhen Kong26e62742018-05-04 17:19:06 -07001870 pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
1871 status, resp->result, data->client.app_id, lstnr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001872 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1873 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1874 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1875 resp->result, data->client.app_id, lstnr);
1876 ret = -EINVAL;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001877 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001878 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001879exit:
1880 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001881 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1882 __qseecom_disable_clk(CLK_QSEE);
1883
1884 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08001885 qseecom.app_block_ref_cnt--;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001886 if (rc)
1887 return rc;
1888
1889 return ret;
1890}
1891
Zhen Konga91aaf02018-02-02 17:21:04 -08001892static int __qseecom_process_reentrancy_blocked_on_listener(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001893 struct qseecom_command_scm_resp *resp,
1894 struct qseecom_registered_app_list *ptr_app,
1895 struct qseecom_dev_handle *data)
1896{
1897 struct qseecom_registered_listener_list *list_ptr;
1898 int ret = 0;
1899 struct qseecom_continue_blocked_request_ireq ireq;
1900 struct qseecom_command_scm_resp continue_resp;
Zhen Konga91aaf02018-02-02 17:21:04 -08001901 unsigned int session_id;
Zhen Kong3d1d92f2018-02-02 17:21:04 -08001902 sigset_t new_sigset;
1903 sigset_t old_sigset;
Zhen Konga91aaf02018-02-02 17:21:04 -08001904 unsigned long flags;
1905 bool found_app = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001906
1907 if (!resp || !data) {
1908 pr_err("invalid resp or data pointer\n");
1909 ret = -EINVAL;
1910 goto exit;
1911 }
1912
1913 /* find app_id & img_name from list */
1914 if (!ptr_app) {
1915 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
1916 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
1917 list) {
1918 if ((ptr_app->app_id == data->client.app_id) &&
1919 (!strcmp(ptr_app->app_name,
1920 data->client.app_name))) {
1921 found_app = true;
1922 break;
1923 }
1924 }
1925 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
1926 flags);
1927 if (!found_app) {
1928 pr_err("app_id %d (%s) is not found\n",
1929 data->client.app_id,
1930 (char *)data->client.app_name);
1931 ret = -ENOENT;
1932 goto exit;
1933 }
1934 }
1935
Zhen Kongd8cc0052017-11-13 15:13:31 -08001936 do {
Zhen Konga91aaf02018-02-02 17:21:04 -08001937 session_id = resp->resp_type;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001938 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08001939 list_ptr = __qseecom_find_svc(resp->data);
1940 if (!list_ptr) {
1941 pr_err("Invalid listener ID %d\n", resp->data);
1942 ret = -ENODATA;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001943 mutex_unlock(&listener_access_lock);
Zhen Konge7f525f2017-12-01 18:26:25 -08001944 goto exit;
1945 }
Zhen Konga91aaf02018-02-02 17:21:04 -08001946 ptr_app->blocked_on_listener_id = resp->data;
1947
1948 pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
1949 resp->data, list_ptr->listener_in_use,
1950 session_id, data->client.app_id);
1951
1952 /* sleep until listener is available */
1953 sigfillset(&new_sigset);
1954 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
1955
1956 do {
1957 qseecom.app_block_ref_cnt++;
1958 ptr_app->app_blocked = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08001959 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08001960 mutex_unlock(&app_access_lock);
1961 wait_event_freezable(
1962 list_ptr->listener_block_app_wq,
1963 !list_ptr->listener_in_use);
1964 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001965 mutex_lock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08001966 ptr_app->app_blocked = false;
1967 qseecom.app_block_ref_cnt--;
1968 } while (list_ptr->listener_in_use);
1969
1970 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
1971
1972 ptr_app->blocked_on_listener_id = 0;
1973 pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
1974 resp->data, session_id, data->client.app_id);
1975
1976 /* notify TZ that listener is available */
1977 ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
1978
1979 if (qseecom.smcinvoke_support)
1980 ireq.app_or_session_id = session_id;
1981 else
1982 ireq.app_or_session_id = data->client.app_id;
1983
1984 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1985 &ireq, sizeof(ireq),
1986 &continue_resp, sizeof(continue_resp));
1987 if (ret && qseecom.smcinvoke_support) {
1988 /* retry with legacy cmd */
1989 qseecom.smcinvoke_support = false;
1990 ireq.app_or_session_id = data->client.app_id;
1991 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1992 &ireq, sizeof(ireq),
1993 &continue_resp, sizeof(continue_resp));
1994 qseecom.smcinvoke_support = true;
1995 if (ret) {
1996 pr_err("unblock app %d or session %d fail\n",
1997 data->client.app_id, session_id);
Zhen Kongbcdeda22018-11-16 13:50:51 -08001998 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08001999 goto exit;
2000 }
2001 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08002002 mutex_unlock(&listener_access_lock);
Zhen Konga91aaf02018-02-02 17:21:04 -08002003 resp->result = continue_resp.result;
2004 resp->resp_type = continue_resp.resp_type;
2005 resp->data = continue_resp.data;
2006 pr_debug("unblock resp = %d\n", resp->result);
2007 } while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);
2008
2009 if (resp->result != QSEOS_RESULT_INCOMPLETE) {
2010 pr_err("Unexpected unblock resp %d\n", resp->result);
2011 ret = -EINVAL;
Zhen Kong2f60f492017-06-29 15:22:14 -07002012 }
Zhen Kong2f60f492017-06-29 15:22:14 -07002013exit:
2014 return ret;
2015}
2016
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002017static int __qseecom_reentrancy_process_incomplete_cmd(
2018 struct qseecom_dev_handle *data,
2019 struct qseecom_command_scm_resp *resp)
2020{
2021 int ret = 0;
2022 int rc = 0;
2023 uint32_t lstnr;
Zhen Kong7d500032018-08-06 16:58:31 -07002024 struct qseecom_client_listener_data_irsp send_data_rsp = {0};
2025 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
2026 = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002027 struct qseecom_registered_listener_list *ptr_svc = NULL;
2028 sigset_t new_sigset;
2029 sigset_t old_sigset;
2030 uint32_t status;
2031 void *cmd_buf = NULL;
2032 size_t cmd_len;
2033 struct sglist_info *table = NULL;
2034
Zhen Kong26e62742018-05-04 17:19:06 -07002035 while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002036 lstnr = resp->data;
2037 /*
2038 * Wake up blocking lsitener service with the lstnr id
2039 */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002040 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002041 list_for_each_entry(ptr_svc,
2042 &qseecom.registered_listener_list_head, list) {
2043 if (ptr_svc->svc.listener_id == lstnr) {
2044 ptr_svc->listener_in_use = true;
2045 ptr_svc->rcv_req_flag = 1;
2046 wake_up_interruptible(&ptr_svc->rcv_req_wq);
2047 break;
2048 }
2049 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002050
2051 if (ptr_svc == NULL) {
2052 pr_err("Listener Svc %d does not exist\n", lstnr);
Zhen Kong26e62742018-05-04 17:19:06 -07002053 rc = -EINVAL;
2054 status = QSEOS_RESULT_FAILURE;
2055 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002056 }
2057
2058 if (!ptr_svc->ihandle) {
2059 pr_err("Client handle is not initialized\n");
Zhen Kong26e62742018-05-04 17:19:06 -07002060 rc = -EINVAL;
2061 status = QSEOS_RESULT_FAILURE;
2062 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002063 }
2064
2065 if (ptr_svc->svc.listener_id != lstnr) {
Zhen Kong26e62742018-05-04 17:19:06 -07002066 pr_err("Service %d does not exist\n",
2067 lstnr);
2068 rc = -ERESTARTSYS;
2069 ptr_svc = NULL;
2070 status = QSEOS_RESULT_FAILURE;
2071 goto err_resp;
2072 }
2073
2074 if (ptr_svc->abort == 1) {
Zhen Kongbcdeda22018-11-16 13:50:51 -08002075 pr_debug("Service %d abort %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07002076 lstnr, ptr_svc->abort);
2077 rc = -ENODEV;
2078 status = QSEOS_RESULT_FAILURE;
2079 goto err_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002080 }
Zhen Kong25731112018-09-20 13:10:03 -07002081
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002082 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
2083
2084 /* initialize the new signal mask with all signals*/
2085 sigfillset(&new_sigset);
2086
2087 /* block all signals */
2088 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2089
2090 /* unlock mutex btw waking listener and sleep-wait */
Zhen Kongbcdeda22018-11-16 13:50:51 -08002091 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002092 mutex_unlock(&app_access_lock);
2093 do {
2094 if (!wait_event_freezable(qseecom.send_resp_wq,
2095 __qseecom_reentrancy_listener_has_sent_rsp(
2096 data, ptr_svc))) {
2097 break;
2098 }
2099 } while (1);
2100 /* lock mutex again after resp sent */
2101 mutex_lock(&app_access_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08002102 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002103 ptr_svc->send_resp_flag = 0;
2104 qseecom.send_resp_flag = 0;
2105
2106 /* restore signal mask */
2107 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
Zhen Kong26e62742018-05-04 17:19:06 -07002108 if (data->abort || ptr_svc->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002109 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
2110 data->client.app_id, lstnr, ret);
2111 rc = -ENODEV;
2112 status = QSEOS_RESULT_FAILURE;
2113 } else {
2114 status = QSEOS_RESULT_SUCCESS;
2115 }
Zhen Kong26e62742018-05-04 17:19:06 -07002116err_resp:
Zhen Kong7d500032018-08-06 16:58:31 -07002117 if (ptr_svc)
2118 table = ptr_svc->sglistinfo_ptr;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002119 if (qseecom.qsee_version < QSEE_VERSION_40) {
2120 send_data_rsp.listener_id = lstnr;
2121 send_data_rsp.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002122 if (table) {
2123 send_data_rsp.sglistinfo_ptr =
2124 (uint32_t)virt_to_phys(table);
2125 send_data_rsp.sglistinfo_len =
2126 SGLISTINFO_TABLE_SIZE;
2127 dmac_flush_range((void *)table,
2128 (void *)table + SGLISTINFO_TABLE_SIZE);
2129 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002130 cmd_buf = (void *)&send_data_rsp;
2131 cmd_len = sizeof(send_data_rsp);
2132 } else {
2133 send_data_rsp_64bit.listener_id = lstnr;
2134 send_data_rsp_64bit.status = status;
Zhen Kong7d500032018-08-06 16:58:31 -07002135 if (table) {
2136 send_data_rsp_64bit.sglistinfo_ptr =
2137 virt_to_phys(table);
2138 send_data_rsp_64bit.sglistinfo_len =
2139 SGLISTINFO_TABLE_SIZE;
2140 dmac_flush_range((void *)table,
2141 (void *)table + SGLISTINFO_TABLE_SIZE);
2142 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002143 cmd_buf = (void *)&send_data_rsp_64bit;
2144 cmd_len = sizeof(send_data_rsp_64bit);
2145 }
Zhen Kong7d500032018-08-06 16:58:31 -07002146 if (qseecom.whitelist_support == false || table == NULL)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002147 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
2148 else
2149 *(uint32_t *)cmd_buf =
2150 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
Zhen Kong4b8af612018-11-03 17:01:11 -07002151 if (ptr_svc && ptr_svc->ihandle) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002152 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
2153 ptr_svc->ihandle,
2154 ptr_svc->sb_virt, ptr_svc->sb_length,
2155 ION_IOC_CLEAN_INV_CACHES);
2156 if (ret) {
2157 pr_err("cache operation failed %d\n", ret);
2158 return ret;
2159 }
2160 }
2161 if (lstnr == RPMB_SERVICE) {
2162 ret = __qseecom_enable_clk(CLK_QSEE);
2163 if (ret)
Zhen Kongbcdeda22018-11-16 13:50:51 -08002164 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002165 }
2166
2167 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2168 cmd_buf, cmd_len, resp, sizeof(*resp));
Zhen Kong7d500032018-08-06 16:58:31 -07002169 if (ptr_svc) {
2170 ptr_svc->listener_in_use = false;
2171 __qseecom_clean_listener_sglistinfo(ptr_svc);
2172 wake_up_interruptible(&ptr_svc->listener_block_app_wq);
2173 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002174
2175 if (ret) {
2176 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
2177 ret, data->client.app_id);
2178 goto exit;
2179 }
2180
2181 switch (resp->result) {
2182 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
2183 pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
2184 lstnr, data->client.app_id, resp->data);
2185 if (lstnr == resp->data) {
2186 pr_err("lstnr %d should not be blocked!\n",
2187 lstnr);
2188 ret = -EINVAL;
2189 goto exit;
2190 }
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002191 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002192 ret = __qseecom_process_reentrancy_blocked_on_listener(
2193 resp, NULL, data);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08002194 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002195 if (ret) {
2196 pr_err("failed to process App(%d) %s blocked on listener %d\n",
2197 data->client.app_id,
2198 data->client.app_name, resp->data);
2199 goto exit;
2200 }
2201 case QSEOS_RESULT_SUCCESS:
2202 case QSEOS_RESULT_INCOMPLETE:
2203 break;
2204 default:
2205 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
2206 resp->result, data->client.app_id, lstnr);
2207 ret = -EINVAL;
2208 goto exit;
2209 }
2210exit:
Zhen Kongbcdeda22018-11-16 13:50:51 -08002211 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002212 if (lstnr == RPMB_SERVICE)
2213 __qseecom_disable_clk(CLK_QSEE);
2214
2215 }
2216 if (rc)
2217 return rc;
2218
2219 return ret;
2220}
2221
2222/*
2223 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2224 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2225 * So, needs to first check if no app blocked before sending OS level scm call,
2226 * then wait until all apps are unblocked.
2227 */
2228static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2229{
2230 sigset_t new_sigset, old_sigset;
2231
2232 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2233 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2234 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2235 /* thread sleep until this app unblocked */
2236 while (qseecom.app_block_ref_cnt > 0) {
2237 sigfillset(&new_sigset);
2238 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2239 mutex_unlock(&app_access_lock);
2240 do {
2241 if (!wait_event_freezable(qseecom.app_block_wq,
2242 (qseecom.app_block_ref_cnt == 0)))
2243 break;
2244 } while (1);
2245 mutex_lock(&app_access_lock);
2246 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2247 }
2248 }
2249}
2250
2251/*
2252 * scm_call of send data will fail if this TA is blocked or there are more
2253 * than one TA requesting listener services; So, first check to see if need
2254 * to wait.
2255 */
2256static void __qseecom_reentrancy_check_if_this_app_blocked(
2257 struct qseecom_registered_app_list *ptr_app)
2258{
2259 sigset_t new_sigset, old_sigset;
2260
2261 if (qseecom.qsee_reentrancy_support) {
Zhen Kongdea10592018-07-30 17:50:10 -07002262 ptr_app->check_block++;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002263 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2264 /* thread sleep until this app unblocked */
2265 sigfillset(&new_sigset);
2266 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2267 mutex_unlock(&app_access_lock);
2268 do {
2269 if (!wait_event_freezable(qseecom.app_block_wq,
2270 (!ptr_app->app_blocked &&
2271 qseecom.app_block_ref_cnt <= 1)))
2272 break;
2273 } while (1);
2274 mutex_lock(&app_access_lock);
2275 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2276 }
Zhen Kongdea10592018-07-30 17:50:10 -07002277 ptr_app->check_block--;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002278 }
2279}
2280
2281static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2282 uint32_t *app_id)
2283{
2284 int32_t ret;
2285 struct qseecom_command_scm_resp resp;
2286 bool found_app = false;
2287 struct qseecom_registered_app_list *entry = NULL;
2288 unsigned long flags = 0;
2289
2290 if (!app_id) {
2291 pr_err("Null pointer to app_id\n");
2292 return -EINVAL;
2293 }
2294 *app_id = 0;
2295
2296 /* check if app exists and has been registered locally */
2297 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2298 list_for_each_entry(entry,
2299 &qseecom.registered_app_list_head, list) {
2300 if (!strcmp(entry->app_name, req.app_name)) {
2301 found_app = true;
2302 break;
2303 }
2304 }
2305 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2306 if (found_app) {
2307 pr_debug("Found app with id %d\n", entry->app_id);
2308 *app_id = entry->app_id;
2309 return 0;
2310 }
2311
2312 memset((void *)&resp, 0, sizeof(resp));
2313
2314 /* SCM_CALL to check if app_id for the mentioned app exists */
2315 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2316 sizeof(struct qseecom_check_app_ireq),
2317 &resp, sizeof(resp));
2318 if (ret) {
2319 pr_err("scm_call to check if app is already loaded failed\n");
2320 return -EINVAL;
2321 }
2322
2323 if (resp.result == QSEOS_RESULT_FAILURE)
2324 return 0;
2325
2326 switch (resp.resp_type) {
2327 /*qsee returned listener type response */
2328 case QSEOS_LISTENER_ID:
2329 pr_err("resp type is of listener type instead of app");
2330 return -EINVAL;
2331 case QSEOS_APP_ID:
2332 *app_id = resp.data;
2333 return 0;
2334 default:
2335 pr_err("invalid resp type (%d) from qsee",
2336 resp.resp_type);
2337 return -ENODEV;
2338 }
2339}
2340
2341static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2342{
2343 struct qseecom_registered_app_list *entry = NULL;
2344 unsigned long flags = 0;
2345 u32 app_id = 0;
2346 struct ion_handle *ihandle; /* Ion handle */
2347 struct qseecom_load_img_req load_img_req;
2348 int32_t ret = 0;
2349 ion_phys_addr_t pa = 0;
2350 size_t len;
2351 struct qseecom_command_scm_resp resp;
2352 struct qseecom_check_app_ireq req;
2353 struct qseecom_load_app_ireq load_req;
2354 struct qseecom_load_app_64bit_ireq load_req_64bit;
2355 void *cmd_buf = NULL;
2356 size_t cmd_len;
2357 bool first_time = false;
2358
2359 /* Copy the relevant information needed for loading the image */
2360 if (copy_from_user(&load_img_req,
2361 (void __user *)argp,
2362 sizeof(struct qseecom_load_img_req))) {
2363 pr_err("copy_from_user failed\n");
2364 return -EFAULT;
2365 }
2366
2367 /* Check and load cmnlib */
2368 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2369 if (!qseecom.commonlib_loaded &&
2370 load_img_req.app_arch == ELFCLASS32) {
2371 ret = qseecom_load_commonlib_image(data, "cmnlib");
2372 if (ret) {
2373 pr_err("failed to load cmnlib\n");
2374 return -EIO;
2375 }
2376 qseecom.commonlib_loaded = true;
2377 pr_debug("cmnlib is loaded\n");
2378 }
2379
2380 if (!qseecom.commonlib64_loaded &&
2381 load_img_req.app_arch == ELFCLASS64) {
2382 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2383 if (ret) {
2384 pr_err("failed to load cmnlib64\n");
2385 return -EIO;
2386 }
2387 qseecom.commonlib64_loaded = true;
2388 pr_debug("cmnlib64 is loaded\n");
2389 }
2390 }
2391
2392 if (qseecom.support_bus_scaling) {
2393 mutex_lock(&qsee_bw_mutex);
2394 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2395 mutex_unlock(&qsee_bw_mutex);
2396 if (ret)
2397 return ret;
2398 }
2399
2400 /* Vote for the SFPB clock */
2401 ret = __qseecom_enable_clk_scale_up(data);
2402 if (ret)
2403 goto enable_clk_err;
2404
2405 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2406 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2407 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2408
2409 ret = __qseecom_check_app_exists(req, &app_id);
2410 if (ret < 0)
2411 goto loadapp_err;
2412
2413 if (app_id) {
2414 pr_debug("App id %d (%s) already exists\n", app_id,
2415 (char *)(req.app_name));
2416 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2417 list_for_each_entry(entry,
2418 &qseecom.registered_app_list_head, list){
2419 if (entry->app_id == app_id) {
2420 entry->ref_cnt++;
2421 break;
2422 }
2423 }
2424 spin_unlock_irqrestore(
2425 &qseecom.registered_app_list_lock, flags);
2426 ret = 0;
2427 } else {
2428 first_time = true;
2429 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2430 (char *)(load_img_req.img_name));
2431 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002432 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002433 load_img_req.ifd_data_fd);
2434 if (IS_ERR_OR_NULL(ihandle)) {
2435 pr_err("Ion client could not retrieve the handle\n");
2436 ret = -ENOMEM;
2437 goto loadapp_err;
2438 }
2439
2440 /* Get the physical address of the ION BUF */
2441 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2442 if (ret) {
2443 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2444 ret);
2445 goto loadapp_err;
2446 }
2447 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2448 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2449 len, load_img_req.mdt_len,
2450 load_img_req.img_len);
2451 ret = -EINVAL;
2452 goto loadapp_err;
2453 }
2454 /* Populate the structure for sending scm call to load image */
2455 if (qseecom.qsee_version < QSEE_VERSION_40) {
2456 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2457 load_req.mdt_len = load_img_req.mdt_len;
2458 load_req.img_len = load_img_req.img_len;
2459 strlcpy(load_req.app_name, load_img_req.img_name,
2460 MAX_APP_NAME_SIZE);
2461 load_req.phy_addr = (uint32_t)pa;
2462 cmd_buf = (void *)&load_req;
2463 cmd_len = sizeof(struct qseecom_load_app_ireq);
2464 } else {
2465 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2466 load_req_64bit.mdt_len = load_img_req.mdt_len;
2467 load_req_64bit.img_len = load_img_req.img_len;
2468 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2469 MAX_APP_NAME_SIZE);
2470 load_req_64bit.phy_addr = (uint64_t)pa;
2471 cmd_buf = (void *)&load_req_64bit;
2472 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2473 }
2474
2475 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2476 ION_IOC_CLEAN_INV_CACHES);
2477 if (ret) {
2478 pr_err("cache operation failed %d\n", ret);
2479 goto loadapp_err;
2480 }
2481
2482 /* SCM_CALL to load the app and get the app_id back */
2483 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2484 cmd_len, &resp, sizeof(resp));
2485 if (ret) {
2486 pr_err("scm_call to load app failed\n");
2487 if (!IS_ERR_OR_NULL(ihandle))
2488 ion_free(qseecom.ion_clnt, ihandle);
2489 ret = -EINVAL;
2490 goto loadapp_err;
2491 }
2492
2493 if (resp.result == QSEOS_RESULT_FAILURE) {
2494 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2495 if (!IS_ERR_OR_NULL(ihandle))
2496 ion_free(qseecom.ion_clnt, ihandle);
2497 ret = -EFAULT;
2498 goto loadapp_err;
2499 }
2500
2501 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2502 ret = __qseecom_process_incomplete_cmd(data, &resp);
2503 if (ret) {
2504 pr_err("process_incomplete_cmd failed err: %d\n",
2505 ret);
2506 if (!IS_ERR_OR_NULL(ihandle))
2507 ion_free(qseecom.ion_clnt, ihandle);
2508 ret = -EFAULT;
2509 goto loadapp_err;
2510 }
2511 }
2512
2513 if (resp.result != QSEOS_RESULT_SUCCESS) {
2514 pr_err("scm_call failed resp.result unknown, %d\n",
2515 resp.result);
2516 if (!IS_ERR_OR_NULL(ihandle))
2517 ion_free(qseecom.ion_clnt, ihandle);
2518 ret = -EFAULT;
2519 goto loadapp_err;
2520 }
2521
2522 app_id = resp.data;
2523
2524 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2525 if (!entry) {
2526 ret = -ENOMEM;
2527 goto loadapp_err;
2528 }
2529 entry->app_id = app_id;
2530 entry->ref_cnt = 1;
2531 entry->app_arch = load_img_req.app_arch;
2532 /*
2533 * keymaster app may be first loaded as "keymaste" by qseecomd,
2534 * and then used as "keymaster" on some targets. To avoid app
2535 * name checking error, register "keymaster" into app_list and
2536 * thread private data.
2537 */
2538 if (!strcmp(load_img_req.img_name, "keymaste"))
2539 strlcpy(entry->app_name, "keymaster",
2540 MAX_APP_NAME_SIZE);
2541 else
2542 strlcpy(entry->app_name, load_img_req.img_name,
2543 MAX_APP_NAME_SIZE);
2544 entry->app_blocked = false;
2545 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07002546 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002547
2548 /* Deallocate the handle */
2549 if (!IS_ERR_OR_NULL(ihandle))
2550 ion_free(qseecom.ion_clnt, ihandle);
2551
2552 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2553 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2554 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2555 flags);
2556
2557 pr_warn("App with id %u (%s) now loaded\n", app_id,
2558 (char *)(load_img_req.img_name));
2559 }
2560 data->client.app_id = app_id;
2561 data->client.app_arch = load_img_req.app_arch;
2562 if (!strcmp(load_img_req.img_name, "keymaste"))
2563 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2564 else
2565 strlcpy(data->client.app_name, load_img_req.img_name,
2566 MAX_APP_NAME_SIZE);
2567 load_img_req.app_id = app_id;
2568 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2569 pr_err("copy_to_user failed\n");
2570 ret = -EFAULT;
2571 if (first_time == true) {
2572 spin_lock_irqsave(
2573 &qseecom.registered_app_list_lock, flags);
2574 list_del(&entry->list);
2575 spin_unlock_irqrestore(
2576 &qseecom.registered_app_list_lock, flags);
2577 kzfree(entry);
2578 }
2579 }
2580
2581loadapp_err:
2582 __qseecom_disable_clk_scale_down(data);
2583enable_clk_err:
2584 if (qseecom.support_bus_scaling) {
2585 mutex_lock(&qsee_bw_mutex);
2586 qseecom_unregister_bus_bandwidth_needs(data);
2587 mutex_unlock(&qsee_bw_mutex);
2588 }
2589 return ret;
2590}
2591
2592static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2593{
2594 int ret = 1; /* Set unload app */
2595
2596 wake_up_all(&qseecom.send_resp_wq);
2597 if (qseecom.qsee_reentrancy_support)
2598 mutex_unlock(&app_access_lock);
2599 while (atomic_read(&data->ioctl_count) > 1) {
2600 if (wait_event_freezable(data->abort_wq,
2601 atomic_read(&data->ioctl_count) <= 1)) {
2602 pr_err("Interrupted from abort\n");
2603 ret = -ERESTARTSYS;
2604 break;
2605 }
2606 }
2607 if (qseecom.qsee_reentrancy_support)
2608 mutex_lock(&app_access_lock);
2609 return ret;
2610}
2611
2612static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2613{
2614 int ret = 0;
2615
2616 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2617 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2618 ion_free(qseecom.ion_clnt, data->client.ihandle);
2619 data->client.ihandle = NULL;
2620 }
2621 return ret;
2622}
2623
2624static int qseecom_unload_app(struct qseecom_dev_handle *data,
2625 bool app_crash)
2626{
2627 unsigned long flags;
2628 unsigned long flags1;
2629 int ret = 0;
2630 struct qseecom_command_scm_resp resp;
2631 struct qseecom_registered_app_list *ptr_app = NULL;
2632 bool unload = false;
2633 bool found_app = false;
2634 bool found_dead_app = false;
2635
2636 if (!data) {
2637 pr_err("Invalid/uninitialized device handle\n");
2638 return -EINVAL;
2639 }
2640
2641 if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
2642 pr_debug("Do not unload keymaster app from tz\n");
2643 goto unload_exit;
2644 }
2645
2646 __qseecom_cleanup_app(data);
2647 __qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
2648
2649 if (data->client.app_id > 0) {
2650 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2651 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
2652 list) {
2653 if (ptr_app->app_id == data->client.app_id) {
2654 if (!strcmp((void *)ptr_app->app_name,
2655 (void *)data->client.app_name)) {
2656 found_app = true;
Zhen Kong024798b2018-07-13 18:14:26 -07002657 if (ptr_app->app_blocked ||
2658 ptr_app->check_block)
Zhen Kongaf93d7a2017-10-13 14:01:48 -07002659 app_crash = false;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002660 if (app_crash || ptr_app->ref_cnt == 1)
2661 unload = true;
2662 break;
2663 }
2664 found_dead_app = true;
2665 break;
2666 }
2667 }
2668 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2669 flags);
2670 if (found_app == false && found_dead_app == false) {
2671 pr_err("Cannot find app with id = %d (%s)\n",
2672 data->client.app_id,
2673 (char *)data->client.app_name);
2674 ret = -EINVAL;
2675 goto unload_exit;
2676 }
2677 }
2678
2679 if (found_dead_app)
2680 pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
2681 (char *)data->client.app_name);
2682
2683 if (unload) {
2684 struct qseecom_unload_app_ireq req;
2685 /* Populate the structure for sending scm call to load image */
2686 req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
2687 req.app_id = data->client.app_id;
2688
2689 /* SCM_CALL to unload the app */
2690 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2691 sizeof(struct qseecom_unload_app_ireq),
2692 &resp, sizeof(resp));
2693 if (ret) {
2694 pr_err("scm_call to unload app (id = %d) failed\n",
2695 req.app_id);
2696 ret = -EFAULT;
2697 goto unload_exit;
2698 } else {
2699 pr_warn("App id %d now unloaded\n", req.app_id);
2700 }
2701 if (resp.result == QSEOS_RESULT_FAILURE) {
2702 pr_err("app (%d) unload_failed!!\n",
2703 data->client.app_id);
2704 ret = -EFAULT;
2705 goto unload_exit;
2706 }
2707 if (resp.result == QSEOS_RESULT_SUCCESS)
2708 pr_debug("App (%d) is unloaded!!\n",
2709 data->client.app_id);
2710 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2711 ret = __qseecom_process_incomplete_cmd(data, &resp);
2712 if (ret) {
2713 pr_err("process_incomplete_cmd fail err: %d\n",
2714 ret);
2715 goto unload_exit;
2716 }
2717 }
2718 }
2719
Zhen Kong7d500032018-08-06 16:58:31 -07002720unload_exit:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002721 if (found_app) {
2722 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
2723 if (app_crash) {
2724 ptr_app->ref_cnt = 0;
2725 pr_debug("app_crash: ref_count = 0\n");
2726 } else {
2727 if (ptr_app->ref_cnt == 1) {
2728 ptr_app->ref_cnt = 0;
2729 pr_debug("ref_count set to 0\n");
2730 } else {
2731 ptr_app->ref_cnt--;
2732 pr_debug("Can't unload app(%d) inuse\n",
2733 ptr_app->app_id);
2734 }
2735 }
2736 if (unload) {
2737 list_del(&ptr_app->list);
2738 kzfree(ptr_app);
2739 }
2740 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2741 flags1);
2742 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002743 qseecom_unmap_ion_allocated_memory(data);
2744 data->released = true;
2745 return ret;
2746}
2747
2748static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2749 unsigned long virt)
2750{
2751 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2752}
2753
2754static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2755 unsigned long virt)
2756{
2757 return (uintptr_t)data->client.sb_virt +
2758 (virt - data->client.user_virt_sb_base);
2759}
2760
2761int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2762 struct qseecom_send_svc_cmd_req *req_ptr,
2763 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2764{
2765 int ret = 0;
2766 void *req_buf = NULL;
2767
2768 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2769 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2770 req_ptr, send_svc_ireq_ptr);
2771 return -EINVAL;
2772 }
2773
2774 /* Clients need to ensure req_buf is at base offset of shared buffer */
2775 if ((uintptr_t)req_ptr->cmd_req_buf !=
2776 data_ptr->client.user_virt_sb_base) {
2777 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2778 return -EINVAL;
2779 }
2780
2781 if (data_ptr->client.sb_length <
2782 sizeof(struct qseecom_rpmb_provision_key)) {
2783 pr_err("shared buffer is too small to hold key type\n");
2784 return -EINVAL;
2785 }
2786 req_buf = data_ptr->client.sb_virt;
2787
2788 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2789 send_svc_ireq_ptr->key_type =
2790 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2791 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2792 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2793 data_ptr, (uintptr_t)req_ptr->resp_buf));
2794 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2795
2796 return ret;
2797}
2798
2799int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2800 struct qseecom_send_svc_cmd_req *req_ptr,
2801 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2802{
2803 int ret = 0;
2804 uint32_t reqd_len_sb_in = 0;
2805
2806 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2807 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2808 req_ptr, send_svc_ireq_ptr);
2809 return -EINVAL;
2810 }
2811
2812 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2813 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2814 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2815 pr_err("Required: %u, Available: %zu\n",
2816 reqd_len_sb_in, data_ptr->client.sb_length);
2817 return -ENOMEM;
2818 }
2819
2820 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2821 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2822 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2823 data_ptr, (uintptr_t)req_ptr->resp_buf));
2824 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2825
2826 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2827 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2828
2829
2830 return ret;
2831}
2832
2833static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
2834 struct qseecom_send_svc_cmd_req *req)
2835{
2836 if (!req || !req->resp_buf || !req->cmd_req_buf) {
2837 pr_err("req or cmd buffer or response buffer is null\n");
2838 return -EINVAL;
2839 }
2840
2841 if (!data || !data->client.ihandle) {
2842 pr_err("Client or client handle is not initialized\n");
2843 return -EINVAL;
2844 }
2845
2846 if (data->client.sb_virt == NULL) {
2847 pr_err("sb_virt null\n");
2848 return -EINVAL;
2849 }
2850
2851 if (data->client.user_virt_sb_base == 0) {
2852 pr_err("user_virt_sb_base is null\n");
2853 return -EINVAL;
2854 }
2855
2856 if (data->client.sb_length == 0) {
2857 pr_err("sb_length is 0\n");
2858 return -EINVAL;
2859 }
2860
2861 if (((uintptr_t)req->cmd_req_buf <
2862 data->client.user_virt_sb_base) ||
2863 ((uintptr_t)req->cmd_req_buf >=
2864 (data->client.user_virt_sb_base + data->client.sb_length))) {
2865 pr_err("cmd buffer address not within shared bufffer\n");
2866 return -EINVAL;
2867 }
2868 if (((uintptr_t)req->resp_buf <
2869 data->client.user_virt_sb_base) ||
2870 ((uintptr_t)req->resp_buf >=
2871 (data->client.user_virt_sb_base + data->client.sb_length))) {
2872 pr_err("response buffer address not within shared bufffer\n");
2873 return -EINVAL;
2874 }
2875 if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
2876 (req->cmd_req_len > data->client.sb_length) ||
2877 (req->resp_len > data->client.sb_length)) {
2878 pr_err("cmd buf length or response buf length not valid\n");
2879 return -EINVAL;
2880 }
2881 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
2882 pr_err("Integer overflow detected in req_len & rsp_len\n");
2883 return -EINVAL;
2884 }
2885
2886 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
2887 pr_debug("Not enough memory to fit cmd_buf.\n");
2888 pr_debug("resp_buf. Required: %u, Available: %zu\n",
2889 (req->cmd_req_len + req->resp_len),
2890 data->client.sb_length);
2891 return -ENOMEM;
2892 }
2893 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
2894 pr_err("Integer overflow in req_len & cmd_req_buf\n");
2895 return -EINVAL;
2896 }
2897 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
2898 pr_err("Integer overflow in resp_len & resp_buf\n");
2899 return -EINVAL;
2900 }
2901 if (data->client.user_virt_sb_base >
2902 (ULONG_MAX - data->client.sb_length)) {
2903 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
2904 return -EINVAL;
2905 }
2906 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
2907 ((uintptr_t)data->client.user_virt_sb_base +
2908 data->client.sb_length)) ||
2909 (((uintptr_t)req->resp_buf + req->resp_len) >
2910 ((uintptr_t)data->client.user_virt_sb_base +
2911 data->client.sb_length))) {
2912 pr_err("cmd buf or resp buf is out of shared buffer region\n");
2913 return -EINVAL;
2914 }
2915 return 0;
2916}
2917
2918static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
2919 void __user *argp)
2920{
2921 int ret = 0;
2922 struct qseecom_client_send_service_ireq send_svc_ireq;
2923 struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
2924 struct qseecom_command_scm_resp resp;
2925 struct qseecom_send_svc_cmd_req req;
2926 void *send_req_ptr;
2927 size_t req_buf_size;
2928
2929 /*struct qseecom_command_scm_resp resp;*/
2930
2931 if (copy_from_user(&req,
2932 (void __user *)argp,
2933 sizeof(req))) {
2934 pr_err("copy_from_user failed\n");
2935 return -EFAULT;
2936 }
2937
2938 if (__validate_send_service_cmd_inputs(data, &req))
2939 return -EINVAL;
2940
2941 data->type = QSEECOM_SECURE_SERVICE;
2942
2943 switch (req.cmd_id) {
2944 case QSEOS_RPMB_PROVISION_KEY_COMMAND:
2945 case QSEOS_RPMB_ERASE_COMMAND:
2946 case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
2947 send_req_ptr = &send_svc_ireq;
2948 req_buf_size = sizeof(send_svc_ireq);
2949 if (__qseecom_process_rpmb_svc_cmd(data, &req,
2950 send_req_ptr))
2951 return -EINVAL;
2952 break;
2953 case QSEOS_FSM_LTEOTA_REQ_CMD:
2954 case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
2955 case QSEOS_FSM_IKE_REQ_CMD:
2956 case QSEOS_FSM_IKE_REQ_RSP_CMD:
2957 case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
2958 case QSEOS_FSM_OEM_FUSE_READ_ROW:
2959 case QSEOS_FSM_ENCFS_REQ_CMD:
2960 case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
2961 send_req_ptr = &send_fsm_key_svc_ireq;
2962 req_buf_size = sizeof(send_fsm_key_svc_ireq);
2963 if (__qseecom_process_fsm_key_svc_cmd(data, &req,
2964 send_req_ptr))
2965 return -EINVAL;
2966 break;
2967 default:
2968 pr_err("Unsupported cmd_id %d\n", req.cmd_id);
2969 return -EINVAL;
2970 }
2971
2972 if (qseecom.support_bus_scaling) {
2973 ret = qseecom_scale_bus_bandwidth_timer(HIGH);
2974 if (ret) {
2975 pr_err("Fail to set bw HIGH\n");
2976 return ret;
2977 }
2978 } else {
2979 ret = qseecom_perf_enable(data);
2980 if (ret) {
2981 pr_err("Failed to vote for clocks with err %d\n", ret);
2982 goto exit;
2983 }
2984 }
2985
2986 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
2987 data->client.sb_virt, data->client.sb_length,
2988 ION_IOC_CLEAN_INV_CACHES);
2989 if (ret) {
2990 pr_err("cache operation failed %d\n", ret);
2991 goto exit;
2992 }
2993 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2994 (const void *)send_req_ptr,
2995 req_buf_size, &resp, sizeof(resp));
2996 if (ret) {
2997 pr_err("qseecom_scm_call failed with err: %d\n", ret);
2998 if (!qseecom.support_bus_scaling) {
2999 qsee_disable_clock_vote(data, CLK_DFAB);
3000 qsee_disable_clock_vote(data, CLK_SFPB);
3001 } else {
3002 __qseecom_add_bw_scale_down_timer(
3003 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3004 }
3005 goto exit;
3006 }
3007 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3008 data->client.sb_virt, data->client.sb_length,
3009 ION_IOC_INV_CACHES);
3010 if (ret) {
3011 pr_err("cache operation failed %d\n", ret);
3012 goto exit;
3013 }
3014 switch (resp.result) {
3015 case QSEOS_RESULT_SUCCESS:
3016 break;
3017 case QSEOS_RESULT_INCOMPLETE:
3018 pr_debug("qseos_result_incomplete\n");
3019 ret = __qseecom_process_incomplete_cmd(data, &resp);
3020 if (ret) {
3021 pr_err("process_incomplete_cmd fail with result: %d\n",
3022 resp.result);
3023 }
3024 if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
3025 pr_warn("RPMB key status is 0x%x\n", resp.result);
Brahmaji Kb33e26e2017-06-01 17:20:10 +05303026 if (put_user(resp.result,
3027 (uint32_t __user *)req.resp_buf)) {
3028 ret = -EINVAL;
3029 goto exit;
3030 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003031 ret = 0;
3032 }
3033 break;
3034 case QSEOS_RESULT_FAILURE:
3035 pr_err("scm call failed with resp.result: %d\n", resp.result);
3036 ret = -EINVAL;
3037 break;
3038 default:
3039 pr_err("Response result %d not supported\n",
3040 resp.result);
3041 ret = -EINVAL;
3042 break;
3043 }
3044 if (!qseecom.support_bus_scaling) {
3045 qsee_disable_clock_vote(data, CLK_DFAB);
3046 qsee_disable_clock_vote(data, CLK_SFPB);
3047 } else {
3048 __qseecom_add_bw_scale_down_timer(
3049 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
3050 }
3051
3052exit:
3053 return ret;
3054}
3055
3056static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
3057 struct qseecom_send_cmd_req *req)
3058
3059{
3060 if (!data || !data->client.ihandle) {
3061 pr_err("Client or client handle is not initialized\n");
3062 return -EINVAL;
3063 }
3064 if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
3065 (req->cmd_req_buf == NULL)) {
3066 pr_err("cmd buffer or response buffer is null\n");
3067 return -EINVAL;
3068 }
3069 if (((uintptr_t)req->cmd_req_buf <
3070 data->client.user_virt_sb_base) ||
3071 ((uintptr_t)req->cmd_req_buf >=
3072 (data->client.user_virt_sb_base + data->client.sb_length))) {
3073 pr_err("cmd buffer address not within shared bufffer\n");
3074 return -EINVAL;
3075 }
3076 if (((uintptr_t)req->resp_buf <
3077 data->client.user_virt_sb_base) ||
3078 ((uintptr_t)req->resp_buf >=
3079 (data->client.user_virt_sb_base + data->client.sb_length))) {
3080 pr_err("response buffer address not within shared bufffer\n");
3081 return -EINVAL;
3082 }
3083 if ((req->cmd_req_len == 0) ||
3084 (req->cmd_req_len > data->client.sb_length) ||
3085 (req->resp_len > data->client.sb_length)) {
3086 pr_err("cmd buf length or response buf length not valid\n");
3087 return -EINVAL;
3088 }
3089 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
3090 pr_err("Integer overflow detected in req_len & rsp_len\n");
3091 return -EINVAL;
3092 }
3093
3094 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
3095 pr_debug("Not enough memory to fit cmd_buf.\n");
3096 pr_debug("resp_buf. Required: %u, Available: %zu\n",
3097 (req->cmd_req_len + req->resp_len),
3098 data->client.sb_length);
3099 return -ENOMEM;
3100 }
3101 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
3102 pr_err("Integer overflow in req_len & cmd_req_buf\n");
3103 return -EINVAL;
3104 }
3105 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
3106 pr_err("Integer overflow in resp_len & resp_buf\n");
3107 return -EINVAL;
3108 }
3109 if (data->client.user_virt_sb_base >
3110 (ULONG_MAX - data->client.sb_length)) {
3111 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
3112 return -EINVAL;
3113 }
3114 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3115 ((uintptr_t)data->client.user_virt_sb_base +
3116 data->client.sb_length)) ||
3117 (((uintptr_t)req->resp_buf + req->resp_len) >
3118 ((uintptr_t)data->client.user_virt_sb_base +
3119 data->client.sb_length))) {
3120 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3121 return -EINVAL;
3122 }
3123 return 0;
3124}
3125
3126int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
3127 struct qseecom_registered_app_list *ptr_app,
3128 struct qseecom_dev_handle *data)
3129{
3130 int ret = 0;
3131
3132 switch (resp->result) {
3133 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
3134 pr_warn("App(%d) %s is blocked on listener %d\n",
3135 data->client.app_id, data->client.app_name,
3136 resp->data);
3137 ret = __qseecom_process_reentrancy_blocked_on_listener(
3138 resp, ptr_app, data);
3139 if (ret) {
3140 pr_err("failed to process App(%d) %s is blocked on listener %d\n",
3141 data->client.app_id, data->client.app_name, resp->data);
3142 return ret;
3143 }
3144
3145 case QSEOS_RESULT_INCOMPLETE:
3146 qseecom.app_block_ref_cnt++;
3147 ptr_app->app_blocked = true;
3148 ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
3149 ptr_app->app_blocked = false;
3150 qseecom.app_block_ref_cnt--;
3151 wake_up_interruptible(&qseecom.app_block_wq);
3152 if (ret)
3153 pr_err("process_incomplete_cmd failed err: %d\n",
3154 ret);
3155 return ret;
3156 case QSEOS_RESULT_SUCCESS:
3157 return ret;
3158 default:
3159 pr_err("Response result %d not supported\n",
3160 resp->result);
3161 return -EINVAL;
3162 }
3163}
3164
3165static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
3166 struct qseecom_send_cmd_req *req)
3167{
3168 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07003169 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003170 u32 reqd_len_sb_in = 0;
3171 struct qseecom_client_send_data_ireq send_data_req = {0};
3172 struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
3173 struct qseecom_command_scm_resp resp;
3174 unsigned long flags;
3175 struct qseecom_registered_app_list *ptr_app;
3176 bool found_app = false;
3177 void *cmd_buf = NULL;
3178 size_t cmd_len;
3179 struct sglist_info *table = data->sglistinfo_ptr;
3180
3181 reqd_len_sb_in = req->cmd_req_len + req->resp_len;
3182 /* find app_id & img_name from list */
3183 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
3184 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
3185 list) {
3186 if ((ptr_app->app_id == data->client.app_id) &&
3187 (!strcmp(ptr_app->app_name, data->client.app_name))) {
3188 found_app = true;
3189 break;
3190 }
3191 }
3192 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
3193
3194 if (!found_app) {
3195 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
3196 (char *)data->client.app_name);
3197 return -ENOENT;
3198 }
3199
3200 if (qseecom.qsee_version < QSEE_VERSION_40) {
3201 send_data_req.app_id = data->client.app_id;
3202 send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3203 data, (uintptr_t)req->cmd_req_buf));
3204 send_data_req.req_len = req->cmd_req_len;
3205 send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
3206 data, (uintptr_t)req->resp_buf));
3207 send_data_req.rsp_len = req->resp_len;
3208 send_data_req.sglistinfo_ptr =
3209 (uint32_t)virt_to_phys(table);
3210 send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3211 dmac_flush_range((void *)table,
3212 (void *)table + SGLISTINFO_TABLE_SIZE);
3213 cmd_buf = (void *)&send_data_req;
3214 cmd_len = sizeof(struct qseecom_client_send_data_ireq);
3215 } else {
3216 send_data_req_64bit.app_id = data->client.app_id;
3217 send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
3218 (uintptr_t)req->cmd_req_buf);
3219 send_data_req_64bit.req_len = req->cmd_req_len;
3220 send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
3221 (uintptr_t)req->resp_buf);
3222 send_data_req_64bit.rsp_len = req->resp_len;
3223 /* check if 32bit app's phys_addr region is under 4GB.*/
3224 if ((data->client.app_arch == ELFCLASS32) &&
3225 ((send_data_req_64bit.req_ptr >=
3226 PHY_ADDR_4G - send_data_req_64bit.req_len) ||
3227 (send_data_req_64bit.rsp_ptr >=
3228 PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
3229 pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
3230 data->client.app_name,
3231 send_data_req_64bit.req_ptr,
3232 send_data_req_64bit.req_len,
3233 send_data_req_64bit.rsp_ptr,
3234 send_data_req_64bit.rsp_len);
3235 return -EFAULT;
3236 }
3237 send_data_req_64bit.sglistinfo_ptr =
3238 (uint64_t)virt_to_phys(table);
3239 send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
3240 dmac_flush_range((void *)table,
3241 (void *)table + SGLISTINFO_TABLE_SIZE);
3242 cmd_buf = (void *)&send_data_req_64bit;
3243 cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
3244 }
3245
3246 if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
3247 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
3248 else
3249 *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
3250
3251 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
3252 data->client.sb_virt,
3253 reqd_len_sb_in,
3254 ION_IOC_CLEAN_INV_CACHES);
3255 if (ret) {
3256 pr_err("cache operation failed %d\n", ret);
3257 return ret;
3258 }
3259
3260 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
3261
3262 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
3263 cmd_buf, cmd_len,
3264 &resp, sizeof(resp));
3265 if (ret) {
3266 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
3267 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07003268 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003269 }
3270
3271 if (qseecom.qsee_reentrancy_support) {
3272 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07003273 if (ret)
3274 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003275 } else {
3276 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
3277 ret = __qseecom_process_incomplete_cmd(data, &resp);
3278 if (ret) {
3279 pr_err("process_incomplete_cmd failed err: %d\n",
3280 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07003281 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003282 }
3283 } else {
3284 if (resp.result != QSEOS_RESULT_SUCCESS) {
3285 pr_err("Response result %d not supported\n",
3286 resp.result);
3287 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07003288 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003289 }
3290 }
3291 }
Zhen Kong4af480e2017-09-19 14:34:16 -07003292exit:
3293 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003294 data->client.sb_virt, data->client.sb_length,
3295 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07003296 if (ret2) {
3297 pr_err("cache operation failed %d\n", ret2);
3298 return ret2;
3299 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08003300 __qseecom_processing_pending_lsnr_unregister();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003301 return ret;
3302}
3303
3304static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3305{
3306 int ret = 0;
3307 struct qseecom_send_cmd_req req;
3308
3309 ret = copy_from_user(&req, argp, sizeof(req));
3310 if (ret) {
3311 pr_err("copy_from_user failed\n");
3312 return ret;
3313 }
3314
3315 if (__validate_send_cmd_inputs(data, &req))
3316 return -EINVAL;
3317
3318 ret = __qseecom_send_cmd(data, &req);
3319
3320 if (ret)
3321 return ret;
3322
3323 return ret;
3324}
3325
3326int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3327 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3328 struct qseecom_dev_handle *data, int i) {
3329
3330 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3331 (req->ifd_data[i].fd > 0)) {
3332 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3333 (req->ifd_data[i].cmd_buf_offset >
3334 req->cmd_req_len - sizeof(uint32_t))) {
3335 pr_err("Invalid offset (req len) 0x%x\n",
3336 req->ifd_data[i].cmd_buf_offset);
3337 return -EINVAL;
3338 }
3339 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3340 (lstnr_resp->ifd_data[i].fd > 0)) {
3341 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3342 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3343 lstnr_resp->resp_len - sizeof(uint32_t))) {
3344 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3345 lstnr_resp->ifd_data[i].cmd_buf_offset);
3346 return -EINVAL;
3347 }
3348 }
3349 return 0;
3350}
3351
3352static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
3353 struct qseecom_dev_handle *data)
3354{
3355 struct ion_handle *ihandle;
3356 char *field;
3357 int ret = 0;
3358 int i = 0;
3359 uint32_t len = 0;
3360 struct scatterlist *sg;
3361 struct qseecom_send_modfd_cmd_req *req = NULL;
3362 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3363 struct qseecom_registered_listener_list *this_lstnr = NULL;
3364 uint32_t offset;
3365 struct sg_table *sg_ptr;
3366
3367 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3368 (data->type != QSEECOM_CLIENT_APP))
3369 return -EFAULT;
3370
3371 if (msg == NULL) {
3372 pr_err("Invalid address\n");
3373 return -EINVAL;
3374 }
3375 if (data->type == QSEECOM_LISTENER_SERVICE) {
3376 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3377 this_lstnr = __qseecom_find_svc(data->listener.id);
3378 if (IS_ERR_OR_NULL(this_lstnr)) {
3379 pr_err("Invalid listener ID\n");
3380 return -ENOMEM;
3381 }
3382 } else {
3383 req = (struct qseecom_send_modfd_cmd_req *)msg;
3384 }
3385
3386 for (i = 0; i < MAX_ION_FD; i++) {
3387 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3388 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003389 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003390 req->ifd_data[i].fd);
3391 if (IS_ERR_OR_NULL(ihandle)) {
3392 pr_err("Ion client can't retrieve the handle\n");
3393 return -ENOMEM;
3394 }
3395 field = (char *) req->cmd_req_buf +
3396 req->ifd_data[i].cmd_buf_offset;
3397 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3398 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003399 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003400 lstnr_resp->ifd_data[i].fd);
3401 if (IS_ERR_OR_NULL(ihandle)) {
3402 pr_err("Ion client can't retrieve the handle\n");
3403 return -ENOMEM;
3404 }
3405 field = lstnr_resp->resp_buf_ptr +
3406 lstnr_resp->ifd_data[i].cmd_buf_offset;
3407 } else {
3408 continue;
3409 }
3410 /* Populate the cmd data structure with the phys_addr */
3411 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3412 if (IS_ERR_OR_NULL(sg_ptr)) {
3413 pr_err("IOn client could not retrieve sg table\n");
3414 goto err;
3415 }
3416 if (sg_ptr->nents == 0) {
3417 pr_err("Num of scattered entries is 0\n");
3418 goto err;
3419 }
3420 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3421 pr_err("Num of scattered entries");
3422 pr_err(" (%d) is greater than max supported %d\n",
3423 sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3424 goto err;
3425 }
3426 sg = sg_ptr->sgl;
3427 if (sg_ptr->nents == 1) {
3428 uint32_t *update;
3429
3430 if (__boundary_checks_offset(req, lstnr_resp, data, i))
3431 goto err;
3432 if ((data->type == QSEECOM_CLIENT_APP &&
3433 (data->client.app_arch == ELFCLASS32 ||
3434 data->client.app_arch == ELFCLASS64)) ||
3435 (data->type == QSEECOM_LISTENER_SERVICE)) {
3436 /*
3437 * Check if sg list phy add region is under 4GB
3438 */
3439 if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
3440 (!cleanup) &&
3441 ((uint64_t)sg_dma_address(sg_ptr->sgl)
3442 >= PHY_ADDR_4G - sg->length)) {
3443 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3444 data->client.app_name,
3445 &(sg_dma_address(sg_ptr->sgl)),
3446 sg->length);
3447 goto err;
3448 }
3449 update = (uint32_t *) field;
3450 *update = cleanup ? 0 :
3451 (uint32_t)sg_dma_address(sg_ptr->sgl);
3452 } else {
3453 pr_err("QSEE app arch %u is not supported\n",
3454 data->client.app_arch);
3455 goto err;
3456 }
3457 len += (uint32_t)sg->length;
3458 } else {
3459 struct qseecom_sg_entry *update;
3460 int j = 0;
3461
3462 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3463 (req->ifd_data[i].fd > 0)) {
3464
3465 if ((req->cmd_req_len <
3466 SG_ENTRY_SZ * sg_ptr->nents) ||
3467 (req->ifd_data[i].cmd_buf_offset >
3468 (req->cmd_req_len -
3469 SG_ENTRY_SZ * sg_ptr->nents))) {
3470 pr_err("Invalid offset = 0x%x\n",
3471 req->ifd_data[i].cmd_buf_offset);
3472 goto err;
3473 }
3474
3475 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3476 (lstnr_resp->ifd_data[i].fd > 0)) {
3477
3478 if ((lstnr_resp->resp_len <
3479 SG_ENTRY_SZ * sg_ptr->nents) ||
3480 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3481 (lstnr_resp->resp_len -
3482 SG_ENTRY_SZ * sg_ptr->nents))) {
3483 goto err;
3484 }
3485 }
3486 if ((data->type == QSEECOM_CLIENT_APP &&
3487 (data->client.app_arch == ELFCLASS32 ||
3488 data->client.app_arch == ELFCLASS64)) ||
3489 (data->type == QSEECOM_LISTENER_SERVICE)) {
3490 update = (struct qseecom_sg_entry *)field;
3491 for (j = 0; j < sg_ptr->nents; j++) {
3492 /*
3493 * Check if sg list PA is under 4GB
3494 */
3495 if ((qseecom.qsee_version >=
3496 QSEE_VERSION_40) &&
3497 (!cleanup) &&
3498 ((uint64_t)(sg_dma_address(sg))
3499 >= PHY_ADDR_4G - sg->length)) {
3500 pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
3501 data->client.app_name,
3502 &(sg_dma_address(sg)),
3503 sg->length);
3504 goto err;
3505 }
3506 update->phys_addr = cleanup ? 0 :
3507 (uint32_t)sg_dma_address(sg);
3508 update->len = cleanup ? 0 : sg->length;
3509 update++;
3510 len += sg->length;
3511 sg = sg_next(sg);
3512 }
3513 } else {
3514 pr_err("QSEE app arch %u is not supported\n",
3515 data->client.app_arch);
3516 goto err;
3517 }
3518 }
3519
3520 if (cleanup) {
3521 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3522 ihandle, NULL, len,
3523 ION_IOC_INV_CACHES);
3524 if (ret) {
3525 pr_err("cache operation failed %d\n", ret);
3526 goto err;
3527 }
3528 } else {
3529 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3530 ihandle, NULL, len,
3531 ION_IOC_CLEAN_INV_CACHES);
3532 if (ret) {
3533 pr_err("cache operation failed %d\n", ret);
3534 goto err;
3535 }
3536 if (data->type == QSEECOM_CLIENT_APP) {
3537 offset = req->ifd_data[i].cmd_buf_offset;
3538 data->sglistinfo_ptr[i].indexAndFlags =
3539 SGLISTINFO_SET_INDEX_FLAG(
3540 (sg_ptr->nents == 1), 0, offset);
3541 data->sglistinfo_ptr[i].sizeOrCount =
3542 (sg_ptr->nents == 1) ?
3543 sg->length : sg_ptr->nents;
3544 data->sglist_cnt = i + 1;
3545 } else {
3546 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3547 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3548 (uintptr_t)this_lstnr->sb_virt);
3549 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3550 SGLISTINFO_SET_INDEX_FLAG(
3551 (sg_ptr->nents == 1), 0, offset);
3552 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3553 (sg_ptr->nents == 1) ?
3554 sg->length : sg_ptr->nents;
3555 this_lstnr->sglist_cnt = i + 1;
3556 }
3557 }
3558 /* Deallocate the handle */
3559 if (!IS_ERR_OR_NULL(ihandle))
3560 ion_free(qseecom.ion_clnt, ihandle);
3561 }
3562 return ret;
3563err:
3564 if (!IS_ERR_OR_NULL(ihandle))
3565 ion_free(qseecom.ion_clnt, ihandle);
3566 return -ENOMEM;
3567}
3568
3569static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3570 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3571{
3572 struct scatterlist *sg = sg_ptr->sgl;
3573 struct qseecom_sg_entry_64bit *sg_entry;
3574 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3575 void *buf;
3576 uint i;
3577 size_t size;
3578 dma_addr_t coh_pmem;
3579
3580 if (fd_idx >= MAX_ION_FD) {
3581 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3582 return -ENOMEM;
3583 }
3584 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3585 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3586 /* Allocate a contiguous kernel buffer */
3587 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3588 size = (size + PAGE_SIZE) & PAGE_MASK;
3589 buf = dma_alloc_coherent(qseecom.pdev,
3590 size, &coh_pmem, GFP_KERNEL);
3591 if (buf == NULL) {
3592 pr_err("failed to alloc memory for sg buf\n");
3593 return -ENOMEM;
3594 }
3595 /* update qseecom_sg_list_buf_hdr_64bit */
3596 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3597 buf_hdr->new_buf_phys_addr = coh_pmem;
3598 buf_hdr->nents_total = sg_ptr->nents;
3599 /* save the left sg entries into new allocated buf */
3600 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3601 for (i = 0; i < sg_ptr->nents; i++) {
3602 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3603 sg_entry->len = sg->length;
3604 sg_entry++;
3605 sg = sg_next(sg);
3606 }
3607
3608 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3609 data->client.sec_buf_fd[fd_idx].vbase = buf;
3610 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3611 data->client.sec_buf_fd[fd_idx].size = size;
3612
3613 return 0;
3614}
3615
3616static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
3617 struct qseecom_dev_handle *data)
3618{
3619 struct ion_handle *ihandle;
3620 char *field;
3621 int ret = 0;
3622 int i = 0;
3623 uint32_t len = 0;
3624 struct scatterlist *sg;
3625 struct qseecom_send_modfd_cmd_req *req = NULL;
3626 struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
3627 struct qseecom_registered_listener_list *this_lstnr = NULL;
3628 uint32_t offset;
3629 struct sg_table *sg_ptr;
3630
3631 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3632 (data->type != QSEECOM_CLIENT_APP))
3633 return -EFAULT;
3634
3635 if (msg == NULL) {
3636 pr_err("Invalid address\n");
3637 return -EINVAL;
3638 }
3639 if (data->type == QSEECOM_LISTENER_SERVICE) {
3640 lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
3641 this_lstnr = __qseecom_find_svc(data->listener.id);
3642 if (IS_ERR_OR_NULL(this_lstnr)) {
3643 pr_err("Invalid listener ID\n");
3644 return -ENOMEM;
3645 }
3646 } else {
3647 req = (struct qseecom_send_modfd_cmd_req *)msg;
3648 }
3649
3650 for (i = 0; i < MAX_ION_FD; i++) {
3651 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3652 (req->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003653 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003654 req->ifd_data[i].fd);
3655 if (IS_ERR_OR_NULL(ihandle)) {
3656 pr_err("Ion client can't retrieve the handle\n");
3657 return -ENOMEM;
3658 }
3659 field = (char *) req->cmd_req_buf +
3660 req->ifd_data[i].cmd_buf_offset;
3661 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3662 (lstnr_resp->ifd_data[i].fd > 0)) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07003663 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003664 lstnr_resp->ifd_data[i].fd);
3665 if (IS_ERR_OR_NULL(ihandle)) {
3666 pr_err("Ion client can't retrieve the handle\n");
3667 return -ENOMEM;
3668 }
3669 field = lstnr_resp->resp_buf_ptr +
3670 lstnr_resp->ifd_data[i].cmd_buf_offset;
3671 } else {
3672 continue;
3673 }
3674 /* Populate the cmd data structure with the phys_addr */
3675 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
3676 if (IS_ERR_OR_NULL(sg_ptr)) {
3677 pr_err("IOn client could not retrieve sg table\n");
3678 goto err;
3679 }
3680 if (sg_ptr->nents == 0) {
3681 pr_err("Num of scattered entries is 0\n");
3682 goto err;
3683 }
3684 if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
3685 pr_warn("Num of scattered entries");
3686 pr_warn(" (%d) is greater than %d\n",
3687 sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
3688 if (cleanup) {
3689 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3690 data->client.sec_buf_fd[i].vbase)
3691 dma_free_coherent(qseecom.pdev,
3692 data->client.sec_buf_fd[i].size,
3693 data->client.sec_buf_fd[i].vbase,
3694 data->client.sec_buf_fd[i].pbase);
3695 } else {
3696 ret = __qseecom_allocate_sg_list_buffer(data,
3697 field, i, sg_ptr);
3698 if (ret) {
3699 pr_err("Failed to allocate sg list buffer\n");
3700 goto err;
3701 }
3702 }
3703 len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
3704 sg = sg_ptr->sgl;
3705 goto cleanup;
3706 }
3707 sg = sg_ptr->sgl;
3708 if (sg_ptr->nents == 1) {
3709 uint64_t *update_64bit;
3710
3711 if (__boundary_checks_offset(req, lstnr_resp, data, i))
3712 goto err;
3713 /* 64bit app uses 64bit address */
3714 update_64bit = (uint64_t *) field;
3715 *update_64bit = cleanup ? 0 :
3716 (uint64_t)sg_dma_address(sg_ptr->sgl);
3717 len += (uint32_t)sg->length;
3718 } else {
3719 struct qseecom_sg_entry_64bit *update_64bit;
3720 int j = 0;
3721
3722 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3723 (req->ifd_data[i].fd > 0)) {
3724
3725 if ((req->cmd_req_len <
3726 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3727 (req->ifd_data[i].cmd_buf_offset >
3728 (req->cmd_req_len -
3729 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3730 pr_err("Invalid offset = 0x%x\n",
3731 req->ifd_data[i].cmd_buf_offset);
3732 goto err;
3733 }
3734
3735 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3736 (lstnr_resp->ifd_data[i].fd > 0)) {
3737
3738 if ((lstnr_resp->resp_len <
3739 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
3740 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3741 (lstnr_resp->resp_len -
3742 SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
3743 goto err;
3744 }
3745 }
3746 /* 64bit app uses 64bit address */
3747 update_64bit = (struct qseecom_sg_entry_64bit *)field;
3748 for (j = 0; j < sg_ptr->nents; j++) {
3749 update_64bit->phys_addr = cleanup ? 0 :
3750 (uint64_t)sg_dma_address(sg);
3751 update_64bit->len = cleanup ? 0 :
3752 (uint32_t)sg->length;
3753 update_64bit++;
3754 len += sg->length;
3755 sg = sg_next(sg);
3756 }
3757 }
3758cleanup:
3759 if (cleanup) {
3760 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3761 ihandle, NULL, len,
3762 ION_IOC_INV_CACHES);
3763 if (ret) {
3764 pr_err("cache operation failed %d\n", ret);
3765 goto err;
3766 }
3767 } else {
3768 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
3769 ihandle, NULL, len,
3770 ION_IOC_CLEAN_INV_CACHES);
3771 if (ret) {
3772 pr_err("cache operation failed %d\n", ret);
3773 goto err;
3774 }
3775 if (data->type == QSEECOM_CLIENT_APP) {
3776 offset = req->ifd_data[i].cmd_buf_offset;
3777 data->sglistinfo_ptr[i].indexAndFlags =
3778 SGLISTINFO_SET_INDEX_FLAG(
3779 (sg_ptr->nents == 1), 1, offset);
3780 data->sglistinfo_ptr[i].sizeOrCount =
3781 (sg_ptr->nents == 1) ?
3782 sg->length : sg_ptr->nents;
3783 data->sglist_cnt = i + 1;
3784 } else {
3785 offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
3786 + (uintptr_t)lstnr_resp->resp_buf_ptr -
3787 (uintptr_t)this_lstnr->sb_virt);
3788 this_lstnr->sglistinfo_ptr[i].indexAndFlags =
3789 SGLISTINFO_SET_INDEX_FLAG(
3790 (sg_ptr->nents == 1), 1, offset);
3791 this_lstnr->sglistinfo_ptr[i].sizeOrCount =
3792 (sg_ptr->nents == 1) ?
3793 sg->length : sg_ptr->nents;
3794 this_lstnr->sglist_cnt = i + 1;
3795 }
3796 }
3797 /* Deallocate the handle */
3798 if (!IS_ERR_OR_NULL(ihandle))
3799 ion_free(qseecom.ion_clnt, ihandle);
3800 }
3801 return ret;
3802err:
3803 for (i = 0; i < MAX_ION_FD; i++)
3804 if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
3805 data->client.sec_buf_fd[i].vbase)
3806 dma_free_coherent(qseecom.pdev,
3807 data->client.sec_buf_fd[i].size,
3808 data->client.sec_buf_fd[i].vbase,
3809 data->client.sec_buf_fd[i].pbase);
3810 if (!IS_ERR_OR_NULL(ihandle))
3811 ion_free(qseecom.ion_clnt, ihandle);
3812 return -ENOMEM;
3813}
3814
3815static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3816 void __user *argp,
3817 bool is_64bit_addr)
3818{
3819 int ret = 0;
3820 int i;
3821 struct qseecom_send_modfd_cmd_req req;
3822 struct qseecom_send_cmd_req send_cmd_req;
3823
3824 ret = copy_from_user(&req, argp, sizeof(req));
3825 if (ret) {
3826 pr_err("copy_from_user failed\n");
3827 return ret;
3828 }
3829
3830 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3831 send_cmd_req.cmd_req_len = req.cmd_req_len;
3832 send_cmd_req.resp_buf = req.resp_buf;
3833 send_cmd_req.resp_len = req.resp_len;
3834
3835 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3836 return -EINVAL;
3837
3838 /* validate offsets */
3839 for (i = 0; i < MAX_ION_FD; i++) {
3840 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3841 pr_err("Invalid offset %d = 0x%x\n",
3842 i, req.ifd_data[i].cmd_buf_offset);
3843 return -EINVAL;
3844 }
3845 }
3846 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3847 (uintptr_t)req.cmd_req_buf);
3848 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3849 (uintptr_t)req.resp_buf);
3850
3851 if (!is_64bit_addr) {
3852 ret = __qseecom_update_cmd_buf(&req, false, data);
3853 if (ret)
3854 return ret;
3855 ret = __qseecom_send_cmd(data, &send_cmd_req);
3856 if (ret)
3857 return ret;
3858 ret = __qseecom_update_cmd_buf(&req, true, data);
3859 if (ret)
3860 return ret;
3861 } else {
3862 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3863 if (ret)
3864 return ret;
3865 ret = __qseecom_send_cmd(data, &send_cmd_req);
3866 if (ret)
3867 return ret;
3868 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3869 if (ret)
3870 return ret;
3871 }
3872
3873 return ret;
3874}
3875
3876static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3877 void __user *argp)
3878{
3879 return __qseecom_send_modfd_cmd(data, argp, false);
3880}
3881
3882static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
3883 void __user *argp)
3884{
3885 return __qseecom_send_modfd_cmd(data, argp, true);
3886}
3887
3888
3889
3890static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3891 struct qseecom_registered_listener_list *svc)
3892{
3893 int ret;
3894
Zhen Kongf5087172018-10-11 17:22:05 -07003895 ret = (svc->rcv_req_flag == 1);
Zhen Kongbcdeda22018-11-16 13:50:51 -08003896 return ret || data->abort;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003897}
3898
3899static int qseecom_receive_req(struct qseecom_dev_handle *data)
3900{
3901 int ret = 0;
3902 struct qseecom_registered_listener_list *this_lstnr;
3903
Zhen Kongbcdeda22018-11-16 13:50:51 -08003904 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003905 this_lstnr = __qseecom_find_svc(data->listener.id);
3906 if (!this_lstnr) {
3907 pr_err("Invalid listener ID\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08003908 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003909 return -ENODATA;
3910 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08003911 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003912
3913 while (1) {
3914 if (wait_event_freezable(this_lstnr->rcv_req_wq,
3915 __qseecom_listener_has_rcvd_req(data,
3916 this_lstnr))) {
Zhen Kong25731112018-09-20 13:10:03 -07003917 pr_warn("Interrupted: exiting Listener Service = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003918 (uint32_t)data->listener.id);
3919 /* woken up for different reason */
3920 return -ERESTARTSYS;
3921 }
3922
Zhen Kongbcdeda22018-11-16 13:50:51 -08003923 if (data->abort) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003924 pr_err("Aborting Listener Service = %d\n",
Zhen Kong26e62742018-05-04 17:19:06 -07003925 (uint32_t)data->listener.id);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003926 return -ENODEV;
3927 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08003928 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003929 this_lstnr->rcv_req_flag = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08003930 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07003931 break;
3932 }
3933 return ret;
3934}
3935
3936static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
3937{
3938 unsigned char app_arch = 0;
3939 struct elf32_hdr *ehdr;
3940 struct elf64_hdr *ehdr64;
3941
3942 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3943
3944 switch (app_arch) {
3945 case ELFCLASS32: {
3946 ehdr = (struct elf32_hdr *)fw_entry->data;
3947 if (fw_entry->size < sizeof(*ehdr)) {
3948 pr_err("%s: Not big enough to be an elf32 header\n",
3949 qseecom.pdev->init_name);
3950 return false;
3951 }
3952 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
3953 pr_err("%s: Not an elf32 header\n",
3954 qseecom.pdev->init_name);
3955 return false;
3956 }
3957 if (ehdr->e_phnum == 0) {
3958 pr_err("%s: No loadable segments\n",
3959 qseecom.pdev->init_name);
3960 return false;
3961 }
3962 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
3963 sizeof(struct elf32_hdr) > fw_entry->size) {
3964 pr_err("%s: Program headers not within mdt\n",
3965 qseecom.pdev->init_name);
3966 return false;
3967 }
3968 break;
3969 }
3970 case ELFCLASS64: {
3971 ehdr64 = (struct elf64_hdr *)fw_entry->data;
3972 if (fw_entry->size < sizeof(*ehdr64)) {
3973 pr_err("%s: Not big enough to be an elf64 header\n",
3974 qseecom.pdev->init_name);
3975 return false;
3976 }
3977 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
3978 pr_err("%s: Not an elf64 header\n",
3979 qseecom.pdev->init_name);
3980 return false;
3981 }
3982 if (ehdr64->e_phnum == 0) {
3983 pr_err("%s: No loadable segments\n",
3984 qseecom.pdev->init_name);
3985 return false;
3986 }
3987 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
3988 sizeof(struct elf64_hdr) > fw_entry->size) {
3989 pr_err("%s: Program headers not within mdt\n",
3990 qseecom.pdev->init_name);
3991 return false;
3992 }
3993 break;
3994 }
3995 default: {
3996 pr_err("QSEE app arch %u is not supported\n", app_arch);
3997 return false;
3998 }
3999 }
4000 return true;
4001}
4002
4003static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
4004 uint32_t *app_arch)
4005{
4006 int ret = -1;
4007 int i = 0, rc = 0;
4008 const struct firmware *fw_entry = NULL;
4009 char fw_name[MAX_APP_NAME_SIZE];
4010 struct elf32_hdr *ehdr;
4011 struct elf64_hdr *ehdr64;
4012 int num_images = 0;
4013
4014 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4015 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4016 if (rc) {
4017 pr_err("error with request_firmware\n");
4018 ret = -EIO;
4019 goto err;
4020 }
4021 if (!__qseecom_is_fw_image_valid(fw_entry)) {
4022 ret = -EIO;
4023 goto err;
4024 }
4025 *app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4026 *fw_size = fw_entry->size;
4027 if (*app_arch == ELFCLASS32) {
4028 ehdr = (struct elf32_hdr *)fw_entry->data;
4029 num_images = ehdr->e_phnum;
4030 } else if (*app_arch == ELFCLASS64) {
4031 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4032 num_images = ehdr64->e_phnum;
4033 } else {
4034 pr_err("QSEE %s app, arch %u is not supported\n",
4035 appname, *app_arch);
4036 ret = -EIO;
4037 goto err;
4038 }
4039 pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
4040 release_firmware(fw_entry);
4041 fw_entry = NULL;
4042 for (i = 0; i < num_images; i++) {
4043 memset(fw_name, 0, sizeof(fw_name));
4044 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4045 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4046 if (ret)
4047 goto err;
4048 if (*fw_size > U32_MAX - fw_entry->size) {
4049 pr_err("QSEE %s app file size overflow\n", appname);
4050 ret = -EINVAL;
4051 goto err;
4052 }
4053 *fw_size += fw_entry->size;
4054 release_firmware(fw_entry);
4055 fw_entry = NULL;
4056 }
4057
4058 return ret;
4059err:
4060 if (fw_entry)
4061 release_firmware(fw_entry);
4062 *fw_size = 0;
4063 return ret;
4064}
4065
4066static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
4067 uint32_t fw_size,
4068 struct qseecom_load_app_ireq *load_req)
4069{
4070 int ret = -1;
4071 int i = 0, rc = 0;
4072 const struct firmware *fw_entry = NULL;
4073 char fw_name[MAX_APP_NAME_SIZE];
4074 u8 *img_data_ptr = img_data;
4075 struct elf32_hdr *ehdr;
4076 struct elf64_hdr *ehdr64;
4077 int num_images = 0;
4078 unsigned char app_arch = 0;
4079
4080 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4081 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4082 if (rc) {
4083 ret = -EIO;
4084 goto err;
4085 }
4086
4087 load_req->img_len = fw_entry->size;
4088 if (load_req->img_len > fw_size) {
4089 pr_err("app %s size %zu is larger than buf size %u\n",
4090 appname, fw_entry->size, fw_size);
4091 ret = -EINVAL;
4092 goto err;
4093 }
4094 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4095 img_data_ptr = img_data_ptr + fw_entry->size;
4096 load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/
4097
4098 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4099 if (app_arch == ELFCLASS32) {
4100 ehdr = (struct elf32_hdr *)fw_entry->data;
4101 num_images = ehdr->e_phnum;
4102 } else if (app_arch == ELFCLASS64) {
4103 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4104 num_images = ehdr64->e_phnum;
4105 } else {
4106 pr_err("QSEE %s app, arch %u is not supported\n",
4107 appname, app_arch);
4108 ret = -EIO;
4109 goto err;
4110 }
4111 release_firmware(fw_entry);
4112 fw_entry = NULL;
4113 for (i = 0; i < num_images; i++) {
4114 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4115 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4116 if (ret) {
4117 pr_err("Failed to locate blob %s\n", fw_name);
4118 goto err;
4119 }
4120 if ((fw_entry->size > U32_MAX - load_req->img_len) ||
4121 (fw_entry->size + load_req->img_len > fw_size)) {
4122 pr_err("Invalid file size for %s\n", fw_name);
4123 ret = -EINVAL;
4124 goto err;
4125 }
4126 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4127 img_data_ptr = img_data_ptr + fw_entry->size;
4128 load_req->img_len += fw_entry->size;
4129 release_firmware(fw_entry);
4130 fw_entry = NULL;
4131 }
4132 return ret;
4133err:
4134 release_firmware(fw_entry);
4135 return ret;
4136}
4137
4138static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
4139 u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
4140{
4141 size_t len = 0;
4142 int ret = 0;
4143 ion_phys_addr_t pa;
4144 struct ion_handle *ihandle = NULL;
4145 u8 *img_data = NULL;
Zhen Kong3dd92792017-12-08 09:47:15 -08004146 int retry = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004147
Zhen Kong3dd92792017-12-08 09:47:15 -08004148 do {
Zhen Kong5d02be92018-05-29 16:17:29 -07004149 if (retry++) {
4150 mutex_unlock(&app_access_lock);
Zhen Kong3dd92792017-12-08 09:47:15 -08004151 msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
Zhen Kong5d02be92018-05-29 16:17:29 -07004152 mutex_lock(&app_access_lock);
4153 }
Zhen Kong3dd92792017-12-08 09:47:15 -08004154 ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
4155 SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), 0);
4156 } while (IS_ERR_OR_NULL(ihandle) &&
4157 (retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004158
4159 if (IS_ERR_OR_NULL(ihandle)) {
4160 pr_err("ION alloc failed\n");
4161 return -ENOMEM;
4162 }
4163 img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
4164 ihandle);
4165
4166 if (IS_ERR_OR_NULL(img_data)) {
4167 pr_err("ION memory mapping for image loading failed\n");
4168 ret = -ENOMEM;
4169 goto exit_ion_free;
4170 }
4171 /* Get the physical address of the ION BUF */
4172 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
4173 if (ret) {
4174 pr_err("physical memory retrieval failure\n");
4175 ret = -EIO;
4176 goto exit_ion_unmap_kernel;
4177 }
4178
4179 *pihandle = ihandle;
4180 *data = img_data;
4181 *paddr = pa;
4182 return ret;
4183
4184exit_ion_unmap_kernel:
4185 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
4186exit_ion_free:
4187 ion_free(qseecom.ion_clnt, ihandle);
4188 ihandle = NULL;
4189 return ret;
4190}
4191
4192static void __qseecom_free_img_data(struct ion_handle **ihandle)
4193{
4194 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4195 ion_free(qseecom.ion_clnt, *ihandle);
4196 *ihandle = NULL;
4197}
4198
4199static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4200 uint32_t *app_id)
4201{
4202 int ret = -1;
4203 uint32_t fw_size = 0;
4204 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4205 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4206 struct qseecom_command_scm_resp resp;
4207 u8 *img_data = NULL;
4208 ion_phys_addr_t pa = 0;
4209 struct ion_handle *ihandle = NULL;
4210 void *cmd_buf = NULL;
4211 size_t cmd_len;
4212 uint32_t app_arch = 0;
4213
4214 if (!data || !appname || !app_id) {
4215 pr_err("Null pointer to data or appname or appid\n");
4216 return -EINVAL;
4217 }
4218 *app_id = 0;
4219 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4220 return -EIO;
4221 data->client.app_arch = app_arch;
4222
4223 /* Check and load cmnlib */
4224 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4225 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4226 ret = qseecom_load_commonlib_image(data, "cmnlib");
4227 if (ret) {
4228 pr_err("failed to load cmnlib\n");
4229 return -EIO;
4230 }
4231 qseecom.commonlib_loaded = true;
4232 pr_debug("cmnlib is loaded\n");
4233 }
4234
4235 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4236 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4237 if (ret) {
4238 pr_err("failed to load cmnlib64\n");
4239 return -EIO;
4240 }
4241 qseecom.commonlib64_loaded = true;
4242 pr_debug("cmnlib64 is loaded\n");
4243 }
4244 }
4245
4246 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4247 if (ret)
4248 return ret;
4249
4250 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4251 if (ret) {
4252 ret = -EIO;
4253 goto exit_free_img_data;
4254 }
4255
4256 /* Populate the load_req parameters */
4257 if (qseecom.qsee_version < QSEE_VERSION_40) {
4258 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4259 load_req.mdt_len = load_req.mdt_len;
4260 load_req.img_len = load_req.img_len;
4261 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4262 load_req.phy_addr = (uint32_t)pa;
4263 cmd_buf = (void *)&load_req;
4264 cmd_len = sizeof(struct qseecom_load_app_ireq);
4265 } else {
4266 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4267 load_req_64bit.mdt_len = load_req.mdt_len;
4268 load_req_64bit.img_len = load_req.img_len;
4269 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4270 load_req_64bit.phy_addr = (uint64_t)pa;
4271 cmd_buf = (void *)&load_req_64bit;
4272 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4273 }
4274
4275 if (qseecom.support_bus_scaling) {
4276 mutex_lock(&qsee_bw_mutex);
4277 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4278 mutex_unlock(&qsee_bw_mutex);
4279 if (ret) {
4280 ret = -EIO;
4281 goto exit_free_img_data;
4282 }
4283 }
4284
4285 ret = __qseecom_enable_clk_scale_up(data);
4286 if (ret) {
4287 ret = -EIO;
4288 goto exit_unregister_bus_bw_need;
4289 }
4290
4291 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4292 img_data, fw_size,
4293 ION_IOC_CLEAN_INV_CACHES);
4294 if (ret) {
4295 pr_err("cache operation failed %d\n", ret);
4296 goto exit_disable_clk_vote;
4297 }
4298
4299 /* SCM_CALL to load the image */
4300 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4301 &resp, sizeof(resp));
4302 if (ret) {
Zhen Kong5d02be92018-05-29 16:17:29 -07004303 pr_err("scm_call to load failed : ret %d, result %x\n",
4304 ret, resp.result);
4305 if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
4306 ret = -EEXIST;
4307 else
4308 ret = -EIO;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004309 goto exit_disable_clk_vote;
4310 }
4311
4312 switch (resp.result) {
4313 case QSEOS_RESULT_SUCCESS:
4314 *app_id = resp.data;
4315 break;
4316 case QSEOS_RESULT_INCOMPLETE:
4317 ret = __qseecom_process_incomplete_cmd(data, &resp);
4318 if (ret)
4319 pr_err("process_incomplete_cmd FAILED\n");
4320 else
4321 *app_id = resp.data;
4322 break;
4323 case QSEOS_RESULT_FAILURE:
4324 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4325 break;
4326 default:
4327 pr_err("scm call return unknown response %d\n", resp.result);
4328 ret = -EINVAL;
4329 break;
4330 }
4331
4332exit_disable_clk_vote:
4333 __qseecom_disable_clk_scale_down(data);
4334
4335exit_unregister_bus_bw_need:
4336 if (qseecom.support_bus_scaling) {
4337 mutex_lock(&qsee_bw_mutex);
4338 qseecom_unregister_bus_bandwidth_needs(data);
4339 mutex_unlock(&qsee_bw_mutex);
4340 }
4341
4342exit_free_img_data:
4343 __qseecom_free_img_data(&ihandle);
4344 return ret;
4345}
4346
4347static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
4348 char *cmnlib_name)
4349{
4350 int ret = 0;
4351 uint32_t fw_size = 0;
4352 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4353 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4354 struct qseecom_command_scm_resp resp;
4355 u8 *img_data = NULL;
4356 ion_phys_addr_t pa = 0;
4357 void *cmd_buf = NULL;
4358 size_t cmd_len;
4359 uint32_t app_arch = 0;
Zhen Kong3bafb312017-10-18 10:27:20 -07004360 struct ion_handle *cmnlib_ion_handle = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004361
4362 if (!cmnlib_name) {
4363 pr_err("cmnlib_name is NULL\n");
4364 return -EINVAL;
4365 }
4366 if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
4367 pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
4368 cmnlib_name, strlen(cmnlib_name));
4369 return -EINVAL;
4370 }
4371
4372 if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
4373 return -EIO;
4374
Zhen Kong3bafb312017-10-18 10:27:20 -07004375 ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004376 &img_data, fw_size, &pa);
4377 if (ret)
4378 return -EIO;
4379
4380 ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
4381 if (ret) {
4382 ret = -EIO;
4383 goto exit_free_img_data;
4384 }
4385 if (qseecom.qsee_version < QSEE_VERSION_40) {
4386 load_req.phy_addr = (uint32_t)pa;
4387 load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4388 cmd_buf = (void *)&load_req;
4389 cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
4390 } else {
4391 load_req_64bit.phy_addr = (uint64_t)pa;
4392 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
4393 load_req_64bit.img_len = load_req.img_len;
4394 load_req_64bit.mdt_len = load_req.mdt_len;
4395 cmd_buf = (void *)&load_req_64bit;
4396 cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
4397 }
4398
4399 if (qseecom.support_bus_scaling) {
4400 mutex_lock(&qsee_bw_mutex);
4401 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4402 mutex_unlock(&qsee_bw_mutex);
4403 if (ret) {
4404 ret = -EIO;
4405 goto exit_free_img_data;
4406 }
4407 }
4408
4409 /* Vote for the SFPB clock */
4410 ret = __qseecom_enable_clk_scale_up(data);
4411 if (ret) {
4412 ret = -EIO;
4413 goto exit_unregister_bus_bw_need;
4414 }
4415
Zhen Kong3bafb312017-10-18 10:27:20 -07004416 ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004417 img_data, fw_size,
4418 ION_IOC_CLEAN_INV_CACHES);
4419 if (ret) {
4420 pr_err("cache operation failed %d\n", ret);
4421 goto exit_disable_clk_vote;
4422 }
4423
4424 /* SCM_CALL to load the image */
4425 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4426 &resp, sizeof(resp));
4427 if (ret) {
4428 pr_err("scm_call to load failed : ret %d\n", ret);
4429 ret = -EIO;
4430 goto exit_disable_clk_vote;
4431 }
4432
4433 switch (resp.result) {
4434 case QSEOS_RESULT_SUCCESS:
4435 break;
4436 case QSEOS_RESULT_FAILURE:
4437 pr_err("scm call failed w/response result%d\n", resp.result);
4438 ret = -EINVAL;
4439 goto exit_disable_clk_vote;
4440 case QSEOS_RESULT_INCOMPLETE:
4441 ret = __qseecom_process_incomplete_cmd(data, &resp);
4442 if (ret) {
4443 pr_err("process_incomplete_cmd failed err: %d\n", ret);
4444 goto exit_disable_clk_vote;
4445 }
4446 break;
4447 default:
4448 pr_err("scm call return unknown response %d\n", resp.result);
4449 ret = -EINVAL;
4450 goto exit_disable_clk_vote;
4451 }
4452
4453exit_disable_clk_vote:
4454 __qseecom_disable_clk_scale_down(data);
4455
4456exit_unregister_bus_bw_need:
4457 if (qseecom.support_bus_scaling) {
4458 mutex_lock(&qsee_bw_mutex);
4459 qseecom_unregister_bus_bandwidth_needs(data);
4460 mutex_unlock(&qsee_bw_mutex);
4461 }
4462
4463exit_free_img_data:
Zhen Kong3bafb312017-10-18 10:27:20 -07004464 __qseecom_free_img_data(&cmnlib_ion_handle);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004465 return ret;
4466}
4467
4468static int qseecom_unload_commonlib_image(void)
4469{
4470 int ret = -EINVAL;
4471 struct qseecom_unload_lib_image_ireq unload_req = {0};
4472 struct qseecom_command_scm_resp resp;
4473
4474 /* Populate the remaining parameters */
4475 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4476
4477 /* SCM_CALL to load the image */
4478 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4479 sizeof(struct qseecom_unload_lib_image_ireq),
4480 &resp, sizeof(resp));
4481 if (ret) {
4482 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4483 ret = -EIO;
4484 } else {
4485 switch (resp.result) {
4486 case QSEOS_RESULT_SUCCESS:
4487 break;
4488 case QSEOS_RESULT_FAILURE:
4489 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4490 break;
4491 default:
4492 pr_err("scm call return unknown response %d\n",
4493 resp.result);
4494 ret = -EINVAL;
4495 break;
4496 }
4497 }
4498
4499 return ret;
4500}
4501
4502int qseecom_start_app(struct qseecom_handle **handle,
4503 char *app_name, uint32_t size)
4504{
4505 int32_t ret = 0;
4506 unsigned long flags = 0;
4507 struct qseecom_dev_handle *data = NULL;
4508 struct qseecom_check_app_ireq app_ireq;
4509 struct qseecom_registered_app_list *entry = NULL;
4510 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4511 bool found_app = false;
4512 size_t len;
4513 ion_phys_addr_t pa;
4514 uint32_t fw_size, app_arch;
4515 uint32_t app_id = 0;
4516
Zhen Kongbcdeda22018-11-16 13:50:51 -08004517 __qseecom_processing_pending_lsnr_unregister();
4518
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004519 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4520 pr_err("Not allowed to be called in %d state\n",
4521 atomic_read(&qseecom.qseecom_state));
4522 return -EPERM;
4523 }
4524 if (!app_name) {
4525 pr_err("failed to get the app name\n");
4526 return -EINVAL;
4527 }
4528
Zhen Kong64a6d7282017-06-16 11:55:07 -07004529 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004530 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004531 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004532 return -EINVAL;
4533 }
4534
4535 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4536 if (!(*handle))
4537 return -ENOMEM;
4538
4539 data = kzalloc(sizeof(*data), GFP_KERNEL);
4540 if (!data) {
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304541 ret = -ENOMEM;
4542 goto exit_handle_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004543 }
4544 data->abort = 0;
4545 data->type = QSEECOM_CLIENT_APP;
4546 data->released = false;
4547 data->client.sb_length = size;
4548 data->client.user_virt_sb_base = 0;
4549 data->client.ihandle = NULL;
4550
4551 init_waitqueue_head(&data->abort_wq);
4552
4553 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4554 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4555 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4556 pr_err("Ion client could not retrieve the handle\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304557 ret = -ENOMEM;
4558 goto exit_data_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004559 }
4560 mutex_lock(&app_access_lock);
4561
Zhen Kong5d02be92018-05-29 16:17:29 -07004562recheck:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004563 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4564 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4565 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4566 if (ret)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304567 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004568
4569 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4570 if (app_id) {
4571 pr_warn("App id %d for [%s] app exists\n", app_id,
4572 (char *)app_ireq.app_name);
4573 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4574 list_for_each_entry(entry,
4575 &qseecom.registered_app_list_head, list){
4576 if (entry->app_id == app_id) {
4577 entry->ref_cnt++;
4578 found_app = true;
4579 break;
4580 }
4581 }
4582 spin_unlock_irqrestore(
4583 &qseecom.registered_app_list_lock, flags);
4584 if (!found_app)
4585 pr_warn("App_id %d [%s] was loaded but not registered\n",
4586 ret, (char *)app_ireq.app_name);
4587 } else {
4588 /* load the app and get the app_id */
4589 pr_debug("%s: Loading app for the first time'\n",
4590 qseecom.pdev->init_name);
4591 ret = __qseecom_load_fw(data, app_name, &app_id);
Zhen Kong5d02be92018-05-29 16:17:29 -07004592 if (ret == -EEXIST) {
4593 pr_err("recheck if TA %s is loaded\n", app_name);
4594 goto recheck;
4595 } else if (ret < 0)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304596 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004597 }
4598 data->client.app_id = app_id;
4599 if (!found_app) {
4600 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4601 if (!entry) {
4602 pr_err("kmalloc for app entry failed\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304603 ret = -ENOMEM;
4604 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004605 }
4606 entry->app_id = app_id;
4607 entry->ref_cnt = 1;
4608 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4609 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4610 ret = -EIO;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304611 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004612 }
4613 entry->app_arch = app_arch;
4614 entry->app_blocked = false;
4615 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07004616 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004617 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4618 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4619 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4620 flags);
4621 }
4622
4623 /* Get the physical address of the ION BUF */
4624 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4625 if (ret) {
4626 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4627 ret);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304628 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004629 }
4630
4631 /* Populate the structure for sending scm call to load image */
4632 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4633 data->client.ihandle);
4634 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4635 pr_err("ION memory mapping for client shared buf failed\n");
4636 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304637 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004638 }
4639 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4640 data->client.sb_phys = (phys_addr_t)pa;
4641 (*handle)->dev = (void *)data;
4642 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4643 (*handle)->sbuf_len = data->client.sb_length;
4644
4645 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4646 if (!kclient_entry) {
4647 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304648 goto exit_ion_unmap_kernel;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004649 }
4650 kclient_entry->handle = *handle;
4651
4652 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4653 list_add_tail(&kclient_entry->list,
4654 &qseecom.registered_kclient_list_head);
4655 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4656
4657 mutex_unlock(&app_access_lock);
4658 return 0;
4659
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304660exit_ion_unmap_kernel:
4661 if (!IS_ERR_OR_NULL(data->client.ihandle))
4662 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
4663exit_entry_free:
4664 kfree(entry);
4665exit_ion_free:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004666 mutex_unlock(&app_access_lock);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304667 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
4668 ion_free(qseecom.ion_clnt, data->client.ihandle);
4669 data->client.ihandle = NULL;
4670 }
4671exit_data_free:
4672 kfree(data);
4673exit_handle_free:
4674 if (*handle) {
4675 kfree(*handle);
4676 *handle = NULL;
4677 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004678 return ret;
4679}
4680EXPORT_SYMBOL(qseecom_start_app);
4681
4682int qseecom_shutdown_app(struct qseecom_handle **handle)
4683{
4684 int ret = -EINVAL;
4685 struct qseecom_dev_handle *data;
4686
4687 struct qseecom_registered_kclient_list *kclient = NULL;
4688 unsigned long flags = 0;
4689 bool found_handle = false;
4690
Zhen Kongbcdeda22018-11-16 13:50:51 -08004691 __qseecom_processing_pending_lsnr_unregister();
4692
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004693 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4694 pr_err("Not allowed to be called in %d state\n",
4695 atomic_read(&qseecom.qseecom_state));
4696 return -EPERM;
4697 }
4698
4699 if ((handle == NULL) || (*handle == NULL)) {
4700 pr_err("Handle is not initialized\n");
4701 return -EINVAL;
4702 }
4703 data = (struct qseecom_dev_handle *) ((*handle)->dev);
4704 mutex_lock(&app_access_lock);
4705
4706 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4707 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
4708 list) {
4709 if (kclient->handle == (*handle)) {
4710 list_del(&kclient->list);
4711 found_handle = true;
4712 break;
4713 }
4714 }
4715 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4716 if (!found_handle)
4717 pr_err("Unable to find the handle, exiting\n");
4718 else
4719 ret = qseecom_unload_app(data, false);
4720
4721 mutex_unlock(&app_access_lock);
4722 if (ret == 0) {
4723 kzfree(data);
4724 kzfree(*handle);
4725 kzfree(kclient);
4726 *handle = NULL;
4727 }
4728
4729 return ret;
4730}
4731EXPORT_SYMBOL(qseecom_shutdown_app);
4732
4733int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
4734 uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
4735{
4736 int ret = 0;
4737 struct qseecom_send_cmd_req req = {0, 0, 0, 0};
4738 struct qseecom_dev_handle *data;
4739 bool perf_enabled = false;
4740
Zhen Kongbcdeda22018-11-16 13:50:51 -08004741 __qseecom_processing_pending_lsnr_unregister();
4742
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004743 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4744 pr_err("Not allowed to be called in %d state\n",
4745 atomic_read(&qseecom.qseecom_state));
4746 return -EPERM;
4747 }
4748
4749 if (handle == NULL) {
4750 pr_err("Handle is not initialized\n");
4751 return -EINVAL;
4752 }
4753 data = handle->dev;
4754
4755 req.cmd_req_len = sbuf_len;
4756 req.resp_len = rbuf_len;
4757 req.cmd_req_buf = send_buf;
4758 req.resp_buf = resp_buf;
4759
4760 if (__validate_send_cmd_inputs(data, &req))
4761 return -EINVAL;
4762
4763 mutex_lock(&app_access_lock);
4764 if (qseecom.support_bus_scaling) {
4765 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
4766 if (ret) {
4767 pr_err("Failed to set bw.\n");
4768 mutex_unlock(&app_access_lock);
4769 return ret;
4770 }
4771 }
4772 /*
4773 * On targets where crypto clock is handled by HLOS,
4774 * if clk_access_cnt is zero and perf_enabled is false,
4775 * then the crypto clock was not enabled before sending cmd
4776 * to tz, qseecom will enable the clock to avoid service failure.
4777 */
4778 if (!qseecom.no_clock_support &&
4779 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
4780 pr_debug("ce clock is not enabled!\n");
4781 ret = qseecom_perf_enable(data);
4782 if (ret) {
4783 pr_err("Failed to vote for clock with err %d\n",
4784 ret);
4785 mutex_unlock(&app_access_lock);
4786 return -EINVAL;
4787 }
4788 perf_enabled = true;
4789 }
4790 if (!strcmp(data->client.app_name, "securemm"))
4791 data->use_legacy_cmd = true;
4792
4793 ret = __qseecom_send_cmd(data, &req);
4794 data->use_legacy_cmd = false;
4795 if (qseecom.support_bus_scaling)
4796 __qseecom_add_bw_scale_down_timer(
4797 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
4798
4799 if (perf_enabled) {
4800 qsee_disable_clock_vote(data, CLK_DFAB);
4801 qsee_disable_clock_vote(data, CLK_SFPB);
4802 }
4803
4804 mutex_unlock(&app_access_lock);
4805
4806 if (ret)
4807 return ret;
4808
4809 pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
4810 req.resp_len, req.resp_buf);
4811 return ret;
4812}
4813EXPORT_SYMBOL(qseecom_send_command);
4814
4815int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4816{
4817 int ret = 0;
4818
4819 if ((handle == NULL) || (handle->dev == NULL)) {
4820 pr_err("No valid kernel client\n");
4821 return -EINVAL;
4822 }
4823 if (high) {
4824 if (qseecom.support_bus_scaling) {
4825 mutex_lock(&qsee_bw_mutex);
4826 __qseecom_register_bus_bandwidth_needs(handle->dev,
4827 HIGH);
4828 mutex_unlock(&qsee_bw_mutex);
4829 } else {
4830 ret = qseecom_perf_enable(handle->dev);
4831 if (ret)
4832 pr_err("Failed to vote for clock with err %d\n",
4833 ret);
4834 }
4835 } else {
4836 if (!qseecom.support_bus_scaling) {
4837 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4838 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4839 } else {
4840 mutex_lock(&qsee_bw_mutex);
4841 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4842 mutex_unlock(&qsee_bw_mutex);
4843 }
4844 }
4845 return ret;
4846}
4847EXPORT_SYMBOL(qseecom_set_bandwidth);
4848
4849int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
4850{
4851 struct qseecom_registered_app_list dummy_app_entry = { {0} };
4852 struct qseecom_dev_handle dummy_private_data = {0};
4853 struct qseecom_command_scm_resp resp;
4854 int ret = 0;
4855
4856 if (!desc) {
4857 pr_err("desc is NULL\n");
4858 return -EINVAL;
4859 }
4860
4861 resp.result = desc->ret[0]; /*req_cmd*/
Zhen Kong2f60f492017-06-29 15:22:14 -07004862 resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004863 resp.data = desc->ret[2]; /*listener_id*/
4864
Zhen Konge7f525f2017-12-01 18:26:25 -08004865 dummy_private_data.client.app_id = desc->ret[1];
4866 dummy_app_entry.app_id = desc->ret[1];
4867
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004868 mutex_lock(&app_access_lock);
Zhen Kong7458c2e2017-10-19 12:32:07 -07004869 if (qseecom.qsee_reentrancy_support)
4870 ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004871 &dummy_private_data);
Zhen Kong7458c2e2017-10-19 12:32:07 -07004872 else
4873 ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
4874 &resp);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004875 mutex_unlock(&app_access_lock);
4876 if (ret)
Zhen Kong2f60f492017-06-29 15:22:14 -07004877 pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004878 (int)desc->ret[0], (int)desc->ret[2],
4879 (int)desc->ret[1], ret);
4880 desc->ret[0] = resp.result;
4881 desc->ret[1] = resp.resp_type;
4882 desc->ret[2] = resp.data;
4883 return ret;
4884}
4885EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4886
4887static int qseecom_send_resp(void)
4888{
4889 qseecom.send_resp_flag = 1;
4890 wake_up_interruptible(&qseecom.send_resp_wq);
4891 return 0;
4892}
4893
4894static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4895{
4896 struct qseecom_registered_listener_list *this_lstnr = NULL;
4897
4898 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4899 this_lstnr = __qseecom_find_svc(data->listener.id);
4900 if (this_lstnr == NULL)
4901 return -EINVAL;
4902 qseecom.send_resp_flag = 1;
4903 this_lstnr->send_resp_flag = 1;
4904 wake_up_interruptible(&qseecom.send_resp_wq);
4905 return 0;
4906}
4907
4908static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
4909 struct qseecom_send_modfd_listener_resp *resp,
4910 struct qseecom_registered_listener_list *this_lstnr)
4911{
4912 int i;
4913
4914 if (!data || !resp || !this_lstnr) {
4915 pr_err("listener handle or resp msg is null\n");
4916 return -EINVAL;
4917 }
4918
4919 if (resp->resp_buf_ptr == NULL) {
4920 pr_err("resp buffer is null\n");
4921 return -EINVAL;
4922 }
4923 /* validate resp buf length */
4924 if ((resp->resp_len == 0) ||
4925 (resp->resp_len > this_lstnr->sb_length)) {
4926 pr_err("resp buf length %d not valid\n", resp->resp_len);
4927 return -EINVAL;
4928 }
4929
4930 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
4931 pr_err("Integer overflow in resp_len & resp_buf\n");
4932 return -EINVAL;
4933 }
4934 if ((uintptr_t)this_lstnr->user_virt_sb_base >
4935 (ULONG_MAX - this_lstnr->sb_length)) {
4936 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
4937 return -EINVAL;
4938 }
4939 /* validate resp buf */
4940 if (((uintptr_t)resp->resp_buf_ptr <
4941 (uintptr_t)this_lstnr->user_virt_sb_base) ||
4942 ((uintptr_t)resp->resp_buf_ptr >=
4943 ((uintptr_t)this_lstnr->user_virt_sb_base +
4944 this_lstnr->sb_length)) ||
4945 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
4946 ((uintptr_t)this_lstnr->user_virt_sb_base +
4947 this_lstnr->sb_length))) {
4948 pr_err("resp buf is out of shared buffer region\n");
4949 return -EINVAL;
4950 }
4951
4952 /* validate offsets */
4953 for (i = 0; i < MAX_ION_FD; i++) {
4954 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
4955 pr_err("Invalid offset %d = 0x%x\n",
4956 i, resp->ifd_data[i].cmd_buf_offset);
4957 return -EINVAL;
4958 }
4959 }
4960
4961 return 0;
4962}
4963
4964static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
4965 void __user *argp, bool is_64bit_addr)
4966{
4967 struct qseecom_send_modfd_listener_resp resp;
4968 struct qseecom_registered_listener_list *this_lstnr = NULL;
4969
4970 if (copy_from_user(&resp, argp, sizeof(resp))) {
4971 pr_err("copy_from_user failed");
4972 return -EINVAL;
4973 }
4974
4975 this_lstnr = __qseecom_find_svc(data->listener.id);
4976 if (this_lstnr == NULL)
4977 return -EINVAL;
4978
4979 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
4980 return -EINVAL;
4981
4982 resp.resp_buf_ptr = this_lstnr->sb_virt +
4983 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
4984
4985 if (!is_64bit_addr)
4986 __qseecom_update_cmd_buf(&resp, false, data);
4987 else
4988 __qseecom_update_cmd_buf_64(&resp, false, data);
4989 qseecom.send_resp_flag = 1;
4990 this_lstnr->send_resp_flag = 1;
4991 wake_up_interruptible(&qseecom.send_resp_wq);
4992 return 0;
4993}
4994
4995static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
4996 void __user *argp)
4997{
4998 return __qseecom_send_modfd_resp(data, argp, false);
4999}
5000
5001static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
5002 void __user *argp)
5003{
5004 return __qseecom_send_modfd_resp(data, argp, true);
5005}
5006
5007static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
5008 void __user *argp)
5009{
5010 struct qseecom_qseos_version_req req;
5011
5012 if (copy_from_user(&req, argp, sizeof(req))) {
5013 pr_err("copy_from_user failed");
5014 return -EINVAL;
5015 }
5016 req.qseos_version = qseecom.qseos_version;
5017 if (copy_to_user(argp, &req, sizeof(req))) {
5018 pr_err("copy_to_user failed");
5019 return -EINVAL;
5020 }
5021 return 0;
5022}
5023
5024static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
5025{
5026 int rc = 0;
5027 struct qseecom_clk *qclk = NULL;
5028
5029 if (qseecom.no_clock_support)
5030 return 0;
5031
5032 if (ce == CLK_QSEE)
5033 qclk = &qseecom.qsee;
5034 if (ce == CLK_CE_DRV)
5035 qclk = &qseecom.ce_drv;
5036
5037 if (qclk == NULL) {
5038 pr_err("CLK type not supported\n");
5039 return -EINVAL;
5040 }
5041 mutex_lock(&clk_access_lock);
5042
5043 if (qclk->clk_access_cnt == ULONG_MAX) {
5044 pr_err("clk_access_cnt beyond limitation\n");
5045 goto err;
5046 }
5047 if (qclk->clk_access_cnt > 0) {
5048 qclk->clk_access_cnt++;
5049 mutex_unlock(&clk_access_lock);
5050 return rc;
5051 }
5052
5053 /* Enable CE core clk */
5054 if (qclk->ce_core_clk != NULL) {
5055 rc = clk_prepare_enable(qclk->ce_core_clk);
5056 if (rc) {
5057 pr_err("Unable to enable/prepare CE core clk\n");
5058 goto err;
5059 }
5060 }
5061 /* Enable CE clk */
5062 if (qclk->ce_clk != NULL) {
5063 rc = clk_prepare_enable(qclk->ce_clk);
5064 if (rc) {
5065 pr_err("Unable to enable/prepare CE iface clk\n");
5066 goto ce_clk_err;
5067 }
5068 }
5069 /* Enable AXI clk */
5070 if (qclk->ce_bus_clk != NULL) {
5071 rc = clk_prepare_enable(qclk->ce_bus_clk);
5072 if (rc) {
5073 pr_err("Unable to enable/prepare CE bus clk\n");
5074 goto ce_bus_clk_err;
5075 }
5076 }
5077 qclk->clk_access_cnt++;
5078 mutex_unlock(&clk_access_lock);
5079 return 0;
5080
5081ce_bus_clk_err:
5082 if (qclk->ce_clk != NULL)
5083 clk_disable_unprepare(qclk->ce_clk);
5084ce_clk_err:
5085 if (qclk->ce_core_clk != NULL)
5086 clk_disable_unprepare(qclk->ce_core_clk);
5087err:
5088 mutex_unlock(&clk_access_lock);
5089 return -EIO;
5090}
5091
5092static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5093{
5094 struct qseecom_clk *qclk;
5095
5096 if (qseecom.no_clock_support)
5097 return;
5098
5099 if (ce == CLK_QSEE)
5100 qclk = &qseecom.qsee;
5101 else
5102 qclk = &qseecom.ce_drv;
5103
5104 mutex_lock(&clk_access_lock);
5105
5106 if (qclk->clk_access_cnt == 0) {
5107 mutex_unlock(&clk_access_lock);
5108 return;
5109 }
5110
5111 if (qclk->clk_access_cnt == 1) {
5112 if (qclk->ce_clk != NULL)
5113 clk_disable_unprepare(qclk->ce_clk);
5114 if (qclk->ce_core_clk != NULL)
5115 clk_disable_unprepare(qclk->ce_core_clk);
5116 if (qclk->ce_bus_clk != NULL)
5117 clk_disable_unprepare(qclk->ce_bus_clk);
5118 }
5119 qclk->clk_access_cnt--;
5120 mutex_unlock(&clk_access_lock);
5121}
5122
5123static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
5124 int32_t clk_type)
5125{
5126 int ret = 0;
5127 struct qseecom_clk *qclk;
5128
5129 if (qseecom.no_clock_support)
5130 return 0;
5131
5132 qclk = &qseecom.qsee;
5133 if (!qseecom.qsee_perf_client)
5134 return ret;
5135
5136 switch (clk_type) {
5137 case CLK_DFAB:
5138 mutex_lock(&qsee_bw_mutex);
5139 if (!qseecom.qsee_bw_count) {
5140 if (qseecom.qsee_sfpb_bw_count > 0)
5141 ret = msm_bus_scale_client_update_request(
5142 qseecom.qsee_perf_client, 3);
5143 else {
5144 if (qclk->ce_core_src_clk != NULL)
5145 ret = __qseecom_enable_clk(CLK_QSEE);
5146 if (!ret) {
5147 ret =
5148 msm_bus_scale_client_update_request(
5149 qseecom.qsee_perf_client, 1);
5150 if ((ret) &&
5151 (qclk->ce_core_src_clk != NULL))
5152 __qseecom_disable_clk(CLK_QSEE);
5153 }
5154 }
5155 if (ret)
5156 pr_err("DFAB Bandwidth req failed (%d)\n",
5157 ret);
5158 else {
5159 qseecom.qsee_bw_count++;
5160 data->perf_enabled = true;
5161 }
5162 } else {
5163 qseecom.qsee_bw_count++;
5164 data->perf_enabled = true;
5165 }
5166 mutex_unlock(&qsee_bw_mutex);
5167 break;
5168 case CLK_SFPB:
5169 mutex_lock(&qsee_bw_mutex);
5170 if (!qseecom.qsee_sfpb_bw_count) {
5171 if (qseecom.qsee_bw_count > 0)
5172 ret = msm_bus_scale_client_update_request(
5173 qseecom.qsee_perf_client, 3);
5174 else {
5175 if (qclk->ce_core_src_clk != NULL)
5176 ret = __qseecom_enable_clk(CLK_QSEE);
5177 if (!ret) {
5178 ret =
5179 msm_bus_scale_client_update_request(
5180 qseecom.qsee_perf_client, 2);
5181 if ((ret) &&
5182 (qclk->ce_core_src_clk != NULL))
5183 __qseecom_disable_clk(CLK_QSEE);
5184 }
5185 }
5186
5187 if (ret)
5188 pr_err("SFPB Bandwidth req failed (%d)\n",
5189 ret);
5190 else {
5191 qseecom.qsee_sfpb_bw_count++;
5192 data->fast_load_enabled = true;
5193 }
5194 } else {
5195 qseecom.qsee_sfpb_bw_count++;
5196 data->fast_load_enabled = true;
5197 }
5198 mutex_unlock(&qsee_bw_mutex);
5199 break;
5200 default:
5201 pr_err("Clock type not defined\n");
5202 break;
5203 }
5204 return ret;
5205}
5206
5207static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5208 int32_t clk_type)
5209{
5210 int32_t ret = 0;
5211 struct qseecom_clk *qclk;
5212
5213 qclk = &qseecom.qsee;
5214
5215 if (qseecom.no_clock_support)
5216 return;
5217 if (!qseecom.qsee_perf_client)
5218 return;
5219
5220 switch (clk_type) {
5221 case CLK_DFAB:
5222 mutex_lock(&qsee_bw_mutex);
5223 if (qseecom.qsee_bw_count == 0) {
5224 pr_err("Client error.Extra call to disable DFAB clk\n");
5225 mutex_unlock(&qsee_bw_mutex);
5226 return;
5227 }
5228
5229 if (qseecom.qsee_bw_count == 1) {
5230 if (qseecom.qsee_sfpb_bw_count > 0)
5231 ret = msm_bus_scale_client_update_request(
5232 qseecom.qsee_perf_client, 2);
5233 else {
5234 ret = msm_bus_scale_client_update_request(
5235 qseecom.qsee_perf_client, 0);
5236 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5237 __qseecom_disable_clk(CLK_QSEE);
5238 }
5239 if (ret)
5240 pr_err("SFPB Bandwidth req fail (%d)\n",
5241 ret);
5242 else {
5243 qseecom.qsee_bw_count--;
5244 data->perf_enabled = false;
5245 }
5246 } else {
5247 qseecom.qsee_bw_count--;
5248 data->perf_enabled = false;
5249 }
5250 mutex_unlock(&qsee_bw_mutex);
5251 break;
5252 case CLK_SFPB:
5253 mutex_lock(&qsee_bw_mutex);
5254 if (qseecom.qsee_sfpb_bw_count == 0) {
5255 pr_err("Client error.Extra call to disable SFPB clk\n");
5256 mutex_unlock(&qsee_bw_mutex);
5257 return;
5258 }
5259 if (qseecom.qsee_sfpb_bw_count == 1) {
5260 if (qseecom.qsee_bw_count > 0)
5261 ret = msm_bus_scale_client_update_request(
5262 qseecom.qsee_perf_client, 1);
5263 else {
5264 ret = msm_bus_scale_client_update_request(
5265 qseecom.qsee_perf_client, 0);
5266 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5267 __qseecom_disable_clk(CLK_QSEE);
5268 }
5269 if (ret)
5270 pr_err("SFPB Bandwidth req fail (%d)\n",
5271 ret);
5272 else {
5273 qseecom.qsee_sfpb_bw_count--;
5274 data->fast_load_enabled = false;
5275 }
5276 } else {
5277 qseecom.qsee_sfpb_bw_count--;
5278 data->fast_load_enabled = false;
5279 }
5280 mutex_unlock(&qsee_bw_mutex);
5281 break;
5282 default:
5283 pr_err("Clock type not defined\n");
5284 break;
5285 }
5286
5287}
5288
5289static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5290 void __user *argp)
5291{
5292 struct ion_handle *ihandle; /* Ion handle */
5293 struct qseecom_load_img_req load_img_req;
5294 int uret = 0;
5295 int ret;
5296 ion_phys_addr_t pa = 0;
5297 size_t len;
5298 struct qseecom_load_app_ireq load_req;
5299 struct qseecom_load_app_64bit_ireq load_req_64bit;
5300 struct qseecom_command_scm_resp resp;
5301 void *cmd_buf = NULL;
5302 size_t cmd_len;
5303 /* Copy the relevant information needed for loading the image */
5304 if (copy_from_user(&load_img_req,
5305 (void __user *)argp,
5306 sizeof(struct qseecom_load_img_req))) {
5307 pr_err("copy_from_user failed\n");
5308 return -EFAULT;
5309 }
5310
5311 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005312 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005313 load_img_req.ifd_data_fd);
5314 if (IS_ERR_OR_NULL(ihandle)) {
5315 pr_err("Ion client could not retrieve the handle\n");
5316 return -ENOMEM;
5317 }
5318
5319 /* Get the physical address of the ION BUF */
5320 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5321 if (ret) {
5322 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5323 ret);
5324 return ret;
5325 }
5326 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5327 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5328 len, load_img_req.mdt_len,
5329 load_img_req.img_len);
5330 return ret;
5331 }
5332 /* Populate the structure for sending scm call to load image */
5333 if (qseecom.qsee_version < QSEE_VERSION_40) {
5334 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5335 load_req.mdt_len = load_img_req.mdt_len;
5336 load_req.img_len = load_img_req.img_len;
5337 load_req.phy_addr = (uint32_t)pa;
5338 cmd_buf = (void *)&load_req;
5339 cmd_len = sizeof(struct qseecom_load_app_ireq);
5340 } else {
5341 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5342 load_req_64bit.mdt_len = load_img_req.mdt_len;
5343 load_req_64bit.img_len = load_img_req.img_len;
5344 load_req_64bit.phy_addr = (uint64_t)pa;
5345 cmd_buf = (void *)&load_req_64bit;
5346 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5347 }
5348
5349 if (qseecom.support_bus_scaling) {
5350 mutex_lock(&qsee_bw_mutex);
5351 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5352 mutex_unlock(&qsee_bw_mutex);
5353 if (ret) {
5354 ret = -EIO;
5355 goto exit_cpu_restore;
5356 }
5357 }
5358
5359 /* Vote for the SFPB clock */
5360 ret = __qseecom_enable_clk_scale_up(data);
5361 if (ret) {
5362 ret = -EIO;
5363 goto exit_register_bus_bandwidth_needs;
5364 }
5365 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5366 ION_IOC_CLEAN_INV_CACHES);
5367 if (ret) {
5368 pr_err("cache operation failed %d\n", ret);
5369 goto exit_disable_clock;
5370 }
5371 /* SCM_CALL to load the external elf */
5372 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5373 &resp, sizeof(resp));
5374 if (ret) {
5375 pr_err("scm_call to load failed : ret %d\n",
5376 ret);
5377 ret = -EFAULT;
5378 goto exit_disable_clock;
5379 }
5380
5381 switch (resp.result) {
5382 case QSEOS_RESULT_SUCCESS:
5383 break;
5384 case QSEOS_RESULT_INCOMPLETE:
5385 pr_err("%s: qseos result incomplete\n", __func__);
5386 ret = __qseecom_process_incomplete_cmd(data, &resp);
5387 if (ret)
5388 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5389 break;
5390 case QSEOS_RESULT_FAILURE:
5391 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5392 ret = -EFAULT;
5393 break;
5394 default:
5395 pr_err("scm_call response result %d not supported\n",
5396 resp.result);
5397 ret = -EFAULT;
5398 break;
5399 }
5400
5401exit_disable_clock:
5402 __qseecom_disable_clk_scale_down(data);
5403
5404exit_register_bus_bandwidth_needs:
5405 if (qseecom.support_bus_scaling) {
5406 mutex_lock(&qsee_bw_mutex);
5407 uret = qseecom_unregister_bus_bandwidth_needs(data);
5408 mutex_unlock(&qsee_bw_mutex);
5409 if (uret)
5410 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5411 uret, ret);
5412 }
5413
5414exit_cpu_restore:
5415 /* Deallocate the handle */
5416 if (!IS_ERR_OR_NULL(ihandle))
5417 ion_free(qseecom.ion_clnt, ihandle);
5418 return ret;
5419}
5420
5421static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5422{
5423 int ret = 0;
5424 struct qseecom_command_scm_resp resp;
5425 struct qseecom_unload_app_ireq req;
5426
5427 /* unavailable client app */
5428 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5429
5430 /* Populate the structure for sending scm call to unload image */
5431 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5432
5433 /* SCM_CALL to unload the external elf */
5434 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5435 sizeof(struct qseecom_unload_app_ireq),
5436 &resp, sizeof(resp));
5437 if (ret) {
5438 pr_err("scm_call to unload failed : ret %d\n",
5439 ret);
5440 ret = -EFAULT;
5441 goto qseecom_unload_external_elf_scm_err;
5442 }
5443 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5444 ret = __qseecom_process_incomplete_cmd(data, &resp);
5445 if (ret)
5446 pr_err("process_incomplete_cmd fail err: %d\n",
5447 ret);
5448 } else {
5449 if (resp.result != QSEOS_RESULT_SUCCESS) {
5450 pr_err("scm_call to unload image failed resp.result =%d\n",
5451 resp.result);
5452 ret = -EFAULT;
5453 }
5454 }
5455
5456qseecom_unload_external_elf_scm_err:
5457
5458 return ret;
5459}
5460
5461static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5462 void __user *argp)
5463{
5464
5465 int32_t ret;
5466 struct qseecom_qseos_app_load_query query_req;
5467 struct qseecom_check_app_ireq req;
5468 struct qseecom_registered_app_list *entry = NULL;
5469 unsigned long flags = 0;
5470 uint32_t app_arch = 0, app_id = 0;
5471 bool found_app = false;
5472
5473 /* Copy the relevant information needed for loading the image */
5474 if (copy_from_user(&query_req,
5475 (void __user *)argp,
5476 sizeof(struct qseecom_qseos_app_load_query))) {
5477 pr_err("copy_from_user failed\n");
5478 return -EFAULT;
5479 }
5480
5481 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5482 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5483 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5484
5485 ret = __qseecom_check_app_exists(req, &app_id);
5486 if (ret) {
5487 pr_err(" scm call to check if app is loaded failed");
5488 return ret; /* scm call failed */
5489 }
5490 if (app_id) {
5491 pr_debug("App id %d (%s) already exists\n", app_id,
5492 (char *)(req.app_name));
5493 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5494 list_for_each_entry(entry,
5495 &qseecom.registered_app_list_head, list){
5496 if (entry->app_id == app_id) {
5497 app_arch = entry->app_arch;
5498 entry->ref_cnt++;
5499 found_app = true;
5500 break;
5501 }
5502 }
5503 spin_unlock_irqrestore(
5504 &qseecom.registered_app_list_lock, flags);
5505 data->client.app_id = app_id;
5506 query_req.app_id = app_id;
5507 if (app_arch) {
5508 data->client.app_arch = app_arch;
5509 query_req.app_arch = app_arch;
5510 } else {
5511 data->client.app_arch = 0;
5512 query_req.app_arch = 0;
5513 }
5514 strlcpy(data->client.app_name, query_req.app_name,
5515 MAX_APP_NAME_SIZE);
5516 /*
5517 * If app was loaded by appsbl before and was not registered,
5518 * regiser this app now.
5519 */
5520 if (!found_app) {
5521 pr_debug("Register app %d [%s] which was loaded before\n",
5522 ret, (char *)query_req.app_name);
5523 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5524 if (!entry) {
5525 pr_err("kmalloc for app entry failed\n");
5526 return -ENOMEM;
5527 }
5528 entry->app_id = app_id;
5529 entry->ref_cnt = 1;
5530 entry->app_arch = data->client.app_arch;
5531 strlcpy(entry->app_name, data->client.app_name,
5532 MAX_APP_NAME_SIZE);
5533 entry->app_blocked = false;
5534 entry->blocked_on_listener_id = 0;
Zhen Kongdea10592018-07-30 17:50:10 -07005535 entry->check_block = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005536 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5537 flags);
5538 list_add_tail(&entry->list,
5539 &qseecom.registered_app_list_head);
5540 spin_unlock_irqrestore(
5541 &qseecom.registered_app_list_lock, flags);
5542 }
5543 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5544 pr_err("copy_to_user failed\n");
5545 return -EFAULT;
5546 }
5547 return -EEXIST; /* app already loaded */
5548 } else {
5549 return 0; /* app not loaded */
5550 }
5551}
5552
5553static int __qseecom_get_ce_pipe_info(
5554 enum qseecom_key_management_usage_type usage,
5555 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5556{
5557 int ret = -EINVAL;
5558 int i, j;
5559 struct qseecom_ce_info_use *p = NULL;
5560 int total = 0;
5561 struct qseecom_ce_pipe_entry *pcepipe;
5562
5563 switch (usage) {
5564 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5565 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5566 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5567 if (qseecom.support_fde) {
5568 p = qseecom.ce_info.fde;
5569 total = qseecom.ce_info.num_fde;
5570 } else {
5571 pr_err("system does not support fde\n");
5572 return -EINVAL;
5573 }
5574 break;
5575 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5576 if (qseecom.support_pfe) {
5577 p = qseecom.ce_info.pfe;
5578 total = qseecom.ce_info.num_pfe;
5579 } else {
5580 pr_err("system does not support pfe\n");
5581 return -EINVAL;
5582 }
5583 break;
5584 default:
5585 pr_err("unsupported usage %d\n", usage);
5586 return -EINVAL;
5587 }
5588
5589 for (j = 0; j < total; j++) {
5590 if (p->unit_num == unit) {
5591 pcepipe = p->ce_pipe_entry;
5592 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5593 (*ce_hw)[i] = pcepipe->ce_num;
5594 *pipe = pcepipe->ce_pipe_pair;
5595 pcepipe++;
5596 }
5597 ret = 0;
5598 break;
5599 }
5600 p++;
5601 }
5602 return ret;
5603}
5604
5605static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
5606 enum qseecom_key_management_usage_type usage,
5607 struct qseecom_key_generate_ireq *ireq)
5608{
5609 struct qseecom_command_scm_resp resp;
5610 int ret;
5611
5612 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5613 usage >= QSEOS_KM_USAGE_MAX) {
5614 pr_err("Error:: unsupported usage %d\n", usage);
5615 return -EFAULT;
5616 }
5617 ret = __qseecom_enable_clk(CLK_QSEE);
5618 if (ret)
5619 return ret;
5620
5621 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5622 ireq, sizeof(struct qseecom_key_generate_ireq),
5623 &resp, sizeof(resp));
5624 if (ret) {
5625 if (ret == -EINVAL &&
5626 resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5627 pr_debug("Key ID exists.\n");
5628 ret = 0;
5629 } else {
5630 pr_err("scm call to generate key failed : %d\n", ret);
5631 ret = -EFAULT;
5632 }
5633 goto generate_key_exit;
5634 }
5635
5636 switch (resp.result) {
5637 case QSEOS_RESULT_SUCCESS:
5638 break;
5639 case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
5640 pr_debug("Key ID exists.\n");
5641 break;
5642 case QSEOS_RESULT_INCOMPLETE:
5643 ret = __qseecom_process_incomplete_cmd(data, &resp);
5644 if (ret) {
5645 if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5646 pr_debug("Key ID exists.\n");
5647 ret = 0;
5648 } else {
5649 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5650 resp.result);
5651 }
5652 }
5653 break;
5654 case QSEOS_RESULT_FAILURE:
5655 default:
5656 pr_err("gen key scm call failed resp.result %d\n", resp.result);
5657 ret = -EINVAL;
5658 break;
5659 }
5660generate_key_exit:
5661 __qseecom_disable_clk(CLK_QSEE);
5662 return ret;
5663}
5664
5665static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
5666 enum qseecom_key_management_usage_type usage,
5667 struct qseecom_key_delete_ireq *ireq)
5668{
5669 struct qseecom_command_scm_resp resp;
5670 int ret;
5671
5672 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5673 usage >= QSEOS_KM_USAGE_MAX) {
5674 pr_err("Error:: unsupported usage %d\n", usage);
5675 return -EFAULT;
5676 }
5677 ret = __qseecom_enable_clk(CLK_QSEE);
5678 if (ret)
5679 return ret;
5680
5681 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5682 ireq, sizeof(struct qseecom_key_delete_ireq),
5683 &resp, sizeof(struct qseecom_command_scm_resp));
5684 if (ret) {
5685 if (ret == -EINVAL &&
5686 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5687 pr_debug("Max attempts to input password reached.\n");
5688 ret = -ERANGE;
5689 } else {
5690 pr_err("scm call to delete key failed : %d\n", ret);
5691 ret = -EFAULT;
5692 }
5693 goto del_key_exit;
5694 }
5695
5696 switch (resp.result) {
5697 case QSEOS_RESULT_SUCCESS:
5698 break;
5699 case QSEOS_RESULT_INCOMPLETE:
5700 ret = __qseecom_process_incomplete_cmd(data, &resp);
5701 if (ret) {
5702 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5703 resp.result);
5704 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5705 pr_debug("Max attempts to input password reached.\n");
5706 ret = -ERANGE;
5707 }
5708 }
5709 break;
5710 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5711 pr_debug("Max attempts to input password reached.\n");
5712 ret = -ERANGE;
5713 break;
5714 case QSEOS_RESULT_FAILURE:
5715 default:
5716 pr_err("Delete key scm call failed resp.result %d\n",
5717 resp.result);
5718 ret = -EINVAL;
5719 break;
5720 }
5721del_key_exit:
5722 __qseecom_disable_clk(CLK_QSEE);
5723 return ret;
5724}
5725
5726static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5727 enum qseecom_key_management_usage_type usage,
5728 struct qseecom_key_select_ireq *ireq)
5729{
5730 struct qseecom_command_scm_resp resp;
5731 int ret;
5732
5733 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5734 usage >= QSEOS_KM_USAGE_MAX) {
5735 pr_err("Error:: unsupported usage %d\n", usage);
5736 return -EFAULT;
5737 }
5738 ret = __qseecom_enable_clk(CLK_QSEE);
5739 if (ret)
5740 return ret;
5741
5742 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5743 ret = __qseecom_enable_clk(CLK_CE_DRV);
5744 if (ret)
5745 return ret;
5746 }
5747
5748 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5749 ireq, sizeof(struct qseecom_key_select_ireq),
5750 &resp, sizeof(struct qseecom_command_scm_resp));
5751 if (ret) {
5752 if (ret == -EINVAL &&
5753 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5754 pr_debug("Max attempts to input password reached.\n");
5755 ret = -ERANGE;
5756 } else if (ret == -EINVAL &&
5757 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5758 pr_debug("Set Key operation under processing...\n");
5759 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5760 } else {
5761 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5762 ret);
5763 ret = -EFAULT;
5764 }
5765 goto set_key_exit;
5766 }
5767
5768 switch (resp.result) {
5769 case QSEOS_RESULT_SUCCESS:
5770 break;
5771 case QSEOS_RESULT_INCOMPLETE:
5772 ret = __qseecom_process_incomplete_cmd(data, &resp);
5773 if (ret) {
5774 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5775 resp.result);
5776 if (resp.result ==
5777 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5778 pr_debug("Set Key operation under processing...\n");
5779 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5780 }
5781 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5782 pr_debug("Max attempts to input password reached.\n");
5783 ret = -ERANGE;
5784 }
5785 }
5786 break;
5787 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5788 pr_debug("Max attempts to input password reached.\n");
5789 ret = -ERANGE;
5790 break;
5791 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5792 pr_debug("Set Key operation under processing...\n");
5793 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5794 break;
5795 case QSEOS_RESULT_FAILURE:
5796 default:
5797 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5798 ret = -EINVAL;
5799 break;
5800 }
5801set_key_exit:
5802 __qseecom_disable_clk(CLK_QSEE);
5803 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5804 __qseecom_disable_clk(CLK_CE_DRV);
5805 return ret;
5806}
5807
5808static int __qseecom_update_current_key_user_info(
5809 struct qseecom_dev_handle *data,
5810 enum qseecom_key_management_usage_type usage,
5811 struct qseecom_key_userinfo_update_ireq *ireq)
5812{
5813 struct qseecom_command_scm_resp resp;
5814 int ret;
5815
5816 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5817 usage >= QSEOS_KM_USAGE_MAX) {
5818 pr_err("Error:: unsupported usage %d\n", usage);
5819 return -EFAULT;
5820 }
5821 ret = __qseecom_enable_clk(CLK_QSEE);
5822 if (ret)
5823 return ret;
5824
5825 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5826 ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
5827 &resp, sizeof(struct qseecom_command_scm_resp));
5828 if (ret) {
5829 if (ret == -EINVAL &&
5830 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5831 pr_debug("Set Key operation under processing...\n");
5832 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5833 } else {
5834 pr_err("scm call to update key userinfo failed: %d\n",
5835 ret);
5836 __qseecom_disable_clk(CLK_QSEE);
5837 return -EFAULT;
5838 }
5839 }
5840
5841 switch (resp.result) {
5842 case QSEOS_RESULT_SUCCESS:
5843 break;
5844 case QSEOS_RESULT_INCOMPLETE:
5845 ret = __qseecom_process_incomplete_cmd(data, &resp);
5846 if (resp.result ==
5847 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5848 pr_debug("Set Key operation under processing...\n");
5849 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5850 }
5851 if (ret)
5852 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5853 resp.result);
5854 break;
5855 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5856 pr_debug("Update Key operation under processing...\n");
5857 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5858 break;
5859 case QSEOS_RESULT_FAILURE:
5860 default:
5861 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5862 ret = -EINVAL;
5863 break;
5864 }
5865
5866 __qseecom_disable_clk(CLK_QSEE);
5867 return ret;
5868}
5869
5870
5871static int qseecom_enable_ice_setup(int usage)
5872{
5873 int ret = 0;
5874
5875 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5876 ret = qcom_ice_setup_ice_hw("ufs", true);
5877 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5878 ret = qcom_ice_setup_ice_hw("sdcc", true);
5879
5880 return ret;
5881}
5882
5883static int qseecom_disable_ice_setup(int usage)
5884{
5885 int ret = 0;
5886
5887 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5888 ret = qcom_ice_setup_ice_hw("ufs", false);
5889 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5890 ret = qcom_ice_setup_ice_hw("sdcc", false);
5891
5892 return ret;
5893}
5894
5895static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5896{
5897 struct qseecom_ce_info_use *pce_info_use, *p;
5898 int total = 0;
5899 int i;
5900
5901 switch (usage) {
5902 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5903 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5904 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5905 p = qseecom.ce_info.fde;
5906 total = qseecom.ce_info.num_fde;
5907 break;
5908 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5909 p = qseecom.ce_info.pfe;
5910 total = qseecom.ce_info.num_pfe;
5911 break;
5912 default:
5913 pr_err("unsupported usage %d\n", usage);
5914 return -EINVAL;
5915 }
5916
5917 pce_info_use = NULL;
5918
5919 for (i = 0; i < total; i++) {
5920 if (p->unit_num == unit) {
5921 pce_info_use = p;
5922 break;
5923 }
5924 p++;
5925 }
5926 if (!pce_info_use) {
5927 pr_err("can not find %d\n", unit);
5928 return -EINVAL;
5929 }
5930 return pce_info_use->num_ce_pipe_entries;
5931}
5932
5933static int qseecom_create_key(struct qseecom_dev_handle *data,
5934 void __user *argp)
5935{
5936 int i;
5937 uint32_t *ce_hw = NULL;
5938 uint32_t pipe = 0;
5939 int ret = 0;
5940 uint32_t flags = 0;
5941 struct qseecom_create_key_req create_key_req;
5942 struct qseecom_key_generate_ireq generate_key_ireq;
5943 struct qseecom_key_select_ireq set_key_ireq;
5944 uint32_t entries = 0;
5945
5946 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
5947 if (ret) {
5948 pr_err("copy_from_user failed\n");
5949 return ret;
5950 }
5951
5952 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5953 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5954 pr_err("unsupported usage %d\n", create_key_req.usage);
5955 ret = -EFAULT;
5956 return ret;
5957 }
5958 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5959 create_key_req.usage);
5960 if (entries <= 0) {
5961 pr_err("no ce instance for usage %d instance %d\n",
5962 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
5963 ret = -EINVAL;
5964 return ret;
5965 }
5966
5967 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5968 if (!ce_hw) {
5969 ret = -ENOMEM;
5970 return ret;
5971 }
5972 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
5973 DEFAULT_CE_INFO_UNIT);
5974 if (ret) {
5975 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5976 ret = -EINVAL;
5977 goto free_buf;
5978 }
5979
5980 if (qseecom.fde_key_size)
5981 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
5982 else
5983 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
5984
Jiten Patela7bb1d52018-05-11 12:34:26 +05305985 if (qseecom.enable_key_wrap_in_ks == true)
5986 flags |= ENABLE_KEY_WRAP_IN_KS;
5987
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005988 generate_key_ireq.flags = flags;
5989 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
5990 memset((void *)generate_key_ireq.key_id,
5991 0, QSEECOM_KEY_ID_SIZE);
5992 memset((void *)generate_key_ireq.hash32,
5993 0, QSEECOM_HASH_SIZE);
5994 memcpy((void *)generate_key_ireq.key_id,
5995 (void *)key_id_array[create_key_req.usage].desc,
5996 QSEECOM_KEY_ID_SIZE);
5997 memcpy((void *)generate_key_ireq.hash32,
5998 (void *)create_key_req.hash32,
5999 QSEECOM_HASH_SIZE);
6000
6001 ret = __qseecom_generate_and_save_key(data,
6002 create_key_req.usage, &generate_key_ireq);
6003 if (ret) {
6004 pr_err("Failed to generate key on storage: %d\n", ret);
6005 goto free_buf;
6006 }
6007
6008 for (i = 0; i < entries; i++) {
6009 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6010 if (create_key_req.usage ==
6011 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6012 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6013 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6014
6015 } else if (create_key_req.usage ==
6016 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6017 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6018 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6019
6020 } else {
6021 set_key_ireq.ce = ce_hw[i];
6022 set_key_ireq.pipe = pipe;
6023 }
6024 set_key_ireq.flags = flags;
6025
6026 /* set both PIPE_ENC and PIPE_ENC_XTS*/
6027 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6028 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6029 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6030 memcpy((void *)set_key_ireq.key_id,
6031 (void *)key_id_array[create_key_req.usage].desc,
6032 QSEECOM_KEY_ID_SIZE);
6033 memcpy((void *)set_key_ireq.hash32,
6034 (void *)create_key_req.hash32,
6035 QSEECOM_HASH_SIZE);
6036 /*
6037 * It will return false if it is GPCE based crypto instance or
6038 * ICE is setup properly
6039 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006040 ret = qseecom_enable_ice_setup(create_key_req.usage);
6041 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006042 goto free_buf;
6043
6044 do {
6045 ret = __qseecom_set_clear_ce_key(data,
6046 create_key_req.usage,
6047 &set_key_ireq);
6048 /*
6049 * wait a little before calling scm again to let other
6050 * processes run
6051 */
6052 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6053 msleep(50);
6054
6055 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6056
6057 qseecom_disable_ice_setup(create_key_req.usage);
6058
6059 if (ret) {
6060 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
6061 pipe, ce_hw[i], ret);
6062 goto free_buf;
6063 } else {
6064 pr_err("Set the key successfully\n");
6065 if ((create_key_req.usage ==
6066 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
6067 (create_key_req.usage ==
6068 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
6069 goto free_buf;
6070 }
6071 }
6072
6073free_buf:
6074 kzfree(ce_hw);
6075 return ret;
6076}
6077
6078static int qseecom_wipe_key(struct qseecom_dev_handle *data,
6079 void __user *argp)
6080{
6081 uint32_t *ce_hw = NULL;
6082 uint32_t pipe = 0;
6083 int ret = 0;
6084 uint32_t flags = 0;
6085 int i, j;
6086 struct qseecom_wipe_key_req wipe_key_req;
6087 struct qseecom_key_delete_ireq delete_key_ireq;
6088 struct qseecom_key_select_ireq clear_key_ireq;
6089 uint32_t entries = 0;
6090
6091 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6092 if (ret) {
6093 pr_err("copy_from_user failed\n");
6094 return ret;
6095 }
6096
6097 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6098 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6099 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6100 ret = -EFAULT;
6101 return ret;
6102 }
6103
6104 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6105 wipe_key_req.usage);
6106 if (entries <= 0) {
6107 pr_err("no ce instance for usage %d instance %d\n",
6108 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6109 ret = -EINVAL;
6110 return ret;
6111 }
6112
6113 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6114 if (!ce_hw) {
6115 ret = -ENOMEM;
6116 return ret;
6117 }
6118
6119 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6120 DEFAULT_CE_INFO_UNIT);
6121 if (ret) {
6122 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6123 ret = -EINVAL;
6124 goto free_buf;
6125 }
6126
6127 if (wipe_key_req.wipe_key_flag) {
6128 delete_key_ireq.flags = flags;
6129 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6130 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6131 memcpy((void *)delete_key_ireq.key_id,
6132 (void *)key_id_array[wipe_key_req.usage].desc,
6133 QSEECOM_KEY_ID_SIZE);
6134 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6135
6136 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6137 &delete_key_ireq);
6138 if (ret) {
6139 pr_err("Failed to delete key from ssd storage: %d\n",
6140 ret);
6141 ret = -EFAULT;
6142 goto free_buf;
6143 }
6144 }
6145
6146 for (j = 0; j < entries; j++) {
6147 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6148 if (wipe_key_req.usage ==
6149 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6150 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6151 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6152 } else if (wipe_key_req.usage ==
6153 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6154 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6155 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6156 } else {
6157 clear_key_ireq.ce = ce_hw[j];
6158 clear_key_ireq.pipe = pipe;
6159 }
6160 clear_key_ireq.flags = flags;
6161 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6162 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6163 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6164 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6165
6166 /*
6167 * It will return false if it is GPCE based crypto instance or
6168 * ICE is setup properly
6169 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006170 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6171 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006172 goto free_buf;
6173
6174 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6175 &clear_key_ireq);
6176
6177 qseecom_disable_ice_setup(wipe_key_req.usage);
6178
6179 if (ret) {
6180 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6181 pipe, ce_hw[j], ret);
6182 ret = -EFAULT;
6183 goto free_buf;
6184 }
6185 }
6186
6187free_buf:
6188 kzfree(ce_hw);
6189 return ret;
6190}
6191
6192static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6193 void __user *argp)
6194{
6195 int ret = 0;
6196 uint32_t flags = 0;
6197 struct qseecom_update_key_userinfo_req update_key_req;
6198 struct qseecom_key_userinfo_update_ireq ireq;
6199
6200 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6201 if (ret) {
6202 pr_err("copy_from_user failed\n");
6203 return ret;
6204 }
6205
6206 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6207 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6208 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6209 return -EFAULT;
6210 }
6211
6212 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6213
6214 if (qseecom.fde_key_size)
6215 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6216 else
6217 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6218
6219 ireq.flags = flags;
6220 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6221 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6222 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6223 memcpy((void *)ireq.key_id,
6224 (void *)key_id_array[update_key_req.usage].desc,
6225 QSEECOM_KEY_ID_SIZE);
6226 memcpy((void *)ireq.current_hash32,
6227 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6228 memcpy((void *)ireq.new_hash32,
6229 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6230
6231 do {
6232 ret = __qseecom_update_current_key_user_info(data,
6233 update_key_req.usage,
6234 &ireq);
6235 /*
6236 * wait a little before calling scm again to let other
6237 * processes run
6238 */
6239 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6240 msleep(50);
6241
6242 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6243 if (ret) {
6244 pr_err("Failed to update key info: %d\n", ret);
6245 return ret;
6246 }
6247 return ret;
6248
6249}
6250static int qseecom_is_es_activated(void __user *argp)
6251{
Zhen Kong26e62742018-05-04 17:19:06 -07006252 struct qseecom_is_es_activated_req req = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006253 struct qseecom_command_scm_resp resp;
6254 int ret;
6255
6256 if (qseecom.qsee_version < QSEE_VERSION_04) {
6257 pr_err("invalid qsee version\n");
6258 return -ENODEV;
6259 }
6260
6261 if (argp == NULL) {
6262 pr_err("arg is null\n");
6263 return -EINVAL;
6264 }
6265
6266 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6267 &req, sizeof(req), &resp, sizeof(resp));
6268 if (ret) {
6269 pr_err("scm_call failed\n");
6270 return ret;
6271 }
6272
6273 req.is_activated = resp.result;
6274 ret = copy_to_user(argp, &req, sizeof(req));
6275 if (ret) {
6276 pr_err("copy_to_user failed\n");
6277 return ret;
6278 }
6279
6280 return 0;
6281}
6282
6283static int qseecom_save_partition_hash(void __user *argp)
6284{
6285 struct qseecom_save_partition_hash_req req;
6286 struct qseecom_command_scm_resp resp;
6287 int ret;
6288
6289 memset(&resp, 0x00, sizeof(resp));
6290
6291 if (qseecom.qsee_version < QSEE_VERSION_04) {
6292 pr_err("invalid qsee version\n");
6293 return -ENODEV;
6294 }
6295
6296 if (argp == NULL) {
6297 pr_err("arg is null\n");
6298 return -EINVAL;
6299 }
6300
6301 ret = copy_from_user(&req, argp, sizeof(req));
6302 if (ret) {
6303 pr_err("copy_from_user failed\n");
6304 return ret;
6305 }
6306
6307 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6308 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6309 if (ret) {
6310 pr_err("qseecom_scm_call failed\n");
6311 return ret;
6312 }
6313
6314 return 0;
6315}
6316
6317static int qseecom_mdtp_cipher_dip(void __user *argp)
6318{
6319 struct qseecom_mdtp_cipher_dip_req req;
6320 u32 tzbuflenin, tzbuflenout;
6321 char *tzbufin = NULL, *tzbufout = NULL;
6322 struct scm_desc desc = {0};
6323 int ret;
6324
6325 do {
6326 /* Copy the parameters from userspace */
6327 if (argp == NULL) {
6328 pr_err("arg is null\n");
6329 ret = -EINVAL;
6330 break;
6331 }
6332
6333 ret = copy_from_user(&req, argp, sizeof(req));
6334 if (ret) {
6335 pr_err("copy_from_user failed, ret= %d\n", ret);
6336 break;
6337 }
6338
6339 if (req.in_buf == NULL || req.out_buf == NULL ||
6340 req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
6341 req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
6342 req.direction > 1) {
6343 pr_err("invalid parameters\n");
6344 ret = -EINVAL;
6345 break;
6346 }
6347
6348 /* Copy the input buffer from userspace to kernel space */
6349 tzbuflenin = PAGE_ALIGN(req.in_buf_size);
6350 tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
6351 if (!tzbufin) {
6352 pr_err("error allocating in buffer\n");
6353 ret = -ENOMEM;
6354 break;
6355 }
6356
6357 ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
6358 if (ret) {
6359 pr_err("copy_from_user failed, ret=%d\n", ret);
6360 break;
6361 }
6362
6363 dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
6364
6365 /* Prepare the output buffer in kernel space */
6366 tzbuflenout = PAGE_ALIGN(req.out_buf_size);
6367 tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
6368 if (!tzbufout) {
6369 pr_err("error allocating out buffer\n");
6370 ret = -ENOMEM;
6371 break;
6372 }
6373
6374 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6375
6376 /* Send the command to TZ */
6377 desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
6378 desc.args[0] = virt_to_phys(tzbufin);
6379 desc.args[1] = req.in_buf_size;
6380 desc.args[2] = virt_to_phys(tzbufout);
6381 desc.args[3] = req.out_buf_size;
6382 desc.args[4] = req.direction;
6383
6384 ret = __qseecom_enable_clk(CLK_QSEE);
6385 if (ret)
6386 break;
6387
6388 ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);
6389
6390 __qseecom_disable_clk(CLK_QSEE);
6391
6392 if (ret) {
6393 pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
6394 ret);
6395 break;
6396 }
6397
6398 /* Copy the output buffer from kernel space to userspace */
6399 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6400 ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
6401 if (ret) {
6402 pr_err("copy_to_user failed, ret=%d\n", ret);
6403 break;
6404 }
6405 } while (0);
6406
6407 kzfree(tzbufin);
6408 kzfree(tzbufout);
6409
6410 return ret;
6411}
6412
6413static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
6414 struct qseecom_qteec_req *req)
6415{
6416 if (!data || !data->client.ihandle) {
6417 pr_err("Client or client handle is not initialized\n");
6418 return -EINVAL;
6419 }
6420
6421 if (data->type != QSEECOM_CLIENT_APP)
6422 return -EFAULT;
6423
6424 if (req->req_len > UINT_MAX - req->resp_len) {
6425 pr_err("Integer overflow detected in req_len & rsp_len\n");
6426 return -EINVAL;
6427 }
6428
6429 if (req->req_len + req->resp_len > data->client.sb_length) {
6430 pr_debug("Not enough memory to fit cmd_buf.\n");
6431 pr_debug("resp_buf. Required: %u, Available: %zu\n",
6432 (req->req_len + req->resp_len), data->client.sb_length);
6433 return -ENOMEM;
6434 }
6435
6436 if (req->req_ptr == NULL || req->resp_ptr == NULL) {
6437 pr_err("cmd buffer or response buffer is null\n");
6438 return -EINVAL;
6439 }
6440 if (((uintptr_t)req->req_ptr <
6441 data->client.user_virt_sb_base) ||
6442 ((uintptr_t)req->req_ptr >=
6443 (data->client.user_virt_sb_base + data->client.sb_length))) {
6444 pr_err("cmd buffer address not within shared bufffer\n");
6445 return -EINVAL;
6446 }
6447
6448 if (((uintptr_t)req->resp_ptr <
6449 data->client.user_virt_sb_base) ||
6450 ((uintptr_t)req->resp_ptr >=
6451 (data->client.user_virt_sb_base + data->client.sb_length))) {
6452 pr_err("response buffer address not within shared bufffer\n");
6453 return -EINVAL;
6454 }
6455
6456 if ((req->req_len == 0) || (req->resp_len == 0)) {
6457 pr_err("cmd buf lengtgh/response buf length not valid\n");
6458 return -EINVAL;
6459 }
6460
6461 if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
6462 pr_err("Integer overflow in req_len & req_ptr\n");
6463 return -EINVAL;
6464 }
6465
6466 if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
6467 pr_err("Integer overflow in resp_len & resp_ptr\n");
6468 return -EINVAL;
6469 }
6470
6471 if (data->client.user_virt_sb_base >
6472 (ULONG_MAX - data->client.sb_length)) {
6473 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
6474 return -EINVAL;
6475 }
6476 if ((((uintptr_t)req->req_ptr + req->req_len) >
6477 ((uintptr_t)data->client.user_virt_sb_base +
6478 data->client.sb_length)) ||
6479 (((uintptr_t)req->resp_ptr + req->resp_len) >
6480 ((uintptr_t)data->client.user_virt_sb_base +
6481 data->client.sb_length))) {
6482 pr_err("cmd buf or resp buf is out of shared buffer region\n");
6483 return -EINVAL;
6484 }
6485 return 0;
6486}
6487
6488static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6489 uint32_t fd_idx, struct sg_table *sg_ptr)
6490{
6491 struct scatterlist *sg = sg_ptr->sgl;
6492 struct qseecom_sg_entry *sg_entry;
6493 void *buf;
6494 uint i;
6495 size_t size;
6496 dma_addr_t coh_pmem;
6497
6498 if (fd_idx >= MAX_ION_FD) {
6499 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6500 return -ENOMEM;
6501 }
6502 /*
6503 * Allocate a buffer, populate it with number of entry plus
6504 * each sg entry's phy addr and length; then return the
6505 * phy_addr of the buffer.
6506 */
6507 size = sizeof(uint32_t) +
6508 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6509 size = (size + PAGE_SIZE) & PAGE_MASK;
6510 buf = dma_alloc_coherent(qseecom.pdev,
6511 size, &coh_pmem, GFP_KERNEL);
6512 if (buf == NULL) {
6513 pr_err("failed to alloc memory for sg buf\n");
6514 return -ENOMEM;
6515 }
6516 *(uint32_t *)buf = sg_ptr->nents;
6517 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6518 for (i = 0; i < sg_ptr->nents; i++) {
6519 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6520 sg_entry->len = sg->length;
6521 sg_entry++;
6522 sg = sg_next(sg);
6523 }
6524 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6525 data->client.sec_buf_fd[fd_idx].vbase = buf;
6526 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6527 data->client.sec_buf_fd[fd_idx].size = size;
6528 return 0;
6529}
6530
6531static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6532 struct qseecom_dev_handle *data, bool cleanup)
6533{
6534 struct ion_handle *ihandle;
6535 int ret = 0;
6536 int i = 0;
6537 uint32_t *update;
6538 struct sg_table *sg_ptr = NULL;
6539 struct scatterlist *sg;
6540 struct qseecom_param_memref *memref;
6541
6542 if (req == NULL) {
6543 pr_err("Invalid address\n");
6544 return -EINVAL;
6545 }
6546 for (i = 0; i < MAX_ION_FD; i++) {
6547 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006548 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006549 req->ifd_data[i].fd);
6550 if (IS_ERR_OR_NULL(ihandle)) {
6551 pr_err("Ion client can't retrieve the handle\n");
6552 return -ENOMEM;
6553 }
6554 if ((req->req_len < sizeof(uint32_t)) ||
6555 (req->ifd_data[i].cmd_buf_offset >
6556 req->req_len - sizeof(uint32_t))) {
6557 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6558 req->req_len,
6559 req->ifd_data[i].cmd_buf_offset);
6560 return -EINVAL;
6561 }
6562 update = (uint32_t *)((char *) req->req_ptr +
6563 req->ifd_data[i].cmd_buf_offset);
6564 if (!update) {
6565 pr_err("update pointer is NULL\n");
6566 return -EINVAL;
6567 }
6568 } else {
6569 continue;
6570 }
6571 /* Populate the cmd data structure with the phys_addr */
6572 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6573 if (IS_ERR_OR_NULL(sg_ptr)) {
6574 pr_err("IOn client could not retrieve sg table\n");
6575 goto err;
6576 }
6577 sg = sg_ptr->sgl;
6578 if (sg == NULL) {
6579 pr_err("sg is NULL\n");
6580 goto err;
6581 }
6582 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6583 pr_err("Num of scat entr (%d)or length(%d) invalid\n",
6584 sg_ptr->nents, sg->length);
6585 goto err;
6586 }
6587 /* clean up buf for pre-allocated fd */
6588 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6589 (*update)) {
6590 if (data->client.sec_buf_fd[i].vbase)
6591 dma_free_coherent(qseecom.pdev,
6592 data->client.sec_buf_fd[i].size,
6593 data->client.sec_buf_fd[i].vbase,
6594 data->client.sec_buf_fd[i].pbase);
6595 memset((void *)update, 0,
6596 sizeof(struct qseecom_param_memref));
6597 memset(&(data->client.sec_buf_fd[i]), 0,
6598 sizeof(struct qseecom_sec_buf_fd_info));
6599 goto clean;
6600 }
6601
6602 if (*update == 0) {
6603 /* update buf for pre-allocated fd from secure heap*/
6604 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6605 sg_ptr);
6606 if (ret) {
6607 pr_err("Failed to handle buf for fd[%d]\n", i);
6608 goto err;
6609 }
6610 memref = (struct qseecom_param_memref *)update;
6611 memref->buffer =
6612 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6613 memref->size =
6614 (uint32_t)(data->client.sec_buf_fd[i].size);
6615 } else {
6616 /* update buf for fd from non-secure qseecom heap */
6617 if (sg_ptr->nents != 1) {
6618 pr_err("Num of scat entr (%d) invalid\n",
6619 sg_ptr->nents);
6620 goto err;
6621 }
6622 if (cleanup)
6623 *update = 0;
6624 else
6625 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6626 }
6627clean:
6628 if (cleanup) {
6629 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6630 ihandle, NULL, sg->length,
6631 ION_IOC_INV_CACHES);
6632 if (ret) {
6633 pr_err("cache operation failed %d\n", ret);
6634 goto err;
6635 }
6636 } else {
6637 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6638 ihandle, NULL, sg->length,
6639 ION_IOC_CLEAN_INV_CACHES);
6640 if (ret) {
6641 pr_err("cache operation failed %d\n", ret);
6642 goto err;
6643 }
6644 data->sglistinfo_ptr[i].indexAndFlags =
6645 SGLISTINFO_SET_INDEX_FLAG(
6646 (sg_ptr->nents == 1), 0,
6647 req->ifd_data[i].cmd_buf_offset);
6648 data->sglistinfo_ptr[i].sizeOrCount =
6649 (sg_ptr->nents == 1) ?
6650 sg->length : sg_ptr->nents;
6651 data->sglist_cnt = i + 1;
6652 }
6653 /* Deallocate the handle */
6654 if (!IS_ERR_OR_NULL(ihandle))
6655 ion_free(qseecom.ion_clnt, ihandle);
6656 }
6657 return ret;
6658err:
6659 if (!IS_ERR_OR_NULL(ihandle))
6660 ion_free(qseecom.ion_clnt, ihandle);
6661 return -ENOMEM;
6662}
6663
6664static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
6665 struct qseecom_qteec_req *req, uint32_t cmd_id)
6666{
6667 struct qseecom_command_scm_resp resp;
6668 struct qseecom_qteec_ireq ireq;
6669 struct qseecom_qteec_64bit_ireq ireq_64bit;
6670 struct qseecom_registered_app_list *ptr_app;
6671 bool found_app = false;
6672 unsigned long flags;
6673 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07006674 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006675 uint32_t reqd_len_sb_in = 0;
6676 void *cmd_buf = NULL;
6677 size_t cmd_len;
6678 struct sglist_info *table = data->sglistinfo_ptr;
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306679 void *req_ptr = NULL;
6680 void *resp_ptr = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006681
6682 ret = __qseecom_qteec_validate_msg(data, req);
6683 if (ret)
6684 return ret;
6685
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306686 req_ptr = req->req_ptr;
6687 resp_ptr = req->resp_ptr;
6688
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006689 /* find app_id & img_name from list */
6690 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6691 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6692 list) {
6693 if ((ptr_app->app_id == data->client.app_id) &&
6694 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6695 found_app = true;
6696 break;
6697 }
6698 }
6699 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6700 if (!found_app) {
6701 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6702 (char *)data->client.app_name);
6703 return -ENOENT;
6704 }
6705
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306706 req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6707 (uintptr_t)req->req_ptr);
6708 req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6709 (uintptr_t)req->resp_ptr);
6710
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006711 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6712 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
6713 ret = __qseecom_update_qteec_req_buf(
6714 (struct qseecom_qteec_modfd_req *)req, data, false);
6715 if (ret)
6716 return ret;
6717 }
6718
6719 if (qseecom.qsee_version < QSEE_VERSION_40) {
6720 ireq.app_id = data->client.app_id;
6721 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306722 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006723 ireq.req_len = req->req_len;
6724 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306725 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006726 ireq.resp_len = req->resp_len;
6727 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6728 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6729 dmac_flush_range((void *)table,
6730 (void *)table + SGLISTINFO_TABLE_SIZE);
6731 cmd_buf = (void *)&ireq;
6732 cmd_len = sizeof(struct qseecom_qteec_ireq);
6733 } else {
6734 ireq_64bit.app_id = data->client.app_id;
6735 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306736 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006737 ireq_64bit.req_len = req->req_len;
6738 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306739 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006740 ireq_64bit.resp_len = req->resp_len;
6741 if ((data->client.app_arch == ELFCLASS32) &&
6742 ((ireq_64bit.req_ptr >=
6743 PHY_ADDR_4G - ireq_64bit.req_len) ||
6744 (ireq_64bit.resp_ptr >=
6745 PHY_ADDR_4G - ireq_64bit.resp_len))){
6746 pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
6747 data->client.app_name, data->client.app_id);
6748 pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
6749 ireq_64bit.req_ptr, ireq_64bit.req_len,
6750 ireq_64bit.resp_ptr, ireq_64bit.resp_len);
6751 return -EFAULT;
6752 }
6753 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6754 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6755 dmac_flush_range((void *)table,
6756 (void *)table + SGLISTINFO_TABLE_SIZE);
6757 cmd_buf = (void *)&ireq_64bit;
6758 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6759 }
6760 if (qseecom.whitelist_support == true
6761 && cmd_id == QSEOS_TEE_OPEN_SESSION)
6762 *(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
6763 else
6764 *(uint32_t *)cmd_buf = cmd_id;
6765
6766 reqd_len_sb_in = req->req_len + req->resp_len;
6767 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6768 data->client.sb_virt,
6769 reqd_len_sb_in,
6770 ION_IOC_CLEAN_INV_CACHES);
6771 if (ret) {
6772 pr_err("cache operation failed %d\n", ret);
6773 return ret;
6774 }
6775
6776 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6777
6778 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6779 cmd_buf, cmd_len,
6780 &resp, sizeof(resp));
6781 if (ret) {
6782 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6783 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07006784 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006785 }
6786
6787 if (qseecom.qsee_reentrancy_support) {
6788 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07006789 if (ret)
6790 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006791 } else {
6792 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6793 ret = __qseecom_process_incomplete_cmd(data, &resp);
6794 if (ret) {
6795 pr_err("process_incomplete_cmd failed err: %d\n",
6796 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07006797 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006798 }
6799 } else {
6800 if (resp.result != QSEOS_RESULT_SUCCESS) {
6801 pr_err("Response result %d not supported\n",
6802 resp.result);
6803 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07006804 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006805 }
6806 }
6807 }
Zhen Kong4af480e2017-09-19 14:34:16 -07006808exit:
6809 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006810 data->client.sb_virt, data->client.sb_length,
6811 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07006812 if (ret2) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006813 pr_err("cache operation failed %d\n", ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07006814 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006815 }
6816
6817 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6818 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
Zhen Kong4af480e2017-09-19 14:34:16 -07006819 ret2 = __qseecom_update_qteec_req_buf(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006820 (struct qseecom_qteec_modfd_req *)req, data, true);
Zhen Kong4af480e2017-09-19 14:34:16 -07006821 if (ret2)
6822 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006823 }
Zhen Kong4af480e2017-09-19 14:34:16 -07006824 return ret;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006825}
6826
6827static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6828 void __user *argp)
6829{
6830 struct qseecom_qteec_modfd_req req;
6831 int ret = 0;
6832
6833 ret = copy_from_user(&req, argp,
6834 sizeof(struct qseecom_qteec_modfd_req));
6835 if (ret) {
6836 pr_err("copy_from_user failed\n");
6837 return ret;
6838 }
6839 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6840 QSEOS_TEE_OPEN_SESSION);
6841
6842 return ret;
6843}
6844
6845static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6846 void __user *argp)
6847{
6848 struct qseecom_qteec_req req;
6849 int ret = 0;
6850
6851 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6852 if (ret) {
6853 pr_err("copy_from_user failed\n");
6854 return ret;
6855 }
6856 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6857 return ret;
6858}
6859
6860static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
6861 void __user *argp)
6862{
6863 struct qseecom_qteec_modfd_req req;
6864 struct qseecom_command_scm_resp resp;
6865 struct qseecom_qteec_ireq ireq;
6866 struct qseecom_qteec_64bit_ireq ireq_64bit;
6867 struct qseecom_registered_app_list *ptr_app;
6868 bool found_app = false;
6869 unsigned long flags;
6870 int ret = 0;
6871 int i = 0;
6872 uint32_t reqd_len_sb_in = 0;
6873 void *cmd_buf = NULL;
6874 size_t cmd_len;
6875 struct sglist_info *table = data->sglistinfo_ptr;
6876 void *req_ptr = NULL;
6877 void *resp_ptr = NULL;
6878
6879 ret = copy_from_user(&req, argp,
6880 sizeof(struct qseecom_qteec_modfd_req));
6881 if (ret) {
6882 pr_err("copy_from_user failed\n");
6883 return ret;
6884 }
6885 ret = __qseecom_qteec_validate_msg(data,
6886 (struct qseecom_qteec_req *)(&req));
6887 if (ret)
6888 return ret;
6889 req_ptr = req.req_ptr;
6890 resp_ptr = req.resp_ptr;
6891
6892 /* find app_id & img_name from list */
6893 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6894 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6895 list) {
6896 if ((ptr_app->app_id == data->client.app_id) &&
6897 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6898 found_app = true;
6899 break;
6900 }
6901 }
6902 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6903 if (!found_app) {
6904 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6905 (char *)data->client.app_name);
6906 return -ENOENT;
6907 }
6908
6909 /* validate offsets */
6910 for (i = 0; i < MAX_ION_FD; i++) {
6911 if (req.ifd_data[i].fd) {
6912 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
6913 return -EINVAL;
6914 }
6915 }
6916 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6917 (uintptr_t)req.req_ptr);
6918 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6919 (uintptr_t)req.resp_ptr);
6920 ret = __qseecom_update_qteec_req_buf(&req, data, false);
6921 if (ret)
6922 return ret;
6923
6924 if (qseecom.qsee_version < QSEE_VERSION_40) {
6925 ireq.app_id = data->client.app_id;
6926 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6927 (uintptr_t)req_ptr);
6928 ireq.req_len = req.req_len;
6929 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6930 (uintptr_t)resp_ptr);
6931 ireq.resp_len = req.resp_len;
6932 cmd_buf = (void *)&ireq;
6933 cmd_len = sizeof(struct qseecom_qteec_ireq);
6934 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6935 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6936 dmac_flush_range((void *)table,
6937 (void *)table + SGLISTINFO_TABLE_SIZE);
6938 } else {
6939 ireq_64bit.app_id = data->client.app_id;
6940 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6941 (uintptr_t)req_ptr);
6942 ireq_64bit.req_len = req.req_len;
6943 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6944 (uintptr_t)resp_ptr);
6945 ireq_64bit.resp_len = req.resp_len;
6946 cmd_buf = (void *)&ireq_64bit;
6947 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6948 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6949 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6950 dmac_flush_range((void *)table,
6951 (void *)table + SGLISTINFO_TABLE_SIZE);
6952 }
6953 reqd_len_sb_in = req.req_len + req.resp_len;
6954 if (qseecom.whitelist_support == true)
6955 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
6956 else
6957 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
6958
6959 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6960 data->client.sb_virt,
6961 reqd_len_sb_in,
6962 ION_IOC_CLEAN_INV_CACHES);
6963 if (ret) {
6964 pr_err("cache operation failed %d\n", ret);
6965 return ret;
6966 }
6967
6968 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6969
6970 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6971 cmd_buf, cmd_len,
6972 &resp, sizeof(resp));
6973 if (ret) {
6974 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6975 ret, data->client.app_id);
6976 return ret;
6977 }
6978
6979 if (qseecom.qsee_reentrancy_support) {
6980 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
6981 } else {
6982 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6983 ret = __qseecom_process_incomplete_cmd(data, &resp);
6984 if (ret) {
6985 pr_err("process_incomplete_cmd failed err: %d\n",
6986 ret);
6987 return ret;
6988 }
6989 } else {
6990 if (resp.result != QSEOS_RESULT_SUCCESS) {
6991 pr_err("Response result %d not supported\n",
6992 resp.result);
6993 ret = -EINVAL;
6994 }
6995 }
6996 }
6997 ret = __qseecom_update_qteec_req_buf(&req, data, true);
6998 if (ret)
6999 return ret;
7000
7001 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
7002 data->client.sb_virt, data->client.sb_length,
7003 ION_IOC_INV_CACHES);
7004 if (ret) {
7005 pr_err("cache operation failed %d\n", ret);
7006 return ret;
7007 }
7008 return 0;
7009}
7010
7011static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
7012 void __user *argp)
7013{
7014 struct qseecom_qteec_modfd_req req;
7015 int ret = 0;
7016
7017 ret = copy_from_user(&req, argp,
7018 sizeof(struct qseecom_qteec_modfd_req));
7019 if (ret) {
7020 pr_err("copy_from_user failed\n");
7021 return ret;
7022 }
7023 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
7024 QSEOS_TEE_REQUEST_CANCELLATION);
7025
7026 return ret;
7027}
7028
7029static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
7030{
7031 if (data->sglist_cnt) {
7032 memset(data->sglistinfo_ptr, 0,
7033 SGLISTINFO_TABLE_SIZE);
7034 data->sglist_cnt = 0;
7035 }
7036}
7037
7038static inline long qseecom_ioctl(struct file *file,
7039 unsigned int cmd, unsigned long arg)
7040{
7041 int ret = 0;
7042 struct qseecom_dev_handle *data = file->private_data;
7043 void __user *argp = (void __user *) arg;
7044 bool perf_enabled = false;
7045
7046 if (!data) {
7047 pr_err("Invalid/uninitialized device handle\n");
7048 return -EINVAL;
7049 }
7050
7051 if (data->abort) {
7052 pr_err("Aborting qseecom driver\n");
7053 return -ENODEV;
7054 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007055 if (cmd != QSEECOM_IOCTL_RECEIVE_REQ &&
7056 cmd != QSEECOM_IOCTL_SEND_RESP_REQ &&
7057 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP &&
7058 cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64)
7059 __qseecom_processing_pending_lsnr_unregister();
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007060
7061 switch (cmd) {
7062 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
7063 if (data->type != QSEECOM_GENERIC) {
7064 pr_err("reg lstnr req: invalid handle (%d)\n",
7065 data->type);
7066 ret = -EINVAL;
7067 break;
7068 }
7069 pr_debug("ioctl register_listener_req()\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08007070 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007071 atomic_inc(&data->ioctl_count);
7072 data->type = QSEECOM_LISTENER_SERVICE;
7073 ret = qseecom_register_listener(data, argp);
7074 atomic_dec(&data->ioctl_count);
7075 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007076 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007077 if (ret)
7078 pr_err("failed qseecom_register_listener: %d\n", ret);
7079 break;
7080 }
Neeraj Sonib30ac1f2018-04-17 14:48:42 +05307081 case QSEECOM_IOCTL_SET_ICE_INFO: {
7082 struct qseecom_ice_data_t ice_data;
7083
7084 ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
7085 if (ret) {
7086 pr_err("copy_from_user failed\n");
7087 return -EFAULT;
7088 }
7089 qcom_ice_set_fde_flag(ice_data.flag);
7090 break;
7091 }
7092
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007093 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
7094 if ((data->listener.id == 0) ||
7095 (data->type != QSEECOM_LISTENER_SERVICE)) {
7096 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
7097 data->type, data->listener.id);
7098 ret = -EINVAL;
7099 break;
7100 }
7101 pr_debug("ioctl unregister_listener_req()\n");
Zhen Kongbcdeda22018-11-16 13:50:51 -08007102 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007103 atomic_inc(&data->ioctl_count);
7104 ret = qseecom_unregister_listener(data);
7105 atomic_dec(&data->ioctl_count);
7106 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007107 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007108 if (ret)
7109 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7110 break;
7111 }
7112 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7113 if ((data->client.app_id == 0) ||
7114 (data->type != QSEECOM_CLIENT_APP)) {
7115 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7116 data->type, data->client.app_id);
7117 ret = -EINVAL;
7118 break;
7119 }
7120 /* Only one client allowed here at a time */
7121 mutex_lock(&app_access_lock);
7122 if (qseecom.support_bus_scaling) {
7123 /* register bus bw in case the client doesn't do it */
7124 if (!data->mode) {
7125 mutex_lock(&qsee_bw_mutex);
7126 __qseecom_register_bus_bandwidth_needs(
7127 data, HIGH);
7128 mutex_unlock(&qsee_bw_mutex);
7129 }
7130 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7131 if (ret) {
7132 pr_err("Failed to set bw.\n");
7133 ret = -EINVAL;
7134 mutex_unlock(&app_access_lock);
7135 break;
7136 }
7137 }
7138 /*
7139 * On targets where crypto clock is handled by HLOS,
7140 * if clk_access_cnt is zero and perf_enabled is false,
7141 * then the crypto clock was not enabled before sending cmd to
7142 * tz, qseecom will enable the clock to avoid service failure.
7143 */
7144 if (!qseecom.no_clock_support &&
7145 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7146 pr_debug("ce clock is not enabled!\n");
7147 ret = qseecom_perf_enable(data);
7148 if (ret) {
7149 pr_err("Failed to vote for clock with err %d\n",
7150 ret);
7151 mutex_unlock(&app_access_lock);
7152 ret = -EINVAL;
7153 break;
7154 }
7155 perf_enabled = true;
7156 }
7157 atomic_inc(&data->ioctl_count);
7158 ret = qseecom_send_cmd(data, argp);
7159 if (qseecom.support_bus_scaling)
7160 __qseecom_add_bw_scale_down_timer(
7161 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7162 if (perf_enabled) {
7163 qsee_disable_clock_vote(data, CLK_DFAB);
7164 qsee_disable_clock_vote(data, CLK_SFPB);
7165 }
7166 atomic_dec(&data->ioctl_count);
7167 wake_up_all(&data->abort_wq);
7168 mutex_unlock(&app_access_lock);
7169 if (ret)
7170 pr_err("failed qseecom_send_cmd: %d\n", ret);
7171 break;
7172 }
7173 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7174 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7175 if ((data->client.app_id == 0) ||
7176 (data->type != QSEECOM_CLIENT_APP)) {
7177 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7178 data->type, data->client.app_id);
7179 ret = -EINVAL;
7180 break;
7181 }
7182 /* Only one client allowed here at a time */
7183 mutex_lock(&app_access_lock);
7184 if (qseecom.support_bus_scaling) {
7185 if (!data->mode) {
7186 mutex_lock(&qsee_bw_mutex);
7187 __qseecom_register_bus_bandwidth_needs(
7188 data, HIGH);
7189 mutex_unlock(&qsee_bw_mutex);
7190 }
7191 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7192 if (ret) {
7193 pr_err("Failed to set bw.\n");
7194 mutex_unlock(&app_access_lock);
7195 ret = -EINVAL;
7196 break;
7197 }
7198 }
7199 /*
7200 * On targets where crypto clock is handled by HLOS,
7201 * if clk_access_cnt is zero and perf_enabled is false,
7202 * then the crypto clock was not enabled before sending cmd to
7203 * tz, qseecom will enable the clock to avoid service failure.
7204 */
7205 if (!qseecom.no_clock_support &&
7206 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7207 pr_debug("ce clock is not enabled!\n");
7208 ret = qseecom_perf_enable(data);
7209 if (ret) {
7210 pr_err("Failed to vote for clock with err %d\n",
7211 ret);
7212 mutex_unlock(&app_access_lock);
7213 ret = -EINVAL;
7214 break;
7215 }
7216 perf_enabled = true;
7217 }
7218 atomic_inc(&data->ioctl_count);
7219 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7220 ret = qseecom_send_modfd_cmd(data, argp);
7221 else
7222 ret = qseecom_send_modfd_cmd_64(data, argp);
7223 if (qseecom.support_bus_scaling)
7224 __qseecom_add_bw_scale_down_timer(
7225 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7226 if (perf_enabled) {
7227 qsee_disable_clock_vote(data, CLK_DFAB);
7228 qsee_disable_clock_vote(data, CLK_SFPB);
7229 }
7230 atomic_dec(&data->ioctl_count);
7231 wake_up_all(&data->abort_wq);
7232 mutex_unlock(&app_access_lock);
7233 if (ret)
7234 pr_err("failed qseecom_send_cmd: %d\n", ret);
7235 __qseecom_clean_data_sglistinfo(data);
7236 break;
7237 }
7238 case QSEECOM_IOCTL_RECEIVE_REQ: {
7239 if ((data->listener.id == 0) ||
7240 (data->type != QSEECOM_LISTENER_SERVICE)) {
7241 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7242 data->type, data->listener.id);
7243 ret = -EINVAL;
7244 break;
7245 }
7246 atomic_inc(&data->ioctl_count);
7247 ret = qseecom_receive_req(data);
7248 atomic_dec(&data->ioctl_count);
7249 wake_up_all(&data->abort_wq);
7250 if (ret && (ret != -ERESTARTSYS))
7251 pr_err("failed qseecom_receive_req: %d\n", ret);
7252 break;
7253 }
7254 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7255 if ((data->listener.id == 0) ||
7256 (data->type != QSEECOM_LISTENER_SERVICE)) {
7257 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7258 data->type, data->listener.id);
7259 ret = -EINVAL;
7260 break;
7261 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007262 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007263 atomic_inc(&data->ioctl_count);
7264 if (!qseecom.qsee_reentrancy_support)
7265 ret = qseecom_send_resp();
7266 else
7267 ret = qseecom_reentrancy_send_resp(data);
7268 atomic_dec(&data->ioctl_count);
7269 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007270 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007271 if (ret)
7272 pr_err("failed qseecom_send_resp: %d\n", ret);
7273 break;
7274 }
7275 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7276 if ((data->type != QSEECOM_CLIENT_APP) &&
7277 (data->type != QSEECOM_GENERIC) &&
7278 (data->type != QSEECOM_SECURE_SERVICE)) {
7279 pr_err("set mem param req: invalid handle (%d)\n",
7280 data->type);
7281 ret = -EINVAL;
7282 break;
7283 }
7284 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7285 mutex_lock(&app_access_lock);
7286 atomic_inc(&data->ioctl_count);
7287 ret = qseecom_set_client_mem_param(data, argp);
7288 atomic_dec(&data->ioctl_count);
7289 mutex_unlock(&app_access_lock);
7290 if (ret)
7291 pr_err("failed Qqseecom_set_mem_param request: %d\n",
7292 ret);
7293 break;
7294 }
7295 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7296 if ((data->type != QSEECOM_GENERIC) &&
7297 (data->type != QSEECOM_CLIENT_APP)) {
7298 pr_err("load app req: invalid handle (%d)\n",
7299 data->type);
7300 ret = -EINVAL;
7301 break;
7302 }
7303 data->type = QSEECOM_CLIENT_APP;
7304 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7305 mutex_lock(&app_access_lock);
7306 atomic_inc(&data->ioctl_count);
7307 ret = qseecom_load_app(data, argp);
7308 atomic_dec(&data->ioctl_count);
7309 mutex_unlock(&app_access_lock);
7310 if (ret)
7311 pr_err("failed load_app request: %d\n", ret);
7312 break;
7313 }
7314 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7315 if ((data->client.app_id == 0) ||
7316 (data->type != QSEECOM_CLIENT_APP)) {
7317 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7318 data->type, data->client.app_id);
7319 ret = -EINVAL;
7320 break;
7321 }
7322 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7323 mutex_lock(&app_access_lock);
7324 atomic_inc(&data->ioctl_count);
7325 ret = qseecom_unload_app(data, false);
7326 atomic_dec(&data->ioctl_count);
7327 mutex_unlock(&app_access_lock);
7328 if (ret)
7329 pr_err("failed unload_app request: %d\n", ret);
7330 break;
7331 }
7332 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7333 atomic_inc(&data->ioctl_count);
7334 ret = qseecom_get_qseos_version(data, argp);
7335 if (ret)
7336 pr_err("qseecom_get_qseos_version: %d\n", ret);
7337 atomic_dec(&data->ioctl_count);
7338 break;
7339 }
7340 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7341 if ((data->type != QSEECOM_GENERIC) &&
7342 (data->type != QSEECOM_CLIENT_APP)) {
7343 pr_err("perf enable req: invalid handle (%d)\n",
7344 data->type);
7345 ret = -EINVAL;
7346 break;
7347 }
7348 if ((data->type == QSEECOM_CLIENT_APP) &&
7349 (data->client.app_id == 0)) {
7350 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7351 data->type, data->client.app_id);
7352 ret = -EINVAL;
7353 break;
7354 }
7355 atomic_inc(&data->ioctl_count);
7356 if (qseecom.support_bus_scaling) {
7357 mutex_lock(&qsee_bw_mutex);
7358 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7359 mutex_unlock(&qsee_bw_mutex);
7360 } else {
7361 ret = qseecom_perf_enable(data);
7362 if (ret)
7363 pr_err("Fail to vote for clocks %d\n", ret);
7364 }
7365 atomic_dec(&data->ioctl_count);
7366 break;
7367 }
7368 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7369 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7370 (data->type != QSEECOM_CLIENT_APP)) {
7371 pr_err("perf disable req: invalid handle (%d)\n",
7372 data->type);
7373 ret = -EINVAL;
7374 break;
7375 }
7376 if ((data->type == QSEECOM_CLIENT_APP) &&
7377 (data->client.app_id == 0)) {
7378 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7379 data->type, data->client.app_id);
7380 ret = -EINVAL;
7381 break;
7382 }
7383 atomic_inc(&data->ioctl_count);
7384 if (!qseecom.support_bus_scaling) {
7385 qsee_disable_clock_vote(data, CLK_DFAB);
7386 qsee_disable_clock_vote(data, CLK_SFPB);
7387 } else {
7388 mutex_lock(&qsee_bw_mutex);
7389 qseecom_unregister_bus_bandwidth_needs(data);
7390 mutex_unlock(&qsee_bw_mutex);
7391 }
7392 atomic_dec(&data->ioctl_count);
7393 break;
7394 }
7395
7396 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7397 /* If crypto clock is not handled by HLOS, return directly. */
7398 if (qseecom.no_clock_support) {
7399 pr_debug("crypto clock is not handled by HLOS\n");
7400 break;
7401 }
7402 if ((data->client.app_id == 0) ||
7403 (data->type != QSEECOM_CLIENT_APP)) {
7404 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7405 data->type, data->client.app_id);
7406 ret = -EINVAL;
7407 break;
7408 }
7409 atomic_inc(&data->ioctl_count);
7410 ret = qseecom_scale_bus_bandwidth(data, argp);
7411 atomic_dec(&data->ioctl_count);
7412 break;
7413 }
7414 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7415 if (data->type != QSEECOM_GENERIC) {
7416 pr_err("load ext elf req: invalid client handle (%d)\n",
7417 data->type);
7418 ret = -EINVAL;
7419 break;
7420 }
7421 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7422 data->released = true;
7423 mutex_lock(&app_access_lock);
7424 atomic_inc(&data->ioctl_count);
7425 ret = qseecom_load_external_elf(data, argp);
7426 atomic_dec(&data->ioctl_count);
7427 mutex_unlock(&app_access_lock);
7428 if (ret)
7429 pr_err("failed load_external_elf request: %d\n", ret);
7430 break;
7431 }
7432 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7433 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7434 pr_err("unload ext elf req: invalid handle (%d)\n",
7435 data->type);
7436 ret = -EINVAL;
7437 break;
7438 }
7439 data->released = true;
7440 mutex_lock(&app_access_lock);
7441 atomic_inc(&data->ioctl_count);
7442 ret = qseecom_unload_external_elf(data);
7443 atomic_dec(&data->ioctl_count);
7444 mutex_unlock(&app_access_lock);
7445 if (ret)
7446 pr_err("failed unload_app request: %d\n", ret);
7447 break;
7448 }
7449 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7450 data->type = QSEECOM_CLIENT_APP;
7451 mutex_lock(&app_access_lock);
7452 atomic_inc(&data->ioctl_count);
7453 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7454 ret = qseecom_query_app_loaded(data, argp);
7455 atomic_dec(&data->ioctl_count);
7456 mutex_unlock(&app_access_lock);
7457 break;
7458 }
7459 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7460 if (data->type != QSEECOM_GENERIC) {
7461 pr_err("send cmd svc req: invalid handle (%d)\n",
7462 data->type);
7463 ret = -EINVAL;
7464 break;
7465 }
7466 data->type = QSEECOM_SECURE_SERVICE;
7467 if (qseecom.qsee_version < QSEE_VERSION_03) {
7468 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7469 qseecom.qsee_version);
7470 return -EINVAL;
7471 }
7472 mutex_lock(&app_access_lock);
7473 atomic_inc(&data->ioctl_count);
7474 ret = qseecom_send_service_cmd(data, argp);
7475 atomic_dec(&data->ioctl_count);
7476 mutex_unlock(&app_access_lock);
7477 break;
7478 }
7479 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7480 if (!(qseecom.support_pfe || qseecom.support_fde))
7481 pr_err("Features requiring key init not supported\n");
7482 if (data->type != QSEECOM_GENERIC) {
7483 pr_err("create key req: invalid handle (%d)\n",
7484 data->type);
7485 ret = -EINVAL;
7486 break;
7487 }
7488 if (qseecom.qsee_version < QSEE_VERSION_05) {
7489 pr_err("Create Key feature unsupported: qsee ver %u\n",
7490 qseecom.qsee_version);
7491 return -EINVAL;
7492 }
7493 data->released = true;
7494 mutex_lock(&app_access_lock);
7495 atomic_inc(&data->ioctl_count);
7496 ret = qseecom_create_key(data, argp);
7497 if (ret)
7498 pr_err("failed to create encryption key: %d\n", ret);
7499
7500 atomic_dec(&data->ioctl_count);
7501 mutex_unlock(&app_access_lock);
7502 break;
7503 }
7504 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7505 if (!(qseecom.support_pfe || qseecom.support_fde))
7506 pr_err("Features requiring key init not supported\n");
7507 if (data->type != QSEECOM_GENERIC) {
7508 pr_err("wipe key req: invalid handle (%d)\n",
7509 data->type);
7510 ret = -EINVAL;
7511 break;
7512 }
7513 if (qseecom.qsee_version < QSEE_VERSION_05) {
7514 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7515 qseecom.qsee_version);
7516 return -EINVAL;
7517 }
7518 data->released = true;
7519 mutex_lock(&app_access_lock);
7520 atomic_inc(&data->ioctl_count);
7521 ret = qseecom_wipe_key(data, argp);
7522 if (ret)
7523 pr_err("failed to wipe encryption key: %d\n", ret);
7524 atomic_dec(&data->ioctl_count);
7525 mutex_unlock(&app_access_lock);
7526 break;
7527 }
7528 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7529 if (!(qseecom.support_pfe || qseecom.support_fde))
7530 pr_err("Features requiring key init not supported\n");
7531 if (data->type != QSEECOM_GENERIC) {
7532 pr_err("update key req: invalid handle (%d)\n",
7533 data->type);
7534 ret = -EINVAL;
7535 break;
7536 }
7537 if (qseecom.qsee_version < QSEE_VERSION_05) {
7538 pr_err("Update Key feature unsupported in qsee ver %u\n",
7539 qseecom.qsee_version);
7540 return -EINVAL;
7541 }
7542 data->released = true;
7543 mutex_lock(&app_access_lock);
7544 atomic_inc(&data->ioctl_count);
7545 ret = qseecom_update_key_user_info(data, argp);
7546 if (ret)
7547 pr_err("failed to update key user info: %d\n", ret);
7548 atomic_dec(&data->ioctl_count);
7549 mutex_unlock(&app_access_lock);
7550 break;
7551 }
7552 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7553 if (data->type != QSEECOM_GENERIC) {
7554 pr_err("save part hash req: invalid handle (%d)\n",
7555 data->type);
7556 ret = -EINVAL;
7557 break;
7558 }
7559 data->released = true;
7560 mutex_lock(&app_access_lock);
7561 atomic_inc(&data->ioctl_count);
7562 ret = qseecom_save_partition_hash(argp);
7563 atomic_dec(&data->ioctl_count);
7564 mutex_unlock(&app_access_lock);
7565 break;
7566 }
7567 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7568 if (data->type != QSEECOM_GENERIC) {
7569 pr_err("ES activated req: invalid handle (%d)\n",
7570 data->type);
7571 ret = -EINVAL;
7572 break;
7573 }
7574 data->released = true;
7575 mutex_lock(&app_access_lock);
7576 atomic_inc(&data->ioctl_count);
7577 ret = qseecom_is_es_activated(argp);
7578 atomic_dec(&data->ioctl_count);
7579 mutex_unlock(&app_access_lock);
7580 break;
7581 }
7582 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7583 if (data->type != QSEECOM_GENERIC) {
7584 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7585 data->type);
7586 ret = -EINVAL;
7587 break;
7588 }
7589 data->released = true;
7590 mutex_lock(&app_access_lock);
7591 atomic_inc(&data->ioctl_count);
7592 ret = qseecom_mdtp_cipher_dip(argp);
7593 atomic_dec(&data->ioctl_count);
7594 mutex_unlock(&app_access_lock);
7595 break;
7596 }
7597 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7598 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7599 if ((data->listener.id == 0) ||
7600 (data->type != QSEECOM_LISTENER_SERVICE)) {
7601 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7602 data->type, data->listener.id);
7603 ret = -EINVAL;
7604 break;
7605 }
Zhen Kongbcdeda22018-11-16 13:50:51 -08007606 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007607 atomic_inc(&data->ioctl_count);
7608 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7609 ret = qseecom_send_modfd_resp(data, argp);
7610 else
7611 ret = qseecom_send_modfd_resp_64(data, argp);
7612 atomic_dec(&data->ioctl_count);
7613 wake_up_all(&data->abort_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08007614 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007615 if (ret)
7616 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7617 __qseecom_clean_data_sglistinfo(data);
7618 break;
7619 }
7620 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7621 if ((data->client.app_id == 0) ||
7622 (data->type != QSEECOM_CLIENT_APP)) {
7623 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7624 data->type, data->client.app_id);
7625 ret = -EINVAL;
7626 break;
7627 }
7628 if (qseecom.qsee_version < QSEE_VERSION_40) {
7629 pr_err("GP feature unsupported: qsee ver %u\n",
7630 qseecom.qsee_version);
7631 return -EINVAL;
7632 }
7633 /* Only one client allowed here at a time */
7634 mutex_lock(&app_access_lock);
7635 atomic_inc(&data->ioctl_count);
7636 ret = qseecom_qteec_open_session(data, argp);
7637 atomic_dec(&data->ioctl_count);
7638 wake_up_all(&data->abort_wq);
7639 mutex_unlock(&app_access_lock);
7640 if (ret)
7641 pr_err("failed open_session_cmd: %d\n", ret);
7642 __qseecom_clean_data_sglistinfo(data);
7643 break;
7644 }
7645 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7646 if ((data->client.app_id == 0) ||
7647 (data->type != QSEECOM_CLIENT_APP)) {
7648 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7649 data->type, data->client.app_id);
7650 ret = -EINVAL;
7651 break;
7652 }
7653 if (qseecom.qsee_version < QSEE_VERSION_40) {
7654 pr_err("GP feature unsupported: qsee ver %u\n",
7655 qseecom.qsee_version);
7656 return -EINVAL;
7657 }
7658 /* Only one client allowed here at a time */
7659 mutex_lock(&app_access_lock);
7660 atomic_inc(&data->ioctl_count);
7661 ret = qseecom_qteec_close_session(data, argp);
7662 atomic_dec(&data->ioctl_count);
7663 wake_up_all(&data->abort_wq);
7664 mutex_unlock(&app_access_lock);
7665 if (ret)
7666 pr_err("failed close_session_cmd: %d\n", ret);
7667 break;
7668 }
7669 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7670 if ((data->client.app_id == 0) ||
7671 (data->type != QSEECOM_CLIENT_APP)) {
7672 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7673 data->type, data->client.app_id);
7674 ret = -EINVAL;
7675 break;
7676 }
7677 if (qseecom.qsee_version < QSEE_VERSION_40) {
7678 pr_err("GP feature unsupported: qsee ver %u\n",
7679 qseecom.qsee_version);
7680 return -EINVAL;
7681 }
7682 /* Only one client allowed here at a time */
7683 mutex_lock(&app_access_lock);
7684 atomic_inc(&data->ioctl_count);
7685 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7686 atomic_dec(&data->ioctl_count);
7687 wake_up_all(&data->abort_wq);
7688 mutex_unlock(&app_access_lock);
7689 if (ret)
7690 pr_err("failed Invoke cmd: %d\n", ret);
7691 __qseecom_clean_data_sglistinfo(data);
7692 break;
7693 }
7694 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7695 if ((data->client.app_id == 0) ||
7696 (data->type != QSEECOM_CLIENT_APP)) {
7697 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7698 data->type, data->client.app_id);
7699 ret = -EINVAL;
7700 break;
7701 }
7702 if (qseecom.qsee_version < QSEE_VERSION_40) {
7703 pr_err("GP feature unsupported: qsee ver %u\n",
7704 qseecom.qsee_version);
7705 return -EINVAL;
7706 }
7707 /* Only one client allowed here at a time */
7708 mutex_lock(&app_access_lock);
7709 atomic_inc(&data->ioctl_count);
7710 ret = qseecom_qteec_request_cancellation(data, argp);
7711 atomic_dec(&data->ioctl_count);
7712 wake_up_all(&data->abort_wq);
7713 mutex_unlock(&app_access_lock);
7714 if (ret)
7715 pr_err("failed request_cancellation: %d\n", ret);
7716 break;
7717 }
7718 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7719 atomic_inc(&data->ioctl_count);
7720 ret = qseecom_get_ce_info(data, argp);
7721 if (ret)
7722 pr_err("failed get fde ce pipe info: %d\n", ret);
7723 atomic_dec(&data->ioctl_count);
7724 break;
7725 }
7726 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7727 atomic_inc(&data->ioctl_count);
7728 ret = qseecom_free_ce_info(data, argp);
7729 if (ret)
7730 pr_err("failed get fde ce pipe info: %d\n", ret);
7731 atomic_dec(&data->ioctl_count);
7732 break;
7733 }
7734 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7735 atomic_inc(&data->ioctl_count);
7736 ret = qseecom_query_ce_info(data, argp);
7737 if (ret)
7738 pr_err("failed get fde ce pipe info: %d\n", ret);
7739 atomic_dec(&data->ioctl_count);
7740 break;
7741 }
7742 default:
7743 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7744 return -EINVAL;
7745 }
7746 return ret;
7747}
7748
7749static int qseecom_open(struct inode *inode, struct file *file)
7750{
7751 int ret = 0;
7752 struct qseecom_dev_handle *data;
7753
7754 data = kzalloc(sizeof(*data), GFP_KERNEL);
7755 if (!data)
7756 return -ENOMEM;
7757 file->private_data = data;
7758 data->abort = 0;
7759 data->type = QSEECOM_GENERIC;
7760 data->released = false;
7761 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7762 data->mode = INACTIVE;
7763 init_waitqueue_head(&data->abort_wq);
7764 atomic_set(&data->ioctl_count, 0);
7765 return ret;
7766}
7767
7768static int qseecom_release(struct inode *inode, struct file *file)
7769{
7770 struct qseecom_dev_handle *data = file->private_data;
7771 int ret = 0;
Zhen Kongbcdeda22018-11-16 13:50:51 -08007772 bool free_private_data = true;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007773
7774 if (data->released == false) {
7775 pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
7776 data->type, data->mode, data);
7777 switch (data->type) {
7778 case QSEECOM_LISTENER_SERVICE:
Zhen Kongbcdeda22018-11-16 13:50:51 -08007779 pr_debug("release lsnr svc %d\n", data->listener.id);
7780 free_private_data = false;
7781 mutex_lock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007782 ret = qseecom_unregister_listener(data);
Zhen Kong87dcf0e2019-01-04 12:34:50 -08007783 data->listener.release_called = true;
Zhen Kongbcdeda22018-11-16 13:50:51 -08007784 mutex_unlock(&listener_access_lock);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007785 break;
7786 case QSEECOM_CLIENT_APP:
7787 mutex_lock(&app_access_lock);
7788 ret = qseecom_unload_app(data, true);
7789 mutex_unlock(&app_access_lock);
7790 break;
7791 case QSEECOM_SECURE_SERVICE:
7792 case QSEECOM_GENERIC:
7793 ret = qseecom_unmap_ion_allocated_memory(data);
7794 if (ret)
7795 pr_err("Ion Unmap failed\n");
7796 break;
7797 case QSEECOM_UNAVAILABLE_CLIENT_APP:
7798 break;
7799 default:
7800 pr_err("Unsupported clnt_handle_type %d",
7801 data->type);
7802 break;
7803 }
7804 }
7805
7806 if (qseecom.support_bus_scaling) {
7807 mutex_lock(&qsee_bw_mutex);
7808 if (data->mode != INACTIVE) {
7809 qseecom_unregister_bus_bandwidth_needs(data);
7810 if (qseecom.cumulative_mode == INACTIVE) {
7811 ret = __qseecom_set_msm_bus_request(INACTIVE);
7812 if (ret)
7813 pr_err("Fail to scale down bus\n");
7814 }
7815 }
7816 mutex_unlock(&qsee_bw_mutex);
7817 } else {
7818 if (data->fast_load_enabled == true)
7819 qsee_disable_clock_vote(data, CLK_SFPB);
7820 if (data->perf_enabled == true)
7821 qsee_disable_clock_vote(data, CLK_DFAB);
7822 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007823
Zhen Kongbcdeda22018-11-16 13:50:51 -08007824 if (free_private_data)
7825 kfree(data);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007826 return ret;
7827}
7828
7829#ifdef CONFIG_COMPAT
7830#include "compat_qseecom.c"
7831#else
7832#define compat_qseecom_ioctl NULL
7833#endif
7834
7835static const struct file_operations qseecom_fops = {
7836 .owner = THIS_MODULE,
7837 .unlocked_ioctl = qseecom_ioctl,
7838 .compat_ioctl = compat_qseecom_ioctl,
7839 .open = qseecom_open,
7840 .release = qseecom_release
7841};
7842
7843static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7844{
7845 int rc = 0;
7846 struct device *pdev;
7847 struct qseecom_clk *qclk;
7848 char *core_clk_src = NULL;
7849 char *core_clk = NULL;
7850 char *iface_clk = NULL;
7851 char *bus_clk = NULL;
7852
7853 switch (ce) {
7854 case CLK_QSEE: {
7855 core_clk_src = "core_clk_src";
7856 core_clk = "core_clk";
7857 iface_clk = "iface_clk";
7858 bus_clk = "bus_clk";
7859 qclk = &qseecom.qsee;
7860 qclk->instance = CLK_QSEE;
7861 break;
7862 };
7863 case CLK_CE_DRV: {
7864 core_clk_src = "ce_drv_core_clk_src";
7865 core_clk = "ce_drv_core_clk";
7866 iface_clk = "ce_drv_iface_clk";
7867 bus_clk = "ce_drv_bus_clk";
7868 qclk = &qseecom.ce_drv;
7869 qclk->instance = CLK_CE_DRV;
7870 break;
7871 };
7872 default:
7873 pr_err("Invalid ce hw instance: %d!\n", ce);
7874 return -EIO;
7875 }
7876
7877 if (qseecom.no_clock_support) {
7878 qclk->ce_core_clk = NULL;
7879 qclk->ce_clk = NULL;
7880 qclk->ce_bus_clk = NULL;
7881 qclk->ce_core_src_clk = NULL;
7882 return 0;
7883 }
7884
7885 pdev = qseecom.pdev;
7886
7887 /* Get CE3 src core clk. */
7888 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7889 if (!IS_ERR(qclk->ce_core_src_clk)) {
7890 rc = clk_set_rate(qclk->ce_core_src_clk,
7891 qseecom.ce_opp_freq_hz);
7892 if (rc) {
7893 clk_put(qclk->ce_core_src_clk);
7894 qclk->ce_core_src_clk = NULL;
7895 pr_err("Unable to set the core src clk @%uMhz.\n",
7896 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7897 return -EIO;
7898 }
7899 } else {
7900 pr_warn("Unable to get CE core src clk, set to NULL\n");
7901 qclk->ce_core_src_clk = NULL;
7902 }
7903
7904 /* Get CE core clk */
7905 qclk->ce_core_clk = clk_get(pdev, core_clk);
7906 if (IS_ERR(qclk->ce_core_clk)) {
7907 rc = PTR_ERR(qclk->ce_core_clk);
7908 pr_err("Unable to get CE core clk\n");
7909 if (qclk->ce_core_src_clk != NULL)
7910 clk_put(qclk->ce_core_src_clk);
7911 return -EIO;
7912 }
7913
7914 /* Get CE Interface clk */
7915 qclk->ce_clk = clk_get(pdev, iface_clk);
7916 if (IS_ERR(qclk->ce_clk)) {
7917 rc = PTR_ERR(qclk->ce_clk);
7918 pr_err("Unable to get CE interface clk\n");
7919 if (qclk->ce_core_src_clk != NULL)
7920 clk_put(qclk->ce_core_src_clk);
7921 clk_put(qclk->ce_core_clk);
7922 return -EIO;
7923 }
7924
7925 /* Get CE AXI clk */
7926 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
7927 if (IS_ERR(qclk->ce_bus_clk)) {
7928 rc = PTR_ERR(qclk->ce_bus_clk);
7929 pr_err("Unable to get CE BUS interface clk\n");
7930 if (qclk->ce_core_src_clk != NULL)
7931 clk_put(qclk->ce_core_src_clk);
7932 clk_put(qclk->ce_core_clk);
7933 clk_put(qclk->ce_clk);
7934 return -EIO;
7935 }
7936
7937 return rc;
7938}
7939
7940static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
7941{
7942 struct qseecom_clk *qclk;
7943
7944 if (ce == CLK_QSEE)
7945 qclk = &qseecom.qsee;
7946 else
7947 qclk = &qseecom.ce_drv;
7948
7949 if (qclk->ce_clk != NULL) {
7950 clk_put(qclk->ce_clk);
7951 qclk->ce_clk = NULL;
7952 }
7953 if (qclk->ce_core_clk != NULL) {
7954 clk_put(qclk->ce_core_clk);
7955 qclk->ce_core_clk = NULL;
7956 }
7957 if (qclk->ce_bus_clk != NULL) {
7958 clk_put(qclk->ce_bus_clk);
7959 qclk->ce_bus_clk = NULL;
7960 }
7961 if (qclk->ce_core_src_clk != NULL) {
7962 clk_put(qclk->ce_core_src_clk);
7963 qclk->ce_core_src_clk = NULL;
7964 }
7965 qclk->instance = CLK_INVALID;
7966}
7967
7968static int qseecom_retrieve_ce_data(struct platform_device *pdev)
7969{
7970 int rc = 0;
7971 uint32_t hlos_num_ce_hw_instances;
7972 uint32_t disk_encrypt_pipe;
7973 uint32_t file_encrypt_pipe;
Zhen Kongffec45c2017-10-18 14:05:53 -07007974 uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007975 int i;
7976 const int *tbl;
7977 int size;
7978 int entry;
7979 struct qseecom_crypto_info *pfde_tbl = NULL;
7980 struct qseecom_crypto_info *p;
7981 int tbl_size;
7982 int j;
7983 bool old_db = true;
7984 struct qseecom_ce_info_use *pce_info_use;
7985 uint32_t *unit_tbl = NULL;
7986 int total_units = 0;
7987 struct qseecom_ce_pipe_entry *pce_entry;
7988
7989 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
7990 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
7991
7992 if (of_property_read_u32((&pdev->dev)->of_node,
7993 "qcom,qsee-ce-hw-instance",
7994 &qseecom.ce_info.qsee_ce_hw_instance)) {
7995 pr_err("Fail to get qsee ce hw instance information.\n");
7996 rc = -EINVAL;
7997 goto out;
7998 } else {
7999 pr_debug("qsee-ce-hw-instance=0x%x\n",
8000 qseecom.ce_info.qsee_ce_hw_instance);
8001 }
8002
8003 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
8004 "qcom,support-fde");
8005 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
8006 "qcom,support-pfe");
8007
8008 if (!qseecom.support_pfe && !qseecom.support_fde) {
8009 pr_warn("Device does not support PFE/FDE");
8010 goto out;
8011 }
8012
8013 if (qseecom.support_fde)
8014 tbl = of_get_property((&pdev->dev)->of_node,
8015 "qcom,full-disk-encrypt-info", &size);
8016 else
8017 tbl = NULL;
8018 if (tbl) {
8019 old_db = false;
8020 if (size % sizeof(struct qseecom_crypto_info)) {
8021 pr_err("full-disk-encrypt-info tbl size(%d)\n",
8022 size);
8023 rc = -EINVAL;
8024 goto out;
8025 }
8026 tbl_size = size / sizeof
8027 (struct qseecom_crypto_info);
8028
8029 pfde_tbl = kzalloc(size, GFP_KERNEL);
8030 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8031 total_units = 0;
8032
8033 if (!pfde_tbl || !unit_tbl) {
8034 pr_err("failed to alloc memory\n");
8035 rc = -ENOMEM;
8036 goto out;
8037 }
8038 if (of_property_read_u32_array((&pdev->dev)->of_node,
8039 "qcom,full-disk-encrypt-info",
8040 (u32 *)pfde_tbl, size/sizeof(u32))) {
8041 pr_err("failed to read full-disk-encrypt-info tbl\n");
8042 rc = -EINVAL;
8043 goto out;
8044 }
8045
8046 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8047 for (j = 0; j < total_units; j++) {
8048 if (p->unit_num == *(unit_tbl + j))
8049 break;
8050 }
8051 if (j == total_units) {
8052 *(unit_tbl + total_units) = p->unit_num;
8053 total_units++;
8054 }
8055 }
8056
8057 qseecom.ce_info.num_fde = total_units;
8058 pce_info_use = qseecom.ce_info.fde = kcalloc(
8059 total_units, sizeof(struct qseecom_ce_info_use),
8060 GFP_KERNEL);
8061 if (!pce_info_use) {
8062 pr_err("failed to alloc memory\n");
8063 rc = -ENOMEM;
8064 goto out;
8065 }
8066
8067 for (j = 0; j < total_units; j++, pce_info_use++) {
8068 pce_info_use->unit_num = *(unit_tbl + j);
8069 pce_info_use->alloc = false;
8070 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8071 pce_info_use->num_ce_pipe_entries = 0;
8072 pce_info_use->ce_pipe_entry = NULL;
8073 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8074 if (p->unit_num == pce_info_use->unit_num)
8075 pce_info_use->num_ce_pipe_entries++;
8076 }
8077
8078 entry = pce_info_use->num_ce_pipe_entries;
8079 pce_entry = pce_info_use->ce_pipe_entry =
8080 kcalloc(entry,
8081 sizeof(struct qseecom_ce_pipe_entry),
8082 GFP_KERNEL);
8083 if (pce_entry == NULL) {
8084 pr_err("failed to alloc memory\n");
8085 rc = -ENOMEM;
8086 goto out;
8087 }
8088
8089 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8090 if (p->unit_num == pce_info_use->unit_num) {
8091 pce_entry->ce_num = p->ce;
8092 pce_entry->ce_pipe_pair =
8093 p->pipe_pair;
8094 pce_entry->valid = true;
8095 pce_entry++;
8096 }
8097 }
8098 }
8099 kfree(unit_tbl);
8100 unit_tbl = NULL;
8101 kfree(pfde_tbl);
8102 pfde_tbl = NULL;
8103 }
8104
8105 if (qseecom.support_pfe)
8106 tbl = of_get_property((&pdev->dev)->of_node,
8107 "qcom,per-file-encrypt-info", &size);
8108 else
8109 tbl = NULL;
8110 if (tbl) {
8111 old_db = false;
8112 if (size % sizeof(struct qseecom_crypto_info)) {
8113 pr_err("per-file-encrypt-info tbl size(%d)\n",
8114 size);
8115 rc = -EINVAL;
8116 goto out;
8117 }
8118 tbl_size = size / sizeof
8119 (struct qseecom_crypto_info);
8120
8121 pfde_tbl = kzalloc(size, GFP_KERNEL);
8122 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8123 total_units = 0;
8124 if (!pfde_tbl || !unit_tbl) {
8125 pr_err("failed to alloc memory\n");
8126 rc = -ENOMEM;
8127 goto out;
8128 }
8129 if (of_property_read_u32_array((&pdev->dev)->of_node,
8130 "qcom,per-file-encrypt-info",
8131 (u32 *)pfde_tbl, size/sizeof(u32))) {
8132 pr_err("failed to read per-file-encrypt-info tbl\n");
8133 rc = -EINVAL;
8134 goto out;
8135 }
8136
8137 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8138 for (j = 0; j < total_units; j++) {
8139 if (p->unit_num == *(unit_tbl + j))
8140 break;
8141 }
8142 if (j == total_units) {
8143 *(unit_tbl + total_units) = p->unit_num;
8144 total_units++;
8145 }
8146 }
8147
8148 qseecom.ce_info.num_pfe = total_units;
8149 pce_info_use = qseecom.ce_info.pfe = kcalloc(
8150 total_units, sizeof(struct qseecom_ce_info_use),
8151 GFP_KERNEL);
8152 if (!pce_info_use) {
8153 pr_err("failed to alloc memory\n");
8154 rc = -ENOMEM;
8155 goto out;
8156 }
8157
8158 for (j = 0; j < total_units; j++, pce_info_use++) {
8159 pce_info_use->unit_num = *(unit_tbl + j);
8160 pce_info_use->alloc = false;
8161 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8162 pce_info_use->num_ce_pipe_entries = 0;
8163 pce_info_use->ce_pipe_entry = NULL;
8164 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8165 if (p->unit_num == pce_info_use->unit_num)
8166 pce_info_use->num_ce_pipe_entries++;
8167 }
8168
8169 entry = pce_info_use->num_ce_pipe_entries;
8170 pce_entry = pce_info_use->ce_pipe_entry =
8171 kcalloc(entry,
8172 sizeof(struct qseecom_ce_pipe_entry),
8173 GFP_KERNEL);
8174 if (pce_entry == NULL) {
8175 pr_err("failed to alloc memory\n");
8176 rc = -ENOMEM;
8177 goto out;
8178 }
8179
8180 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8181 if (p->unit_num == pce_info_use->unit_num) {
8182 pce_entry->ce_num = p->ce;
8183 pce_entry->ce_pipe_pair =
8184 p->pipe_pair;
8185 pce_entry->valid = true;
8186 pce_entry++;
8187 }
8188 }
8189 }
8190 kfree(unit_tbl);
8191 unit_tbl = NULL;
8192 kfree(pfde_tbl);
8193 pfde_tbl = NULL;
8194 }
8195
8196 if (!old_db)
8197 goto out1;
8198
8199 if (of_property_read_bool((&pdev->dev)->of_node,
8200 "qcom,support-multiple-ce-hw-instance")) {
8201 if (of_property_read_u32((&pdev->dev)->of_node,
8202 "qcom,hlos-num-ce-hw-instances",
8203 &hlos_num_ce_hw_instances)) {
8204 pr_err("Fail: get hlos number of ce hw instance\n");
8205 rc = -EINVAL;
8206 goto out;
8207 }
8208 } else {
8209 hlos_num_ce_hw_instances = 1;
8210 }
8211
8212 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8213 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8214 MAX_CE_PIPE_PAIR_PER_UNIT);
8215 rc = -EINVAL;
8216 goto out;
8217 }
8218
8219 if (of_property_read_u32_array((&pdev->dev)->of_node,
8220 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8221 hlos_num_ce_hw_instances)) {
8222 pr_err("Fail: get hlos ce hw instance info\n");
8223 rc = -EINVAL;
8224 goto out;
8225 }
8226
8227 if (qseecom.support_fde) {
8228 pce_info_use = qseecom.ce_info.fde =
8229 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8230 if (!pce_info_use) {
8231 pr_err("failed to alloc memory\n");
8232 rc = -ENOMEM;
8233 goto out;
8234 }
8235 /* by default for old db */
8236 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8237 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8238 pce_info_use->alloc = false;
8239 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8240 pce_info_use->ce_pipe_entry = NULL;
8241 if (of_property_read_u32((&pdev->dev)->of_node,
8242 "qcom,disk-encrypt-pipe-pair",
8243 &disk_encrypt_pipe)) {
8244 pr_err("Fail to get FDE pipe information.\n");
8245 rc = -EINVAL;
8246 goto out;
8247 } else {
8248 pr_debug("disk-encrypt-pipe-pair=0x%x",
8249 disk_encrypt_pipe);
8250 }
8251 entry = pce_info_use->num_ce_pipe_entries =
8252 hlos_num_ce_hw_instances;
8253 pce_entry = pce_info_use->ce_pipe_entry =
8254 kcalloc(entry,
8255 sizeof(struct qseecom_ce_pipe_entry),
8256 GFP_KERNEL);
8257 if (pce_entry == NULL) {
8258 pr_err("failed to alloc memory\n");
8259 rc = -ENOMEM;
8260 goto out;
8261 }
8262 for (i = 0; i < entry; i++) {
8263 pce_entry->ce_num = hlos_ce_hw_instance[i];
8264 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8265 pce_entry->valid = 1;
8266 pce_entry++;
8267 }
8268 } else {
8269 pr_warn("Device does not support FDE");
8270 disk_encrypt_pipe = 0xff;
8271 }
8272 if (qseecom.support_pfe) {
8273 pce_info_use = qseecom.ce_info.pfe =
8274 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8275 if (!pce_info_use) {
8276 pr_err("failed to alloc memory\n");
8277 rc = -ENOMEM;
8278 goto out;
8279 }
8280 /* by default for old db */
8281 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8282 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8283 pce_info_use->alloc = false;
8284 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8285 pce_info_use->ce_pipe_entry = NULL;
8286
8287 if (of_property_read_u32((&pdev->dev)->of_node,
8288 "qcom,file-encrypt-pipe-pair",
8289 &file_encrypt_pipe)) {
8290 pr_err("Fail to get PFE pipe information.\n");
8291 rc = -EINVAL;
8292 goto out;
8293 } else {
8294 pr_debug("file-encrypt-pipe-pair=0x%x",
8295 file_encrypt_pipe);
8296 }
8297 entry = pce_info_use->num_ce_pipe_entries =
8298 hlos_num_ce_hw_instances;
8299 pce_entry = pce_info_use->ce_pipe_entry =
8300 kcalloc(entry,
8301 sizeof(struct qseecom_ce_pipe_entry),
8302 GFP_KERNEL);
8303 if (pce_entry == NULL) {
8304 pr_err("failed to alloc memory\n");
8305 rc = -ENOMEM;
8306 goto out;
8307 }
8308 for (i = 0; i < entry; i++) {
8309 pce_entry->ce_num = hlos_ce_hw_instance[i];
8310 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8311 pce_entry->valid = 1;
8312 pce_entry++;
8313 }
8314 } else {
8315 pr_warn("Device does not support PFE");
8316 file_encrypt_pipe = 0xff;
8317 }
8318
8319out1:
8320 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8321 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8322out:
8323 if (rc) {
8324 if (qseecom.ce_info.fde) {
8325 pce_info_use = qseecom.ce_info.fde;
8326 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8327 pce_entry = pce_info_use->ce_pipe_entry;
8328 kfree(pce_entry);
8329 pce_info_use++;
8330 }
8331 }
8332 kfree(qseecom.ce_info.fde);
8333 qseecom.ce_info.fde = NULL;
8334 if (qseecom.ce_info.pfe) {
8335 pce_info_use = qseecom.ce_info.pfe;
8336 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8337 pce_entry = pce_info_use->ce_pipe_entry;
8338 kfree(pce_entry);
8339 pce_info_use++;
8340 }
8341 }
8342 kfree(qseecom.ce_info.pfe);
8343 qseecom.ce_info.pfe = NULL;
8344 }
8345 kfree(unit_tbl);
8346 kfree(pfde_tbl);
8347 return rc;
8348}
8349
8350static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8351 void __user *argp)
8352{
8353 struct qseecom_ce_info_req req;
8354 struct qseecom_ce_info_req *pinfo = &req;
8355 int ret = 0;
8356 int i;
8357 unsigned int entries;
8358 struct qseecom_ce_info_use *pce_info_use, *p;
8359 int total = 0;
8360 bool found = false;
8361 struct qseecom_ce_pipe_entry *pce_entry;
8362
8363 ret = copy_from_user(pinfo, argp,
8364 sizeof(struct qseecom_ce_info_req));
8365 if (ret) {
8366 pr_err("copy_from_user failed\n");
8367 return ret;
8368 }
8369
8370 switch (pinfo->usage) {
8371 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8372 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8373 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8374 if (qseecom.support_fde) {
8375 p = qseecom.ce_info.fde;
8376 total = qseecom.ce_info.num_fde;
8377 } else {
8378 pr_err("system does not support fde\n");
8379 return -EINVAL;
8380 }
8381 break;
8382 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8383 if (qseecom.support_pfe) {
8384 p = qseecom.ce_info.pfe;
8385 total = qseecom.ce_info.num_pfe;
8386 } else {
8387 pr_err("system does not support pfe\n");
8388 return -EINVAL;
8389 }
8390 break;
8391 default:
8392 pr_err("unsupported usage %d\n", pinfo->usage);
8393 return -EINVAL;
8394 }
8395
8396 pce_info_use = NULL;
8397 for (i = 0; i < total; i++) {
8398 if (!p->alloc)
8399 pce_info_use = p;
8400 else if (!memcmp(p->handle, pinfo->handle,
8401 MAX_CE_INFO_HANDLE_SIZE)) {
8402 pce_info_use = p;
8403 found = true;
8404 break;
8405 }
8406 p++;
8407 }
8408
8409 if (pce_info_use == NULL)
8410 return -EBUSY;
8411
8412 pinfo->unit_num = pce_info_use->unit_num;
8413 if (!pce_info_use->alloc) {
8414 pce_info_use->alloc = true;
8415 memcpy(pce_info_use->handle,
8416 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8417 }
8418 if (pce_info_use->num_ce_pipe_entries >
8419 MAX_CE_PIPE_PAIR_PER_UNIT)
8420 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8421 else
8422 entries = pce_info_use->num_ce_pipe_entries;
8423 pinfo->num_ce_pipe_entries = entries;
8424 pce_entry = pce_info_use->ce_pipe_entry;
8425 for (i = 0; i < entries; i++, pce_entry++)
8426 pinfo->ce_pipe_entry[i] = *pce_entry;
8427 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8428 pinfo->ce_pipe_entry[i].valid = 0;
8429
8430 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8431 pr_err("copy_to_user failed\n");
8432 ret = -EFAULT;
8433 }
8434 return ret;
8435}
8436
8437static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8438 void __user *argp)
8439{
8440 struct qseecom_ce_info_req req;
8441 struct qseecom_ce_info_req *pinfo = &req;
8442 int ret = 0;
8443 struct qseecom_ce_info_use *p;
8444 int total = 0;
8445 int i;
8446 bool found = false;
8447
8448 ret = copy_from_user(pinfo, argp,
8449 sizeof(struct qseecom_ce_info_req));
8450 if (ret)
8451 return ret;
8452
8453 switch (pinfo->usage) {
8454 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8455 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8456 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8457 if (qseecom.support_fde) {
8458 p = qseecom.ce_info.fde;
8459 total = qseecom.ce_info.num_fde;
8460 } else {
8461 pr_err("system does not support fde\n");
8462 return -EINVAL;
8463 }
8464 break;
8465 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8466 if (qseecom.support_pfe) {
8467 p = qseecom.ce_info.pfe;
8468 total = qseecom.ce_info.num_pfe;
8469 } else {
8470 pr_err("system does not support pfe\n");
8471 return -EINVAL;
8472 }
8473 break;
8474 default:
8475 pr_err("unsupported usage %d\n", pinfo->usage);
8476 return -EINVAL;
8477 }
8478
8479 for (i = 0; i < total; i++) {
8480 if (p->alloc &&
8481 !memcmp(p->handle, pinfo->handle,
8482 MAX_CE_INFO_HANDLE_SIZE)) {
8483 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8484 p->alloc = false;
8485 found = true;
8486 break;
8487 }
8488 p++;
8489 }
8490 return ret;
8491}
8492
8493static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8494 void __user *argp)
8495{
8496 struct qseecom_ce_info_req req;
8497 struct qseecom_ce_info_req *pinfo = &req;
8498 int ret = 0;
8499 int i;
8500 unsigned int entries;
8501 struct qseecom_ce_info_use *pce_info_use, *p;
8502 int total = 0;
8503 bool found = false;
8504 struct qseecom_ce_pipe_entry *pce_entry;
8505
8506 ret = copy_from_user(pinfo, argp,
8507 sizeof(struct qseecom_ce_info_req));
8508 if (ret)
8509 return ret;
8510
8511 switch (pinfo->usage) {
8512 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8513 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8514 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8515 if (qseecom.support_fde) {
8516 p = qseecom.ce_info.fde;
8517 total = qseecom.ce_info.num_fde;
8518 } else {
8519 pr_err("system does not support fde\n");
8520 return -EINVAL;
8521 }
8522 break;
8523 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8524 if (qseecom.support_pfe) {
8525 p = qseecom.ce_info.pfe;
8526 total = qseecom.ce_info.num_pfe;
8527 } else {
8528 pr_err("system does not support pfe\n");
8529 return -EINVAL;
8530 }
8531 break;
8532 default:
8533 pr_err("unsupported usage %d\n", pinfo->usage);
8534 return -EINVAL;
8535 }
8536
8537 pce_info_use = NULL;
8538 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8539 pinfo->num_ce_pipe_entries = 0;
8540 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8541 pinfo->ce_pipe_entry[i].valid = 0;
8542
8543 for (i = 0; i < total; i++) {
8544
8545 if (p->alloc && !memcmp(p->handle,
8546 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8547 pce_info_use = p;
8548 found = true;
8549 break;
8550 }
8551 p++;
8552 }
8553 if (!pce_info_use)
8554 goto out;
8555 pinfo->unit_num = pce_info_use->unit_num;
8556 if (pce_info_use->num_ce_pipe_entries >
8557 MAX_CE_PIPE_PAIR_PER_UNIT)
8558 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8559 else
8560 entries = pce_info_use->num_ce_pipe_entries;
8561 pinfo->num_ce_pipe_entries = entries;
8562 pce_entry = pce_info_use->ce_pipe_entry;
8563 for (i = 0; i < entries; i++, pce_entry++)
8564 pinfo->ce_pipe_entry[i] = *pce_entry;
8565 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8566 pinfo->ce_pipe_entry[i].valid = 0;
8567out:
8568 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8569 pr_err("copy_to_user failed\n");
8570 ret = -EFAULT;
8571 }
8572 return ret;
8573}
8574
8575/*
8576 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8577 * then whitelist feature is not supported.
8578 */
8579static int qseecom_check_whitelist_feature(void)
8580{
8581 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8582
8583 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8584}
8585
8586static int qseecom_probe(struct platform_device *pdev)
8587{
8588 int rc;
8589 int i;
8590 uint32_t feature = 10;
8591 struct device *class_dev;
8592 struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
8593 struct qseecom_command_scm_resp resp;
8594 struct qseecom_ce_info_use *pce_info_use = NULL;
8595
8596 qseecom.qsee_bw_count = 0;
8597 qseecom.qsee_perf_client = 0;
8598 qseecom.qsee_sfpb_bw_count = 0;
8599
8600 qseecom.qsee.ce_core_clk = NULL;
8601 qseecom.qsee.ce_clk = NULL;
8602 qseecom.qsee.ce_core_src_clk = NULL;
8603 qseecom.qsee.ce_bus_clk = NULL;
8604
8605 qseecom.cumulative_mode = 0;
8606 qseecom.current_mode = INACTIVE;
8607 qseecom.support_bus_scaling = false;
8608 qseecom.support_fde = false;
8609 qseecom.support_pfe = false;
8610
8611 qseecom.ce_drv.ce_core_clk = NULL;
8612 qseecom.ce_drv.ce_clk = NULL;
8613 qseecom.ce_drv.ce_core_src_clk = NULL;
8614 qseecom.ce_drv.ce_bus_clk = NULL;
8615 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8616
8617 qseecom.app_block_ref_cnt = 0;
8618 init_waitqueue_head(&qseecom.app_block_wq);
8619 qseecom.whitelist_support = true;
8620
8621 rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
8622 if (rc < 0) {
8623 pr_err("alloc_chrdev_region failed %d\n", rc);
8624 return rc;
8625 }
8626
8627 driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
8628 if (IS_ERR(driver_class)) {
8629 rc = -ENOMEM;
8630 pr_err("class_create failed %d\n", rc);
8631 goto exit_unreg_chrdev_region;
8632 }
8633
8634 class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
8635 QSEECOM_DEV);
8636 if (IS_ERR(class_dev)) {
8637 pr_err("class_device_create failed %d\n", rc);
8638 rc = -ENOMEM;
8639 goto exit_destroy_class;
8640 }
8641
8642 cdev_init(&qseecom.cdev, &qseecom_fops);
8643 qseecom.cdev.owner = THIS_MODULE;
8644
8645 rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
8646 if (rc < 0) {
8647 pr_err("cdev_add failed %d\n", rc);
8648 goto exit_destroy_device;
8649 }
8650
8651 INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008652 INIT_LIST_HEAD(&qseecom.registered_app_list_head);
8653 spin_lock_init(&qseecom.registered_app_list_lock);
Zhen Kongbcdeda22018-11-16 13:50:51 -08008654 INIT_LIST_HEAD(&qseecom.unregister_lsnr_pending_list_head);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008655 INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
8656 spin_lock_init(&qseecom.registered_kclient_list_lock);
8657 init_waitqueue_head(&qseecom.send_resp_wq);
Zhen Kongbcdeda22018-11-16 13:50:51 -08008658 init_waitqueue_head(&qseecom.register_lsnr_pending_wq);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008659 qseecom.send_resp_flag = 0;
8660
8661 qseecom.qsee_version = QSEEE_VERSION_00;
8662 rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
8663 &resp, sizeof(resp));
8664 pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
8665 if (rc) {
8666 pr_err("Failed to get QSEE version info %d\n", rc);
8667 goto exit_del_cdev;
8668 }
8669 qseecom.qsee_version = resp.result;
8670 qseecom.qseos_version = QSEOS_VERSION_14;
8671 qseecom.commonlib_loaded = false;
8672 qseecom.commonlib64_loaded = false;
8673 qseecom.pdev = class_dev;
8674 /* Create ION msm client */
8675 qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
8676 if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
8677 pr_err("Ion client cannot be created\n");
8678 rc = -ENOMEM;
8679 goto exit_del_cdev;
8680 }
8681
8682 /* register client for bus scaling */
8683 if (pdev->dev.of_node) {
8684 qseecom.pdev->of_node = pdev->dev.of_node;
8685 qseecom.support_bus_scaling =
8686 of_property_read_bool((&pdev->dev)->of_node,
8687 "qcom,support-bus-scaling");
8688 rc = qseecom_retrieve_ce_data(pdev);
8689 if (rc)
8690 goto exit_destroy_ion_client;
8691 qseecom.appsbl_qseecom_support =
8692 of_property_read_bool((&pdev->dev)->of_node,
8693 "qcom,appsbl-qseecom-support");
8694 pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
8695 qseecom.appsbl_qseecom_support);
8696
8697 qseecom.commonlib64_loaded =
8698 of_property_read_bool((&pdev->dev)->of_node,
8699 "qcom,commonlib64-loaded-by-uefi");
8700 pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
8701 qseecom.commonlib64_loaded);
8702 qseecom.fde_key_size =
8703 of_property_read_bool((&pdev->dev)->of_node,
8704 "qcom,fde-key-size");
8705 qseecom.no_clock_support =
8706 of_property_read_bool((&pdev->dev)->of_node,
8707 "qcom,no-clock-support");
8708 if (!qseecom.no_clock_support) {
8709 pr_info("qseecom clocks handled by other subsystem\n");
8710 } else {
8711 pr_info("no-clock-support=0x%x",
8712 qseecom.no_clock_support);
8713 }
8714
8715 if (of_property_read_u32((&pdev->dev)->of_node,
8716 "qcom,qsee-reentrancy-support",
8717 &qseecom.qsee_reentrancy_support)) {
8718 pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
8719 qseecom.qsee_reentrancy_support = 0;
8720 } else {
8721 pr_warn("qseecom.qsee_reentrancy_support = %d\n",
8722 qseecom.qsee_reentrancy_support);
8723 }
8724
Jiten Patela7bb1d52018-05-11 12:34:26 +05308725 qseecom.enable_key_wrap_in_ks =
8726 of_property_read_bool((&pdev->dev)->of_node,
8727 "qcom,enable-key-wrap-in-ks");
8728 if (qseecom.enable_key_wrap_in_ks) {
8729 pr_warn("qseecom.enable_key_wrap_in_ks = %d\n",
8730 qseecom.enable_key_wrap_in_ks);
8731 }
8732
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008733 /*
8734 * The qseecom bus scaling flag can not be enabled when
8735 * crypto clock is not handled by HLOS.
8736 */
8737 if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
8738 pr_err("support_bus_scaling flag can not be enabled.\n");
8739 rc = -EINVAL;
8740 goto exit_destroy_ion_client;
8741 }
8742
8743 if (of_property_read_u32((&pdev->dev)->of_node,
8744 "qcom,ce-opp-freq",
8745 &qseecom.ce_opp_freq_hz)) {
8746 pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
8747 qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
8748 }
8749 rc = __qseecom_init_clk(CLK_QSEE);
8750 if (rc)
8751 goto exit_destroy_ion_client;
8752
8753 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8754 (qseecom.support_pfe || qseecom.support_fde)) {
8755 rc = __qseecom_init_clk(CLK_CE_DRV);
8756 if (rc) {
8757 __qseecom_deinit_clk(CLK_QSEE);
8758 goto exit_destroy_ion_client;
8759 }
8760 } else {
8761 struct qseecom_clk *qclk;
8762
8763 qclk = &qseecom.qsee;
8764 qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
8765 qseecom.ce_drv.ce_clk = qclk->ce_clk;
8766 qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
8767 qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
8768 }
8769
8770 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8771 msm_bus_cl_get_pdata(pdev);
8772 if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
8773 (!qseecom.is_apps_region_protected &&
8774 !qseecom.appsbl_qseecom_support)) {
8775 struct resource *resource = NULL;
8776 struct qsee_apps_region_info_ireq req;
8777 struct qsee_apps_region_info_64bit_ireq req_64bit;
8778 struct qseecom_command_scm_resp resp;
8779 void *cmd_buf = NULL;
8780 size_t cmd_len;
8781
8782 resource = platform_get_resource_byname(pdev,
8783 IORESOURCE_MEM, "secapp-region");
8784 if (resource) {
8785 if (qseecom.qsee_version < QSEE_VERSION_40) {
8786 req.qsee_cmd_id =
8787 QSEOS_APP_REGION_NOTIFICATION;
8788 req.addr = (uint32_t)resource->start;
8789 req.size = resource_size(resource);
8790 cmd_buf = (void *)&req;
8791 cmd_len = sizeof(struct
8792 qsee_apps_region_info_ireq);
8793 pr_warn("secure app region addr=0x%x size=0x%x",
8794 req.addr, req.size);
8795 } else {
8796 req_64bit.qsee_cmd_id =
8797 QSEOS_APP_REGION_NOTIFICATION;
8798 req_64bit.addr = resource->start;
8799 req_64bit.size = resource_size(
8800 resource);
8801 cmd_buf = (void *)&req_64bit;
8802 cmd_len = sizeof(struct
8803 qsee_apps_region_info_64bit_ireq);
8804 pr_warn("secure app region addr=0x%llx size=0x%x",
8805 req_64bit.addr, req_64bit.size);
8806 }
8807 } else {
8808 pr_err("Fail to get secure app region info\n");
8809 rc = -EINVAL;
8810 goto exit_deinit_clock;
8811 }
8812 rc = __qseecom_enable_clk(CLK_QSEE);
8813 if (rc) {
8814 pr_err("CLK_QSEE enabling failed (%d)\n", rc);
8815 rc = -EIO;
8816 goto exit_deinit_clock;
8817 }
8818 rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
8819 cmd_buf, cmd_len,
8820 &resp, sizeof(resp));
8821 __qseecom_disable_clk(CLK_QSEE);
8822 if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
8823 pr_err("send secapp reg fail %d resp.res %d\n",
8824 rc, resp.result);
8825 rc = -EINVAL;
8826 goto exit_deinit_clock;
8827 }
8828 }
8829 /*
8830 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
8831 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
8832 * Pls add "qseecom.commonlib64_loaded = true" here too.
8833 */
8834 if (qseecom.is_apps_region_protected ||
8835 qseecom.appsbl_qseecom_support)
8836 qseecom.commonlib_loaded = true;
8837 } else {
8838 qseecom_platform_support = (struct msm_bus_scale_pdata *)
8839 pdev->dev.platform_data;
8840 }
8841 if (qseecom.support_bus_scaling) {
8842 init_timer(&(qseecom.bw_scale_down_timer));
8843 INIT_WORK(&qseecom.bw_inactive_req_ws,
8844 qseecom_bw_inactive_req_work);
8845 qseecom.bw_scale_down_timer.function =
8846 qseecom_scale_bus_bandwidth_timer_callback;
8847 }
8848 qseecom.timer_running = false;
8849 qseecom.qsee_perf_client = msm_bus_scale_register_client(
8850 qseecom_platform_support);
8851
8852 qseecom.whitelist_support = qseecom_check_whitelist_feature();
8853 pr_warn("qseecom.whitelist_support = %d\n",
8854 qseecom.whitelist_support);
8855
8856 if (!qseecom.qsee_perf_client)
8857 pr_err("Unable to register bus client\n");
8858
8859 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
8860 return 0;
8861
8862exit_deinit_clock:
8863 __qseecom_deinit_clk(CLK_QSEE);
8864 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8865 (qseecom.support_pfe || qseecom.support_fde))
8866 __qseecom_deinit_clk(CLK_CE_DRV);
8867exit_destroy_ion_client:
8868 if (qseecom.ce_info.fde) {
8869 pce_info_use = qseecom.ce_info.fde;
8870 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8871 kzfree(pce_info_use->ce_pipe_entry);
8872 pce_info_use++;
8873 }
8874 kfree(qseecom.ce_info.fde);
8875 }
8876 if (qseecom.ce_info.pfe) {
8877 pce_info_use = qseecom.ce_info.pfe;
8878 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8879 kzfree(pce_info_use->ce_pipe_entry);
8880 pce_info_use++;
8881 }
8882 kfree(qseecom.ce_info.pfe);
8883 }
8884 ion_client_destroy(qseecom.ion_clnt);
8885exit_del_cdev:
8886 cdev_del(&qseecom.cdev);
8887exit_destroy_device:
8888 device_destroy(driver_class, qseecom_device_no);
8889exit_destroy_class:
8890 class_destroy(driver_class);
8891exit_unreg_chrdev_region:
8892 unregister_chrdev_region(qseecom_device_no, 1);
8893 return rc;
8894}
8895
8896static int qseecom_remove(struct platform_device *pdev)
8897{
8898 struct qseecom_registered_kclient_list *kclient = NULL;
Monika Singhe711b162018-04-24 09:54:50 +05308899 struct qseecom_registered_kclient_list *kclient_tmp = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008900 unsigned long flags = 0;
8901 int ret = 0;
8902 int i;
8903 struct qseecom_ce_pipe_entry *pce_entry;
8904 struct qseecom_ce_info_use *pce_info_use;
8905
8906 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8907 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
8908
Monika Singhe711b162018-04-24 09:54:50 +05308909 list_for_each_entry_safe(kclient, kclient_tmp,
8910 &qseecom.registered_kclient_list_head, list) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008911
8912 /* Break the loop if client handle is NULL */
Zhen Kong9131def2018-07-13 12:02:32 -07008913 if (!kclient->handle) {
8914 list_del(&kclient->list);
8915 kzfree(kclient);
8916 break;
8917 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008918
8919 list_del(&kclient->list);
8920 mutex_lock(&app_access_lock);
8921 ret = qseecom_unload_app(kclient->handle->dev, false);
8922 mutex_unlock(&app_access_lock);
8923 if (!ret) {
8924 kzfree(kclient->handle->dev);
8925 kzfree(kclient->handle);
8926 kzfree(kclient);
8927 }
8928 }
8929
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008930 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
8931
8932 if (qseecom.qseos_version > QSEEE_VERSION_00)
8933 qseecom_unload_commonlib_image();
8934
8935 if (qseecom.qsee_perf_client)
8936 msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
8937 0);
8938 if (pdev->dev.platform_data != NULL)
8939 msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
8940
8941 if (qseecom.support_bus_scaling) {
8942 cancel_work_sync(&qseecom.bw_inactive_req_ws);
8943 del_timer_sync(&qseecom.bw_scale_down_timer);
8944 }
8945
8946 if (qseecom.ce_info.fde) {
8947 pce_info_use = qseecom.ce_info.fde;
8948 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8949 pce_entry = pce_info_use->ce_pipe_entry;
8950 kfree(pce_entry);
8951 pce_info_use++;
8952 }
8953 }
8954 kfree(qseecom.ce_info.fde);
8955 if (qseecom.ce_info.pfe) {
8956 pce_info_use = qseecom.ce_info.pfe;
8957 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8958 pce_entry = pce_info_use->ce_pipe_entry;
8959 kfree(pce_entry);
8960 pce_info_use++;
8961 }
8962 }
8963 kfree(qseecom.ce_info.pfe);
8964
8965 /* register client for bus scaling */
8966 if (pdev->dev.of_node) {
8967 __qseecom_deinit_clk(CLK_QSEE);
8968 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8969 (qseecom.support_pfe || qseecom.support_fde))
8970 __qseecom_deinit_clk(CLK_CE_DRV);
8971 }
8972
8973 ion_client_destroy(qseecom.ion_clnt);
8974
8975 cdev_del(&qseecom.cdev);
8976
8977 device_destroy(driver_class, qseecom_device_no);
8978
8979 class_destroy(driver_class);
8980
8981 unregister_chrdev_region(qseecom_device_no, 1);
8982
8983 return ret;
8984}
8985
8986static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
8987{
8988 int ret = 0;
8989 struct qseecom_clk *qclk;
8990
8991 qclk = &qseecom.qsee;
8992 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
8993 if (qseecom.no_clock_support)
8994 return 0;
8995
8996 mutex_lock(&qsee_bw_mutex);
8997 mutex_lock(&clk_access_lock);
8998
8999 if (qseecom.current_mode != INACTIVE) {
9000 ret = msm_bus_scale_client_update_request(
9001 qseecom.qsee_perf_client, INACTIVE);
9002 if (ret)
9003 pr_err("Fail to scale down bus\n");
9004 else
9005 qseecom.current_mode = INACTIVE;
9006 }
9007
9008 if (qclk->clk_access_cnt) {
9009 if (qclk->ce_clk != NULL)
9010 clk_disable_unprepare(qclk->ce_clk);
9011 if (qclk->ce_core_clk != NULL)
9012 clk_disable_unprepare(qclk->ce_core_clk);
9013 if (qclk->ce_bus_clk != NULL)
9014 clk_disable_unprepare(qclk->ce_bus_clk);
9015 }
9016
9017 del_timer_sync(&(qseecom.bw_scale_down_timer));
9018 qseecom.timer_running = false;
9019
9020 mutex_unlock(&clk_access_lock);
9021 mutex_unlock(&qsee_bw_mutex);
9022 cancel_work_sync(&qseecom.bw_inactive_req_ws);
9023
9024 return 0;
9025}
9026
9027static int qseecom_resume(struct platform_device *pdev)
9028{
9029 int mode = 0;
9030 int ret = 0;
9031 struct qseecom_clk *qclk;
9032
9033 qclk = &qseecom.qsee;
9034 if (qseecom.no_clock_support)
9035 goto exit;
9036
9037 mutex_lock(&qsee_bw_mutex);
9038 mutex_lock(&clk_access_lock);
9039 if (qseecom.cumulative_mode >= HIGH)
9040 mode = HIGH;
9041 else
9042 mode = qseecom.cumulative_mode;
9043
9044 if (qseecom.cumulative_mode != INACTIVE) {
9045 ret = msm_bus_scale_client_update_request(
9046 qseecom.qsee_perf_client, mode);
9047 if (ret)
9048 pr_err("Fail to scale up bus to %d\n", mode);
9049 else
9050 qseecom.current_mode = mode;
9051 }
9052
9053 if (qclk->clk_access_cnt) {
9054 if (qclk->ce_core_clk != NULL) {
9055 ret = clk_prepare_enable(qclk->ce_core_clk);
9056 if (ret) {
9057 pr_err("Unable to enable/prep CE core clk\n");
9058 qclk->clk_access_cnt = 0;
9059 goto err;
9060 }
9061 }
9062 if (qclk->ce_clk != NULL) {
9063 ret = clk_prepare_enable(qclk->ce_clk);
9064 if (ret) {
9065 pr_err("Unable to enable/prep CE iface clk\n");
9066 qclk->clk_access_cnt = 0;
9067 goto ce_clk_err;
9068 }
9069 }
9070 if (qclk->ce_bus_clk != NULL) {
9071 ret = clk_prepare_enable(qclk->ce_bus_clk);
9072 if (ret) {
9073 pr_err("Unable to enable/prep CE bus clk\n");
9074 qclk->clk_access_cnt = 0;
9075 goto ce_bus_clk_err;
9076 }
9077 }
9078 }
9079
9080 if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
9081 qseecom.bw_scale_down_timer.expires = jiffies +
9082 msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
9083 mod_timer(&(qseecom.bw_scale_down_timer),
9084 qseecom.bw_scale_down_timer.expires);
9085 qseecom.timer_running = true;
9086 }
9087
9088 mutex_unlock(&clk_access_lock);
9089 mutex_unlock(&qsee_bw_mutex);
9090 goto exit;
9091
9092ce_bus_clk_err:
9093 if (qclk->ce_clk)
9094 clk_disable_unprepare(qclk->ce_clk);
9095ce_clk_err:
9096 if (qclk->ce_core_clk)
9097 clk_disable_unprepare(qclk->ce_core_clk);
9098err:
9099 mutex_unlock(&clk_access_lock);
9100 mutex_unlock(&qsee_bw_mutex);
9101 ret = -EIO;
9102exit:
9103 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
9104 return ret;
9105}
9106
9107static const struct of_device_id qseecom_match[] = {
9108 {
9109 .compatible = "qcom,qseecom",
9110 },
9111 {}
9112};
9113
9114static struct platform_driver qseecom_plat_driver = {
9115 .probe = qseecom_probe,
9116 .remove = qseecom_remove,
9117 .suspend = qseecom_suspend,
9118 .resume = qseecom_resume,
9119 .driver = {
9120 .name = "qseecom",
9121 .owner = THIS_MODULE,
9122 .of_match_table = qseecom_match,
9123 },
9124};
9125
9126static int qseecom_init(void)
9127{
9128 return platform_driver_register(&qseecom_plat_driver);
9129}
9130
9131static void qseecom_exit(void)
9132{
9133 platform_driver_unregister(&qseecom_plat_driver);
9134}
9135
9136MODULE_LICENSE("GPL v2");
9137MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
9138
9139module_init(qseecom_init);
9140module_exit(qseecom_exit);