blob: 4c4835d213dabccffdc4906bf32d411ea1ad5fd4 [file] [log] [blame]
/*
 * QTI Secure Execution Environment Communicator (QSEECOM) driver
 *
 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
53
/* Character device name exposed to user space */
#define QSEECOM_DEV			"qseecom"
#define QSEOS_VERSION_14		0x14
/* QSEE firmware version identifiers; feature support is gated on these */
#define QSEEE_VERSION_00		0x400000
#define QSEE_VERSION_01			0x401000
#define QSEE_VERSION_02			0x402000
#define QSEE_VERSION_03			0x403000
#define QSEE_VERSION_04			0x404000
#define QSEE_VERSION_05			0x405000
#define QSEE_VERSION_20			0x800000
#define QSEE_VERSION_40			0x1000000  /* TZ.BF.4.0 */

#define QSEE_CE_CLK_100MHZ		100000000
#define CE_CLK_DIV			1000000

/* Upper bound on scatter/gather entries in one message buffer */
#define QSEECOM_MAX_SG_ENTRY			512
#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT	\
		(QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)

#define QSEECOM_INVALID_KEY_ID		0xff

/* Save partition image hash for authentication check */
#define SCM_SAVE_PARTITION_HASH_ID	0x01

/* Check if enterprise security is activate */
#define SCM_IS_ACTIVATED_ID		0x02

/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
#define SCM_MDTP_CIPHER_DIP		0x01

/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
#define MAX_DIP			0x20000

#define RPMB_SERVICE			0x2000
#define SSD_SERVICE			0x3000

/* Timeouts (ms) for crypto clock votes around send-cmd / load-app */
#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT	2000
#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT	2000
#define TWO 2
#define QSEECOM_UFS_ICE_CE_NUM	10
#define QSEECOM_SDCC_ICE_CE_NUM	20
#define QSEECOM_ICE_FDE_KEY_INDEX	0

#define PHY_ADDR_4G	(1ULL<<32)

/* Values held in qseecom.qseecom_state (atomic driver lifecycle state) */
#define QSEECOM_STATE_NOT_READY		0
#define QSEECOM_STATE_SUSPEND		1
#define QSEECOM_STATE_READY		2
/* Bit position used to encode ICE FDE key size (see enum below) */
#define QSEECOM_ICE_FDE_KEY_SIZE_MASK	2

/*
 * default ce info unit to 0 for
 * services which
 * support only single instance.
 * Most of services are in this category.
 */
#define DEFAULT_CE_INFO_UNIT 0
#define DEFAULT_NUM_CE_INFO_UNIT 1
111
/* Bandwidth/clock vote targets used by qsee_vote_for_clock() */
enum qseecom_clk_definitions {
	CLK_DFAB = 0,
	CLK_SFPB,
};
116
/*
 * ICE full-disk-encryption key sizes, encoded as flag bits shifted by
 * QSEECOM_ICE_FDE_KEY_SIZE_MASK.
 */
enum qseecom_ice_key_size_type {
	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
};
125
/* Role of a qseecom_dev_handle (what kind of client opened the device) */
enum qseecom_client_handle_type {
	QSEECOM_CLIENT_APP = 1,
	QSEECOM_LISTENER_SERVICE,
	QSEECOM_SECURE_SERVICE,
	QSEECOM_GENERIC,
	QSEECOM_UNAVAILABLE_CLIENT_APP,
};
133
/* Crypto-engine hardware instance selector for clock setup/teardown */
enum qseecom_ce_hw_instance {
	CLK_QSEE = 0,
	CLK_CE_DRV,
	CLK_INVALID,
};
139
/* Device class and char-device number allocated at probe time */
static struct class *driver_class;
static dev_t qseecom_device_no;

/* Serialize bandwidth scaling, TZ app access, and clock refcounting */
static DEFINE_MUTEX(qsee_bw_mutex);
static DEFINE_MUTEX(app_access_lock);
static DEFINE_MUTEX(clk_access_lock);
146
/* One packed scatter/gather descriptor passed to TZ (see comment below) */
struct sglist_info {
	uint32_t indexAndFlags;
	uint32_t sizeOrCount;
};

/*
 * The 31th bit indicates only one or multiple physical address inside
 * the request buffer. If it is set, the index locates a single physical addr
 * inside the request buffer, and `sizeOrCount` is the size of the memory being
 * shared at that physical address.
 * Otherwise, the index locates an array of {start, len} pairs (a
 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
 * that array.
 *
 * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
 *
 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
 */
#define SGLISTINFO_SET_INDEX_FLAG(c, s, i)	\
	((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))

#define SGLISTINFO_TABLE_SIZE	(sizeof(struct sglist_info) * MAX_ION_FD)

#define FEATURE_ID_WHITELIST	15	/*whitelist feature id*/

#define MAKE_WHITELIST_VERSION(major, minor, patch) \
	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
175
/* Per-listener bookkeeping: shared buffer mapping plus wait queues */
struct qseecom_registered_listener_list {
	struct list_head list;
	struct qseecom_register_listener_req svc;	/* id + user sb info */
	void *user_virt_sb_base;	/* user-space shared buffer base */
	u8 *sb_virt;			/* kernel mapping of shared buffer */
	phys_addr_t sb_phys;
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	wait_queue_head_t rcv_req_wq;
	int rcv_req_flag;
	int send_resp_flag;
	bool listener_in_use;
	/* wq for thread blocked on this listener*/
	wait_queue_head_t listener_block_app_wq;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
};
193
/* One loaded TZ application; ref-counted across client handles */
struct qseecom_registered_app_list {
	struct list_head list;
	u32 app_id;
	u32 ref_cnt;
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;
	bool app_blocked;		/* app waiting on a busy listener */
	u32 blocked_on_listener_id;
};
203
/* Tracks handles handed out via the in-kernel (kclient) API */
struct qseecom_registered_kclient_list {
	struct list_head list;
	struct qseecom_handle *handle;
};
208
/* One crypto-engine usage record (per FDE/PFE unit) */
struct qseecom_ce_info_use {
	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
	unsigned int unit_num;
	unsigned int num_ce_pipe_entries;
	struct qseecom_ce_pipe_entry *ce_pipe_entry;
	bool alloc;		/* entry currently handed out to a client */
	uint32_t type;
};
217
/* Aggregated CE hardware info: FDE and PFE usage tables */
struct ce_hw_usage_info {
	uint32_t qsee_ce_hw_instance;
	uint32_t num_fde;
	struct qseecom_ce_info_use *fde;
	uint32_t num_pfe;
	struct qseecom_ce_info_use *pfe;
};
225
/* Clock bundle for one CE hardware instance, with a vote refcount */
struct qseecom_clk {
	enum qseecom_ce_hw_instance instance;
	struct clk *ce_core_clk;
	struct clk *ce_clk;
	struct clk *ce_core_src_clk;
	struct clk *ce_bus_clk;
	uint32_t clk_access_cnt;
};
234
/*
 * Global driver state (single instance: the file-scope `qseecom` below).
 * Holds the registered listener/app/kclient lists with their locks,
 * firmware version info, clock/bandwidth vote state and feature flags.
 */
struct qseecom_control {
	struct ion_client *ion_clnt;		/* Ion client */
	struct list_head registered_listener_list_head;
	spinlock_t registered_listener_list_lock;

	struct list_head registered_app_list_head;
	spinlock_t registered_app_list_lock;

	struct list_head registered_kclient_list_head;
	spinlock_t registered_kclient_list_lock;

	wait_queue_head_t send_resp_wq;
	int send_resp_flag;

	uint32_t qseos_version;
	uint32_t qsee_version;
	struct device *pdev;
	bool whitelist_support;
	bool commonlib_loaded;
	bool commonlib64_loaded;
	struct ce_hw_usage_info ce_info;

	int qsee_bw_count;
	int qsee_sfpb_bw_count;

	uint32_t qsee_perf_client;
	struct qseecom_clk qsee;
	struct qseecom_clk ce_drv;

	bool support_bus_scaling;
	bool support_fde;
	bool support_pfe;
	bool fde_key_size;
	uint32_t cumulative_mode;
	enum qseecom_bandwidth_request_mode current_mode;
	struct timer_list bw_scale_down_timer;
	struct work_struct bw_inactive_req_ws;
	struct cdev cdev;
	bool timer_running;
	bool no_clock_support;
	unsigned int ce_opp_freq_hz;
	bool appsbl_qseecom_support;
	uint32_t qsee_reentrancy_support;

	uint32_t app_block_ref_cnt;
	wait_queue_head_t app_block_wq;
	atomic_t qseecom_state;		/* QSEECOM_STATE_* */
	int is_apps_region_protected;	/* set from androidboot.keymaster= */
	/* set true once TZ accepts the smcinvoke-aware listener SMC */
	bool smcinvoke_support;
};
285
/* DMA allocation backing a secure-buffer ion fd passed by a client */
struct qseecom_sec_buf_fd_info {
	bool is_sec_buf_fd;
	size_t size;
	void *vbase;
	dma_addr_t pbase;
};
292
/* 32-bit {buffer, size} memory reference as laid out for TZ */
struct qseecom_param_memref {
	uint32_t buffer;
	uint32_t size;
};
297
/* Per-open client state when the handle represents a TZ app client */
struct qseecom_client_handle {
	u32 app_id;
	u8 *sb_virt;			/* kernel mapping of shared buffer */
	phys_addr_t sb_phys;
	unsigned long user_virt_sb_base;	/* user-space sb base */
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;
	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
};
309
/* Per-open state when the handle represents a listener service */
struct qseecom_listener_handle {
	u32 id;
};

/* The single global driver instance */
static struct qseecom_control qseecom;
315
/* State attached to each open file descriptor on /dev/qseecom */
struct qseecom_dev_handle {
	enum qseecom_client_handle_type type;
	union {
		struct qseecom_client_handle client;
		struct qseecom_listener_handle listener;
	};
	bool released;
	int abort;			/* non-zero requests ioctl abort */
	wait_queue_head_t abort_wq;
	atomic_t ioctl_count;		/* in-flight ioctls on this handle */
	bool perf_enabled;
	bool fast_load_enabled;
	enum qseecom_bandwidth_request_mode mode;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
	bool use_legacy_cmd;
};
333
/* Human-readable description of one key usage index */
struct qseecom_key_id_usage_desc {
	uint8_t desc[QSEECOM_KEY_ID_SIZE];
};
337
/* Crypto engine / pipe-pair selection for one CE unit */
struct qseecom_crypto_info {
	unsigned int unit_num;
	unsigned int ce;
	unsigned int pipe_pair;
};
343
/* Descriptions indexed by key usage id (order matches the usage enum) */
static struct qseecom_key_id_usage_desc key_id_array[] = {
	{
		.desc = "Undefined Usage Index",
	},

	{
		.desc = "Full Disk Encryption",
	},

	{
		.desc = "Per File Encryption",
	},

	{
		.desc = "UFS ICE Full Disk Encryption",
	},

	{
		.desc = "SDCC ICE Full Disk Encryption",
	},
};
365
/* Function proto types (definitions appear later in this file) */
static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
						char *cmnlib_name);
static int qseecom_enable_ice_setup(int usage);
static int qseecom_disable_ice_setup(int usage);
static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
						void __user *argp);
static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
						void __user *argp);
static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
						void __user *argp);
383
384static int get_qseecom_keymaster_status(char *str)
385{
386 get_option(&str, &qseecom.is_apps_region_protected);
387 return 1;
388}
389__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
390
/*
 * qseecom_scm_call2() - marshal a legacy qseecom request into an ARMv8
 * scm_call2() SMC invocation.
 * @svc_id:	legacy SCM service id (6, SCM_SVC_ES, or SCM_SVC_TZSCHEDULER)
 * @tz_cmd_id:	legacy TZ command id (consulted for svc 6 and SCM_SVC_ES)
 * @req_buf:	request structure; for SCM_SVC_TZSCHEDULER its first uint32_t
 *		is the QSEOS command id that selects the SMC to issue
 * @resp_buf:	receives the three SMC return words (result/resp_type/data)
 *
 * Returns 0 on success or a negative errno (-EINVAL for unsupported ids,
 * -ENOMEM on bounce-buffer allocation failure, or the scm_call2() result).
 * Note that desc.ret[] is copied into @resp_buf even when ret != 0.
 */
static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
			const void *req_buf, void *resp_buf)
{
	int ret = 0;
	uint32_t smc_id = 0;
	uint32_t qseos_cmd_id = 0;
	struct scm_desc desc = {0};
	struct qseecom_command_scm_resp *scm_resp = NULL;

	if (!req_buf || !resp_buf) {
		pr_err("Invalid buffer pointer\n");
		return -EINVAL;
	}
	/* every QSEOS request starts with its command id */
	qseos_cmd_id = *(uint32_t *)req_buf;
	scm_resp = (struct qseecom_command_scm_resp *)resp_buf;

	switch (svc_id) {
	case 6: {
		/* legacy service 6 / command 3: TZ feature-version query */
		if (tz_cmd_id == 3) {
			smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
			desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
			desc.args[0] = *(uint32_t *)req_buf;
		} else {
			pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
				svc_id, tz_cmd_id);
			return -EINVAL;
		}
		ret = scm_call2(smc_id, &desc);
		break;
	}
	case SCM_SVC_ES: {
		switch (tz_cmd_id) {
		case SCM_SAVE_PARTITION_HASH_ID: {
			/*
			 * Stage the partition hash in a page-aligned bounce
			 * buffer and flush it so TZ sees coherent memory.
			 */
			u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
			struct qseecom_save_partition_hash_req *p_hash_req =
				(struct qseecom_save_partition_hash_req *)
				req_buf;
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, p_hash_req->digest,
				SHA256_DIGEST_LENGTH);
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
			desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
			desc.args[0] = p_hash_req->partition_id;
			desc.args[1] = virt_to_phys(tzbuf);
			desc.args[2] = SHA256_DIGEST_LENGTH;
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		default: {
			pr_err("tz_cmd_id %d is not supported by scm_call2\n",
				tz_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /* end of switch (tz_cmd_id) */
		break;
	} /* end of case SCM_SVC_ES */
	case SCM_SVC_TZSCHEDULER: {
		/*
		 * QSEOS commands.  Most requests come in a 32-bit and a
		 * 64-bit layout; which one is valid depends on whether the
		 * firmware is at least QSEE_VERSION_40.
		 */
		switch (qseos_cmd_id) {
		case QSEOS_APP_START_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_START_ID;
			desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
					(struct qseecom_load_app_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_SHUTDOWN_COMMAND: {
			struct qseecom_unload_app_ireq *req;

			req = (struct qseecom_unload_app_ireq *)req_buf;
			smc_id = TZ_OS_APP_SHUTDOWN_ID;
			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
			desc.args[0] = req->app_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_LOOKUP_COMMAND: {
			/* app name goes to TZ via a flushed bounce buffer */
			struct qseecom_check_app_ireq *req;
			u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			req = (struct qseecom_check_app_ireq *)req_buf;
			pr_debug("Lookup app_name = %s\n", req->app_name);
			strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_APP_LOOKUP_ID;
			desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = strlen(req->app_name);
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_APP_REGION_NOTIFICATION: {
			struct qsee_apps_region_info_ireq *req;
			struct qsee_apps_region_info_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
			desc.arginfo =
				TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qsee_apps_region_info_ireq *)
					req_buf;
				desc.args[0] = req->addr;
				desc.args[1] = req->size;
			} else {
				req_64bit =
				(struct qsee_apps_region_info_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->addr;
				desc.args[1] = req_64bit->size;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
			struct qseecom_load_lib_image_ireq *req;
			struct qseecom_load_lib_image_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_lib_image_ireq *)
					req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_lib_image_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_REGISTER_LISTENER: {
			struct qseecom_register_listener_ireq *req;
			struct qseecom_register_listener_64bit_ireq *req_64bit;

			desc.arginfo =
				TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_register_listener_ireq *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->sb_ptr;
				desc.args[2] = req->sb_len;
			} else {
				req_64bit =
				(struct qseecom_register_listener_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->listener_id;
				desc.args[1] = req_64bit->sb_ptr;
				desc.args[2] = req_64bit->sb_len;
			}
			/*
			 * Try the smcinvoke-aware registration first; if TZ
			 * rejects it, fall back to the legacy SMC and clear
			 * the smcinvoke_support flag.
			 */
			qseecom.smcinvoke_support = true;
			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			if (ret) {
				qseecom.smcinvoke_support = false;
				smc_id = TZ_OS_REGISTER_LISTENER_ID;
				__qseecom_reentrancy_check_if_no_app_blocked(
					smc_id);
				ret = scm_call2(smc_id, &desc);
			}
			break;
		}
		case QSEOS_DEREGISTER_LISTENER: {
			struct qseecom_unregister_listener_ireq *req;

			req = (struct qseecom_unregister_listener_ireq *)
				req_buf;
			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
			struct qseecom_client_listener_data_irsp *req;

			req = (struct qseecom_client_listener_data_irsp *)
				req_buf;
			smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
			desc.arginfo =
				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			desc.args[1] = req->status;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
			/* listener response carrying an sglist whitelist */
			struct qseecom_client_listener_data_irsp *req;
			struct qseecom_client_listener_data_64bit_irsp *req_64;

			smc_id =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req =
				(struct qseecom_client_listener_data_irsp *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->status;
				desc.args[2] = req->sglistinfo_ptr;
				desc.args[3] = req->sglistinfo_len;
			} else {
				req_64 =
			(struct qseecom_client_listener_data_64bit_irsp *)
					req_buf;
				desc.args[0] = req_64->listener_id;
				desc.args[1] = req_64->status;
				desc.args[2] = req_64->sglistinfo_ptr;
				desc.args[3] = req_64->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
			/*
			 * NOTE(review): reuses the services-image param id
			 * here - confirm this is intentional.
			 */
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_app_64bit_ireq *)req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}

		case QSEOS_CLIENT_SEND_DATA_COMMAND: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
			desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
			struct qseecom_client_send_service_ireq *req;

			req = (struct qseecom_client_send_service_ireq *)
				req_buf;
			smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
			desc.args[0] = req->key_type;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_ERASE_COMMAND: {
			smc_id = TZ_OS_RPMB_ERASE_ID;
			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		/*
		 * The four key-service commands below share a pattern: the
		 * payload (request minus the leading command id word) is
		 * copied into a page-aligned bounce buffer, cache-flushed,
		 * and passed to TZ by physical address.
		 */
		case QSEOS_GENERATE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_GEN_KEY_ID;
			desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_DELETE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_DEL_KEY_ID;
			desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_SET_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_select_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_select_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
			desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_UPDATE_KEY_USERINFO: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_UPDATE_KEY_ID;
			desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		/* GP TEE (GlobalPlatform) session/command pass-throughs */
		case QSEOS_TEE_OPEN_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
			desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_CLOSE_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_REQUEST_CANCELLATION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
			desc.arginfo =
				TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
			struct qseecom_continue_blocked_request_ireq *req =
				(struct qseecom_continue_blocked_request_ireq *)
				req_buf;
			/* smc id depends on whether TZ accepted smcinvoke
			 * registration earlier (see QSEOS_REGISTER_LISTENER)
			 */
			if (qseecom.smcinvoke_support)
				smc_id =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
			else
				smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
			desc.arginfo =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
			desc.args[0] = req->app_or_session_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		default: {
			pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
						qseos_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /*end of switch (qsee_cmd_id)  */
		break;
	} /*end of case SCM_SVC_TZSCHEDULER*/
	default: {
		pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
					svc_id);
		ret = -EINVAL;
		break;
	}
	} /*end of switch svc_id */
	/* propagate the three SMC return words regardless of ret */
	scm_resp->result = desc.ret[0];
	scm_resp->resp_type = desc.ret[1];
	scm_resp->data = desc.ret[2];
	pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
		svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
	pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
		scm_resp->result, scm_resp->resp_type, scm_resp->data);
	return ret;
}
1053
1054
1055static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1056 size_t cmd_len, void *resp_buf, size_t resp_len)
1057{
1058 if (!is_scm_armv8())
1059 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1060 resp_buf, resp_len);
1061 else
1062 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1063}
1064
1065static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
1066 struct qseecom_register_listener_req *svc)
1067{
1068 struct qseecom_registered_listener_list *ptr;
1069 int unique = 1;
1070 unsigned long flags;
1071
1072 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1073 list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
1074 if (ptr->svc.listener_id == svc->listener_id) {
1075 pr_err("Service id: %u is already registered\n",
1076 ptr->svc.listener_id);
1077 unique = 0;
1078 break;
1079 }
1080 }
1081 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1082 return unique;
1083}
1084
1085static struct qseecom_registered_listener_list *__qseecom_find_svc(
1086 int32_t listener_id)
1087{
1088 struct qseecom_registered_listener_list *entry = NULL;
1089 unsigned long flags;
1090
1091 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1092 list_for_each_entry(entry,
1093 &qseecom.registered_listener_list_head, list) {
1094 if (entry->svc.listener_id == listener_id)
1095 break;
1096 }
1097 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1098
1099 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
1100 pr_err("Service id: %u is not found\n", listener_id);
1101 return NULL;
1102 }
1103
1104 return entry;
1105}
1106
/*
 * Import the listener's shared-buffer ion fd, resolve its physical
 * address, map it into the kernel, and register it with TZ via a
 * QSEOS_REGISTER_LISTENER scm call (32- or 64-bit request layout
 * depending on the QSEE version).
 *
 * On success svc->ihandle/sb_virt/sb_phys/sb_length describe the
 * mapped buffer.  Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): on the error paths below the imported ion handle (and,
 * after mapping, the kernel mapping) is left held in *svc; confirm the
 * caller releases it, otherwise these paths leak the buffer.
 */
static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
				struct qseecom_dev_handle *handle,
				struct qseecom_register_listener_req *listener)
{
	int ret = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_register_listener_64bit_ireq req_64bit;
	struct qseecom_command_scm_resp resp;
	ion_phys_addr_t pa;
	void *cmd_buf = NULL;
	size_t cmd_len;

	/* Get the handle of the shared fd */
	svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					listener->ifd_data_fd);
	if (IS_ERR_OR_NULL(svc->ihandle)) {
		pr_err("Ion client could not retrieve the handle\n");
		return -ENOMEM;
	}

	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
	if (ret) {
		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
			ret);
		return ret;
	}
	/* Populate the structure for sending scm call to load image */
	svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
	if (IS_ERR_OR_NULL(svc->sb_virt)) {
		pr_err("ION memory mapping for listener shared buffer failed\n");
		return -ENOMEM;
	}
	svc->sb_phys = (phys_addr_t)pa;

	/* Pre-4.0 QSEE takes a 32-bit buffer pointer; newer takes 64-bit. */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
		req.listener_id = svc->svc.listener_id;
		req.sb_len = svc->sb_length;
		req.sb_ptr = (uint32_t)svc->sb_phys;
		cmd_buf = (void *)&req;
		cmd_len = sizeof(struct qseecom_register_listener_ireq);
	} else {
		req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
		req_64bit.listener_id = svc->svc.listener_id;
		req_64bit.sb_len = svc->sb_length;
		req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
		cmd_buf = (void *)&req_64bit;
		cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
	}

	resp.result = QSEOS_RESULT_INCOMPLETE;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
					&resp, sizeof(resp));
	if (ret) {
		pr_err("qseecom_scm_call failed with err: %d\n", ret);
		return -EINVAL;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Error SB registration req: resp.result = %d\n",
			resp.result);
		return -EPERM;
	}
	return 0;
}
1174
1175static int qseecom_register_listener(struct qseecom_dev_handle *data,
1176 void __user *argp)
1177{
1178 int ret = 0;
1179 unsigned long flags;
1180 struct qseecom_register_listener_req rcvd_lstnr;
1181 struct qseecom_registered_listener_list *new_entry;
1182
1183 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1184 if (ret) {
1185 pr_err("copy_from_user failed\n");
1186 return ret;
1187 }
1188 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1189 rcvd_lstnr.sb_size))
1190 return -EFAULT;
1191
1192 data->listener.id = 0;
1193 if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
1194 pr_err("Service is not unique and is already registered\n");
1195 data->released = true;
1196 return -EBUSY;
1197 }
1198
1199 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1200 if (!new_entry)
1201 return -ENOMEM;
1202 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
1203 new_entry->rcv_req_flag = 0;
1204
1205 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1206 new_entry->sb_length = rcvd_lstnr.sb_size;
1207 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1208 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
1209 pr_err("qseecom_set_sb_memoryfailed\n");
1210 kzfree(new_entry);
1211 return -ENOMEM;
1212 }
1213
1214 data->listener.id = rcvd_lstnr.listener_id;
1215 init_waitqueue_head(&new_entry->rcv_req_wq);
1216 init_waitqueue_head(&new_entry->listener_block_app_wq);
1217 new_entry->send_resp_flag = 0;
1218 new_entry->listener_in_use = false;
1219 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1220 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
1221 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1222
1223 return ret;
1224}
1225
/*
 * Deregister this client's listener service from TZ and tear down its
 * local state: tell TZ via QSEOS_DEREGISTER_LISTENER, abort and wake
 * any thread blocked in the receive path, wait for in-flight ioctls to
 * drain, then unlink/free the list entry and release the ion buffer.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
{
	int ret = 0;
	unsigned long flags;
	uint32_t unmap_mem = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct qseecom_command_scm_resp resp;
	struct ion_handle *ihandle = NULL;		/* Retrieve phy addr */

	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
	req.listener_id = data->listener.id;
	resp.result = QSEOS_RESULT_INCOMPLETE;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
					sizeof(req), &resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
				ret, data->listener.id);
		return ret;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
				resp.result, data->listener.id);
		return -EPERM;
	}

	/* Flag abort and wake the listener's receive waiters. */
	data->abort = 1;
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head,
			list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			wake_up_all(&ptr_svc->rcv_req_wq);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/* Wait for other in-flight ioctls on this handle to finish
	 * (count > 1 because this ioctl itself holds one reference). */
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_freezable(data->abort_wq,
				atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
			return ret;
		}
	}

	/* Unlink and free the entry; remember the ion handle so the
	 * unmap/free can happen outside the spinlock. */
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc,
			&qseecom.registered_listener_list_head, list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			if (ptr_svc->sb_virt) {
				unmap_mem = 1;
				ihandle = ptr_svc->ihandle;
			}
			list_del(&ptr_svc->list);
			kzfree(ptr_svc);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/* Unmap the memory */
	if (unmap_mem) {
		if (!IS_ERR_OR_NULL(ihandle)) {
			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
			ion_free(qseecom.ion_clnt, ihandle);
		}
	}
	data->released = true;
	return ret;
}
1299
/*
 * Move the QSEE bus-bandwidth vote to @mode.  When a dedicated CE core
 * source clock exists, the clock is disabled for INACTIVE and enabled
 * for any active mode before the bus-scale request is updated; on a
 * failed bus request the clock change is rolled back.
 *
 * Caller is expected to hold qsee_bw_mutex (all callers in this file
 * do) -- TODO confirm, the lock is not taken here.
 */
static int __qseecom_set_msm_bus_request(uint32_t mode)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qclk->ce_core_src_clk != NULL) {
		if (mode == INACTIVE) {
			__qseecom_disable_clk(CLK_QSEE);
		} else {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				pr_err("CLK enabling failed (%d) MODE (%d)\n",
							ret, mode);
		}
	}

	if ((!ret) && (qseecom.current_mode != mode)) {
		ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, mode);
		if (ret) {
			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
							ret, mode);
			/* Roll the clock state back to match the old mode. */
			if (qclk->ce_core_src_clk != NULL) {
				if (mode == INACTIVE) {
					ret = __qseecom_enable_clk(CLK_QSEE);
					if (ret)
						pr_err("CLK enable failed\n");
				} else
					__qseecom_disable_clk(CLK_QSEE);
			}
		}
		/*
		 * NOTE(review): current_mode is updated even when the bus
		 * request above failed -- confirm this is intentional.
		 */
		qseecom.current_mode = mode;
	}
	return ret;
}
1336
/*
 * Deferred work scheduled by the bw-scale-down timer: drop the bus
 * vote to INACTIVE if the timer is still marked running.
 * Lock order (app_access_lock, then qsee_bw_mutex) must match the
 * rest of the driver -- do not reorder.
 */
static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
	mutex_lock(&app_access_lock);
	mutex_lock(&qsee_bw_mutex);
	if (qseecom.timer_running)
		__qseecom_set_msm_bus_request(INACTIVE);
	pr_debug("current_mode = %d, cumulative_mode = %d\n",
				qseecom.current_mode, qseecom.cumulative_mode);
	qseecom.timer_running = false;
	mutex_unlock(&qsee_bw_mutex);
	mutex_unlock(&app_access_lock);
}
1349
/*
 * Timer callback: can't take mutexes in timer (atomic) context, so
 * punt the INACTIVE bus-vote work to the workqueue.
 */
static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}
1354
1355static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1356{
1357 struct qseecom_clk *qclk;
1358 int ret = 0;
1359
1360 mutex_lock(&clk_access_lock);
1361 if (ce == CLK_QSEE)
1362 qclk = &qseecom.qsee;
1363 else
1364 qclk = &qseecom.ce_drv;
1365
1366 if (qclk->clk_access_cnt > 2) {
1367 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1368 ret = -EINVAL;
1369 goto err_dec_ref_cnt;
1370 }
1371 if (qclk->clk_access_cnt == 2)
1372 qclk->clk_access_cnt--;
1373
1374err_dec_ref_cnt:
1375 mutex_unlock(&clk_access_lock);
1376 return ret;
1377}
1378
1379
1380static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1381{
1382 int32_t ret = 0;
1383 int32_t request_mode = INACTIVE;
1384
1385 mutex_lock(&qsee_bw_mutex);
1386 if (mode == 0) {
1387 if (qseecom.cumulative_mode > MEDIUM)
1388 request_mode = HIGH;
1389 else
1390 request_mode = qseecom.cumulative_mode;
1391 } else {
1392 request_mode = mode;
1393 }
1394
1395 ret = __qseecom_set_msm_bus_request(request_mode);
1396 if (ret) {
1397 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1398 ret, request_mode);
1399 goto err_scale_timer;
1400 }
1401
1402 if (qseecom.timer_running) {
1403 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1404 if (ret) {
1405 pr_err("Failed to decrease clk ref count.\n");
1406 goto err_scale_timer;
1407 }
1408 del_timer_sync(&(qseecom.bw_scale_down_timer));
1409 qseecom.timer_running = false;
1410 }
1411err_scale_timer:
1412 mutex_unlock(&qsee_bw_mutex);
1413 return ret;
1414}
1415
1416
1417static int qseecom_unregister_bus_bandwidth_needs(
1418 struct qseecom_dev_handle *data)
1419{
1420 int32_t ret = 0;
1421
1422 qseecom.cumulative_mode -= data->mode;
1423 data->mode = INACTIVE;
1424
1425 return ret;
1426}
1427
1428static int __qseecom_register_bus_bandwidth_needs(
1429 struct qseecom_dev_handle *data, uint32_t request_mode)
1430{
1431 int32_t ret = 0;
1432
1433 if (data->mode == INACTIVE) {
1434 qseecom.cumulative_mode += request_mode;
1435 data->mode = request_mode;
1436 } else {
1437 if (data->mode != request_mode) {
1438 qseecom.cumulative_mode -= data->mode;
1439 qseecom.cumulative_mode += request_mode;
1440 data->mode = request_mode;
1441 }
1442 }
1443 return ret;
1444}
1445
1446static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1447{
1448 int ret = 0;
1449
1450 ret = qsee_vote_for_clock(data, CLK_DFAB);
1451 if (ret) {
1452 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1453 goto perf_enable_exit;
1454 }
1455 ret = qsee_vote_for_clock(data, CLK_SFPB);
1456 if (ret) {
1457 qsee_disable_clock_vote(data, CLK_DFAB);
1458 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1459 goto perf_enable_exit;
1460 }
1461
1462perf_enable_exit:
1463 return ret;
1464}
1465
1466static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1467 void __user *argp)
1468{
1469 int32_t ret = 0;
1470 int32_t req_mode;
1471
1472 if (qseecom.no_clock_support)
1473 return 0;
1474
1475 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1476 if (ret) {
1477 pr_err("copy_from_user failed\n");
1478 return ret;
1479 }
1480 if (req_mode > HIGH) {
1481 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1482 return -EINVAL;
1483 }
1484
1485 /*
1486 * Register bus bandwidth needs if bus scaling feature is enabled;
1487 * otherwise, qseecom enable/disable clocks for the client directly.
1488 */
1489 if (qseecom.support_bus_scaling) {
1490 mutex_lock(&qsee_bw_mutex);
1491 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1492 mutex_unlock(&qsee_bw_mutex);
1493 } else {
1494 pr_debug("Bus scaling feature is NOT enabled\n");
1495 pr_debug("request bandwidth mode %d for the client\n",
1496 req_mode);
1497 if (req_mode != INACTIVE) {
1498 ret = qseecom_perf_enable(data);
1499 if (ret)
1500 pr_err("Failed to vote for clock with err %d\n",
1501 ret);
1502 } else {
1503 qsee_disable_clock_vote(data, CLK_DFAB);
1504 qsee_disable_clock_vote(data, CLK_SFPB);
1505 }
1506 }
1507 return ret;
1508}
1509
1510static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1511{
1512 if (qseecom.no_clock_support)
1513 return;
1514
1515 mutex_lock(&qsee_bw_mutex);
1516 qseecom.bw_scale_down_timer.expires = jiffies +
1517 msecs_to_jiffies(duration);
1518 mod_timer(&(qseecom.bw_scale_down_timer),
1519 qseecom.bw_scale_down_timer.expires);
1520 qseecom.timer_running = true;
1521 mutex_unlock(&qsee_bw_mutex);
1522}
1523
1524static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1525{
1526 if (!qseecom.support_bus_scaling)
1527 qsee_disable_clock_vote(data, CLK_SFPB);
1528 else
1529 __qseecom_add_bw_scale_down_timer(
1530 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1531}
1532
1533static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1534{
1535 int ret = 0;
1536
1537 if (qseecom.support_bus_scaling) {
1538 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1539 if (ret)
1540 pr_err("Failed to set bw MEDIUM.\n");
1541 } else {
1542 ret = qsee_vote_for_clock(data, CLK_SFPB);
1543 if (ret)
1544 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1545 }
1546 return ret;
1547}
1548
/*
 * Set up the client's shared buffer from the ion fd supplied via the
 * ioctl: validate the request, import the fd, resolve the physical
 * address, verify the allocation covers the requested length, and map
 * it into the kernel, recording everything in data->client.
 *
 * Return: 0 on success, negative errno on failure.
 *
 * NOTE(review): on the error paths after the import succeeds, the ion
 * handle remains in data->client.ihandle without ion_free() -- verify
 * it is released on client teardown.
 */
static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
					void __user *argp)
{
	ion_phys_addr_t pa;
	int32_t ret;
	struct qseecom_set_sb_mem_param_req req;
	size_t len;

	/* Copy the relevant information needed for loading the image */
	if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
		return -EFAULT;

	if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
					(req.sb_len == 0)) {
		pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
			req.ifd_data_fd, req.sb_len, req.virt_sb_base);
		return -EFAULT;
	}
	if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
			req.sb_len))
		return -EFAULT;

	/* Get the handle of the shared fd */
	data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						req.ifd_data_fd);
	if (IS_ERR_OR_NULL(data->client.ihandle)) {
		pr_err("Ion client could not retrieve the handle\n");
		return -ENOMEM;
	}
	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
	if (ret) {

		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
			ret);
		return ret;
	}

	/* The ion allocation must be at least as large as the request. */
	if (len < req.sb_len) {
		pr_err("Requested length (0x%x) is > allocated (%zu)\n",
			req.sb_len, len);
		return -EINVAL;
	}
	/* Populate the structure for sending scm call to load image */
	data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
							data->client.ihandle);
	if (IS_ERR_OR_NULL(data->client.sb_virt)) {
		pr_err("ION memory mapping for client shared buf failed\n");
		return -ENOMEM;
	}
	data->client.sb_phys = (phys_addr_t)pa;
	data->client.sb_length = req.sb_len;
	data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
	return 0;
}
1604
1605static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data)
1606{
1607 int ret;
1608
1609 ret = (qseecom.send_resp_flag != 0);
1610 return ret || data->abort;
1611}
1612
1613static int __qseecom_reentrancy_listener_has_sent_rsp(
1614 struct qseecom_dev_handle *data,
1615 struct qseecom_registered_listener_list *ptr_svc)
1616{
1617 int ret;
1618
1619 ret = (ptr_svc->send_resp_flag != 0);
1620 return ret || data->abort;
1621}
1622
1623static int __qseecom_qseos_fail_return_resp_tz(struct qseecom_dev_handle *data,
1624 struct qseecom_command_scm_resp *resp,
1625 struct qseecom_client_listener_data_irsp *send_data_rsp,
1626 struct qseecom_registered_listener_list *ptr_svc,
1627 uint32_t lstnr) {
1628 int ret = 0;
1629
1630 send_data_rsp->status = QSEOS_RESULT_FAILURE;
1631 qseecom.send_resp_flag = 0;
1632 send_data_rsp->qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND;
1633 send_data_rsp->listener_id = lstnr;
1634 if (ptr_svc)
1635 pr_warn("listener_id:%x, lstnr: %x\n",
1636 ptr_svc->svc.listener_id, lstnr);
1637 if (ptr_svc && ptr_svc->ihandle) {
1638 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ptr_svc->ihandle,
1639 ptr_svc->sb_virt, ptr_svc->sb_length,
1640 ION_IOC_CLEAN_INV_CACHES);
1641 if (ret) {
1642 pr_err("cache operation failed %d\n", ret);
1643 return ret;
1644 }
1645 }
1646
1647 if (lstnr == RPMB_SERVICE) {
1648 ret = __qseecom_enable_clk(CLK_QSEE);
1649 if (ret)
1650 return ret;
1651 }
1652 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, send_data_rsp,
1653 sizeof(send_data_rsp), resp, sizeof(*resp));
1654 if (ret) {
1655 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1656 ret, data->client.app_id);
1657 if (lstnr == RPMB_SERVICE)
1658 __qseecom_disable_clk(CLK_QSEE);
1659 return ret;
1660 }
1661 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1662 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1663 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1664 resp->result, data->client.app_id, lstnr);
1665 ret = -EINVAL;
1666 }
1667 if (lstnr == RPMB_SERVICE)
1668 __qseecom_disable_clk(CLK_QSEE);
1669 return ret;
1670}
1671
1672static void __qseecom_clean_listener_sglistinfo(
1673 struct qseecom_registered_listener_list *ptr_svc)
1674{
1675 if (ptr_svc->sglist_cnt) {
1676 memset(ptr_svc->sglistinfo_ptr, 0,
1677 SGLISTINFO_TABLE_SIZE);
1678 ptr_svc->sglist_cnt = 0;
1679 }
1680}
1681
/*
 * Service QSEOS_RESULT_INCOMPLETE responses from TZ: each one names a
 * listener (resp->data) that must handle a request.  Wake that
 * listener, block (signals masked) until it posts its response or the
 * client aborts, then send the listener's response back to TZ and
 * loop while TZ keeps returning INCOMPLETE.
 *
 * Return: 0 when TZ finally reports success, negative errno otherwise
 * (-ENODEV via rc when the client aborted mid-wait).
 */
static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp;
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up blocking lsitener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		/* No such listener: tell TZ the request failed and bail. */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -EINVAL;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -EINVAL;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_warn("Service requested does not exist\n");
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -ERESTARTSYS;
		}
		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);
		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		do {
			/*
			 * When reentrancy is not supported, check global
			 * send_resp_flag; otherwise, check this listener's
			 * send_resp_flag.
			 */
			if (!qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_listener_has_sent_rsp(data))) {
				break;
			}

			if (qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		if (data->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}

		/* Build the response for TZ: 32-bit layout pre-QSEE 4.0,
		 * 64-bit layout after; both carry the sglistinfo table. */
		qseecom.send_resp_flag = 0;
		ptr_svc->send_resp_flag = 0;
		table = ptr_svc->sglistinfo_ptr;
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id = lstnr;
			send_data_rsp.status = status;
			send_data_rsp.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
			send_data_rsp.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id = lstnr;
			send_data_rsp_64bit.status = status;
			send_data_rsp_64bit.sglistinfo_ptr =
				virt_to_phys(table);
			send_data_rsp_64bit.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* First field of either layout is the command id. */
		if (qseecom.whitelist_support == false)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		if (ptr_svc) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}

		/* RPMB/SSD listener traffic needs the QSEE clock held. */
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		ptr_svc->listener_in_use = false;
		__qseecom_clean_listener_sglistinfo(ptr_svc);
		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
				__qseecom_disable_clk(CLK_QSEE);
			return ret;
		}
		if ((resp->result != QSEOS_RESULT_SUCCESS) &&
			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
		}
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
			__qseecom_disable_clk(CLK_QSEE);

	}
	/* An abort recorded in rc takes precedence over the scm result. */
	if (rc)
		return rc;

	return ret;
}
1850
/*
 * Legacy (non-smcinvoke) handling for a TZ app blocked on a busy
 * listener: locate the app entry if not supplied, sleep (dropping
 * app_access_lock while waiting) until the listener frees up, then
 * send QSEOS_CONTINUE_BLOCKED_REQ_COMMAND so TZ unblocks the app and
 * mark resp INCOMPLETE so the caller resumes normal processing.
 *
 * Return: 0 on success, negative errno on failure.
 *
 * NOTE(review): on the -ERESTARTSYS path app_access_lock is NOT
 * re-acquired and app_block_ref_cnt/app_blocked are not unwound --
 * confirm the caller's unlock expectations on that path.
 */
static int __qseecom_process_blocked_on_listener_legacy(
				struct qseecom_command_scm_resp *resp,
				struct qseecom_registered_app_list *ptr_app,
				struct qseecom_dev_handle *data)
{
	struct qseecom_registered_listener_list *list_ptr;
	int ret = 0;
	struct qseecom_continue_blocked_request_ireq ireq;
	struct qseecom_command_scm_resp continue_resp;
	bool found_app = false;
	unsigned long flags;

	if (!resp || !data) {
		pr_err("invalid resp or data pointer\n");
		ret = -EINVAL;
		goto exit;
	}

	/* find app_id & img_name from list */
	if (!ptr_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
					list) {
			if ((ptr_app->app_id == data->client.app_id) &&
				(!strcmp(ptr_app->app_name,
					data->client.app_name))) {
				found_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
					flags);
		if (!found_app) {
			pr_err("app_id %d (%s) is not found\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -ENOENT;
			goto exit;
		}
	}

	list_ptr = __qseecom_find_svc(resp->data);
	if (!list_ptr) {
		pr_err("Invalid listener ID\n");
		ret = -ENODATA;
		goto exit;
	}
	pr_debug("lsntr %d in_use = %d\n",
			resp->data, list_ptr->listener_in_use);
	ptr_app->blocked_on_listener_id = resp->data;

	/* sleep until listener is available */
	do {
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		/* drop the global lock so the listener thread can run */
		mutex_unlock(&app_access_lock);
		if (wait_event_freezable(
			list_ptr->listener_block_app_wq,
			!list_ptr->listener_in_use)) {
			pr_err("Interrupted: listener_id %d, app_id %d\n",
				resp->data, ptr_app->app_id);
			ret = -ERESTARTSYS;
			goto exit;
		}
		mutex_lock(&app_access_lock);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
	} while (list_ptr->listener_in_use);

	ptr_app->blocked_on_listener_id = 0;
	/* notify the blocked app that listener is available */
	pr_warn("Lsntr %d is available, unblock app(%d) %s in TZ\n",
		resp->data, data->client.app_id,
		data->client.app_name);
	ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
	ireq.app_or_session_id = data->client.app_id;
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
			&ireq, sizeof(ireq),
			&continue_resp, sizeof(continue_resp));
	if (ret) {
		pr_err("scm_call for continue blocked req for app(%d) %s failed, ret %d\n",
			data->client.app_id,
			data->client.app_name, ret);
		goto exit;
	}
	/*
	 * After TZ app is unblocked, then continue to next case
	 * for incomplete request processing
	 */
	resp->result = QSEOS_RESULT_INCOMPLETE;
exit:
	return ret;
}
1944
/*
 * smcinvoke handling for a TZ session blocked on a busy listener:
 * the blocked session id is carried in resp->resp_type.  Sleep
 * (dropping app_access_lock while waiting) until the listener frees
 * up, then send QSEOS_CONTINUE_BLOCKED_REQ_COMMAND with the session
 * id and mark resp INCOMPLETE so the caller resumes processing.
 *
 * Return: 0 on success, negative errno on failure.
 *
 * NOTE(review): as in the legacy variant, the -ERESTARTSYS path
 * returns without re-taking app_access_lock or decrementing
 * app_block_ref_cnt -- confirm this is handled by the caller.
 */
static int __qseecom_process_blocked_on_listener_smcinvoke(
			struct qseecom_command_scm_resp *resp)
{
	struct qseecom_registered_listener_list *list_ptr;
	int ret = 0;
	struct qseecom_continue_blocked_request_ireq ireq;
	struct qseecom_command_scm_resp continue_resp;
	unsigned int session_id;

	if (!resp) {
		pr_err("invalid resp pointer\n");
		ret = -EINVAL;
		goto exit;
	}
	session_id = resp->resp_type;
	list_ptr = __qseecom_find_svc(resp->data);
	if (!list_ptr) {
		pr_err("Invalid listener ID\n");
		ret = -ENODATA;
		goto exit;
	}
	pr_debug("lsntr %d in_use = %d\n",
			resp->data, list_ptr->listener_in_use);
	/* sleep until listener is available */
	do {
		qseecom.app_block_ref_cnt++;
		/* drop the global lock so the listener thread can run */
		mutex_unlock(&app_access_lock);
		if (wait_event_freezable(
			list_ptr->listener_block_app_wq,
			!list_ptr->listener_in_use)) {
			pr_err("Interrupted: listener_id %d, session_id %d\n",
				resp->data, session_id);
			ret = -ERESTARTSYS;
			goto exit;
		}
		mutex_lock(&app_access_lock);
		qseecom.app_block_ref_cnt--;
	} while (list_ptr->listener_in_use);

	/* notify TZ that listener is available */
	pr_warn("Lsntr %d is available, unblock session(%d) in TZ\n",
		resp->data, session_id);
	ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
	ireq.app_or_session_id = session_id;
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
			&ireq, sizeof(ireq),
			&continue_resp, sizeof(continue_resp));
	if (ret) {
		pr_err("scm_call for continue blocked req for session %d failed, ret %d\n",
			session_id, ret);
		goto exit;
	}
	resp->result = QSEOS_RESULT_INCOMPLETE;
exit:
	return ret;
}
2001
2002static int __qseecom_process_reentrancy_blocked_on_listener(
2003 struct qseecom_command_scm_resp *resp,
2004 struct qseecom_registered_app_list *ptr_app,
2005 struct qseecom_dev_handle *data)
2006{
2007 if (!qseecom.smcinvoke_support)
2008 return __qseecom_process_blocked_on_listener_legacy(
2009 resp, ptr_app, data);
2010 else
2011 return __qseecom_process_blocked_on_listener_smcinvoke(
2012 resp);
2013}
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002014static int __qseecom_reentrancy_process_incomplete_cmd(
2015 struct qseecom_dev_handle *data,
2016 struct qseecom_command_scm_resp *resp)
2017{
2018 int ret = 0;
2019 int rc = 0;
2020 uint32_t lstnr;
2021 unsigned long flags;
2022 struct qseecom_client_listener_data_irsp send_data_rsp;
2023 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
2024 struct qseecom_registered_listener_list *ptr_svc = NULL;
2025 sigset_t new_sigset;
2026 sigset_t old_sigset;
2027 uint32_t status;
2028 void *cmd_buf = NULL;
2029 size_t cmd_len;
2030 struct sglist_info *table = NULL;
2031
2032 while (ret == 0 && rc == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
2033 lstnr = resp->data;
2034 /*
2035 * Wake up blocking lsitener service with the lstnr id
2036 */
2037 spin_lock_irqsave(&qseecom.registered_listener_list_lock,
2038 flags);
2039 list_for_each_entry(ptr_svc,
2040 &qseecom.registered_listener_list_head, list) {
2041 if (ptr_svc->svc.listener_id == lstnr) {
2042 ptr_svc->listener_in_use = true;
2043 ptr_svc->rcv_req_flag = 1;
2044 wake_up_interruptible(&ptr_svc->rcv_req_wq);
2045 break;
2046 }
2047 }
2048 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
2049 flags);
2050
2051 if (ptr_svc == NULL) {
2052 pr_err("Listener Svc %d does not exist\n", lstnr);
2053 return -EINVAL;
2054 }
2055
2056 if (!ptr_svc->ihandle) {
2057 pr_err("Client handle is not initialized\n");
2058 return -EINVAL;
2059 }
2060
2061 if (ptr_svc->svc.listener_id != lstnr) {
2062 pr_warn("Service requested does not exist\n");
2063 return -ERESTARTSYS;
2064 }
2065 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
2066
2067 /* initialize the new signal mask with all signals*/
2068 sigfillset(&new_sigset);
2069
2070 /* block all signals */
2071 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2072
2073 /* unlock mutex btw waking listener and sleep-wait */
2074 mutex_unlock(&app_access_lock);
2075 do {
2076 if (!wait_event_freezable(qseecom.send_resp_wq,
2077 __qseecom_reentrancy_listener_has_sent_rsp(
2078 data, ptr_svc))) {
2079 break;
2080 }
2081 } while (1);
2082 /* lock mutex again after resp sent */
2083 mutex_lock(&app_access_lock);
2084 ptr_svc->send_resp_flag = 0;
2085 qseecom.send_resp_flag = 0;
2086
2087 /* restore signal mask */
2088 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2089 if (data->abort) {
2090 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
2091 data->client.app_id, lstnr, ret);
2092 rc = -ENODEV;
2093 status = QSEOS_RESULT_FAILURE;
2094 } else {
2095 status = QSEOS_RESULT_SUCCESS;
2096 }
2097 table = ptr_svc->sglistinfo_ptr;
2098 if (qseecom.qsee_version < QSEE_VERSION_40) {
2099 send_data_rsp.listener_id = lstnr;
2100 send_data_rsp.status = status;
2101 send_data_rsp.sglistinfo_ptr =
2102 (uint32_t)virt_to_phys(table);
2103 send_data_rsp.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
2104 dmac_flush_range((void *)table,
2105 (void *)table + SGLISTINFO_TABLE_SIZE);
2106 cmd_buf = (void *)&send_data_rsp;
2107 cmd_len = sizeof(send_data_rsp);
2108 } else {
2109 send_data_rsp_64bit.listener_id = lstnr;
2110 send_data_rsp_64bit.status = status;
2111 send_data_rsp_64bit.sglistinfo_ptr =
2112 virt_to_phys(table);
2113 send_data_rsp_64bit.sglistinfo_len =
2114 SGLISTINFO_TABLE_SIZE;
2115 dmac_flush_range((void *)table,
2116 (void *)table + SGLISTINFO_TABLE_SIZE);
2117 cmd_buf = (void *)&send_data_rsp_64bit;
2118 cmd_len = sizeof(send_data_rsp_64bit);
2119 }
2120 if (qseecom.whitelist_support == false)
2121 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
2122 else
2123 *(uint32_t *)cmd_buf =
2124 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
2125 if (ptr_svc) {
2126 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
2127 ptr_svc->ihandle,
2128 ptr_svc->sb_virt, ptr_svc->sb_length,
2129 ION_IOC_CLEAN_INV_CACHES);
2130 if (ret) {
2131 pr_err("cache operation failed %d\n", ret);
2132 return ret;
2133 }
2134 }
2135 if (lstnr == RPMB_SERVICE) {
2136 ret = __qseecom_enable_clk(CLK_QSEE);
2137 if (ret)
2138 return ret;
2139 }
2140
2141 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2142 cmd_buf, cmd_len, resp, sizeof(*resp));
2143 ptr_svc->listener_in_use = false;
2144 __qseecom_clean_listener_sglistinfo(ptr_svc);
2145 wake_up_interruptible(&ptr_svc->listener_block_app_wq);
2146
2147 if (ret) {
2148 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
2149 ret, data->client.app_id);
2150 goto exit;
2151 }
2152
2153 switch (resp->result) {
2154 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
2155 pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
2156 lstnr, data->client.app_id, resp->data);
2157 if (lstnr == resp->data) {
2158 pr_err("lstnr %d should not be blocked!\n",
2159 lstnr);
2160 ret = -EINVAL;
2161 goto exit;
2162 }
2163 ret = __qseecom_process_reentrancy_blocked_on_listener(
2164 resp, NULL, data);
2165 if (ret) {
2166 pr_err("failed to process App(%d) %s blocked on listener %d\n",
2167 data->client.app_id,
2168 data->client.app_name, resp->data);
2169 goto exit;
2170 }
2171 case QSEOS_RESULT_SUCCESS:
2172 case QSEOS_RESULT_INCOMPLETE:
2173 break;
2174 default:
2175 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
2176 resp->result, data->client.app_id, lstnr);
2177 ret = -EINVAL;
2178 goto exit;
2179 }
2180exit:
2181 if (lstnr == RPMB_SERVICE)
2182 __qseecom_disable_clk(CLK_QSEE);
2183
2184 }
2185 if (rc)
2186 return rc;
2187
2188 return ret;
2189}
2190
2191/*
2192 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2193 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2194 * So, needs to first check if no app blocked before sending OS level scm call,
2195 * then wait until all apps are unblocked.
2196 */
2197static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2198{
2199 sigset_t new_sigset, old_sigset;
2200
2201 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2202 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2203 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2204 /* thread sleep until this app unblocked */
2205 while (qseecom.app_block_ref_cnt > 0) {
2206 sigfillset(&new_sigset);
2207 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2208 mutex_unlock(&app_access_lock);
2209 do {
2210 if (!wait_event_freezable(qseecom.app_block_wq,
2211 (qseecom.app_block_ref_cnt == 0)))
2212 break;
2213 } while (1);
2214 mutex_lock(&app_access_lock);
2215 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2216 }
2217 }
2218}
2219
2220/*
2221 * scm_call of send data will fail if this TA is blocked or there are more
2222 * than one TA requesting listener services; So, first check to see if need
2223 * to wait.
2224 */
2225static void __qseecom_reentrancy_check_if_this_app_blocked(
2226 struct qseecom_registered_app_list *ptr_app)
2227{
2228 sigset_t new_sigset, old_sigset;
2229
2230 if (qseecom.qsee_reentrancy_support) {
2231 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2232 /* thread sleep until this app unblocked */
2233 sigfillset(&new_sigset);
2234 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2235 mutex_unlock(&app_access_lock);
2236 do {
2237 if (!wait_event_freezable(qseecom.app_block_wq,
2238 (!ptr_app->app_blocked &&
2239 qseecom.app_block_ref_cnt <= 1)))
2240 break;
2241 } while (1);
2242 mutex_lock(&app_access_lock);
2243 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2244 }
2245 }
2246}
2247
2248static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2249 uint32_t *app_id)
2250{
2251 int32_t ret;
2252 struct qseecom_command_scm_resp resp;
2253 bool found_app = false;
2254 struct qseecom_registered_app_list *entry = NULL;
2255 unsigned long flags = 0;
2256
2257 if (!app_id) {
2258 pr_err("Null pointer to app_id\n");
2259 return -EINVAL;
2260 }
2261 *app_id = 0;
2262
2263 /* check if app exists and has been registered locally */
2264 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2265 list_for_each_entry(entry,
2266 &qseecom.registered_app_list_head, list) {
2267 if (!strcmp(entry->app_name, req.app_name)) {
2268 found_app = true;
2269 break;
2270 }
2271 }
2272 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2273 if (found_app) {
2274 pr_debug("Found app with id %d\n", entry->app_id);
2275 *app_id = entry->app_id;
2276 return 0;
2277 }
2278
2279 memset((void *)&resp, 0, sizeof(resp));
2280
2281 /* SCM_CALL to check if app_id for the mentioned app exists */
2282 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2283 sizeof(struct qseecom_check_app_ireq),
2284 &resp, sizeof(resp));
2285 if (ret) {
2286 pr_err("scm_call to check if app is already loaded failed\n");
2287 return -EINVAL;
2288 }
2289
2290 if (resp.result == QSEOS_RESULT_FAILURE)
2291 return 0;
2292
2293 switch (resp.resp_type) {
2294 /*qsee returned listener type response */
2295 case QSEOS_LISTENER_ID:
2296 pr_err("resp type is of listener type instead of app");
2297 return -EINVAL;
2298 case QSEOS_APP_ID:
2299 *app_id = resp.data;
2300 return 0;
2301 default:
2302 pr_err("invalid resp type (%d) from qsee",
2303 resp.resp_type);
2304 return -ENODEV;
2305 }
2306}
2307
2308static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2309{
2310 struct qseecom_registered_app_list *entry = NULL;
2311 unsigned long flags = 0;
2312 u32 app_id = 0;
2313 struct ion_handle *ihandle; /* Ion handle */
2314 struct qseecom_load_img_req load_img_req;
2315 int32_t ret = 0;
2316 ion_phys_addr_t pa = 0;
2317 size_t len;
2318 struct qseecom_command_scm_resp resp;
2319 struct qseecom_check_app_ireq req;
2320 struct qseecom_load_app_ireq load_req;
2321 struct qseecom_load_app_64bit_ireq load_req_64bit;
2322 void *cmd_buf = NULL;
2323 size_t cmd_len;
2324 bool first_time = false;
2325
2326 /* Copy the relevant information needed for loading the image */
2327 if (copy_from_user(&load_img_req,
2328 (void __user *)argp,
2329 sizeof(struct qseecom_load_img_req))) {
2330 pr_err("copy_from_user failed\n");
2331 return -EFAULT;
2332 }
2333
2334 /* Check and load cmnlib */
2335 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2336 if (!qseecom.commonlib_loaded &&
2337 load_img_req.app_arch == ELFCLASS32) {
2338 ret = qseecom_load_commonlib_image(data, "cmnlib");
2339 if (ret) {
2340 pr_err("failed to load cmnlib\n");
2341 return -EIO;
2342 }
2343 qseecom.commonlib_loaded = true;
2344 pr_debug("cmnlib is loaded\n");
2345 }
2346
2347 if (!qseecom.commonlib64_loaded &&
2348 load_img_req.app_arch == ELFCLASS64) {
2349 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2350 if (ret) {
2351 pr_err("failed to load cmnlib64\n");
2352 return -EIO;
2353 }
2354 qseecom.commonlib64_loaded = true;
2355 pr_debug("cmnlib64 is loaded\n");
2356 }
2357 }
2358
2359 if (qseecom.support_bus_scaling) {
2360 mutex_lock(&qsee_bw_mutex);
2361 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2362 mutex_unlock(&qsee_bw_mutex);
2363 if (ret)
2364 return ret;
2365 }
2366
2367 /* Vote for the SFPB clock */
2368 ret = __qseecom_enable_clk_scale_up(data);
2369 if (ret)
2370 goto enable_clk_err;
2371
2372 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2373 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2374 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2375
2376 ret = __qseecom_check_app_exists(req, &app_id);
2377 if (ret < 0)
2378 goto loadapp_err;
2379
2380 if (app_id) {
2381 pr_debug("App id %d (%s) already exists\n", app_id,
2382 (char *)(req.app_name));
2383 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2384 list_for_each_entry(entry,
2385 &qseecom.registered_app_list_head, list){
2386 if (entry->app_id == app_id) {
2387 entry->ref_cnt++;
2388 break;
2389 }
2390 }
2391 spin_unlock_irqrestore(
2392 &qseecom.registered_app_list_lock, flags);
2393 ret = 0;
2394 } else {
2395 first_time = true;
2396 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2397 (char *)(load_img_req.img_name));
2398 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002399 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002400 load_img_req.ifd_data_fd);
2401 if (IS_ERR_OR_NULL(ihandle)) {
2402 pr_err("Ion client could not retrieve the handle\n");
2403 ret = -ENOMEM;
2404 goto loadapp_err;
2405 }
2406
2407 /* Get the physical address of the ION BUF */
2408 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2409 if (ret) {
2410 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2411 ret);
2412 goto loadapp_err;
2413 }
2414 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2415 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2416 len, load_img_req.mdt_len,
2417 load_img_req.img_len);
2418 ret = -EINVAL;
2419 goto loadapp_err;
2420 }
2421 /* Populate the structure for sending scm call to load image */
2422 if (qseecom.qsee_version < QSEE_VERSION_40) {
2423 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2424 load_req.mdt_len = load_img_req.mdt_len;
2425 load_req.img_len = load_img_req.img_len;
2426 strlcpy(load_req.app_name, load_img_req.img_name,
2427 MAX_APP_NAME_SIZE);
2428 load_req.phy_addr = (uint32_t)pa;
2429 cmd_buf = (void *)&load_req;
2430 cmd_len = sizeof(struct qseecom_load_app_ireq);
2431 } else {
2432 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2433 load_req_64bit.mdt_len = load_img_req.mdt_len;
2434 load_req_64bit.img_len = load_img_req.img_len;
2435 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2436 MAX_APP_NAME_SIZE);
2437 load_req_64bit.phy_addr = (uint64_t)pa;
2438 cmd_buf = (void *)&load_req_64bit;
2439 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2440 }
2441
2442 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2443 ION_IOC_CLEAN_INV_CACHES);
2444 if (ret) {
2445 pr_err("cache operation failed %d\n", ret);
2446 goto loadapp_err;
2447 }
2448
2449 /* SCM_CALL to load the app and get the app_id back */
2450 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2451 cmd_len, &resp, sizeof(resp));
2452 if (ret) {
2453 pr_err("scm_call to load app failed\n");
2454 if (!IS_ERR_OR_NULL(ihandle))
2455 ion_free(qseecom.ion_clnt, ihandle);
2456 ret = -EINVAL;
2457 goto loadapp_err;
2458 }
2459
2460 if (resp.result == QSEOS_RESULT_FAILURE) {
2461 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2462 if (!IS_ERR_OR_NULL(ihandle))
2463 ion_free(qseecom.ion_clnt, ihandle);
2464 ret = -EFAULT;
2465 goto loadapp_err;
2466 }
2467
2468 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2469 ret = __qseecom_process_incomplete_cmd(data, &resp);
2470 if (ret) {
2471 pr_err("process_incomplete_cmd failed err: %d\n",
2472 ret);
2473 if (!IS_ERR_OR_NULL(ihandle))
2474 ion_free(qseecom.ion_clnt, ihandle);
2475 ret = -EFAULT;
2476 goto loadapp_err;
2477 }
2478 }
2479
2480 if (resp.result != QSEOS_RESULT_SUCCESS) {
2481 pr_err("scm_call failed resp.result unknown, %d\n",
2482 resp.result);
2483 if (!IS_ERR_OR_NULL(ihandle))
2484 ion_free(qseecom.ion_clnt, ihandle);
2485 ret = -EFAULT;
2486 goto loadapp_err;
2487 }
2488
2489 app_id = resp.data;
2490
2491 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2492 if (!entry) {
2493 ret = -ENOMEM;
2494 goto loadapp_err;
2495 }
2496 entry->app_id = app_id;
2497 entry->ref_cnt = 1;
2498 entry->app_arch = load_img_req.app_arch;
2499 /*
2500 * keymaster app may be first loaded as "keymaste" by qseecomd,
2501 * and then used as "keymaster" on some targets. To avoid app
2502 * name checking error, register "keymaster" into app_list and
2503 * thread private data.
2504 */
2505 if (!strcmp(load_img_req.img_name, "keymaste"))
2506 strlcpy(entry->app_name, "keymaster",
2507 MAX_APP_NAME_SIZE);
2508 else
2509 strlcpy(entry->app_name, load_img_req.img_name,
2510 MAX_APP_NAME_SIZE);
2511 entry->app_blocked = false;
2512 entry->blocked_on_listener_id = 0;
2513
2514 /* Deallocate the handle */
2515 if (!IS_ERR_OR_NULL(ihandle))
2516 ion_free(qseecom.ion_clnt, ihandle);
2517
2518 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2519 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2520 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2521 flags);
2522
2523 pr_warn("App with id %u (%s) now loaded\n", app_id,
2524 (char *)(load_img_req.img_name));
2525 }
2526 data->client.app_id = app_id;
2527 data->client.app_arch = load_img_req.app_arch;
2528 if (!strcmp(load_img_req.img_name, "keymaste"))
2529 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2530 else
2531 strlcpy(data->client.app_name, load_img_req.img_name,
2532 MAX_APP_NAME_SIZE);
2533 load_img_req.app_id = app_id;
2534 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2535 pr_err("copy_to_user failed\n");
2536 ret = -EFAULT;
2537 if (first_time == true) {
2538 spin_lock_irqsave(
2539 &qseecom.registered_app_list_lock, flags);
2540 list_del(&entry->list);
2541 spin_unlock_irqrestore(
2542 &qseecom.registered_app_list_lock, flags);
2543 kzfree(entry);
2544 }
2545 }
2546
2547loadapp_err:
2548 __qseecom_disable_clk_scale_down(data);
2549enable_clk_err:
2550 if (qseecom.support_bus_scaling) {
2551 mutex_lock(&qsee_bw_mutex);
2552 qseecom_unregister_bus_bandwidth_needs(data);
2553 mutex_unlock(&qsee_bw_mutex);
2554 }
2555 return ret;
2556}
2557
2558static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2559{
2560 int ret = 1; /* Set unload app */
2561
2562 wake_up_all(&qseecom.send_resp_wq);
2563 if (qseecom.qsee_reentrancy_support)
2564 mutex_unlock(&app_access_lock);
2565 while (atomic_read(&data->ioctl_count) > 1) {
2566 if (wait_event_freezable(data->abort_wq,
2567 atomic_read(&data->ioctl_count) <= 1)) {
2568 pr_err("Interrupted from abort\n");
2569 ret = -ERESTARTSYS;
2570 break;
2571 }
2572 }
2573 if (qseecom.qsee_reentrancy_support)
2574 mutex_lock(&app_access_lock);
2575 return ret;
2576}
2577
2578static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2579{
2580 int ret = 0;
2581
2582 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2583 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2584 ion_free(qseecom.ion_clnt, data->client.ihandle);
2585 data->client.ihandle = NULL;
2586 }
2587 return ret;
2588}
2589
/*
 * Unload a trusted app for this handle.  Decrements the local refcount and,
 * when this is the last reference (or the app crashed), sends the shutdown
 * command to TZ and removes the app from the registered list.  The ion
 * shared buffer is always released and the handle marked released.
 *
 * @data:      device handle whose app is being unloaded
 * @app_crash: true when called from a crash/abort path; forces unload
 *             unless the app is currently blocked on a listener
 *
 * Returns 0 on success or a negative errno.
 */
static int qseecom_unload_app(struct qseecom_dev_handle *data,
				bool app_crash)
{
	unsigned long flags;
	unsigned long flags1;
	int ret = 0;
	struct qseecom_command_scm_resp resp;
	struct qseecom_registered_app_list *ptr_app = NULL;
	bool unload = false;
	bool found_app = false;
	bool found_dead_app = false;

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	/* keymaster stays resident; matches both "keymaste" and "keymaster" */
	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
		pr_debug("Do not unload keymaster app from tz\n");
		goto unload_exit;
	}

	__qseecom_cleanup_app(data);
	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);

	if (data->client.app_id > 0) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
									list) {
			if (ptr_app->app_id == data->client.app_id) {
				if (!strcmp((void *)ptr_app->app_name,
					(void *)data->client.app_name)) {
					found_app = true;
					/* never force-unload a blocked app */
					if (ptr_app->app_blocked)
						app_crash = false;
					if (app_crash || ptr_app->ref_cnt == 1)
						unload = true;
					break;
				}
				/* same id but different name: stale entry */
				found_dead_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
				flags);
		if (found_app == false && found_dead_app == false) {
			pr_err("Cannot find app with id = %d (%s)\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -EINVAL;
			goto unload_exit;
		}
	}

	if (found_dead_app)
		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
			(char *)data->client.app_name);

	if (unload) {
		struct qseecom_unload_app_ireq req;
		/* Populate the structure for sending scm call to load image */
		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
		req.app_id = data->client.app_id;

		/* SCM_CALL to unload the app */
		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(struct qseecom_unload_app_ireq),
				&resp, sizeof(resp));
		if (ret) {
			pr_err("scm_call to unload app (id = %d) failed\n",
				req.app_id);
			ret = -EFAULT;
			goto unload_exit;
		} else {
			pr_warn("App id %d now unloaded\n", req.app_id);
		}
		if (resp.result == QSEOS_RESULT_FAILURE) {
			pr_err("app (%d) unload_failed!!\n",
				data->client.app_id);
			ret = -EFAULT;
			goto unload_exit;
		}
		if (resp.result == QSEOS_RESULT_SUCCESS)
			pr_debug("App (%d) is unloaded!!\n",
				data->client.app_id);
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			/* TZ needs listener servicing before unload finishes */
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd fail err: %d\n",
					ret);
				goto unload_exit;
			}
		}
	}

	if (found_app) {
		/* update refcount and drop the entry if it was unloaded */
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
		if (app_crash) {
			ptr_app->ref_cnt = 0;
			pr_debug("app_crash: ref_count = 0\n");
		} else {
			if (ptr_app->ref_cnt == 1) {
				ptr_app->ref_cnt = 0;
				pr_debug("ref_count set to 0\n");
			} else {
				ptr_app->ref_cnt--;
				pr_debug("Can't unload app(%d) inuse\n",
					ptr_app->app_id);
			}
		}
		if (unload) {
			list_del(&ptr_app->list);
			kzfree(ptr_app);
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags1);
	}
unload_exit:
	qseecom_unmap_ion_allocated_memory(data);
	data->released = true;
	return ret;
}
2712
2713static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2714 unsigned long virt)
2715{
2716 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2717}
2718
2719static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2720 unsigned long virt)
2721{
2722 return (uintptr_t)data->client.sb_virt +
2723 (virt - data->client.user_virt_sb_base);
2724}
2725
2726int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2727 struct qseecom_send_svc_cmd_req *req_ptr,
2728 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2729{
2730 int ret = 0;
2731 void *req_buf = NULL;
2732
2733 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2734 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2735 req_ptr, send_svc_ireq_ptr);
2736 return -EINVAL;
2737 }
2738
2739 /* Clients need to ensure req_buf is at base offset of shared buffer */
2740 if ((uintptr_t)req_ptr->cmd_req_buf !=
2741 data_ptr->client.user_virt_sb_base) {
2742 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2743 return -EINVAL;
2744 }
2745
2746 if (data_ptr->client.sb_length <
2747 sizeof(struct qseecom_rpmb_provision_key)) {
2748 pr_err("shared buffer is too small to hold key type\n");
2749 return -EINVAL;
2750 }
2751 req_buf = data_ptr->client.sb_virt;
2752
2753 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2754 send_svc_ireq_ptr->key_type =
2755 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2756 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2757 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2758 data_ptr, (uintptr_t)req_ptr->resp_buf));
2759 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2760
2761 return ret;
2762}
2763
2764int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2765 struct qseecom_send_svc_cmd_req *req_ptr,
2766 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2767{
2768 int ret = 0;
2769 uint32_t reqd_len_sb_in = 0;
2770
2771 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2772 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2773 req_ptr, send_svc_ireq_ptr);
2774 return -EINVAL;
2775 }
2776
2777 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2778 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2779 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2780 pr_err("Required: %u, Available: %zu\n",
2781 reqd_len_sb_in, data_ptr->client.sb_length);
2782 return -ENOMEM;
2783 }
2784
2785 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2786 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2787 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2788 data_ptr, (uintptr_t)req_ptr->resp_buf));
2789 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2790
2791 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2792 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2793
2794
2795 return ret;
2796}
2797
/*
 * Validate a userspace service-command request against the client's shared
 * buffer: non-null pointers, buffers inside the shared-buffer window,
 * non-zero lengths that fit, and no integer overflow in any address/length
 * arithmetic.  Returns 0 when valid, -EINVAL or -ENOMEM otherwise.  The
 * check order is deliberate: each later check relies on the earlier ones
 * (e.g. the overflow checks make the final range check safe).
 */
static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_svc_cmd_req *req)
{
	if (!req || !req->resp_buf || !req->cmd_req_buf) {
		pr_err("req or cmd buffer or response buffer is null\n");
		return -EINVAL;
	}

	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	if (data->client.sb_virt == NULL) {
		pr_err("sb_virt null\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base == 0) {
		pr_err("user_virt_sb_base is null\n");
		return -EINVAL;
	}

	if (data->client.sb_length == 0) {
		pr_err("sb_length is 0\n");
		return -EINVAL;
	}

	/* both buffers must start inside the shared-buffer window */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
			((uintptr_t)req->cmd_req_buf >=
			(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base)  ||
			((uintptr_t)req->resp_buf >=
			(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	/* combined request + response must fit in the shared buffer */
	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
				data->client.sb_length);
		return -ENOMEM;
	}
	/* guard the end-of-buffer arithmetic below against wraparound */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* each buffer's end must also stay inside the shared buffer */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
2882
/*
 * QSEECOM_IOCTL_SEND_SVC_CMD handler: dispatch an RPMB or FSM service
 * command to TZ through the client's shared buffer, managing clock/bus
 * votes and cache coherency around the SCM call.  Returns 0 on success or
 * a negative errno.
 */
static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	int ret = 0;
	struct qseecom_client_send_service_ireq send_svc_ireq;
	struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
	struct qseecom_command_scm_resp resp;
	struct qseecom_send_svc_cmd_req req;
	void *send_req_ptr;
	size_t req_buf_size;

	if (copy_from_user(&req,
				(void __user *)argp,
				sizeof(req))) {
		pr_err("copy_from_user failed\n");
		return -EFAULT;
	}

	if (__validate_send_service_cmd_inputs(data, &req))
		return -EINVAL;

	data->type = QSEECOM_SECURE_SERVICE;

	/* pick the ireq layout matching the requested service family */
	switch (req.cmd_id) {
	case QSEOS_RPMB_PROVISION_KEY_COMMAND:
	case QSEOS_RPMB_ERASE_COMMAND:
	case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
		send_req_ptr = &send_svc_ireq;
		req_buf_size = sizeof(send_svc_ireq);
		if (__qseecom_process_rpmb_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	case QSEOS_FSM_LTEOTA_REQ_CMD:
	case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
	case QSEOS_FSM_IKE_REQ_CMD:
	case QSEOS_FSM_IKE_REQ_RSP_CMD:
	case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
	case QSEOS_FSM_OEM_FUSE_READ_ROW:
	case QSEOS_FSM_ENCFS_REQ_CMD:
	case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
		send_req_ptr = &send_fsm_key_svc_ireq;
		req_buf_size = sizeof(send_fsm_key_svc_ireq);
		if (__qseecom_process_fsm_key_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	default:
		pr_err("Unsupported cmd_id %d\n", req.cmd_id);
		return -EINVAL;
	}

	/* vote for bandwidth/clocks for the duration of the call */
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(HIGH);
		if (ret) {
			pr_err("Fail to set bw HIGH\n");
			return ret;
		}
	} else {
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clocks with err %d\n", ret);
			goto exit;
		}
	}

	/*
	 * NOTE(review): on the error paths below that jump straight to
	 * exit (cache-op failures), the clock votes / bus timer taken
	 * above are not released — looks like a vote leak; confirm
	 * against later upstream fixes.
	 */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				(const void *)send_req_ptr,
				req_buf_size, &resp, sizeof(resp));
	if (ret) {
		pr_err("qseecom_scm_call failed with err: %d\n", ret);
		if (!qseecom.support_bus_scaling) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		} else {
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		}
		goto exit;
	}
	/* invalidate so the CPU sees TZ's writes to the shared buffer */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		pr_debug("qseos_result_incomplete\n");
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd fail with result: %d\n",
				resp.result);
		}
		if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
			/* provisioning status is returned in resp.result */
			pr_warn("RPMB key status is 0x%x\n", resp.result);
			if (put_user(resp.result,
				(uint32_t __user *)req.resp_buf)) {
				ret = -EINVAL;
				goto exit;
			}
			ret = 0;
		}
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed with resp.result: %d\n", resp.result);
		ret = -EINVAL;
		break;
	default:
		pr_err("Response result %d not supported\n",
				resp.result);
		ret = -EINVAL;
		break;
	}
	/* release the votes taken before the call */
	if (!qseecom.support_bus_scaling) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	} else {
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
	}

exit:
	return ret;
}
3020
3021static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
3022 struct qseecom_send_cmd_req *req)
3023
3024{
3025 if (!data || !data->client.ihandle) {
3026 pr_err("Client or client handle is not initialized\n");
3027 return -EINVAL;
3028 }
3029 if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
3030 (req->cmd_req_buf == NULL)) {
3031 pr_err("cmd buffer or response buffer is null\n");
3032 return -EINVAL;
3033 }
3034 if (((uintptr_t)req->cmd_req_buf <
3035 data->client.user_virt_sb_base) ||
3036 ((uintptr_t)req->cmd_req_buf >=
3037 (data->client.user_virt_sb_base + data->client.sb_length))) {
3038 pr_err("cmd buffer address not within shared bufffer\n");
3039 return -EINVAL;
3040 }
3041 if (((uintptr_t)req->resp_buf <
3042 data->client.user_virt_sb_base) ||
3043 ((uintptr_t)req->resp_buf >=
3044 (data->client.user_virt_sb_base + data->client.sb_length))) {
3045 pr_err("response buffer address not within shared bufffer\n");
3046 return -EINVAL;
3047 }
3048 if ((req->cmd_req_len == 0) ||
3049 (req->cmd_req_len > data->client.sb_length) ||
3050 (req->resp_len > data->client.sb_length)) {
3051 pr_err("cmd buf length or response buf length not valid\n");
3052 return -EINVAL;
3053 }
3054 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
3055 pr_err("Integer overflow detected in req_len & rsp_len\n");
3056 return -EINVAL;
3057 }
3058
3059 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
3060 pr_debug("Not enough memory to fit cmd_buf.\n");
3061 pr_debug("resp_buf. Required: %u, Available: %zu\n",
3062 (req->cmd_req_len + req->resp_len),
3063 data->client.sb_length);
3064 return -ENOMEM;
3065 }
3066 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
3067 pr_err("Integer overflow in req_len & cmd_req_buf\n");
3068 return -EINVAL;
3069 }
3070 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
3071 pr_err("Integer overflow in resp_len & resp_buf\n");
3072 return -EINVAL;
3073 }
3074 if (data->client.user_virt_sb_base >
3075 (ULONG_MAX - data->client.sb_length)) {
3076 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
3077 return -EINVAL;
3078 }
3079 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
3080 ((uintptr_t)data->client.user_virt_sb_base +
3081 data->client.sb_length)) ||
3082 (((uintptr_t)req->resp_buf + req->resp_len) >
3083 ((uintptr_t)data->client.user_virt_sb_base +
3084 data->client.sb_length))) {
3085 pr_err("cmd buf or resp buf is out of shared buffer region\n");
3086 return -EINVAL;
3087 }
3088 return 0;
3089}
3090
/*
 * Post-process the TZ response of an scm call when QSEE re-entrancy is
 * supported. Maps the TZ result onto an errno, driving a blocked or
 * incomplete command to completion first.
 *
 * Returns 0 on QSEOS_RESULT_SUCCESS (or once the incomplete command
 * finishes), a negative errno otherwise.
 */
int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
			struct qseecom_registered_app_list *ptr_app,
			struct qseecom_dev_handle *data)
{
	int ret = 0;

	switch (resp->result) {
	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
		pr_warn("App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name,
			resp->data);
		ret = __qseecom_process_reentrancy_blocked_on_listener(
			resp, ptr_app, data);
		if (ret) {
			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
				data->client.app_id, data->client.app_name, resp->data);
			return ret;
		}
		/*
		 * Deliberate fall-through: once the listener block is
		 * resolved, the command is still incomplete and must be
		 * driven to completion below.
		 */

	case QSEOS_RESULT_INCOMPLETE:
		/*
		 * Mark this app blocked (and bump the global block refcount)
		 * for the duration of the incomplete-cmd processing, then
		 * wake any waiters once it is unblocked.
		 */
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
		wake_up_interruptible(&qseecom.app_block_wq);
		if (ret)
			pr_err("process_incomplete_cmd failed err: %d\n",
				ret);
		return ret;
	case QSEOS_RESULT_SUCCESS:
		return ret;
	default:
		pr_err("Response result %d not supported\n",
						resp->result);
		return -EINVAL;
	}
}
3129
/*
 * Core send-command path: looks up the registered app, builds the
 * 32-bit or 64-bit ireq (depending on QSEE version), performs the
 * required cache maintenance on the shared buffer, and issues the
 * scm call to TZ, post-processing incomplete/blocked results.
 *
 * @data: client handle; its shared buffer holds the cmd/resp payloads
 *        (already validated by __validate_send_cmd_inputs()).
 * @req:  user request with uvirt pointers into the shared buffer.
 *
 * Returns 0 on success or a negative errno.
 */
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)
{
	int ret = 0;
	int ret2 = 0;
	u32 reqd_len_sb_in = 0;
	struct qseecom_client_send_data_ireq send_data_req = {0};
	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
	struct qseecom_command_scm_resp resp;
	unsigned long flags;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;

	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
							list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);

	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/*
	 * Pre-4.0 QSEE takes 32-bit physical addresses; newer versions
	 * take the 64-bit ireq layout.
	 */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		send_data_req.app_id = data->client.app_id;
		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->cmd_req_buf));
		send_data_req.req_len = req->cmd_req_len;
		send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->resp_buf));
		send_data_req.rsp_len = req->resp_len;
		send_data_req.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* TZ reads the sglist table via its PA: flush it out. */
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req;
		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
	} else {
		send_data_req_64bit.app_id = data->client.app_id;
		send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->cmd_req_buf);
		send_data_req_64bit.req_len = req->cmd_req_len;
		send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->resp_buf);
		send_data_req_64bit.rsp_len = req->resp_len;
		/* check if 32bit app's phys_addr region is under 4GB.*/
		if ((data->client.app_arch == ELFCLASS32) &&
			((send_data_req_64bit.req_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
			(send_data_req_64bit.rsp_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
				data->client.app_name,
				send_data_req_64bit.req_ptr,
				send_data_req_64bit.req_len,
				send_data_req_64bit.rsp_ptr,
				send_data_req_64bit.rsp_len);
			return -EFAULT;
		}
		send_data_req_64bit.sglistinfo_ptr =
				(uint64_t)virt_to_phys(table);
		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req_64bit;
		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
	}

	/* The command id is the first u32 of either ireq layout. */
	if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
	else
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;

	/* Make the request payload visible to TZ before the scm call. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/*
	 * Invalidate the whole shared buffer on every path (success or
	 * error) so the CPU sees the response TZ wrote; keep any earlier
	 * error in 'ret' unless the invalidate itself fails.
	 */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret2) {
		pr_err("cache operation failed %d\n", ret2);
		return ret2;
	}
	return ret;
}
3267
3268static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3269{
3270 int ret = 0;
3271 struct qseecom_send_cmd_req req;
3272
3273 ret = copy_from_user(&req, argp, sizeof(req));
3274 if (ret) {
3275 pr_err("copy_from_user failed\n");
3276 return ret;
3277 }
3278
3279 if (__validate_send_cmd_inputs(data, &req))
3280 return -EINVAL;
3281
3282 ret = __qseecom_send_cmd(data, &req);
3283
3284 if (ret)
3285 return ret;
3286
3287 return ret;
3288}
3289
3290int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3291 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3292 struct qseecom_dev_handle *data, int i) {
3293
3294 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3295 (req->ifd_data[i].fd > 0)) {
3296 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3297 (req->ifd_data[i].cmd_buf_offset >
3298 req->cmd_req_len - sizeof(uint32_t))) {
3299 pr_err("Invalid offset (req len) 0x%x\n",
3300 req->ifd_data[i].cmd_buf_offset);
3301 return -EINVAL;
3302 }
3303 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3304 (lstnr_resp->ifd_data[i].fd > 0)) {
3305 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3306 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3307 lstnr_resp->resp_len - sizeof(uint32_t))) {
3308 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3309 lstnr_resp->ifd_data[i].cmd_buf_offset);
3310 return -EINVAL;
3311 }
3312 }
3313 return 0;
3314}
3315
/*
 * Patch (or, with @cleanup, un-patch) 32-bit sg-entry physical addresses
 * into the command/response buffer at each ifd_data[i].cmd_buf_offset,
 * for every ion fd the request references. On the non-cleanup pass the
 * sglistinfo table for the client app (or listener) is also recorded,
 * and the imported buffers get the appropriate cache maintenance.
 *
 * @msg:     a qseecom_send_modfd_cmd_req (client app) or
 *           qseecom_send_modfd_listener_resp (listener), chosen by
 *           data->type.
 * @cleanup: false before the scm call (write PAs, clean caches),
 *           true afterwards (zero PAs, invalidate caches).
 *
 * Returns 0 on success; -EFAULT/-EINVAL on bad caller type or message;
 * -ENOMEM on any per-fd failure (note: validation failures on the err
 * path are also reported as -ENOMEM).
 */
static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* Interpret @msg according to the caller's role. */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/*
		 * Resolve this fd slot to an ion handle and the location
		 * ('field') inside the cmd/resp buffer to patch; fd <= 0
		 * means the slot is unused.
		 */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
						(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		/* The 32-bit path has no sg-list spill buffer: hard cap. */
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_err("Num of scattered entries");
			pr_err(" (%d) is greater than max supported %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: patch one 32-bit PA at 'field'. */
			uint32_t *update;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				/*
				 * Check if sg list phy add region is under 4GB
				 */
				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
					(!cleanup) &&
					((uint64_t)sg_dma_address(sg_ptr->sgl)
					>= PHY_ADDR_4G - sg->length)) {
					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
						data->client.app_name,
						&(sg_dma_address(sg_ptr->sgl)),
						sg->length);
					goto err;
				}
				update = (uint32_t *) field;
				*update = cleanup ? 0 :
					(uint32_t)sg_dma_address(sg_ptr->sgl);
			} else {
				pr_err("QSEE app arch %u is not supported\n",
							data->client.app_arch);
				goto err;
			}
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: write an sg-entry array. */
			struct qseecom_sg_entry *update;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
						(req->cmd_req_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
						SG_ENTRY_SZ * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					goto err;
				}
			}
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				update = (struct qseecom_sg_entry *)field;
				for (j = 0; j < sg_ptr->nents; j++) {
					/*
					 * Check if sg list PA is under 4GB
					 */
					if ((qseecom.qsee_version >=
						QSEE_VERSION_40) &&
						(!cleanup) &&
						((uint64_t)(sg_dma_address(sg))
						>= PHY_ADDR_4G - sg->length)) {
						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
							data->client.app_name,
							&(sg_dma_address(sg)),
							sg->length);
						goto err;
					}
					update->phys_addr = cleanup ? 0 :
						(uint32_t)sg_dma_address(sg);
					update->len = cleanup ? 0 : sg->length;
					update++;
					len += sg->length;
					sg = sg_next(sg);
				}
			} else {
				pr_err("QSEE app arch %u is not supported\n",
							data->client.app_arch);
				goto err;
			}
		}

		if (cleanup) {
			/* Post-call pass: make TZ's writes visible to CPU. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Pre-call pass: flush and record sglist metadata. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				/* Listener offset is relative to sb_virt. */
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3532
3533static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3534 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3535{
3536 struct scatterlist *sg = sg_ptr->sgl;
3537 struct qseecom_sg_entry_64bit *sg_entry;
3538 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3539 void *buf;
3540 uint i;
3541 size_t size;
3542 dma_addr_t coh_pmem;
3543
3544 if (fd_idx >= MAX_ION_FD) {
3545 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3546 return -ENOMEM;
3547 }
3548 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3549 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3550 /* Allocate a contiguous kernel buffer */
3551 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3552 size = (size + PAGE_SIZE) & PAGE_MASK;
3553 buf = dma_alloc_coherent(qseecom.pdev,
3554 size, &coh_pmem, GFP_KERNEL);
3555 if (buf == NULL) {
3556 pr_err("failed to alloc memory for sg buf\n");
3557 return -ENOMEM;
3558 }
3559 /* update qseecom_sg_list_buf_hdr_64bit */
3560 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3561 buf_hdr->new_buf_phys_addr = coh_pmem;
3562 buf_hdr->nents_total = sg_ptr->nents;
3563 /* save the left sg entries into new allocated buf */
3564 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3565 for (i = 0; i < sg_ptr->nents; i++) {
3566 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3567 sg_entry->len = sg->length;
3568 sg_entry++;
3569 sg = sg_next(sg);
3570 }
3571
3572 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3573 data->client.sec_buf_fd[fd_idx].vbase = buf;
3574 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3575 data->client.sec_buf_fd[fd_idx].size = size;
3576
3577 return 0;
3578}
3579
/*
 * 64-bit counterpart of __qseecom_update_cmd_buf(): patches (or, with
 * @cleanup, un-patches) 64-bit sg-entry physical addresses into the
 * command/response buffer for each referenced ion fd. Unlike the 32-bit
 * path, an fd with more than QSEECOM_MAX_SG_ENTRY entries is not
 * rejected: the entry list is spilled into a DMA-coherent buffer via
 * __qseecom_allocate_sg_list_buffer() (freed here on the cleanup pass).
 *
 * @msg:     qseecom_send_modfd_cmd_req or qseecom_send_modfd_listener_resp,
 *           selected by data->type.
 * @cleanup: false before the scm call, true afterwards.
 *
 * Returns 0 on success; -EFAULT/-EINVAL on bad caller type or message;
 * -ENOMEM on any per-fd failure (the err path also frees every recorded
 * spill buffer).
 */
static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* Interpret @msg according to the caller's role. */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* fd <= 0 means the slot is unused; skip it. */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
						(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		/*
		 * Too many entries for the inline layout: spill to a
		 * coherent buffer (alloc on the way in, free on cleanup),
		 * then jump straight to the cache-maintenance step.
		 */
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_warn("Num of scattered entries");
			pr_warn(" (%d) is greater than %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			if (cleanup) {
				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
					data->client.sec_buf_fd[i].vbase)
					dma_free_coherent(qseecom.pdev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			} else {
				ret = __qseecom_allocate_sg_list_buffer(data,
						field, i, sg_ptr);
				if (ret) {
					pr_err("Failed to allocate sg list buffer\n");
					goto err;
				}
			}
			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
			sg = sg_ptr->sgl;
			goto cleanup;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: patch one 64-bit PA at 'field'. */
			uint64_t *update_64bit;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			/* 64bit app uses 64bit address */
			update_64bit = (uint64_t *) field;
			*update_64bit = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg_ptr->sgl);
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: write a 64-bit sg-entry array. */
			struct qseecom_sg_entry_64bit *update_64bit;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
					(req->cmd_req_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
					SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					goto err;
				}
			}
			/* 64bit app uses 64bit address */
			update_64bit = (struct qseecom_sg_entry_64bit *)field;
			for (j = 0; j < sg_ptr->nents; j++) {
				update_64bit->phys_addr = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg);
				update_64bit->len = cleanup ? 0 :
						(uint32_t)sg->length;
				update_64bit++;
				len += sg->length;
				sg = sg_next(sg);
			}
		}
cleanup:
		if (cleanup) {
			/* Post-call pass: make TZ's writes visible to CPU. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Pre-call pass: flush and record sglist metadata. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				/* Listener offset is relative to sb_virt. */
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	/* Free every spill buffer recorded so far, then the handle. */
	for (i = 0; i < MAX_ION_FD; i++)
		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
			data->client.sec_buf_fd[i].vbase)
			dma_free_coherent(qseecom.pdev,
				data->client.sec_buf_fd[i].size,
				data->client.sec_buf_fd[i].vbase,
				data->client.sec_buf_fd[i].pbase);
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3778
3779static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3780 void __user *argp,
3781 bool is_64bit_addr)
3782{
3783 int ret = 0;
3784 int i;
3785 struct qseecom_send_modfd_cmd_req req;
3786 struct qseecom_send_cmd_req send_cmd_req;
3787
3788 ret = copy_from_user(&req, argp, sizeof(req));
3789 if (ret) {
3790 pr_err("copy_from_user failed\n");
3791 return ret;
3792 }
3793
3794 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3795 send_cmd_req.cmd_req_len = req.cmd_req_len;
3796 send_cmd_req.resp_buf = req.resp_buf;
3797 send_cmd_req.resp_len = req.resp_len;
3798
3799 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3800 return -EINVAL;
3801
3802 /* validate offsets */
3803 for (i = 0; i < MAX_ION_FD; i++) {
3804 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3805 pr_err("Invalid offset %d = 0x%x\n",
3806 i, req.ifd_data[i].cmd_buf_offset);
3807 return -EINVAL;
3808 }
3809 }
3810 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3811 (uintptr_t)req.cmd_req_buf);
3812 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3813 (uintptr_t)req.resp_buf);
3814
3815 if (!is_64bit_addr) {
3816 ret = __qseecom_update_cmd_buf(&req, false, data);
3817 if (ret)
3818 return ret;
3819 ret = __qseecom_send_cmd(data, &send_cmd_req);
3820 if (ret)
3821 return ret;
3822 ret = __qseecom_update_cmd_buf(&req, true, data);
3823 if (ret)
3824 return ret;
3825 } else {
3826 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3827 if (ret)
3828 return ret;
3829 ret = __qseecom_send_cmd(data, &send_cmd_req);
3830 if (ret)
3831 return ret;
3832 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3833 if (ret)
3834 return ret;
3835 }
3836
3837 return ret;
3838}
3839
3840static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3841 void __user *argp)
3842{
3843 return __qseecom_send_modfd_cmd(data, argp, false);
3844}
3845
3846static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
3847 void __user *argp)
3848{
3849 return __qseecom_send_modfd_cmd(data, argp, true);
3850}
3851
3852
3853
3854static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3855 struct qseecom_registered_listener_list *svc)
3856{
3857 int ret;
3858
3859 ret = (svc->rcv_req_flag != 0);
3860 return ret || data->abort;
3861}
3862
3863static int qseecom_receive_req(struct qseecom_dev_handle *data)
3864{
3865 int ret = 0;
3866 struct qseecom_registered_listener_list *this_lstnr;
3867
3868 this_lstnr = __qseecom_find_svc(data->listener.id);
3869 if (!this_lstnr) {
3870 pr_err("Invalid listener ID\n");
3871 return -ENODATA;
3872 }
3873
3874 while (1) {
3875 if (wait_event_freezable(this_lstnr->rcv_req_wq,
3876 __qseecom_listener_has_rcvd_req(data,
3877 this_lstnr))) {
3878 pr_debug("Interrupted: exiting Listener Service = %d\n",
3879 (uint32_t)data->listener.id);
3880 /* woken up for different reason */
3881 return -ERESTARTSYS;
3882 }
3883
3884 if (data->abort) {
3885 pr_err("Aborting Listener Service = %d\n",
3886 (uint32_t)data->listener.id);
3887 return -ENODEV;
3888 }
3889 this_lstnr->rcv_req_flag = 0;
3890 break;
3891 }
3892 return ret;
3893}
3894
3895static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
3896{
3897 unsigned char app_arch = 0;
3898 struct elf32_hdr *ehdr;
3899 struct elf64_hdr *ehdr64;
3900
3901 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3902
3903 switch (app_arch) {
3904 case ELFCLASS32: {
3905 ehdr = (struct elf32_hdr *)fw_entry->data;
3906 if (fw_entry->size < sizeof(*ehdr)) {
3907 pr_err("%s: Not big enough to be an elf32 header\n",
3908 qseecom.pdev->init_name);
3909 return false;
3910 }
3911 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
3912 pr_err("%s: Not an elf32 header\n",
3913 qseecom.pdev->init_name);
3914 return false;
3915 }
3916 if (ehdr->e_phnum == 0) {
3917 pr_err("%s: No loadable segments\n",
3918 qseecom.pdev->init_name);
3919 return false;
3920 }
3921 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
3922 sizeof(struct elf32_hdr) > fw_entry->size) {
3923 pr_err("%s: Program headers not within mdt\n",
3924 qseecom.pdev->init_name);
3925 return false;
3926 }
3927 break;
3928 }
3929 case ELFCLASS64: {
3930 ehdr64 = (struct elf64_hdr *)fw_entry->data;
3931 if (fw_entry->size < sizeof(*ehdr64)) {
3932 pr_err("%s: Not big enough to be an elf64 header\n",
3933 qseecom.pdev->init_name);
3934 return false;
3935 }
3936 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
3937 pr_err("%s: Not an elf64 header\n",
3938 qseecom.pdev->init_name);
3939 return false;
3940 }
3941 if (ehdr64->e_phnum == 0) {
3942 pr_err("%s: No loadable segments\n",
3943 qseecom.pdev->init_name);
3944 return false;
3945 }
3946 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
3947 sizeof(struct elf64_hdr) > fw_entry->size) {
3948 pr_err("%s: Program headers not within mdt\n",
3949 qseecom.pdev->init_name);
3950 return false;
3951 }
3952 break;
3953 }
3954 default: {
3955 pr_err("QSEE app arch %u is not supported\n", app_arch);
3956 return false;
3957 }
3958 }
3959 return true;
3960}
3961
/*
 * Compute the total size of a trusted app's split firmware image:
 * the <app>.mdt header plus every <app>.bNN segment blob, and report
 * the ELF class (arch) from the .mdt.
 *
 * @appname:  firmware base name.
 * @fw_size:  out, total byte count of mdt + all segment files
 *            (set to 0 on failure).
 * @app_arch: out, ELFCLASS32/ELFCLASS64 from the .mdt e_ident.
 *
 * Returns 0 on success, -EIO/-EINVAL or a request_firmware error
 * otherwise.
 */
static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
					uint32_t *app_arch)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		pr_err("error with request_firmware\n");
		ret = -EIO;
		goto err;
	}
	if (!__qseecom_is_fw_image_valid(fw_entry)) {
		ret = -EIO;
		goto err;
	}
	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	*fw_size = fw_entry->size;
	/* The segment count is the ELF program-header count. */
	if (*app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (*app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
						appname, *app_arch);
		ret = -EIO;
		goto err;
	}
	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
	release_firmware(fw_entry);
	fw_entry = NULL;
	/*
	 * Sum each segment blob's size; num_images >= 1 here because
	 * __qseecom_is_fw_image_valid() rejected e_phnum == 0, so 'ret'
	 * is set by the last request_firmware() in this loop.
	 */
	for (i = 0; i < num_images; i++) {
		memset(fw_name, 0, sizeof(fw_name));
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret)
			goto err;
		if (*fw_size > U32_MAX - fw_entry->size) {
			pr_err("QSEE %s app file size overflow\n", appname);
			ret = -EINVAL;
			goto err;
		}
		*fw_size += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}

	return ret;
err:
	if (fw_entry)
		release_firmware(fw_entry);
	*fw_size = 0;
	return ret;
}
4024
/*
 * Read the firmware image for @appname into the pre-sized buffer
 * @img_data (capacity @fw_size, as computed by __qseecom_get_fw_size()):
 * first the .mdt (ELF header + program headers), then each .bNN segment
 * blob in order.  Fills in @load_req->img_len and @load_req->mdt_len.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
		uint32_t fw_size,
		struct qseecom_load_app_ireq *load_req)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	u8 *img_data_ptr = img_data;	/* write cursor into img_data */
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;
	unsigned char app_arch = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		ret = -EIO;
		goto err;
	}

	/* the .mdt must fit in the caller-provided buffer */
	load_req->img_len = fw_entry->size;
	if (load_req->img_len > fw_size) {
		pr_err("app %s size %zu is larger than buf size %u\n",
			appname, fw_entry->size, fw_size);
		ret = -EINVAL;
		goto err;
	}
	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
	img_data_ptr = img_data_ptr + fw_entry->size;
	load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/

	/* EI_CLASS byte selects 32- vs 64-bit ELF header layout */
	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	if (app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
			appname, app_arch);
		ret = -EIO;
		goto err;
	}
	release_firmware(fw_entry);
	fw_entry = NULL;
	/*
	 * Append each segment blob, guarding both against u32 overflow of
	 * img_len and against overrunning the fw_size buffer capacity.
	 */
	for (i = 0; i < num_images; i++) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret) {
			pr_err("Failed to locate blob %s\n", fw_name);
			goto err;
		}
		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
			(fw_entry->size + load_req->img_len > fw_size)) {
			pr_err("Invalid file size for %s\n", fw_name);
			ret = -EINVAL;
			goto err;
		}
		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
		img_data_ptr = img_data_ptr + fw_entry->size;
		load_req->img_len += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	/* ret holds 0 from the last successful request_firmware() */
	return ret;
err:
	release_firmware(fw_entry);
	return ret;
}
4096
/*
 * Allocate a 4K-aligned ION buffer of @fw_size bytes from the QSEECOM
 * heap, map it into the kernel, and look up its physical address.
 * On success, the handle, kernel virtual address, and physical address
 * are returned through @pihandle, @data, and @paddr respectively; the
 * caller releases all three with __qseecom_free_img_data().
 */
static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
			u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
{
	size_t len = 0;
	int ret = 0;
	ion_phys_addr_t pa;
	struct ion_handle *ihandle = NULL;
	u8 *img_data = NULL;

	ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
			SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);

	if (IS_ERR_OR_NULL(ihandle)) {
		pr_err("ION alloc failed\n");
		return -ENOMEM;
	}
	img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
					ihandle);

	if (IS_ERR_OR_NULL(img_data)) {
		pr_err("ION memory mapping for image loading failed\n");
		ret = -ENOMEM;
		goto exit_ion_free;
	}
	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
	if (ret) {
		pr_err("physical memory retrieval failure\n");
		ret = -EIO;
		goto exit_ion_unmap_kernel;
	}

	/* all three resources acquired; hand ownership to the caller */
	*pihandle = ihandle;
	*data = img_data;
	*paddr = pa;
	return ret;

exit_ion_unmap_kernel:
	ion_unmap_kernel(qseecom.ion_clnt, ihandle);
exit_ion_free:
	ion_free(qseecom.ion_clnt, ihandle);
	ihandle = NULL;
	return ret;
}
4141
4142static void __qseecom_free_img_data(struct ion_handle **ihandle)
4143{
4144 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4145 ion_free(qseecom.ion_clnt, *ihandle);
4146 *ihandle = NULL;
4147}
4148
4149static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4150 uint32_t *app_id)
4151{
4152 int ret = -1;
4153 uint32_t fw_size = 0;
4154 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4155 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4156 struct qseecom_command_scm_resp resp;
4157 u8 *img_data = NULL;
4158 ion_phys_addr_t pa = 0;
4159 struct ion_handle *ihandle = NULL;
4160 void *cmd_buf = NULL;
4161 size_t cmd_len;
4162 uint32_t app_arch = 0;
4163
4164 if (!data || !appname || !app_id) {
4165 pr_err("Null pointer to data or appname or appid\n");
4166 return -EINVAL;
4167 }
4168 *app_id = 0;
4169 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4170 return -EIO;
4171 data->client.app_arch = app_arch;
4172
4173 /* Check and load cmnlib */
4174 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4175 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4176 ret = qseecom_load_commonlib_image(data, "cmnlib");
4177 if (ret) {
4178 pr_err("failed to load cmnlib\n");
4179 return -EIO;
4180 }
4181 qseecom.commonlib_loaded = true;
4182 pr_debug("cmnlib is loaded\n");
4183 }
4184
4185 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4186 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4187 if (ret) {
4188 pr_err("failed to load cmnlib64\n");
4189 return -EIO;
4190 }
4191 qseecom.commonlib64_loaded = true;
4192 pr_debug("cmnlib64 is loaded\n");
4193 }
4194 }
4195
4196 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4197 if (ret)
4198 return ret;
4199
4200 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4201 if (ret) {
4202 ret = -EIO;
4203 goto exit_free_img_data;
4204 }
4205
4206 /* Populate the load_req parameters */
4207 if (qseecom.qsee_version < QSEE_VERSION_40) {
4208 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4209 load_req.mdt_len = load_req.mdt_len;
4210 load_req.img_len = load_req.img_len;
4211 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4212 load_req.phy_addr = (uint32_t)pa;
4213 cmd_buf = (void *)&load_req;
4214 cmd_len = sizeof(struct qseecom_load_app_ireq);
4215 } else {
4216 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4217 load_req_64bit.mdt_len = load_req.mdt_len;
4218 load_req_64bit.img_len = load_req.img_len;
4219 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4220 load_req_64bit.phy_addr = (uint64_t)pa;
4221 cmd_buf = (void *)&load_req_64bit;
4222 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4223 }
4224
4225 if (qseecom.support_bus_scaling) {
4226 mutex_lock(&qsee_bw_mutex);
4227 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4228 mutex_unlock(&qsee_bw_mutex);
4229 if (ret) {
4230 ret = -EIO;
4231 goto exit_free_img_data;
4232 }
4233 }
4234
4235 ret = __qseecom_enable_clk_scale_up(data);
4236 if (ret) {
4237 ret = -EIO;
4238 goto exit_unregister_bus_bw_need;
4239 }
4240
4241 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4242 img_data, fw_size,
4243 ION_IOC_CLEAN_INV_CACHES);
4244 if (ret) {
4245 pr_err("cache operation failed %d\n", ret);
4246 goto exit_disable_clk_vote;
4247 }
4248
4249 /* SCM_CALL to load the image */
4250 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4251 &resp, sizeof(resp));
4252 if (ret) {
4253 pr_err("scm_call to load failed : ret %d\n", ret);
4254 ret = -EIO;
4255 goto exit_disable_clk_vote;
4256 }
4257
4258 switch (resp.result) {
4259 case QSEOS_RESULT_SUCCESS:
4260 *app_id = resp.data;
4261 break;
4262 case QSEOS_RESULT_INCOMPLETE:
4263 ret = __qseecom_process_incomplete_cmd(data, &resp);
4264 if (ret)
4265 pr_err("process_incomplete_cmd FAILED\n");
4266 else
4267 *app_id = resp.data;
4268 break;
4269 case QSEOS_RESULT_FAILURE:
4270 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4271 break;
4272 default:
4273 pr_err("scm call return unknown response %d\n", resp.result);
4274 ret = -EINVAL;
4275 break;
4276 }
4277
4278exit_disable_clk_vote:
4279 __qseecom_disable_clk_scale_down(data);
4280
4281exit_unregister_bus_bw_need:
4282 if (qseecom.support_bus_scaling) {
4283 mutex_lock(&qsee_bw_mutex);
4284 qseecom_unregister_bus_bandwidth_needs(data);
4285 mutex_unlock(&qsee_bw_mutex);
4286 }
4287
4288exit_free_img_data:
4289 __qseecom_free_img_data(&ihandle);
4290 return ret;
4291}
4292
/*
 * Load a QSEE common service library ("cmnlib" or "cmnlib64") into
 * secure memory.  The firmware is staged in a dedicated ION buffer,
 * cache-flushed, and handed to TZ via QSEOS_LOAD_SERV_IMAGE_COMMAND.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name)
{
	int ret = 0;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	ion_phys_addr_t pa = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	uint32_t app_arch = 0;
	struct ion_handle *cmnlib_ion_handle = NULL;

	if (!cmnlib_name) {
		pr_err("cmnlib_name is NULL\n");
		return -EINVAL;
	}
	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
			cmnlib_name, strlen(cmnlib_name));
		return -EINVAL;
	}

	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
		return -EIO;

	ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
						&img_data, fw_size, &pa);
	if (ret)
		return -EIO;

	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}
	/* request layout differs for pre- and post-QSEE-4.0 firmware */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		load_req.phy_addr = (uint32_t)pa;
		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		cmd_buf = (void *)&load_req;
		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
	} else {
		load_req_64bit.phy_addr = (uint64_t)pa;
		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		load_req_64bit.img_len = load_req.img_len;
		load_req_64bit.mdt_len = load_req.mdt_len;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	/* Flush the staged image from CPU caches so TZ sees the data */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
				img_data, fw_size,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clk_vote;
	}

	/* SCM_CALL to load the image */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
							&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n", ret);
		ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed w/response result%d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener service(s) before the load completes */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd failed err: %d\n", ret);
			goto exit_disable_clk_vote;
		}
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	}

exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	__qseecom_free_img_data(&cmnlib_ion_handle);
	return ret;
}
4413
4414static int qseecom_unload_commonlib_image(void)
4415{
4416 int ret = -EINVAL;
4417 struct qseecom_unload_lib_image_ireq unload_req = {0};
4418 struct qseecom_command_scm_resp resp;
4419
4420 /* Populate the remaining parameters */
4421 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4422
4423 /* SCM_CALL to load the image */
4424 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4425 sizeof(struct qseecom_unload_lib_image_ireq),
4426 &resp, sizeof(resp));
4427 if (ret) {
4428 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4429 ret = -EIO;
4430 } else {
4431 switch (resp.result) {
4432 case QSEOS_RESULT_SUCCESS:
4433 break;
4434 case QSEOS_RESULT_FAILURE:
4435 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4436 break;
4437 default:
4438 pr_err("scm call return unknown response %d\n",
4439 resp.result);
4440 ret = -EINVAL;
4441 break;
4442 }
4443 }
4444
4445 return ret;
4446}
4447
4448int qseecom_start_app(struct qseecom_handle **handle,
4449 char *app_name, uint32_t size)
4450{
4451 int32_t ret = 0;
4452 unsigned long flags = 0;
4453 struct qseecom_dev_handle *data = NULL;
4454 struct qseecom_check_app_ireq app_ireq;
4455 struct qseecom_registered_app_list *entry = NULL;
4456 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4457 bool found_app = false;
4458 size_t len;
4459 ion_phys_addr_t pa;
4460 uint32_t fw_size, app_arch;
4461 uint32_t app_id = 0;
4462
4463 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4464 pr_err("Not allowed to be called in %d state\n",
4465 atomic_read(&qseecom.qseecom_state));
4466 return -EPERM;
4467 }
4468 if (!app_name) {
4469 pr_err("failed to get the app name\n");
4470 return -EINVAL;
4471 }
4472
Zhen Kong64a6d7282017-06-16 11:55:07 -07004473 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004474 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004475 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004476 return -EINVAL;
4477 }
4478
4479 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4480 if (!(*handle))
4481 return -ENOMEM;
4482
4483 data = kzalloc(sizeof(*data), GFP_KERNEL);
4484 if (!data) {
4485 if (ret == 0) {
4486 kfree(*handle);
4487 *handle = NULL;
4488 }
4489 return -ENOMEM;
4490 }
4491 data->abort = 0;
4492 data->type = QSEECOM_CLIENT_APP;
4493 data->released = false;
4494 data->client.sb_length = size;
4495 data->client.user_virt_sb_base = 0;
4496 data->client.ihandle = NULL;
4497
4498 init_waitqueue_head(&data->abort_wq);
4499
4500 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4501 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4502 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4503 pr_err("Ion client could not retrieve the handle\n");
4504 kfree(data);
4505 kfree(*handle);
4506 *handle = NULL;
4507 return -EINVAL;
4508 }
4509 mutex_lock(&app_access_lock);
4510
4511 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4512 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4513 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4514 if (ret)
4515 goto err;
4516
4517 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4518 if (app_id) {
4519 pr_warn("App id %d for [%s] app exists\n", app_id,
4520 (char *)app_ireq.app_name);
4521 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4522 list_for_each_entry(entry,
4523 &qseecom.registered_app_list_head, list){
4524 if (entry->app_id == app_id) {
4525 entry->ref_cnt++;
4526 found_app = true;
4527 break;
4528 }
4529 }
4530 spin_unlock_irqrestore(
4531 &qseecom.registered_app_list_lock, flags);
4532 if (!found_app)
4533 pr_warn("App_id %d [%s] was loaded but not registered\n",
4534 ret, (char *)app_ireq.app_name);
4535 } else {
4536 /* load the app and get the app_id */
4537 pr_debug("%s: Loading app for the first time'\n",
4538 qseecom.pdev->init_name);
4539 ret = __qseecom_load_fw(data, app_name, &app_id);
4540 if (ret < 0)
4541 goto err;
4542 }
4543 data->client.app_id = app_id;
4544 if (!found_app) {
4545 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4546 if (!entry) {
4547 pr_err("kmalloc for app entry failed\n");
4548 ret = -ENOMEM;
4549 goto err;
4550 }
4551 entry->app_id = app_id;
4552 entry->ref_cnt = 1;
4553 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4554 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4555 ret = -EIO;
4556 kfree(entry);
4557 goto err;
4558 }
4559 entry->app_arch = app_arch;
4560 entry->app_blocked = false;
4561 entry->blocked_on_listener_id = 0;
4562 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4563 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4564 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4565 flags);
4566 }
4567
4568 /* Get the physical address of the ION BUF */
4569 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4570 if (ret) {
4571 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4572 ret);
4573 goto err;
4574 }
4575
4576 /* Populate the structure for sending scm call to load image */
4577 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4578 data->client.ihandle);
4579 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4580 pr_err("ION memory mapping for client shared buf failed\n");
4581 ret = -ENOMEM;
4582 goto err;
4583 }
4584 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4585 data->client.sb_phys = (phys_addr_t)pa;
4586 (*handle)->dev = (void *)data;
4587 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4588 (*handle)->sbuf_len = data->client.sb_length;
4589
4590 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4591 if (!kclient_entry) {
4592 ret = -ENOMEM;
4593 goto err;
4594 }
4595 kclient_entry->handle = *handle;
4596
4597 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4598 list_add_tail(&kclient_entry->list,
4599 &qseecom.registered_kclient_list_head);
4600 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4601
4602 mutex_unlock(&app_access_lock);
4603 return 0;
4604
4605err:
4606 kfree(data);
4607 kfree(*handle);
4608 *handle = NULL;
4609 mutex_unlock(&app_access_lock);
4610 return ret;
4611}
4612EXPORT_SYMBOL(qseecom_start_app);
4613
/*
 * qseecom_shutdown_app() - tear down a kernel-client handle created by
 * qseecom_start_app(): unlink it from the registered kclient list,
 * unload the app (subject to its ref count), and free the handle
 * memory with kzfree so stale pointers are scrubbed.
 *
 * Returns 0 on success; -EPERM when the driver is not ready; -EINVAL
 * for an invalid/unknown handle or an unload failure.
 */
int qseecom_shutdown_app(struct qseecom_handle **handle)
{
	int ret = -EINVAL;
	struct qseecom_dev_handle *data;

	struct qseecom_registered_kclient_list *kclient = NULL;
	unsigned long flags = 0;
	bool found_handle = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
			atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if ((handle == NULL) || (*handle == NULL)) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = (struct qseecom_dev_handle *) ((*handle)->dev);
	mutex_lock(&app_access_lock);

	/* unlink this client from the registered kclient list */
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
				list) {
		if (kclient->handle == (*handle)) {
			list_del(&kclient->list);
			found_handle = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
	if (!found_handle)
		pr_err("Unable to find the handle, exiting\n");
	else
		ret = qseecom_unload_app(data, false);

	mutex_unlock(&app_access_lock);
	/* free only on success; kclient was unlinked above in that case */
	if (ret == 0) {
		kzfree(data);
		kzfree(*handle);
		kzfree(kclient);
		*handle = NULL;
	}

	return ret;
}
EXPORT_SYMBOL(qseecom_shutdown_app);
4662
/*
 * qseecom_send_command() - kernel-client API: send a command buffer to
 * the trusted app behind @handle and receive its response.
 * @send_buf/@sbuf_len and @resp_buf/@rbuf_len must lie inside the
 * shared buffer set up by qseecom_start_app() (checked by
 * __validate_send_cmd_inputs()).
 *
 * Returns 0 on success, negative errno on failure.
 */
int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
{
	int ret = 0;
	struct qseecom_send_cmd_req req = {0, 0, 0, 0};
	struct qseecom_dev_handle *data;
	bool perf_enabled = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
			atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if (handle == NULL) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = handle->dev;

	req.cmd_req_len = sbuf_len;
	req.resp_len = rbuf_len;
	req.cmd_req_buf = send_buf;
	req.resp_buf = resp_buf;

	if (__validate_send_cmd_inputs(data, &req))
		return -EINVAL;

	mutex_lock(&app_access_lock);
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
		if (ret) {
			pr_err("Failed to set bw.\n");
			mutex_unlock(&app_access_lock);
			return ret;
		}
	}
	/*
	 * On targets where crypto clock is handled by HLOS,
	 * if clk_access_cnt is zero and perf_enabled is false,
	 * then the crypto clock was not enabled before sending cmd
	 * to tz, qseecom will enable the clock to avoid service failure.
	 */
	if (!qseecom.no_clock_support &&
		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
		pr_debug("ce clock is not enabled!\n");
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clock with err %d\n",
				ret);
			mutex_unlock(&app_access_lock);
			return -EINVAL;
		}
		perf_enabled = true;
	}
	/* "securemm" app requires the legacy command format */
	if (!strcmp(data->client.app_name, "securemm"))
		data->use_legacy_cmd = true;

	ret = __qseecom_send_cmd(data, &req);
	data->use_legacy_cmd = false;
	if (qseecom.support_bus_scaling)
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);

	/* drop the temporary clock votes taken above, if any */
	if (perf_enabled) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	}

	mutex_unlock(&app_access_lock);

	if (ret)
		return ret;

	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
			req.resp_len, req.resp_buf);
	return ret;
}
EXPORT_SYMBOL(qseecom_send_command);
4742
4743int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4744{
4745 int ret = 0;
4746
4747 if ((handle == NULL) || (handle->dev == NULL)) {
4748 pr_err("No valid kernel client\n");
4749 return -EINVAL;
4750 }
4751 if (high) {
4752 if (qseecom.support_bus_scaling) {
4753 mutex_lock(&qsee_bw_mutex);
4754 __qseecom_register_bus_bandwidth_needs(handle->dev,
4755 HIGH);
4756 mutex_unlock(&qsee_bw_mutex);
4757 } else {
4758 ret = qseecom_perf_enable(handle->dev);
4759 if (ret)
4760 pr_err("Failed to vote for clock with err %d\n",
4761 ret);
4762 }
4763 } else {
4764 if (!qseecom.support_bus_scaling) {
4765 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4766 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4767 } else {
4768 mutex_lock(&qsee_bw_mutex);
4769 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4770 mutex_unlock(&qsee_bw_mutex);
4771 }
4772 }
4773 return ret;
4774}
4775EXPORT_SYMBOL(qseecom_set_bandwidth);
4776
/*
 * Entry point for the smcinvoke driver to hand over a listener request
 * or blocked-app notification that arrived on its SCM channel.  The
 * scm_desc return words are repacked into a qseecom_command_scm_resp,
 * run through the normal listener-processing path (reentrant or not,
 * per qsee_reentrancy_support), and the final response is written back
 * into @desc for the caller to return to TZ.
 */
int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
{
	struct qseecom_registered_app_list dummy_app_entry = { {0} };
	struct qseecom_dev_handle dummy_private_data = {0};
	struct qseecom_command_scm_resp resp;
	int ret = 0;

	if (!desc) {
		pr_err("desc is NULL\n");
		return -EINVAL;
	}

	resp.result = desc->ret[0]; /*req_cmd*/
	resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
	resp.data = desc->ret[2]; /*listener_id*/

	mutex_lock(&app_access_lock);
	if (qseecom.qsee_reentrancy_support)
		ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
					&dummy_private_data);
	else
		ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
					&resp);
	mutex_unlock(&app_access_lock);
	if (ret)
		pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
			(int)desc->ret[0], (int)desc->ret[2],
			(int)desc->ret[1], ret);
	/* pass the (possibly updated) response back via the descriptor */
	desc->ret[0] = resp.result;
	desc->ret[1] = resp.resp_type;
	desc->ret[2] = resp.data;
	return ret;
}
EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4811
4812static int qseecom_send_resp(void)
4813{
4814 qseecom.send_resp_flag = 1;
4815 wake_up_interruptible(&qseecom.send_resp_wq);
4816 return 0;
4817}
4818
4819static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4820{
4821 struct qseecom_registered_listener_list *this_lstnr = NULL;
4822
4823 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4824 this_lstnr = __qseecom_find_svc(data->listener.id);
4825 if (this_lstnr == NULL)
4826 return -EINVAL;
4827 qseecom.send_resp_flag = 1;
4828 this_lstnr->send_resp_flag = 1;
4829 wake_up_interruptible(&qseecom.send_resp_wq);
4830 return 0;
4831}
4832
/*
 * Validate a listener's modified-fd response before it is acted on:
 * the response buffer pointer/length must lie entirely within the
 * listener's shared buffer (with explicit integer-overflow checks on
 * both the buffer pointer and the shared-buffer base), and every ion-fd
 * command-buffer offset must fall inside the response.
 *
 * Returns 0 when valid, -EINVAL otherwise.
 */
static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
			struct qseecom_send_modfd_listener_resp *resp,
			struct qseecom_registered_listener_list *this_lstnr)
{
	int i;

	if (!data || !resp || !this_lstnr) {
		pr_err("listener handle or resp msg is null\n");
		return -EINVAL;
	}

	if (resp->resp_buf_ptr == NULL) {
		pr_err("resp buffer is null\n");
		return -EINVAL;
	}
	/* validate resp buf length */
	if ((resp->resp_len == 0) ||
			(resp->resp_len > this_lstnr->sb_length)) {
		pr_err("resp buf length %d not valid\n", resp->resp_len);
		return -EINVAL;
	}

	/* overflow checks must precede the range arithmetic below */
	if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)this_lstnr->user_virt_sb_base >
					(ULONG_MAX - this_lstnr->sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* validate resp buf: [ptr, ptr+len) within [base, base+sb_length) */
	if (((uintptr_t)resp->resp_buf_ptr <
		(uintptr_t)this_lstnr->user_virt_sb_base) ||
		((uintptr_t)resp->resp_buf_ptr >=
		((uintptr_t)this_lstnr->user_virt_sb_base +
				this_lstnr->sb_length)) ||
		(((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
		((uintptr_t)this_lstnr->user_virt_sb_base +
						this_lstnr->sb_length))) {
		pr_err("resp buf is out of shared buffer region\n");
		return -EINVAL;
	}

	/* validate offsets */
	for (i = 0; i < MAX_ION_FD; i++) {
		if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
			pr_err("Invalid offset %d = 0x%x\n",
				i, resp->ifd_data[i].cmd_buf_offset);
			return -EINVAL;
		}
	}

	return 0;
}
4888
/*
 * Handle a listener's modified-fd response ioctl: copy and validate the
 * response from userspace, translate its user-space shared-buffer
 * pointer to the kernel mapping, patch in the ion-fd addresses (32- or
 * 64-bit layout per @is_64bit_addr), then flag and wake the response
 * waitqueue.
 *
 * Returns 0 on success, -EINVAL on any validation/copy failure.
 */
static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp, bool is_64bit_addr)
{
	struct qseecom_send_modfd_listener_resp resp;
	struct qseecom_registered_listener_list *this_lstnr = NULL;

	if (copy_from_user(&resp, argp, sizeof(resp))) {
		pr_err("copy_from_user failed");
		return -EINVAL;
	}

	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (this_lstnr == NULL)
		return -EINVAL;

	if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
		return -EINVAL;

	/* rebase the user pointer onto the kernel shared-buffer mapping */
	resp.resp_buf_ptr = this_lstnr->sb_virt +
		(uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);

	if (!is_64bit_addr)
		__qseecom_update_cmd_buf(&resp, false, data);
	else
		__qseecom_update_cmd_buf_64(&resp, false, data);
	qseecom.send_resp_flag = 1;
	this_lstnr->send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4919
4920static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
4921 void __user *argp)
4922{
4923 return __qseecom_send_modfd_resp(data, argp, false);
4924}
4925
4926static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
4927 void __user *argp)
4928{
4929 return __qseecom_send_modfd_resp(data, argp, true);
4930}
4931
4932static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
4933 void __user *argp)
4934{
4935 struct qseecom_qseos_version_req req;
4936
4937 if (copy_from_user(&req, argp, sizeof(req))) {
4938 pr_err("copy_from_user failed");
4939 return -EINVAL;
4940 }
4941 req.qseos_version = qseecom.qseos_version;
4942 if (copy_to_user(argp, &req, sizeof(req))) {
4943 pr_err("copy_to_user failed");
4944 return -EINVAL;
4945 }
4946 return 0;
4947}
4948
/*
 * Reference-counted enable of the crypto-engine clocks for CE instance
 * @ce (CLK_QSEE or CLK_CE_DRV).  Clocks are brought up in core ->
 * iface -> bus order; on failure the already-enabled ones are rolled
 * back in reverse via the goto chain.
 *
 * Returns 0 on success, -EINVAL for an unsupported CE, -EIO otherwise.
 */
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
{
	int rc = 0;
	struct qseecom_clk *qclk = NULL;

	if (qseecom.no_clock_support)
		return 0;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	if (ce == CLK_CE_DRV)
		qclk = &qseecom.ce_drv;

	if (qclk == NULL) {
		pr_err("CLK type not supported\n");
		return -EINVAL;
	}
	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == ULONG_MAX) {
		pr_err("clk_access_cnt beyond limitation\n");
		goto err;
	}
	/* clocks already on: just take another reference */
	if (qclk->clk_access_cnt > 0) {
		qclk->clk_access_cnt++;
		mutex_unlock(&clk_access_lock);
		return rc;
	}

	/* Enable CE core clk */
	if (qclk->ce_core_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_core_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core clk\n");
			goto err;
		}
	}
	/* Enable CE clk */
	if (qclk->ce_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE iface clk\n");
			goto ce_clk_err;
		}
	}
	/* Enable AXI clk */
	if (qclk->ce_bus_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_bus_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE bus clk\n");
			goto ce_bus_clk_err;
		}
	}
	qclk->clk_access_cnt++;
	mutex_unlock(&clk_access_lock);
	return 0;

/* unwind in reverse order of enabling */
ce_bus_clk_err:
	if (qclk->ce_clk != NULL)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk != NULL)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	return -EIO;
}
5016
5017static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5018{
5019 struct qseecom_clk *qclk;
5020
5021 if (qseecom.no_clock_support)
5022 return;
5023
5024 if (ce == CLK_QSEE)
5025 qclk = &qseecom.qsee;
5026 else
5027 qclk = &qseecom.ce_drv;
5028
5029 mutex_lock(&clk_access_lock);
5030
5031 if (qclk->clk_access_cnt == 0) {
5032 mutex_unlock(&clk_access_lock);
5033 return;
5034 }
5035
5036 if (qclk->clk_access_cnt == 1) {
5037 if (qclk->ce_clk != NULL)
5038 clk_disable_unprepare(qclk->ce_clk);
5039 if (qclk->ce_core_clk != NULL)
5040 clk_disable_unprepare(qclk->ce_core_clk);
5041 if (qclk->ce_bus_clk != NULL)
5042 clk_disable_unprepare(qclk->ce_bus_clk);
5043 }
5044 qclk->clk_access_cnt--;
5045 mutex_unlock(&clk_access_lock);
5046}
5047
/*
 * Take a bandwidth/clock vote of @clk_type (CLK_DFAB or CLK_SFPB) for
 * client @data, reference-counted under qsee_bw_mutex.  The first vote
 * of a type issues a bus-scale request: level 1 appears to mean DFAB
 * only, 2 SFPB only, and 3 both (when the other type already holds
 * votes) -- inferred from the vote pattern below; confirm against the
 * msm-bus scale table.  When ce_core_src_clk exists, the QSEE clocks
 * are enabled alongside the first vote.
 *
 * Returns 0 on success or the bus-scale error code.
 */
static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
						int32_t clk_type)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	if (qseecom.no_clock_support)
		return 0;

	qclk = &qseecom.qsee;
	if (!qseecom.qsee_perf_client)
		return ret;

	switch (clk_type) {
	case CLK_DFAB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_bw_count) {
			/* SFPB votes present: escalate to the combined level */
			if (qseecom.qsee_sfpb_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 1);
					/* roll back clk enable on failure */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret)
				pr_err("DFAB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_bw_count++;
				data->perf_enabled = true;
			}
		} else {
			/* vote already active: just take a reference */
			qseecom.qsee_bw_count++;
			data->perf_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	case CLK_SFPB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_sfpb_bw_count) {
			/* DFAB votes present: escalate to the combined level */
			if (qseecom.qsee_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 2);
					/* roll back clk enable on failure */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}

			if (ret)
				pr_err("SFPB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_sfpb_bw_count++;
				data->fast_load_enabled = true;
			}
		} else {
			/* vote already active: just take a reference */
			qseecom.qsee_sfpb_bw_count++;
			data->fast_load_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	default:
		pr_err("Clock type not defined\n");
		break;
	}
	return ret;
}
5131
5132static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5133 int32_t clk_type)
5134{
5135 int32_t ret = 0;
5136 struct qseecom_clk *qclk;
5137
5138 qclk = &qseecom.qsee;
5139
5140 if (qseecom.no_clock_support)
5141 return;
5142 if (!qseecom.qsee_perf_client)
5143 return;
5144
5145 switch (clk_type) {
5146 case CLK_DFAB:
5147 mutex_lock(&qsee_bw_mutex);
5148 if (qseecom.qsee_bw_count == 0) {
5149 pr_err("Client error.Extra call to disable DFAB clk\n");
5150 mutex_unlock(&qsee_bw_mutex);
5151 return;
5152 }
5153
5154 if (qseecom.qsee_bw_count == 1) {
5155 if (qseecom.qsee_sfpb_bw_count > 0)
5156 ret = msm_bus_scale_client_update_request(
5157 qseecom.qsee_perf_client, 2);
5158 else {
5159 ret = msm_bus_scale_client_update_request(
5160 qseecom.qsee_perf_client, 0);
5161 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5162 __qseecom_disable_clk(CLK_QSEE);
5163 }
5164 if (ret)
5165 pr_err("SFPB Bandwidth req fail (%d)\n",
5166 ret);
5167 else {
5168 qseecom.qsee_bw_count--;
5169 data->perf_enabled = false;
5170 }
5171 } else {
5172 qseecom.qsee_bw_count--;
5173 data->perf_enabled = false;
5174 }
5175 mutex_unlock(&qsee_bw_mutex);
5176 break;
5177 case CLK_SFPB:
5178 mutex_lock(&qsee_bw_mutex);
5179 if (qseecom.qsee_sfpb_bw_count == 0) {
5180 pr_err("Client error.Extra call to disable SFPB clk\n");
5181 mutex_unlock(&qsee_bw_mutex);
5182 return;
5183 }
5184 if (qseecom.qsee_sfpb_bw_count == 1) {
5185 if (qseecom.qsee_bw_count > 0)
5186 ret = msm_bus_scale_client_update_request(
5187 qseecom.qsee_perf_client, 1);
5188 else {
5189 ret = msm_bus_scale_client_update_request(
5190 qseecom.qsee_perf_client, 0);
5191 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5192 __qseecom_disable_clk(CLK_QSEE);
5193 }
5194 if (ret)
5195 pr_err("SFPB Bandwidth req fail (%d)\n",
5196 ret);
5197 else {
5198 qseecom.qsee_sfpb_bw_count--;
5199 data->fast_load_enabled = false;
5200 }
5201 } else {
5202 qseecom.qsee_sfpb_bw_count--;
5203 data->fast_load_enabled = false;
5204 }
5205 mutex_unlock(&qsee_bw_mutex);
5206 break;
5207 default:
5208 pr_err("Clock type not defined\n");
5209 break;
5210 }
5211
5212}
5213
5214static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5215 void __user *argp)
5216{
5217 struct ion_handle *ihandle; /* Ion handle */
5218 struct qseecom_load_img_req load_img_req;
5219 int uret = 0;
5220 int ret;
5221 ion_phys_addr_t pa = 0;
5222 size_t len;
5223 struct qseecom_load_app_ireq load_req;
5224 struct qseecom_load_app_64bit_ireq load_req_64bit;
5225 struct qseecom_command_scm_resp resp;
5226 void *cmd_buf = NULL;
5227 size_t cmd_len;
5228 /* Copy the relevant information needed for loading the image */
5229 if (copy_from_user(&load_img_req,
5230 (void __user *)argp,
5231 sizeof(struct qseecom_load_img_req))) {
5232 pr_err("copy_from_user failed\n");
5233 return -EFAULT;
5234 }
5235
5236 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005237 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005238 load_img_req.ifd_data_fd);
5239 if (IS_ERR_OR_NULL(ihandle)) {
5240 pr_err("Ion client could not retrieve the handle\n");
5241 return -ENOMEM;
5242 }
5243
5244 /* Get the physical address of the ION BUF */
5245 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5246 if (ret) {
5247 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5248 ret);
5249 return ret;
5250 }
5251 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5252 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5253 len, load_img_req.mdt_len,
5254 load_img_req.img_len);
5255 return ret;
5256 }
5257 /* Populate the structure for sending scm call to load image */
5258 if (qseecom.qsee_version < QSEE_VERSION_40) {
5259 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5260 load_req.mdt_len = load_img_req.mdt_len;
5261 load_req.img_len = load_img_req.img_len;
5262 load_req.phy_addr = (uint32_t)pa;
5263 cmd_buf = (void *)&load_req;
5264 cmd_len = sizeof(struct qseecom_load_app_ireq);
5265 } else {
5266 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5267 load_req_64bit.mdt_len = load_img_req.mdt_len;
5268 load_req_64bit.img_len = load_img_req.img_len;
5269 load_req_64bit.phy_addr = (uint64_t)pa;
5270 cmd_buf = (void *)&load_req_64bit;
5271 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5272 }
5273
5274 if (qseecom.support_bus_scaling) {
5275 mutex_lock(&qsee_bw_mutex);
5276 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5277 mutex_unlock(&qsee_bw_mutex);
5278 if (ret) {
5279 ret = -EIO;
5280 goto exit_cpu_restore;
5281 }
5282 }
5283
5284 /* Vote for the SFPB clock */
5285 ret = __qseecom_enable_clk_scale_up(data);
5286 if (ret) {
5287 ret = -EIO;
5288 goto exit_register_bus_bandwidth_needs;
5289 }
5290 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5291 ION_IOC_CLEAN_INV_CACHES);
5292 if (ret) {
5293 pr_err("cache operation failed %d\n", ret);
5294 goto exit_disable_clock;
5295 }
5296 /* SCM_CALL to load the external elf */
5297 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5298 &resp, sizeof(resp));
5299 if (ret) {
5300 pr_err("scm_call to load failed : ret %d\n",
5301 ret);
5302 ret = -EFAULT;
5303 goto exit_disable_clock;
5304 }
5305
5306 switch (resp.result) {
5307 case QSEOS_RESULT_SUCCESS:
5308 break;
5309 case QSEOS_RESULT_INCOMPLETE:
5310 pr_err("%s: qseos result incomplete\n", __func__);
5311 ret = __qseecom_process_incomplete_cmd(data, &resp);
5312 if (ret)
5313 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5314 break;
5315 case QSEOS_RESULT_FAILURE:
5316 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5317 ret = -EFAULT;
5318 break;
5319 default:
5320 pr_err("scm_call response result %d not supported\n",
5321 resp.result);
5322 ret = -EFAULT;
5323 break;
5324 }
5325
5326exit_disable_clock:
5327 __qseecom_disable_clk_scale_down(data);
5328
5329exit_register_bus_bandwidth_needs:
5330 if (qseecom.support_bus_scaling) {
5331 mutex_lock(&qsee_bw_mutex);
5332 uret = qseecom_unregister_bus_bandwidth_needs(data);
5333 mutex_unlock(&qsee_bw_mutex);
5334 if (uret)
5335 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5336 uret, ret);
5337 }
5338
5339exit_cpu_restore:
5340 /* Deallocate the handle */
5341 if (!IS_ERR_OR_NULL(ihandle))
5342 ion_free(qseecom.ion_clnt, ihandle);
5343 return ret;
5344}
5345
5346static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5347{
5348 int ret = 0;
5349 struct qseecom_command_scm_resp resp;
5350 struct qseecom_unload_app_ireq req;
5351
5352 /* unavailable client app */
5353 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5354
5355 /* Populate the structure for sending scm call to unload image */
5356 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5357
5358 /* SCM_CALL to unload the external elf */
5359 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5360 sizeof(struct qseecom_unload_app_ireq),
5361 &resp, sizeof(resp));
5362 if (ret) {
5363 pr_err("scm_call to unload failed : ret %d\n",
5364 ret);
5365 ret = -EFAULT;
5366 goto qseecom_unload_external_elf_scm_err;
5367 }
5368 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5369 ret = __qseecom_process_incomplete_cmd(data, &resp);
5370 if (ret)
5371 pr_err("process_incomplete_cmd fail err: %d\n",
5372 ret);
5373 } else {
5374 if (resp.result != QSEOS_RESULT_SUCCESS) {
5375 pr_err("scm_call to unload image failed resp.result =%d\n",
5376 resp.result);
5377 ret = -EFAULT;
5378 }
5379 }
5380
5381qseecom_unload_external_elf_scm_err:
5382
5383 return ret;
5384}
5385
5386static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5387 void __user *argp)
5388{
5389
5390 int32_t ret;
5391 struct qseecom_qseos_app_load_query query_req;
5392 struct qseecom_check_app_ireq req;
5393 struct qseecom_registered_app_list *entry = NULL;
5394 unsigned long flags = 0;
5395 uint32_t app_arch = 0, app_id = 0;
5396 bool found_app = false;
5397
5398 /* Copy the relevant information needed for loading the image */
5399 if (copy_from_user(&query_req,
5400 (void __user *)argp,
5401 sizeof(struct qseecom_qseos_app_load_query))) {
5402 pr_err("copy_from_user failed\n");
5403 return -EFAULT;
5404 }
5405
5406 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5407 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5408 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5409
5410 ret = __qseecom_check_app_exists(req, &app_id);
5411 if (ret) {
5412 pr_err(" scm call to check if app is loaded failed");
5413 return ret; /* scm call failed */
5414 }
5415 if (app_id) {
5416 pr_debug("App id %d (%s) already exists\n", app_id,
5417 (char *)(req.app_name));
5418 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5419 list_for_each_entry(entry,
5420 &qseecom.registered_app_list_head, list){
5421 if (entry->app_id == app_id) {
5422 app_arch = entry->app_arch;
5423 entry->ref_cnt++;
5424 found_app = true;
5425 break;
5426 }
5427 }
5428 spin_unlock_irqrestore(
5429 &qseecom.registered_app_list_lock, flags);
5430 data->client.app_id = app_id;
5431 query_req.app_id = app_id;
5432 if (app_arch) {
5433 data->client.app_arch = app_arch;
5434 query_req.app_arch = app_arch;
5435 } else {
5436 data->client.app_arch = 0;
5437 query_req.app_arch = 0;
5438 }
5439 strlcpy(data->client.app_name, query_req.app_name,
5440 MAX_APP_NAME_SIZE);
5441 /*
5442 * If app was loaded by appsbl before and was not registered,
5443 * regiser this app now.
5444 */
5445 if (!found_app) {
5446 pr_debug("Register app %d [%s] which was loaded before\n",
5447 ret, (char *)query_req.app_name);
5448 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5449 if (!entry) {
5450 pr_err("kmalloc for app entry failed\n");
5451 return -ENOMEM;
5452 }
5453 entry->app_id = app_id;
5454 entry->ref_cnt = 1;
5455 entry->app_arch = data->client.app_arch;
5456 strlcpy(entry->app_name, data->client.app_name,
5457 MAX_APP_NAME_SIZE);
5458 entry->app_blocked = false;
5459 entry->blocked_on_listener_id = 0;
5460 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5461 flags);
5462 list_add_tail(&entry->list,
5463 &qseecom.registered_app_list_head);
5464 spin_unlock_irqrestore(
5465 &qseecom.registered_app_list_lock, flags);
5466 }
5467 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5468 pr_err("copy_to_user failed\n");
5469 return -EFAULT;
5470 }
5471 return -EEXIST; /* app already loaded */
5472 } else {
5473 return 0; /* app not loaded */
5474 }
5475}
5476
5477static int __qseecom_get_ce_pipe_info(
5478 enum qseecom_key_management_usage_type usage,
5479 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5480{
5481 int ret = -EINVAL;
5482 int i, j;
5483 struct qseecom_ce_info_use *p = NULL;
5484 int total = 0;
5485 struct qseecom_ce_pipe_entry *pcepipe;
5486
5487 switch (usage) {
5488 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5489 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5490 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5491 if (qseecom.support_fde) {
5492 p = qseecom.ce_info.fde;
5493 total = qseecom.ce_info.num_fde;
5494 } else {
5495 pr_err("system does not support fde\n");
5496 return -EINVAL;
5497 }
5498 break;
5499 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5500 if (qseecom.support_pfe) {
5501 p = qseecom.ce_info.pfe;
5502 total = qseecom.ce_info.num_pfe;
5503 } else {
5504 pr_err("system does not support pfe\n");
5505 return -EINVAL;
5506 }
5507 break;
5508 default:
5509 pr_err("unsupported usage %d\n", usage);
5510 return -EINVAL;
5511 }
5512
5513 for (j = 0; j < total; j++) {
5514 if (p->unit_num == unit) {
5515 pcepipe = p->ce_pipe_entry;
5516 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5517 (*ce_hw)[i] = pcepipe->ce_num;
5518 *pipe = pcepipe->ce_pipe_pair;
5519 pcepipe++;
5520 }
5521 ret = 0;
5522 break;
5523 }
5524 p++;
5525 }
5526 return ret;
5527}
5528
/*
 * __qseecom_generate_and_save_key() - ask TZ to generate and persist a
 * storage-encryption key for @usage.
 * @data:  per-client handle, used to pump INCOMPLETE responses.
 * @usage: key management usage; validated against the supported range.
 * @ireq:  pre-filled QSEOS_GENERATE_KEY request.
 *
 * "Key ID already exists" is treated as success so repeated key
 * creation is idempotent.  The QSEE clock is held across the scm call.
 * Returns 0 on success or a negative errno.
 */
static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_generate_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_generate_ireq),
		&resp, sizeof(resp));
	if (ret) {
		/*
		 * "Key already exists" is surfaced as an scm -EINVAL;
		 * map it back to success.
		 */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
			pr_debug("Key ID exists.\n");
			ret = 0;
		} else {
			pr_err("scm call to generate key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto generate_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
		pr_debug("Key ID exists.\n");
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener services; run them to completion */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
				pr_debug("Key ID exists.\n");
				ret = 0;
			} else {
				pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			}
		}
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("gen key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}
generate_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5588
/*
 * __qseecom_delete_saved_key() - ask TZ to delete a persisted
 * storage-encryption key.
 * @data:  per-client handle, used to pump INCOMPLETE responses.
 * @usage: key management usage; validated against the supported range.
 * @ireq:  pre-filled QSEOS_DELETE_KEY request.
 *
 * A "max password attempts" response maps to -ERANGE so callers can
 * distinguish it from generic failure.  The QSEE clock is held across
 * the scm call.  Returns 0 on success or a negative errno.
 */
static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_delete_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_delete_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/* MAX_ATTEMPT arrives as an scm -EINVAL; map to -ERANGE */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
			pr_debug("Max attempts to input password reached.\n");
			ret = -ERANGE;
		} else {
			pr_err("scm call to delete key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto del_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener services; run them to completion */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
				pr_debug("Max attempts to input password reached.\n");
				ret = -ERANGE;
			}
		}
		break;
	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
		pr_debug("Max attempts to input password reached.\n");
		ret = -ERANGE;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Delete key scm call failed resp.result %d\n",
			resp.result);
		ret = -EINVAL;
		break;
	}
del_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5649
5650static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5651 enum qseecom_key_management_usage_type usage,
5652 struct qseecom_key_select_ireq *ireq)
5653{
5654 struct qseecom_command_scm_resp resp;
5655 int ret;
5656
5657 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5658 usage >= QSEOS_KM_USAGE_MAX) {
5659 pr_err("Error:: unsupported usage %d\n", usage);
5660 return -EFAULT;
5661 }
5662 ret = __qseecom_enable_clk(CLK_QSEE);
5663 if (ret)
5664 return ret;
5665
5666 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5667 ret = __qseecom_enable_clk(CLK_CE_DRV);
5668 if (ret)
5669 return ret;
5670 }
5671
5672 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5673 ireq, sizeof(struct qseecom_key_select_ireq),
5674 &resp, sizeof(struct qseecom_command_scm_resp));
5675 if (ret) {
5676 if (ret == -EINVAL &&
5677 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5678 pr_debug("Max attempts to input password reached.\n");
5679 ret = -ERANGE;
5680 } else if (ret == -EINVAL &&
5681 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5682 pr_debug("Set Key operation under processing...\n");
5683 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5684 } else {
5685 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5686 ret);
5687 ret = -EFAULT;
5688 }
5689 goto set_key_exit;
5690 }
5691
5692 switch (resp.result) {
5693 case QSEOS_RESULT_SUCCESS:
5694 break;
5695 case QSEOS_RESULT_INCOMPLETE:
5696 ret = __qseecom_process_incomplete_cmd(data, &resp);
5697 if (ret) {
5698 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5699 resp.result);
5700 if (resp.result ==
5701 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5702 pr_debug("Set Key operation under processing...\n");
5703 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5704 }
5705 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5706 pr_debug("Max attempts to input password reached.\n");
5707 ret = -ERANGE;
5708 }
5709 }
5710 break;
5711 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5712 pr_debug("Max attempts to input password reached.\n");
5713 ret = -ERANGE;
5714 break;
5715 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5716 pr_debug("Set Key operation under processing...\n");
5717 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5718 break;
5719 case QSEOS_RESULT_FAILURE:
5720 default:
5721 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5722 ret = -EINVAL;
5723 break;
5724 }
5725set_key_exit:
5726 __qseecom_disable_clk(CLK_QSEE);
5727 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5728 __qseecom_disable_clk(CLK_CE_DRV);
5729 return ret;
5730}
5731
/*
 * __qseecom_update_current_key_user_info() - update the user-bound
 * hash (password info) of a persisted key via
 * QSEOS_UPDATE_KEY_USERINFO.
 * @data:  per-client handle, used to pump INCOMPLETE responses.
 * @usage: key management usage; validated against the supported range.
 * @ireq:  pre-filled userinfo-update request.
 *
 * A pending TZ key operation is returned as the positive value
 * QSEOS_RESULT_FAIL_PENDING_OPERATION so callers can retry.  The QSEE
 * clock is held across the scm call.
 */
static int __qseecom_update_current_key_user_info(
		struct qseecom_dev_handle *data,
		enum qseecom_key_management_usage_type usage,
		struct qseecom_key_userinfo_update_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/* A pending operation arrives as an scm -EINVAL */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		} else {
			pr_err("scm call to update key userinfo failed: %d\n",
					ret);
			__qseecom_disable_clk(CLK_QSEE);
			return -EFAULT;
		}
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener services; run them to completion */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (resp.result ==
			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		}
		if (ret)
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
		break;
	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
		pr_debug("Update Key operation under processing...\n");
		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Set key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}

	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5793
5794
5795static int qseecom_enable_ice_setup(int usage)
5796{
5797 int ret = 0;
5798
5799 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5800 ret = qcom_ice_setup_ice_hw("ufs", true);
5801 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5802 ret = qcom_ice_setup_ice_hw("sdcc", true);
5803
5804 return ret;
5805}
5806
5807static int qseecom_disable_ice_setup(int usage)
5808{
5809 int ret = 0;
5810
5811 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5812 ret = qcom_ice_setup_ice_hw("ufs", false);
5813 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5814 ret = qcom_ice_setup_ice_hw("sdcc", false);
5815
5816 return ret;
5817}
5818
5819static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5820{
5821 struct qseecom_ce_info_use *pce_info_use, *p;
5822 int total = 0;
5823 int i;
5824
5825 switch (usage) {
5826 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5827 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5828 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5829 p = qseecom.ce_info.fde;
5830 total = qseecom.ce_info.num_fde;
5831 break;
5832 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5833 p = qseecom.ce_info.pfe;
5834 total = qseecom.ce_info.num_pfe;
5835 break;
5836 default:
5837 pr_err("unsupported usage %d\n", usage);
5838 return -EINVAL;
5839 }
5840
5841 pce_info_use = NULL;
5842
5843 for (i = 0; i < total; i++) {
5844 if (p->unit_num == unit) {
5845 pce_info_use = p;
5846 break;
5847 }
5848 p++;
5849 }
5850 if (!pce_info_use) {
5851 pr_err("can not find %d\n", unit);
5852 return -EINVAL;
5853 }
5854 return pce_info_use->num_ce_pipe_entries;
5855}
5856
5857static int qseecom_create_key(struct qseecom_dev_handle *data,
5858 void __user *argp)
5859{
5860 int i;
5861 uint32_t *ce_hw = NULL;
5862 uint32_t pipe = 0;
5863 int ret = 0;
5864 uint32_t flags = 0;
5865 struct qseecom_create_key_req create_key_req;
5866 struct qseecom_key_generate_ireq generate_key_ireq;
5867 struct qseecom_key_select_ireq set_key_ireq;
5868 uint32_t entries = 0;
5869
5870 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
5871 if (ret) {
5872 pr_err("copy_from_user failed\n");
5873 return ret;
5874 }
5875
5876 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5877 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5878 pr_err("unsupported usage %d\n", create_key_req.usage);
5879 ret = -EFAULT;
5880 return ret;
5881 }
5882 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5883 create_key_req.usage);
5884 if (entries <= 0) {
5885 pr_err("no ce instance for usage %d instance %d\n",
5886 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
5887 ret = -EINVAL;
5888 return ret;
5889 }
5890
5891 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5892 if (!ce_hw) {
5893 ret = -ENOMEM;
5894 return ret;
5895 }
5896 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
5897 DEFAULT_CE_INFO_UNIT);
5898 if (ret) {
5899 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5900 ret = -EINVAL;
5901 goto free_buf;
5902 }
5903
5904 if (qseecom.fde_key_size)
5905 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
5906 else
5907 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
5908
5909 generate_key_ireq.flags = flags;
5910 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
5911 memset((void *)generate_key_ireq.key_id,
5912 0, QSEECOM_KEY_ID_SIZE);
5913 memset((void *)generate_key_ireq.hash32,
5914 0, QSEECOM_HASH_SIZE);
5915 memcpy((void *)generate_key_ireq.key_id,
5916 (void *)key_id_array[create_key_req.usage].desc,
5917 QSEECOM_KEY_ID_SIZE);
5918 memcpy((void *)generate_key_ireq.hash32,
5919 (void *)create_key_req.hash32,
5920 QSEECOM_HASH_SIZE);
5921
5922 ret = __qseecom_generate_and_save_key(data,
5923 create_key_req.usage, &generate_key_ireq);
5924 if (ret) {
5925 pr_err("Failed to generate key on storage: %d\n", ret);
5926 goto free_buf;
5927 }
5928
5929 for (i = 0; i < entries; i++) {
5930 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
5931 if (create_key_req.usage ==
5932 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
5933 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
5934 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5935
5936 } else if (create_key_req.usage ==
5937 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
5938 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
5939 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5940
5941 } else {
5942 set_key_ireq.ce = ce_hw[i];
5943 set_key_ireq.pipe = pipe;
5944 }
5945 set_key_ireq.flags = flags;
5946
5947 /* set both PIPE_ENC and PIPE_ENC_XTS*/
5948 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
5949 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
5950 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5951 memcpy((void *)set_key_ireq.key_id,
5952 (void *)key_id_array[create_key_req.usage].desc,
5953 QSEECOM_KEY_ID_SIZE);
5954 memcpy((void *)set_key_ireq.hash32,
5955 (void *)create_key_req.hash32,
5956 QSEECOM_HASH_SIZE);
5957 /*
5958 * It will return false if it is GPCE based crypto instance or
5959 * ICE is setup properly
5960 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07005961 ret = qseecom_enable_ice_setup(create_key_req.usage);
5962 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005963 goto free_buf;
5964
5965 do {
5966 ret = __qseecom_set_clear_ce_key(data,
5967 create_key_req.usage,
5968 &set_key_ireq);
5969 /*
5970 * wait a little before calling scm again to let other
5971 * processes run
5972 */
5973 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
5974 msleep(50);
5975
5976 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
5977
5978 qseecom_disable_ice_setup(create_key_req.usage);
5979
5980 if (ret) {
5981 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
5982 pipe, ce_hw[i], ret);
5983 goto free_buf;
5984 } else {
5985 pr_err("Set the key successfully\n");
5986 if ((create_key_req.usage ==
5987 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
5988 (create_key_req.usage ==
5989 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
5990 goto free_buf;
5991 }
5992 }
5993
5994free_buf:
5995 kzfree(ce_hw);
5996 return ret;
5997}
5998
5999static int qseecom_wipe_key(struct qseecom_dev_handle *data,
6000 void __user *argp)
6001{
6002 uint32_t *ce_hw = NULL;
6003 uint32_t pipe = 0;
6004 int ret = 0;
6005 uint32_t flags = 0;
6006 int i, j;
6007 struct qseecom_wipe_key_req wipe_key_req;
6008 struct qseecom_key_delete_ireq delete_key_ireq;
6009 struct qseecom_key_select_ireq clear_key_ireq;
6010 uint32_t entries = 0;
6011
6012 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6013 if (ret) {
6014 pr_err("copy_from_user failed\n");
6015 return ret;
6016 }
6017
6018 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6019 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6020 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6021 ret = -EFAULT;
6022 return ret;
6023 }
6024
6025 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6026 wipe_key_req.usage);
6027 if (entries <= 0) {
6028 pr_err("no ce instance for usage %d instance %d\n",
6029 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6030 ret = -EINVAL;
6031 return ret;
6032 }
6033
6034 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6035 if (!ce_hw) {
6036 ret = -ENOMEM;
6037 return ret;
6038 }
6039
6040 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6041 DEFAULT_CE_INFO_UNIT);
6042 if (ret) {
6043 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6044 ret = -EINVAL;
6045 goto free_buf;
6046 }
6047
6048 if (wipe_key_req.wipe_key_flag) {
6049 delete_key_ireq.flags = flags;
6050 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6051 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6052 memcpy((void *)delete_key_ireq.key_id,
6053 (void *)key_id_array[wipe_key_req.usage].desc,
6054 QSEECOM_KEY_ID_SIZE);
6055 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6056
6057 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6058 &delete_key_ireq);
6059 if (ret) {
6060 pr_err("Failed to delete key from ssd storage: %d\n",
6061 ret);
6062 ret = -EFAULT;
6063 goto free_buf;
6064 }
6065 }
6066
6067 for (j = 0; j < entries; j++) {
6068 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6069 if (wipe_key_req.usage ==
6070 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6071 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6072 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6073 } else if (wipe_key_req.usage ==
6074 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6075 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6076 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6077 } else {
6078 clear_key_ireq.ce = ce_hw[j];
6079 clear_key_ireq.pipe = pipe;
6080 }
6081 clear_key_ireq.flags = flags;
6082 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6083 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6084 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6085 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6086
6087 /*
6088 * It will return false if it is GPCE based crypto instance or
6089 * ICE is setup properly
6090 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006091 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6092 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006093 goto free_buf;
6094
6095 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6096 &clear_key_ireq);
6097
6098 qseecom_disable_ice_setup(wipe_key_req.usage);
6099
6100 if (ret) {
6101 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6102 pipe, ce_hw[j], ret);
6103 ret = -EFAULT;
6104 goto free_buf;
6105 }
6106 }
6107
6108free_buf:
6109 kzfree(ce_hw);
6110 return ret;
6111}
6112
6113static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6114 void __user *argp)
6115{
6116 int ret = 0;
6117 uint32_t flags = 0;
6118 struct qseecom_update_key_userinfo_req update_key_req;
6119 struct qseecom_key_userinfo_update_ireq ireq;
6120
6121 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6122 if (ret) {
6123 pr_err("copy_from_user failed\n");
6124 return ret;
6125 }
6126
6127 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6128 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6129 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6130 return -EFAULT;
6131 }
6132
6133 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6134
6135 if (qseecom.fde_key_size)
6136 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6137 else
6138 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6139
6140 ireq.flags = flags;
6141 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6142 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6143 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6144 memcpy((void *)ireq.key_id,
6145 (void *)key_id_array[update_key_req.usage].desc,
6146 QSEECOM_KEY_ID_SIZE);
6147 memcpy((void *)ireq.current_hash32,
6148 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6149 memcpy((void *)ireq.new_hash32,
6150 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6151
6152 do {
6153 ret = __qseecom_update_current_key_user_info(data,
6154 update_key_req.usage,
6155 &ireq);
6156 /*
6157 * wait a little before calling scm again to let other
6158 * processes run
6159 */
6160 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6161 msleep(50);
6162
6163 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6164 if (ret) {
6165 pr_err("Failed to update key info: %d\n", ret);
6166 return ret;
6167 }
6168 return ret;
6169
6170}
6171static int qseecom_is_es_activated(void __user *argp)
6172{
6173 struct qseecom_is_es_activated_req req;
6174 struct qseecom_command_scm_resp resp;
6175 int ret;
6176
6177 if (qseecom.qsee_version < QSEE_VERSION_04) {
6178 pr_err("invalid qsee version\n");
6179 return -ENODEV;
6180 }
6181
6182 if (argp == NULL) {
6183 pr_err("arg is null\n");
6184 return -EINVAL;
6185 }
6186
6187 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6188 &req, sizeof(req), &resp, sizeof(resp));
6189 if (ret) {
6190 pr_err("scm_call failed\n");
6191 return ret;
6192 }
6193
6194 req.is_activated = resp.result;
6195 ret = copy_to_user(argp, &req, sizeof(req));
6196 if (ret) {
6197 pr_err("copy_to_user failed\n");
6198 return ret;
6199 }
6200
6201 return 0;
6202}
6203
6204static int qseecom_save_partition_hash(void __user *argp)
6205{
6206 struct qseecom_save_partition_hash_req req;
6207 struct qseecom_command_scm_resp resp;
6208 int ret;
6209
6210 memset(&resp, 0x00, sizeof(resp));
6211
6212 if (qseecom.qsee_version < QSEE_VERSION_04) {
6213 pr_err("invalid qsee version\n");
6214 return -ENODEV;
6215 }
6216
6217 if (argp == NULL) {
6218 pr_err("arg is null\n");
6219 return -EINVAL;
6220 }
6221
6222 ret = copy_from_user(&req, argp, sizeof(req));
6223 if (ret) {
6224 pr_err("copy_from_user failed\n");
6225 return ret;
6226 }
6227
6228 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6229 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6230 if (ret) {
6231 pr_err("qseecom_scm_call failed\n");
6232 return ret;
6233 }
6234
6235 return 0;
6236}
6237
6238static int qseecom_mdtp_cipher_dip(void __user *argp)
6239{
6240 struct qseecom_mdtp_cipher_dip_req req;
6241 u32 tzbuflenin, tzbuflenout;
6242 char *tzbufin = NULL, *tzbufout = NULL;
6243 struct scm_desc desc = {0};
6244 int ret;
6245
6246 do {
6247 /* Copy the parameters from userspace */
6248 if (argp == NULL) {
6249 pr_err("arg is null\n");
6250 ret = -EINVAL;
6251 break;
6252 }
6253
6254 ret = copy_from_user(&req, argp, sizeof(req));
6255 if (ret) {
6256 pr_err("copy_from_user failed, ret= %d\n", ret);
6257 break;
6258 }
6259
6260 if (req.in_buf == NULL || req.out_buf == NULL ||
6261 req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
6262 req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
6263 req.direction > 1) {
6264 pr_err("invalid parameters\n");
6265 ret = -EINVAL;
6266 break;
6267 }
6268
6269 /* Copy the input buffer from userspace to kernel space */
6270 tzbuflenin = PAGE_ALIGN(req.in_buf_size);
6271 tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
6272 if (!tzbufin) {
6273 pr_err("error allocating in buffer\n");
6274 ret = -ENOMEM;
6275 break;
6276 }
6277
6278 ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
6279 if (ret) {
6280 pr_err("copy_from_user failed, ret=%d\n", ret);
6281 break;
6282 }
6283
6284 dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
6285
6286 /* Prepare the output buffer in kernel space */
6287 tzbuflenout = PAGE_ALIGN(req.out_buf_size);
6288 tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
6289 if (!tzbufout) {
6290 pr_err("error allocating out buffer\n");
6291 ret = -ENOMEM;
6292 break;
6293 }
6294
6295 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6296
6297 /* Send the command to TZ */
6298 desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
6299 desc.args[0] = virt_to_phys(tzbufin);
6300 desc.args[1] = req.in_buf_size;
6301 desc.args[2] = virt_to_phys(tzbufout);
6302 desc.args[3] = req.out_buf_size;
6303 desc.args[4] = req.direction;
6304
6305 ret = __qseecom_enable_clk(CLK_QSEE);
6306 if (ret)
6307 break;
6308
6309 ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);
6310
6311 __qseecom_disable_clk(CLK_QSEE);
6312
6313 if (ret) {
6314 pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
6315 ret);
6316 break;
6317 }
6318
6319 /* Copy the output buffer from kernel space to userspace */
6320 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6321 ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
6322 if (ret) {
6323 pr_err("copy_to_user failed, ret=%d\n", ret);
6324 break;
6325 }
6326 } while (0);
6327
6328 kzfree(tzbufin);
6329 kzfree(tzbufout);
6330
6331 return ret;
6332}
6333
/*
 * Validate a QTEEC (GlobalPlatform TEE client) request before it is
 * forwarded to the secure world.
 *
 * Both req->req_ptr and req->resp_ptr are user virtual addresses that
 * must lie entirely inside the client's registered shared buffer
 * [user_virt_sb_base, user_virt_sb_base + sb_length).  The checks are
 * deliberately ordered: each arithmetic expression is proven
 * overflow-free before the check that relies on it runs.
 *
 * Returns 0 if the message is acceptable, -EINVAL for malformed input,
 * -ENOMEM if the combined lengths do not fit the shared buffer, and
 * -EFAULT if the handle is not a client-app handle.
 */
static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req)
{
	/* Handle must be fully set up with an ion-backed shared buffer. */
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	/* Only client-app handles may issue QTEEC commands. */
	if (data->type != QSEECOM_CLIENT_APP)
		return -EFAULT;

	/* Guard the req_len + resp_len sum used just below. */
	if (req->req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if (req->req_len + req->resp_len > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->req_len + req->resp_len), data->client.sb_length);
		return -ENOMEM;
	}

	if (req->req_ptr == NULL || req->resp_ptr == NULL) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* Request pointer must start inside the shared buffer. */
	if (((uintptr_t)req->req_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->req_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	/* Response pointer must start inside the shared buffer too. */
	if (((uintptr_t)req->resp_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if ((req->req_len == 0) || (req->resp_len == 0)) {
		pr_err("cmd buf lengtgh/response buf length not valid\n");
		return -EINVAL;
	}

	/* Guard ptr + len arithmetic against wraparound before the
	 * end-of-buffer check at the bottom.
	 */
	if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
		pr_err("Integer overflow in req_len & req_ptr\n");
		return -EINVAL;
	}

	if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_ptr\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* Finally: both [ptr, ptr+len) windows must end inside the
	 * shared buffer (all sums proven overflow-free above).
	 */
	if ((((uintptr_t)req->req_ptr + req->req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length)) ||
		(((uintptr_t)req->resp_ptr + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
6408
/*
 * Build the scatter-gather descriptor table for a pre-allocated
 * (secure-heap) ion fd used by a QTEEC command.
 *
 * Allocates a DMA-coherent buffer laid out as:
 *     [uint32_t nents][struct qseecom_sg_entry * nents]
 * records it in data->client.sec_buf_fd[fd_idx] (freed later by the
 * cleanup pass in __qseecom_update_qteec_req_buf), and leaves the
 * buffer's physical address/size for the caller to patch into the
 * request.
 *
 * Returns 0 on success, -ENOMEM on bad index or allocation failure.
 */
static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
				uint32_t fd_idx, struct sg_table *sg_ptr)
{
	struct scatterlist *sg = sg_ptr->sgl;
	struct qseecom_sg_entry *sg_entry;
	void *buf;
	uint i;
	size_t size;
	dma_addr_t coh_pmem;

	if (fd_idx >= MAX_ION_FD) {
		pr_err("fd_idx [%d] is invalid\n", fd_idx);
		return -ENOMEM;
	}
	/*
	 * Allocate a buffer, populate it with number of entry plus
	 * each sg entry's phy addr and length; then return the
	 * phy_addr of the buffer.
	 */
	size = sizeof(uint32_t) +
		sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
	/*
	 * NOTE(review): this rounds up to a page boundary but always adds
	 * a full extra page when size is already aligned; the usual form
	 * is PAGE_ALIGN(size). Behavior kept as-is (only over-allocates).
	 */
	size = (size + PAGE_SIZE) & PAGE_MASK;
	buf = dma_alloc_coherent(qseecom.pdev,
			size, &coh_pmem, GFP_KERNEL);
	if (buf == NULL) {
		pr_err("failed to alloc memory for sg buf\n");
		return -ENOMEM;
	}
	*(uint32_t *)buf = sg_ptr->nents;
	sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
	for (i = 0; i < sg_ptr->nents; i++) {
		/*
		 * NOTE(review): phys addr is truncated to 32 bits here —
		 * presumably the TZ interface for this path is 32-bit only;
		 * confirm against the secure-world ABI.
		 */
		sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
		sg_entry->len = sg->length;
		sg_entry++;
		sg = sg_next(sg);
	}
	/* Record the allocation so the cleanup pass can free it. */
	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
	data->client.sec_buf_fd[fd_idx].vbase = buf;
	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
	data->client.sec_buf_fd[fd_idx].size = size;
	return 0;
}
6451
6452static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6453 struct qseecom_dev_handle *data, bool cleanup)
6454{
6455 struct ion_handle *ihandle;
6456 int ret = 0;
6457 int i = 0;
6458 uint32_t *update;
6459 struct sg_table *sg_ptr = NULL;
6460 struct scatterlist *sg;
6461 struct qseecom_param_memref *memref;
6462
6463 if (req == NULL) {
6464 pr_err("Invalid address\n");
6465 return -EINVAL;
6466 }
6467 for (i = 0; i < MAX_ION_FD; i++) {
6468 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006469 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006470 req->ifd_data[i].fd);
6471 if (IS_ERR_OR_NULL(ihandle)) {
6472 pr_err("Ion client can't retrieve the handle\n");
6473 return -ENOMEM;
6474 }
6475 if ((req->req_len < sizeof(uint32_t)) ||
6476 (req->ifd_data[i].cmd_buf_offset >
6477 req->req_len - sizeof(uint32_t))) {
6478 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6479 req->req_len,
6480 req->ifd_data[i].cmd_buf_offset);
6481 return -EINVAL;
6482 }
6483 update = (uint32_t *)((char *) req->req_ptr +
6484 req->ifd_data[i].cmd_buf_offset);
6485 if (!update) {
6486 pr_err("update pointer is NULL\n");
6487 return -EINVAL;
6488 }
6489 } else {
6490 continue;
6491 }
6492 /* Populate the cmd data structure with the phys_addr */
6493 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6494 if (IS_ERR_OR_NULL(sg_ptr)) {
6495 pr_err("IOn client could not retrieve sg table\n");
6496 goto err;
6497 }
6498 sg = sg_ptr->sgl;
6499 if (sg == NULL) {
6500 pr_err("sg is NULL\n");
6501 goto err;
6502 }
6503 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6504 pr_err("Num of scat entr (%d)or length(%d) invalid\n",
6505 sg_ptr->nents, sg->length);
6506 goto err;
6507 }
6508 /* clean up buf for pre-allocated fd */
6509 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6510 (*update)) {
6511 if (data->client.sec_buf_fd[i].vbase)
6512 dma_free_coherent(qseecom.pdev,
6513 data->client.sec_buf_fd[i].size,
6514 data->client.sec_buf_fd[i].vbase,
6515 data->client.sec_buf_fd[i].pbase);
6516 memset((void *)update, 0,
6517 sizeof(struct qseecom_param_memref));
6518 memset(&(data->client.sec_buf_fd[i]), 0,
6519 sizeof(struct qseecom_sec_buf_fd_info));
6520 goto clean;
6521 }
6522
6523 if (*update == 0) {
6524 /* update buf for pre-allocated fd from secure heap*/
6525 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6526 sg_ptr);
6527 if (ret) {
6528 pr_err("Failed to handle buf for fd[%d]\n", i);
6529 goto err;
6530 }
6531 memref = (struct qseecom_param_memref *)update;
6532 memref->buffer =
6533 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6534 memref->size =
6535 (uint32_t)(data->client.sec_buf_fd[i].size);
6536 } else {
6537 /* update buf for fd from non-secure qseecom heap */
6538 if (sg_ptr->nents != 1) {
6539 pr_err("Num of scat entr (%d) invalid\n",
6540 sg_ptr->nents);
6541 goto err;
6542 }
6543 if (cleanup)
6544 *update = 0;
6545 else
6546 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6547 }
6548clean:
6549 if (cleanup) {
6550 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6551 ihandle, NULL, sg->length,
6552 ION_IOC_INV_CACHES);
6553 if (ret) {
6554 pr_err("cache operation failed %d\n", ret);
6555 goto err;
6556 }
6557 } else {
6558 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6559 ihandle, NULL, sg->length,
6560 ION_IOC_CLEAN_INV_CACHES);
6561 if (ret) {
6562 pr_err("cache operation failed %d\n", ret);
6563 goto err;
6564 }
6565 data->sglistinfo_ptr[i].indexAndFlags =
6566 SGLISTINFO_SET_INDEX_FLAG(
6567 (sg_ptr->nents == 1), 0,
6568 req->ifd_data[i].cmd_buf_offset);
6569 data->sglistinfo_ptr[i].sizeOrCount =
6570 (sg_ptr->nents == 1) ?
6571 sg->length : sg_ptr->nents;
6572 data->sglist_cnt = i + 1;
6573 }
6574 /* Deallocate the handle */
6575 if (!IS_ERR_OR_NULL(ihandle))
6576 ion_free(qseecom.ion_clnt, ihandle);
6577 }
6578 return ret;
6579err:
6580 if (!IS_ERR_OR_NULL(ihandle))
6581 ion_free(qseecom.ion_clnt, ihandle);
6582 return -ENOMEM;
6583}
6584
/*
 * Issue a QTEEC command (open session, close session, request
 * cancellation, ...) to the trusted app identified by @data.
 *
 * The user-virtual req/resp pointers are validated against the shared
 * buffer, saved (req_ptr/resp_ptr) so their *original* user addresses
 * can be translated to physical for TZ, then rewritten in place to
 * kernel-virtual so modfd buffer patching works.  Depending on the QSEE
 * version a 32-bit or 64-bit ireq is marshalled; for 32-bit ELF apps on
 * the 64-bit path the physical addresses must stay below 4 GiB.
 *
 * The exit: path always runs cache invalidation and (for open-session /
 * cancellation) the modfd cleanup pass, using ret2 so a cleanup success
 * does not mask an earlier command failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req, uint32_t cmd_id)
{
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	int ret2 = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = __qseecom_qteec_validate_msg(data, req);
	if (ret)
		return ret;

	/* Keep the original user-virtual addresses for phys translation. */
	req_ptr = req->req_ptr;
	resp_ptr = req->resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* Switch the request's pointers to kernel-virtual so the modfd
	 * patching below writes into the kernel mapping of the shared buf.
	 */
	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->req_ptr);
	req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->resp_ptr);

	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, false);
		if (ret)
			return ret;
	}

	/* Marshal the 32-bit or 64-bit internal request for TZ. */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req->req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req->resp_len;
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req->req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req->resp_len;
		/* 32-bit trusted apps cannot address buffers above 4G. */
		if ((data->client.app_arch == ELFCLASS32) &&
			((ireq_64bit.req_ptr >=
				PHY_ADDR_4G - ireq_64bit.req_len) ||
			(ireq_64bit.resp_ptr >=
				PHY_ADDR_4G - ireq_64bit.resp_len))){
			pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
				data->client.app_name, data->client.app_id);
			pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
				ireq_64bit.req_ptr, ireq_64bit.req_len,
				ireq_64bit.resp_ptr, ireq_64bit.resp_len);
			return -EFAULT;
		}
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
	}
	/* The first word of the ireq selects the QSEOS command id. */
	if (qseecom.whitelist_support == true
		&& cmd_id == QSEOS_TEE_OPEN_SESSION)
		*(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
	else
		*(uint32_t *)cmd_buf = cmd_id;

	reqd_len_sb_in = req->req_len + req->resp_len;
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
						resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/* Cleanup runs on every path; ret2 keeps the original ret intact. */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret2) {
		pr_err("cache operation failed %d\n", ret);
		return ret2;
	}

	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret2 = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, true);
		if (ret2)
			return ret2;
	}
	return ret;
}
6747
6748static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6749 void __user *argp)
6750{
6751 struct qseecom_qteec_modfd_req req;
6752 int ret = 0;
6753
6754 ret = copy_from_user(&req, argp,
6755 sizeof(struct qseecom_qteec_modfd_req));
6756 if (ret) {
6757 pr_err("copy_from_user failed\n");
6758 return ret;
6759 }
6760 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6761 QSEOS_TEE_OPEN_SESSION);
6762
6763 return ret;
6764}
6765
6766static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6767 void __user *argp)
6768{
6769 struct qseecom_qteec_req req;
6770 int ret = 0;
6771
6772 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6773 if (ret) {
6774 pr_err("copy_from_user failed\n");
6775 return ret;
6776 }
6777 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6778 return ret;
6779}
6780
6781static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
6782 void __user *argp)
6783{
6784 struct qseecom_qteec_modfd_req req;
6785 struct qseecom_command_scm_resp resp;
6786 struct qseecom_qteec_ireq ireq;
6787 struct qseecom_qteec_64bit_ireq ireq_64bit;
6788 struct qseecom_registered_app_list *ptr_app;
6789 bool found_app = false;
6790 unsigned long flags;
6791 int ret = 0;
6792 int i = 0;
6793 uint32_t reqd_len_sb_in = 0;
6794 void *cmd_buf = NULL;
6795 size_t cmd_len;
6796 struct sglist_info *table = data->sglistinfo_ptr;
6797 void *req_ptr = NULL;
6798 void *resp_ptr = NULL;
6799
6800 ret = copy_from_user(&req, argp,
6801 sizeof(struct qseecom_qteec_modfd_req));
6802 if (ret) {
6803 pr_err("copy_from_user failed\n");
6804 return ret;
6805 }
6806 ret = __qseecom_qteec_validate_msg(data,
6807 (struct qseecom_qteec_req *)(&req));
6808 if (ret)
6809 return ret;
6810 req_ptr = req.req_ptr;
6811 resp_ptr = req.resp_ptr;
6812
6813 /* find app_id & img_name from list */
6814 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6815 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6816 list) {
6817 if ((ptr_app->app_id == data->client.app_id) &&
6818 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6819 found_app = true;
6820 break;
6821 }
6822 }
6823 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6824 if (!found_app) {
6825 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6826 (char *)data->client.app_name);
6827 return -ENOENT;
6828 }
6829
6830 /* validate offsets */
6831 for (i = 0; i < MAX_ION_FD; i++) {
6832 if (req.ifd_data[i].fd) {
6833 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
6834 return -EINVAL;
6835 }
6836 }
6837 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6838 (uintptr_t)req.req_ptr);
6839 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6840 (uintptr_t)req.resp_ptr);
6841 ret = __qseecom_update_qteec_req_buf(&req, data, false);
6842 if (ret)
6843 return ret;
6844
6845 if (qseecom.qsee_version < QSEE_VERSION_40) {
6846 ireq.app_id = data->client.app_id;
6847 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6848 (uintptr_t)req_ptr);
6849 ireq.req_len = req.req_len;
6850 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6851 (uintptr_t)resp_ptr);
6852 ireq.resp_len = req.resp_len;
6853 cmd_buf = (void *)&ireq;
6854 cmd_len = sizeof(struct qseecom_qteec_ireq);
6855 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6856 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6857 dmac_flush_range((void *)table,
6858 (void *)table + SGLISTINFO_TABLE_SIZE);
6859 } else {
6860 ireq_64bit.app_id = data->client.app_id;
6861 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6862 (uintptr_t)req_ptr);
6863 ireq_64bit.req_len = req.req_len;
6864 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6865 (uintptr_t)resp_ptr);
6866 ireq_64bit.resp_len = req.resp_len;
6867 cmd_buf = (void *)&ireq_64bit;
6868 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6869 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6870 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6871 dmac_flush_range((void *)table,
6872 (void *)table + SGLISTINFO_TABLE_SIZE);
6873 }
6874 reqd_len_sb_in = req.req_len + req.resp_len;
6875 if (qseecom.whitelist_support == true)
6876 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
6877 else
6878 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
6879
6880 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6881 data->client.sb_virt,
6882 reqd_len_sb_in,
6883 ION_IOC_CLEAN_INV_CACHES);
6884 if (ret) {
6885 pr_err("cache operation failed %d\n", ret);
6886 return ret;
6887 }
6888
6889 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6890
6891 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6892 cmd_buf, cmd_len,
6893 &resp, sizeof(resp));
6894 if (ret) {
6895 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6896 ret, data->client.app_id);
6897 return ret;
6898 }
6899
6900 if (qseecom.qsee_reentrancy_support) {
6901 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
6902 } else {
6903 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6904 ret = __qseecom_process_incomplete_cmd(data, &resp);
6905 if (ret) {
6906 pr_err("process_incomplete_cmd failed err: %d\n",
6907 ret);
6908 return ret;
6909 }
6910 } else {
6911 if (resp.result != QSEOS_RESULT_SUCCESS) {
6912 pr_err("Response result %d not supported\n",
6913 resp.result);
6914 ret = -EINVAL;
6915 }
6916 }
6917 }
6918 ret = __qseecom_update_qteec_req_buf(&req, data, true);
6919 if (ret)
6920 return ret;
6921
6922 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6923 data->client.sb_virt, data->client.sb_length,
6924 ION_IOC_INV_CACHES);
6925 if (ret) {
6926 pr_err("cache operation failed %d\n", ret);
6927 return ret;
6928 }
6929 return 0;
6930}
6931
6932static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
6933 void __user *argp)
6934{
6935 struct qseecom_qteec_modfd_req req;
6936 int ret = 0;
6937
6938 ret = copy_from_user(&req, argp,
6939 sizeof(struct qseecom_qteec_modfd_req));
6940 if (ret) {
6941 pr_err("copy_from_user failed\n");
6942 return ret;
6943 }
6944 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6945 QSEOS_TEE_REQUEST_CANCELLATION);
6946
6947 return ret;
6948}
6949
6950static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
6951{
6952 if (data->sglist_cnt) {
6953 memset(data->sglistinfo_ptr, 0,
6954 SGLISTINFO_TABLE_SIZE);
6955 data->sglist_cnt = 0;
6956 }
6957}
6958
6959static inline long qseecom_ioctl(struct file *file,
6960 unsigned int cmd, unsigned long arg)
6961{
6962 int ret = 0;
6963 struct qseecom_dev_handle *data = file->private_data;
6964 void __user *argp = (void __user *) arg;
6965 bool perf_enabled = false;
6966
6967 if (!data) {
6968 pr_err("Invalid/uninitialized device handle\n");
6969 return -EINVAL;
6970 }
6971
6972 if (data->abort) {
6973 pr_err("Aborting qseecom driver\n");
6974 return -ENODEV;
6975 }
6976
6977 switch (cmd) {
6978 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
6979 if (data->type != QSEECOM_GENERIC) {
6980 pr_err("reg lstnr req: invalid handle (%d)\n",
6981 data->type);
6982 ret = -EINVAL;
6983 break;
6984 }
6985 pr_debug("ioctl register_listener_req()\n");
6986 mutex_lock(&app_access_lock);
6987 atomic_inc(&data->ioctl_count);
6988 data->type = QSEECOM_LISTENER_SERVICE;
6989 ret = qseecom_register_listener(data, argp);
6990 atomic_dec(&data->ioctl_count);
6991 wake_up_all(&data->abort_wq);
6992 mutex_unlock(&app_access_lock);
6993 if (ret)
6994 pr_err("failed qseecom_register_listener: %d\n", ret);
6995 break;
6996 }
6997 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
6998 if ((data->listener.id == 0) ||
6999 (data->type != QSEECOM_LISTENER_SERVICE)) {
7000 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
7001 data->type, data->listener.id);
7002 ret = -EINVAL;
7003 break;
7004 }
7005 pr_debug("ioctl unregister_listener_req()\n");
7006 mutex_lock(&app_access_lock);
7007 atomic_inc(&data->ioctl_count);
7008 ret = qseecom_unregister_listener(data);
7009 atomic_dec(&data->ioctl_count);
7010 wake_up_all(&data->abort_wq);
7011 mutex_unlock(&app_access_lock);
7012 if (ret)
7013 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7014 break;
7015 }
7016 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7017 if ((data->client.app_id == 0) ||
7018 (data->type != QSEECOM_CLIENT_APP)) {
7019 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7020 data->type, data->client.app_id);
7021 ret = -EINVAL;
7022 break;
7023 }
7024 /* Only one client allowed here at a time */
7025 mutex_lock(&app_access_lock);
7026 if (qseecom.support_bus_scaling) {
7027 /* register bus bw in case the client doesn't do it */
7028 if (!data->mode) {
7029 mutex_lock(&qsee_bw_mutex);
7030 __qseecom_register_bus_bandwidth_needs(
7031 data, HIGH);
7032 mutex_unlock(&qsee_bw_mutex);
7033 }
7034 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7035 if (ret) {
7036 pr_err("Failed to set bw.\n");
7037 ret = -EINVAL;
7038 mutex_unlock(&app_access_lock);
7039 break;
7040 }
7041 }
7042 /*
7043 * On targets where crypto clock is handled by HLOS,
7044 * if clk_access_cnt is zero and perf_enabled is false,
7045 * then the crypto clock was not enabled before sending cmd to
7046 * tz, qseecom will enable the clock to avoid service failure.
7047 */
7048 if (!qseecom.no_clock_support &&
7049 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7050 pr_debug("ce clock is not enabled!\n");
7051 ret = qseecom_perf_enable(data);
7052 if (ret) {
7053 pr_err("Failed to vote for clock with err %d\n",
7054 ret);
7055 mutex_unlock(&app_access_lock);
7056 ret = -EINVAL;
7057 break;
7058 }
7059 perf_enabled = true;
7060 }
7061 atomic_inc(&data->ioctl_count);
7062 ret = qseecom_send_cmd(data, argp);
7063 if (qseecom.support_bus_scaling)
7064 __qseecom_add_bw_scale_down_timer(
7065 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7066 if (perf_enabled) {
7067 qsee_disable_clock_vote(data, CLK_DFAB);
7068 qsee_disable_clock_vote(data, CLK_SFPB);
7069 }
7070 atomic_dec(&data->ioctl_count);
7071 wake_up_all(&data->abort_wq);
7072 mutex_unlock(&app_access_lock);
7073 if (ret)
7074 pr_err("failed qseecom_send_cmd: %d\n", ret);
7075 break;
7076 }
7077 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7078 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7079 if ((data->client.app_id == 0) ||
7080 (data->type != QSEECOM_CLIENT_APP)) {
7081 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7082 data->type, data->client.app_id);
7083 ret = -EINVAL;
7084 break;
7085 }
7086 /* Only one client allowed here at a time */
7087 mutex_lock(&app_access_lock);
7088 if (qseecom.support_bus_scaling) {
7089 if (!data->mode) {
7090 mutex_lock(&qsee_bw_mutex);
7091 __qseecom_register_bus_bandwidth_needs(
7092 data, HIGH);
7093 mutex_unlock(&qsee_bw_mutex);
7094 }
7095 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7096 if (ret) {
7097 pr_err("Failed to set bw.\n");
7098 mutex_unlock(&app_access_lock);
7099 ret = -EINVAL;
7100 break;
7101 }
7102 }
7103 /*
7104 * On targets where crypto clock is handled by HLOS,
7105 * if clk_access_cnt is zero and perf_enabled is false,
7106 * then the crypto clock was not enabled before sending cmd to
7107 * tz, qseecom will enable the clock to avoid service failure.
7108 */
7109 if (!qseecom.no_clock_support &&
7110 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7111 pr_debug("ce clock is not enabled!\n");
7112 ret = qseecom_perf_enable(data);
7113 if (ret) {
7114 pr_err("Failed to vote for clock with err %d\n",
7115 ret);
7116 mutex_unlock(&app_access_lock);
7117 ret = -EINVAL;
7118 break;
7119 }
7120 perf_enabled = true;
7121 }
7122 atomic_inc(&data->ioctl_count);
7123 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7124 ret = qseecom_send_modfd_cmd(data, argp);
7125 else
7126 ret = qseecom_send_modfd_cmd_64(data, argp);
7127 if (qseecom.support_bus_scaling)
7128 __qseecom_add_bw_scale_down_timer(
7129 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7130 if (perf_enabled) {
7131 qsee_disable_clock_vote(data, CLK_DFAB);
7132 qsee_disable_clock_vote(data, CLK_SFPB);
7133 }
7134 atomic_dec(&data->ioctl_count);
7135 wake_up_all(&data->abort_wq);
7136 mutex_unlock(&app_access_lock);
7137 if (ret)
7138 pr_err("failed qseecom_send_cmd: %d\n", ret);
7139 __qseecom_clean_data_sglistinfo(data);
7140 break;
7141 }
7142 case QSEECOM_IOCTL_RECEIVE_REQ: {
7143 if ((data->listener.id == 0) ||
7144 (data->type != QSEECOM_LISTENER_SERVICE)) {
7145 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7146 data->type, data->listener.id);
7147 ret = -EINVAL;
7148 break;
7149 }
7150 atomic_inc(&data->ioctl_count);
7151 ret = qseecom_receive_req(data);
7152 atomic_dec(&data->ioctl_count);
7153 wake_up_all(&data->abort_wq);
7154 if (ret && (ret != -ERESTARTSYS))
7155 pr_err("failed qseecom_receive_req: %d\n", ret);
7156 break;
7157 }
7158 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7159 if ((data->listener.id == 0) ||
7160 (data->type != QSEECOM_LISTENER_SERVICE)) {
7161 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7162 data->type, data->listener.id);
7163 ret = -EINVAL;
7164 break;
7165 }
7166 atomic_inc(&data->ioctl_count);
7167 if (!qseecom.qsee_reentrancy_support)
7168 ret = qseecom_send_resp();
7169 else
7170 ret = qseecom_reentrancy_send_resp(data);
7171 atomic_dec(&data->ioctl_count);
7172 wake_up_all(&data->abort_wq);
7173 if (ret)
7174 pr_err("failed qseecom_send_resp: %d\n", ret);
7175 break;
7176 }
7177 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7178 if ((data->type != QSEECOM_CLIENT_APP) &&
7179 (data->type != QSEECOM_GENERIC) &&
7180 (data->type != QSEECOM_SECURE_SERVICE)) {
7181 pr_err("set mem param req: invalid handle (%d)\n",
7182 data->type);
7183 ret = -EINVAL;
7184 break;
7185 }
7186 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7187 mutex_lock(&app_access_lock);
7188 atomic_inc(&data->ioctl_count);
7189 ret = qseecom_set_client_mem_param(data, argp);
7190 atomic_dec(&data->ioctl_count);
7191 mutex_unlock(&app_access_lock);
7192 if (ret)
7193 pr_err("failed Qqseecom_set_mem_param request: %d\n",
7194 ret);
7195 break;
7196 }
7197 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7198 if ((data->type != QSEECOM_GENERIC) &&
7199 (data->type != QSEECOM_CLIENT_APP)) {
7200 pr_err("load app req: invalid handle (%d)\n",
7201 data->type);
7202 ret = -EINVAL;
7203 break;
7204 }
7205 data->type = QSEECOM_CLIENT_APP;
7206 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7207 mutex_lock(&app_access_lock);
7208 atomic_inc(&data->ioctl_count);
7209 ret = qseecom_load_app(data, argp);
7210 atomic_dec(&data->ioctl_count);
7211 mutex_unlock(&app_access_lock);
7212 if (ret)
7213 pr_err("failed load_app request: %d\n", ret);
7214 break;
7215 }
7216 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7217 if ((data->client.app_id == 0) ||
7218 (data->type != QSEECOM_CLIENT_APP)) {
7219 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7220 data->type, data->client.app_id);
7221 ret = -EINVAL;
7222 break;
7223 }
7224 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7225 mutex_lock(&app_access_lock);
7226 atomic_inc(&data->ioctl_count);
7227 ret = qseecom_unload_app(data, false);
7228 atomic_dec(&data->ioctl_count);
7229 mutex_unlock(&app_access_lock);
7230 if (ret)
7231 pr_err("failed unload_app request: %d\n", ret);
7232 break;
7233 }
7234 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7235 atomic_inc(&data->ioctl_count);
7236 ret = qseecom_get_qseos_version(data, argp);
7237 if (ret)
7238 pr_err("qseecom_get_qseos_version: %d\n", ret);
7239 atomic_dec(&data->ioctl_count);
7240 break;
7241 }
7242 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7243 if ((data->type != QSEECOM_GENERIC) &&
7244 (data->type != QSEECOM_CLIENT_APP)) {
7245 pr_err("perf enable req: invalid handle (%d)\n",
7246 data->type);
7247 ret = -EINVAL;
7248 break;
7249 }
7250 if ((data->type == QSEECOM_CLIENT_APP) &&
7251 (data->client.app_id == 0)) {
7252 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7253 data->type, data->client.app_id);
7254 ret = -EINVAL;
7255 break;
7256 }
7257 atomic_inc(&data->ioctl_count);
7258 if (qseecom.support_bus_scaling) {
7259 mutex_lock(&qsee_bw_mutex);
7260 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7261 mutex_unlock(&qsee_bw_mutex);
7262 } else {
7263 ret = qseecom_perf_enable(data);
7264 if (ret)
7265 pr_err("Fail to vote for clocks %d\n", ret);
7266 }
7267 atomic_dec(&data->ioctl_count);
7268 break;
7269 }
7270 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7271 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7272 (data->type != QSEECOM_CLIENT_APP)) {
7273 pr_err("perf disable req: invalid handle (%d)\n",
7274 data->type);
7275 ret = -EINVAL;
7276 break;
7277 }
7278 if ((data->type == QSEECOM_CLIENT_APP) &&
7279 (data->client.app_id == 0)) {
7280 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7281 data->type, data->client.app_id);
7282 ret = -EINVAL;
7283 break;
7284 }
7285 atomic_inc(&data->ioctl_count);
7286 if (!qseecom.support_bus_scaling) {
7287 qsee_disable_clock_vote(data, CLK_DFAB);
7288 qsee_disable_clock_vote(data, CLK_SFPB);
7289 } else {
7290 mutex_lock(&qsee_bw_mutex);
7291 qseecom_unregister_bus_bandwidth_needs(data);
7292 mutex_unlock(&qsee_bw_mutex);
7293 }
7294 atomic_dec(&data->ioctl_count);
7295 break;
7296 }
7297
7298 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7299 /* If crypto clock is not handled by HLOS, return directly. */
7300 if (qseecom.no_clock_support) {
7301 pr_debug("crypto clock is not handled by HLOS\n");
7302 break;
7303 }
7304 if ((data->client.app_id == 0) ||
7305 (data->type != QSEECOM_CLIENT_APP)) {
7306 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7307 data->type, data->client.app_id);
7308 ret = -EINVAL;
7309 break;
7310 }
7311 atomic_inc(&data->ioctl_count);
7312 ret = qseecom_scale_bus_bandwidth(data, argp);
7313 atomic_dec(&data->ioctl_count);
7314 break;
7315 }
7316 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7317 if (data->type != QSEECOM_GENERIC) {
7318 pr_err("load ext elf req: invalid client handle (%d)\n",
7319 data->type);
7320 ret = -EINVAL;
7321 break;
7322 }
7323 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7324 data->released = true;
7325 mutex_lock(&app_access_lock);
7326 atomic_inc(&data->ioctl_count);
7327 ret = qseecom_load_external_elf(data, argp);
7328 atomic_dec(&data->ioctl_count);
7329 mutex_unlock(&app_access_lock);
7330 if (ret)
7331 pr_err("failed load_external_elf request: %d\n", ret);
7332 break;
7333 }
7334 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7335 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7336 pr_err("unload ext elf req: invalid handle (%d)\n",
7337 data->type);
7338 ret = -EINVAL;
7339 break;
7340 }
7341 data->released = true;
7342 mutex_lock(&app_access_lock);
7343 atomic_inc(&data->ioctl_count);
7344 ret = qseecom_unload_external_elf(data);
7345 atomic_dec(&data->ioctl_count);
7346 mutex_unlock(&app_access_lock);
7347 if (ret)
7348 pr_err("failed unload_app request: %d\n", ret);
7349 break;
7350 }
7351 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7352 data->type = QSEECOM_CLIENT_APP;
7353 mutex_lock(&app_access_lock);
7354 atomic_inc(&data->ioctl_count);
7355 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7356 ret = qseecom_query_app_loaded(data, argp);
7357 atomic_dec(&data->ioctl_count);
7358 mutex_unlock(&app_access_lock);
7359 break;
7360 }
7361 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7362 if (data->type != QSEECOM_GENERIC) {
7363 pr_err("send cmd svc req: invalid handle (%d)\n",
7364 data->type);
7365 ret = -EINVAL;
7366 break;
7367 }
7368 data->type = QSEECOM_SECURE_SERVICE;
7369 if (qseecom.qsee_version < QSEE_VERSION_03) {
7370 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7371 qseecom.qsee_version);
7372 return -EINVAL;
7373 }
7374 mutex_lock(&app_access_lock);
7375 atomic_inc(&data->ioctl_count);
7376 ret = qseecom_send_service_cmd(data, argp);
7377 atomic_dec(&data->ioctl_count);
7378 mutex_unlock(&app_access_lock);
7379 break;
7380 }
7381 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7382 if (!(qseecom.support_pfe || qseecom.support_fde))
7383 pr_err("Features requiring key init not supported\n");
7384 if (data->type != QSEECOM_GENERIC) {
7385 pr_err("create key req: invalid handle (%d)\n",
7386 data->type);
7387 ret = -EINVAL;
7388 break;
7389 }
7390 if (qseecom.qsee_version < QSEE_VERSION_05) {
7391 pr_err("Create Key feature unsupported: qsee ver %u\n",
7392 qseecom.qsee_version);
7393 return -EINVAL;
7394 }
7395 data->released = true;
7396 mutex_lock(&app_access_lock);
7397 atomic_inc(&data->ioctl_count);
7398 ret = qseecom_create_key(data, argp);
7399 if (ret)
7400 pr_err("failed to create encryption key: %d\n", ret);
7401
7402 atomic_dec(&data->ioctl_count);
7403 mutex_unlock(&app_access_lock);
7404 break;
7405 }
7406 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7407 if (!(qseecom.support_pfe || qseecom.support_fde))
7408 pr_err("Features requiring key init not supported\n");
7409 if (data->type != QSEECOM_GENERIC) {
7410 pr_err("wipe key req: invalid handle (%d)\n",
7411 data->type);
7412 ret = -EINVAL;
7413 break;
7414 }
7415 if (qseecom.qsee_version < QSEE_VERSION_05) {
7416 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7417 qseecom.qsee_version);
7418 return -EINVAL;
7419 }
7420 data->released = true;
7421 mutex_lock(&app_access_lock);
7422 atomic_inc(&data->ioctl_count);
7423 ret = qseecom_wipe_key(data, argp);
7424 if (ret)
7425 pr_err("failed to wipe encryption key: %d\n", ret);
7426 atomic_dec(&data->ioctl_count);
7427 mutex_unlock(&app_access_lock);
7428 break;
7429 }
7430 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7431 if (!(qseecom.support_pfe || qseecom.support_fde))
7432 pr_err("Features requiring key init not supported\n");
7433 if (data->type != QSEECOM_GENERIC) {
7434 pr_err("update key req: invalid handle (%d)\n",
7435 data->type);
7436 ret = -EINVAL;
7437 break;
7438 }
7439 if (qseecom.qsee_version < QSEE_VERSION_05) {
7440 pr_err("Update Key feature unsupported in qsee ver %u\n",
7441 qseecom.qsee_version);
7442 return -EINVAL;
7443 }
7444 data->released = true;
7445 mutex_lock(&app_access_lock);
7446 atomic_inc(&data->ioctl_count);
7447 ret = qseecom_update_key_user_info(data, argp);
7448 if (ret)
7449 pr_err("failed to update key user info: %d\n", ret);
7450 atomic_dec(&data->ioctl_count);
7451 mutex_unlock(&app_access_lock);
7452 break;
7453 }
7454 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7455 if (data->type != QSEECOM_GENERIC) {
7456 pr_err("save part hash req: invalid handle (%d)\n",
7457 data->type);
7458 ret = -EINVAL;
7459 break;
7460 }
7461 data->released = true;
7462 mutex_lock(&app_access_lock);
7463 atomic_inc(&data->ioctl_count);
7464 ret = qseecom_save_partition_hash(argp);
7465 atomic_dec(&data->ioctl_count);
7466 mutex_unlock(&app_access_lock);
7467 break;
7468 }
7469 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7470 if (data->type != QSEECOM_GENERIC) {
7471 pr_err("ES activated req: invalid handle (%d)\n",
7472 data->type);
7473 ret = -EINVAL;
7474 break;
7475 }
7476 data->released = true;
7477 mutex_lock(&app_access_lock);
7478 atomic_inc(&data->ioctl_count);
7479 ret = qseecom_is_es_activated(argp);
7480 atomic_dec(&data->ioctl_count);
7481 mutex_unlock(&app_access_lock);
7482 break;
7483 }
7484 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7485 if (data->type != QSEECOM_GENERIC) {
7486 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7487 data->type);
7488 ret = -EINVAL;
7489 break;
7490 }
7491 data->released = true;
7492 mutex_lock(&app_access_lock);
7493 atomic_inc(&data->ioctl_count);
7494 ret = qseecom_mdtp_cipher_dip(argp);
7495 atomic_dec(&data->ioctl_count);
7496 mutex_unlock(&app_access_lock);
7497 break;
7498 }
7499 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7500 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7501 if ((data->listener.id == 0) ||
7502 (data->type != QSEECOM_LISTENER_SERVICE)) {
7503 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7504 data->type, data->listener.id);
7505 ret = -EINVAL;
7506 break;
7507 }
7508 atomic_inc(&data->ioctl_count);
7509 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7510 ret = qseecom_send_modfd_resp(data, argp);
7511 else
7512 ret = qseecom_send_modfd_resp_64(data, argp);
7513 atomic_dec(&data->ioctl_count);
7514 wake_up_all(&data->abort_wq);
7515 if (ret)
7516 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7517 __qseecom_clean_data_sglistinfo(data);
7518 break;
7519 }
7520 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7521 if ((data->client.app_id == 0) ||
7522 (data->type != QSEECOM_CLIENT_APP)) {
7523 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7524 data->type, data->client.app_id);
7525 ret = -EINVAL;
7526 break;
7527 }
7528 if (qseecom.qsee_version < QSEE_VERSION_40) {
7529 pr_err("GP feature unsupported: qsee ver %u\n",
7530 qseecom.qsee_version);
7531 return -EINVAL;
7532 }
7533 /* Only one client allowed here at a time */
7534 mutex_lock(&app_access_lock);
7535 atomic_inc(&data->ioctl_count);
7536 ret = qseecom_qteec_open_session(data, argp);
7537 atomic_dec(&data->ioctl_count);
7538 wake_up_all(&data->abort_wq);
7539 mutex_unlock(&app_access_lock);
7540 if (ret)
7541 pr_err("failed open_session_cmd: %d\n", ret);
7542 __qseecom_clean_data_sglistinfo(data);
7543 break;
7544 }
7545 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7546 if ((data->client.app_id == 0) ||
7547 (data->type != QSEECOM_CLIENT_APP)) {
7548 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7549 data->type, data->client.app_id);
7550 ret = -EINVAL;
7551 break;
7552 }
7553 if (qseecom.qsee_version < QSEE_VERSION_40) {
7554 pr_err("GP feature unsupported: qsee ver %u\n",
7555 qseecom.qsee_version);
7556 return -EINVAL;
7557 }
7558 /* Only one client allowed here at a time */
7559 mutex_lock(&app_access_lock);
7560 atomic_inc(&data->ioctl_count);
7561 ret = qseecom_qteec_close_session(data, argp);
7562 atomic_dec(&data->ioctl_count);
7563 wake_up_all(&data->abort_wq);
7564 mutex_unlock(&app_access_lock);
7565 if (ret)
7566 pr_err("failed close_session_cmd: %d\n", ret);
7567 break;
7568 }
7569 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7570 if ((data->client.app_id == 0) ||
7571 (data->type != QSEECOM_CLIENT_APP)) {
7572 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7573 data->type, data->client.app_id);
7574 ret = -EINVAL;
7575 break;
7576 }
7577 if (qseecom.qsee_version < QSEE_VERSION_40) {
7578 pr_err("GP feature unsupported: qsee ver %u\n",
7579 qseecom.qsee_version);
7580 return -EINVAL;
7581 }
7582 /* Only one client allowed here at a time */
7583 mutex_lock(&app_access_lock);
7584 atomic_inc(&data->ioctl_count);
7585 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7586 atomic_dec(&data->ioctl_count);
7587 wake_up_all(&data->abort_wq);
7588 mutex_unlock(&app_access_lock);
7589 if (ret)
7590 pr_err("failed Invoke cmd: %d\n", ret);
7591 __qseecom_clean_data_sglistinfo(data);
7592 break;
7593 }
7594 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7595 if ((data->client.app_id == 0) ||
7596 (data->type != QSEECOM_CLIENT_APP)) {
7597 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7598 data->type, data->client.app_id);
7599 ret = -EINVAL;
7600 break;
7601 }
7602 if (qseecom.qsee_version < QSEE_VERSION_40) {
7603 pr_err("GP feature unsupported: qsee ver %u\n",
7604 qseecom.qsee_version);
7605 return -EINVAL;
7606 }
7607 /* Only one client allowed here at a time */
7608 mutex_lock(&app_access_lock);
7609 atomic_inc(&data->ioctl_count);
7610 ret = qseecom_qteec_request_cancellation(data, argp);
7611 atomic_dec(&data->ioctl_count);
7612 wake_up_all(&data->abort_wq);
7613 mutex_unlock(&app_access_lock);
7614 if (ret)
7615 pr_err("failed request_cancellation: %d\n", ret);
7616 break;
7617 }
7618 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7619 atomic_inc(&data->ioctl_count);
7620 ret = qseecom_get_ce_info(data, argp);
7621 if (ret)
7622 pr_err("failed get fde ce pipe info: %d\n", ret);
7623 atomic_dec(&data->ioctl_count);
7624 break;
7625 }
7626 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7627 atomic_inc(&data->ioctl_count);
7628 ret = qseecom_free_ce_info(data, argp);
7629 if (ret)
7630 pr_err("failed get fde ce pipe info: %d\n", ret);
7631 atomic_dec(&data->ioctl_count);
7632 break;
7633 }
7634 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7635 atomic_inc(&data->ioctl_count);
7636 ret = qseecom_query_ce_info(data, argp);
7637 if (ret)
7638 pr_err("failed get fde ce pipe info: %d\n", ret);
7639 atomic_dec(&data->ioctl_count);
7640 break;
7641 }
7642 default:
7643 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7644 return -EINVAL;
7645 }
7646 return ret;
7647}
7648
7649static int qseecom_open(struct inode *inode, struct file *file)
7650{
7651 int ret = 0;
7652 struct qseecom_dev_handle *data;
7653
7654 data = kzalloc(sizeof(*data), GFP_KERNEL);
7655 if (!data)
7656 return -ENOMEM;
7657 file->private_data = data;
7658 data->abort = 0;
7659 data->type = QSEECOM_GENERIC;
7660 data->released = false;
7661 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7662 data->mode = INACTIVE;
7663 init_waitqueue_head(&data->abort_wq);
7664 atomic_set(&data->ioctl_count, 0);
7665 return ret;
7666}
7667
/*
 * qseecom_release() - file release handler for /dev/qseecom.
 *
 * If the handle was not already released by an earlier ioctl
 * (data->released still false), tear down whatever the fd was used
 * for based on its type: unregister a listener, unload a client app,
 * or unmap ion memory. Afterwards drop any bus-bandwidth registration
 * (or, without bus scaling, any SFPB/DFAB clock votes) still held by
 * this fd, then free the per-fd state.
 *
 * Return: the result of the type-specific teardown step (0 if none
 * was needed or the type was QSEECOM_UNAVAILABLE_CLIENT_APP).
 */
static int qseecom_release(struct inode *inode, struct file *file)
{
	struct qseecom_dev_handle *data = file->private_data;
	int ret = 0;

	if (data->released == false) {
		pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
			data->type, data->mode, data);
		switch (data->type) {
		case QSEECOM_LISTENER_SERVICE:
			/* Teardown paths that talk to TZ serialize on app_access_lock. */
			mutex_lock(&app_access_lock);
			ret = qseecom_unregister_listener(data);
			mutex_unlock(&app_access_lock);
			break;
		case QSEECOM_CLIENT_APP:
			mutex_lock(&app_access_lock);
			ret = qseecom_unload_app(data, true);
			mutex_unlock(&app_access_lock);
			break;
		case QSEECOM_SECURE_SERVICE:
		case QSEECOM_GENERIC:
			ret = qseecom_unmap_ion_allocated_memory(data);
			if (ret)
				pr_err("Ion Unmap failed\n");
			break;
		case QSEECOM_UNAVAILABLE_CLIENT_APP:
			break;
		default:
			pr_err("Unsupported clnt_handle_type %d",
				data->type);
			break;
		}
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		if (data->mode != INACTIVE) {
			qseecom_unregister_bus_bandwidth_needs(data);
			/* Scale the bus down only once no client needs it. */
			if (qseecom.cumulative_mode == INACTIVE) {
				ret = __qseecom_set_msm_bus_request(INACTIVE);
				if (ret)
					pr_err("Fail to scale down bus\n");
			}
		}
		mutex_unlock(&qsee_bw_mutex);
	} else {
		if (data->fast_load_enabled == true)
			qsee_disable_clock_vote(data, CLK_SFPB);
		if (data->perf_enabled == true)
			qsee_disable_clock_vote(data, CLK_DFAB);
	}
	kfree(data);

	return ret;
}
7723
7724#ifdef CONFIG_COMPAT
7725#include "compat_qseecom.c"
7726#else
7727#define compat_qseecom_ioctl NULL
7728#endif
7729
/*
 * Character-device entry points for the qseecom device node.
 * compat_qseecom_ioctl is defined by compat_qseecom.c under
 * CONFIG_COMPAT and is NULL otherwise (see the #ifdef above).
 */
static const struct file_operations qseecom_fops = {
		.owner = THIS_MODULE,
		.unlocked_ioctl = qseecom_ioctl,
		.compat_ioctl = compat_qseecom_ioctl,
		.open = qseecom_open,
		.release = qseecom_release
};
7737
7738static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7739{
7740 int rc = 0;
7741 struct device *pdev;
7742 struct qseecom_clk *qclk;
7743 char *core_clk_src = NULL;
7744 char *core_clk = NULL;
7745 char *iface_clk = NULL;
7746 char *bus_clk = NULL;
7747
7748 switch (ce) {
7749 case CLK_QSEE: {
7750 core_clk_src = "core_clk_src";
7751 core_clk = "core_clk";
7752 iface_clk = "iface_clk";
7753 bus_clk = "bus_clk";
7754 qclk = &qseecom.qsee;
7755 qclk->instance = CLK_QSEE;
7756 break;
7757 };
7758 case CLK_CE_DRV: {
7759 core_clk_src = "ce_drv_core_clk_src";
7760 core_clk = "ce_drv_core_clk";
7761 iface_clk = "ce_drv_iface_clk";
7762 bus_clk = "ce_drv_bus_clk";
7763 qclk = &qseecom.ce_drv;
7764 qclk->instance = CLK_CE_DRV;
7765 break;
7766 };
7767 default:
7768 pr_err("Invalid ce hw instance: %d!\n", ce);
7769 return -EIO;
7770 }
7771
7772 if (qseecom.no_clock_support) {
7773 qclk->ce_core_clk = NULL;
7774 qclk->ce_clk = NULL;
7775 qclk->ce_bus_clk = NULL;
7776 qclk->ce_core_src_clk = NULL;
7777 return 0;
7778 }
7779
7780 pdev = qseecom.pdev;
7781
7782 /* Get CE3 src core clk. */
7783 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7784 if (!IS_ERR(qclk->ce_core_src_clk)) {
7785 rc = clk_set_rate(qclk->ce_core_src_clk,
7786 qseecom.ce_opp_freq_hz);
7787 if (rc) {
7788 clk_put(qclk->ce_core_src_clk);
7789 qclk->ce_core_src_clk = NULL;
7790 pr_err("Unable to set the core src clk @%uMhz.\n",
7791 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7792 return -EIO;
7793 }
7794 } else {
7795 pr_warn("Unable to get CE core src clk, set to NULL\n");
7796 qclk->ce_core_src_clk = NULL;
7797 }
7798
7799 /* Get CE core clk */
7800 qclk->ce_core_clk = clk_get(pdev, core_clk);
7801 if (IS_ERR(qclk->ce_core_clk)) {
7802 rc = PTR_ERR(qclk->ce_core_clk);
7803 pr_err("Unable to get CE core clk\n");
7804 if (qclk->ce_core_src_clk != NULL)
7805 clk_put(qclk->ce_core_src_clk);
7806 return -EIO;
7807 }
7808
7809 /* Get CE Interface clk */
7810 qclk->ce_clk = clk_get(pdev, iface_clk);
7811 if (IS_ERR(qclk->ce_clk)) {
7812 rc = PTR_ERR(qclk->ce_clk);
7813 pr_err("Unable to get CE interface clk\n");
7814 if (qclk->ce_core_src_clk != NULL)
7815 clk_put(qclk->ce_core_src_clk);
7816 clk_put(qclk->ce_core_clk);
7817 return -EIO;
7818 }
7819
7820 /* Get CE AXI clk */
7821 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
7822 if (IS_ERR(qclk->ce_bus_clk)) {
7823 rc = PTR_ERR(qclk->ce_bus_clk);
7824 pr_err("Unable to get CE BUS interface clk\n");
7825 if (qclk->ce_core_src_clk != NULL)
7826 clk_put(qclk->ce_core_src_clk);
7827 clk_put(qclk->ce_core_clk);
7828 clk_put(qclk->ce_clk);
7829 return -EIO;
7830 }
7831
7832 return rc;
7833}
7834
7835static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
7836{
7837 struct qseecom_clk *qclk;
7838
7839 if (ce == CLK_QSEE)
7840 qclk = &qseecom.qsee;
7841 else
7842 qclk = &qseecom.ce_drv;
7843
7844 if (qclk->ce_clk != NULL) {
7845 clk_put(qclk->ce_clk);
7846 qclk->ce_clk = NULL;
7847 }
7848 if (qclk->ce_core_clk != NULL) {
7849 clk_put(qclk->ce_core_clk);
7850 qclk->ce_core_clk = NULL;
7851 }
7852 if (qclk->ce_bus_clk != NULL) {
7853 clk_put(qclk->ce_bus_clk);
7854 qclk->ce_bus_clk = NULL;
7855 }
7856 if (qclk->ce_core_src_clk != NULL) {
7857 clk_put(qclk->ce_core_src_clk);
7858 qclk->ce_core_src_clk = NULL;
7859 }
7860 qclk->instance = CLK_INVALID;
7861}
7862
/*
 * qseecom_retrieve_ce_data() - parse crypto-engine (CE) configuration
 * from the device tree into qseecom.ce_info.
 * @pdev: qseecom platform device whose of_node is read.
 *
 * Two DT schemas are supported:
 *  - New style: "qcom,full-disk-encrypt-info" / "qcom,per-file-encrypt-info"
 *    tables of qseecom_crypto_info entries, grouped into units.
 *  - Old style (only if neither new table is present; tracked by
 *    old_db): single "qcom,disk-encrypt-pipe-pair" /
 *    "qcom,file-encrypt-pipe-pair" values plus the hlos ce hw
 *    instance list.
 *
 * On success fills qseecom.ce_info.{fde,pfe,num_fde,num_pfe} and the
 * qsee/ce_drv instance numbers. On failure, all ce_info allocations
 * made so far are freed before returning.
 *
 * Return: 0 on success, -EINVAL on malformed/missing DT data,
 * -ENOMEM on allocation failure.
 */
static int qseecom_retrieve_ce_data(struct platform_device *pdev)
{
	int rc = 0;
	uint32_t hlos_num_ce_hw_instances;
	uint32_t disk_encrypt_pipe;
	uint32_t file_encrypt_pipe;
	uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
	int i;
	const int *tbl;
	int size;
	int entry;
	struct qseecom_crypto_info *pfde_tbl = NULL;
	struct qseecom_crypto_info *p;
	int tbl_size;
	int j;
	bool old_db = true;	/* cleared if any new-style table exists */
	struct qseecom_ce_info_use *pce_info_use;
	uint32_t *unit_tbl = NULL;
	int total_units = 0;
	struct qseecom_ce_pipe_entry *pce_entry;

	qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
	qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;

	if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,qsee-ce-hw-instance",
				&qseecom.ce_info.qsee_ce_hw_instance)) {
		pr_err("Fail to get qsee ce hw instance information.\n");
		rc = -EINVAL;
		goto out;
	} else {
		pr_debug("qsee-ce-hw-instance=0x%x\n",
			qseecom.ce_info.qsee_ce_hw_instance);
	}

	qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
					"qcom,support-fde");
	qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
					"qcom,support-pfe");

	if (!qseecom.support_pfe && !qseecom.support_fde) {
		pr_warn("Device does not support PFE/FDE");
		goto out;
	}

	/* New-style FDE table: group entries by unit_num. */
	if (qseecom.support_fde)
		tbl = of_get_property((&pdev->dev)->of_node,
			"qcom,full-disk-encrypt-info", &size);
	else
		tbl = NULL;
	if (tbl) {
		old_db = false;
		if (size % sizeof(struct qseecom_crypto_info)) {
			pr_err("full-disk-encrypt-info tbl size(%d)\n",
				size);
			rc = -EINVAL;
			goto out;
		}
		tbl_size = size / sizeof
				(struct qseecom_crypto_info);

		pfde_tbl = kzalloc(size, GFP_KERNEL);
		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
		total_units = 0;

		if (!pfde_tbl || !unit_tbl) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		if (of_property_read_u32_array((&pdev->dev)->of_node,
			"qcom,full-disk-encrypt-info",
			(u32 *)pfde_tbl, size/sizeof(u32))) {
			pr_err("failed to read full-disk-encrypt-info tbl\n");
			rc = -EINVAL;
			goto out;
		}

		/* Collect the set of distinct unit numbers. */
		for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
			for (j = 0; j < total_units; j++) {
				if (p->unit_num == *(unit_tbl + j))
					break;
			}
			if (j == total_units) {
				*(unit_tbl + total_units) = p->unit_num;
				total_units++;
			}
		}

		qseecom.ce_info.num_fde = total_units;
		pce_info_use = qseecom.ce_info.fde = kcalloc(
			total_units, sizeof(struct qseecom_ce_info_use),
			GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}

		/* One ce_info_use per unit, holding its pipe entries. */
		for (j = 0; j < total_units; j++, pce_info_use++) {
			pce_info_use->unit_num = *(unit_tbl + j);
			pce_info_use->alloc = false;
			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
			pce_info_use->num_ce_pipe_entries = 0;
			pce_info_use->ce_pipe_entry = NULL;
			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num)
					pce_info_use->num_ce_pipe_entries++;
			}

			entry = pce_info_use->num_ce_pipe_entries;
			pce_entry = pce_info_use->ce_pipe_entry =
				kcalloc(entry,
					sizeof(struct qseecom_ce_pipe_entry),
					GFP_KERNEL);
			if (pce_entry == NULL) {
				pr_err("failed to alloc memory\n");
				rc = -ENOMEM;
				goto out;
			}

			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num) {
					pce_entry->ce_num = p->ce;
					pce_entry->ce_pipe_pair =
							p->pipe_pair;
					pce_entry->valid = true;
					pce_entry++;
				}
			}
		}
		kfree(unit_tbl);
		unit_tbl = NULL;
		kfree(pfde_tbl);
		pfde_tbl = NULL;
	}

	/* New-style PFE table: same grouping as the FDE table above. */
	if (qseecom.support_pfe)
		tbl = of_get_property((&pdev->dev)->of_node,
			"qcom,per-file-encrypt-info", &size);
	else
		tbl = NULL;
	if (tbl) {
		old_db = false;
		if (size % sizeof(struct qseecom_crypto_info)) {
			pr_err("per-file-encrypt-info tbl size(%d)\n",
				size);
			rc = -EINVAL;
			goto out;
		}
		tbl_size = size / sizeof
				(struct qseecom_crypto_info);

		pfde_tbl = kzalloc(size, GFP_KERNEL);
		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
		total_units = 0;
		if (!pfde_tbl || !unit_tbl) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		if (of_property_read_u32_array((&pdev->dev)->of_node,
			"qcom,per-file-encrypt-info",
			(u32 *)pfde_tbl, size/sizeof(u32))) {
			pr_err("failed to read per-file-encrypt-info tbl\n");
			rc = -EINVAL;
			goto out;
		}

		for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
			for (j = 0; j < total_units; j++) {
				if (p->unit_num == *(unit_tbl + j))
					break;
			}
			if (j == total_units) {
				*(unit_tbl + total_units) = p->unit_num;
				total_units++;
			}
		}

		qseecom.ce_info.num_pfe = total_units;
		pce_info_use = qseecom.ce_info.pfe = kcalloc(
			total_units, sizeof(struct qseecom_ce_info_use),
			GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}

		for (j = 0; j < total_units; j++, pce_info_use++) {
			pce_info_use->unit_num = *(unit_tbl + j);
			pce_info_use->alloc = false;
			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
			pce_info_use->num_ce_pipe_entries = 0;
			pce_info_use->ce_pipe_entry = NULL;
			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num)
					pce_info_use->num_ce_pipe_entries++;
			}

			entry = pce_info_use->num_ce_pipe_entries;
			pce_entry = pce_info_use->ce_pipe_entry =
				kcalloc(entry,
					sizeof(struct qseecom_ce_pipe_entry),
					GFP_KERNEL);
			if (pce_entry == NULL) {
				pr_err("failed to alloc memory\n");
				rc = -ENOMEM;
				goto out;
			}

			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num) {
					pce_entry->ce_num = p->ce;
					pce_entry->ce_pipe_pair =
							p->pipe_pair;
					pce_entry->valid = true;
					pce_entry++;
				}
			}
		}
		kfree(unit_tbl);
		unit_tbl = NULL;
		kfree(pfde_tbl);
		pfde_tbl = NULL;
	}

	/* New-style tables found: skip the legacy properties entirely. */
	if (!old_db)
		goto out1;

	/* Legacy schema: single pipe-pair values per feature. */
	if (of_property_read_bool((&pdev->dev)->of_node,
			"qcom,support-multiple-ce-hw-instance")) {
		if (of_property_read_u32((&pdev->dev)->of_node,
			"qcom,hlos-num-ce-hw-instances",
				&hlos_num_ce_hw_instances)) {
			pr_err("Fail: get hlos number of ce hw instance\n");
			rc = -EINVAL;
			goto out;
		}
	} else {
		hlos_num_ce_hw_instances = 1;
	}

	if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
		pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
			MAX_CE_PIPE_PAIR_PER_UNIT);
		rc = -EINVAL;
		goto out;
	}

	if (of_property_read_u32_array((&pdev->dev)->of_node,
			"qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
			hlos_num_ce_hw_instances)) {
		pr_err("Fail: get hlos ce hw instance info\n");
		rc = -EINVAL;
		goto out;
	}

	if (qseecom.support_fde) {
		pce_info_use = qseecom.ce_info.fde =
			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		/* by default for old db */
		qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
		pce_info_use->alloc = false;
		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
		pce_info_use->ce_pipe_entry = NULL;
		if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,disk-encrypt-pipe-pair",
				&disk_encrypt_pipe)) {
			pr_err("Fail to get FDE pipe information.\n");
			rc = -EINVAL;
				goto out;
		} else {
			pr_debug("disk-encrypt-pipe-pair=0x%x",
				disk_encrypt_pipe);
		}
		entry = pce_info_use->num_ce_pipe_entries =
				hlos_num_ce_hw_instances;
		pce_entry = pce_info_use->ce_pipe_entry =
			kcalloc(entry,
				sizeof(struct qseecom_ce_pipe_entry),
				GFP_KERNEL);
		if (pce_entry == NULL) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		/* Same pipe pair replicated across each hlos CE instance. */
		for (i = 0; i < entry; i++) {
			pce_entry->ce_num = hlos_ce_hw_instance[i];
			pce_entry->ce_pipe_pair = disk_encrypt_pipe;
			pce_entry->valid = 1;
			pce_entry++;
		}
	} else {
		pr_warn("Device does not support FDE");
		disk_encrypt_pipe = 0xff;
	}
	if (qseecom.support_pfe) {
		pce_info_use = qseecom.ce_info.pfe =
			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		/* by default for old db */
		qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
		pce_info_use->alloc = false;
		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
		pce_info_use->ce_pipe_entry = NULL;

		if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,file-encrypt-pipe-pair",
				&file_encrypt_pipe)) {
			pr_err("Fail to get PFE pipe information.\n");
			rc = -EINVAL;
			goto out;
		} else {
			pr_debug("file-encrypt-pipe-pair=0x%x",
				file_encrypt_pipe);
		}
		entry = pce_info_use->num_ce_pipe_entries =
				hlos_num_ce_hw_instances;
		pce_entry = pce_info_use->ce_pipe_entry =
			kcalloc(entry,
				sizeof(struct qseecom_ce_pipe_entry),
				GFP_KERNEL);
		if (pce_entry == NULL) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		for (i = 0; i < entry; i++) {
			pce_entry->ce_num = hlos_ce_hw_instance[i];
			pce_entry->ce_pipe_pair = file_encrypt_pipe;
			pce_entry->valid = 1;
			pce_entry++;
		}
	} else {
		pr_warn("Device does not support PFE");
		file_encrypt_pipe = 0xff;
	}

out1:
	qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
	qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
out:
	/* On error, free every ce_info allocation made above. */
	if (rc) {
		if (qseecom.ce_info.fde) {
			pce_info_use = qseecom.ce_info.fde;
			for (i = 0; i < qseecom.ce_info.num_fde; i++) {
				pce_entry = pce_info_use->ce_pipe_entry;
				kfree(pce_entry);
				pce_info_use++;
			}
		}
		kfree(qseecom.ce_info.fde);
		qseecom.ce_info.fde = NULL;
		if (qseecom.ce_info.pfe) {
			pce_info_use = qseecom.ce_info.pfe;
			for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
				pce_entry = pce_info_use->ce_pipe_entry;
				kfree(pce_entry);
				pce_info_use++;
			}
		}
		kfree(qseecom.ce_info.pfe);
		qseecom.ce_info.pfe = NULL;
	}
	kfree(unit_tbl);
	kfree(pfde_tbl);
	return rc;
}
8244
8245static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8246 void __user *argp)
8247{
8248 struct qseecom_ce_info_req req;
8249 struct qseecom_ce_info_req *pinfo = &req;
8250 int ret = 0;
8251 int i;
8252 unsigned int entries;
8253 struct qseecom_ce_info_use *pce_info_use, *p;
8254 int total = 0;
8255 bool found = false;
8256 struct qseecom_ce_pipe_entry *pce_entry;
8257
8258 ret = copy_from_user(pinfo, argp,
8259 sizeof(struct qseecom_ce_info_req));
8260 if (ret) {
8261 pr_err("copy_from_user failed\n");
8262 return ret;
8263 }
8264
8265 switch (pinfo->usage) {
8266 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8267 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8268 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8269 if (qseecom.support_fde) {
8270 p = qseecom.ce_info.fde;
8271 total = qseecom.ce_info.num_fde;
8272 } else {
8273 pr_err("system does not support fde\n");
8274 return -EINVAL;
8275 }
8276 break;
8277 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8278 if (qseecom.support_pfe) {
8279 p = qseecom.ce_info.pfe;
8280 total = qseecom.ce_info.num_pfe;
8281 } else {
8282 pr_err("system does not support pfe\n");
8283 return -EINVAL;
8284 }
8285 break;
8286 default:
8287 pr_err("unsupported usage %d\n", pinfo->usage);
8288 return -EINVAL;
8289 }
8290
8291 pce_info_use = NULL;
8292 for (i = 0; i < total; i++) {
8293 if (!p->alloc)
8294 pce_info_use = p;
8295 else if (!memcmp(p->handle, pinfo->handle,
8296 MAX_CE_INFO_HANDLE_SIZE)) {
8297 pce_info_use = p;
8298 found = true;
8299 break;
8300 }
8301 p++;
8302 }
8303
8304 if (pce_info_use == NULL)
8305 return -EBUSY;
8306
8307 pinfo->unit_num = pce_info_use->unit_num;
8308 if (!pce_info_use->alloc) {
8309 pce_info_use->alloc = true;
8310 memcpy(pce_info_use->handle,
8311 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8312 }
8313 if (pce_info_use->num_ce_pipe_entries >
8314 MAX_CE_PIPE_PAIR_PER_UNIT)
8315 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8316 else
8317 entries = pce_info_use->num_ce_pipe_entries;
8318 pinfo->num_ce_pipe_entries = entries;
8319 pce_entry = pce_info_use->ce_pipe_entry;
8320 for (i = 0; i < entries; i++, pce_entry++)
8321 pinfo->ce_pipe_entry[i] = *pce_entry;
8322 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8323 pinfo->ce_pipe_entry[i].valid = 0;
8324
8325 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8326 pr_err("copy_to_user failed\n");
8327 ret = -EFAULT;
8328 }
8329 return ret;
8330}
8331
8332static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8333 void __user *argp)
8334{
8335 struct qseecom_ce_info_req req;
8336 struct qseecom_ce_info_req *pinfo = &req;
8337 int ret = 0;
8338 struct qseecom_ce_info_use *p;
8339 int total = 0;
8340 int i;
8341 bool found = false;
8342
8343 ret = copy_from_user(pinfo, argp,
8344 sizeof(struct qseecom_ce_info_req));
8345 if (ret)
8346 return ret;
8347
8348 switch (pinfo->usage) {
8349 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8350 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8351 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8352 if (qseecom.support_fde) {
8353 p = qseecom.ce_info.fde;
8354 total = qseecom.ce_info.num_fde;
8355 } else {
8356 pr_err("system does not support fde\n");
8357 return -EINVAL;
8358 }
8359 break;
8360 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8361 if (qseecom.support_pfe) {
8362 p = qseecom.ce_info.pfe;
8363 total = qseecom.ce_info.num_pfe;
8364 } else {
8365 pr_err("system does not support pfe\n");
8366 return -EINVAL;
8367 }
8368 break;
8369 default:
8370 pr_err("unsupported usage %d\n", pinfo->usage);
8371 return -EINVAL;
8372 }
8373
8374 for (i = 0; i < total; i++) {
8375 if (p->alloc &&
8376 !memcmp(p->handle, pinfo->handle,
8377 MAX_CE_INFO_HANDLE_SIZE)) {
8378 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8379 p->alloc = false;
8380 found = true;
8381 break;
8382 }
8383 p++;
8384 }
8385 return ret;
8386}
8387
8388static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8389 void __user *argp)
8390{
8391 struct qseecom_ce_info_req req;
8392 struct qseecom_ce_info_req *pinfo = &req;
8393 int ret = 0;
8394 int i;
8395 unsigned int entries;
8396 struct qseecom_ce_info_use *pce_info_use, *p;
8397 int total = 0;
8398 bool found = false;
8399 struct qseecom_ce_pipe_entry *pce_entry;
8400
8401 ret = copy_from_user(pinfo, argp,
8402 sizeof(struct qseecom_ce_info_req));
8403 if (ret)
8404 return ret;
8405
8406 switch (pinfo->usage) {
8407 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8408 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8409 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8410 if (qseecom.support_fde) {
8411 p = qseecom.ce_info.fde;
8412 total = qseecom.ce_info.num_fde;
8413 } else {
8414 pr_err("system does not support fde\n");
8415 return -EINVAL;
8416 }
8417 break;
8418 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8419 if (qseecom.support_pfe) {
8420 p = qseecom.ce_info.pfe;
8421 total = qseecom.ce_info.num_pfe;
8422 } else {
8423 pr_err("system does not support pfe\n");
8424 return -EINVAL;
8425 }
8426 break;
8427 default:
8428 pr_err("unsupported usage %d\n", pinfo->usage);
8429 return -EINVAL;
8430 }
8431
8432 pce_info_use = NULL;
8433 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8434 pinfo->num_ce_pipe_entries = 0;
8435 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8436 pinfo->ce_pipe_entry[i].valid = 0;
8437
8438 for (i = 0; i < total; i++) {
8439
8440 if (p->alloc && !memcmp(p->handle,
8441 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8442 pce_info_use = p;
8443 found = true;
8444 break;
8445 }
8446 p++;
8447 }
8448 if (!pce_info_use)
8449 goto out;
8450 pinfo->unit_num = pce_info_use->unit_num;
8451 if (pce_info_use->num_ce_pipe_entries >
8452 MAX_CE_PIPE_PAIR_PER_UNIT)
8453 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8454 else
8455 entries = pce_info_use->num_ce_pipe_entries;
8456 pinfo->num_ce_pipe_entries = entries;
8457 pce_entry = pce_info_use->ce_pipe_entry;
8458 for (i = 0; i < entries; i++, pce_entry++)
8459 pinfo->ce_pipe_entry[i] = *pce_entry;
8460 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8461 pinfo->ce_pipe_entry[i].valid = 0;
8462out:
8463 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8464 pr_err("copy_to_user failed\n");
8465 ret = -EFAULT;
8466 }
8467 return ret;
8468}
8469
8470/*
8471 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8472 * then whitelist feature is not supported.
8473 */
8474static int qseecom_check_whitelist_feature(void)
8475{
8476 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8477
8478 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8479}
8480
/*
 * Driver probe: create the qseecom char device, query the QSEE version
 * from TZ, parse device-tree properties (CE data, clocks, bus scaling),
 * notify TZ of the secure-app memory region, and register the bus
 * bandwidth client.  Error paths unwind in reverse order through the
 * exit_* labels at the bottom.
 */
static int qseecom_probe(struct platform_device *pdev)
{
	int rc;
	int i;
	uint32_t feature = 10;
	struct device *class_dev;
	struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
	struct qseecom_command_scm_resp resp;
	struct qseecom_ce_info_use *pce_info_use = NULL;

	/* Reset global driver state to known defaults before init. */
	qseecom.qsee_bw_count = 0;
	qseecom.qsee_perf_client = 0;
	qseecom.qsee_sfpb_bw_count = 0;

	qseecom.qsee.ce_core_clk = NULL;
	qseecom.qsee.ce_clk = NULL;
	qseecom.qsee.ce_core_src_clk = NULL;
	qseecom.qsee.ce_bus_clk = NULL;

	qseecom.cumulative_mode = 0;
	qseecom.current_mode = INACTIVE;
	qseecom.support_bus_scaling = false;
	qseecom.support_fde = false;
	qseecom.support_pfe = false;

	qseecom.ce_drv.ce_core_clk = NULL;
	qseecom.ce_drv.ce_clk = NULL;
	qseecom.ce_drv.ce_core_src_clk = NULL;
	qseecom.ce_drv.ce_bus_clk = NULL;
	/* Block ioctls until the end of probe flips this to READY. */
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);

	qseecom.app_block_ref_cnt = 0;
	init_waitqueue_head(&qseecom.app_block_wq);
	qseecom.whitelist_support = true;

	/* Create the /dev/qseecom character device. */
	rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
	if (rc < 0) {
		pr_err("alloc_chrdev_region failed %d\n", rc);
		return rc;
	}

	driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
	if (IS_ERR(driver_class)) {
		rc = -ENOMEM;
		pr_err("class_create failed %d\n", rc);
		goto exit_unreg_chrdev_region;
	}

	class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
			QSEECOM_DEV);
	if (IS_ERR(class_dev)) {
		/* NOTE(review): rc is still 0 here, so this logs a stale
		 * value; the -ENOMEM assignment below should come first.
		 */
		pr_err("class_device_create failed %d\n", rc);
		rc = -ENOMEM;
		goto exit_destroy_class;
	}

	cdev_init(&qseecom.cdev, &qseecom_fops);
	qseecom.cdev.owner = THIS_MODULE;

	rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
	if (rc < 0) {
		pr_err("cdev_add failed %d\n", rc);
		goto exit_destroy_device;
	}

	/* Initialize listener/app/kclient bookkeeping. */
	INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
	spin_lock_init(&qseecom.registered_listener_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
	spin_lock_init(&qseecom.registered_app_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
	spin_lock_init(&qseecom.registered_kclient_list_lock);
	init_waitqueue_head(&qseecom.send_resp_wq);
	qseecom.send_resp_flag = 0;

	/* Ask TZ for the QSEE version (SCM svc 6, cmd 3). */
	qseecom.qsee_version = QSEEE_VERSION_00;
	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
		&resp, sizeof(resp));
	/* NOTE(review): resp.result is logged before rc is checked, so a
	 * failed call may print an uninitialized value.
	 */
	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
	if (rc) {
		pr_err("Failed to get QSEE version info %d\n", rc);
		goto exit_del_cdev;
	}
	qseecom.qsee_version = resp.result;
	qseecom.qseos_version = QSEOS_VERSION_14;
	qseecom.commonlib_loaded = false;
	qseecom.commonlib64_loaded = false;
	qseecom.pdev = class_dev;
	/* Create ION msm client */
	qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
	if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
		pr_err("Ion client cannot be created\n");
		rc = -ENOMEM;
		goto exit_del_cdev;
	}

	/* register client for bus scaling */
	if (pdev->dev.of_node) {
		qseecom.pdev->of_node = pdev->dev.of_node;
		qseecom.support_bus_scaling =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,support-bus-scaling");
		/* Parse CE pipe/unit configuration from DT. */
		rc = qseecom_retrieve_ce_data(pdev);
		if (rc)
			goto exit_destroy_ion_client;
		qseecom.appsbl_qseecom_support =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,appsbl-qseecom-support");
		pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
				qseecom.appsbl_qseecom_support);

		qseecom.commonlib64_loaded =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,commonlib64-loaded-by-uefi");
		pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
				qseecom.commonlib64_loaded);
		qseecom.fde_key_size =
			of_property_read_bool((&pdev->dev)->of_node,
						"qcom,fde-key-size");
		qseecom.no_clock_support =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,no-clock-support");
		/* NOTE(review): the two log messages below read as swapped
		 * relative to the meaning of no_clock_support used later in
		 * this function -- confirm intended wording before changing.
		 */
		if (!qseecom.no_clock_support) {
			pr_info("qseecom clocks handled by other subsystem\n");
		} else {
			pr_info("no-clock-support=0x%x",
			qseecom.no_clock_support);
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,qsee-reentrancy-support",
					&qseecom.qsee_reentrancy_support)) {
			pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
			qseecom.qsee_reentrancy_support = 0;
		} else {
			pr_warn("qseecom.qsee_reentrancy_support = %d\n",
				qseecom.qsee_reentrancy_support);
		}

		/*
		 * The qseecom bus scaling flag can not be enabled when
		 * crypto clock is not handled by HLOS.
		 */
		if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
			pr_err("support_bus_scaling flag can not be enabled.\n");
			rc = -EINVAL;
			goto exit_destroy_ion_client;
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,ce-opp-freq",
				&qseecom.ce_opp_freq_hz)) {
			pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
			qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
		}
		/* Acquire the QSEE crypto-engine clocks. */
		rc = __qseecom_init_clk(CLK_QSEE);
		if (rc)
			goto exit_destroy_ion_client;

		/* A separate CE instance needs its own clocks; otherwise
		 * reuse the QSEE clock handles for the CE driver.
		 */
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
				(qseecom.support_pfe || qseecom.support_fde)) {
			rc = __qseecom_init_clk(CLK_CE_DRV);
			if (rc) {
				__qseecom_deinit_clk(CLK_QSEE);
				goto exit_destroy_ion_client;
			}
		} else {
			struct qseecom_clk *qclk;

			qclk = &qseecom.qsee;
			qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
			qseecom.ce_drv.ce_clk = qclk->ce_clk;
			qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
			qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
		}

		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						msm_bus_cl_get_pdata(pdev);
		/* Tell TZ where the secure app region lives, unless the
		 * bootloader already protected/registered it.
		 */
		if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
			(!qseecom.is_apps_region_protected &&
			!qseecom.appsbl_qseecom_support)) {
			struct resource *resource = NULL;
			struct qsee_apps_region_info_ireq req;
			struct qsee_apps_region_info_64bit_ireq req_64bit;
			struct qseecom_command_scm_resp resp;
			void *cmd_buf = NULL;
			size_t cmd_len;

			resource = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "secapp-region");
			if (resource) {
				/* Pick the 32- or 64-bit request layout
				 * based on the QSEE version.
				 */
				if (qseecom.qsee_version < QSEE_VERSION_40) {
					req.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req.addr = (uint32_t)resource->start;
					req.size = resource_size(resource);
					cmd_buf = (void *)&req;
					cmd_len = sizeof(struct
						qsee_apps_region_info_ireq);
					pr_warn("secure app region addr=0x%x size=0x%x",
							req.addr, req.size);
				} else {
					req_64bit.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req_64bit.addr = resource->start;
					req_64bit.size = resource_size(
							resource);
					cmd_buf = (void *)&req_64bit;
					cmd_len = sizeof(struct
					qsee_apps_region_info_64bit_ireq);
					pr_warn("secure app region addr=0x%llx size=0x%x",
						req_64bit.addr, req_64bit.size);
				}
			} else {
				pr_err("Fail to get secure app region info\n");
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
			/* Clocks must be on for the SCM call. */
			rc = __qseecom_enable_clk(CLK_QSEE);
			if (rc) {
				pr_err("CLK_QSEE enabling failed (%d)\n", rc);
				rc = -EIO;
				goto exit_deinit_clock;
			}
			rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len,
					&resp, sizeof(resp));
			__qseecom_disable_clk(CLK_QSEE);
			if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
				pr_err("send secapp reg fail %d resp.res %d\n",
							rc, resp.result);
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
		}
		/*
		 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
		 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
		 * Pls add "qseecom.commonlib64_loaded = true" here too.
		 */
		if (qseecom.is_apps_region_protected ||
					qseecom.appsbl_qseecom_support)
			qseecom.commonlib_loaded = true;
	} else {
		/* No DT node: fall back to board-file platform data. */
		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						pdev->dev.platform_data;
	}
	/* Set up the timer/work pair that scales the bus back down after
	 * a period of inactivity.
	 */
	if (qseecom.support_bus_scaling) {
		init_timer(&(qseecom.bw_scale_down_timer));
		INIT_WORK(&qseecom.bw_inactive_req_ws,
				qseecom_bw_inactive_req_work);
		qseecom.bw_scale_down_timer.function =
				qseecom_scale_bus_bandwidth_timer_callback;
	}
	qseecom.timer_running = false;
	qseecom.qsee_perf_client = msm_bus_scale_register_client(
					qseecom_platform_support);

	qseecom.whitelist_support = qseecom_check_whitelist_feature();
	pr_warn("qseecom.whitelist_support = %d\n",
				qseecom.whitelist_support);

	if (!qseecom.qsee_perf_client)
		pr_err("Unable to register bus client\n");

	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return 0;

exit_deinit_clock:
	__qseecom_deinit_clk(CLK_QSEE);
	if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
		(qseecom.support_pfe || qseecom.support_fde))
		__qseecom_deinit_clk(CLK_CE_DRV);
exit_destroy_ion_client:
	/* Free the CE info tables built by qseecom_retrieve_ce_data(). */
	if (qseecom.ce_info.fde) {
		pce_info_use = qseecom.ce_info.fde;
		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.fde);
	}
	if (qseecom.ce_info.pfe) {
		pce_info_use = qseecom.ce_info.pfe;
		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.pfe);
	}
	ion_client_destroy(qseecom.ion_clnt);
exit_del_cdev:
	cdev_del(&qseecom.cdev);
exit_destroy_device:
	device_destroy(driver_class, qseecom_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(qseecom_device_no, 1);
	return rc;
}
8781
8782static int qseecom_remove(struct platform_device *pdev)
8783{
8784 struct qseecom_registered_kclient_list *kclient = NULL;
8785 unsigned long flags = 0;
8786 int ret = 0;
8787 int i;
8788 struct qseecom_ce_pipe_entry *pce_entry;
8789 struct qseecom_ce_info_use *pce_info_use;
8790
8791 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8792 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
8793
8794 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
8795 list) {
8796 if (!kclient)
8797 goto exit_irqrestore;
8798
8799 /* Break the loop if client handle is NULL */
8800 if (!kclient->handle)
8801 goto exit_free_kclient;
8802
8803 if (list_empty(&kclient->list))
8804 goto exit_free_kc_handle;
8805
8806 list_del(&kclient->list);
8807 mutex_lock(&app_access_lock);
8808 ret = qseecom_unload_app(kclient->handle->dev, false);
8809 mutex_unlock(&app_access_lock);
8810 if (!ret) {
8811 kzfree(kclient->handle->dev);
8812 kzfree(kclient->handle);
8813 kzfree(kclient);
8814 }
8815 }
8816
8817exit_free_kc_handle:
8818 kzfree(kclient->handle);
8819exit_free_kclient:
8820 kzfree(kclient);
8821exit_irqrestore:
8822 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
8823
8824 if (qseecom.qseos_version > QSEEE_VERSION_00)
8825 qseecom_unload_commonlib_image();
8826
8827 if (qseecom.qsee_perf_client)
8828 msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
8829 0);
8830 if (pdev->dev.platform_data != NULL)
8831 msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
8832
8833 if (qseecom.support_bus_scaling) {
8834 cancel_work_sync(&qseecom.bw_inactive_req_ws);
8835 del_timer_sync(&qseecom.bw_scale_down_timer);
8836 }
8837
8838 if (qseecom.ce_info.fde) {
8839 pce_info_use = qseecom.ce_info.fde;
8840 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8841 pce_entry = pce_info_use->ce_pipe_entry;
8842 kfree(pce_entry);
8843 pce_info_use++;
8844 }
8845 }
8846 kfree(qseecom.ce_info.fde);
8847 if (qseecom.ce_info.pfe) {
8848 pce_info_use = qseecom.ce_info.pfe;
8849 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8850 pce_entry = pce_info_use->ce_pipe_entry;
8851 kfree(pce_entry);
8852 pce_info_use++;
8853 }
8854 }
8855 kfree(qseecom.ce_info.pfe);
8856
8857 /* register client for bus scaling */
8858 if (pdev->dev.of_node) {
8859 __qseecom_deinit_clk(CLK_QSEE);
8860 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8861 (qseecom.support_pfe || qseecom.support_fde))
8862 __qseecom_deinit_clk(CLK_CE_DRV);
8863 }
8864
8865 ion_client_destroy(qseecom.ion_clnt);
8866
8867 cdev_del(&qseecom.cdev);
8868
8869 device_destroy(driver_class, qseecom_device_no);
8870
8871 class_destroy(driver_class);
8872
8873 unregister_chrdev_region(qseecom_device_no, 1);
8874
8875 return ret;
8876}
8877
/*
 * Legacy PM suspend hook: mark the driver suspended, drop the bus vote
 * to INACTIVE, and gate any CE clocks that are currently enabled.
 * Nothing to do when clock control is not owned by HLOS
 * (qseecom.no_clock_support).  Always returns 0.
 */
static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
	if (qseecom.no_clock_support)
		return 0;

	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);

	/* Drop the bus bandwidth vote while suspended. */
	if (qseecom.current_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, INACTIVE);
		if (ret)
			pr_err("Fail to scale down bus\n");
		else
			qseecom.current_mode = INACTIVE;
	}

	/* Gate whichever CE clocks are still prepared/enabled. */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_clk != NULL)
			clk_disable_unprepare(qclk->ce_clk);
		if (qclk->ce_core_clk != NULL)
			clk_disable_unprepare(qclk->ce_core_clk);
		if (qclk->ce_bus_clk != NULL)
			clk_disable_unprepare(qclk->ce_bus_clk);
	}

	/* Stop the scale-down timer so it cannot fire while suspended.
	 * NOTE(review): probe only init_timer()s bw_scale_down_timer when
	 * support_bus_scaling is set -- confirm this path cannot run with
	 * an uninitialized timer.
	 */
	del_timer_sync(&(qseecom.bw_scale_down_timer));
	qseecom.timer_running = false;

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	cancel_work_sync(&qseecom.bw_inactive_req_ws);

	return 0;
}
8918
/*
 * Legacy PM resume hook: restore the bus bandwidth vote (capped at
 * HIGH), re-enable any CE clocks that were gated at suspend, re-arm the
 * inactivity scale-down timer, and mark the driver READY again.
 *
 * Returns 0 on success or -EIO if a clock fails to re-enable; the
 * READY state is set in either case (see the exit label).
 */
static int qseecom_resume(struct platform_device *pdev)
{
	int mode = 0;
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qseecom.no_clock_support)
		goto exit;

	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);
	/* Cap the restored vote at HIGH. */
	if (qseecom.cumulative_mode >= HIGH)
		mode = HIGH;
	else
		mode = qseecom.cumulative_mode;

	if (qseecom.cumulative_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, mode);
		if (ret)
			pr_err("Fail to scale up bus to %d\n", mode);
		else
			qseecom.current_mode = mode;
	}

	/* Re-enable CE clocks gated by suspend; unwind already-enabled
	 * ones in reverse order if any step fails.
	 */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_core_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_core_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE core clk\n");
				qclk->clk_access_cnt = 0;
				goto err;
			}
		}
		if (qclk->ce_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE iface clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_clk_err;
			}
		}
		if (qclk->ce_bus_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_bus_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE bus clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_bus_clk_err;
			}
		}
	}

	/* Re-arm the inactivity timer that scales the bus back down.
	 * NOTE(review): the timer is only initialized in probe when
	 * support_bus_scaling is set -- confirm mod_timer cannot run on an
	 * uninitialized timer here.
	 */
	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
		qseecom.bw_scale_down_timer.expires = jiffies +
			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		mod_timer(&(qseecom.bw_scale_down_timer),
				qseecom.bw_scale_down_timer.expires);
		qseecom.timer_running = true;
	}

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	goto exit;

ce_bus_clk_err:
	if (qclk->ce_clk)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	ret = -EIO;
exit:
	/* The driver is marked READY even on the -EIO error path. */
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return ret;
}
8998
/* Device-tree match table: binds this driver to "qcom,qseecom" nodes. */
static const struct of_device_id qseecom_match[] = {
	{
		.compatible = "qcom,qseecom",
	},
	{}
};
9005
/*
 * Platform driver definition.  Uses the legacy platform_driver
 * suspend/resume callbacks (not dev_pm_ops) and matches devices via
 * qseecom_match above.
 */
static struct platform_driver qseecom_plat_driver = {
	.probe = qseecom_probe,
	.remove = qseecom_remove,
	.suspend = qseecom_suspend,
	.resume = qseecom_resume,
	.driver = {
		.name = "qseecom",
		.owner = THIS_MODULE,
		.of_match_table = qseecom_match,
	},
};
9017
9018static int qseecom_init(void)
9019{
9020 return platform_driver_register(&qseecom_plat_driver);
9021}
9022
/* Module exit: unregister the driver; remove() runs for bound devices. */
static void qseecom_exit(void)
{
	platform_driver_unregister(&qseecom_plat_driver);
}
9027
/* Module metadata and init/exit hookup. */
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");

module_init(qseecom_init);
module_exit(qseecom_exit);