/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
4 * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
53
#define QSEECOM_DEV			"qseecom"
#define QSEOS_VERSION_14		0x14
/* NOTE(review): triple-E "QSEEE" looks like a historical typo, kept for ABI */
#define QSEEE_VERSION_00		0x400000
#define QSEE_VERSION_01			0x401000
#define QSEE_VERSION_02			0x402000
#define QSEE_VERSION_03			0x403000
#define QSEE_VERSION_04			0x404000
#define QSEE_VERSION_05			0x405000
#define QSEE_VERSION_20			0x800000
#define QSEE_VERSION_40			0x1000000  /* TZ.BF.4.0 */

#define QSEE_CE_CLK_100MHZ		100000000
#define CE_CLK_DIV			1000000	/* Hz-to-MHz divisor */

#define QSEECOM_MAX_SG_ENTRY			512
#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT	\
			(QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)

#define QSEECOM_INVALID_KEY_ID  0xff

/* Save partition image hash for authentication check */
#define	SCM_SAVE_PARTITION_HASH_ID	0x01

/* Check if enterprise security is activate */
#define	SCM_IS_ACTIVATED_ID		0x02

/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
#define SCM_MDTP_CIPHER_DIP		0x01

/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
#define MAX_DIP			0x20000

#define RPMB_SERVICE			0x2000
#define SSD_SERVICE			0x3000

#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT	2000
#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT	2000
#define TWO 2	/* NOTE(review): magic named constant; used as a plain 2 */
#define QSEECOM_UFS_ICE_CE_NUM 10
#define QSEECOM_SDCC_ICE_CE_NUM 20
#define QSEECOM_ICE_FDE_KEY_INDEX 0

/* Threshold for deciding whether a physical address is above 4 GB */
#define PHY_ADDR_4G	(1ULL<<32)

/* Driver lifecycle states stored in qseecom.qseecom_state */
#define QSEECOM_STATE_NOT_READY         0
#define QSEECOM_STATE_SUSPEND           1
#define QSEECOM_STATE_READY             2
/* NOTE(review): despite the name, this is a SHIFT amount (see the
 * qseecom_ice_key_size_type enum below), not a bit mask */
#define QSEECOM_ICE_FDE_KEY_SIZE_MASK   2

/*
 * default ce info unit to 0 for
 * services which
 * support only single instance.
 * Most of services are in this category.
 */
#define DEFAULT_CE_INFO_UNIT 0
#define DEFAULT_NUM_CE_INFO_UNIT 1
111
/* Bus clock identifiers used when voting for QSEE bandwidth. */
enum qseecom_clk_definitions {
	CLK_DFAB = 0,	/* data fabric clock */
	CLK_SFPB,	/* slow peripheral bus clock */
};
116
/*
 * FDE key-size selector encoded in a flags word; the values are shifted
 * left by QSEECOM_ICE_FDE_KEY_SIZE_MASK (a shift amount of 2, despite
 * the "MASK" name).
 */
enum qseecom_ice_key_size_type {
	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
};
125
/* Role of a /dev/qseecom file handle (qseecom_dev_handle.type). */
enum qseecom_client_handle_type {
	QSEECOM_CLIENT_APP = 1,		/* handle bound to a loaded TZ app */
	QSEECOM_LISTENER_SERVICE,	/* registered HLOS listener service */
	QSEECOM_SECURE_SERVICE,		/* secure service (e.g. RPMB/SSD) */
	QSEECOM_GENERIC,		/* freshly opened, role not decided yet */
	QSEECOM_UNAVAILABLE_CLIENT_APP,	/* app could not be loaded/found */
};
133
/* Crypto-engine clock instance selector for __qseecom_enable_clk() etc. */
enum qseecom_ce_hw_instance {
	CLK_QSEE = 0,	/* CE used by QSEE itself */
	CLK_CE_DRV,	/* CE used by the driver */
	CLK_INVALID,
};
139
static struct class *driver_class;	/* sysfs class for the char device */
static dev_t qseecom_device_no;		/* allocated major/minor number */

/* NOTE(review): lock roles inferred from names — confirm against users */
static DEFINE_MUTEX(qsee_bw_mutex);	/* serializes bandwidth voting */
static DEFINE_MUTEX(app_access_lock);	/* serializes TZ app operations */
static DEFINE_MUTEX(clk_access_lock);	/* protects CE clock refcounting */
146
/*
 * One entry describing a shared-memory region passed to TZ; the word
 * layout is documented in the comment above SGLISTINFO_SET_INDEX_FLAG
 * below.  Field names are camelCase because the layout is shared with
 * the TZ side — do not rename or reorder.
 */
struct sglist_info {
	uint32_t indexAndFlags;
	uint32_t sizeOrCount;
};
151
/*
 * The 31th bit indicates only one or multiple physical address inside
 * the request buffer. If it is set, the index locates a single physical addr
 * inside the request buffer, and `sizeOrCount` is the size of the memory being
 * shared at that physical address.
 * Otherwise, the index locates an array of {start, len} pairs (a
 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
 * that array.
 *
 * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
 *
 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
 */
#define SGLISTINFO_SET_INDEX_FLAG(c, s, i)	\
	((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))

#define SGLISTINFO_TABLE_SIZE	(sizeof(struct sglist_info) * MAX_ION_FD)

#define FEATURE_ID_WHITELIST	15	/*whitelist feature id*/

/* Pack a whitelist version triple into one 32-bit word: 10/10/12 bits */
#define MAKE_WHITELIST_VERSION(major, minor, patch) \
	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
175
/* Per-listener bookkeeping; linked into registered_listener_list_head. */
struct qseecom_registered_listener_list {
	struct list_head                 list;
	struct qseecom_register_listener_req svc;	/* id + shared buffer info */
	void *user_virt_sb_base;	/* userspace view of the shared buffer */
	u8 *sb_virt;			/* kernel mapping of the shared buffer */
	phys_addr_t sb_phys;
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	wait_queue_head_t          rcv_req_wq;	/* listener waits here for requests */
	int                        rcv_req_flag;
	int                        send_resp_flag;
	bool                       listener_in_use;
	/* wq for thread blocked on this listener*/
	wait_queue_head_t listener_block_app_wq;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
};
193
/* One loaded TZ application; linked into registered_app_list_head. */
struct qseecom_registered_app_list {
	struct list_head                 list;
	u32  app_id;			/* id assigned by QSEE */
	u32  ref_cnt;			/* number of clients using this app */
	char app_name[MAX_APP_NAME_SIZE];
	u32  app_arch;			/* ELF class of the loaded image */
	bool app_blocked;		/* true while blocked on a listener */
	u32  blocked_on_listener_id;
};
203
/* In-kernel (qseecom_kernel.h API) client; linked into kclient list. */
struct qseecom_registered_kclient_list {
	struct list_head list;
	struct qseecom_handle *handle;
};
208
/* Usage record for one crypto-engine "unit" (FDE or PFE). */
struct qseecom_ce_info_use {
	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
	unsigned int unit_num;			/* instance number of the unit */
	unsigned int num_ce_pipe_entries;
	struct qseecom_ce_pipe_entry *ce_pipe_entry;
	bool alloc;				/* entry currently handed out */
	uint32_t type;
};
217
/* Parsed DT description of CE hardware: FDE and PFE usage tables. */
struct ce_hw_usage_info {
	uint32_t qsee_ce_hw_instance;
	uint32_t num_fde;		/* entries in fde[] */
	struct qseecom_ce_info_use *fde;
	uint32_t num_pfe;		/* entries in pfe[] */
	struct qseecom_ce_info_use *pfe;
};
225
/* Clock bundle for one CE instance; refcounted via clk_access_cnt. */
struct qseecom_clk {
	enum qseecom_ce_hw_instance instance;
	struct clk *ce_core_clk;
	struct clk *ce_clk;
	struct clk *ce_core_src_clk;
	struct clk *ce_bus_clk;
	uint32_t clk_access_cnt;	/* protected by clk_access_lock */
};
234
/* Global driver state; a single instance exists (the `qseecom` static). */
struct qseecom_control {
	struct ion_client *ion_clnt;		/* Ion client */
	struct list_head  registered_listener_list_head;
	spinlock_t        registered_listener_list_lock;

	struct list_head  registered_app_list_head;
	spinlock_t        registered_app_list_lock;

	struct list_head   registered_kclient_list_head;
	spinlock_t        registered_kclient_list_lock;

	wait_queue_head_t send_resp_wq;		/* woken when a listener responds */
	int               send_resp_flag;

	uint32_t          qseos_version;
	uint32_t          qsee_version;		/* selects 32/64-bit ireq layouts */
	struct device *pdev;
	bool  whitelist_support;
	bool  commonlib_loaded;
	bool  commonlib64_loaded;
	struct ce_hw_usage_info ce_info;

	int qsee_bw_count;			/* outstanding HIGH bw votes */
	int qsee_sfpb_bw_count;			/* outstanding SFPB bw votes */

	uint32_t qsee_perf_client;		/* msm-bus client handle */
	struct qseecom_clk qsee;
	struct qseecom_clk ce_drv;

	bool support_bus_scaling;
	bool support_fde;			/* full disk encryption available */
	bool support_pfe;			/* per file encryption available */
	bool fde_key_size;
	uint32_t  cumulative_mode;
	enum qseecom_bandwidth_request_mode  current_mode;
	struct timer_list bw_scale_down_timer;	/* drops bw vote after inactivity */
	struct work_struct bw_inactive_req_ws;
	struct cdev cdev;
	bool timer_running;
	bool no_clock_support;
	unsigned int ce_opp_freq_hz;
	bool appsbl_qseecom_support;
	uint32_t qsee_reentrancy_support;

	uint32_t app_block_ref_cnt;		/* apps currently blocked on listeners */
	wait_queue_head_t app_block_wq;
	atomic_t qseecom_state;			/* QSEECOM_STATE_* */
	int is_apps_region_protected;		/* from androidboot.keymaster= */
	bool smcinvoke_support;			/* set once TZ accepts smcinvoke IDs */
};
285
/* DMA buffer backing a "secure buffer" ion fd passed in a modified cmd. */
struct qseecom_sec_buf_fd_info {
	bool is_sec_buf_fd;
	size_t size;
	void *vbase;		/* CPU address from dma_alloc */
	dma_addr_t pbase;	/* device/bus address */
};
292
/* GP TEE memref parameter as marshalled to TZ (32-bit fields). */
struct qseecom_param_memref {
	uint32_t buffer;
	uint32_t size;
};
297
/* Per-file state when the handle is bound to a TZ client application. */
struct qseecom_client_handle {
	u32  app_id;
	u8 *sb_virt;			/* kernel mapping of shared buffer */
	phys_addr_t sb_phys;
	unsigned long user_virt_sb_base;	/* userspace base for ptr fixups */
	size_t sb_length;
	struct ion_handle *ihandle;		/* Retrieve phy addr */
	char app_name[MAX_APP_NAME_SIZE];
	u32  app_arch;
	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
};
309
/* Per-file state when the handle is a registered listener service. */
struct qseecom_listener_handle {
	u32               id;
};
313
/* The single driver-wide state instance, initialized at probe. */
static struct qseecom_control qseecom;
315
/* Per-open-file private data; `type` selects which union member is live. */
struct qseecom_dev_handle {
	enum qseecom_client_handle_type type;
	union {
		struct qseecom_client_handle client;
		struct qseecom_listener_handle listener;
	};
	bool released;
	int               abort;	/* set to cancel blocked waiters */
	wait_queue_head_t abort_wq;
	atomic_t          ioctl_count;	/* in-flight ioctls on this handle */
	bool  perf_enabled;
	bool  fast_load_enabled;
	enum qseecom_bandwidth_request_mode mode;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
	bool use_legacy_cmd;		/* fall back to non-whitelist send cmd */
};
333
/* Fixed-size textual description of one key-ID usage (see key_id_array). */
struct qseecom_key_id_usage_desc {
	uint8_t desc[QSEECOM_KEY_ID_SIZE];
};
337
/* CE selection triple: which unit, which engine, which pipe pair. */
struct qseecom_crypto_info {
	unsigned int unit_num;
	unsigned int ce;
	unsigned int pipe_pair;
};
343
/*
 * Human-readable labels for key usages, indexed by the key usage value.
 * NOTE(review): the order must stay in sync with the TZ-side usage enum;
 * confirm against qseecomi.h before reordering.
 */
static struct qseecom_key_id_usage_desc key_id_array[] = {
	{
		.desc = "Undefined Usage Index",
	},

	{
		.desc = "Full Disk Encryption",
	},

	{
		.desc = "Per File Encryption",
	},

	{
		.desc = "UFS ICE Full Disk Encryption",
	},

	{
		.desc = "SDCC ICE Full Disk Encryption",
	},
};
365
/* Function proto types */
static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name);
static int qseecom_enable_ice_setup(int usage);
static int qseecom_disable_ice_setup(int usage);
static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
						void __user *argp);
static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
						void __user *argp);
static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
						void __user *argp);
383
384static int get_qseecom_keymaster_status(char *str)
385{
386 get_option(&str, &qseecom.is_apps_region_protected);
387 return 1;
388}
389__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
390
/*
 * qseecom_scm_call2() - translate a legacy qseecom SCM request into an
 * ARMv8 SMC invocation and issue it via scm_call2().
 *
 * @svc_id:    legacy SCM service id (6 = TZ info query, SCM_SVC_ES,
 *             or SCM_SVC_TZSCHEDULER for QSEOS commands)
 * @tz_cmd_id: legacy command within @svc_id (ignored for TZSCHEDULER,
 *             where the command id is read from the request buffer)
 * @req_buf:   marshalled request; for SCM_SVC_TZSCHEDULER the first
 *             uint32_t is the QSEOS command id selecting the layout
 * @resp_buf:  must point at a struct qseecom_command_scm_resp, filled
 *             from desc.ret[0..2] after the call
 *
 * For requests carrying pointers, the 32-bit or 64-bit ireq layout is
 * chosen by comparing qseecom.qsee_version against QSEE_VERSION_40.
 * Buffers handed to TZ by physical address are staged in a freshly
 * allocated, cache-flushed bounce buffer (tzbuf).
 *
 * Returns 0 on success or a negative errno (-EINVAL for unsupported
 * ids, -ENOMEM on allocation failure, or the scm_call2() result).
 */
static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
			const void *req_buf, void *resp_buf)
{
	int      ret = 0;
	uint32_t smc_id = 0;
	uint32_t qseos_cmd_id = 0;
	struct scm_desc desc = {0};
	struct qseecom_command_scm_resp *scm_resp = NULL;

	if (!req_buf || !resp_buf) {
		pr_err("Invalid buffer pointer\n");
		return -EINVAL;
	}
	/* First word of every QSEOS request is the command id. */
	qseos_cmd_id = *(uint32_t *)req_buf;
	scm_resp = (struct qseecom_command_scm_resp *)resp_buf;

	switch (svc_id) {
	/*
	 * NOTE(review): magic numbers 6/3 — presumably the legacy
	 * SCM_SVC_INFO feature-version query; confirm against scm.h.
	 */
	case 6: {
		if (tz_cmd_id == 3) {
			smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
			desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
			desc.args[0] = *(uint32_t *)req_buf;
		} else {
			pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
				svc_id, tz_cmd_id);
			return -EINVAL;
		}
		ret = scm_call2(smc_id, &desc);
		break;
	}
	case SCM_SVC_ES: {
		switch (tz_cmd_id) {
		case SCM_SAVE_PARTITION_HASH_ID: {
			u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
			struct qseecom_save_partition_hash_req *p_hash_req =
				(struct qseecom_save_partition_hash_req *)
				req_buf;
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			/* NOTE(review): redundant — kzalloc already zeroes */
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, p_hash_req->digest,
				SHA256_DIGEST_LENGTH);
			/* Make the hash visible to TZ before passing its PA */
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
			desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
			desc.args[0] = p_hash_req->partition_id;
			desc.args[1] = virt_to_phys(tzbuf);
			desc.args[2] = SHA256_DIGEST_LENGTH;
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		default: {
			pr_err("tz_cmd_id %d is not supported by scm_call2\n",
						tz_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /* end of switch (tz_cmd_id) */
		break;
	} /* end of case SCM_SVC_ES */
	case SCM_SVC_TZSCHEDULER: {
		switch (qseos_cmd_id) {
		case QSEOS_APP_START_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_START_ID;
			desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
					(struct qseecom_load_app_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_SHUTDOWN_COMMAND: {
			struct qseecom_unload_app_ireq *req;

			req = (struct qseecom_unload_app_ireq *)req_buf;
			smc_id = TZ_OS_APP_SHUTDOWN_ID;
			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
			desc.args[0] = req->app_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_LOOKUP_COMMAND: {
			struct qseecom_check_app_ireq *req;
			u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			req = (struct qseecom_check_app_ireq *)req_buf;
			pr_debug("Lookup app_name = %s\n", req->app_name);
			strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_APP_LOOKUP_ID;
			desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = strlen(req->app_name);
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_APP_REGION_NOTIFICATION: {
			struct qsee_apps_region_info_ireq *req;
			struct qsee_apps_region_info_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
			desc.arginfo =
				TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qsee_apps_region_info_ireq *)
					req_buf;
				desc.args[0] = req->addr;
				desc.args[1] = req->size;
			} else {
				req_64bit =
				(struct qsee_apps_region_info_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->addr;
				desc.args[1] = req_64bit->size;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
			struct qseecom_load_lib_image_ireq *req;
			struct qseecom_load_lib_image_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_lib_image_ireq *)
					req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_lib_image_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_REGISTER_LISTENER: {
			struct qseecom_register_listener_ireq *req;
			struct qseecom_register_listener_64bit_ireq *req_64bit;

			desc.arginfo =
				TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_register_listener_ireq *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->sb_ptr;
				desc.args[2] = req->sb_len;
			} else {
				req_64bit =
				(struct qseecom_register_listener_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->listener_id;
				desc.args[1] = req_64bit->sb_ptr;
				desc.args[2] = req_64bit->sb_len;
			}
			/*
			 * Probe for smcinvoke support: try the smcinvoke
			 * registration id first and fall back to the legacy
			 * id on failure, recording the outcome globally so
			 * CONTINUE_BLOCKED_REQ below uses the matching id.
			 */
			qseecom.smcinvoke_support = true;
			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			if (ret) {
				qseecom.smcinvoke_support = false;
				smc_id = TZ_OS_REGISTER_LISTENER_ID;
				__qseecom_reentrancy_check_if_no_app_blocked(
					smc_id);
				ret = scm_call2(smc_id, &desc);
			}
			break;
		}
		case QSEOS_DEREGISTER_LISTENER: {
			struct qseecom_unregister_listener_ireq *req;

			req = (struct qseecom_unregister_listener_ireq *)
				req_buf;
			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
			struct qseecom_client_listener_data_irsp *req;

			req = (struct qseecom_client_listener_data_irsp *)
				req_buf;
			smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
			desc.arginfo =
				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			desc.args[1] = req->status;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
			struct qseecom_client_listener_data_irsp *req;
			struct qseecom_client_listener_data_64bit_irsp *req_64;

			smc_id =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req =
				(struct qseecom_client_listener_data_irsp *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->status;
				desc.args[2] = req->sglistinfo_ptr;
				desc.args[3] = req->sglistinfo_len;
			} else {
				req_64 =
			(struct qseecom_client_listener_data_64bit_irsp *)
					req_buf;
				desc.args[0] = req_64->listener_id;
				desc.args[1] = req_64->status;
				desc.args[2] = req_64->sglistinfo_ptr;
				desc.args[3] = req_64->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
			/* NOTE(review): reuses the SERVICES image param id;
			 * presumably the argument layouts match — confirm
			 * against qseecomi.h.
			 */
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_app_64bit_ireq *)req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}

		case QSEOS_CLIENT_SEND_DATA_COMMAND: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
			desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
			struct qseecom_client_send_service_ireq *req;

			req = (struct qseecom_client_send_service_ireq *)
				req_buf;
			smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
			desc.args[0] = req->key_type;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_ERASE_COMMAND: {
			smc_id = TZ_OS_RPMB_ERASE_ID;
			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		/*
		 * Key-management commands below all follow one pattern:
		 * copy the payload (request minus its leading command id)
		 * into a cache-flushed bounce buffer and pass its PA + len.
		 */
		case QSEOS_GENERATE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			/* NOTE(review): redundant — kzalloc already zeroes */
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_GEN_KEY_ID;
			desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_DELETE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_DEL_KEY_ID;
			desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_SET_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_select_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_select_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
			desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_UPDATE_KEY_USERINFO: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_UPDATE_KEY_ID;
			desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		/* GP TEE (QTEEC) session commands */
		case QSEOS_TEE_OPEN_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
			desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_CLOSE_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_REQUEST_CANCELLATION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
			desc.arginfo =
				TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
			struct qseecom_continue_blocked_request_ireq *req =
				(struct qseecom_continue_blocked_request_ireq *)
				req_buf;
			/* Must match the id family chosen at listener
			 * registration time (see QSEOS_REGISTER_LISTENER).
			 */
			if (qseecom.smcinvoke_support)
				smc_id =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
			else
				smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
			desc.arginfo =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
			desc.args[0] = req->app_or_session_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		default: {
			pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
						qseos_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /*end of switch (qsee_cmd_id)  */
	break;
	} /*end of case SCM_SVC_TZSCHEDULER*/
	default: {
		pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
					svc_id);
		ret = -EINVAL;
		break;
	}
	} /*end of switch svc_id */
	/* Propagate the three TZ return words to the caller's resp struct. */
	scm_resp->result = desc.ret[0];
	scm_resp->resp_type = desc.ret[1];
	scm_resp->data = desc.ret[2];
	pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
		svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
	pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
		scm_resp->result, scm_resp->resp_type, scm_resp->data);
	return ret;
}
1053
1054
1055static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1056 size_t cmd_len, void *resp_buf, size_t resp_len)
1057{
1058 if (!is_scm_armv8())
1059 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1060 resp_buf, resp_len);
1061 else
1062 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1063}
1064
1065static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
1066 struct qseecom_register_listener_req *svc)
1067{
1068 struct qseecom_registered_listener_list *ptr;
1069 int unique = 1;
1070 unsigned long flags;
1071
1072 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1073 list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
1074 if (ptr->svc.listener_id == svc->listener_id) {
1075 pr_err("Service id: %u is already registered\n",
1076 ptr->svc.listener_id);
1077 unique = 0;
1078 break;
1079 }
1080 }
1081 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1082 return unique;
1083}
1084
1085static struct qseecom_registered_listener_list *__qseecom_find_svc(
1086 int32_t listener_id)
1087{
1088 struct qseecom_registered_listener_list *entry = NULL;
1089 unsigned long flags;
1090
1091 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1092 list_for_each_entry(entry,
1093 &qseecom.registered_listener_list_head, list) {
1094 if (entry->svc.listener_id == listener_id)
1095 break;
1096 }
1097 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1098
1099 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
1100 pr_err("Service id: %u is not found\n", listener_id);
1101 return NULL;
1102 }
1103
1104 return entry;
1105}
1106
1107static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1108 struct qseecom_dev_handle *handle,
1109 struct qseecom_register_listener_req *listener)
1110{
1111 int ret = 0;
1112 struct qseecom_register_listener_ireq req;
1113 struct qseecom_register_listener_64bit_ireq req_64bit;
1114 struct qseecom_command_scm_resp resp;
1115 ion_phys_addr_t pa;
1116 void *cmd_buf = NULL;
1117 size_t cmd_len;
1118
1119 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001120 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001121 listener->ifd_data_fd);
1122 if (IS_ERR_OR_NULL(svc->ihandle)) {
1123 pr_err("Ion client could not retrieve the handle\n");
1124 return -ENOMEM;
1125 }
1126
1127 /* Get the physical address of the ION BUF */
1128 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1129 if (ret) {
1130 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1131 ret);
1132 return ret;
1133 }
1134 /* Populate the structure for sending scm call to load image */
1135 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1136 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1137 pr_err("ION memory mapping for listener shared buffer failed\n");
1138 return -ENOMEM;
1139 }
1140 svc->sb_phys = (phys_addr_t)pa;
1141
1142 if (qseecom.qsee_version < QSEE_VERSION_40) {
1143 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1144 req.listener_id = svc->svc.listener_id;
1145 req.sb_len = svc->sb_length;
1146 req.sb_ptr = (uint32_t)svc->sb_phys;
1147 cmd_buf = (void *)&req;
1148 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1149 } else {
1150 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1151 req_64bit.listener_id = svc->svc.listener_id;
1152 req_64bit.sb_len = svc->sb_length;
1153 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1154 cmd_buf = (void *)&req_64bit;
1155 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1156 }
1157
1158 resp.result = QSEOS_RESULT_INCOMPLETE;
1159
1160 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1161 &resp, sizeof(resp));
1162 if (ret) {
1163 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1164 return -EINVAL;
1165 }
1166
1167 if (resp.result != QSEOS_RESULT_SUCCESS) {
1168 pr_err("Error SB registration req: resp.result = %d\n",
1169 resp.result);
1170 return -EPERM;
1171 }
1172 return 0;
1173}
1174
1175static int qseecom_register_listener(struct qseecom_dev_handle *data,
1176 void __user *argp)
1177{
1178 int ret = 0;
1179 unsigned long flags;
1180 struct qseecom_register_listener_req rcvd_lstnr;
1181 struct qseecom_registered_listener_list *new_entry;
1182
1183 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1184 if (ret) {
1185 pr_err("copy_from_user failed\n");
1186 return ret;
1187 }
1188 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1189 rcvd_lstnr.sb_size))
1190 return -EFAULT;
1191
1192 data->listener.id = 0;
1193 if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
1194 pr_err("Service is not unique and is already registered\n");
1195 data->released = true;
1196 return -EBUSY;
1197 }
1198
1199 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1200 if (!new_entry)
1201 return -ENOMEM;
1202 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
1203 new_entry->rcv_req_flag = 0;
1204
1205 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1206 new_entry->sb_length = rcvd_lstnr.sb_size;
1207 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1208 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
1209 pr_err("qseecom_set_sb_memoryfailed\n");
1210 kzfree(new_entry);
1211 return -ENOMEM;
1212 }
1213
1214 data->listener.id = rcvd_lstnr.listener_id;
1215 init_waitqueue_head(&new_entry->rcv_req_wq);
1216 init_waitqueue_head(&new_entry->listener_block_app_wq);
1217 new_entry->send_resp_flag = 0;
1218 new_entry->listener_in_use = false;
1219 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1220 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
1221 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1222
1223 return ret;
1224}
1225
/*
 * Deregister this handle's listener: tell QSEE to drop it, abort any
 * in-flight ioctls waiting on it, remove it from the listener list, and
 * release its ION shared buffer.
 *
 * NOTE(review): on the early SCM-failure returns the listener entry and
 * its ION buffer are left in place — presumably intentional, since TZ
 * still considers the listener registered; confirm against the caller's
 * release path.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
{
	int ret = 0;
	unsigned long flags;
	uint32_t unmap_mem = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct qseecom_command_scm_resp resp;
	struct ion_handle *ihandle = NULL;		/* Retrieve phy addr */

	/* Ask QSEE to deregister this listener id first */
	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
	req.listener_id = data->listener.id;
	resp.result = QSEOS_RESULT_INCOMPLETE;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
					sizeof(req), &resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
				ret, data->listener.id);
		return ret;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
				resp.result, data->listener.id);
		return -EPERM;
	}

	/* Abort and wake any thread blocked waiting for a request */
	data->abort = 1;
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head,
			list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			wake_up_all(&ptr_svc->rcv_req_wq);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/*
	 * Wait until this handle's other in-flight ioctls have drained
	 * (count 1 is the caller itself) before tearing the entry down.
	 */
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_freezable(data->abort_wq,
				atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
			return ret;
		}
	}

	/* Unlink and free the list entry; remember the ION handle to drop */
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc,
			&qseecom.registered_listener_list_head, list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			if (ptr_svc->sb_virt) {
				unmap_mem = 1;
				ihandle = ptr_svc->ihandle;
			}
			list_del(&ptr_svc->list);
			kzfree(ptr_svc);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/* Unmap the memory */
	if (unmap_mem) {
		if (!IS_ERR_OR_NULL(ihandle)) {
			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
			ion_free(qseecom.ion_clnt, ihandle);
		}
	}
	data->released = true;
	return ret;
}
1299
/*
 * Apply a bus-bandwidth mode: gate the QSEE core source clock off for
 * INACTIVE (on otherwise), then issue the msm-bus scaling request when
 * the mode actually changes.  On a failed bus request the clock change
 * is rolled back.
 *
 * NOTE(review): qseecom.current_mode is updated even when the bus
 * request fails (the assignment sits outside the inner error branch),
 * so a later retry with the same mode will be skipped — confirm whether
 * this is intentional.
 */
static int __qseecom_set_msm_bus_request(uint32_t mode)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qclk->ce_core_src_clk != NULL) {
		if (mode == INACTIVE) {
			__qseecom_disable_clk(CLK_QSEE);
		} else {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				pr_err("CLK enabling failed (%d) MODE (%d)\n",
							ret, mode);
		}
	}

	if ((!ret) && (qseecom.current_mode != mode)) {
		ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, mode);
		if (ret) {
			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
							ret, mode);
			/* Undo the clock transition done above */
			if (qclk->ce_core_src_clk != NULL) {
				if (mode == INACTIVE) {
					ret = __qseecom_enable_clk(CLK_QSEE);
					if (ret)
						pr_err("CLK enable failed\n");
				} else
					__qseecom_disable_clk(CLK_QSEE);
			}
		}
		qseecom.current_mode = mode;
	}
	return ret;
}
1336
/*
 * Workqueue handler scheduled by the bw-scale-down timer: drop the bus
 * request to INACTIVE if the timer is still armed.  Lock order
 * (app_access_lock, then qsee_bw_mutex) matches the rest of the driver
 * and must not be changed.
 */
static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
	mutex_lock(&app_access_lock);
	mutex_lock(&qsee_bw_mutex);
	if (qseecom.timer_running)
		__qseecom_set_msm_bus_request(INACTIVE);
	pr_debug("current_mode = %d, cumulative_mode = %d\n",
				qseecom.current_mode, qseecom.cumulative_mode);
	qseecom.timer_running = false;
	mutex_unlock(&qsee_bw_mutex);
	mutex_unlock(&app_access_lock);
}
1349
/*
 * Timer callback (runs in softirq context): defer the actual bandwidth
 * drop to process context via the bw_inactive_req workqueue item, since
 * it needs to sleep on mutexes.
 */
static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}
1354
1355static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1356{
1357 struct qseecom_clk *qclk;
1358 int ret = 0;
1359
1360 mutex_lock(&clk_access_lock);
1361 if (ce == CLK_QSEE)
1362 qclk = &qseecom.qsee;
1363 else
1364 qclk = &qseecom.ce_drv;
1365
1366 if (qclk->clk_access_cnt > 2) {
1367 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1368 ret = -EINVAL;
1369 goto err_dec_ref_cnt;
1370 }
1371 if (qclk->clk_access_cnt == 2)
1372 qclk->clk_access_cnt--;
1373
1374err_dec_ref_cnt:
1375 mutex_unlock(&clk_access_lock);
1376 return ret;
1377}
1378
1379
/*
 * Apply a bandwidth mode and disarm the scale-down timer.  mode == 0
 * means "re-derive from the cumulative client votes" (capped at HIGH);
 * otherwise the given mode is requested directly.  If the timer was
 * armed, its extra clock reference is released and the timer cancelled.
 * Runs under qsee_bw_mutex.
 */
static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
{
	int32_t ret = 0;
	int32_t request_mode = INACTIVE;

	mutex_lock(&qsee_bw_mutex);
	if (mode == 0) {
		/* Derive the effective mode from all clients' votes */
		if (qseecom.cumulative_mode > MEDIUM)
			request_mode = HIGH;
		else
			request_mode = qseecom.cumulative_mode;
	} else {
		request_mode = mode;
	}

	ret = __qseecom_set_msm_bus_request(request_mode);
	if (ret) {
		pr_err("set msm bus request failed (%d),request_mode (%d)\n",
			ret, request_mode);
		goto err_scale_timer;
	}

	if (qseecom.timer_running) {
		/* Release the reference held on behalf of the timer */
		ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
		if (ret) {
			pr_err("Failed to decrease clk ref count.\n");
			goto err_scale_timer;
		}
		del_timer_sync(&(qseecom.bw_scale_down_timer));
		qseecom.timer_running = false;
	}
err_scale_timer:
	mutex_unlock(&qsee_bw_mutex);
	return ret;
}
1415
1416
1417static int qseecom_unregister_bus_bandwidth_needs(
1418 struct qseecom_dev_handle *data)
1419{
1420 int32_t ret = 0;
1421
1422 qseecom.cumulative_mode -= data->mode;
1423 data->mode = INACTIVE;
1424
1425 return ret;
1426}
1427
1428static int __qseecom_register_bus_bandwidth_needs(
1429 struct qseecom_dev_handle *data, uint32_t request_mode)
1430{
1431 int32_t ret = 0;
1432
1433 if (data->mode == INACTIVE) {
1434 qseecom.cumulative_mode += request_mode;
1435 data->mode = request_mode;
1436 } else {
1437 if (data->mode != request_mode) {
1438 qseecom.cumulative_mode -= data->mode;
1439 qseecom.cumulative_mode += request_mode;
1440 data->mode = request_mode;
1441 }
1442 }
1443 return ret;
1444}
1445
1446static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1447{
1448 int ret = 0;
1449
1450 ret = qsee_vote_for_clock(data, CLK_DFAB);
1451 if (ret) {
1452 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1453 goto perf_enable_exit;
1454 }
1455 ret = qsee_vote_for_clock(data, CLK_SFPB);
1456 if (ret) {
1457 qsee_disable_clock_vote(data, CLK_DFAB);
1458 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1459 goto perf_enable_exit;
1460 }
1461
1462perf_enable_exit:
1463 return ret;
1464}
1465
1466static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1467 void __user *argp)
1468{
1469 int32_t ret = 0;
1470 int32_t req_mode;
1471
1472 if (qseecom.no_clock_support)
1473 return 0;
1474
1475 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1476 if (ret) {
1477 pr_err("copy_from_user failed\n");
1478 return ret;
1479 }
1480 if (req_mode > HIGH) {
1481 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1482 return -EINVAL;
1483 }
1484
1485 /*
1486 * Register bus bandwidth needs if bus scaling feature is enabled;
1487 * otherwise, qseecom enable/disable clocks for the client directly.
1488 */
1489 if (qseecom.support_bus_scaling) {
1490 mutex_lock(&qsee_bw_mutex);
1491 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1492 mutex_unlock(&qsee_bw_mutex);
1493 } else {
1494 pr_debug("Bus scaling feature is NOT enabled\n");
1495 pr_debug("request bandwidth mode %d for the client\n",
1496 req_mode);
1497 if (req_mode != INACTIVE) {
1498 ret = qseecom_perf_enable(data);
1499 if (ret)
1500 pr_err("Failed to vote for clock with err %d\n",
1501 ret);
1502 } else {
1503 qsee_disable_clock_vote(data, CLK_DFAB);
1504 qsee_disable_clock_vote(data, CLK_SFPB);
1505 }
1506 }
1507 return ret;
1508}
1509
1510static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1511{
1512 if (qseecom.no_clock_support)
1513 return;
1514
1515 mutex_lock(&qsee_bw_mutex);
1516 qseecom.bw_scale_down_timer.expires = jiffies +
1517 msecs_to_jiffies(duration);
1518 mod_timer(&(qseecom.bw_scale_down_timer),
1519 qseecom.bw_scale_down_timer.expires);
1520 qseecom.timer_running = true;
1521 mutex_unlock(&qsee_bw_mutex);
1522}
1523
1524static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1525{
1526 if (!qseecom.support_bus_scaling)
1527 qsee_disable_clock_vote(data, CLK_SFPB);
1528 else
1529 __qseecom_add_bw_scale_down_timer(
1530 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1531}
1532
1533static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1534{
1535 int ret = 0;
1536
1537 if (qseecom.support_bus_scaling) {
1538 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1539 if (ret)
1540 pr_err("Failed to set bw MEDIUM.\n");
1541 } else {
1542 ret = qsee_vote_for_clock(data, CLK_SFPB);
1543 if (ret)
1544 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1545 }
1546 return ret;
1547}
1548
1549static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1550 void __user *argp)
1551{
1552 ion_phys_addr_t pa;
1553 int32_t ret;
1554 struct qseecom_set_sb_mem_param_req req;
1555 size_t len;
1556
1557 /* Copy the relevant information needed for loading the image */
1558 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1559 return -EFAULT;
1560
1561 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1562 (req.sb_len == 0)) {
1563 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1564 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1565 return -EFAULT;
1566 }
1567 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1568 req.sb_len))
1569 return -EFAULT;
1570
1571 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001572 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001573 req.ifd_data_fd);
1574 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1575 pr_err("Ion client could not retrieve the handle\n");
1576 return -ENOMEM;
1577 }
1578 /* Get the physical address of the ION BUF */
1579 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1580 if (ret) {
1581
1582 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1583 ret);
1584 return ret;
1585 }
1586
1587 if (len < req.sb_len) {
1588 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1589 req.sb_len, len);
1590 return -EINVAL;
1591 }
1592 /* Populate the structure for sending scm call to load image */
1593 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1594 data->client.ihandle);
1595 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1596 pr_err("ION memory mapping for client shared buf failed\n");
1597 return -ENOMEM;
1598 }
1599 data->client.sb_phys = (phys_addr_t)pa;
1600 data->client.sb_length = req.sb_len;
1601 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1602 return 0;
1603}
1604
1605static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data)
1606{
1607 int ret;
1608
1609 ret = (qseecom.send_resp_flag != 0);
1610 return ret || data->abort;
1611}
1612
1613static int __qseecom_reentrancy_listener_has_sent_rsp(
1614 struct qseecom_dev_handle *data,
1615 struct qseecom_registered_listener_list *ptr_svc)
1616{
1617 int ret;
1618
1619 ret = (ptr_svc->send_resp_flag != 0);
1620 return ret || data->abort;
1621}
1622
1623static int __qseecom_qseos_fail_return_resp_tz(struct qseecom_dev_handle *data,
1624 struct qseecom_command_scm_resp *resp,
1625 struct qseecom_client_listener_data_irsp *send_data_rsp,
1626 struct qseecom_registered_listener_list *ptr_svc,
1627 uint32_t lstnr) {
1628 int ret = 0;
1629
1630 send_data_rsp->status = QSEOS_RESULT_FAILURE;
1631 qseecom.send_resp_flag = 0;
1632 send_data_rsp->qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND;
1633 send_data_rsp->listener_id = lstnr;
1634 if (ptr_svc)
1635 pr_warn("listener_id:%x, lstnr: %x\n",
1636 ptr_svc->svc.listener_id, lstnr);
1637 if (ptr_svc && ptr_svc->ihandle) {
1638 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ptr_svc->ihandle,
1639 ptr_svc->sb_virt, ptr_svc->sb_length,
1640 ION_IOC_CLEAN_INV_CACHES);
1641 if (ret) {
1642 pr_err("cache operation failed %d\n", ret);
1643 return ret;
1644 }
1645 }
1646
1647 if (lstnr == RPMB_SERVICE) {
1648 ret = __qseecom_enable_clk(CLK_QSEE);
1649 if (ret)
1650 return ret;
1651 }
1652 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, send_data_rsp,
1653 sizeof(send_data_rsp), resp, sizeof(*resp));
1654 if (ret) {
1655 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1656 ret, data->client.app_id);
1657 if (lstnr == RPMB_SERVICE)
1658 __qseecom_disable_clk(CLK_QSEE);
1659 return ret;
1660 }
1661 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1662 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1663 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1664 resp->result, data->client.app_id, lstnr);
1665 ret = -EINVAL;
1666 }
1667 if (lstnr == RPMB_SERVICE)
1668 __qseecom_disable_clk(CLK_QSEE);
1669 return ret;
1670}
1671
1672static void __qseecom_clean_listener_sglistinfo(
1673 struct qseecom_registered_listener_list *ptr_svc)
1674{
1675 if (ptr_svc->sglist_cnt) {
1676 memset(ptr_svc->sglistinfo_ptr, 0,
1677 SGLISTINFO_TABLE_SIZE);
1678 ptr_svc->sglist_cnt = 0;
1679 }
1680}
1681
/*
 * Drive the QSEOS_RESULT_INCOMPLETE loop for a client command: each
 * iteration wakes the listener named in resp->data, blocks (with all
 * signals masked) until that listener posts its response or the client
 * aborts, then returns the listener's status to TZ via a
 * LISTENER_DATA_RSP command and re-examines the new result.
 *
 * NOTE(review): after the list walk, ptr_svc can never be NULL — an
 * exhausted list_for_each_entry() leaves the cursor at container_of()
 * of the list head — so the "ptr_svc == NULL" branch is dead and the
 * no-match case is only caught by the later listener_id comparison,
 * which reads through the head-container pointer.  Needs the same
 * found-flag treatment as __qseecom_find_svc.
 *
 * Returns 0 on success, rc (-ENODEV) when the client aborted, or a
 * negative errno on SCM/validation failure.
 */
static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp;
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up blocking lsitener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		/* NOTE(review): dead branch — see function header */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -EINVAL;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -EINVAL;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_warn("Service requested does not exist\n");
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, NULL, lstnr);
			return -ERESTARTSYS;
		}
		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);
		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		do {
			/*
			 * When reentrancy is not supported, check global
			 * send_resp_flag; otherwise, check this listener's
			 * send_resp_flag.
			 */
			if (!qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_listener_has_sent_rsp(data))) {
				break;
			}

			if (qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		if (data->abort) {
			/* Client is being torn down: report failure to TZ */
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}

		qseecom.send_resp_flag = 0;
		ptr_svc->send_resp_flag = 0;
		table = ptr_svc->sglistinfo_ptr;
		/* Build the response in the layout this QSEE version expects */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id = lstnr;
			send_data_rsp.status = status;
			send_data_rsp.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
			send_data_rsp.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id = lstnr;
			send_data_rsp_64bit.status = status;
			send_data_rsp_64bit.sglistinfo_ptr =
				virt_to_phys(table);
			send_data_rsp_64bit.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* First field of both layouts is the command id */
		if (qseecom.whitelist_support == false)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		if (ptr_svc) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}

		/* RPMB/SSD listeners need the QSEE clock held for the call */
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		ptr_svc->listener_in_use = false;
		__qseecom_clean_listener_sglistinfo(ptr_svc);
		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
				__qseecom_disable_clk(CLK_QSEE);
			return ret;
		}
		if ((resp->result != QSEOS_RESULT_SUCCESS) &&
			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
		}
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
			__qseecom_disable_clk(CLK_QSEE);

	}
	/* An abort (rc) takes precedence over the last SCM result */
	if (rc)
		return rc;

	return ret;
}
1850
/*
 * Legacy (pre-smcinvoke) handling of QSEOS_RESULT_BLOCKED_ON_LISTENER:
 * the TZ app identified by @data is blocked on the listener named in
 * resp->data.  Sleep — dropping app_access_lock so the listener's own
 * ioctls can progress — until the listener is free, then send
 * CONTINUE_BLOCKED_REQ (keyed by app id) and rewrite the result to
 * INCOMPLETE so the caller resumes normal incomplete-cmd processing.
 *
 * Caller must hold app_access_lock; it is dropped and re-taken around
 * each wait.
 *
 * NOTE(review): on the interrupted (-ERESTARTSYS) exit the function
 * returns without re-taking app_access_lock and without undoing the
 * app_block_ref_cnt++/app_blocked update — confirm the caller's unwind
 * accounts for this.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int __qseecom_process_blocked_on_listener_legacy(
				struct qseecom_command_scm_resp *resp,
				struct qseecom_registered_app_list *ptr_app,
				struct qseecom_dev_handle *data)
{
	struct qseecom_registered_listener_list *list_ptr;
	int ret = 0;
	struct qseecom_continue_blocked_request_ireq ireq;
	struct qseecom_command_scm_resp continue_resp;
	bool found_app = false;
	unsigned long flags;

	if (!resp || !data) {
		pr_err("invalid resp or data pointer\n");
		ret = -EINVAL;
		goto exit;
	}

	/* find app_id & img_name from list */
	if (!ptr_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
					list) {
			if ((ptr_app->app_id == data->client.app_id) &&
				(!strcmp(ptr_app->app_name,
						data->client.app_name))) {
				found_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
					flags);
		if (!found_app) {
			pr_err("app_id %d (%s) is not found\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -ENOENT;
			goto exit;
		}
	}

	list_ptr = __qseecom_find_svc(resp->data);
	if (!list_ptr) {
		pr_err("Invalid listener ID\n");
		ret = -ENODATA;
		goto exit;
	}
	pr_debug("lsntr %d in_use = %d\n",
			resp->data, list_ptr->listener_in_use);
	ptr_app->blocked_on_listener_id = resp->data;

	/* sleep until listener is available */
	do {
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		/* Drop the global lock so the listener thread can run */
		mutex_unlock(&app_access_lock);
		if (wait_event_freezable(
			list_ptr->listener_block_app_wq,
			!list_ptr->listener_in_use)) {
			pr_err("Interrupted: listener_id %d, app_id %d\n",
				resp->data, ptr_app->app_id);
			ret = -ERESTARTSYS;
			goto exit;
		}
		mutex_lock(&app_access_lock);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
	} while (list_ptr->listener_in_use);

	ptr_app->blocked_on_listener_id = 0;
	/* notify the blocked app that listener is available */
	pr_warn("Lsntr %d is available, unblock app(%d) %s in TZ\n",
		resp->data, data->client.app_id,
		data->client.app_name);
	ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
	ireq.app_or_session_id = data->client.app_id;
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
			&ireq, sizeof(ireq),
			&continue_resp, sizeof(continue_resp));
	if (ret) {
		pr_err("scm_call for continue blocked req for app(%d) %s failed, ret %d\n",
			data->client.app_id,
			data->client.app_name, ret);
		goto exit;
	}
	/*
	 * After TZ app is unblocked, then continue to next case
	 * for incomplete request processing
	 */
	resp->result = QSEOS_RESULT_INCOMPLETE;
exit:
	return ret;
}
1944
/*
 * smcinvoke-era handling of QSEOS_RESULT_BLOCKED_ON_LISTENER: the
 * blocked entity is identified by the TZ session id carried in
 * resp->resp_type rather than by app id.  Sleep (dropping
 * app_access_lock) until the listener in resp->data is free, then send
 * CONTINUE_BLOCKED_REQ keyed by session id; if TZ rejects that, retry
 * once in legacy mode keyed by @app_id, temporarily clearing
 * qseecom.smcinvoke_support so the SMC id selection in
 * qseecom_scm_call2 picks the legacy command.
 *
 * Caller must hold app_access_lock; it is dropped and re-taken around
 * each wait.
 *
 * NOTE(review): as in the legacy variant, the interrupted exit leaves
 * app_access_lock unheld and app_block_ref_cnt incremented — confirm
 * the caller's unwind accounts for this.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int __qseecom_process_blocked_on_listener_smcinvoke(
			struct qseecom_command_scm_resp *resp, uint32_t app_id)
{
	struct qseecom_registered_listener_list *list_ptr;
	int ret = 0;
	struct qseecom_continue_blocked_request_ireq ireq;
	struct qseecom_command_scm_resp continue_resp;
	unsigned int session_id;

	if (!resp) {
		pr_err("invalid resp pointer\n");
		ret = -EINVAL;
		goto exit;
	}
	session_id = resp->resp_type;
	list_ptr = __qseecom_find_svc(resp->data);
	if (!list_ptr) {
		pr_err("Invalid listener ID\n");
		ret = -ENODATA;
		goto exit;
	}
	pr_debug("lsntr %d in_use = %d\n",
			resp->data, list_ptr->listener_in_use);
	/* sleep until listener is available */
	do {
		qseecom.app_block_ref_cnt++;
		/* Drop the global lock so the listener thread can run */
		mutex_unlock(&app_access_lock);
		if (wait_event_freezable(
			list_ptr->listener_block_app_wq,
			!list_ptr->listener_in_use)) {
			pr_err("Interrupted: listener_id %d, session_id %d\n",
				resp->data, session_id);
			ret = -ERESTARTSYS;
			goto exit;
		}
		mutex_lock(&app_access_lock);
		qseecom.app_block_ref_cnt--;
	} while (list_ptr->listener_in_use);

	/* notify TZ that listener is available */
	pr_warn("Lsntr %d is available, unblock session(%d) in TZ\n",
			resp->data, session_id);
	ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
	ireq.app_or_session_id = session_id;
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
			&ireq, sizeof(ireq),
			&continue_resp, sizeof(continue_resp));
	if (ret) {
		/* retry with legacy cmd */
		qseecom.smcinvoke_support = false;
		ireq.app_or_session_id = app_id;
		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
			&ireq, sizeof(ireq),
			&continue_resp, sizeof(continue_resp));
		qseecom.smcinvoke_support = true;
		if (ret) {
			pr_err("cont block req for app %d or session %d fail\n",
				app_id, session_id);
			goto exit;
		}
	}
	resp->result = QSEOS_RESULT_INCOMPLETE;
exit:
	return ret;
}
2010
2011static int __qseecom_process_reentrancy_blocked_on_listener(
2012 struct qseecom_command_scm_resp *resp,
2013 struct qseecom_registered_app_list *ptr_app,
2014 struct qseecom_dev_handle *data)
2015{
2016 if (!qseecom.smcinvoke_support)
2017 return __qseecom_process_blocked_on_listener_legacy(
2018 resp, ptr_app, data);
2019 else
2020 return __qseecom_process_blocked_on_listener_smcinvoke(
Zhen Konge7f525f2017-12-01 18:26:25 -08002021 resp, data->client.app_id);
Zhen Kong2f60f492017-06-29 15:22:14 -07002022}
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002023static int __qseecom_reentrancy_process_incomplete_cmd(
2024 struct qseecom_dev_handle *data,
2025 struct qseecom_command_scm_resp *resp)
2026{
2027 int ret = 0;
2028 int rc = 0;
2029 uint32_t lstnr;
2030 unsigned long flags;
2031 struct qseecom_client_listener_data_irsp send_data_rsp;
2032 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
2033 struct qseecom_registered_listener_list *ptr_svc = NULL;
2034 sigset_t new_sigset;
2035 sigset_t old_sigset;
2036 uint32_t status;
2037 void *cmd_buf = NULL;
2038 size_t cmd_len;
2039 struct sglist_info *table = NULL;
2040
2041 while (ret == 0 && rc == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
2042 lstnr = resp->data;
2043 /*
2044 * Wake up blocking lsitener service with the lstnr id
2045 */
2046 spin_lock_irqsave(&qseecom.registered_listener_list_lock,
2047 flags);
2048 list_for_each_entry(ptr_svc,
2049 &qseecom.registered_listener_list_head, list) {
2050 if (ptr_svc->svc.listener_id == lstnr) {
2051 ptr_svc->listener_in_use = true;
2052 ptr_svc->rcv_req_flag = 1;
2053 wake_up_interruptible(&ptr_svc->rcv_req_wq);
2054 break;
2055 }
2056 }
2057 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
2058 flags);
2059
2060 if (ptr_svc == NULL) {
2061 pr_err("Listener Svc %d does not exist\n", lstnr);
2062 return -EINVAL;
2063 }
2064
2065 if (!ptr_svc->ihandle) {
2066 pr_err("Client handle is not initialized\n");
2067 return -EINVAL;
2068 }
2069
2070 if (ptr_svc->svc.listener_id != lstnr) {
2071 pr_warn("Service requested does not exist\n");
2072 return -ERESTARTSYS;
2073 }
2074 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
2075
2076 /* initialize the new signal mask with all signals*/
2077 sigfillset(&new_sigset);
2078
2079 /* block all signals */
2080 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2081
2082 /* unlock mutex btw waking listener and sleep-wait */
2083 mutex_unlock(&app_access_lock);
2084 do {
2085 if (!wait_event_freezable(qseecom.send_resp_wq,
2086 __qseecom_reentrancy_listener_has_sent_rsp(
2087 data, ptr_svc))) {
2088 break;
2089 }
2090 } while (1);
2091 /* lock mutex again after resp sent */
2092 mutex_lock(&app_access_lock);
2093 ptr_svc->send_resp_flag = 0;
2094 qseecom.send_resp_flag = 0;
2095
2096 /* restore signal mask */
2097 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2098 if (data->abort) {
2099 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
2100 data->client.app_id, lstnr, ret);
2101 rc = -ENODEV;
2102 status = QSEOS_RESULT_FAILURE;
2103 } else {
2104 status = QSEOS_RESULT_SUCCESS;
2105 }
2106 table = ptr_svc->sglistinfo_ptr;
2107 if (qseecom.qsee_version < QSEE_VERSION_40) {
2108 send_data_rsp.listener_id = lstnr;
2109 send_data_rsp.status = status;
2110 send_data_rsp.sglistinfo_ptr =
2111 (uint32_t)virt_to_phys(table);
2112 send_data_rsp.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
2113 dmac_flush_range((void *)table,
2114 (void *)table + SGLISTINFO_TABLE_SIZE);
2115 cmd_buf = (void *)&send_data_rsp;
2116 cmd_len = sizeof(send_data_rsp);
2117 } else {
2118 send_data_rsp_64bit.listener_id = lstnr;
2119 send_data_rsp_64bit.status = status;
2120 send_data_rsp_64bit.sglistinfo_ptr =
2121 virt_to_phys(table);
2122 send_data_rsp_64bit.sglistinfo_len =
2123 SGLISTINFO_TABLE_SIZE;
2124 dmac_flush_range((void *)table,
2125 (void *)table + SGLISTINFO_TABLE_SIZE);
2126 cmd_buf = (void *)&send_data_rsp_64bit;
2127 cmd_len = sizeof(send_data_rsp_64bit);
2128 }
2129 if (qseecom.whitelist_support == false)
2130 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
2131 else
2132 *(uint32_t *)cmd_buf =
2133 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
2134 if (ptr_svc) {
2135 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
2136 ptr_svc->ihandle,
2137 ptr_svc->sb_virt, ptr_svc->sb_length,
2138 ION_IOC_CLEAN_INV_CACHES);
2139 if (ret) {
2140 pr_err("cache operation failed %d\n", ret);
2141 return ret;
2142 }
2143 }
2144 if (lstnr == RPMB_SERVICE) {
2145 ret = __qseecom_enable_clk(CLK_QSEE);
2146 if (ret)
2147 return ret;
2148 }
2149
2150 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
2151 cmd_buf, cmd_len, resp, sizeof(*resp));
2152 ptr_svc->listener_in_use = false;
2153 __qseecom_clean_listener_sglistinfo(ptr_svc);
2154 wake_up_interruptible(&ptr_svc->listener_block_app_wq);
2155
2156 if (ret) {
2157 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
2158 ret, data->client.app_id);
2159 goto exit;
2160 }
2161
2162 switch (resp->result) {
2163 case QSEOS_RESULT_BLOCKED_ON_LISTENER:
2164 pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
2165 lstnr, data->client.app_id, resp->data);
2166 if (lstnr == resp->data) {
2167 pr_err("lstnr %d should not be blocked!\n",
2168 lstnr);
2169 ret = -EINVAL;
2170 goto exit;
2171 }
2172 ret = __qseecom_process_reentrancy_blocked_on_listener(
2173 resp, NULL, data);
2174 if (ret) {
2175 pr_err("failed to process App(%d) %s blocked on listener %d\n",
2176 data->client.app_id,
2177 data->client.app_name, resp->data);
2178 goto exit;
2179 }
2180 case QSEOS_RESULT_SUCCESS:
2181 case QSEOS_RESULT_INCOMPLETE:
2182 break;
2183 default:
2184 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
2185 resp->result, data->client.app_id, lstnr);
2186 ret = -EINVAL;
2187 goto exit;
2188 }
2189exit:
2190 if (lstnr == RPMB_SERVICE)
2191 __qseecom_disable_clk(CLK_QSEE);
2192
2193 }
2194 if (rc)
2195 return rc;
2196
2197 return ret;
2198}
2199
2200/*
2201 * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
2202 * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
2203 * So, needs to first check if no app blocked before sending OS level scm call,
2204 * then wait until all apps are unblocked.
2205 */
2206static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2207{
2208 sigset_t new_sigset, old_sigset;
2209
2210 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2211 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2212 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2213 /* thread sleep until this app unblocked */
2214 while (qseecom.app_block_ref_cnt > 0) {
2215 sigfillset(&new_sigset);
2216 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2217 mutex_unlock(&app_access_lock);
2218 do {
2219 if (!wait_event_freezable(qseecom.app_block_wq,
2220 (qseecom.app_block_ref_cnt == 0)))
2221 break;
2222 } while (1);
2223 mutex_lock(&app_access_lock);
2224 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2225 }
2226 }
2227}
2228
2229/*
2230 * scm_call of send data will fail if this TA is blocked or there are more
2231 * than one TA requesting listener services; So, first check to see if need
2232 * to wait.
2233 */
2234static void __qseecom_reentrancy_check_if_this_app_blocked(
2235 struct qseecom_registered_app_list *ptr_app)
2236{
2237 sigset_t new_sigset, old_sigset;
2238
2239 if (qseecom.qsee_reentrancy_support) {
2240 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2241 /* thread sleep until this app unblocked */
2242 sigfillset(&new_sigset);
2243 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2244 mutex_unlock(&app_access_lock);
2245 do {
2246 if (!wait_event_freezable(qseecom.app_block_wq,
2247 (!ptr_app->app_blocked &&
2248 qseecom.app_block_ref_cnt <= 1)))
2249 break;
2250 } while (1);
2251 mutex_lock(&app_access_lock);
2252 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2253 }
2254 }
2255}
2256
2257static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2258 uint32_t *app_id)
2259{
2260 int32_t ret;
2261 struct qseecom_command_scm_resp resp;
2262 bool found_app = false;
2263 struct qseecom_registered_app_list *entry = NULL;
2264 unsigned long flags = 0;
2265
2266 if (!app_id) {
2267 pr_err("Null pointer to app_id\n");
2268 return -EINVAL;
2269 }
2270 *app_id = 0;
2271
2272 /* check if app exists and has been registered locally */
2273 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2274 list_for_each_entry(entry,
2275 &qseecom.registered_app_list_head, list) {
2276 if (!strcmp(entry->app_name, req.app_name)) {
2277 found_app = true;
2278 break;
2279 }
2280 }
2281 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2282 if (found_app) {
2283 pr_debug("Found app with id %d\n", entry->app_id);
2284 *app_id = entry->app_id;
2285 return 0;
2286 }
2287
2288 memset((void *)&resp, 0, sizeof(resp));
2289
2290 /* SCM_CALL to check if app_id for the mentioned app exists */
2291 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2292 sizeof(struct qseecom_check_app_ireq),
2293 &resp, sizeof(resp));
2294 if (ret) {
2295 pr_err("scm_call to check if app is already loaded failed\n");
2296 return -EINVAL;
2297 }
2298
2299 if (resp.result == QSEOS_RESULT_FAILURE)
2300 return 0;
2301
2302 switch (resp.resp_type) {
2303 /*qsee returned listener type response */
2304 case QSEOS_LISTENER_ID:
2305 pr_err("resp type is of listener type instead of app");
2306 return -EINVAL;
2307 case QSEOS_APP_ID:
2308 *app_id = resp.data;
2309 return 0;
2310 default:
2311 pr_err("invalid resp type (%d) from qsee",
2312 resp.resp_type);
2313 return -ENODEV;
2314 }
2315}
2316
2317static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2318{
2319 struct qseecom_registered_app_list *entry = NULL;
2320 unsigned long flags = 0;
2321 u32 app_id = 0;
2322 struct ion_handle *ihandle; /* Ion handle */
2323 struct qseecom_load_img_req load_img_req;
2324 int32_t ret = 0;
2325 ion_phys_addr_t pa = 0;
2326 size_t len;
2327 struct qseecom_command_scm_resp resp;
2328 struct qseecom_check_app_ireq req;
2329 struct qseecom_load_app_ireq load_req;
2330 struct qseecom_load_app_64bit_ireq load_req_64bit;
2331 void *cmd_buf = NULL;
2332 size_t cmd_len;
2333 bool first_time = false;
2334
2335 /* Copy the relevant information needed for loading the image */
2336 if (copy_from_user(&load_img_req,
2337 (void __user *)argp,
2338 sizeof(struct qseecom_load_img_req))) {
2339 pr_err("copy_from_user failed\n");
2340 return -EFAULT;
2341 }
2342
2343 /* Check and load cmnlib */
2344 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2345 if (!qseecom.commonlib_loaded &&
2346 load_img_req.app_arch == ELFCLASS32) {
2347 ret = qseecom_load_commonlib_image(data, "cmnlib");
2348 if (ret) {
2349 pr_err("failed to load cmnlib\n");
2350 return -EIO;
2351 }
2352 qseecom.commonlib_loaded = true;
2353 pr_debug("cmnlib is loaded\n");
2354 }
2355
2356 if (!qseecom.commonlib64_loaded &&
2357 load_img_req.app_arch == ELFCLASS64) {
2358 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2359 if (ret) {
2360 pr_err("failed to load cmnlib64\n");
2361 return -EIO;
2362 }
2363 qseecom.commonlib64_loaded = true;
2364 pr_debug("cmnlib64 is loaded\n");
2365 }
2366 }
2367
2368 if (qseecom.support_bus_scaling) {
2369 mutex_lock(&qsee_bw_mutex);
2370 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2371 mutex_unlock(&qsee_bw_mutex);
2372 if (ret)
2373 return ret;
2374 }
2375
2376 /* Vote for the SFPB clock */
2377 ret = __qseecom_enable_clk_scale_up(data);
2378 if (ret)
2379 goto enable_clk_err;
2380
2381 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2382 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2383 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2384
2385 ret = __qseecom_check_app_exists(req, &app_id);
2386 if (ret < 0)
2387 goto loadapp_err;
2388
2389 if (app_id) {
2390 pr_debug("App id %d (%s) already exists\n", app_id,
2391 (char *)(req.app_name));
2392 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2393 list_for_each_entry(entry,
2394 &qseecom.registered_app_list_head, list){
2395 if (entry->app_id == app_id) {
2396 entry->ref_cnt++;
2397 break;
2398 }
2399 }
2400 spin_unlock_irqrestore(
2401 &qseecom.registered_app_list_lock, flags);
2402 ret = 0;
2403 } else {
2404 first_time = true;
2405 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2406 (char *)(load_img_req.img_name));
2407 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002408 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002409 load_img_req.ifd_data_fd);
2410 if (IS_ERR_OR_NULL(ihandle)) {
2411 pr_err("Ion client could not retrieve the handle\n");
2412 ret = -ENOMEM;
2413 goto loadapp_err;
2414 }
2415
2416 /* Get the physical address of the ION BUF */
2417 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2418 if (ret) {
2419 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2420 ret);
2421 goto loadapp_err;
2422 }
2423 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2424 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2425 len, load_img_req.mdt_len,
2426 load_img_req.img_len);
2427 ret = -EINVAL;
2428 goto loadapp_err;
2429 }
2430 /* Populate the structure for sending scm call to load image */
2431 if (qseecom.qsee_version < QSEE_VERSION_40) {
2432 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2433 load_req.mdt_len = load_img_req.mdt_len;
2434 load_req.img_len = load_img_req.img_len;
2435 strlcpy(load_req.app_name, load_img_req.img_name,
2436 MAX_APP_NAME_SIZE);
2437 load_req.phy_addr = (uint32_t)pa;
2438 cmd_buf = (void *)&load_req;
2439 cmd_len = sizeof(struct qseecom_load_app_ireq);
2440 } else {
2441 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2442 load_req_64bit.mdt_len = load_img_req.mdt_len;
2443 load_req_64bit.img_len = load_img_req.img_len;
2444 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2445 MAX_APP_NAME_SIZE);
2446 load_req_64bit.phy_addr = (uint64_t)pa;
2447 cmd_buf = (void *)&load_req_64bit;
2448 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2449 }
2450
2451 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2452 ION_IOC_CLEAN_INV_CACHES);
2453 if (ret) {
2454 pr_err("cache operation failed %d\n", ret);
2455 goto loadapp_err;
2456 }
2457
2458 /* SCM_CALL to load the app and get the app_id back */
2459 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2460 cmd_len, &resp, sizeof(resp));
2461 if (ret) {
2462 pr_err("scm_call to load app failed\n");
2463 if (!IS_ERR_OR_NULL(ihandle))
2464 ion_free(qseecom.ion_clnt, ihandle);
2465 ret = -EINVAL;
2466 goto loadapp_err;
2467 }
2468
2469 if (resp.result == QSEOS_RESULT_FAILURE) {
2470 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2471 if (!IS_ERR_OR_NULL(ihandle))
2472 ion_free(qseecom.ion_clnt, ihandle);
2473 ret = -EFAULT;
2474 goto loadapp_err;
2475 }
2476
2477 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2478 ret = __qseecom_process_incomplete_cmd(data, &resp);
2479 if (ret) {
2480 pr_err("process_incomplete_cmd failed err: %d\n",
2481 ret);
2482 if (!IS_ERR_OR_NULL(ihandle))
2483 ion_free(qseecom.ion_clnt, ihandle);
2484 ret = -EFAULT;
2485 goto loadapp_err;
2486 }
2487 }
2488
2489 if (resp.result != QSEOS_RESULT_SUCCESS) {
2490 pr_err("scm_call failed resp.result unknown, %d\n",
2491 resp.result);
2492 if (!IS_ERR_OR_NULL(ihandle))
2493 ion_free(qseecom.ion_clnt, ihandle);
2494 ret = -EFAULT;
2495 goto loadapp_err;
2496 }
2497
2498 app_id = resp.data;
2499
2500 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2501 if (!entry) {
2502 ret = -ENOMEM;
2503 goto loadapp_err;
2504 }
2505 entry->app_id = app_id;
2506 entry->ref_cnt = 1;
2507 entry->app_arch = load_img_req.app_arch;
2508 /*
2509 * keymaster app may be first loaded as "keymaste" by qseecomd,
2510 * and then used as "keymaster" on some targets. To avoid app
2511 * name checking error, register "keymaster" into app_list and
2512 * thread private data.
2513 */
2514 if (!strcmp(load_img_req.img_name, "keymaste"))
2515 strlcpy(entry->app_name, "keymaster",
2516 MAX_APP_NAME_SIZE);
2517 else
2518 strlcpy(entry->app_name, load_img_req.img_name,
2519 MAX_APP_NAME_SIZE);
2520 entry->app_blocked = false;
2521 entry->blocked_on_listener_id = 0;
2522
2523 /* Deallocate the handle */
2524 if (!IS_ERR_OR_NULL(ihandle))
2525 ion_free(qseecom.ion_clnt, ihandle);
2526
2527 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2528 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2529 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2530 flags);
2531
2532 pr_warn("App with id %u (%s) now loaded\n", app_id,
2533 (char *)(load_img_req.img_name));
2534 }
2535 data->client.app_id = app_id;
2536 data->client.app_arch = load_img_req.app_arch;
2537 if (!strcmp(load_img_req.img_name, "keymaste"))
2538 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2539 else
2540 strlcpy(data->client.app_name, load_img_req.img_name,
2541 MAX_APP_NAME_SIZE);
2542 load_img_req.app_id = app_id;
2543 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2544 pr_err("copy_to_user failed\n");
2545 ret = -EFAULT;
2546 if (first_time == true) {
2547 spin_lock_irqsave(
2548 &qseecom.registered_app_list_lock, flags);
2549 list_del(&entry->list);
2550 spin_unlock_irqrestore(
2551 &qseecom.registered_app_list_lock, flags);
2552 kzfree(entry);
2553 }
2554 }
2555
2556loadapp_err:
2557 __qseecom_disable_clk_scale_down(data);
2558enable_clk_err:
2559 if (qseecom.support_bus_scaling) {
2560 mutex_lock(&qsee_bw_mutex);
2561 qseecom_unregister_bus_bandwidth_needs(data);
2562 mutex_unlock(&qsee_bw_mutex);
2563 }
2564 return ret;
2565}
2566
/*
 * Pre-unload drain: wake every waiter, then wait until all of this
 * handle's in-flight ioctls (other than the caller's own) have finished.
 * Returns 1 ("go ahead and unload") normally, -ERESTARTSYS on signal.
 */
static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
{
	int ret = 1;	/* Set unload app */

	wake_up_all(&qseecom.send_resp_wq);
	/* with reentrancy, other threads need app_access_lock to drain */
	if (qseecom.qsee_reentrancy_support)
		mutex_unlock(&app_access_lock);
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_freezable(data->abort_wq,
					atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
			break;
		}
	}
	if (qseecom.qsee_reentrancy_support)
		mutex_lock(&app_access_lock);
	return ret;
}
2586
2587static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2588{
2589 int ret = 0;
2590
2591 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2592 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2593 ion_free(qseecom.ion_clnt, data->client.ihandle);
2594 data->client.ihandle = NULL;
2595 }
2596 return ret;
2597}
2598
/*
 * Unload a trusted app from QSEE and/or drop the local reference held by
 * this device handle.  The keymaster app is never unloaded from TZ.  The
 * app is shut down in TZ and removed from the registered list only when
 * app_crash is set or the last reference goes away; otherwise only the
 * ref count is decremented.  Always releases this handle's ion mapping
 * and marks the handle released.
 */
static int qseecom_unload_app(struct qseecom_dev_handle *data,
				bool app_crash)
{
	unsigned long flags;
	unsigned long flags1;
	int ret = 0;
	struct qseecom_command_scm_resp resp;
	struct qseecom_registered_app_list *ptr_app = NULL;
	bool unload = false;
	bool found_app = false;
	bool found_dead_app = false;

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
		pr_debug("Do not unload keymaster app from tz\n");
		goto unload_exit;
	}

	__qseecom_cleanup_app(data);
	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);

	if (data->client.app_id > 0) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
									list) {
			if (ptr_app->app_id == data->client.app_id) {
				if (!strcmp((void *)ptr_app->app_name,
					(void *)data->client.app_name)) {
					found_app = true;
					/* never force-unload a blocked app */
					if (ptr_app->app_blocked)
						app_crash = false;
					if (app_crash || ptr_app->ref_cnt == 1)
						unload = true;
					break;
				}
				/* same id, different name: stale dead entry */
				found_dead_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags);
		if (found_app == false && found_dead_app == false) {
			pr_err("Cannot find app with id = %d (%s)\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -EINVAL;
			goto unload_exit;
		}
	}

	if (found_dead_app)
		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
			(char *)data->client.app_name);

	if (unload) {
		struct qseecom_unload_app_ireq req;
		/* Populate the structure for sending scm call to load image */
		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
		req.app_id = data->client.app_id;

		/* SCM_CALL to unload the app */
		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(struct qseecom_unload_app_ireq),
				&resp, sizeof(resp));
		if (ret) {
			pr_err("scm_call to unload app (id = %d) failed\n",
								req.app_id);
			ret = -EFAULT;
			goto unload_exit;
		} else {
			pr_warn("App id %d now unloaded\n", req.app_id);
		}
		if (resp.result == QSEOS_RESULT_FAILURE) {
			pr_err("app (%d) unload_failed!!\n",
					data->client.app_id);
			ret = -EFAULT;
			goto unload_exit;
		}
		if (resp.result == QSEOS_RESULT_SUCCESS)
			pr_debug("App (%d) is unloaded!!\n",
					data->client.app_id);
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			/* TZ needs listener service before finishing unload */
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd fail err: %d\n",
									ret);
				goto unload_exit;
			}
		}
	}

	if (found_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
		if (app_crash) {
			ptr_app->ref_cnt = 0;
			pr_debug("app_crash: ref_count = 0\n");
		} else {
			if (ptr_app->ref_cnt == 1) {
				ptr_app->ref_cnt = 0;
				pr_debug("ref_count set to 0\n");
			} else {
				ptr_app->ref_cnt--;
				pr_debug("Can't unload app(%d) inuse\n",
					ptr_app->app_id);
			}
		}
		if (unload) {
			list_del(&ptr_app->list);
			kzfree(ptr_app);
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags1);
	}
unload_exit:
	qseecom_unmap_ion_allocated_memory(data);
	data->released = true;
	return ret;
}
2721
2722static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2723 unsigned long virt)
2724{
2725 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2726}
2727
2728static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2729 unsigned long virt)
2730{
2731 return (uintptr_t)data->client.sb_virt +
2732 (virt - data->client.user_virt_sb_base);
2733}
2734
2735int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2736 struct qseecom_send_svc_cmd_req *req_ptr,
2737 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2738{
2739 int ret = 0;
2740 void *req_buf = NULL;
2741
2742 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2743 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2744 req_ptr, send_svc_ireq_ptr);
2745 return -EINVAL;
2746 }
2747
2748 /* Clients need to ensure req_buf is at base offset of shared buffer */
2749 if ((uintptr_t)req_ptr->cmd_req_buf !=
2750 data_ptr->client.user_virt_sb_base) {
2751 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2752 return -EINVAL;
2753 }
2754
2755 if (data_ptr->client.sb_length <
2756 sizeof(struct qseecom_rpmb_provision_key)) {
2757 pr_err("shared buffer is too small to hold key type\n");
2758 return -EINVAL;
2759 }
2760 req_buf = data_ptr->client.sb_virt;
2761
2762 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2763 send_svc_ireq_ptr->key_type =
2764 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2765 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2766 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2767 data_ptr, (uintptr_t)req_ptr->resp_buf));
2768 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2769
2770 return ret;
2771}
2772
2773int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2774 struct qseecom_send_svc_cmd_req *req_ptr,
2775 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2776{
2777 int ret = 0;
2778 uint32_t reqd_len_sb_in = 0;
2779
2780 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2781 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2782 req_ptr, send_svc_ireq_ptr);
2783 return -EINVAL;
2784 }
2785
2786 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2787 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2788 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2789 pr_err("Required: %u, Available: %zu\n",
2790 reqd_len_sb_in, data_ptr->client.sb_length);
2791 return -ENOMEM;
2792 }
2793
2794 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2795 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2796 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2797 data_ptr, (uintptr_t)req_ptr->resp_buf));
2798 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2799
2800 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2801 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2802
2803
2804 return ret;
2805}
2806
2807static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
2808 struct qseecom_send_svc_cmd_req *req)
2809{
2810 if (!req || !req->resp_buf || !req->cmd_req_buf) {
2811 pr_err("req or cmd buffer or response buffer is null\n");
2812 return -EINVAL;
2813 }
2814
2815 if (!data || !data->client.ihandle) {
2816 pr_err("Client or client handle is not initialized\n");
2817 return -EINVAL;
2818 }
2819
2820 if (data->client.sb_virt == NULL) {
2821 pr_err("sb_virt null\n");
2822 return -EINVAL;
2823 }
2824
2825 if (data->client.user_virt_sb_base == 0) {
2826 pr_err("user_virt_sb_base is null\n");
2827 return -EINVAL;
2828 }
2829
2830 if (data->client.sb_length == 0) {
2831 pr_err("sb_length is 0\n");
2832 return -EINVAL;
2833 }
2834
2835 if (((uintptr_t)req->cmd_req_buf <
2836 data->client.user_virt_sb_base) ||
2837 ((uintptr_t)req->cmd_req_buf >=
2838 (data->client.user_virt_sb_base + data->client.sb_length))) {
2839 pr_err("cmd buffer address not within shared bufffer\n");
2840 return -EINVAL;
2841 }
2842 if (((uintptr_t)req->resp_buf <
2843 data->client.user_virt_sb_base) ||
2844 ((uintptr_t)req->resp_buf >=
2845 (data->client.user_virt_sb_base + data->client.sb_length))) {
2846 pr_err("response buffer address not within shared bufffer\n");
2847 return -EINVAL;
2848 }
2849 if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
2850 (req->cmd_req_len > data->client.sb_length) ||
2851 (req->resp_len > data->client.sb_length)) {
2852 pr_err("cmd buf length or response buf length not valid\n");
2853 return -EINVAL;
2854 }
2855 if (req->cmd_req_len > UINT_MAX - req->resp_len) {
2856 pr_err("Integer overflow detected in req_len & rsp_len\n");
2857 return -EINVAL;
2858 }
2859
2860 if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
2861 pr_debug("Not enough memory to fit cmd_buf.\n");
2862 pr_debug("resp_buf. Required: %u, Available: %zu\n",
2863 (req->cmd_req_len + req->resp_len),
2864 data->client.sb_length);
2865 return -ENOMEM;
2866 }
2867 if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
2868 pr_err("Integer overflow in req_len & cmd_req_buf\n");
2869 return -EINVAL;
2870 }
2871 if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
2872 pr_err("Integer overflow in resp_len & resp_buf\n");
2873 return -EINVAL;
2874 }
2875 if (data->client.user_virt_sb_base >
2876 (ULONG_MAX - data->client.sb_length)) {
2877 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
2878 return -EINVAL;
2879 }
2880 if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
2881 ((uintptr_t)data->client.user_virt_sb_base +
2882 data->client.sb_length)) ||
2883 (((uintptr_t)req->resp_buf + req->resp_len) >
2884 ((uintptr_t)data->client.user_virt_sb_base +
2885 data->client.sb_length))) {
2886 pr_err("cmd buf or resp buf is out of shared buffer region\n");
2887 return -EINVAL;
2888 }
2889 return 0;
2890}
2891
/*
 * Handler for the SEND_SVC_CMD ioctl: validate the userspace request,
 * marshal it into the proper QSEE service request (RPMB or FSM-key
 * flavor), vote bandwidth/clocks, flush the shared buffer, issue the scm
 * call, and translate the TZ result code into an errno for userspace.
 */
static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	int ret = 0;
	struct qseecom_client_send_service_ireq send_svc_ireq;
	struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
	struct qseecom_command_scm_resp resp;
	struct qseecom_send_svc_cmd_req req;
	void *send_req_ptr;
	size_t req_buf_size;

	/*struct qseecom_command_scm_resp resp;*/

	if (copy_from_user(&req,
				(void __user *)argp,
				sizeof(req))) {
		pr_err("copy_from_user failed\n");
		return -EFAULT;
	}

	if (__validate_send_service_cmd_inputs(data, &req))
		return -EINVAL;

	data->type = QSEECOM_SECURE_SERVICE;

	/* pick the ireq layout matching the requested service */
	switch (req.cmd_id) {
	case QSEOS_RPMB_PROVISION_KEY_COMMAND:
	case QSEOS_RPMB_ERASE_COMMAND:
	case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
		send_req_ptr = &send_svc_ireq;
		req_buf_size = sizeof(send_svc_ireq);
		if (__qseecom_process_rpmb_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	case QSEOS_FSM_LTEOTA_REQ_CMD:
	case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
	case QSEOS_FSM_IKE_REQ_CMD:
	case QSEOS_FSM_IKE_REQ_RSP_CMD:
	case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
	case QSEOS_FSM_OEM_FUSE_READ_ROW:
	case QSEOS_FSM_ENCFS_REQ_CMD:
	case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
		send_req_ptr = &send_fsm_key_svc_ireq;
		req_buf_size = sizeof(send_fsm_key_svc_ireq);
		if (__qseecom_process_fsm_key_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	default:
		pr_err("Unsupported cmd_id %d\n", req.cmd_id);
		return -EINVAL;
	}

	/* vote for bandwidth/clocks before touching TZ */
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(HIGH);
		if (ret) {
			pr_err("Fail to set bw HIGH\n");
			return ret;
		}
	} else {
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clocks with err %d\n", ret);
			goto exit;
		}
	}

	/* make shared-buffer contents visible to TZ */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				(const void *)send_req_ptr,
				req_buf_size, &resp, sizeof(resp));
	if (ret) {
		pr_err("qseecom_scm_call failed with err: %d\n", ret);
		if (!qseecom.support_bus_scaling) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		} else {
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		}
		goto exit;
	}
	/* invalidate so the CPU sees TZ's writes to the shared buffer */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		pr_debug("qseos_result_incomplete\n");
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd fail with result: %d\n",
				resp.result);
		}
		if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
			/* prov-status check reports its status via resp_buf */
			pr_warn("RPMB key status is 0x%x\n", resp.result);
			if (put_user(resp.result,
				(uint32_t __user *)req.resp_buf)) {
				ret = -EINVAL;
				goto exit;
			}
			ret = 0;
		}
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed with resp.result: %d\n", resp.result);
		ret = -EINVAL;
		break;
	default:
		pr_err("Response result %d not supported\n",
				resp.result);
		ret = -EINVAL;
		break;
	}
	if (!qseecom.support_bus_scaling) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	} else {
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
	}

exit:
	return ret;
}
3029
/*
 * Validate a client send-cmd request before it is handed to the secure
 * world: both the command buffer and the response buffer must lie entirely
 * within the client's registered shared buffer, and none of the pointer or
 * length arithmetic may wrap.
 *
 * @data: client handle; supplies the shared buffer base/length to check
 *        against.
 * @req:  user-supplied request holding cmd/resp buffer pointers and lengths.
 *
 * Return: 0 if the request is acceptable, -EINVAL on any validation
 * failure, -ENOMEM if cmd_req_len + resp_len does not fit in the shared
 * buffer.
 */
static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)

{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}
	/* A NULL response buffer is tolerated only when resp_len is 0. */
	if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
						(req->cmd_req_buf == NULL)) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	/*
	 * NOTE(review): a NULL resp_buf (allowed above with resp_len == 0)
	 * cannot pass this range check, so the zero-length/NULL combination
	 * is effectively rejected here — confirm whether that is intended.
	 */
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* Guard the cmd_req_len + resp_len sum computed below. */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	/* Guard the ptr + len additions in the final containment check. */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* Both buffers must END inside the shared buffer as well. */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
3099
/*
 * Post-process an SCM response when QSEE reentrancy is supported.
 *
 * @resp:    response from the secure world; resp->result selects the action.
 * @ptr_app: registered-app entry for the caller, marked blocked while an
 *           incomplete command is being processed.
 * @data:    client handle (used for logging and listener processing).
 *
 * Return: 0 on success, negative errno on failure.
 */
int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
					struct qseecom_registered_app_list *ptr_app,
					struct qseecom_dev_handle *data)
{
	int ret = 0;

	switch (resp->result) {
	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
		pr_warn("App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name,
			resp->data);
		ret = __qseecom_process_reentrancy_blocked_on_listener(
					resp, ptr_app, data);
		if (ret) {
			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name, resp->data);
			return ret;
		}
		/*
		 * fall through: once the listener block is resolved, the
		 * command is still incomplete and must be driven to
		 * completion below.
		 */
	case QSEOS_RESULT_INCOMPLETE:
		/* Block further sends to this app while TZ finishes the cmd */
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
		/* Wake any sender waiting for this app to unblock */
		wake_up_interruptible(&qseecom.app_block_wq);
		if (ret)
			pr_err("process_incomplete_cmd failed err: %d\n",
					ret);
		return ret;
	case QSEOS_RESULT_SUCCESS:
		return ret;
	default:
		pr_err("Response result %d not supported\n",
						resp->result);
		return -EINVAL;
	}
}
3138
/*
 * Send a pre-validated command to a loaded TZ app via an SCM call.
 *
 * The caller must have run __validate_send_cmd_inputs() first: buffer
 * pointers/lengths are trusted here. Builds either the legacy 32-bit or the
 * 64-bit ireq depending on qsee_version, flushes the shared buffer and the
 * sglist info table, issues the SCM call, then post-processes the response
 * (reentrancy path or the simple incomplete/success path).
 *
 * Return: 0 on success; negative errno on lookup, cache-op, SCM, or
 * response-processing failure.
 */
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)
{
	int ret = 0;
	int ret2 = 0;
	u32 reqd_len_sb_in = 0;
	struct qseecom_client_send_data_ireq send_data_req = {0};
	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
	struct qseecom_command_scm_resp resp;
	unsigned long flags;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;

	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
							list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);

	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* Legacy ireq: 32-bit physical addresses. */
		send_data_req.app_id = data->client.app_id;
		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->cmd_req_buf));
		send_data_req.req_len = req->cmd_req_len;
		send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->resp_buf));
		send_data_req.rsp_len = req->resp_len;
		send_data_req.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* TZ reads the table through physical address: flush it. */
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req;
		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
	} else {
		send_data_req_64bit.app_id = data->client.app_id;
		send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->cmd_req_buf);
		send_data_req_64bit.req_len = req->cmd_req_len;
		send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->resp_buf);
		send_data_req_64bit.rsp_len = req->resp_len;
		/* check if 32bit app's phys_addr region is under 4GB.*/
		if ((data->client.app_arch == ELFCLASS32) &&
			((send_data_req_64bit.req_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
			(send_data_req_64bit.rsp_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
				data->client.app_name,
				send_data_req_64bit.req_ptr,
				send_data_req_64bit.req_len,
				send_data_req_64bit.rsp_ptr,
				send_data_req_64bit.rsp_len);
			return -EFAULT;
		}
		send_data_req_64bit.sglistinfo_ptr =
				(uint64_t)virt_to_phys(table);
		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req_64bit;
		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
	}

	/* The command id is the first u32 of both ireq layouts. */
	if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
	else
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;

	/* Make the request visible to the secure world before the call. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
			ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/*
	 * Invalidate the shared buffer on every path (including errors) so
	 * the CPU sees whatever TZ wrote; preserve the original error in
	 * 'ret' unless the cache op itself fails.
	 */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret2) {
		pr_err("cache operation failed %d\n", ret2);
		return ret2;
	}
	return ret;
}
3276
3277static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3278{
3279 int ret = 0;
3280 struct qseecom_send_cmd_req req;
3281
3282 ret = copy_from_user(&req, argp, sizeof(req));
3283 if (ret) {
3284 pr_err("copy_from_user failed\n");
3285 return ret;
3286 }
3287
3288 if (__validate_send_cmd_inputs(data, &req))
3289 return -EINVAL;
3290
3291 ret = __qseecom_send_cmd(data, &req);
3292
3293 if (ret)
3294 return ret;
3295
3296 return ret;
3297}
3298
3299int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3300 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3301 struct qseecom_dev_handle *data, int i) {
3302
3303 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3304 (req->ifd_data[i].fd > 0)) {
3305 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3306 (req->ifd_data[i].cmd_buf_offset >
3307 req->cmd_req_len - sizeof(uint32_t))) {
3308 pr_err("Invalid offset (req len) 0x%x\n",
3309 req->ifd_data[i].cmd_buf_offset);
3310 return -EINVAL;
3311 }
3312 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3313 (lstnr_resp->ifd_data[i].fd > 0)) {
3314 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3315 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3316 lstnr_resp->resp_len - sizeof(uint32_t))) {
3317 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3318 lstnr_resp->ifd_data[i].cmd_buf_offset);
3319 return -EINVAL;
3320 }
3321 }
3322 return 0;
3323}
3324
/*
 * Patch (or un-patch) ion-fd physical addresses into a modfd command or
 * listener-response buffer, 32-bit sg-entry layout.
 *
 * For every populated ifd_data[] slot: import the ion buffer, and at
 * cmd_buf_offset write either a single 32-bit phys addr (one sg entry) or
 * an array of qseecom_sg_entry records (multiple entries). With
 * @cleanup true the same locations are zeroed and the buffer's cache is
 * invalidated instead; with @cleanup false the cache is cleaned and the
 * per-fd sglistinfo table is recorded for the subsequent SCM call.
 *
 * @msg:     struct qseecom_send_modfd_cmd_req * (client app) or
 *           struct qseecom_send_modfd_listener_resp * (listener).
 * @cleanup: false = populate before send; true = scrub after send.
 * @data:    client handle, decides which of the two layouts @msg uses.
 *
 * Return: 0 on success; -EFAULT/-EINVAL on bad caller state; -ENOMEM on
 * any import/sg/boundary failure (note: all 'goto err' paths collapse to
 * -ENOMEM even when the underlying cause differs).
 */
static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* 'field' is where the phys addr / sg entries get written. */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
				(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_err("Num of scattered entries");
			pr_err(" (%d) is greater than max supported %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: patch one 32-bit phys address. */
			uint32_t *update;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				/*
				 * Check if sg list phy add region is under 4GB
				 */
				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
					(!cleanup) &&
					((uint64_t)sg_dma_address(sg_ptr->sgl)
					>= PHY_ADDR_4G - sg->length)) {
					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
						data->client.app_name,
						&(sg_dma_address(sg_ptr->sgl)),
						sg->length);
					goto err;
				}
				update = (uint32_t *) field;
				*update = cleanup ? 0 :
					(uint32_t)sg_dma_address(sg_ptr->sgl);
			} else {
				pr_err("QSEE app arch %u is not supported\n",
					data->client.app_arch);
				goto err;
			}
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: patch an sg-entry array. */
			struct qseecom_sg_entry *update;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
						(req->cmd_req_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
						SG_ENTRY_SZ * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					goto err;
				}
			}
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				update = (struct qseecom_sg_entry *)field;
				for (j = 0; j < sg_ptr->nents; j++) {
					/*
					 * Check if sg list PA is under 4GB
					 */
					if ((qseecom.qsee_version >=
						QSEE_VERSION_40) &&
						(!cleanup) &&
						((uint64_t)(sg_dma_address(sg))
						>= PHY_ADDR_4G - sg->length)) {
						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
							data->client.app_name,
							&(sg_dma_address(sg)),
							sg->length);
						goto err;
					}
					update->phys_addr = cleanup ? 0 :
						(uint32_t)sg_dma_address(sg);
					update->len = cleanup ? 0 : sg->length;
					update++;
					len += sg->length;
					sg = sg_next(sg);
				}
			} else {
				pr_err("QSEE app arch %u is not supported\n",
					data->client.app_arch);
				goto err;
			}
		}

		if (cleanup) {
			/* TZ may have written the buffer: invalidate. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Push our writes out so TZ sees them. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* Record sglist info for the whitelist SCM call. */
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3541
3542static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3543 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3544{
3545 struct scatterlist *sg = sg_ptr->sgl;
3546 struct qseecom_sg_entry_64bit *sg_entry;
3547 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3548 void *buf;
3549 uint i;
3550 size_t size;
3551 dma_addr_t coh_pmem;
3552
3553 if (fd_idx >= MAX_ION_FD) {
3554 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3555 return -ENOMEM;
3556 }
3557 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3558 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3559 /* Allocate a contiguous kernel buffer */
3560 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3561 size = (size + PAGE_SIZE) & PAGE_MASK;
3562 buf = dma_alloc_coherent(qseecom.pdev,
3563 size, &coh_pmem, GFP_KERNEL);
3564 if (buf == NULL) {
3565 pr_err("failed to alloc memory for sg buf\n");
3566 return -ENOMEM;
3567 }
3568 /* update qseecom_sg_list_buf_hdr_64bit */
3569 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3570 buf_hdr->new_buf_phys_addr = coh_pmem;
3571 buf_hdr->nents_total = sg_ptr->nents;
3572 /* save the left sg entries into new allocated buf */
3573 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3574 for (i = 0; i < sg_ptr->nents; i++) {
3575 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3576 sg_entry->len = sg->length;
3577 sg_entry++;
3578 sg = sg_next(sg);
3579 }
3580
3581 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3582 data->client.sec_buf_fd[fd_idx].vbase = buf;
3583 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3584 data->client.sec_buf_fd[fd_idx].size = size;
3585
3586 return 0;
3587}
3588
/*
 * 64-bit counterpart of __qseecom_update_cmd_buf(): patch ion-fd physical
 * addresses into a modfd command or listener-response buffer using the
 * 64-bit sg-entry layout.
 *
 * Differences from the 32-bit variant: single-entry patches are a 64-bit
 * address; when nents exceeds QSEECOM_MAX_SG_ENTRY the entries are spilled
 * into a DMA-coherent side buffer via __qseecom_allocate_sg_list_buffer()
 * (freed again on the @cleanup pass); and the err path frees any such side
 * buffers already allocated.
 *
 * @msg:     struct qseecom_send_modfd_cmd_req * (client app) or
 *           struct qseecom_send_modfd_listener_resp * (listener).
 * @cleanup: false = populate before send; true = scrub/free after send.
 * @data:    client handle, decides which layout @msg uses.
 *
 * Return: 0 on success; -EFAULT/-EINVAL on bad caller state; -ENOMEM on
 * any import/sg/boundary/allocation failure.
 */
static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* 'field' is where the address / header gets written. */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
				(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_warn("Num of scattered entries");
			pr_warn(" (%d) is greater than %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			/* Too many entries: spill to a side buffer (or free
			 * it again on the cleanup pass). */
			if (cleanup) {
				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
					data->client.sec_buf_fd[i].vbase)
					dma_free_coherent(qseecom.pdev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			} else {
				ret = __qseecom_allocate_sg_list_buffer(data,
						field, i, sg_ptr);
				if (ret) {
					pr_err("Failed to allocate sg list buffer\n");
					goto err;
				}
			}
			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
			sg = sg_ptr->sgl;
			goto cleanup;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			uint64_t *update_64bit;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			/* 64bit app uses 64bit address */
			update_64bit = (uint64_t *) field;
			*update_64bit = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg_ptr->sgl);
			len += (uint32_t)sg->length;
		} else {
			struct qseecom_sg_entry_64bit *update_64bit;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
					(req->cmd_req_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
					SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					goto err;
				}
			}
			/* 64bit app uses 64bit address */
			update_64bit = (struct qseecom_sg_entry_64bit *)field;
			for (j = 0; j < sg_ptr->nents; j++) {
				update_64bit->phys_addr = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg);
				update_64bit->len = cleanup ? 0 :
						(uint32_t)sg->length;
				update_64bit++;
				len += sg->length;
				sg = sg_next(sg);
			}
		}
cleanup:
		if (cleanup) {
			/* TZ may have written the buffer: invalidate. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Push our writes out so TZ sees them. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* Record sglist info for the whitelist SCM call. */
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	/* Free any side buffers allocated on earlier iterations. */
	for (i = 0; i < MAX_ION_FD; i++)
		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
			data->client.sec_buf_fd[i].vbase)
			dma_free_coherent(qseecom.pdev,
				data->client.sec_buf_fd[i].size,
				data->client.sec_buf_fd[i].vbase,
				data->client.sec_buf_fd[i].pbase);
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3787
3788static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3789 void __user *argp,
3790 bool is_64bit_addr)
3791{
3792 int ret = 0;
3793 int i;
3794 struct qseecom_send_modfd_cmd_req req;
3795 struct qseecom_send_cmd_req send_cmd_req;
3796
3797 ret = copy_from_user(&req, argp, sizeof(req));
3798 if (ret) {
3799 pr_err("copy_from_user failed\n");
3800 return ret;
3801 }
3802
3803 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3804 send_cmd_req.cmd_req_len = req.cmd_req_len;
3805 send_cmd_req.resp_buf = req.resp_buf;
3806 send_cmd_req.resp_len = req.resp_len;
3807
3808 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3809 return -EINVAL;
3810
3811 /* validate offsets */
3812 for (i = 0; i < MAX_ION_FD; i++) {
3813 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3814 pr_err("Invalid offset %d = 0x%x\n",
3815 i, req.ifd_data[i].cmd_buf_offset);
3816 return -EINVAL;
3817 }
3818 }
3819 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3820 (uintptr_t)req.cmd_req_buf);
3821 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3822 (uintptr_t)req.resp_buf);
3823
3824 if (!is_64bit_addr) {
3825 ret = __qseecom_update_cmd_buf(&req, false, data);
3826 if (ret)
3827 return ret;
3828 ret = __qseecom_send_cmd(data, &send_cmd_req);
3829 if (ret)
3830 return ret;
3831 ret = __qseecom_update_cmd_buf(&req, true, data);
3832 if (ret)
3833 return ret;
3834 } else {
3835 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3836 if (ret)
3837 return ret;
3838 ret = __qseecom_send_cmd(data, &send_cmd_req);
3839 if (ret)
3840 return ret;
3841 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3842 if (ret)
3843 return ret;
3844 }
3845
3846 return ret;
3847}
3848
/* ioctl entry: send a modfd command using the 32-bit sg-entry layout. */
static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, false);
}
3854
/* ioctl entry: send a modfd command using the 64-bit sg-entry layout. */
static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, true);
}
3860
3861
3862
3863static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3864 struct qseecom_registered_listener_list *svc)
3865{
3866 int ret;
3867
3868 ret = (svc->rcv_req_flag != 0);
3869 return ret || data->abort;
3870}
3871
3872static int qseecom_receive_req(struct qseecom_dev_handle *data)
3873{
3874 int ret = 0;
3875 struct qseecom_registered_listener_list *this_lstnr;
3876
3877 this_lstnr = __qseecom_find_svc(data->listener.id);
3878 if (!this_lstnr) {
3879 pr_err("Invalid listener ID\n");
3880 return -ENODATA;
3881 }
3882
3883 while (1) {
3884 if (wait_event_freezable(this_lstnr->rcv_req_wq,
3885 __qseecom_listener_has_rcvd_req(data,
3886 this_lstnr))) {
3887 pr_debug("Interrupted: exiting Listener Service = %d\n",
3888 (uint32_t)data->listener.id);
3889 /* woken up for different reason */
3890 return -ERESTARTSYS;
3891 }
3892
3893 if (data->abort) {
3894 pr_err("Aborting Listener Service = %d\n",
3895 (uint32_t)data->listener.id);
3896 return -ENODEV;
3897 }
3898 this_lstnr->rcv_req_flag = 0;
3899 break;
3900 }
3901 return ret;
3902}
3903
3904static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
3905{
3906 unsigned char app_arch = 0;
3907 struct elf32_hdr *ehdr;
3908 struct elf64_hdr *ehdr64;
3909
3910 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3911
3912 switch (app_arch) {
3913 case ELFCLASS32: {
3914 ehdr = (struct elf32_hdr *)fw_entry->data;
3915 if (fw_entry->size < sizeof(*ehdr)) {
3916 pr_err("%s: Not big enough to be an elf32 header\n",
3917 qseecom.pdev->init_name);
3918 return false;
3919 }
3920 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
3921 pr_err("%s: Not an elf32 header\n",
3922 qseecom.pdev->init_name);
3923 return false;
3924 }
3925 if (ehdr->e_phnum == 0) {
3926 pr_err("%s: No loadable segments\n",
3927 qseecom.pdev->init_name);
3928 return false;
3929 }
3930 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
3931 sizeof(struct elf32_hdr) > fw_entry->size) {
3932 pr_err("%s: Program headers not within mdt\n",
3933 qseecom.pdev->init_name);
3934 return false;
3935 }
3936 break;
3937 }
3938 case ELFCLASS64: {
3939 ehdr64 = (struct elf64_hdr *)fw_entry->data;
3940 if (fw_entry->size < sizeof(*ehdr64)) {
3941 pr_err("%s: Not big enough to be an elf64 header\n",
3942 qseecom.pdev->init_name);
3943 return false;
3944 }
3945 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
3946 pr_err("%s: Not an elf64 header\n",
3947 qseecom.pdev->init_name);
3948 return false;
3949 }
3950 if (ehdr64->e_phnum == 0) {
3951 pr_err("%s: No loadable segments\n",
3952 qseecom.pdev->init_name);
3953 return false;
3954 }
3955 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
3956 sizeof(struct elf64_hdr) > fw_entry->size) {
3957 pr_err("%s: Program headers not within mdt\n",
3958 qseecom.pdev->init_name);
3959 return false;
3960 }
3961 break;
3962 }
3963 default: {
3964 pr_err("QSEE app arch %u is not supported\n", app_arch);
3965 return false;
3966 }
3967 }
3968 return true;
3969}
3970
/*
 * Compute the total size of a split TZ app firmware: the <app>.mdt header
 * plus every <app>.bNN segment it references, and report the app's ELF
 * class.
 *
 * @appname:  base firmware name (without extension).
 * @fw_size:  out: summed size of mdt + all segment files; zeroed on error.
 * @app_arch: out: ELFCLASS32 or ELFCLASS64 from the mdt's e_ident.
 *
 * Return: 0 on success, -EIO/-EINVAL on request or validation failure.
 */
static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
					uint32_t *app_arch)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		pr_err("error with request_firmware\n");
		ret = -EIO;
		goto err;
	}
	if (!__qseecom_is_fw_image_valid(fw_entry)) {
		ret = -EIO;
		goto err;
	}
	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	*fw_size = fw_entry->size;
	/* e_phnum tells how many .bNN segment files to expect. */
	if (*app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (*app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
					appname, *app_arch);
		ret = -EIO;
		goto err;
	}
	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
	release_firmware(fw_entry);
	fw_entry = NULL;
	/* Sum each segment's size; guard the u32 accumulator. */
	for (i = 0; i < num_images; i++) {
		memset(fw_name, 0, sizeof(fw_name));
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret)
			goto err;
		if (*fw_size > U32_MAX - fw_entry->size) {
			pr_err("QSEE %s app file size overflow\n", appname);
			ret = -EINVAL;
			goto err;
		}
		*fw_size += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}

	/* num_images >= 1 here, so ret is 0 from the last request. */
	return ret;
err:
	if (fw_entry)
		release_firmware(fw_entry);
	*fw_size = 0;
	return ret;
}
4033
/*
 * Copy all firmware images of TZ app @appname into the caller-supplied
 * contiguous buffer @img_data (capacity @fw_size bytes, as previously
 * computed by __qseecom_get_fw_size()).
 *
 * The .mdt metadata image is copied first, followed by each
 * "<appname>.bNN" segment blob in program-header order.  @load_req is
 * filled in with mdt_len (size of the .mdt alone) and img_len (total
 * bytes copied).
 *
 * Returns 0 on success, negative errno on failure.
 */
static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
					uint32_t fw_size,
					struct qseecom_load_app_ireq *load_req)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	u8 *img_data_ptr = img_data;	/* write cursor into img_data */
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;
	unsigned char app_arch = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		ret = -EIO;
		goto err;
	}

	load_req->img_len = fw_entry->size;
	/* Re-validate against the buffer capacity before copying */
	if (load_req->img_len > fw_size) {
		pr_err("app %s size %zu is larger than buf size %u\n",
			appname, fw_entry->size, fw_size);
		ret = -EINVAL;
		goto err;
	}
	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
	img_data_ptr = img_data_ptr + fw_entry->size;
	load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/

	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	if (app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
			appname, app_arch);
		ret = -EIO;
		goto err;
	}
	release_firmware(fw_entry);
	fw_entry = NULL;
	/* Append each segment blob, guarding overflow and capacity */
	for (i = 0; i < num_images; i++) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret) {
			pr_err("Failed to locate blob %s\n", fw_name);
			goto err;
		}
		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
			(fw_entry->size + load_req->img_len > fw_size)) {
			pr_err("Invalid file size for %s\n", fw_name);
			ret = -EINVAL;
			goto err;
		}
		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
		img_data_ptr = img_data_ptr + fw_entry->size;
		load_req->img_len += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	return ret;
err:
	/* release_firmware(NULL) is a no-op, so this is safe on all paths */
	release_firmware(fw_entry);
	return ret;
}
4105
/*
 * Allocate a 4K-aligned ION buffer of @fw_size bytes from the QSECOM
 * heap for staging firmware images, and map it into the kernel.
 *
 * On success returns 0 and sets @pihandle (ION handle for later
 * freeing), @data (kernel virtual address) and @paddr (physical
 * address, suitable for passing to TZ via SCM).  On failure returns a
 * negative errno and releases anything partially acquired.
 */
static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
			u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
{
	size_t len = 0;
	int ret = 0;
	ion_phys_addr_t pa;
	struct ion_handle *ihandle = NULL;
	u8 *img_data = NULL;

	ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
			SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);

	if (IS_ERR_OR_NULL(ihandle)) {
		pr_err("ION alloc failed\n");
		return -ENOMEM;
	}
	img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
					ihandle);

	if (IS_ERR_OR_NULL(img_data)) {
		pr_err("ION memory mapping for image loading failed\n");
		ret = -ENOMEM;
		goto exit_ion_free;
	}
	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
	if (ret) {
		pr_err("physical memory retrieval failure\n");
		ret = -EIO;
		goto exit_ion_unmap_kernel;
	}

	/* Publish outputs only after every step has succeeded */
	*pihandle = ihandle;
	*data = img_data;
	*paddr = pa;
	return ret;

exit_ion_unmap_kernel:
	ion_unmap_kernel(qseecom.ion_clnt, ihandle);
exit_ion_free:
	ion_free(qseecom.ion_clnt, ihandle);
	ihandle = NULL;
	return ret;
}
4150
4151static void __qseecom_free_img_data(struct ion_handle **ihandle)
4152{
4153 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4154 ion_free(qseecom.ion_clnt, *ihandle);
4155 *ihandle = NULL;
4156}
4157
4158static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4159 uint32_t *app_id)
4160{
4161 int ret = -1;
4162 uint32_t fw_size = 0;
4163 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4164 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4165 struct qseecom_command_scm_resp resp;
4166 u8 *img_data = NULL;
4167 ion_phys_addr_t pa = 0;
4168 struct ion_handle *ihandle = NULL;
4169 void *cmd_buf = NULL;
4170 size_t cmd_len;
4171 uint32_t app_arch = 0;
4172
4173 if (!data || !appname || !app_id) {
4174 pr_err("Null pointer to data or appname or appid\n");
4175 return -EINVAL;
4176 }
4177 *app_id = 0;
4178 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4179 return -EIO;
4180 data->client.app_arch = app_arch;
4181
4182 /* Check and load cmnlib */
4183 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4184 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4185 ret = qseecom_load_commonlib_image(data, "cmnlib");
4186 if (ret) {
4187 pr_err("failed to load cmnlib\n");
4188 return -EIO;
4189 }
4190 qseecom.commonlib_loaded = true;
4191 pr_debug("cmnlib is loaded\n");
4192 }
4193
4194 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4195 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4196 if (ret) {
4197 pr_err("failed to load cmnlib64\n");
4198 return -EIO;
4199 }
4200 qseecom.commonlib64_loaded = true;
4201 pr_debug("cmnlib64 is loaded\n");
4202 }
4203 }
4204
4205 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4206 if (ret)
4207 return ret;
4208
4209 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4210 if (ret) {
4211 ret = -EIO;
4212 goto exit_free_img_data;
4213 }
4214
4215 /* Populate the load_req parameters */
4216 if (qseecom.qsee_version < QSEE_VERSION_40) {
4217 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4218 load_req.mdt_len = load_req.mdt_len;
4219 load_req.img_len = load_req.img_len;
4220 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4221 load_req.phy_addr = (uint32_t)pa;
4222 cmd_buf = (void *)&load_req;
4223 cmd_len = sizeof(struct qseecom_load_app_ireq);
4224 } else {
4225 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4226 load_req_64bit.mdt_len = load_req.mdt_len;
4227 load_req_64bit.img_len = load_req.img_len;
4228 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4229 load_req_64bit.phy_addr = (uint64_t)pa;
4230 cmd_buf = (void *)&load_req_64bit;
4231 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4232 }
4233
4234 if (qseecom.support_bus_scaling) {
4235 mutex_lock(&qsee_bw_mutex);
4236 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4237 mutex_unlock(&qsee_bw_mutex);
4238 if (ret) {
4239 ret = -EIO;
4240 goto exit_free_img_data;
4241 }
4242 }
4243
4244 ret = __qseecom_enable_clk_scale_up(data);
4245 if (ret) {
4246 ret = -EIO;
4247 goto exit_unregister_bus_bw_need;
4248 }
4249
4250 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4251 img_data, fw_size,
4252 ION_IOC_CLEAN_INV_CACHES);
4253 if (ret) {
4254 pr_err("cache operation failed %d\n", ret);
4255 goto exit_disable_clk_vote;
4256 }
4257
4258 /* SCM_CALL to load the image */
4259 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4260 &resp, sizeof(resp));
4261 if (ret) {
4262 pr_err("scm_call to load failed : ret %d\n", ret);
4263 ret = -EIO;
4264 goto exit_disable_clk_vote;
4265 }
4266
4267 switch (resp.result) {
4268 case QSEOS_RESULT_SUCCESS:
4269 *app_id = resp.data;
4270 break;
4271 case QSEOS_RESULT_INCOMPLETE:
4272 ret = __qseecom_process_incomplete_cmd(data, &resp);
4273 if (ret)
4274 pr_err("process_incomplete_cmd FAILED\n");
4275 else
4276 *app_id = resp.data;
4277 break;
4278 case QSEOS_RESULT_FAILURE:
4279 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4280 break;
4281 default:
4282 pr_err("scm call return unknown response %d\n", resp.result);
4283 ret = -EINVAL;
4284 break;
4285 }
4286
4287exit_disable_clk_vote:
4288 __qseecom_disable_clk_scale_down(data);
4289
4290exit_unregister_bus_bw_need:
4291 if (qseecom.support_bus_scaling) {
4292 mutex_lock(&qsee_bw_mutex);
4293 qseecom_unregister_bus_bandwidth_needs(data);
4294 mutex_unlock(&qsee_bw_mutex);
4295 }
4296
4297exit_free_img_data:
4298 __qseecom_free_img_data(&ihandle);
4299 return ret;
4300}
4301
/*
 * Load a QSEE common library image (@cmnlib_name, e.g. "cmnlib" or
 * "cmnlib64") into the secure environment.
 *
 * Allocates an ION staging buffer, copies the firmware images into it,
 * votes bus bandwidth and the SFPB clock, cleans the cache, then
 * issues QSEOS_LOAD_SERV_IMAGE_COMMAND via SCM.  All votes and the
 * buffer are released before returning.
 *
 * Returns 0 on success, -EINVAL / -EIO on failure.
 */
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name)
{
	int ret = 0;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	ion_phys_addr_t pa = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	uint32_t app_arch = 0;
	struct ion_handle *cmnlib_ion_handle = NULL;

	if (!cmnlib_name) {
		pr_err("cmnlib_name is NULL\n");
		return -EINVAL;
	}
	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
			cmnlib_name, strlen(cmnlib_name));
		return -EINVAL;
	}

	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
		return -EIO;

	ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
						&img_data, fw_size, &pa);
	if (ret)
		return -EIO;

	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}
	/* Pick the 32- or 64-bit request layout by QSEE version */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		load_req.phy_addr = (uint32_t)pa;
		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		cmd_buf = (void *)&load_req;
		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
	} else {
		load_req_64bit.phy_addr = (uint64_t)pa;
		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		load_req_64bit.img_len = load_req.img_len;
		load_req_64bit.mdt_len = load_req.mdt_len;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	/* TZ reads the staging buffer uncached; clean it first */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
				img_data, fw_size,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clk_vote;
	}

	/* SCM_CALL to load the image */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
							&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n", ret);
		ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed w/response result%d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener servicing before the load completes */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd failed err: %d\n", ret);
			goto exit_disable_clk_vote;
		}
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	}

exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	__qseecom_free_img_data(&cmnlib_ion_handle);
	return ret;
}
4422
4423static int qseecom_unload_commonlib_image(void)
4424{
4425 int ret = -EINVAL;
4426 struct qseecom_unload_lib_image_ireq unload_req = {0};
4427 struct qseecom_command_scm_resp resp;
4428
4429 /* Populate the remaining parameters */
4430 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4431
4432 /* SCM_CALL to load the image */
4433 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4434 sizeof(struct qseecom_unload_lib_image_ireq),
4435 &resp, sizeof(resp));
4436 if (ret) {
4437 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4438 ret = -EIO;
4439 } else {
4440 switch (resp.result) {
4441 case QSEOS_RESULT_SUCCESS:
4442 break;
4443 case QSEOS_RESULT_FAILURE:
4444 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4445 break;
4446 default:
4447 pr_err("scm call return unknown response %d\n",
4448 resp.result);
4449 ret = -EINVAL;
4450 break;
4451 }
4452 }
4453
4454 return ret;
4455}
4456
4457int qseecom_start_app(struct qseecom_handle **handle,
4458 char *app_name, uint32_t size)
4459{
4460 int32_t ret = 0;
4461 unsigned long flags = 0;
4462 struct qseecom_dev_handle *data = NULL;
4463 struct qseecom_check_app_ireq app_ireq;
4464 struct qseecom_registered_app_list *entry = NULL;
4465 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4466 bool found_app = false;
4467 size_t len;
4468 ion_phys_addr_t pa;
4469 uint32_t fw_size, app_arch;
4470 uint32_t app_id = 0;
4471
4472 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4473 pr_err("Not allowed to be called in %d state\n",
4474 atomic_read(&qseecom.qseecom_state));
4475 return -EPERM;
4476 }
4477 if (!app_name) {
4478 pr_err("failed to get the app name\n");
4479 return -EINVAL;
4480 }
4481
Zhen Kong64a6d7282017-06-16 11:55:07 -07004482 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004483 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004484 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004485 return -EINVAL;
4486 }
4487
4488 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4489 if (!(*handle))
4490 return -ENOMEM;
4491
4492 data = kzalloc(sizeof(*data), GFP_KERNEL);
4493 if (!data) {
4494 if (ret == 0) {
4495 kfree(*handle);
4496 *handle = NULL;
4497 }
4498 return -ENOMEM;
4499 }
4500 data->abort = 0;
4501 data->type = QSEECOM_CLIENT_APP;
4502 data->released = false;
4503 data->client.sb_length = size;
4504 data->client.user_virt_sb_base = 0;
4505 data->client.ihandle = NULL;
4506
4507 init_waitqueue_head(&data->abort_wq);
4508
4509 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4510 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4511 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4512 pr_err("Ion client could not retrieve the handle\n");
4513 kfree(data);
4514 kfree(*handle);
4515 *handle = NULL;
4516 return -EINVAL;
4517 }
4518 mutex_lock(&app_access_lock);
4519
4520 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4521 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4522 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4523 if (ret)
4524 goto err;
4525
4526 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4527 if (app_id) {
4528 pr_warn("App id %d for [%s] app exists\n", app_id,
4529 (char *)app_ireq.app_name);
4530 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4531 list_for_each_entry(entry,
4532 &qseecom.registered_app_list_head, list){
4533 if (entry->app_id == app_id) {
4534 entry->ref_cnt++;
4535 found_app = true;
4536 break;
4537 }
4538 }
4539 spin_unlock_irqrestore(
4540 &qseecom.registered_app_list_lock, flags);
4541 if (!found_app)
4542 pr_warn("App_id %d [%s] was loaded but not registered\n",
4543 ret, (char *)app_ireq.app_name);
4544 } else {
4545 /* load the app and get the app_id */
4546 pr_debug("%s: Loading app for the first time'\n",
4547 qseecom.pdev->init_name);
4548 ret = __qseecom_load_fw(data, app_name, &app_id);
4549 if (ret < 0)
4550 goto err;
4551 }
4552 data->client.app_id = app_id;
4553 if (!found_app) {
4554 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4555 if (!entry) {
4556 pr_err("kmalloc for app entry failed\n");
4557 ret = -ENOMEM;
4558 goto err;
4559 }
4560 entry->app_id = app_id;
4561 entry->ref_cnt = 1;
4562 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4563 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4564 ret = -EIO;
4565 kfree(entry);
4566 goto err;
4567 }
4568 entry->app_arch = app_arch;
4569 entry->app_blocked = false;
4570 entry->blocked_on_listener_id = 0;
4571 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4572 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4573 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4574 flags);
4575 }
4576
4577 /* Get the physical address of the ION BUF */
4578 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4579 if (ret) {
4580 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4581 ret);
4582 goto err;
4583 }
4584
4585 /* Populate the structure for sending scm call to load image */
4586 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4587 data->client.ihandle);
4588 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4589 pr_err("ION memory mapping for client shared buf failed\n");
4590 ret = -ENOMEM;
4591 goto err;
4592 }
4593 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4594 data->client.sb_phys = (phys_addr_t)pa;
4595 (*handle)->dev = (void *)data;
4596 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4597 (*handle)->sbuf_len = data->client.sb_length;
4598
4599 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4600 if (!kclient_entry) {
4601 ret = -ENOMEM;
4602 goto err;
4603 }
4604 kclient_entry->handle = *handle;
4605
4606 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4607 list_add_tail(&kclient_entry->list,
4608 &qseecom.registered_kclient_list_head);
4609 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4610
4611 mutex_unlock(&app_access_lock);
4612 return 0;
4613
4614err:
4615 kfree(data);
4616 kfree(*handle);
4617 *handle = NULL;
4618 mutex_unlock(&app_access_lock);
4619 return ret;
4620}
4621EXPORT_SYMBOL(qseecom_start_app);
4622
/*
 * Kernel-client API: unload the TZ app behind @handle and free the
 * handle.
 *
 * Looks *handle up in the registered kernel-client list; if found,
 * removes it and unloads the app.  The private data, the handle and
 * the list entry are freed (and *handle cleared) only when the unload
 * succeeds.
 *
 * Returns 0 on success, -EINVAL on a bad/unknown handle or unload
 * failure, -EPERM when the driver is not ready.
 */
int qseecom_shutdown_app(struct qseecom_handle **handle)
{
	int ret = -EINVAL;
	struct qseecom_dev_handle *data;

	struct qseecom_registered_kclient_list *kclient = NULL;
	unsigned long flags = 0;
	bool found_handle = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
				atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if ((handle == NULL)  || (*handle == NULL)) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data =	(struct qseecom_dev_handle *) ((*handle)->dev);
	mutex_lock(&app_access_lock);

	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
				list) {
		if (kclient->handle == (*handle)) {
			list_del(&kclient->list);
			found_handle = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
	if (!found_handle)
		pr_err("Unable to find the handle, exiting\n");
	else
		ret = qseecom_unload_app(data, false);

	mutex_unlock(&app_access_lock);
	/*
	 * ret == 0 implies found_handle was true, so kclient points at the
	 * entry removed from the list above (never the list head).
	 */
	if (ret == 0) {
		kzfree(data);
		kzfree(*handle);
		kzfree(kclient);
		*handle = NULL;
	}

	return ret;
}
EXPORT_SYMBOL(qseecom_shutdown_app);
4671
/*
 * Kernel-client API: send a command to the TZ app behind @handle.
 *
 * @send_buf/@sbuf_len and @resp_buf/@rbuf_len are validated against
 * the handle's shared buffer by __validate_send_cmd_inputs().  Bus
 * bandwidth and, when HLOS owns the crypto clock and no vote is held,
 * the clock itself are voted up around the SCM call and released
 * afterwards.
 *
 * Returns 0 on success, negative errno on failure, -EPERM when the
 * driver is not ready.
 */
int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
{
	int ret = 0;
	struct qseecom_send_cmd_req req = {0, 0, 0, 0};
	struct qseecom_dev_handle *data;
	bool perf_enabled = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
				atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if (handle == NULL) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = handle->dev;

	req.cmd_req_len = sbuf_len;
	req.resp_len = rbuf_len;
	req.cmd_req_buf = send_buf;
	req.resp_buf = resp_buf;

	if (__validate_send_cmd_inputs(data, &req))
		return -EINVAL;

	mutex_lock(&app_access_lock);
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
		if (ret) {
			pr_err("Failed to set bw.\n");
			mutex_unlock(&app_access_lock);
			return ret;
		}
	}
	/*
	 * On targets where crypto clock is handled by HLOS,
	 * if clk_access_cnt is zero and perf_enabled is false,
	 * then the crypto clock was not enabled before sending cmd
	 * to tz, qseecom will enable the clock to avoid service failure.
	 */
	if (!qseecom.no_clock_support &&
		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
		pr_debug("ce clock is not enabled!\n");
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clock with err %d\n",
						ret);
			mutex_unlock(&app_access_lock);
			return -EINVAL;
		}
		perf_enabled = true;
	}
	/* The "securemm" app still expects the legacy command layout */
	if (!strcmp(data->client.app_name, "securemm"))
		data->use_legacy_cmd = true;

	ret = __qseecom_send_cmd(data, &req);
	data->use_legacy_cmd = false;
	if (qseecom.support_bus_scaling)
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);

	/* Drop the temporary clock vote taken above, if any */
	if (perf_enabled) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	}

	mutex_unlock(&app_access_lock);

	if (ret)
		return ret;

	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
			req.resp_len, req.resp_buf);
	return ret;
}
EXPORT_SYMBOL(qseecom_send_command);
4751
4752int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4753{
4754 int ret = 0;
4755
4756 if ((handle == NULL) || (handle->dev == NULL)) {
4757 pr_err("No valid kernel client\n");
4758 return -EINVAL;
4759 }
4760 if (high) {
4761 if (qseecom.support_bus_scaling) {
4762 mutex_lock(&qsee_bw_mutex);
4763 __qseecom_register_bus_bandwidth_needs(handle->dev,
4764 HIGH);
4765 mutex_unlock(&qsee_bw_mutex);
4766 } else {
4767 ret = qseecom_perf_enable(handle->dev);
4768 if (ret)
4769 pr_err("Failed to vote for clock with err %d\n",
4770 ret);
4771 }
4772 } else {
4773 if (!qseecom.support_bus_scaling) {
4774 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4775 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4776 } else {
4777 mutex_lock(&qsee_bw_mutex);
4778 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4779 mutex_unlock(&qsee_bw_mutex);
4780 }
4781 }
4782 return ret;
4783}
4784EXPORT_SYMBOL(qseecom_set_bandwidth);
4785
/*
 * Entry point for the smcinvoke driver: process a QSEECOM listener
 * request / blocked-session notification that arrived via an smcinvoke
 * SCM call.
 *
 * On entry desc->ret[0..2] carry (result/req_cmd, resp_type or session
 * id, listener id); on return they are overwritten with the processed
 * (result, resp_type, data).  Dummy app/client records are used since
 * there is no real qseecom client behind this path.
 */
int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
{
	struct qseecom_registered_app_list dummy_app_entry = { {0} };
	struct qseecom_dev_handle dummy_private_data = {0};
	struct qseecom_command_scm_resp resp;
	int ret = 0;

	if (!desc) {
		pr_err("desc is NULL\n");
		return -EINVAL;
	}

	resp.result = desc->ret[0];	/*req_cmd*/
	resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
	resp.data = desc->ret[2];	/*listener_id*/

	dummy_private_data.client.app_id = desc->ret[1];
	dummy_app_entry.app_id = desc->ret[1];

	mutex_lock(&app_access_lock);
	/* Dispatch by reentrancy capability of this QSEE version */
	if (qseecom.qsee_reentrancy_support)
		ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
					&dummy_private_data);
	else
		ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
					&resp);
	mutex_unlock(&app_access_lock);
	if (ret)
		pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
			(int)desc->ret[0], (int)desc->ret[2],
			(int)desc->ret[1], ret);
	/* Report the outcome back through the same scm_desc words */
	desc->ret[0] = resp.result;
	desc->ret[1] = resp.resp_type;
	desc->ret[2] = resp.data;
	return ret;
}
EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4823
/*
 * Mark that a listener response is pending and wake anyone waiting on
 * send_resp_wq.  Always returns 0.
 */
static int qseecom_send_resp(void)
{
	qseecom.send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4830
4831static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4832{
4833 struct qseecom_registered_listener_list *this_lstnr = NULL;
4834
4835 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4836 this_lstnr = __qseecom_find_svc(data->listener.id);
4837 if (this_lstnr == NULL)
4838 return -EINVAL;
4839 qseecom.send_resp_flag = 1;
4840 this_lstnr->send_resp_flag = 1;
4841 wake_up_interruptible(&qseecom.send_resp_wq);
4842 return 0;
4843}
4844
/*
 * Validate a listener's modified-fd response before it is consumed.
 *
 * Checks, in order: non-NULL arguments; non-NULL response pointer;
 * response length non-zero and within the listener's shared buffer
 * size; no pointer-arithmetic wrap-around for either the response span
 * or the shared-buffer span; the response lying entirely inside the
 * shared buffer; and every ion-fd cmd_buf_offset falling inside the
 * response.  The overflow checks must precede the range checks, so do
 * not reorder.
 *
 * Returns 0 when valid, -EINVAL otherwise.
 */
static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
			struct qseecom_send_modfd_listener_resp *resp,
			struct qseecom_registered_listener_list *this_lstnr)
{
	int i;

	if (!data || !resp || !this_lstnr) {
		pr_err("listener handle or resp msg is null\n");
		return -EINVAL;
	}

	if (resp->resp_buf_ptr == NULL) {
		pr_err("resp buffer is null\n");
		return -EINVAL;
	}
	/* validate resp buf length */
	if ((resp->resp_len == 0) ||
			(resp->resp_len > this_lstnr->sb_length)) {
		pr_err("resp buf length %d not valid\n", resp->resp_len);
		return -EINVAL;
	}

	if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)this_lstnr->user_virt_sb_base >
					(ULONG_MAX - this_lstnr->sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* validate resp buf */
	if (((uintptr_t)resp->resp_buf_ptr <
		(uintptr_t)this_lstnr->user_virt_sb_base) ||
		((uintptr_t)resp->resp_buf_ptr >=
		((uintptr_t)this_lstnr->user_virt_sb_base +
				this_lstnr->sb_length)) ||
		(((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
		((uintptr_t)this_lstnr->user_virt_sb_base +
						this_lstnr->sb_length))) {
		pr_err("resp buf is out of shared buffer region\n");
		return -EINVAL;
	}

	/* validate offsets */
	for (i = 0; i < MAX_ION_FD; i++) {
		if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
			pr_err("Invalid offset %d = 0x%x\n",
				i, resp->ifd_data[i].cmd_buf_offset);
			return -EINVAL;
		}
	}

	return 0;
}
4900
/*
 * Common handler for the send-modfd-listener-resp ioctls
 * (@is_64bit_addr selects the 32- or 64-bit command-buffer update).
 *
 * Copies the response descriptor from userspace, validates it against
 * the listener's shared buffer, rebases the user-virtual response
 * pointer into the kernel mapping, updates the command buffer for any
 * ION fds, then sets the response flags and wakes the waiter.
 */
static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp, bool is_64bit_addr)
{
	struct qseecom_send_modfd_listener_resp resp;
	struct qseecom_registered_listener_list *this_lstnr = NULL;

	if (copy_from_user(&resp, argp, sizeof(resp))) {
		pr_err("copy_from_user failed");
		return -EINVAL;
	}

	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (this_lstnr == NULL)
		return -EINVAL;

	if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
		return -EINVAL;

	/* Translate user-virtual resp pointer to the kernel mapping */
	resp.resp_buf_ptr = this_lstnr->sb_virt +
		(uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);

	if (!is_64bit_addr)
		__qseecom_update_cmd_buf(&resp, false, data);
	else
		__qseecom_update_cmd_buf_64(&resp, false, data);
	qseecom.send_resp_flag = 1;
	this_lstnr->send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4931
/* ioctl handler: listener response carrying 32-bit buffer addresses. */
static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
						void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, false);
}
4937
/* ioctl handler: listener response carrying 64-bit buffer addresses. */
static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
						void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, true);
}
4943
4944static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
4945 void __user *argp)
4946{
4947 struct qseecom_qseos_version_req req;
4948
4949 if (copy_from_user(&req, argp, sizeof(req))) {
4950 pr_err("copy_from_user failed");
4951 return -EINVAL;
4952 }
4953 req.qseos_version = qseecom.qseos_version;
4954 if (copy_to_user(argp, &req, sizeof(req))) {
4955 pr_err("copy_to_user failed");
4956 return -EINVAL;
4957 }
4958 return 0;
4959}
4960
/*
 * Take a reference on the crypto-engine clocks for instance @ce
 * (CLK_QSEE or CLK_CE_DRV).  The core, interface and bus clocks are
 * physically enabled only on the 0 -> 1 transition of clk_access_cnt;
 * subsequent calls just bump the refcount.  Serialized by
 * clk_access_lock.
 *
 * Returns 0 on success, -EINVAL for an unknown instance, -EIO if any
 * clock fails to enable (already-enabled clocks are rolled back) or
 * the refcount would overflow.
 */
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
{
	int rc = 0;
	struct qseecom_clk *qclk = NULL;

	if (qseecom.no_clock_support)
		return 0;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	if (ce == CLK_CE_DRV)
		qclk = &qseecom.ce_drv;

	if (qclk == NULL) {
		pr_err("CLK type not supported\n");
		return -EINVAL;
	}
	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == ULONG_MAX) {
		pr_err("clk_access_cnt beyond limitation\n");
		goto err;
	}
	/* Already enabled: only bump the refcount */
	if (qclk->clk_access_cnt > 0) {
		qclk->clk_access_cnt++;
		mutex_unlock(&clk_access_lock);
		return rc;
	}

	/* Enable CE core clk */
	if (qclk->ce_core_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_core_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core clk\n");
			goto err;
		}
	}
	/* Enable CE clk */
	if (qclk->ce_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE iface clk\n");
			goto ce_clk_err;
		}
	}
	/* Enable AXI clk */
	if (qclk->ce_bus_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_bus_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE bus clk\n");
			goto ce_bus_clk_err;
		}
	}
	qclk->clk_access_cnt++;
	mutex_unlock(&clk_access_lock);
	return 0;

/* Roll back in reverse order of enabling */
ce_bus_clk_err:
	if (qclk->ce_clk != NULL)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk != NULL)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	return -EIO;
}
5028
/*
 * Drop one reference on the crypto-engine clocks for instance @ce.
 * The clocks are physically disabled only on the 1 -> 0 transition of
 * clk_access_cnt; a call with the refcount already at 0 is a silent
 * no-op.  Serialized by clk_access_lock.
 */
static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
{
	struct qseecom_clk *qclk;

	if (qseecom.no_clock_support)
		return;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	else
		qclk = &qseecom.ce_drv;

	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == 0) {
		mutex_unlock(&clk_access_lock);
		return;
	}

	/* Last reference: actually turn the clocks off */
	if (qclk->clk_access_cnt == 1) {
		if (qclk->ce_clk != NULL)
			clk_disable_unprepare(qclk->ce_clk);
		if (qclk->ce_core_clk != NULL)
			clk_disable_unprepare(qclk->ce_core_clk);
		if (qclk->ce_bus_clk != NULL)
			clk_disable_unprepare(qclk->ce_bus_clk);
	}
	qclk->clk_access_cnt--;
	mutex_unlock(&clk_access_lock);
}
5059
/*
 * Vote for QSEE bus bandwidth (and, when needed, the CE source clock) on
 * behalf of one client handle.
 *
 * @data:     client handle; its perf_enabled/fast_load_enabled flag is set
 *            on a successful first vote so release paths know what to undo.
 * @clk_type: CLK_DFAB (high bandwidth vote) or CLK_SFPB (medium vote).
 *
 * Votes are refcounted via qsee_bw_count / qsee_sfpb_bw_count under
 * qsee_bw_mutex; only the first vote of each kind issues a bus request.
 * Bus level 3 is used when both DFAB and SFPB votes are active at once.
 * Returns 0 on success or the msm_bus/clk error code.
 */
static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
						int32_t clk_type)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	if (qseecom.no_clock_support)
		return 0;

	qclk = &qseecom.qsee;
	if (!qseecom.qsee_perf_client)
		return ret;

	switch (clk_type) {
	case CLK_DFAB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_bw_count) {
			/* SFPB already voted: bump combined level to 3 */
			if (qseecom.qsee_sfpb_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				/* first vote of any kind: bring up CE clk */
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 1);
					/* undo the clk vote if bus req fails */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret)
				pr_err("DFAB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_bw_count++;
				data->perf_enabled = true;
			}
		} else {
			/* vote already active; just take a reference */
			qseecom.qsee_bw_count++;
			data->perf_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	case CLK_SFPB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_sfpb_bw_count) {
			/* DFAB already voted: bump combined level to 3 */
			if (qseecom.qsee_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				/* first vote of any kind: bring up CE clk */
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 2);
					/* undo the clk vote if bus req fails */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}

			if (ret)
				pr_err("SFPB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_sfpb_bw_count++;
				data->fast_load_enabled = true;
			}
		} else {
			/* vote already active; just take a reference */
			qseecom.qsee_sfpb_bw_count++;
			data->fast_load_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	default:
		pr_err("Clock type not defined\n");
		break;
	}
	return ret;
}
5143
5144static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5145 int32_t clk_type)
5146{
5147 int32_t ret = 0;
5148 struct qseecom_clk *qclk;
5149
5150 qclk = &qseecom.qsee;
5151
5152 if (qseecom.no_clock_support)
5153 return;
5154 if (!qseecom.qsee_perf_client)
5155 return;
5156
5157 switch (clk_type) {
5158 case CLK_DFAB:
5159 mutex_lock(&qsee_bw_mutex);
5160 if (qseecom.qsee_bw_count == 0) {
5161 pr_err("Client error.Extra call to disable DFAB clk\n");
5162 mutex_unlock(&qsee_bw_mutex);
5163 return;
5164 }
5165
5166 if (qseecom.qsee_bw_count == 1) {
5167 if (qseecom.qsee_sfpb_bw_count > 0)
5168 ret = msm_bus_scale_client_update_request(
5169 qseecom.qsee_perf_client, 2);
5170 else {
5171 ret = msm_bus_scale_client_update_request(
5172 qseecom.qsee_perf_client, 0);
5173 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5174 __qseecom_disable_clk(CLK_QSEE);
5175 }
5176 if (ret)
5177 pr_err("SFPB Bandwidth req fail (%d)\n",
5178 ret);
5179 else {
5180 qseecom.qsee_bw_count--;
5181 data->perf_enabled = false;
5182 }
5183 } else {
5184 qseecom.qsee_bw_count--;
5185 data->perf_enabled = false;
5186 }
5187 mutex_unlock(&qsee_bw_mutex);
5188 break;
5189 case CLK_SFPB:
5190 mutex_lock(&qsee_bw_mutex);
5191 if (qseecom.qsee_sfpb_bw_count == 0) {
5192 pr_err("Client error.Extra call to disable SFPB clk\n");
5193 mutex_unlock(&qsee_bw_mutex);
5194 return;
5195 }
5196 if (qseecom.qsee_sfpb_bw_count == 1) {
5197 if (qseecom.qsee_bw_count > 0)
5198 ret = msm_bus_scale_client_update_request(
5199 qseecom.qsee_perf_client, 1);
5200 else {
5201 ret = msm_bus_scale_client_update_request(
5202 qseecom.qsee_perf_client, 0);
5203 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5204 __qseecom_disable_clk(CLK_QSEE);
5205 }
5206 if (ret)
5207 pr_err("SFPB Bandwidth req fail (%d)\n",
5208 ret);
5209 else {
5210 qseecom.qsee_sfpb_bw_count--;
5211 data->fast_load_enabled = false;
5212 }
5213 } else {
5214 qseecom.qsee_sfpb_bw_count--;
5215 data->fast_load_enabled = false;
5216 }
5217 mutex_unlock(&qsee_bw_mutex);
5218 break;
5219 default:
5220 pr_err("Clock type not defined\n");
5221 break;
5222 }
5223
5224}
5225
5226static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5227 void __user *argp)
5228{
5229 struct ion_handle *ihandle; /* Ion handle */
5230 struct qseecom_load_img_req load_img_req;
5231 int uret = 0;
5232 int ret;
5233 ion_phys_addr_t pa = 0;
5234 size_t len;
5235 struct qseecom_load_app_ireq load_req;
5236 struct qseecom_load_app_64bit_ireq load_req_64bit;
5237 struct qseecom_command_scm_resp resp;
5238 void *cmd_buf = NULL;
5239 size_t cmd_len;
5240 /* Copy the relevant information needed for loading the image */
5241 if (copy_from_user(&load_img_req,
5242 (void __user *)argp,
5243 sizeof(struct qseecom_load_img_req))) {
5244 pr_err("copy_from_user failed\n");
5245 return -EFAULT;
5246 }
5247
5248 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005249 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005250 load_img_req.ifd_data_fd);
5251 if (IS_ERR_OR_NULL(ihandle)) {
5252 pr_err("Ion client could not retrieve the handle\n");
5253 return -ENOMEM;
5254 }
5255
5256 /* Get the physical address of the ION BUF */
5257 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5258 if (ret) {
5259 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5260 ret);
5261 return ret;
5262 }
5263 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5264 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5265 len, load_img_req.mdt_len,
5266 load_img_req.img_len);
5267 return ret;
5268 }
5269 /* Populate the structure for sending scm call to load image */
5270 if (qseecom.qsee_version < QSEE_VERSION_40) {
5271 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5272 load_req.mdt_len = load_img_req.mdt_len;
5273 load_req.img_len = load_img_req.img_len;
5274 load_req.phy_addr = (uint32_t)pa;
5275 cmd_buf = (void *)&load_req;
5276 cmd_len = sizeof(struct qseecom_load_app_ireq);
5277 } else {
5278 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5279 load_req_64bit.mdt_len = load_img_req.mdt_len;
5280 load_req_64bit.img_len = load_img_req.img_len;
5281 load_req_64bit.phy_addr = (uint64_t)pa;
5282 cmd_buf = (void *)&load_req_64bit;
5283 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5284 }
5285
5286 if (qseecom.support_bus_scaling) {
5287 mutex_lock(&qsee_bw_mutex);
5288 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5289 mutex_unlock(&qsee_bw_mutex);
5290 if (ret) {
5291 ret = -EIO;
5292 goto exit_cpu_restore;
5293 }
5294 }
5295
5296 /* Vote for the SFPB clock */
5297 ret = __qseecom_enable_clk_scale_up(data);
5298 if (ret) {
5299 ret = -EIO;
5300 goto exit_register_bus_bandwidth_needs;
5301 }
5302 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5303 ION_IOC_CLEAN_INV_CACHES);
5304 if (ret) {
5305 pr_err("cache operation failed %d\n", ret);
5306 goto exit_disable_clock;
5307 }
5308 /* SCM_CALL to load the external elf */
5309 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5310 &resp, sizeof(resp));
5311 if (ret) {
5312 pr_err("scm_call to load failed : ret %d\n",
5313 ret);
5314 ret = -EFAULT;
5315 goto exit_disable_clock;
5316 }
5317
5318 switch (resp.result) {
5319 case QSEOS_RESULT_SUCCESS:
5320 break;
5321 case QSEOS_RESULT_INCOMPLETE:
5322 pr_err("%s: qseos result incomplete\n", __func__);
5323 ret = __qseecom_process_incomplete_cmd(data, &resp);
5324 if (ret)
5325 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5326 break;
5327 case QSEOS_RESULT_FAILURE:
5328 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5329 ret = -EFAULT;
5330 break;
5331 default:
5332 pr_err("scm_call response result %d not supported\n",
5333 resp.result);
5334 ret = -EFAULT;
5335 break;
5336 }
5337
5338exit_disable_clock:
5339 __qseecom_disable_clk_scale_down(data);
5340
5341exit_register_bus_bandwidth_needs:
5342 if (qseecom.support_bus_scaling) {
5343 mutex_lock(&qsee_bw_mutex);
5344 uret = qseecom_unregister_bus_bandwidth_needs(data);
5345 mutex_unlock(&qsee_bw_mutex);
5346 if (uret)
5347 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5348 uret, ret);
5349 }
5350
5351exit_cpu_restore:
5352 /* Deallocate the handle */
5353 if (!IS_ERR_OR_NULL(ihandle))
5354 ion_free(qseecom.ion_clnt, ihandle);
5355 return ret;
5356}
5357
5358static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5359{
5360 int ret = 0;
5361 struct qseecom_command_scm_resp resp;
5362 struct qseecom_unload_app_ireq req;
5363
5364 /* unavailable client app */
5365 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5366
5367 /* Populate the structure for sending scm call to unload image */
5368 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5369
5370 /* SCM_CALL to unload the external elf */
5371 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5372 sizeof(struct qseecom_unload_app_ireq),
5373 &resp, sizeof(resp));
5374 if (ret) {
5375 pr_err("scm_call to unload failed : ret %d\n",
5376 ret);
5377 ret = -EFAULT;
5378 goto qseecom_unload_external_elf_scm_err;
5379 }
5380 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5381 ret = __qseecom_process_incomplete_cmd(data, &resp);
5382 if (ret)
5383 pr_err("process_incomplete_cmd fail err: %d\n",
5384 ret);
5385 } else {
5386 if (resp.result != QSEOS_RESULT_SUCCESS) {
5387 pr_err("scm_call to unload image failed resp.result =%d\n",
5388 resp.result);
5389 ret = -EFAULT;
5390 }
5391 }
5392
5393qseecom_unload_external_elf_scm_err:
5394
5395 return ret;
5396}
5397
5398static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5399 void __user *argp)
5400{
5401
5402 int32_t ret;
5403 struct qseecom_qseos_app_load_query query_req;
5404 struct qseecom_check_app_ireq req;
5405 struct qseecom_registered_app_list *entry = NULL;
5406 unsigned long flags = 0;
5407 uint32_t app_arch = 0, app_id = 0;
5408 bool found_app = false;
5409
5410 /* Copy the relevant information needed for loading the image */
5411 if (copy_from_user(&query_req,
5412 (void __user *)argp,
5413 sizeof(struct qseecom_qseos_app_load_query))) {
5414 pr_err("copy_from_user failed\n");
5415 return -EFAULT;
5416 }
5417
5418 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5419 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5420 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5421
5422 ret = __qseecom_check_app_exists(req, &app_id);
5423 if (ret) {
5424 pr_err(" scm call to check if app is loaded failed");
5425 return ret; /* scm call failed */
5426 }
5427 if (app_id) {
5428 pr_debug("App id %d (%s) already exists\n", app_id,
5429 (char *)(req.app_name));
5430 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5431 list_for_each_entry(entry,
5432 &qseecom.registered_app_list_head, list){
5433 if (entry->app_id == app_id) {
5434 app_arch = entry->app_arch;
5435 entry->ref_cnt++;
5436 found_app = true;
5437 break;
5438 }
5439 }
5440 spin_unlock_irqrestore(
5441 &qseecom.registered_app_list_lock, flags);
5442 data->client.app_id = app_id;
5443 query_req.app_id = app_id;
5444 if (app_arch) {
5445 data->client.app_arch = app_arch;
5446 query_req.app_arch = app_arch;
5447 } else {
5448 data->client.app_arch = 0;
5449 query_req.app_arch = 0;
5450 }
5451 strlcpy(data->client.app_name, query_req.app_name,
5452 MAX_APP_NAME_SIZE);
5453 /*
5454 * If app was loaded by appsbl before and was not registered,
5455 * regiser this app now.
5456 */
5457 if (!found_app) {
5458 pr_debug("Register app %d [%s] which was loaded before\n",
5459 ret, (char *)query_req.app_name);
5460 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5461 if (!entry) {
5462 pr_err("kmalloc for app entry failed\n");
5463 return -ENOMEM;
5464 }
5465 entry->app_id = app_id;
5466 entry->ref_cnt = 1;
5467 entry->app_arch = data->client.app_arch;
5468 strlcpy(entry->app_name, data->client.app_name,
5469 MAX_APP_NAME_SIZE);
5470 entry->app_blocked = false;
5471 entry->blocked_on_listener_id = 0;
5472 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5473 flags);
5474 list_add_tail(&entry->list,
5475 &qseecom.registered_app_list_head);
5476 spin_unlock_irqrestore(
5477 &qseecom.registered_app_list_lock, flags);
5478 }
5479 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5480 pr_err("copy_to_user failed\n");
5481 return -EFAULT;
5482 }
5483 return -EEXIST; /* app already loaded */
5484 } else {
5485 return 0; /* app not loaded */
5486 }
5487}
5488
5489static int __qseecom_get_ce_pipe_info(
5490 enum qseecom_key_management_usage_type usage,
5491 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5492{
5493 int ret = -EINVAL;
5494 int i, j;
5495 struct qseecom_ce_info_use *p = NULL;
5496 int total = 0;
5497 struct qseecom_ce_pipe_entry *pcepipe;
5498
5499 switch (usage) {
5500 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5501 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5502 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5503 if (qseecom.support_fde) {
5504 p = qseecom.ce_info.fde;
5505 total = qseecom.ce_info.num_fde;
5506 } else {
5507 pr_err("system does not support fde\n");
5508 return -EINVAL;
5509 }
5510 break;
5511 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5512 if (qseecom.support_pfe) {
5513 p = qseecom.ce_info.pfe;
5514 total = qseecom.ce_info.num_pfe;
5515 } else {
5516 pr_err("system does not support pfe\n");
5517 return -EINVAL;
5518 }
5519 break;
5520 default:
5521 pr_err("unsupported usage %d\n", usage);
5522 return -EINVAL;
5523 }
5524
5525 for (j = 0; j < total; j++) {
5526 if (p->unit_num == unit) {
5527 pcepipe = p->ce_pipe_entry;
5528 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5529 (*ce_hw)[i] = pcepipe->ce_num;
5530 *pipe = pcepipe->ce_pipe_pair;
5531 pcepipe++;
5532 }
5533 ret = 0;
5534 break;
5535 }
5536 p++;
5537 }
5538 return ret;
5539}
5540
/*
 * Ask TZ to generate and persist a key described by @ireq.
 *
 * @data:  client handle, used to drive INCOMPLETE responses to completion.
 * @usage: key-management usage; validated against the supported range.
 * @ireq:  fully populated GENERATE_KEY request (key id, hash, flags).
 *
 * "Key already exists" responses are treated as success. Enables CLK_QSEE
 * around the SCM call and always disables it on exit. Returns 0 on
 * success (or existing key), negative errno otherwise.
 */
static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_generate_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_generate_ireq),
		&resp, sizeof(resp));
	if (ret) {
		/*
		 * -EINVAL with FAIL_KEY_ID_EXISTS means the key is already
		 * provisioned — not an error for this operation.
		 */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
			pr_debug("Key ID exists.\n");
			ret = 0;
		} else {
			pr_err("scm call to generate key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto generate_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
		/* key already provisioned — treated as success (ret == 0) */
		pr_debug("Key ID exists.\n");
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
				pr_debug("Key ID exists.\n");
				ret = 0;
			} else {
				pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			}
		}
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("gen key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}
generate_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5600
/*
 * Ask TZ to delete a previously saved key described by @ireq.
 *
 * @data:  client handle, used to drive INCOMPLETE responses to completion.
 * @usage: key-management usage; validated against the supported range.
 * @ireq:  fully populated DELETE_KEY request.
 *
 * FAIL_MAX_ATTEMPT (too many password attempts) maps to -ERANGE so the
 * caller can distinguish it. Enables CLK_QSEE around the SCM call and
 * always disables it on exit. Returns 0 on success, negative errno
 * otherwise.
 */
static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_delete_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_delete_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/* -EINVAL with FAIL_MAX_ATTEMPT is a distinct, reportable case */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
			pr_debug("Max attempts to input password reached.\n");
			ret = -ERANGE;
		} else {
			pr_err("scm call to delete key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto del_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
				pr_debug("Max attempts to input password reached.\n");
				ret = -ERANGE;
			}
		}
		break;
	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
		pr_debug("Max attempts to input password reached.\n");
		ret = -ERANGE;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Delete key scm call failed resp.result %d\n",
			resp.result);
		ret = -EINVAL;
		break;
	}
del_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5661
5662static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5663 enum qseecom_key_management_usage_type usage,
5664 struct qseecom_key_select_ireq *ireq)
5665{
5666 struct qseecom_command_scm_resp resp;
5667 int ret;
5668
5669 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5670 usage >= QSEOS_KM_USAGE_MAX) {
5671 pr_err("Error:: unsupported usage %d\n", usage);
5672 return -EFAULT;
5673 }
5674 ret = __qseecom_enable_clk(CLK_QSEE);
5675 if (ret)
5676 return ret;
5677
5678 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5679 ret = __qseecom_enable_clk(CLK_CE_DRV);
5680 if (ret)
5681 return ret;
5682 }
5683
5684 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5685 ireq, sizeof(struct qseecom_key_select_ireq),
5686 &resp, sizeof(struct qseecom_command_scm_resp));
5687 if (ret) {
5688 if (ret == -EINVAL &&
5689 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5690 pr_debug("Max attempts to input password reached.\n");
5691 ret = -ERANGE;
5692 } else if (ret == -EINVAL &&
5693 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5694 pr_debug("Set Key operation under processing...\n");
5695 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5696 } else {
5697 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5698 ret);
5699 ret = -EFAULT;
5700 }
5701 goto set_key_exit;
5702 }
5703
5704 switch (resp.result) {
5705 case QSEOS_RESULT_SUCCESS:
5706 break;
5707 case QSEOS_RESULT_INCOMPLETE:
5708 ret = __qseecom_process_incomplete_cmd(data, &resp);
5709 if (ret) {
5710 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5711 resp.result);
5712 if (resp.result ==
5713 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5714 pr_debug("Set Key operation under processing...\n");
5715 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5716 }
5717 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5718 pr_debug("Max attempts to input password reached.\n");
5719 ret = -ERANGE;
5720 }
5721 }
5722 break;
5723 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5724 pr_debug("Max attempts to input password reached.\n");
5725 ret = -ERANGE;
5726 break;
5727 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5728 pr_debug("Set Key operation under processing...\n");
5729 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5730 break;
5731 case QSEOS_RESULT_FAILURE:
5732 default:
5733 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5734 ret = -EINVAL;
5735 break;
5736 }
5737set_key_exit:
5738 __qseecom_disable_clk(CLK_QSEE);
5739 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5740 __qseecom_disable_clk(CLK_CE_DRV);
5741 return ret;
5742}
5743
/*
 * Update the user-info (password hash) bound to an existing key via the
 * UPDATE_KEY_USERINFO SCM call.
 *
 * @data:  client handle, used to drive INCOMPLETE responses to completion.
 * @usage: key-management usage; validated against the supported range.
 * @ireq:  fully populated request (key id, current hash, new hash, flags).
 *
 * FAIL_PENDING_OPERATION is returned as-is (positive) so the caller can
 * retry after a delay. Enables CLK_QSEE around the SCM call and disables
 * it on every exit path. Returns 0 on success, negative errno otherwise.
 */
static int __qseecom_update_current_key_user_info(
			struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_userinfo_update_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			/* pending: fall through to the switch, which will
			 * report the pending status again */
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		} else {
			pr_err("scm call to update key userinfo failed: %d\n",
					ret);
			__qseecom_disable_clk(CLK_QSEE);
			return -EFAULT;
		}
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		/* NOTE: pending status deliberately overrides the cmd result
		 * before the error check below */
		if (resp.result ==
			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		}
		if (ret)
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
				resp.result);
		break;
	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
		pr_debug("Update Key operation under processing...\n");
		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Set key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}

	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5805
5806
5807static int qseecom_enable_ice_setup(int usage)
5808{
5809 int ret = 0;
5810
5811 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5812 ret = qcom_ice_setup_ice_hw("ufs", true);
5813 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5814 ret = qcom_ice_setup_ice_hw("sdcc", true);
5815
5816 return ret;
5817}
5818
5819static int qseecom_disable_ice_setup(int usage)
5820{
5821 int ret = 0;
5822
5823 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5824 ret = qcom_ice_setup_ice_hw("ufs", false);
5825 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5826 ret = qcom_ice_setup_ice_hw("sdcc", false);
5827
5828 return ret;
5829}
5830
5831static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5832{
5833 struct qseecom_ce_info_use *pce_info_use, *p;
5834 int total = 0;
5835 int i;
5836
5837 switch (usage) {
5838 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5839 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5840 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5841 p = qseecom.ce_info.fde;
5842 total = qseecom.ce_info.num_fde;
5843 break;
5844 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5845 p = qseecom.ce_info.pfe;
5846 total = qseecom.ce_info.num_pfe;
5847 break;
5848 default:
5849 pr_err("unsupported usage %d\n", usage);
5850 return -EINVAL;
5851 }
5852
5853 pce_info_use = NULL;
5854
5855 for (i = 0; i < total; i++) {
5856 if (p->unit_num == unit) {
5857 pce_info_use = p;
5858 break;
5859 }
5860 p++;
5861 }
5862 if (!pce_info_use) {
5863 pr_err("can not find %d\n", unit);
5864 return -EINVAL;
5865 }
5866 return pce_info_use->num_ce_pipe_entries;
5867}
5868
5869static int qseecom_create_key(struct qseecom_dev_handle *data,
5870 void __user *argp)
5871{
5872 int i;
5873 uint32_t *ce_hw = NULL;
5874 uint32_t pipe = 0;
5875 int ret = 0;
5876 uint32_t flags = 0;
5877 struct qseecom_create_key_req create_key_req;
5878 struct qseecom_key_generate_ireq generate_key_ireq;
5879 struct qseecom_key_select_ireq set_key_ireq;
5880 uint32_t entries = 0;
5881
5882 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
5883 if (ret) {
5884 pr_err("copy_from_user failed\n");
5885 return ret;
5886 }
5887
5888 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5889 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5890 pr_err("unsupported usage %d\n", create_key_req.usage);
5891 ret = -EFAULT;
5892 return ret;
5893 }
5894 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5895 create_key_req.usage);
5896 if (entries <= 0) {
5897 pr_err("no ce instance for usage %d instance %d\n",
5898 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
5899 ret = -EINVAL;
5900 return ret;
5901 }
5902
5903 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5904 if (!ce_hw) {
5905 ret = -ENOMEM;
5906 return ret;
5907 }
5908 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
5909 DEFAULT_CE_INFO_UNIT);
5910 if (ret) {
5911 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5912 ret = -EINVAL;
5913 goto free_buf;
5914 }
5915
5916 if (qseecom.fde_key_size)
5917 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
5918 else
5919 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
5920
5921 generate_key_ireq.flags = flags;
5922 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
5923 memset((void *)generate_key_ireq.key_id,
5924 0, QSEECOM_KEY_ID_SIZE);
5925 memset((void *)generate_key_ireq.hash32,
5926 0, QSEECOM_HASH_SIZE);
5927 memcpy((void *)generate_key_ireq.key_id,
5928 (void *)key_id_array[create_key_req.usage].desc,
5929 QSEECOM_KEY_ID_SIZE);
5930 memcpy((void *)generate_key_ireq.hash32,
5931 (void *)create_key_req.hash32,
5932 QSEECOM_HASH_SIZE);
5933
5934 ret = __qseecom_generate_and_save_key(data,
5935 create_key_req.usage, &generate_key_ireq);
5936 if (ret) {
5937 pr_err("Failed to generate key on storage: %d\n", ret);
5938 goto free_buf;
5939 }
5940
5941 for (i = 0; i < entries; i++) {
5942 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
5943 if (create_key_req.usage ==
5944 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
5945 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
5946 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5947
5948 } else if (create_key_req.usage ==
5949 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
5950 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
5951 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5952
5953 } else {
5954 set_key_ireq.ce = ce_hw[i];
5955 set_key_ireq.pipe = pipe;
5956 }
5957 set_key_ireq.flags = flags;
5958
5959 /* set both PIPE_ENC and PIPE_ENC_XTS*/
5960 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
5961 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
5962 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5963 memcpy((void *)set_key_ireq.key_id,
5964 (void *)key_id_array[create_key_req.usage].desc,
5965 QSEECOM_KEY_ID_SIZE);
5966 memcpy((void *)set_key_ireq.hash32,
5967 (void *)create_key_req.hash32,
5968 QSEECOM_HASH_SIZE);
5969 /*
5970 * It will return false if it is GPCE based crypto instance or
5971 * ICE is setup properly
5972 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07005973 ret = qseecom_enable_ice_setup(create_key_req.usage);
5974 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005975 goto free_buf;
5976
5977 do {
5978 ret = __qseecom_set_clear_ce_key(data,
5979 create_key_req.usage,
5980 &set_key_ireq);
5981 /*
5982 * wait a little before calling scm again to let other
5983 * processes run
5984 */
5985 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
5986 msleep(50);
5987
5988 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
5989
5990 qseecom_disable_ice_setup(create_key_req.usage);
5991
5992 if (ret) {
5993 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
5994 pipe, ce_hw[i], ret);
5995 goto free_buf;
5996 } else {
5997 pr_err("Set the key successfully\n");
5998 if ((create_key_req.usage ==
5999 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
6000 (create_key_req.usage ==
6001 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
6002 goto free_buf;
6003 }
6004 }
6005
6006free_buf:
6007 kzfree(ce_hw);
6008 return ret;
6009}
6010
6011static int qseecom_wipe_key(struct qseecom_dev_handle *data,
6012 void __user *argp)
6013{
6014 uint32_t *ce_hw = NULL;
6015 uint32_t pipe = 0;
6016 int ret = 0;
6017 uint32_t flags = 0;
6018 int i, j;
6019 struct qseecom_wipe_key_req wipe_key_req;
6020 struct qseecom_key_delete_ireq delete_key_ireq;
6021 struct qseecom_key_select_ireq clear_key_ireq;
6022 uint32_t entries = 0;
6023
6024 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6025 if (ret) {
6026 pr_err("copy_from_user failed\n");
6027 return ret;
6028 }
6029
6030 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6031 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6032 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6033 ret = -EFAULT;
6034 return ret;
6035 }
6036
6037 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6038 wipe_key_req.usage);
6039 if (entries <= 0) {
6040 pr_err("no ce instance for usage %d instance %d\n",
6041 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6042 ret = -EINVAL;
6043 return ret;
6044 }
6045
6046 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6047 if (!ce_hw) {
6048 ret = -ENOMEM;
6049 return ret;
6050 }
6051
6052 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6053 DEFAULT_CE_INFO_UNIT);
6054 if (ret) {
6055 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6056 ret = -EINVAL;
6057 goto free_buf;
6058 }
6059
6060 if (wipe_key_req.wipe_key_flag) {
6061 delete_key_ireq.flags = flags;
6062 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6063 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6064 memcpy((void *)delete_key_ireq.key_id,
6065 (void *)key_id_array[wipe_key_req.usage].desc,
6066 QSEECOM_KEY_ID_SIZE);
6067 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6068
6069 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6070 &delete_key_ireq);
6071 if (ret) {
6072 pr_err("Failed to delete key from ssd storage: %d\n",
6073 ret);
6074 ret = -EFAULT;
6075 goto free_buf;
6076 }
6077 }
6078
6079 for (j = 0; j < entries; j++) {
6080 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6081 if (wipe_key_req.usage ==
6082 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6083 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6084 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6085 } else if (wipe_key_req.usage ==
6086 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6087 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6088 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6089 } else {
6090 clear_key_ireq.ce = ce_hw[j];
6091 clear_key_ireq.pipe = pipe;
6092 }
6093 clear_key_ireq.flags = flags;
6094 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6095 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6096 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6097 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6098
6099 /*
6100 * It will return false if it is GPCE based crypto instance or
6101 * ICE is setup properly
6102 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006103 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6104 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006105 goto free_buf;
6106
6107 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6108 &clear_key_ireq);
6109
6110 qseecom_disable_ice_setup(wipe_key_req.usage);
6111
6112 if (ret) {
6113 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6114 pipe, ce_hw[j], ret);
6115 ret = -EFAULT;
6116 goto free_buf;
6117 }
6118 }
6119
6120free_buf:
6121 kzfree(ce_hw);
6122 return ret;
6123}
6124
/*
 * Update the user-info (passphrase hash) bound to a stored HW key.
 *
 * Copies a qseecom_update_key_userinfo_req from userspace, validates the
 * key usage, builds a QSEOS_UPDATE_KEY_USERINFO request carrying the
 * per-usage key id plus the current and new 32-byte hashes, and sends it
 * to TZ, retrying while TZ reports a pending operation.
 *
 * Returns 0 on success, the copy_from_user residue on copy failure,
 * -EFAULT for an unsupported usage, or the error from
 * __qseecom_update_current_key_user_info().
 */
static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
			void __user *argp)
{
	int ret = 0;
	uint32_t flags = 0;
	struct qseecom_update_key_userinfo_req update_key_req;
	struct qseecom_key_userinfo_update_ireq ireq;

	ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
	if (ret) {
		pr_err("copy_from_user failed\n");
		return ret;
	}

	/* Only key-manager usages in [DISK_ENCRYPTION, MAX) are valid. */
	if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
		return -EFAULT;
	}

	ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;

	/* FDE key size flag mirrors the platform's configured key length. */
	if (qseecom.fde_key_size)
		flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
	else
		flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;

	ireq.flags = flags;
	/* Zero the fixed-size fields first so any tail padding is clean. */
	memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
	memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
	memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
	/* Key id is selected by usage from the static key_id_array table. */
	memcpy((void *)ireq.key_id,
		(void *)key_id_array[update_key_req.usage].desc,
		QSEECOM_KEY_ID_SIZE);
	memcpy((void *)ireq.current_hash32,
		(void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
	memcpy((void *)ireq.new_hash32,
		(void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);

	do {
		ret = __qseecom_update_current_key_user_info(data,
						update_key_req.usage,
						&ireq);
		/*
		 * wait a little before calling scm again to let other
		 * processes run
		 */
		if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
			msleep(50);

	} while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
	if (ret) {
		pr_err("Failed to update key info: %d\n", ret);
		return ret;
	}
	return ret;

}
6183static int qseecom_is_es_activated(void __user *argp)
6184{
6185 struct qseecom_is_es_activated_req req;
6186 struct qseecom_command_scm_resp resp;
6187 int ret;
6188
6189 if (qseecom.qsee_version < QSEE_VERSION_04) {
6190 pr_err("invalid qsee version\n");
6191 return -ENODEV;
6192 }
6193
6194 if (argp == NULL) {
6195 pr_err("arg is null\n");
6196 return -EINVAL;
6197 }
6198
6199 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6200 &req, sizeof(req), &resp, sizeof(resp));
6201 if (ret) {
6202 pr_err("scm_call failed\n");
6203 return ret;
6204 }
6205
6206 req.is_activated = resp.result;
6207 ret = copy_to_user(argp, &req, sizeof(req));
6208 if (ret) {
6209 pr_err("copy_to_user failed\n");
6210 return ret;
6211 }
6212
6213 return 0;
6214}
6215
6216static int qseecom_save_partition_hash(void __user *argp)
6217{
6218 struct qseecom_save_partition_hash_req req;
6219 struct qseecom_command_scm_resp resp;
6220 int ret;
6221
6222 memset(&resp, 0x00, sizeof(resp));
6223
6224 if (qseecom.qsee_version < QSEE_VERSION_04) {
6225 pr_err("invalid qsee version\n");
6226 return -ENODEV;
6227 }
6228
6229 if (argp == NULL) {
6230 pr_err("arg is null\n");
6231 return -EINVAL;
6232 }
6233
6234 ret = copy_from_user(&req, argp, sizeof(req));
6235 if (ret) {
6236 pr_err("copy_from_user failed\n");
6237 return ret;
6238 }
6239
6240 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6241 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6242 if (ret) {
6243 pr_err("qseecom_scm_call failed\n");
6244 return ret;
6245 }
6246
6247 return 0;
6248}
6249
/*
 * Cipher (encrypt or decrypt, per req.direction) an MDTP DIP buffer in TZ.
 *
 * Copies the user's input buffer into a page-aligned kernel buffer,
 * flushes both kernel buffers out of the cache (TZ accesses them by
 * physical address), issues TZ_MDTP_CIPHER_DIP_ID via scm_call2 with the
 * QSEE clock voted on, then copies the result back to userspace.
 *
 * The do { } while (0) block gives a single exit point so both buffers
 * are always released (kzfree also zeroizes, as they may hold secrets).
 *
 * Returns 0 on success, -EINVAL/-ENOMEM for bad parameters or allocation
 * failure, or the failure value from copy_{from,to}_user, clock enable,
 * or scm_call2.
 */
static int qseecom_mdtp_cipher_dip(void __user *argp)
{
	struct qseecom_mdtp_cipher_dip_req req;
	u32 tzbuflenin, tzbuflenout;
	char *tzbufin = NULL, *tzbufout = NULL;
	struct scm_desc desc = {0};
	int ret;

	do {
		/* Copy the parameters from userspace */
		if (argp == NULL) {
			pr_err("arg is null\n");
			ret = -EINVAL;
			break;
		}

		ret = copy_from_user(&req, argp, sizeof(req));
		if (ret) {
			pr_err("copy_from_user failed, ret= %d\n", ret);
			break;
		}

		/* Bound both sizes by MAX_DIP; direction is 0 or 1. */
		if (req.in_buf == NULL || req.out_buf == NULL ||
			req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
			req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
				req.direction > 1) {
			pr_err("invalid parameters\n");
			ret = -EINVAL;
			break;
		}

		/* Copy the input buffer from userspace to kernel space */
		tzbuflenin = PAGE_ALIGN(req.in_buf_size);
		tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
		if (!tzbufin) {
			pr_err("error allocating in buffer\n");
			ret = -ENOMEM;
			break;
		}

		ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
		if (ret) {
			pr_err("copy_from_user failed, ret=%d\n", ret);
			break;
		}

		/* TZ reads via phys addr: push CPU writes to memory. */
		dmac_flush_range(tzbufin, tzbufin + tzbuflenin);

		/* Prepare the output buffer in kernel space */
		tzbuflenout = PAGE_ALIGN(req.out_buf_size);
		tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
		if (!tzbufout) {
			pr_err("error allocating out buffer\n");
			ret = -ENOMEM;
			break;
		}

		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);

		/* Send the command to TZ */
		desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
		desc.args[0] = virt_to_phys(tzbufin);
		desc.args[1] = req.in_buf_size;
		desc.args[2] = virt_to_phys(tzbufout);
		desc.args[3] = req.out_buf_size;
		desc.args[4] = req.direction;

		/* QSEE crypto clock must be voted on around the scm call. */
		ret = __qseecom_enable_clk(CLK_QSEE);
		if (ret)
			break;

		ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);

		__qseecom_disable_clk(CLK_QSEE);

		if (ret) {
			pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
				ret);
			break;
		}

		/* Copy the output buffer from kernel space to userspace */
		/* Flush again so the CPU sees TZ's writes, not stale cache. */
		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
		ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
		if (ret) {
			pr_err("copy_to_user failed, ret=%d\n", ret);
			break;
		}
	} while (0);

	/* kzfree(NULL) is a no-op; zeroizes before freeing. */
	kzfree(tzbufin);
	kzfree(tzbufout);

	return ret;
}
6345
/*
 * Validate a QTEEC request/response descriptor against the client's
 * registered shared buffer before it is handed to TZ.
 *
 * Checks, in order: client state, request type, length-sum overflow,
 * fit within the shared buffer, non-NULL pointers, both pointers inside
 * the user-virtual shared-buffer window, non-zero lengths, pointer+len
 * overflow, base+length overflow, and finally that both [ptr, ptr+len)
 * ranges end inside the shared buffer. The check order determines which
 * error is reported, so it must not be rearranged.
 *
 * Returns 0 if the message is acceptable, otherwise -EINVAL / -EFAULT /
 * -ENOMEM as appropriate.
 */
static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req)
{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	/* Only loaded client apps may issue QTEEC commands. */
	if (data->type != QSEECOM_CLIENT_APP)
		return -EFAULT;

	/* Guard the following sum before using it. */
	if (req->req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if (req->req_len + req->resp_len > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
			(req->req_len + req->resp_len), data->client.sb_length);
		return -ENOMEM;
	}

	if (req->req_ptr == NULL || req->resp_ptr == NULL) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* Both user pointers must start inside the shared buffer window. */
	if (((uintptr_t)req->req_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->req_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if (((uintptr_t)req->resp_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if ((req->req_len == 0) || (req->resp_len == 0)) {
		pr_err("cmd buf lengtgh/response buf length not valid\n");
		return -EINVAL;
	}

	/* ptr + len must not wrap before the range checks below. */
	if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
		pr_err("Integer overflow in req_len & req_ptr\n");
		return -EINVAL;
	}

	if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_ptr\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* End of each range must also land inside the shared buffer. */
	if ((((uintptr_t)req->req_ptr + req->req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length)) ||
		(((uintptr_t)req->resp_ptr + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
6420
6421static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6422 uint32_t fd_idx, struct sg_table *sg_ptr)
6423{
6424 struct scatterlist *sg = sg_ptr->sgl;
6425 struct qseecom_sg_entry *sg_entry;
6426 void *buf;
6427 uint i;
6428 size_t size;
6429 dma_addr_t coh_pmem;
6430
6431 if (fd_idx >= MAX_ION_FD) {
6432 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6433 return -ENOMEM;
6434 }
6435 /*
6436 * Allocate a buffer, populate it with number of entry plus
6437 * each sg entry's phy addr and length; then return the
6438 * phy_addr of the buffer.
6439 */
6440 size = sizeof(uint32_t) +
6441 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6442 size = (size + PAGE_SIZE) & PAGE_MASK;
6443 buf = dma_alloc_coherent(qseecom.pdev,
6444 size, &coh_pmem, GFP_KERNEL);
6445 if (buf == NULL) {
6446 pr_err("failed to alloc memory for sg buf\n");
6447 return -ENOMEM;
6448 }
6449 *(uint32_t *)buf = sg_ptr->nents;
6450 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6451 for (i = 0; i < sg_ptr->nents; i++) {
6452 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6453 sg_entry->len = sg->length;
6454 sg_entry++;
6455 sg = sg_next(sg);
6456 }
6457 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6458 data->client.sec_buf_fd[fd_idx].vbase = buf;
6459 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6460 data->client.sec_buf_fd[fd_idx].size = size;
6461 return 0;
6462}
6463
6464static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6465 struct qseecom_dev_handle *data, bool cleanup)
6466{
6467 struct ion_handle *ihandle;
6468 int ret = 0;
6469 int i = 0;
6470 uint32_t *update;
6471 struct sg_table *sg_ptr = NULL;
6472 struct scatterlist *sg;
6473 struct qseecom_param_memref *memref;
6474
6475 if (req == NULL) {
6476 pr_err("Invalid address\n");
6477 return -EINVAL;
6478 }
6479 for (i = 0; i < MAX_ION_FD; i++) {
6480 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006481 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006482 req->ifd_data[i].fd);
6483 if (IS_ERR_OR_NULL(ihandle)) {
6484 pr_err("Ion client can't retrieve the handle\n");
6485 return -ENOMEM;
6486 }
6487 if ((req->req_len < sizeof(uint32_t)) ||
6488 (req->ifd_data[i].cmd_buf_offset >
6489 req->req_len - sizeof(uint32_t))) {
6490 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6491 req->req_len,
6492 req->ifd_data[i].cmd_buf_offset);
6493 return -EINVAL;
6494 }
6495 update = (uint32_t *)((char *) req->req_ptr +
6496 req->ifd_data[i].cmd_buf_offset);
6497 if (!update) {
6498 pr_err("update pointer is NULL\n");
6499 return -EINVAL;
6500 }
6501 } else {
6502 continue;
6503 }
6504 /* Populate the cmd data structure with the phys_addr */
6505 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6506 if (IS_ERR_OR_NULL(sg_ptr)) {
6507 pr_err("IOn client could not retrieve sg table\n");
6508 goto err;
6509 }
6510 sg = sg_ptr->sgl;
6511 if (sg == NULL) {
6512 pr_err("sg is NULL\n");
6513 goto err;
6514 }
6515 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6516 pr_err("Num of scat entr (%d)or length(%d) invalid\n",
6517 sg_ptr->nents, sg->length);
6518 goto err;
6519 }
6520 /* clean up buf for pre-allocated fd */
6521 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6522 (*update)) {
6523 if (data->client.sec_buf_fd[i].vbase)
6524 dma_free_coherent(qseecom.pdev,
6525 data->client.sec_buf_fd[i].size,
6526 data->client.sec_buf_fd[i].vbase,
6527 data->client.sec_buf_fd[i].pbase);
6528 memset((void *)update, 0,
6529 sizeof(struct qseecom_param_memref));
6530 memset(&(data->client.sec_buf_fd[i]), 0,
6531 sizeof(struct qseecom_sec_buf_fd_info));
6532 goto clean;
6533 }
6534
6535 if (*update == 0) {
6536 /* update buf for pre-allocated fd from secure heap*/
6537 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6538 sg_ptr);
6539 if (ret) {
6540 pr_err("Failed to handle buf for fd[%d]\n", i);
6541 goto err;
6542 }
6543 memref = (struct qseecom_param_memref *)update;
6544 memref->buffer =
6545 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6546 memref->size =
6547 (uint32_t)(data->client.sec_buf_fd[i].size);
6548 } else {
6549 /* update buf for fd from non-secure qseecom heap */
6550 if (sg_ptr->nents != 1) {
6551 pr_err("Num of scat entr (%d) invalid\n",
6552 sg_ptr->nents);
6553 goto err;
6554 }
6555 if (cleanup)
6556 *update = 0;
6557 else
6558 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6559 }
6560clean:
6561 if (cleanup) {
6562 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6563 ihandle, NULL, sg->length,
6564 ION_IOC_INV_CACHES);
6565 if (ret) {
6566 pr_err("cache operation failed %d\n", ret);
6567 goto err;
6568 }
6569 } else {
6570 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6571 ihandle, NULL, sg->length,
6572 ION_IOC_CLEAN_INV_CACHES);
6573 if (ret) {
6574 pr_err("cache operation failed %d\n", ret);
6575 goto err;
6576 }
6577 data->sglistinfo_ptr[i].indexAndFlags =
6578 SGLISTINFO_SET_INDEX_FLAG(
6579 (sg_ptr->nents == 1), 0,
6580 req->ifd_data[i].cmd_buf_offset);
6581 data->sglistinfo_ptr[i].sizeOrCount =
6582 (sg_ptr->nents == 1) ?
6583 sg->length : sg_ptr->nents;
6584 data->sglist_cnt = i + 1;
6585 }
6586 /* Deallocate the handle */
6587 if (!IS_ERR_OR_NULL(ihandle))
6588 ion_free(qseecom.ion_clnt, ihandle);
6589 }
6590 return ret;
6591err:
6592 if (!IS_ERR_OR_NULL(ihandle))
6593 ion_free(qseecom.ion_clnt, ihandle);
6594 return -ENOMEM;
6595}
6596
/*
 * Common path for QTEEC open-session / close-session / request-
 * cancellation commands.
 *
 * Validates the message, looks up the registered app, converts the
 * user-virtual req/resp pointers to kernel-virtual (for fd patching)
 * while keeping the originals (req_ptr/resp_ptr) for the physical
 * addresses handed to TZ, builds a 32- or 64-bit ireq depending on the
 * QSEE version, performs cache maintenance on the shared buffer, and
 * issues the scm call, handling reentrancy / incomplete-command results.
 *
 * All failure paths after the scm call funnel through "exit:" (ret2
 * pattern) so the shared-buffer cache invalidation and, for modfd
 * commands, the __qseecom_update_qteec_req_buf(..., true) cleanup always
 * run; the original error in ret is preserved.
 *
 * Returns 0 on success or a negative error code.
 */
static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req, uint32_t cmd_id)
{
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	int ret2 = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = __qseecom_qteec_validate_msg(data, req);
	if (ret)
		return ret;

	/* Keep the original user-virtual pointers for the TZ ireq below. */
	req_ptr = req->req_ptr;
	resp_ptr = req->resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* Switch req to kernel-virtual so fd patching can write into it. */
	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->req_ptr);
	req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->resp_ptr);

	/* Only modfd-carrying commands need their ion fds patched in. */
	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, false);
		if (ret)
			return ret;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req->req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req->resp_len;
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req->req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req->resp_len;
		/* 32-bit TAs cannot address buffers at/above 4G. */
		if ((data->client.app_arch == ELFCLASS32) &&
			((ireq_64bit.req_ptr >=
				PHY_ADDR_4G - ireq_64bit.req_len) ||
			(ireq_64bit.resp_ptr >=
				PHY_ADDR_4G - ireq_64bit.resp_len))){
			pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
				data->client.app_name, data->client.app_id);
			pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
				ireq_64bit.req_ptr, ireq_64bit.req_len,
				ireq_64bit.resp_ptr, ireq_64bit.resp_len);
			return -EFAULT;
		}
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
	}
	/* First word of either ireq layout is the command id. */
	if (qseecom.whitelist_support == true
		&& cmd_id == QSEOS_TEE_OPEN_SESSION)
		*(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
	else
		*(uint32_t *)cmd_buf = cmd_id;

	reqd_len_sb_in = req->req_len + req->resp_len;
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
						resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/* NOTE(review): message logs ret, not ret2 — confirm intent. */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
			data->client.sb_virt, data->client.sb_length,
			ION_IOC_INV_CACHES);
	if (ret2) {
		pr_err("cache operation failed %d\n", ret);
		return ret2;
	}

	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret2 = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, true);
		if (ret2)
			return ret2;
	}
	return ret;
}
6759
6760static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6761 void __user *argp)
6762{
6763 struct qseecom_qteec_modfd_req req;
6764 int ret = 0;
6765
6766 ret = copy_from_user(&req, argp,
6767 sizeof(struct qseecom_qteec_modfd_req));
6768 if (ret) {
6769 pr_err("copy_from_user failed\n");
6770 return ret;
6771 }
6772 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6773 QSEOS_TEE_OPEN_SESSION);
6774
6775 return ret;
6776}
6777
6778static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6779 void __user *argp)
6780{
6781 struct qseecom_qteec_req req;
6782 int ret = 0;
6783
6784 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6785 if (ret) {
6786 pr_err("copy_from_user failed\n");
6787 return ret;
6788 }
6789 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6790 return ret;
6791}
6792
6793static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
6794 void __user *argp)
6795{
6796 struct qseecom_qteec_modfd_req req;
6797 struct qseecom_command_scm_resp resp;
6798 struct qseecom_qteec_ireq ireq;
6799 struct qseecom_qteec_64bit_ireq ireq_64bit;
6800 struct qseecom_registered_app_list *ptr_app;
6801 bool found_app = false;
6802 unsigned long flags;
6803 int ret = 0;
6804 int i = 0;
6805 uint32_t reqd_len_sb_in = 0;
6806 void *cmd_buf = NULL;
6807 size_t cmd_len;
6808 struct sglist_info *table = data->sglistinfo_ptr;
6809 void *req_ptr = NULL;
6810 void *resp_ptr = NULL;
6811
6812 ret = copy_from_user(&req, argp,
6813 sizeof(struct qseecom_qteec_modfd_req));
6814 if (ret) {
6815 pr_err("copy_from_user failed\n");
6816 return ret;
6817 }
6818 ret = __qseecom_qteec_validate_msg(data,
6819 (struct qseecom_qteec_req *)(&req));
6820 if (ret)
6821 return ret;
6822 req_ptr = req.req_ptr;
6823 resp_ptr = req.resp_ptr;
6824
6825 /* find app_id & img_name from list */
6826 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6827 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6828 list) {
6829 if ((ptr_app->app_id == data->client.app_id) &&
6830 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6831 found_app = true;
6832 break;
6833 }
6834 }
6835 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6836 if (!found_app) {
6837 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6838 (char *)data->client.app_name);
6839 return -ENOENT;
6840 }
6841
6842 /* validate offsets */
6843 for (i = 0; i < MAX_ION_FD; i++) {
6844 if (req.ifd_data[i].fd) {
6845 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
6846 return -EINVAL;
6847 }
6848 }
6849 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6850 (uintptr_t)req.req_ptr);
6851 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6852 (uintptr_t)req.resp_ptr);
6853 ret = __qseecom_update_qteec_req_buf(&req, data, false);
6854 if (ret)
6855 return ret;
6856
6857 if (qseecom.qsee_version < QSEE_VERSION_40) {
6858 ireq.app_id = data->client.app_id;
6859 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6860 (uintptr_t)req_ptr);
6861 ireq.req_len = req.req_len;
6862 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6863 (uintptr_t)resp_ptr);
6864 ireq.resp_len = req.resp_len;
6865 cmd_buf = (void *)&ireq;
6866 cmd_len = sizeof(struct qseecom_qteec_ireq);
6867 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6868 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6869 dmac_flush_range((void *)table,
6870 (void *)table + SGLISTINFO_TABLE_SIZE);
6871 } else {
6872 ireq_64bit.app_id = data->client.app_id;
6873 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6874 (uintptr_t)req_ptr);
6875 ireq_64bit.req_len = req.req_len;
6876 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6877 (uintptr_t)resp_ptr);
6878 ireq_64bit.resp_len = req.resp_len;
6879 cmd_buf = (void *)&ireq_64bit;
6880 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6881 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6882 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6883 dmac_flush_range((void *)table,
6884 (void *)table + SGLISTINFO_TABLE_SIZE);
6885 }
6886 reqd_len_sb_in = req.req_len + req.resp_len;
6887 if (qseecom.whitelist_support == true)
6888 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
6889 else
6890 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
6891
6892 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6893 data->client.sb_virt,
6894 reqd_len_sb_in,
6895 ION_IOC_CLEAN_INV_CACHES);
6896 if (ret) {
6897 pr_err("cache operation failed %d\n", ret);
6898 return ret;
6899 }
6900
6901 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6902
6903 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6904 cmd_buf, cmd_len,
6905 &resp, sizeof(resp));
6906 if (ret) {
6907 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6908 ret, data->client.app_id);
6909 return ret;
6910 }
6911
6912 if (qseecom.qsee_reentrancy_support) {
6913 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
6914 } else {
6915 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6916 ret = __qseecom_process_incomplete_cmd(data, &resp);
6917 if (ret) {
6918 pr_err("process_incomplete_cmd failed err: %d\n",
6919 ret);
6920 return ret;
6921 }
6922 } else {
6923 if (resp.result != QSEOS_RESULT_SUCCESS) {
6924 pr_err("Response result %d not supported\n",
6925 resp.result);
6926 ret = -EINVAL;
6927 }
6928 }
6929 }
6930 ret = __qseecom_update_qteec_req_buf(&req, data, true);
6931 if (ret)
6932 return ret;
6933
6934 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6935 data->client.sb_virt, data->client.sb_length,
6936 ION_IOC_INV_CACHES);
6937 if (ret) {
6938 pr_err("cache operation failed %d\n", ret);
6939 return ret;
6940 }
6941 return 0;
6942}
6943
6944static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
6945 void __user *argp)
6946{
6947 struct qseecom_qteec_modfd_req req;
6948 int ret = 0;
6949
6950 ret = copy_from_user(&req, argp,
6951 sizeof(struct qseecom_qteec_modfd_req));
6952 if (ret) {
6953 pr_err("copy_from_user failed\n");
6954 return ret;
6955 }
6956 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6957 QSEOS_TEE_REQUEST_CANCELLATION);
6958
6959 return ret;
6960}
6961
6962static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
6963{
6964 if (data->sglist_cnt) {
6965 memset(data->sglistinfo_ptr, 0,
6966 SGLISTINFO_TABLE_SIZE);
6967 data->sglist_cnt = 0;
6968 }
6969}
6970
6971static inline long qseecom_ioctl(struct file *file,
6972 unsigned int cmd, unsigned long arg)
6973{
6974 int ret = 0;
6975 struct qseecom_dev_handle *data = file->private_data;
6976 void __user *argp = (void __user *) arg;
6977 bool perf_enabled = false;
6978
6979 if (!data) {
6980 pr_err("Invalid/uninitialized device handle\n");
6981 return -EINVAL;
6982 }
6983
6984 if (data->abort) {
6985 pr_err("Aborting qseecom driver\n");
6986 return -ENODEV;
6987 }
6988
6989 switch (cmd) {
6990 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
6991 if (data->type != QSEECOM_GENERIC) {
6992 pr_err("reg lstnr req: invalid handle (%d)\n",
6993 data->type);
6994 ret = -EINVAL;
6995 break;
6996 }
6997 pr_debug("ioctl register_listener_req()\n");
6998 mutex_lock(&app_access_lock);
6999 atomic_inc(&data->ioctl_count);
7000 data->type = QSEECOM_LISTENER_SERVICE;
7001 ret = qseecom_register_listener(data, argp);
7002 atomic_dec(&data->ioctl_count);
7003 wake_up_all(&data->abort_wq);
7004 mutex_unlock(&app_access_lock);
7005 if (ret)
7006 pr_err("failed qseecom_register_listener: %d\n", ret);
7007 break;
7008 }
7009 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
7010 if ((data->listener.id == 0) ||
7011 (data->type != QSEECOM_LISTENER_SERVICE)) {
7012 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
7013 data->type, data->listener.id);
7014 ret = -EINVAL;
7015 break;
7016 }
7017 pr_debug("ioctl unregister_listener_req()\n");
7018 mutex_lock(&app_access_lock);
7019 atomic_inc(&data->ioctl_count);
7020 ret = qseecom_unregister_listener(data);
7021 atomic_dec(&data->ioctl_count);
7022 wake_up_all(&data->abort_wq);
7023 mutex_unlock(&app_access_lock);
7024 if (ret)
7025 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7026 break;
7027 }
7028 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7029 if ((data->client.app_id == 0) ||
7030 (data->type != QSEECOM_CLIENT_APP)) {
7031 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7032 data->type, data->client.app_id);
7033 ret = -EINVAL;
7034 break;
7035 }
7036 /* Only one client allowed here at a time */
7037 mutex_lock(&app_access_lock);
7038 if (qseecom.support_bus_scaling) {
7039 /* register bus bw in case the client doesn't do it */
7040 if (!data->mode) {
7041 mutex_lock(&qsee_bw_mutex);
7042 __qseecom_register_bus_bandwidth_needs(
7043 data, HIGH);
7044 mutex_unlock(&qsee_bw_mutex);
7045 }
7046 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7047 if (ret) {
7048 pr_err("Failed to set bw.\n");
7049 ret = -EINVAL;
7050 mutex_unlock(&app_access_lock);
7051 break;
7052 }
7053 }
7054 /*
7055 * On targets where crypto clock is handled by HLOS,
7056 * if clk_access_cnt is zero and perf_enabled is false,
7057 * then the crypto clock was not enabled before sending cmd to
7058 * tz, qseecom will enable the clock to avoid service failure.
7059 */
7060 if (!qseecom.no_clock_support &&
7061 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7062 pr_debug("ce clock is not enabled!\n");
7063 ret = qseecom_perf_enable(data);
7064 if (ret) {
7065 pr_err("Failed to vote for clock with err %d\n",
7066 ret);
7067 mutex_unlock(&app_access_lock);
7068 ret = -EINVAL;
7069 break;
7070 }
7071 perf_enabled = true;
7072 }
7073 atomic_inc(&data->ioctl_count);
7074 ret = qseecom_send_cmd(data, argp);
7075 if (qseecom.support_bus_scaling)
7076 __qseecom_add_bw_scale_down_timer(
7077 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7078 if (perf_enabled) {
7079 qsee_disable_clock_vote(data, CLK_DFAB);
7080 qsee_disable_clock_vote(data, CLK_SFPB);
7081 }
7082 atomic_dec(&data->ioctl_count);
7083 wake_up_all(&data->abort_wq);
7084 mutex_unlock(&app_access_lock);
7085 if (ret)
7086 pr_err("failed qseecom_send_cmd: %d\n", ret);
7087 break;
7088 }
7089 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7090 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7091 if ((data->client.app_id == 0) ||
7092 (data->type != QSEECOM_CLIENT_APP)) {
7093 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7094 data->type, data->client.app_id);
7095 ret = -EINVAL;
7096 break;
7097 }
7098 /* Only one client allowed here at a time */
7099 mutex_lock(&app_access_lock);
7100 if (qseecom.support_bus_scaling) {
7101 if (!data->mode) {
7102 mutex_lock(&qsee_bw_mutex);
7103 __qseecom_register_bus_bandwidth_needs(
7104 data, HIGH);
7105 mutex_unlock(&qsee_bw_mutex);
7106 }
7107 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7108 if (ret) {
7109 pr_err("Failed to set bw.\n");
7110 mutex_unlock(&app_access_lock);
7111 ret = -EINVAL;
7112 break;
7113 }
7114 }
7115 /*
7116 * On targets where crypto clock is handled by HLOS,
7117 * if clk_access_cnt is zero and perf_enabled is false,
7118 * then the crypto clock was not enabled before sending cmd to
7119 * tz, qseecom will enable the clock to avoid service failure.
7120 */
7121 if (!qseecom.no_clock_support &&
7122 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7123 pr_debug("ce clock is not enabled!\n");
7124 ret = qseecom_perf_enable(data);
7125 if (ret) {
7126 pr_err("Failed to vote for clock with err %d\n",
7127 ret);
7128 mutex_unlock(&app_access_lock);
7129 ret = -EINVAL;
7130 break;
7131 }
7132 perf_enabled = true;
7133 }
7134 atomic_inc(&data->ioctl_count);
7135 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7136 ret = qseecom_send_modfd_cmd(data, argp);
7137 else
7138 ret = qseecom_send_modfd_cmd_64(data, argp);
7139 if (qseecom.support_bus_scaling)
7140 __qseecom_add_bw_scale_down_timer(
7141 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7142 if (perf_enabled) {
7143 qsee_disable_clock_vote(data, CLK_DFAB);
7144 qsee_disable_clock_vote(data, CLK_SFPB);
7145 }
7146 atomic_dec(&data->ioctl_count);
7147 wake_up_all(&data->abort_wq);
7148 mutex_unlock(&app_access_lock);
7149 if (ret)
7150 pr_err("failed qseecom_send_cmd: %d\n", ret);
7151 __qseecom_clean_data_sglistinfo(data);
7152 break;
7153 }
7154 case QSEECOM_IOCTL_RECEIVE_REQ: {
7155 if ((data->listener.id == 0) ||
7156 (data->type != QSEECOM_LISTENER_SERVICE)) {
7157 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7158 data->type, data->listener.id);
7159 ret = -EINVAL;
7160 break;
7161 }
7162 atomic_inc(&data->ioctl_count);
7163 ret = qseecom_receive_req(data);
7164 atomic_dec(&data->ioctl_count);
7165 wake_up_all(&data->abort_wq);
7166 if (ret && (ret != -ERESTARTSYS))
7167 pr_err("failed qseecom_receive_req: %d\n", ret);
7168 break;
7169 }
7170 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7171 if ((data->listener.id == 0) ||
7172 (data->type != QSEECOM_LISTENER_SERVICE)) {
7173 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7174 data->type, data->listener.id);
7175 ret = -EINVAL;
7176 break;
7177 }
7178 atomic_inc(&data->ioctl_count);
7179 if (!qseecom.qsee_reentrancy_support)
7180 ret = qseecom_send_resp();
7181 else
7182 ret = qseecom_reentrancy_send_resp(data);
7183 atomic_dec(&data->ioctl_count);
7184 wake_up_all(&data->abort_wq);
7185 if (ret)
7186 pr_err("failed qseecom_send_resp: %d\n", ret);
7187 break;
7188 }
7189 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7190 if ((data->type != QSEECOM_CLIENT_APP) &&
7191 (data->type != QSEECOM_GENERIC) &&
7192 (data->type != QSEECOM_SECURE_SERVICE)) {
7193 pr_err("set mem param req: invalid handle (%d)\n",
7194 data->type);
7195 ret = -EINVAL;
7196 break;
7197 }
7198 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7199 mutex_lock(&app_access_lock);
7200 atomic_inc(&data->ioctl_count);
7201 ret = qseecom_set_client_mem_param(data, argp);
7202 atomic_dec(&data->ioctl_count);
7203 mutex_unlock(&app_access_lock);
7204 if (ret)
7205 pr_err("failed Qqseecom_set_mem_param request: %d\n",
7206 ret);
7207 break;
7208 }
7209 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7210 if ((data->type != QSEECOM_GENERIC) &&
7211 (data->type != QSEECOM_CLIENT_APP)) {
7212 pr_err("load app req: invalid handle (%d)\n",
7213 data->type);
7214 ret = -EINVAL;
7215 break;
7216 }
7217 data->type = QSEECOM_CLIENT_APP;
7218 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7219 mutex_lock(&app_access_lock);
7220 atomic_inc(&data->ioctl_count);
7221 ret = qseecom_load_app(data, argp);
7222 atomic_dec(&data->ioctl_count);
7223 mutex_unlock(&app_access_lock);
7224 if (ret)
7225 pr_err("failed load_app request: %d\n", ret);
7226 break;
7227 }
7228 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7229 if ((data->client.app_id == 0) ||
7230 (data->type != QSEECOM_CLIENT_APP)) {
7231 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7232 data->type, data->client.app_id);
7233 ret = -EINVAL;
7234 break;
7235 }
7236 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7237 mutex_lock(&app_access_lock);
7238 atomic_inc(&data->ioctl_count);
7239 ret = qseecom_unload_app(data, false);
7240 atomic_dec(&data->ioctl_count);
7241 mutex_unlock(&app_access_lock);
7242 if (ret)
7243 pr_err("failed unload_app request: %d\n", ret);
7244 break;
7245 }
7246 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7247 atomic_inc(&data->ioctl_count);
7248 ret = qseecom_get_qseos_version(data, argp);
7249 if (ret)
7250 pr_err("qseecom_get_qseos_version: %d\n", ret);
7251 atomic_dec(&data->ioctl_count);
7252 break;
7253 }
7254 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7255 if ((data->type != QSEECOM_GENERIC) &&
7256 (data->type != QSEECOM_CLIENT_APP)) {
7257 pr_err("perf enable req: invalid handle (%d)\n",
7258 data->type);
7259 ret = -EINVAL;
7260 break;
7261 }
7262 if ((data->type == QSEECOM_CLIENT_APP) &&
7263 (data->client.app_id == 0)) {
7264 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7265 data->type, data->client.app_id);
7266 ret = -EINVAL;
7267 break;
7268 }
7269 atomic_inc(&data->ioctl_count);
7270 if (qseecom.support_bus_scaling) {
7271 mutex_lock(&qsee_bw_mutex);
7272 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7273 mutex_unlock(&qsee_bw_mutex);
7274 } else {
7275 ret = qseecom_perf_enable(data);
7276 if (ret)
7277 pr_err("Fail to vote for clocks %d\n", ret);
7278 }
7279 atomic_dec(&data->ioctl_count);
7280 break;
7281 }
7282 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7283 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7284 (data->type != QSEECOM_CLIENT_APP)) {
7285 pr_err("perf disable req: invalid handle (%d)\n",
7286 data->type);
7287 ret = -EINVAL;
7288 break;
7289 }
7290 if ((data->type == QSEECOM_CLIENT_APP) &&
7291 (data->client.app_id == 0)) {
7292 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7293 data->type, data->client.app_id);
7294 ret = -EINVAL;
7295 break;
7296 }
7297 atomic_inc(&data->ioctl_count);
7298 if (!qseecom.support_bus_scaling) {
7299 qsee_disable_clock_vote(data, CLK_DFAB);
7300 qsee_disable_clock_vote(data, CLK_SFPB);
7301 } else {
7302 mutex_lock(&qsee_bw_mutex);
7303 qseecom_unregister_bus_bandwidth_needs(data);
7304 mutex_unlock(&qsee_bw_mutex);
7305 }
7306 atomic_dec(&data->ioctl_count);
7307 break;
7308 }
7309
7310 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7311 /* If crypto clock is not handled by HLOS, return directly. */
7312 if (qseecom.no_clock_support) {
7313 pr_debug("crypto clock is not handled by HLOS\n");
7314 break;
7315 }
7316 if ((data->client.app_id == 0) ||
7317 (data->type != QSEECOM_CLIENT_APP)) {
7318 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7319 data->type, data->client.app_id);
7320 ret = -EINVAL;
7321 break;
7322 }
7323 atomic_inc(&data->ioctl_count);
7324 ret = qseecom_scale_bus_bandwidth(data, argp);
7325 atomic_dec(&data->ioctl_count);
7326 break;
7327 }
7328 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7329 if (data->type != QSEECOM_GENERIC) {
7330 pr_err("load ext elf req: invalid client handle (%d)\n",
7331 data->type);
7332 ret = -EINVAL;
7333 break;
7334 }
7335 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7336 data->released = true;
7337 mutex_lock(&app_access_lock);
7338 atomic_inc(&data->ioctl_count);
7339 ret = qseecom_load_external_elf(data, argp);
7340 atomic_dec(&data->ioctl_count);
7341 mutex_unlock(&app_access_lock);
7342 if (ret)
7343 pr_err("failed load_external_elf request: %d\n", ret);
7344 break;
7345 }
7346 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7347 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7348 pr_err("unload ext elf req: invalid handle (%d)\n",
7349 data->type);
7350 ret = -EINVAL;
7351 break;
7352 }
7353 data->released = true;
7354 mutex_lock(&app_access_lock);
7355 atomic_inc(&data->ioctl_count);
7356 ret = qseecom_unload_external_elf(data);
7357 atomic_dec(&data->ioctl_count);
7358 mutex_unlock(&app_access_lock);
7359 if (ret)
7360 pr_err("failed unload_app request: %d\n", ret);
7361 break;
7362 }
7363 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7364 data->type = QSEECOM_CLIENT_APP;
7365 mutex_lock(&app_access_lock);
7366 atomic_inc(&data->ioctl_count);
7367 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7368 ret = qseecom_query_app_loaded(data, argp);
7369 atomic_dec(&data->ioctl_count);
7370 mutex_unlock(&app_access_lock);
7371 break;
7372 }
7373 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7374 if (data->type != QSEECOM_GENERIC) {
7375 pr_err("send cmd svc req: invalid handle (%d)\n",
7376 data->type);
7377 ret = -EINVAL;
7378 break;
7379 }
7380 data->type = QSEECOM_SECURE_SERVICE;
7381 if (qseecom.qsee_version < QSEE_VERSION_03) {
7382 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7383 qseecom.qsee_version);
7384 return -EINVAL;
7385 }
7386 mutex_lock(&app_access_lock);
7387 atomic_inc(&data->ioctl_count);
7388 ret = qseecom_send_service_cmd(data, argp);
7389 atomic_dec(&data->ioctl_count);
7390 mutex_unlock(&app_access_lock);
7391 break;
7392 }
7393 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7394 if (!(qseecom.support_pfe || qseecom.support_fde))
7395 pr_err("Features requiring key init not supported\n");
7396 if (data->type != QSEECOM_GENERIC) {
7397 pr_err("create key req: invalid handle (%d)\n",
7398 data->type);
7399 ret = -EINVAL;
7400 break;
7401 }
7402 if (qseecom.qsee_version < QSEE_VERSION_05) {
7403 pr_err("Create Key feature unsupported: qsee ver %u\n",
7404 qseecom.qsee_version);
7405 return -EINVAL;
7406 }
7407 data->released = true;
7408 mutex_lock(&app_access_lock);
7409 atomic_inc(&data->ioctl_count);
7410 ret = qseecom_create_key(data, argp);
7411 if (ret)
7412 pr_err("failed to create encryption key: %d\n", ret);
7413
7414 atomic_dec(&data->ioctl_count);
7415 mutex_unlock(&app_access_lock);
7416 break;
7417 }
7418 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7419 if (!(qseecom.support_pfe || qseecom.support_fde))
7420 pr_err("Features requiring key init not supported\n");
7421 if (data->type != QSEECOM_GENERIC) {
7422 pr_err("wipe key req: invalid handle (%d)\n",
7423 data->type);
7424 ret = -EINVAL;
7425 break;
7426 }
7427 if (qseecom.qsee_version < QSEE_VERSION_05) {
7428 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7429 qseecom.qsee_version);
7430 return -EINVAL;
7431 }
7432 data->released = true;
7433 mutex_lock(&app_access_lock);
7434 atomic_inc(&data->ioctl_count);
7435 ret = qseecom_wipe_key(data, argp);
7436 if (ret)
7437 pr_err("failed to wipe encryption key: %d\n", ret);
7438 atomic_dec(&data->ioctl_count);
7439 mutex_unlock(&app_access_lock);
7440 break;
7441 }
7442 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7443 if (!(qseecom.support_pfe || qseecom.support_fde))
7444 pr_err("Features requiring key init not supported\n");
7445 if (data->type != QSEECOM_GENERIC) {
7446 pr_err("update key req: invalid handle (%d)\n",
7447 data->type);
7448 ret = -EINVAL;
7449 break;
7450 }
7451 if (qseecom.qsee_version < QSEE_VERSION_05) {
7452 pr_err("Update Key feature unsupported in qsee ver %u\n",
7453 qseecom.qsee_version);
7454 return -EINVAL;
7455 }
7456 data->released = true;
7457 mutex_lock(&app_access_lock);
7458 atomic_inc(&data->ioctl_count);
7459 ret = qseecom_update_key_user_info(data, argp);
7460 if (ret)
7461 pr_err("failed to update key user info: %d\n", ret);
7462 atomic_dec(&data->ioctl_count);
7463 mutex_unlock(&app_access_lock);
7464 break;
7465 }
7466 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7467 if (data->type != QSEECOM_GENERIC) {
7468 pr_err("save part hash req: invalid handle (%d)\n",
7469 data->type);
7470 ret = -EINVAL;
7471 break;
7472 }
7473 data->released = true;
7474 mutex_lock(&app_access_lock);
7475 atomic_inc(&data->ioctl_count);
7476 ret = qseecom_save_partition_hash(argp);
7477 atomic_dec(&data->ioctl_count);
7478 mutex_unlock(&app_access_lock);
7479 break;
7480 }
7481 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7482 if (data->type != QSEECOM_GENERIC) {
7483 pr_err("ES activated req: invalid handle (%d)\n",
7484 data->type);
7485 ret = -EINVAL;
7486 break;
7487 }
7488 data->released = true;
7489 mutex_lock(&app_access_lock);
7490 atomic_inc(&data->ioctl_count);
7491 ret = qseecom_is_es_activated(argp);
7492 atomic_dec(&data->ioctl_count);
7493 mutex_unlock(&app_access_lock);
7494 break;
7495 }
7496 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7497 if (data->type != QSEECOM_GENERIC) {
7498 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7499 data->type);
7500 ret = -EINVAL;
7501 break;
7502 }
7503 data->released = true;
7504 mutex_lock(&app_access_lock);
7505 atomic_inc(&data->ioctl_count);
7506 ret = qseecom_mdtp_cipher_dip(argp);
7507 atomic_dec(&data->ioctl_count);
7508 mutex_unlock(&app_access_lock);
7509 break;
7510 }
7511 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7512 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7513 if ((data->listener.id == 0) ||
7514 (data->type != QSEECOM_LISTENER_SERVICE)) {
7515 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7516 data->type, data->listener.id);
7517 ret = -EINVAL;
7518 break;
7519 }
7520 atomic_inc(&data->ioctl_count);
7521 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7522 ret = qseecom_send_modfd_resp(data, argp);
7523 else
7524 ret = qseecom_send_modfd_resp_64(data, argp);
7525 atomic_dec(&data->ioctl_count);
7526 wake_up_all(&data->abort_wq);
7527 if (ret)
7528 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7529 __qseecom_clean_data_sglistinfo(data);
7530 break;
7531 }
7532 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7533 if ((data->client.app_id == 0) ||
7534 (data->type != QSEECOM_CLIENT_APP)) {
7535 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7536 data->type, data->client.app_id);
7537 ret = -EINVAL;
7538 break;
7539 }
7540 if (qseecom.qsee_version < QSEE_VERSION_40) {
7541 pr_err("GP feature unsupported: qsee ver %u\n",
7542 qseecom.qsee_version);
7543 return -EINVAL;
7544 }
7545 /* Only one client allowed here at a time */
7546 mutex_lock(&app_access_lock);
7547 atomic_inc(&data->ioctl_count);
7548 ret = qseecom_qteec_open_session(data, argp);
7549 atomic_dec(&data->ioctl_count);
7550 wake_up_all(&data->abort_wq);
7551 mutex_unlock(&app_access_lock);
7552 if (ret)
7553 pr_err("failed open_session_cmd: %d\n", ret);
7554 __qseecom_clean_data_sglistinfo(data);
7555 break;
7556 }
7557 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7558 if ((data->client.app_id == 0) ||
7559 (data->type != QSEECOM_CLIENT_APP)) {
7560 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7561 data->type, data->client.app_id);
7562 ret = -EINVAL;
7563 break;
7564 }
7565 if (qseecom.qsee_version < QSEE_VERSION_40) {
7566 pr_err("GP feature unsupported: qsee ver %u\n",
7567 qseecom.qsee_version);
7568 return -EINVAL;
7569 }
7570 /* Only one client allowed here at a time */
7571 mutex_lock(&app_access_lock);
7572 atomic_inc(&data->ioctl_count);
7573 ret = qseecom_qteec_close_session(data, argp);
7574 atomic_dec(&data->ioctl_count);
7575 wake_up_all(&data->abort_wq);
7576 mutex_unlock(&app_access_lock);
7577 if (ret)
7578 pr_err("failed close_session_cmd: %d\n", ret);
7579 break;
7580 }
7581 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7582 if ((data->client.app_id == 0) ||
7583 (data->type != QSEECOM_CLIENT_APP)) {
7584 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7585 data->type, data->client.app_id);
7586 ret = -EINVAL;
7587 break;
7588 }
7589 if (qseecom.qsee_version < QSEE_VERSION_40) {
7590 pr_err("GP feature unsupported: qsee ver %u\n",
7591 qseecom.qsee_version);
7592 return -EINVAL;
7593 }
7594 /* Only one client allowed here at a time */
7595 mutex_lock(&app_access_lock);
7596 atomic_inc(&data->ioctl_count);
7597 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7598 atomic_dec(&data->ioctl_count);
7599 wake_up_all(&data->abort_wq);
7600 mutex_unlock(&app_access_lock);
7601 if (ret)
7602 pr_err("failed Invoke cmd: %d\n", ret);
7603 __qseecom_clean_data_sglistinfo(data);
7604 break;
7605 }
7606 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7607 if ((data->client.app_id == 0) ||
7608 (data->type != QSEECOM_CLIENT_APP)) {
7609 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7610 data->type, data->client.app_id);
7611 ret = -EINVAL;
7612 break;
7613 }
7614 if (qseecom.qsee_version < QSEE_VERSION_40) {
7615 pr_err("GP feature unsupported: qsee ver %u\n",
7616 qseecom.qsee_version);
7617 return -EINVAL;
7618 }
7619 /* Only one client allowed here at a time */
7620 mutex_lock(&app_access_lock);
7621 atomic_inc(&data->ioctl_count);
7622 ret = qseecom_qteec_request_cancellation(data, argp);
7623 atomic_dec(&data->ioctl_count);
7624 wake_up_all(&data->abort_wq);
7625 mutex_unlock(&app_access_lock);
7626 if (ret)
7627 pr_err("failed request_cancellation: %d\n", ret);
7628 break;
7629 }
7630 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7631 atomic_inc(&data->ioctl_count);
7632 ret = qseecom_get_ce_info(data, argp);
7633 if (ret)
7634 pr_err("failed get fde ce pipe info: %d\n", ret);
7635 atomic_dec(&data->ioctl_count);
7636 break;
7637 }
7638 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7639 atomic_inc(&data->ioctl_count);
7640 ret = qseecom_free_ce_info(data, argp);
7641 if (ret)
7642 pr_err("failed get fde ce pipe info: %d\n", ret);
7643 atomic_dec(&data->ioctl_count);
7644 break;
7645 }
7646 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7647 atomic_inc(&data->ioctl_count);
7648 ret = qseecom_query_ce_info(data, argp);
7649 if (ret)
7650 pr_err("failed get fde ce pipe info: %d\n", ret);
7651 atomic_dec(&data->ioctl_count);
7652 break;
7653 }
7654 default:
7655 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7656 return -EINVAL;
7657 }
7658 return ret;
7659}
7660
7661static int qseecom_open(struct inode *inode, struct file *file)
7662{
7663 int ret = 0;
7664 struct qseecom_dev_handle *data;
7665
7666 data = kzalloc(sizeof(*data), GFP_KERNEL);
7667 if (!data)
7668 return -ENOMEM;
7669 file->private_data = data;
7670 data->abort = 0;
7671 data->type = QSEECOM_GENERIC;
7672 data->released = false;
7673 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7674 data->mode = INACTIVE;
7675 init_waitqueue_head(&data->abort_wq);
7676 atomic_set(&data->ioctl_count, 0);
7677 return ret;
7678}
7679
7680static int qseecom_release(struct inode *inode, struct file *file)
7681{
7682 struct qseecom_dev_handle *data = file->private_data;
7683 int ret = 0;
7684
7685 if (data->released == false) {
7686 pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
7687 data->type, data->mode, data);
7688 switch (data->type) {
7689 case QSEECOM_LISTENER_SERVICE:
7690 mutex_lock(&app_access_lock);
7691 ret = qseecom_unregister_listener(data);
7692 mutex_unlock(&app_access_lock);
7693 break;
7694 case QSEECOM_CLIENT_APP:
7695 mutex_lock(&app_access_lock);
7696 ret = qseecom_unload_app(data, true);
7697 mutex_unlock(&app_access_lock);
7698 break;
7699 case QSEECOM_SECURE_SERVICE:
7700 case QSEECOM_GENERIC:
7701 ret = qseecom_unmap_ion_allocated_memory(data);
7702 if (ret)
7703 pr_err("Ion Unmap failed\n");
7704 break;
7705 case QSEECOM_UNAVAILABLE_CLIENT_APP:
7706 break;
7707 default:
7708 pr_err("Unsupported clnt_handle_type %d",
7709 data->type);
7710 break;
7711 }
7712 }
7713
7714 if (qseecom.support_bus_scaling) {
7715 mutex_lock(&qsee_bw_mutex);
7716 if (data->mode != INACTIVE) {
7717 qseecom_unregister_bus_bandwidth_needs(data);
7718 if (qseecom.cumulative_mode == INACTIVE) {
7719 ret = __qseecom_set_msm_bus_request(INACTIVE);
7720 if (ret)
7721 pr_err("Fail to scale down bus\n");
7722 }
7723 }
7724 mutex_unlock(&qsee_bw_mutex);
7725 } else {
7726 if (data->fast_load_enabled == true)
7727 qsee_disable_clock_vote(data, CLK_SFPB);
7728 if (data->perf_enabled == true)
7729 qsee_disable_clock_vote(data, CLK_DFAB);
7730 }
7731 kfree(data);
7732
7733 return ret;
7734}
7735
7736#ifdef CONFIG_COMPAT
7737#include "compat_qseecom.c"
7738#else
7739#define compat_qseecom_ioctl NULL
7740#endif
7741
/*
 * Character-device entry points for /dev/qseecom.
 * compat_qseecom_ioctl resolves to NULL when CONFIG_COMPAT is not set
 * (see the #ifdef block above), disabling 32-bit compat ioctls.
 */
static const struct file_operations qseecom_fops = {
		.owner = THIS_MODULE,
		.unlocked_ioctl = qseecom_ioctl,
		.compat_ioctl = compat_qseecom_ioctl,
		.open = qseecom_open,
		.release = qseecom_release
};
7749
7750static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7751{
7752 int rc = 0;
7753 struct device *pdev;
7754 struct qseecom_clk *qclk;
7755 char *core_clk_src = NULL;
7756 char *core_clk = NULL;
7757 char *iface_clk = NULL;
7758 char *bus_clk = NULL;
7759
7760 switch (ce) {
7761 case CLK_QSEE: {
7762 core_clk_src = "core_clk_src";
7763 core_clk = "core_clk";
7764 iface_clk = "iface_clk";
7765 bus_clk = "bus_clk";
7766 qclk = &qseecom.qsee;
7767 qclk->instance = CLK_QSEE;
7768 break;
7769 };
7770 case CLK_CE_DRV: {
7771 core_clk_src = "ce_drv_core_clk_src";
7772 core_clk = "ce_drv_core_clk";
7773 iface_clk = "ce_drv_iface_clk";
7774 bus_clk = "ce_drv_bus_clk";
7775 qclk = &qseecom.ce_drv;
7776 qclk->instance = CLK_CE_DRV;
7777 break;
7778 };
7779 default:
7780 pr_err("Invalid ce hw instance: %d!\n", ce);
7781 return -EIO;
7782 }
7783
7784 if (qseecom.no_clock_support) {
7785 qclk->ce_core_clk = NULL;
7786 qclk->ce_clk = NULL;
7787 qclk->ce_bus_clk = NULL;
7788 qclk->ce_core_src_clk = NULL;
7789 return 0;
7790 }
7791
7792 pdev = qseecom.pdev;
7793
7794 /* Get CE3 src core clk. */
7795 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7796 if (!IS_ERR(qclk->ce_core_src_clk)) {
7797 rc = clk_set_rate(qclk->ce_core_src_clk,
7798 qseecom.ce_opp_freq_hz);
7799 if (rc) {
7800 clk_put(qclk->ce_core_src_clk);
7801 qclk->ce_core_src_clk = NULL;
7802 pr_err("Unable to set the core src clk @%uMhz.\n",
7803 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7804 return -EIO;
7805 }
7806 } else {
7807 pr_warn("Unable to get CE core src clk, set to NULL\n");
7808 qclk->ce_core_src_clk = NULL;
7809 }
7810
7811 /* Get CE core clk */
7812 qclk->ce_core_clk = clk_get(pdev, core_clk);
7813 if (IS_ERR(qclk->ce_core_clk)) {
7814 rc = PTR_ERR(qclk->ce_core_clk);
7815 pr_err("Unable to get CE core clk\n");
7816 if (qclk->ce_core_src_clk != NULL)
7817 clk_put(qclk->ce_core_src_clk);
7818 return -EIO;
7819 }
7820
7821 /* Get CE Interface clk */
7822 qclk->ce_clk = clk_get(pdev, iface_clk);
7823 if (IS_ERR(qclk->ce_clk)) {
7824 rc = PTR_ERR(qclk->ce_clk);
7825 pr_err("Unable to get CE interface clk\n");
7826 if (qclk->ce_core_src_clk != NULL)
7827 clk_put(qclk->ce_core_src_clk);
7828 clk_put(qclk->ce_core_clk);
7829 return -EIO;
7830 }
7831
7832 /* Get CE AXI clk */
7833 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
7834 if (IS_ERR(qclk->ce_bus_clk)) {
7835 rc = PTR_ERR(qclk->ce_bus_clk);
7836 pr_err("Unable to get CE BUS interface clk\n");
7837 if (qclk->ce_core_src_clk != NULL)
7838 clk_put(qclk->ce_core_src_clk);
7839 clk_put(qclk->ce_core_clk);
7840 clk_put(qclk->ce_clk);
7841 return -EIO;
7842 }
7843
7844 return rc;
7845}
7846
7847static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
7848{
7849 struct qseecom_clk *qclk;
7850
7851 if (ce == CLK_QSEE)
7852 qclk = &qseecom.qsee;
7853 else
7854 qclk = &qseecom.ce_drv;
7855
7856 if (qclk->ce_clk != NULL) {
7857 clk_put(qclk->ce_clk);
7858 qclk->ce_clk = NULL;
7859 }
7860 if (qclk->ce_core_clk != NULL) {
7861 clk_put(qclk->ce_core_clk);
7862 qclk->ce_core_clk = NULL;
7863 }
7864 if (qclk->ce_bus_clk != NULL) {
7865 clk_put(qclk->ce_bus_clk);
7866 qclk->ce_bus_clk = NULL;
7867 }
7868 if (qclk->ce_core_src_clk != NULL) {
7869 clk_put(qclk->ce_core_src_clk);
7870 qclk->ce_core_src_clk = NULL;
7871 }
7872 qclk->instance = CLK_INVALID;
7873}
7874
/*
 * Parse crypto-engine (CE) pipe-pair configuration for full-disk
 * encryption (FDE) and per-file encryption (PFE) from the platform
 * device tree, and populate qseecom.ce_info.
 *
 * Two device-tree schemas are supported:
 *  - New format: "qcom,full-disk-encrypt-info" / "qcom,per-file-encrypt-info"
 *    tables of qseecom_crypto_info entries (unit, ce, pipe-pair), grouped
 *    here into per-unit qseecom_ce_info_use records.
 *  - Old format (only if neither new table is present, tracked by old_db):
 *    single "qcom,disk-encrypt-pipe-pair" / "qcom,file-encrypt-pipe-pair"
 *    values, optionally replicated across
 *    "qcom,hlos-num-ce-hw-instances" HLOS CE instances.
 *
 * On any failure, everything allocated here is freed in the shared "out:"
 * tail and an error code (-EINVAL or -ENOMEM) is returned; on success the
 * allocated fde/pfe tables are owned by the qseecom global.  Returns 0 on
 * success.
 */
static int qseecom_retrieve_ce_data(struct platform_device *pdev)
{
	int rc = 0;
	uint32_t hlos_num_ce_hw_instances;
	uint32_t disk_encrypt_pipe;
	uint32_t file_encrypt_pipe;
	/*
	 * Zero-initialized so hlos_ce_hw_instance[0] (read at out1:) is
	 * well-defined even when the new-format tables are used and the
	 * old-db properties are never read.
	 */
	uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
	int i;
	const int *tbl;
	int size;
	int entry;
	struct qseecom_crypto_info *pfde_tbl = NULL;
	struct qseecom_crypto_info *p;
	int tbl_size;
	int j;
	bool old_db = true;
	struct qseecom_ce_info_use *pce_info_use;
	uint32_t *unit_tbl = NULL;
	int total_units = 0;
	struct qseecom_ce_pipe_entry *pce_entry;

	qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
	qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;

	/* The QSEE CE hardware instance is mandatory in both schemas. */
	if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,qsee-ce-hw-instance",
				&qseecom.ce_info.qsee_ce_hw_instance)) {
		pr_err("Fail to get qsee ce hw instance information.\n");
		rc = -EINVAL;
		goto out;
	} else {
		pr_debug("qsee-ce-hw-instance=0x%x\n",
			qseecom.ce_info.qsee_ce_hw_instance);
	}

	qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
						"qcom,support-fde");
	qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
						"qcom,support-pfe");

	if (!qseecom.support_pfe && !qseecom.support_fde) {
		pr_warn("Device does not support PFE/FDE");
		goto out;
	}

	/* --- New-format FDE table, if present --- */
	if (qseecom.support_fde)
		tbl = of_get_property((&pdev->dev)->of_node,
			"qcom,full-disk-encrypt-info", &size);
	else
		tbl = NULL;
	if (tbl) {
		old_db = false;
		if (size % sizeof(struct qseecom_crypto_info)) {
			pr_err("full-disk-encrypt-info tbl size(%d)\n",
				size);
			rc = -EINVAL;
			goto out;
		}
		tbl_size = size / sizeof
				(struct qseecom_crypto_info);

		pfde_tbl = kzalloc(size, GFP_KERNEL);
		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
		total_units = 0;

		if (!pfde_tbl || !unit_tbl) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		if (of_property_read_u32_array((&pdev->dev)->of_node,
			"qcom,full-disk-encrypt-info",
			(u32 *)pfde_tbl, size/sizeof(u32))) {
			pr_err("failed to read full-disk-encrypt-info tbl\n");
			rc = -EINVAL;
			goto out;
		}

		/* Collect the set of distinct unit numbers into unit_tbl. */
		for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
			for (j = 0; j < total_units; j++) {
				if (p->unit_num == *(unit_tbl + j))
					break;
			}
			if (j == total_units) {
				*(unit_tbl + total_units) = p->unit_num;
				total_units++;
			}
		}

		qseecom.ce_info.num_fde = total_units;
		pce_info_use = qseecom.ce_info.fde = kcalloc(
			total_units, sizeof(struct qseecom_ce_info_use),
				GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}

		/* One ce_info_use per unit, holding that unit's pipe entries. */
		for (j = 0; j < total_units; j++, pce_info_use++) {
			pce_info_use->unit_num = *(unit_tbl + j);
			pce_info_use->alloc = false;
			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
			pce_info_use->num_ce_pipe_entries = 0;
			pce_info_use->ce_pipe_entry = NULL;
			/* First pass: count entries belonging to this unit. */
			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num)
					pce_info_use->num_ce_pipe_entries++;
			}

			entry = pce_info_use->num_ce_pipe_entries;
			pce_entry = pce_info_use->ce_pipe_entry =
				kcalloc(entry,
					sizeof(struct qseecom_ce_pipe_entry),
					GFP_KERNEL);
			if (pce_entry == NULL) {
				pr_err("failed to alloc memory\n");
				rc = -ENOMEM;
				goto out;
			}

			/* Second pass: copy this unit's ce/pipe-pair entries. */
			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num) {
					pce_entry->ce_num = p->ce;
					pce_entry->ce_pipe_pair =
							p->pipe_pair;
					pce_entry->valid = true;
					pce_entry++;
				}
			}
		}
		kfree(unit_tbl);
		unit_tbl = NULL;
		kfree(pfde_tbl);
		pfde_tbl = NULL;
	}

	/* --- New-format PFE table, if present (mirrors the FDE path) --- */
	if (qseecom.support_pfe)
		tbl = of_get_property((&pdev->dev)->of_node,
			"qcom,per-file-encrypt-info", &size);
	else
		tbl = NULL;
	if (tbl) {
		old_db = false;
		if (size % sizeof(struct qseecom_crypto_info)) {
			pr_err("per-file-encrypt-info tbl size(%d)\n",
				size);
			rc = -EINVAL;
			goto out;
		}
		tbl_size = size / sizeof
				(struct qseecom_crypto_info);

		pfde_tbl = kzalloc(size, GFP_KERNEL);
		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
		total_units = 0;
		if (!pfde_tbl || !unit_tbl) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		if (of_property_read_u32_array((&pdev->dev)->of_node,
			"qcom,per-file-encrypt-info",
			(u32 *)pfde_tbl, size/sizeof(u32))) {
			pr_err("failed to read per-file-encrypt-info tbl\n");
			rc = -EINVAL;
			goto out;
		}

		/* Collect the set of distinct unit numbers into unit_tbl. */
		for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
			for (j = 0; j < total_units; j++) {
				if (p->unit_num == *(unit_tbl + j))
					break;
			}
			if (j == total_units) {
				*(unit_tbl + total_units) = p->unit_num;
				total_units++;
			}
		}

		qseecom.ce_info.num_pfe = total_units;
		pce_info_use = qseecom.ce_info.pfe = kcalloc(
			total_units, sizeof(struct qseecom_ce_info_use),
				GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}

		/* One ce_info_use per unit, holding that unit's pipe entries. */
		for (j = 0; j < total_units; j++, pce_info_use++) {
			pce_info_use->unit_num = *(unit_tbl + j);
			pce_info_use->alloc = false;
			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
			pce_info_use->num_ce_pipe_entries = 0;
			pce_info_use->ce_pipe_entry = NULL;
			/* First pass: count entries belonging to this unit. */
			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num)
					pce_info_use->num_ce_pipe_entries++;
			}

			entry = pce_info_use->num_ce_pipe_entries;
			pce_entry = pce_info_use->ce_pipe_entry =
				kcalloc(entry,
					sizeof(struct qseecom_ce_pipe_entry),
					GFP_KERNEL);
			if (pce_entry == NULL) {
				pr_err("failed to alloc memory\n");
				rc = -ENOMEM;
				goto out;
			}

			/* Second pass: copy this unit's ce/pipe-pair entries. */
			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num) {
					pce_entry->ce_num = p->ce;
					pce_entry->ce_pipe_pair =
							p->pipe_pair;
					pce_entry->valid = true;
					pce_entry++;
				}
			}
		}
		kfree(unit_tbl);
		unit_tbl = NULL;
		kfree(pfde_tbl);
		pfde_tbl = NULL;
	}

	/* New-format tables found: skip the legacy properties entirely. */
	if (!old_db)
		goto out1;

	/* --- Legacy (old-db) schema --- */
	if (of_property_read_bool((&pdev->dev)->of_node,
			"qcom,support-multiple-ce-hw-instance")) {
		if (of_property_read_u32((&pdev->dev)->of_node,
			"qcom,hlos-num-ce-hw-instances",
				&hlos_num_ce_hw_instances)) {
			pr_err("Fail: get hlos number of ce hw instance\n");
			rc = -EINVAL;
			goto out;
		}
	} else {
		hlos_num_ce_hw_instances = 1;
	}

	/* Bound check before filling the fixed-size instance array. */
	if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
		pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
			MAX_CE_PIPE_PAIR_PER_UNIT);
		rc = -EINVAL;
		goto out;
	}

	if (of_property_read_u32_array((&pdev->dev)->of_node,
			"qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
			hlos_num_ce_hw_instances)) {
		pr_err("Fail: get hlos ce hw instance info\n");
		rc = -EINVAL;
		goto out;
	}

	if (qseecom.support_fde) {
		pce_info_use = qseecom.ce_info.fde =
			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		/* by default for old db */
		qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
		pce_info_use->alloc = false;
		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
		pce_info_use->ce_pipe_entry = NULL;
		if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,disk-encrypt-pipe-pair",
				&disk_encrypt_pipe)) {
			pr_err("Fail to get FDE pipe information.\n");
			rc = -EINVAL;
				goto out;
		} else {
			pr_debug("disk-encrypt-pipe-pair=0x%x",
				disk_encrypt_pipe);
		}
		/* Same pipe pair replicated across every HLOS CE instance. */
		entry = pce_info_use->num_ce_pipe_entries =
				hlos_num_ce_hw_instances;
		pce_entry = pce_info_use->ce_pipe_entry =
			kcalloc(entry,
				sizeof(struct qseecom_ce_pipe_entry),
				GFP_KERNEL);
		if (pce_entry == NULL) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		for (i = 0; i < entry; i++) {
			pce_entry->ce_num = hlos_ce_hw_instance[i];
			pce_entry->ce_pipe_pair = disk_encrypt_pipe;
			pce_entry->valid = 1;
			pce_entry++;
		}
	} else {
		pr_warn("Device does not support FDE");
		disk_encrypt_pipe = 0xff;
	}
	if (qseecom.support_pfe) {
		pce_info_use = qseecom.ce_info.pfe =
			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
		if (!pce_info_use) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		/* by default for old db */
		qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
		pce_info_use->alloc = false;
		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
		pce_info_use->ce_pipe_entry = NULL;

		if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,file-encrypt-pipe-pair",
				&file_encrypt_pipe)) {
			pr_err("Fail to get PFE pipe information.\n");
			rc = -EINVAL;
			goto out;
		} else {
			pr_debug("file-encrypt-pipe-pair=0x%x",
				file_encrypt_pipe);
		}
		/* Same pipe pair replicated across every HLOS CE instance. */
		entry = pce_info_use->num_ce_pipe_entries =
				hlos_num_ce_hw_instances;
		pce_entry = pce_info_use->ce_pipe_entry =
			kcalloc(entry,
				sizeof(struct qseecom_ce_pipe_entry),
				GFP_KERNEL);
		if (pce_entry == NULL) {
			pr_err("failed to alloc memory\n");
			rc = -ENOMEM;
			goto out;
		}
		for (i = 0; i < entry; i++) {
			pce_entry->ce_num = hlos_ce_hw_instance[i];
			pce_entry->ce_pipe_pair = file_encrypt_pipe;
			pce_entry->valid = 1;
			pce_entry++;
		}
	} else {
		pr_warn("Device does not support PFE");
		file_encrypt_pipe = 0xff;
	}

out1:
	qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
	qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
out:
	/* On error, free everything hung off ce_info plus the scratch tables. */
	if (rc) {
		if (qseecom.ce_info.fde) {
			pce_info_use = qseecom.ce_info.fde;
			for (i = 0; i < qseecom.ce_info.num_fde; i++) {
				pce_entry = pce_info_use->ce_pipe_entry;
				kfree(pce_entry);
				pce_info_use++;
			}
		}
		kfree(qseecom.ce_info.fde);
		qseecom.ce_info.fde = NULL;
		if (qseecom.ce_info.pfe) {
			pce_info_use = qseecom.ce_info.pfe;
			for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
				pce_entry = pce_info_use->ce_pipe_entry;
				kfree(pce_entry);
				pce_info_use++;
			}
		}
		kfree(qseecom.ce_info.pfe);
		qseecom.ce_info.pfe = NULL;
	}
	kfree(unit_tbl);
	kfree(pfde_tbl);
	return rc;
}
8256
8257static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8258 void __user *argp)
8259{
8260 struct qseecom_ce_info_req req;
8261 struct qseecom_ce_info_req *pinfo = &req;
8262 int ret = 0;
8263 int i;
8264 unsigned int entries;
8265 struct qseecom_ce_info_use *pce_info_use, *p;
8266 int total = 0;
8267 bool found = false;
8268 struct qseecom_ce_pipe_entry *pce_entry;
8269
8270 ret = copy_from_user(pinfo, argp,
8271 sizeof(struct qseecom_ce_info_req));
8272 if (ret) {
8273 pr_err("copy_from_user failed\n");
8274 return ret;
8275 }
8276
8277 switch (pinfo->usage) {
8278 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8279 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8280 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8281 if (qseecom.support_fde) {
8282 p = qseecom.ce_info.fde;
8283 total = qseecom.ce_info.num_fde;
8284 } else {
8285 pr_err("system does not support fde\n");
8286 return -EINVAL;
8287 }
8288 break;
8289 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8290 if (qseecom.support_pfe) {
8291 p = qseecom.ce_info.pfe;
8292 total = qseecom.ce_info.num_pfe;
8293 } else {
8294 pr_err("system does not support pfe\n");
8295 return -EINVAL;
8296 }
8297 break;
8298 default:
8299 pr_err("unsupported usage %d\n", pinfo->usage);
8300 return -EINVAL;
8301 }
8302
8303 pce_info_use = NULL;
8304 for (i = 0; i < total; i++) {
8305 if (!p->alloc)
8306 pce_info_use = p;
8307 else if (!memcmp(p->handle, pinfo->handle,
8308 MAX_CE_INFO_HANDLE_SIZE)) {
8309 pce_info_use = p;
8310 found = true;
8311 break;
8312 }
8313 p++;
8314 }
8315
8316 if (pce_info_use == NULL)
8317 return -EBUSY;
8318
8319 pinfo->unit_num = pce_info_use->unit_num;
8320 if (!pce_info_use->alloc) {
8321 pce_info_use->alloc = true;
8322 memcpy(pce_info_use->handle,
8323 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8324 }
8325 if (pce_info_use->num_ce_pipe_entries >
8326 MAX_CE_PIPE_PAIR_PER_UNIT)
8327 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8328 else
8329 entries = pce_info_use->num_ce_pipe_entries;
8330 pinfo->num_ce_pipe_entries = entries;
8331 pce_entry = pce_info_use->ce_pipe_entry;
8332 for (i = 0; i < entries; i++, pce_entry++)
8333 pinfo->ce_pipe_entry[i] = *pce_entry;
8334 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8335 pinfo->ce_pipe_entry[i].valid = 0;
8336
8337 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8338 pr_err("copy_to_user failed\n");
8339 ret = -EFAULT;
8340 }
8341 return ret;
8342}
8343
8344static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8345 void __user *argp)
8346{
8347 struct qseecom_ce_info_req req;
8348 struct qseecom_ce_info_req *pinfo = &req;
8349 int ret = 0;
8350 struct qseecom_ce_info_use *p;
8351 int total = 0;
8352 int i;
8353 bool found = false;
8354
8355 ret = copy_from_user(pinfo, argp,
8356 sizeof(struct qseecom_ce_info_req));
8357 if (ret)
8358 return ret;
8359
8360 switch (pinfo->usage) {
8361 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8362 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8363 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8364 if (qseecom.support_fde) {
8365 p = qseecom.ce_info.fde;
8366 total = qseecom.ce_info.num_fde;
8367 } else {
8368 pr_err("system does not support fde\n");
8369 return -EINVAL;
8370 }
8371 break;
8372 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8373 if (qseecom.support_pfe) {
8374 p = qseecom.ce_info.pfe;
8375 total = qseecom.ce_info.num_pfe;
8376 } else {
8377 pr_err("system does not support pfe\n");
8378 return -EINVAL;
8379 }
8380 break;
8381 default:
8382 pr_err("unsupported usage %d\n", pinfo->usage);
8383 return -EINVAL;
8384 }
8385
8386 for (i = 0; i < total; i++) {
8387 if (p->alloc &&
8388 !memcmp(p->handle, pinfo->handle,
8389 MAX_CE_INFO_HANDLE_SIZE)) {
8390 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8391 p->alloc = false;
8392 found = true;
8393 break;
8394 }
8395 p++;
8396 }
8397 return ret;
8398}
8399
8400static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8401 void __user *argp)
8402{
8403 struct qseecom_ce_info_req req;
8404 struct qseecom_ce_info_req *pinfo = &req;
8405 int ret = 0;
8406 int i;
8407 unsigned int entries;
8408 struct qseecom_ce_info_use *pce_info_use, *p;
8409 int total = 0;
8410 bool found = false;
8411 struct qseecom_ce_pipe_entry *pce_entry;
8412
8413 ret = copy_from_user(pinfo, argp,
8414 sizeof(struct qseecom_ce_info_req));
8415 if (ret)
8416 return ret;
8417
8418 switch (pinfo->usage) {
8419 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8420 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8421 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8422 if (qseecom.support_fde) {
8423 p = qseecom.ce_info.fde;
8424 total = qseecom.ce_info.num_fde;
8425 } else {
8426 pr_err("system does not support fde\n");
8427 return -EINVAL;
8428 }
8429 break;
8430 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8431 if (qseecom.support_pfe) {
8432 p = qseecom.ce_info.pfe;
8433 total = qseecom.ce_info.num_pfe;
8434 } else {
8435 pr_err("system does not support pfe\n");
8436 return -EINVAL;
8437 }
8438 break;
8439 default:
8440 pr_err("unsupported usage %d\n", pinfo->usage);
8441 return -EINVAL;
8442 }
8443
8444 pce_info_use = NULL;
8445 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8446 pinfo->num_ce_pipe_entries = 0;
8447 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8448 pinfo->ce_pipe_entry[i].valid = 0;
8449
8450 for (i = 0; i < total; i++) {
8451
8452 if (p->alloc && !memcmp(p->handle,
8453 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8454 pce_info_use = p;
8455 found = true;
8456 break;
8457 }
8458 p++;
8459 }
8460 if (!pce_info_use)
8461 goto out;
8462 pinfo->unit_num = pce_info_use->unit_num;
8463 if (pce_info_use->num_ce_pipe_entries >
8464 MAX_CE_PIPE_PAIR_PER_UNIT)
8465 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8466 else
8467 entries = pce_info_use->num_ce_pipe_entries;
8468 pinfo->num_ce_pipe_entries = entries;
8469 pce_entry = pce_info_use->ce_pipe_entry;
8470 for (i = 0; i < entries; i++, pce_entry++)
8471 pinfo->ce_pipe_entry[i] = *pce_entry;
8472 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8473 pinfo->ce_pipe_entry[i].valid = 0;
8474out:
8475 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8476 pr_err("copy_to_user failed\n");
8477 ret = -EFAULT;
8478 }
8479 return ret;
8480}
8481
8482/*
8483 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8484 * then whitelist feature is not supported.
8485 */
8486static int qseecom_check_whitelist_feature(void)
8487{
8488 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8489
8490 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8491}
8492
/*
 * qseecom_probe() - platform-driver probe.
 *
 * Initializes the global qseecom state, creates the character device
 * (/dev/qseecom), queries the QSEE version over SCM, creates the ION
 * client, parses device-tree properties (bus scaling, FDE/PFE CE data,
 * clocks, reentrancy), notifies TZ of the secure app region when needed,
 * and finally registers the bus-scaling client. On any failure the goto
 * chain at the bottom unwinds exactly what was set up so far.
 */
static int qseecom_probe(struct platform_device *pdev)
{
	int rc;
	int i;
	uint32_t feature = 10;
	struct device *class_dev;
	struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
	struct qseecom_command_scm_resp resp;
	struct qseecom_ce_info_use *pce_info_use = NULL;

	/* Reset all global driver state before any allocation. */
	qseecom.qsee_bw_count = 0;
	qseecom.qsee_perf_client = 0;
	qseecom.qsee_sfpb_bw_count = 0;

	qseecom.qsee.ce_core_clk = NULL;
	qseecom.qsee.ce_clk = NULL;
	qseecom.qsee.ce_core_src_clk = NULL;
	qseecom.qsee.ce_bus_clk = NULL;

	qseecom.cumulative_mode = 0;
	qseecom.current_mode = INACTIVE;
	qseecom.support_bus_scaling = false;
	qseecom.support_fde = false;
	qseecom.support_pfe = false;

	qseecom.ce_drv.ce_core_clk = NULL;
	qseecom.ce_drv.ce_clk = NULL;
	qseecom.ce_drv.ce_core_src_clk = NULL;
	qseecom.ce_drv.ce_bus_clk = NULL;
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);

	qseecom.app_block_ref_cnt = 0;
	init_waitqueue_head(&qseecom.app_block_wq);
	qseecom.whitelist_support = true;

	/* Character device + class + device node setup. */
	rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
	if (rc < 0) {
		pr_err("alloc_chrdev_region failed %d\n", rc);
		return rc;
	}

	driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
	if (IS_ERR(driver_class)) {
		rc = -ENOMEM;
		pr_err("class_create failed %d\n", rc);
		goto exit_unreg_chrdev_region;
	}

	class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
			QSEECOM_DEV);
	if (IS_ERR(class_dev)) {
		pr_err("class_device_create failed %d\n", rc);
		rc = -ENOMEM;
		goto exit_destroy_class;
	}

	cdev_init(&qseecom.cdev, &qseecom_fops);
	qseecom.cdev.owner = THIS_MODULE;

	rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
	if (rc < 0) {
		pr_err("cdev_add failed %d\n", rc);
		goto exit_destroy_device;
	}

	INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
	spin_lock_init(&qseecom.registered_listener_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
	spin_lock_init(&qseecom.registered_app_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
	spin_lock_init(&qseecom.registered_kclient_list_lock);
	init_waitqueue_head(&qseecom.send_resp_wq);
	qseecom.send_resp_flag = 0;

	/* Query QSEE version from TZ via SCM. */
	qseecom.qsee_version = QSEEE_VERSION_00;
	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
		&resp, sizeof(resp));
	/*
	 * NOTE(review): resp.result is logged before rc is checked; if
	 * the SCM call failed, resp may be uninitialized here — confirm
	 * qseecom_scm_call zeroes resp on failure, or move this below
	 * the rc check.
	 */
	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
	if (rc) {
		pr_err("Failed to get QSEE version info %d\n", rc);
		goto exit_del_cdev;
	}
	qseecom.qsee_version = resp.result;
	qseecom.qseos_version = QSEOS_VERSION_14;
	qseecom.commonlib_loaded = false;
	qseecom.commonlib64_loaded = false;
	qseecom.pdev = class_dev;
	/* Create ION msm client */
	qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
	if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
		pr_err("Ion client cannot be created\n");
		rc = -ENOMEM;
		goto exit_del_cdev;
	}

	/* register client for bus scaling */
	if (pdev->dev.of_node) {
		qseecom.pdev->of_node = pdev->dev.of_node;
		qseecom.support_bus_scaling =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,support-bus-scaling");
		rc = qseecom_retrieve_ce_data(pdev);
		if (rc)
			goto exit_destroy_ion_client;
		qseecom.appsbl_qseecom_support =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,appsbl-qseecom-support");
		pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
				qseecom.appsbl_qseecom_support);

		qseecom.commonlib64_loaded =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,commonlib64-loaded-by-uefi");
		pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
				qseecom.commonlib64_loaded);
		qseecom.fde_key_size =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,fde-key-size");
		qseecom.no_clock_support =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,no-clock-support");
		/*
		 * NOTE(review): these two log messages look swapped —
		 * "handled by other subsystem" is printed when
		 * no_clock_support is false (i.e. HLOS does handle the
		 * clocks). Logging only; verify intent before changing.
		 */
		if (!qseecom.no_clock_support) {
			pr_info("qseecom clocks handled by other subsystem\n");
		} else {
			pr_info("no-clock-support=0x%x",
					qseecom.no_clock_support);
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,qsee-reentrancy-support",
					&qseecom.qsee_reentrancy_support)) {
			pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
			qseecom.qsee_reentrancy_support = 0;
		} else {
			pr_warn("qseecom.qsee_reentrancy_support = %d\n",
				qseecom.qsee_reentrancy_support);
		}

		/*
		 * The qseecom bus scaling flag can not be enabled when
		 * crypto clock is not handled by HLOS.
		 */
		if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
			pr_err("support_bus_scaling flag can not be enabled.\n");
			rc = -EINVAL;
			goto exit_destroy_ion_client;
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,ce-opp-freq",
					&qseecom.ce_opp_freq_hz)) {
			pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
			qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
		}
		rc = __qseecom_init_clk(CLK_QSEE);
		if (rc)
			goto exit_destroy_ion_client;

		/*
		 * A separate CE_DRV clock set is only needed when the
		 * HLOS CE instance differs from the QSEE one and
		 * FDE/PFE is in use; otherwise share the QSEE clocks.
		 */
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
				(qseecom.support_pfe || qseecom.support_fde)) {
			rc = __qseecom_init_clk(CLK_CE_DRV);
			if (rc) {
				__qseecom_deinit_clk(CLK_QSEE);
				goto exit_destroy_ion_client;
			}
		} else {
			struct qseecom_clk *qclk;

			qclk = &qseecom.qsee;
			qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
			qseecom.ce_drv.ce_clk = qclk->ce_clk;
			qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
			qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
		}

		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						msm_bus_cl_get_pdata(pdev);
		/*
		 * Notify TZ of the secure app region unless the region is
		 * already protected or appsbl did the notification.
		 */
		if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
			(!qseecom.is_apps_region_protected &&
			!qseecom.appsbl_qseecom_support)) {
			struct resource *resource = NULL;
			struct qsee_apps_region_info_ireq req;
			struct qsee_apps_region_info_64bit_ireq req_64bit;
			struct qseecom_command_scm_resp resp;
			void *cmd_buf = NULL;
			size_t cmd_len;

			resource = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "secapp-region");
			if (resource) {
				/* 32-bit vs 64-bit request layout by QSEE version. */
				if (qseecom.qsee_version < QSEE_VERSION_40) {
					req.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req.addr = (uint32_t)resource->start;
					req.size = resource_size(resource);
					cmd_buf = (void *)&req;
					cmd_len = sizeof(struct
						qsee_apps_region_info_ireq);
					pr_warn("secure app region addr=0x%x size=0x%x",
							req.addr, req.size);
				} else {
					req_64bit.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req_64bit.addr = resource->start;
					req_64bit.size = resource_size(
							resource);
					cmd_buf = (void *)&req_64bit;
					cmd_len = sizeof(struct
						qsee_apps_region_info_64bit_ireq);
					pr_warn("secure app region addr=0x%llx size=0x%x",
						req_64bit.addr, req_64bit.size);
				}
			} else {
				pr_err("Fail to get secure app region info\n");
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
			rc = __qseecom_enable_clk(CLK_QSEE);
			if (rc) {
				pr_err("CLK_QSEE enabling failed (%d)\n", rc);
				rc = -EIO;
				goto exit_deinit_clock;
			}
			rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len,
					&resp, sizeof(resp));
			__qseecom_disable_clk(CLK_QSEE);
			if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
				pr_err("send secapp reg fail %d resp.res %d\n",
							rc, resp.result);
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
		}
		/*
		 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
		 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
		 * Pls add "qseecom.commonlib64_loaded = true" here too.
		 */
		if (qseecom.is_apps_region_protected ||
					qseecom.appsbl_qseecom_support)
			qseecom.commonlib_loaded = true;
	} else {
		/* Non-DT probe: bus data comes from platform_data. */
		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						pdev->dev.platform_data;
	}
	if (qseecom.support_bus_scaling) {
		init_timer(&(qseecom.bw_scale_down_timer));
		INIT_WORK(&qseecom.bw_inactive_req_ws,
				qseecom_bw_inactive_req_work);
		qseecom.bw_scale_down_timer.function =
				qseecom_scale_bus_bandwidth_timer_callback;
	}
	qseecom.timer_running = false;
	qseecom.qsee_perf_client = msm_bus_scale_register_client(
					qseecom_platform_support);

	qseecom.whitelist_support = qseecom_check_whitelist_feature();
	pr_warn("qseecom.whitelist_support = %d\n",
				qseecom.whitelist_support);

	/* A missing bus client is logged but not fatal. */
	if (!qseecom.qsee_perf_client)
		pr_err("Unable to register bus client\n");

	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return 0;

/* Error unwind: reverse order of the setup above. */
exit_deinit_clock:
	__qseecom_deinit_clk(CLK_QSEE);
	if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
		(qseecom.support_pfe || qseecom.support_fde))
		__qseecom_deinit_clk(CLK_CE_DRV);
exit_destroy_ion_client:
	if (qseecom.ce_info.fde) {
		pce_info_use = qseecom.ce_info.fde;
		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.fde);
	}
	if (qseecom.ce_info.pfe) {
		pce_info_use = qseecom.ce_info.pfe;
		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.pfe);
	}
	ion_client_destroy(qseecom.ion_clnt);
exit_del_cdev:
	cdev_del(&qseecom.cdev);
exit_destroy_device:
	device_destroy(driver_class, qseecom_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(qseecom_device_no, 1);
	return rc;
}
8793
/*
 * qseecom_remove() - platform-driver remove.
 *
 * Unloads any remaining kernel-client apps, unloads the common library,
 * releases bus-scaling, timers, CE info tables, clocks, ION client and
 * the character device, in roughly reverse probe order.
 */
static int qseecom_remove(struct platform_device *pdev)
{
	struct qseecom_registered_kclient_list *kclient = NULL;
	unsigned long flags = 0;
	int ret = 0;
	int i;
	struct qseecom_ce_pipe_entry *pce_entry;
	struct qseecom_ce_info_use *pce_info_use;

	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);

	/*
	 * NOTE(review): several hazards in this loop, inherited as-is:
	 *  - mutex_lock(&app_access_lock) below is taken while holding a
	 *    spinlock with IRQs off (spin_lock_irqsave above) — mutexes
	 *    can sleep, so this is sleeping-in-atomic.
	 *  - kzfree(kclient) frees the entry the iterator then advances
	 *    through (use-after-free); a _safe iterator would be needed.
	 *  - when the loop terminates normally, control falls through
	 *    into exit_free_kc_handle with kclient pointing at the list
	 *    head sentinel, so kclient->handle is not a real entry.
	 * Confirm against the upstream fix before restructuring.
	 */
	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
		list) {
		if (!kclient)
			goto exit_irqrestore;

		/* Break the loop if client handle is NULL */
		if (!kclient->handle)
			goto exit_free_kclient;

		if (list_empty(&kclient->list))
			goto exit_free_kc_handle;

		list_del(&kclient->list);
		mutex_lock(&app_access_lock);
		ret = qseecom_unload_app(kclient->handle->dev, false);
		mutex_unlock(&app_access_lock);
		if (!ret) {
			kzfree(kclient->handle->dev);
			kzfree(kclient->handle);
			kzfree(kclient);
		}
	}

exit_free_kc_handle:
	kzfree(kclient->handle);
exit_free_kclient:
	kzfree(kclient);
exit_irqrestore:
	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);

	if (qseecom.qseos_version > QSEEE_VERSION_00)
		qseecom_unload_commonlib_image();

	if (qseecom.qsee_perf_client)
		msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
			0);
	if (pdev->dev.platform_data != NULL)
		msm_bus_scale_unregister_client(qseecom.qsee_perf_client);

	if (qseecom.support_bus_scaling) {
		cancel_work_sync(&qseecom.bw_inactive_req_ws);
		del_timer_sync(&qseecom.bw_scale_down_timer);
	}

	/* Free per-unit CE pipe tables, then the unit arrays themselves. */
	if (qseecom.ce_info.fde) {
		pce_info_use = qseecom.ce_info.fde;
		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
			pce_entry = pce_info_use->ce_pipe_entry;
			kfree(pce_entry);
			pce_info_use++;
		}
	}
	kfree(qseecom.ce_info.fde);
	if (qseecom.ce_info.pfe) {
		pce_info_use = qseecom.ce_info.pfe;
		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
			pce_entry = pce_info_use->ce_pipe_entry;
			kfree(pce_entry);
			pce_info_use++;
		}
	}
	kfree(qseecom.ce_info.pfe);

	/* register client for bus scaling */
	if (pdev->dev.of_node) {
		__qseecom_deinit_clk(CLK_QSEE);
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
			(qseecom.support_pfe || qseecom.support_fde))
			__qseecom_deinit_clk(CLK_CE_DRV);
	}

	ion_client_destroy(qseecom.ion_clnt);

	cdev_del(&qseecom.cdev);

	device_destroy(driver_class, qseecom_device_no);

	class_destroy(driver_class);

	unregister_chrdev_region(qseecom_device_no, 1);

	return ret;
}
8889
/*
 * qseecom_suspend() - legacy PM suspend callback.
 *
 * Marks the driver suspended, scales the bus down to INACTIVE, disables
 * any CE clocks still held, and stops the bandwidth scale-down timer.
 * Always returns 0; a failed bus scale-down is only logged (ret is set
 * but deliberately not propagated).
 */
static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
	/* Nothing to quiesce when another subsystem owns the clocks. */
	if (qseecom.no_clock_support)
		return 0;

	/* Lock order: qsee_bw_mutex then clk_access_lock (matches resume). */
	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);

	if (qseecom.current_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, INACTIVE);
		if (ret)
			pr_err("Fail to scale down bus\n");
		else
			qseecom.current_mode = INACTIVE;
	}

	/* Drop any clocks left enabled by in-flight crypto work. */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_clk != NULL)
			clk_disable_unprepare(qclk->ce_clk);
		if (qclk->ce_core_clk != NULL)
			clk_disable_unprepare(qclk->ce_core_clk);
		if (qclk->ce_bus_clk != NULL)
			clk_disable_unprepare(qclk->ce_bus_clk);
	}

	/*
	 * NOTE(review): del_timer_sync() is called while holding
	 * qsee_bw_mutex — verify the timer callback never takes that
	 * mutex, or this can deadlock.
	 */
	del_timer_sync(&(qseecom.bw_scale_down_timer));
	qseecom.timer_running = false;

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	cancel_work_sync(&qseecom.bw_inactive_req_ws);

	return 0;
}
8930
/*
 * qseecom_resume() - legacy PM resume callback.
 *
 * Restores the pre-suspend bus bandwidth vote, re-enables any CE clocks
 * that were held across suspend, and restarts the bandwidth scale-down
 * timer. Returns 0 on success, -EIO if a clock failed to re-enable.
 * Note: the driver state is set back to READY at "exit" even on the
 * error path — callers see -EIO but the device is marked usable.
 */
static int qseecom_resume(struct platform_device *pdev)
{
	int mode = 0;
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qseecom.no_clock_support)
		goto exit;

	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);
	/* Cap the restored vote at HIGH. */
	if (qseecom.cumulative_mode >= HIGH)
		mode = HIGH;
	else
		mode = qseecom.cumulative_mode;

	if (qseecom.cumulative_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, mode);
		if (ret)
			pr_err("Fail to scale up bus to %d\n", mode);
		else
			qseecom.current_mode = mode;
	}

	/* Re-enable clocks in core -> iface -> bus order, unwinding on failure. */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_core_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_core_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE core clk\n");
				qclk->clk_access_cnt = 0;
				goto err;
			}
		}
		if (qclk->ce_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE iface clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_clk_err;
			}
		}
		if (qclk->ce_bus_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_bus_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE bus clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_bus_clk_err;
			}
		}
	}

	/* Re-arm the scale-down timer if anything is active again. */
	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
		qseecom.bw_scale_down_timer.expires = jiffies +
			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		mod_timer(&(qseecom.bw_scale_down_timer),
				qseecom.bw_scale_down_timer.expires);
		qseecom.timer_running = true;
	}

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	goto exit;

/* Clock error unwind: disable what was enabled, in reverse order. */
ce_bus_clk_err:
	if (qclk->ce_clk)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	ret = -EIO;
exit:
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return ret;
}
9010
/* Device-tree match table: binds this driver to "qcom,qseecom" nodes. */
static const struct of_device_id qseecom_match[] = {
	{
		.compatible = "qcom,qseecom",
	},
	{}
};
9017
/*
 * Platform driver descriptor. Uses the legacy suspend/resume callbacks
 * (not dev_pm_ops), consistent with the rest of this driver.
 */
static struct platform_driver qseecom_plat_driver = {
	.probe = qseecom_probe,
	.remove = qseecom_remove,
	.suspend = qseecom_suspend,
	.resume = qseecom_resume,
	.driver = {
		.name = "qseecom",
		.owner = THIS_MODULE,
		.of_match_table = qseecom_match,
	},
};
9029
9030static int qseecom_init(void)
9031{
9032 return platform_driver_register(&qseecom_plat_driver);
9033}
9034
9035static void qseecom_exit(void)
9036{
9037 platform_driver_unregister(&qseecom_plat_driver);
9038}
9039
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");

/* Standard module entry/exit hookup. */
module_init(qseecom_init);
module_exit(qseecom_exit);