blob: 241cdc0675c751c395424da747e235c16f111c47 [file] [log] [blame]
/*
 * QTI Secure Execution Environment Communicator (QSEECOM) driver
 *
 * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
53
54#define QSEECOM_DEV "qseecom"
55#define QSEOS_VERSION_14 0x14
56#define QSEEE_VERSION_00 0x400000
57#define QSEE_VERSION_01 0x401000
58#define QSEE_VERSION_02 0x402000
59#define QSEE_VERSION_03 0x403000
60#define QSEE_VERSION_04 0x404000
61#define QSEE_VERSION_05 0x405000
62#define QSEE_VERSION_20 0x800000
63#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
64
65#define QSEE_CE_CLK_100MHZ 100000000
66#define CE_CLK_DIV 1000000
67
68#define QSEECOM_MAX_SG_ENTRY 512
69#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
70 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
71
72#define QSEECOM_INVALID_KEY_ID 0xff
73
74/* Save partition image hash for authentication check */
75#define SCM_SAVE_PARTITION_HASH_ID 0x01
76
77/* Check if enterprise security is activate */
78#define SCM_IS_ACTIVATED_ID 0x02
79
80/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
81#define SCM_MDTP_CIPHER_DIP 0x01
82
83/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
84#define MAX_DIP 0x20000
85
86#define RPMB_SERVICE 0x2000
87#define SSD_SERVICE 0x3000
88
89#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
90#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
91#define TWO 2
92#define QSEECOM_UFS_ICE_CE_NUM 10
93#define QSEECOM_SDCC_ICE_CE_NUM 20
94#define QSEECOM_ICE_FDE_KEY_INDEX 0
95
96#define PHY_ADDR_4G (1ULL<<32)
97
98#define QSEECOM_STATE_NOT_READY 0
99#define QSEECOM_STATE_SUSPEND 1
100#define QSEECOM_STATE_READY 2
101#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
102
103/*
104 * default ce info unit to 0 for
105 * services which
106 * support only single instance.
107 * Most of services are in this category.
108 */
109#define DEFAULT_CE_INFO_UNIT 0
110#define DEFAULT_NUM_CE_INFO_UNIT 1
111
/* Bus clock identifiers used when voting for qseecom bandwidth. */
enum qseecom_clk_definitions {
	CLK_DFAB = 0,
	CLK_SFPB,
};

/*
 * FDE key-size selector encoded into the key request flags; the value is
 * shifted left by QSEECOM_ICE_FDE_KEY_SIZE_MASK (bit position 2).
 */
enum qseecom_ice_key_size_type {
	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
};

/* What kind of client is behind an open qseecom file handle. */
enum qseecom_client_handle_type {
	QSEECOM_CLIENT_APP = 1,
	QSEECOM_LISTENER_SERVICE,
	QSEECOM_SECURE_SERVICE,
	QSEECOM_GENERIC,
	QSEECOM_UNAVAILABLE_CLIENT_APP,
};

/* Crypto-engine clock instances managed by this driver. */
enum qseecom_ce_hw_instance {
	CLK_QSEE = 0,
	CLK_CE_DRV,
	CLK_INVALID,
};
139
140static struct class *driver_class;
141static dev_t qseecom_device_no;
142
143static DEFINE_MUTEX(qsee_bw_mutex);
144static DEFINE_MUTEX(app_access_lock);
145static DEFINE_MUTEX(clk_access_lock);
146
/*
 * Scatter/gather descriptor shared with TZ; the field encoding is
 * described in the comment below and must match the TZ side exactly.
 */
struct sglist_info {
	uint32_t indexAndFlags;	/* see SGLISTINFO_SET_INDEX_FLAG() */
	uint32_t sizeOrCount;	/* buffer size, or number of SG entries */
};
151
152/*
153 * The 31th bit indicates only one or multiple physical address inside
154 * the request buffer. If it is set, the index locates a single physical addr
155 * inside the request buffer, and `sizeOrCount` is the size of the memory being
156 * shared at that physical address.
157 * Otherwise, the index locates an array of {start, len} pairs (a
158 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
159 * that array.
160 *
161 * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
162 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
163 *
164 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
165 */
166#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
167 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
168
169#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
170
171#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
172
173#define MAKE_WHITELIST_VERSION(major, minor, patch) \
174 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
175
/* One registered listener service and its shared buffer with TZ. */
struct qseecom_registered_listener_list {
	struct list_head           list;
	struct qseecom_register_listener_req svc;	/* id + user sb info */
	void  *user_virt_sb_base;	/* userspace view of shared buffer */
	u8 *sb_virt;			/* kernel mapping of shared buffer */
	phys_addr_t sb_phys;
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	wait_queue_head_t          rcv_req_wq;
	int                        rcv_req_flag;	/* request pending for listener */
	int                        send_resp_flag;	/* listener posted a response */
	bool                       listener_in_use;
	/* wq for thread blocked on this listener*/
	wait_queue_head_t          listener_block_app_wq;
	struct sglist_info         sglistinfo_ptr[MAX_ION_FD];
	uint32_t                   sglist_cnt;
};

/* One loaded TZ application, refcounted across clients. */
struct qseecom_registered_app_list {
	struct list_head                 list;
	u32  app_id;			/* id assigned by QSEE */
	u32  ref_cnt;
	char app_name[MAX_APP_NAME_SIZE];
	u32  app_arch;			/* ELFCLASS32/ELFCLASS64 */
	bool app_blocked;		/* blocked on a busy listener */
	u32  blocked_on_listener_id;
};

/* Kernel-space (in-kernel API) client bookkeeping. */
struct qseecom_registered_kclient_list {
	struct list_head list;
	struct qseecom_handle *handle;
};

/* Per-unit crypto-engine pipe description exported via the CE info ioctls. */
struct qseecom_ce_info_use {
	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
	unsigned int unit_num;
	unsigned int num_ce_pipe_entries;
	struct qseecom_ce_pipe_entry *ce_pipe_entry;
	bool alloc;			/* unit currently allocated to a client */
	uint32_t type;
};

/* Crypto-engine usage for full-disk (FDE) and per-file (PFE) encryption. */
struct ce_hw_usage_info {
	uint32_t qsee_ce_hw_instance;
	uint32_t num_fde;
	struct qseecom_ce_info_use *fde;
	uint32_t num_pfe;
	struct qseecom_ce_info_use *pfe;
};

/* Clocks for one CE instance, guarded by clk_access_lock via clk_access_cnt. */
struct qseecom_clk {
	enum qseecom_ce_hw_instance instance;
	struct clk *ce_core_clk;
	struct clk *ce_clk;
	struct clk *ce_core_src_clk;
	struct clk *ce_bus_clk;
	uint32_t clk_access_cnt;	/* enable refcount */
};
234
/*
 * Global driver state (single instance: the static `qseecom` below).
 * Each registered-object list has its own spinlock; cross-call ordering
 * is serialized by the app_access_lock / qsee_bw_mutex file-scope mutexes.
 */
struct qseecom_control {
	struct ion_client *ion_clnt;		/* Ion client */
	struct list_head  registered_listener_list_head;
	spinlock_t        registered_listener_list_lock;

	struct list_head  registered_app_list_head;
	spinlock_t        registered_app_list_lock;

	struct list_head   registered_kclient_list_head;
	spinlock_t        registered_kclient_list_lock;

	wait_queue_head_t send_resp_wq;
	int               send_resp_flag;

	uint32_t          qseos_version;
	uint32_t          qsee_version;		/* gates 32/64-bit ireq layouts */
	struct device *pdev;
	bool  whitelist_support;
	bool  commonlib_loaded;
	bool  commonlib64_loaded;
	struct ce_hw_usage_info ce_info;

	int qsee_bw_count;			/* bandwidth vote refcount */
	int qsee_sfpb_bw_count;			/* SFPB clock vote refcount */

	uint32_t qsee_perf_client;
	struct qseecom_clk qsee;
	struct qseecom_clk ce_drv;

	bool support_bus_scaling;
	bool support_fde;
	bool support_pfe;
	bool fde_key_size;
	uint32_t  cumulative_mode;
	enum qseecom_bandwidth_request_mode  current_mode;
	struct timer_list bw_scale_down_timer;
	struct work_struct bw_inactive_req_ws;
	struct cdev cdev;
	bool timer_running;
	bool no_clock_support;
	unsigned int ce_opp_freq_hz;
	bool appsbl_qseecom_support;
	uint32_t qsee_reentrancy_support;

	uint32_t app_block_ref_cnt;		/* apps blocked on listeners */
	wait_queue_head_t app_block_wq;
	atomic_t qseecom_state;			/* QSEECOM_STATE_* */
	int is_apps_region_protected;		/* from androidboot.keymaster= */
	bool smcinvoke_support;			/* TZ accepted smcinvoke listener ids */
};
285
/* DMA buffer backing one secure-buffer ion fd passed in a modified cmd. */
struct qseecom_sec_buf_fd_info {
	bool is_sec_buf_fd;
	size_t size;
	void *vbase;		/* kernel virtual address (dma_alloc) */
	dma_addr_t pbase;	/* bus address handed to TZ */
};

/* 32-bit buffer reference marshalled into TZ command payloads. */
struct qseecom_param_memref {
	uint32_t buffer;
	uint32_t size;
};

/* Per-fd state of a userspace client talking to one TZ app. */
struct qseecom_client_handle {
	u32  app_id;
	u8 *sb_virt;			/* kernel mapping of shared buffer */
	phys_addr_t sb_phys;
	unsigned long user_virt_sb_base;	/* userspace view, for ptr fixups */
	size_t sb_length;
	struct ion_handle *ihandle;		/* Retrieve phy addr */
	char app_name[MAX_APP_NAME_SIZE];
	u32  app_arch;
	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
};

/* Per-fd state of a listener service; id matches the registered listener. */
struct qseecom_listener_handle {
	u32               id;
};
313
314static struct qseecom_control qseecom;
315
/*
 * State attached to each open /dev/qseecom file descriptor; `type`
 * selects which member of the union is live.
 */
struct qseecom_dev_handle {
	enum qseecom_client_handle_type type;
	union {
		struct qseecom_client_handle client;
		struct qseecom_listener_handle listener;
	};
	bool  released;			/* fd torn down, reject further ioctls */
	int               abort;
	wait_queue_head_t abort_wq;
	atomic_t          ioctl_count;	/* in-flight ioctls, drained on abort */
	bool  perf_enabled;
	bool  fast_load_enabled;
	enum qseecom_bandwidth_request_mode mode;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
	bool use_legacy_cmd;		/* force non-whitelist send-cmd path */
};

/* Human-readable label for one entry of key_id_array below. */
struct qseecom_key_id_usage_desc {
	uint8_t desc[QSEECOM_KEY_ID_SIZE];
};

/* Crypto engine / pipe-pair selection for one CE info unit. */
struct qseecom_crypto_info {
	unsigned int unit_num;
	unsigned int ce;
	unsigned int pipe_pair;
};
343
/*
 * Descriptions indexed by enum qseecom_key_management_usage_type; the
 * order is part of the key-usage ABI and must not be rearranged.
 */
static struct qseecom_key_id_usage_desc key_id_array[] = {
	{
		.desc = "Undefined Usage Index",
	},

	{
		.desc = "Full Disk Encryption",
	},

	{
		.desc = "Per File Encryption",
	},

	{
		.desc = "UFS ICE Full Disk Encryption",
	},

	{
		.desc = "SDCC ICE Full Disk Encryption",
	},
};
365
366/* Function proto types */
367static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
368static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
369static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
370static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
371static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
372static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
373 char *cmnlib_name);
374static int qseecom_enable_ice_setup(int usage);
375static int qseecom_disable_ice_setup(int usage);
376static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
377static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
378 void __user *argp);
379static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
380 void __user *argp);
381static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
382 void __user *argp);
383
/*
 * Early-boot parser for the "androidboot.keymaster=" command line option.
 * Stores the parsed integer in qseecom.is_apps_region_protected, which
 * tells the driver whether the bootloader already protected the TZ apps
 * region.  Always returns 1 so the option is consumed (__setup contract).
 */
static int get_qseecom_keymaster_status(char *str)
{
	get_option(&str, &qseecom.is_apps_region_protected);
	return 1;
}
__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
390
/*
 * qseecom_scm_call2() - issue a qseecom request to TZ over the ARMv8 SMC
 * calling convention.
 * @svc_id:	legacy SCM service id (6, SCM_SVC_ES or SCM_SVC_TZSCHEDULER)
 * @tz_cmd_id:	legacy command id within @svc_id (ignored for QSEOS commands)
 * @req_buf:	marshalled request; for SCM_SVC_TZSCHEDULER the first u32 is
 *		the QSEOS command id that selects how the rest is decoded
 * @resp_buf:	filled as a struct qseecom_command_scm_resp from the three
 *		SMC return registers
 *
 * Maps each legacy (service, command) pair to its SMC id and parameter
 * layout, flattens the request structure into desc.args[], and calls
 * scm_call2().  Requests that hand TZ a physical address (partition hash,
 * app lookup, key management) are first copied into a fresh kernel buffer
 * and cache-flushed so TZ sees coherent memory.  Most QSEOS commands go
 * through __qseecom_reentrancy_check_if_no_app_blocked() before the SMC
 * to honor TZ reentrancy rules.
 *
 * Return: 0 on success, -EINVAL for unsupported ids, -ENOMEM on
 * allocation failure, or the scm_call2() error code.
 */
static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
			const void *req_buf, void *resp_buf)
{
	int      ret = 0;
	uint32_t smc_id = 0;
	uint32_t qseos_cmd_id = 0;
	struct scm_desc desc = {0};
	struct qseecom_command_scm_resp *scm_resp = NULL;

	if (!req_buf || !resp_buf) {
		pr_err("Invalid buffer pointer\n");
		return -EINVAL;
	}
	/* Every QSEOS request starts with its command id. */
	qseos_cmd_id = *(uint32_t *)req_buf;
	scm_resp = (struct qseecom_command_scm_resp *)resp_buf;

	switch (svc_id) {
	case 6: {
		/* Legacy info service: only feature-version query supported. */
		if (tz_cmd_id == 3) {
			smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
			desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
			desc.args[0] = *(uint32_t *)req_buf;
		} else {
			pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
				svc_id, tz_cmd_id);
			return -EINVAL;
		}
		ret = scm_call2(smc_id, &desc);
		break;
	}
	case SCM_SVC_ES: {
		switch (tz_cmd_id) {
		case SCM_SAVE_PARTITION_HASH_ID: {
			/*
			 * Copy the digest into a flushed kernel buffer and
			 * pass its physical address to TZ.
			 */
			u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
			struct qseecom_save_partition_hash_req *p_hash_req =
				(struct qseecom_save_partition_hash_req *)
				req_buf;
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, p_hash_req->digest,
				SHA256_DIGEST_LENGTH);
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
			desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
			desc.args[0] = p_hash_req->partition_id;
			desc.args[1] = virt_to_phys(tzbuf);
			desc.args[2] = SHA256_DIGEST_LENGTH;
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		default: {
			pr_err("tz_cmd_id %d is not supported by scm_call2\n",
						tz_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /* end of switch (tz_cmd_id) */
		break;
	} /* end of case SCM_SVC_ES */
	case SCM_SVC_TZSCHEDULER: {
		switch (qseos_cmd_id) {
		case QSEOS_APP_START_COMMAND: {
			/*
			 * 32-bit vs 64-bit request layouts are chosen by the
			 * QSEE version throughout this switch; the argument
			 * meanings are identical.
			 */
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_START_ID;
			desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
					(struct qseecom_load_app_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_SHUTDOWN_COMMAND: {
			struct qseecom_unload_app_ireq *req;

			req = (struct qseecom_unload_app_ireq *)req_buf;
			smc_id = TZ_OS_APP_SHUTDOWN_ID;
			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
			desc.args[0] = req->app_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_LOOKUP_COMMAND: {
			/* App name goes to TZ by physical address. */
			struct qseecom_check_app_ireq *req;
			u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			req = (struct qseecom_check_app_ireq *)req_buf;
			pr_debug("Lookup app_name = %s\n", req->app_name);
			strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_APP_LOOKUP_ID;
			desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = strlen(req->app_name);
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_APP_REGION_NOTIFICATION: {
			struct qsee_apps_region_info_ireq *req;
			struct qsee_apps_region_info_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
			desc.arginfo =
				TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qsee_apps_region_info_ireq *)
					req_buf;
				desc.args[0] = req->addr;
				desc.args[1] = req->size;
			} else {
				req_64bit =
				(struct qsee_apps_region_info_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->addr;
				desc.args[1] = req_64bit->size;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
			struct qseecom_load_lib_image_ireq *req;
			struct qseecom_load_lib_image_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_lib_image_ireq *)
					req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_lib_image_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_REGISTER_LISTENER: {
			struct qseecom_register_listener_ireq *req;
			struct qseecom_register_listener_64bit_ireq *req_64bit;

			desc.arginfo =
				TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_register_listener_ireq *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->sb_ptr;
				desc.args[2] = req->sb_len;
			} else {
				req_64bit =
				(struct qseecom_register_listener_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->listener_id;
				desc.args[1] = req_64bit->sb_ptr;
				desc.args[2] = req_64bit->sb_len;
			}
			/*
			 * Probe for smcinvoke support: try the smcinvoke
			 * variant first and fall back to the legacy id,
			 * remembering which one TZ accepted (the flag is
			 * read later by CONTINUE_BLOCKED_REQ).
			 */
			qseecom.smcinvoke_support = true;
			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			if (ret) {
				qseecom.smcinvoke_support = false;
				smc_id = TZ_OS_REGISTER_LISTENER_ID;
				__qseecom_reentrancy_check_if_no_app_blocked(
					smc_id);
				ret = scm_call2(smc_id, &desc);
			}
			break;
		}
		case QSEOS_DEREGISTER_LISTENER: {
			struct qseecom_unregister_listener_ireq *req;

			req = (struct qseecom_unregister_listener_ireq *)
				req_buf;
			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
			struct qseecom_client_listener_data_irsp *req;

			req = (struct qseecom_client_listener_data_irsp *)
				req_buf;
			smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
			desc.arginfo =
				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			desc.args[1] = req->status;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
			/* As above, plus the whitelist sglist descriptor. */
			struct qseecom_client_listener_data_irsp *req;
			struct qseecom_client_listener_data_64bit_irsp *req_64;

			smc_id =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req =
				(struct qseecom_client_listener_data_irsp *)
				req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->status;
				desc.args[2] = req->sglistinfo_ptr;
				desc.args[3] = req->sglistinfo_len;
			} else {
				req_64 =
			(struct qseecom_client_listener_data_64bit_irsp *)
				req_buf;
				desc.args[0] = req_64->listener_id;
				desc.args[1] = req_64->status;
				desc.args[2] = req_64->sglistinfo_ptr;
				desc.args[3] = req_64->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_app_64bit_ireq *)req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}

		case QSEOS_CLIENT_SEND_DATA_COMMAND: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
			desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
			struct qseecom_client_send_service_ireq *req;

			req = (struct qseecom_client_send_service_ireq *)
				req_buf;
			smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
			desc.args[0] = req->key_type;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_ERASE_COMMAND: {
			smc_id = TZ_OS_RPMB_ERASE_ID;
			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_GENERATE_KEY: {
			/*
			 * Key-management requests strip the leading command
			 * id and pass the remainder of the ireq to TZ by
			 * physical address, cache-flushed.
			 */
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_GEN_KEY_ID;
			desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_DELETE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_DEL_KEY_ID;
			desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_SET_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_select_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_select_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
			desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_UPDATE_KEY_USERINFO: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_UPDATE_KEY_ID;
			desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION: {
			/* GP TEE (QTEEC) session/command calls share one
			 * five-argument layout.
			 */
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
			desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_CLOSE_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_REQUEST_CANCELLATION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
			desc.arginfo =
				TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
			struct qseecom_continue_blocked_request_ireq *req =
				(struct qseecom_continue_blocked_request_ireq *)
				req_buf;
			/* Id chosen by whichever listener-register variant
			 * TZ accepted earlier (see QSEOS_REGISTER_LISTENER).
			 */
			if (qseecom.smcinvoke_support)
				smc_id =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
			else
				smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
			desc.arginfo =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
			desc.args[0] = req->app_or_session_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		default: {
			pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
						qseos_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /*end of switch (qsee_cmd_id)  */
	break;
	} /*end of case SCM_SVC_TZSCHEDULER*/
	default: {
		pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
					svc_id);
		ret = -EINVAL;
		break;
	}
	} /*end of switch svc_id */
	/* Propagate the three SMC return registers to the caller. */
	scm_resp->result = desc.ret[0];
	scm_resp->resp_type = desc.ret[1];
	scm_resp->data = desc.ret[2];
	pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
		svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
	pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
		scm_resp->result, scm_resp->resp_type, scm_resp->data);
	return ret;
}
1053
1054
1055static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1056 size_t cmd_len, void *resp_buf, size_t resp_len)
1057{
1058 if (!is_scm_armv8())
1059 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1060 resp_buf, resp_len);
1061 else
1062 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1063}
1064
1065static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
1066 struct qseecom_register_listener_req *svc)
1067{
1068 struct qseecom_registered_listener_list *ptr;
1069 int unique = 1;
1070 unsigned long flags;
1071
1072 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1073 list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
1074 if (ptr->svc.listener_id == svc->listener_id) {
1075 pr_err("Service id: %u is already registered\n",
1076 ptr->svc.listener_id);
1077 unique = 0;
1078 break;
1079 }
1080 }
1081 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1082 return unique;
1083}
1084
1085static struct qseecom_registered_listener_list *__qseecom_find_svc(
1086 int32_t listener_id)
1087{
1088 struct qseecom_registered_listener_list *entry = NULL;
1089 unsigned long flags;
1090
1091 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1092 list_for_each_entry(entry,
1093 &qseecom.registered_listener_list_head, list) {
1094 if (entry->svc.listener_id == listener_id)
1095 break;
1096 }
1097 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1098
1099 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
1100 pr_err("Service id: %u is not found\n", listener_id);
1101 return NULL;
1102 }
1103
1104 return entry;
1105}
1106
1107static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1108 struct qseecom_dev_handle *handle,
1109 struct qseecom_register_listener_req *listener)
1110{
1111 int ret = 0;
1112 struct qseecom_register_listener_ireq req;
1113 struct qseecom_register_listener_64bit_ireq req_64bit;
1114 struct qseecom_command_scm_resp resp;
1115 ion_phys_addr_t pa;
1116 void *cmd_buf = NULL;
1117 size_t cmd_len;
1118
1119 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001120 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001121 listener->ifd_data_fd);
1122 if (IS_ERR_OR_NULL(svc->ihandle)) {
1123 pr_err("Ion client could not retrieve the handle\n");
1124 return -ENOMEM;
1125 }
1126
1127 /* Get the physical address of the ION BUF */
1128 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1129 if (ret) {
1130 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1131 ret);
1132 return ret;
1133 }
1134 /* Populate the structure for sending scm call to load image */
1135 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1136 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1137 pr_err("ION memory mapping for listener shared buffer failed\n");
1138 return -ENOMEM;
1139 }
1140 svc->sb_phys = (phys_addr_t)pa;
1141
1142 if (qseecom.qsee_version < QSEE_VERSION_40) {
1143 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1144 req.listener_id = svc->svc.listener_id;
1145 req.sb_len = svc->sb_length;
1146 req.sb_ptr = (uint32_t)svc->sb_phys;
1147 cmd_buf = (void *)&req;
1148 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1149 } else {
1150 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1151 req_64bit.listener_id = svc->svc.listener_id;
1152 req_64bit.sb_len = svc->sb_length;
1153 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1154 cmd_buf = (void *)&req_64bit;
1155 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1156 }
1157
1158 resp.result = QSEOS_RESULT_INCOMPLETE;
1159
1160 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1161 &resp, sizeof(resp));
1162 if (ret) {
1163 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1164 return -EINVAL;
1165 }
1166
1167 if (resp.result != QSEOS_RESULT_SUCCESS) {
1168 pr_err("Error SB registration req: resp.result = %d\n",
1169 resp.result);
1170 return -EPERM;
1171 }
1172 return 0;
1173}
1174
1175static int qseecom_register_listener(struct qseecom_dev_handle *data,
1176 void __user *argp)
1177{
1178 int ret = 0;
1179 unsigned long flags;
1180 struct qseecom_register_listener_req rcvd_lstnr;
1181 struct qseecom_registered_listener_list *new_entry;
1182
1183 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1184 if (ret) {
1185 pr_err("copy_from_user failed\n");
1186 return ret;
1187 }
1188 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1189 rcvd_lstnr.sb_size))
1190 return -EFAULT;
1191
1192 data->listener.id = 0;
1193 if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
1194 pr_err("Service is not unique and is already registered\n");
1195 data->released = true;
1196 return -EBUSY;
1197 }
1198
1199 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1200 if (!new_entry)
1201 return -ENOMEM;
1202 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
1203 new_entry->rcv_req_flag = 0;
1204
1205 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1206 new_entry->sb_length = rcvd_lstnr.sb_size;
1207 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1208 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
1209 pr_err("qseecom_set_sb_memoryfailed\n");
1210 kzfree(new_entry);
1211 return -ENOMEM;
1212 }
1213
1214 data->listener.id = rcvd_lstnr.listener_id;
1215 init_waitqueue_head(&new_entry->rcv_req_wq);
1216 init_waitqueue_head(&new_entry->listener_block_app_wq);
1217 new_entry->send_resp_flag = 0;
1218 new_entry->listener_in_use = false;
1219 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1220 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
1221 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1222
1223 return ret;
1224}
1225
/*
 * Deregister @data's listener from QSEE, drain in-flight ioctls, remove
 * the listener entry from the global list, and release its ION buffer.
 *
 * Order matters: QSEE is told first (so it stops routing requests here),
 * then waiters are woken and in-flight ioctls drained, and only then is
 * the entry freed and the shared buffer unmapped.
 */
static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
{
	int ret = 0;
	unsigned long flags;
	uint32_t unmap_mem = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct qseecom_command_scm_resp resp;
	struct ion_handle *ihandle = NULL; /* Retrieve phy addr */

	/* Tell QSEE to deregister this listener id first. */
	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
	req.listener_id = data->listener.id;
	resp.result = QSEOS_RESULT_INCOMPLETE;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
					sizeof(req), &resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
				ret, data->listener.id);
		return ret;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
				resp.result, data->listener.id);
		return -EPERM;
	}

	/*
	 * Flag abort and wake any thread blocked in the receive-request
	 * wait queue so it can observe the abort and bail out.
	 */
	data->abort = 1;
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head,
			list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			wake_up_all(&ptr_svc->rcv_req_wq);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/*
	 * Wait for all other ioctls on this handle to finish (count of 1
	 * is this ioctl itself) before tearing the entry down.
	 */
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_freezable(data->abort_wq,
				atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
			return ret;
		}
	}

	/* Remove and free the list entry; remember the ION handle to drop. */
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc,
			&qseecom.registered_listener_list_head, list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			if (ptr_svc->sb_virt) {
				unmap_mem = 1;
				ihandle = ptr_svc->ihandle;
			}
			list_del(&ptr_svc->list);
			kzfree(ptr_svc);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/* Unmap the memory */
	if (unmap_mem) {
		if (!IS_ERR_OR_NULL(ihandle)) {
			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
			ion_free(qseecom.ion_clnt, ihandle);
		}
	}
	data->released = true;
	return ret;
}
1299
/*
 * Apply bus-bandwidth mode @mode: gate the QSEE core source clock to
 * match (off for INACTIVE, on otherwise) and, if the mode changed,
 * issue the msm_bus scaling request.
 */
static int __qseecom_set_msm_bus_request(uint32_t mode)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	/* Clock gating only applies when a core source clock exists. */
	if (qclk->ce_core_src_clk != NULL) {
		if (mode == INACTIVE) {
			__qseecom_disable_clk(CLK_QSEE);
		} else {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				pr_err("CLK enabling failed (%d) MODE (%d)\n",
							ret, mode);
		}
	}

	if ((!ret) && (qseecom.current_mode != mode)) {
		ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, mode);
		if (ret) {
			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
							ret, mode);
			/* Roll the clock state back to match the old mode. */
			if (qclk->ce_core_src_clk != NULL) {
				if (mode == INACTIVE) {
					ret = __qseecom_enable_clk(CLK_QSEE);
					if (ret)
						pr_err("CLK enable failed\n");
				} else
					__qseecom_disable_clk(CLK_QSEE);
			}
		}
		/*
		 * NOTE(review): current_mode is updated even when the bus
		 * request above failed — verify this is intentional.
		 */
		qseecom.current_mode = mode;
	}
	return ret;
}
1336
/*
 * Deferred work scheduled by the bw-scale-down timer: drop the bus vote
 * to INACTIVE if the timer is still considered running. Takes
 * app_access_lock before qsee_bw_mutex — this lock order must match the
 * rest of the driver.
 */
static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
	mutex_lock(&app_access_lock);
	mutex_lock(&qsee_bw_mutex);
	/* Skip if the timer was cancelled after this work was queued. */
	if (qseecom.timer_running)
		__qseecom_set_msm_bus_request(INACTIVE);
	pr_debug("current_mode = %d, cumulative_mode = %d\n",
				qseecom.current_mode, qseecom.cumulative_mode);
	qseecom.timer_running = false;
	mutex_unlock(&qsee_bw_mutex);
	mutex_unlock(&app_access_lock);
}
1349
/*
 * Timer callback (runs in softirq context): defer the actual bus
 * scale-down to process context, since it takes mutexes.
 */
static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}
1354
1355static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1356{
1357 struct qseecom_clk *qclk;
1358 int ret = 0;
1359
1360 mutex_lock(&clk_access_lock);
1361 if (ce == CLK_QSEE)
1362 qclk = &qseecom.qsee;
1363 else
1364 qclk = &qseecom.ce_drv;
1365
1366 if (qclk->clk_access_cnt > 2) {
1367 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1368 ret = -EINVAL;
1369 goto err_dec_ref_cnt;
1370 }
1371 if (qclk->clk_access_cnt == 2)
1372 qclk->clk_access_cnt--;
1373
1374err_dec_ref_cnt:
1375 mutex_unlock(&clk_access_lock);
1376 return ret;
1377}
1378
1379
1380static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1381{
1382 int32_t ret = 0;
1383 int32_t request_mode = INACTIVE;
1384
1385 mutex_lock(&qsee_bw_mutex);
1386 if (mode == 0) {
1387 if (qseecom.cumulative_mode > MEDIUM)
1388 request_mode = HIGH;
1389 else
1390 request_mode = qseecom.cumulative_mode;
1391 } else {
1392 request_mode = mode;
1393 }
1394
1395 ret = __qseecom_set_msm_bus_request(request_mode);
1396 if (ret) {
1397 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1398 ret, request_mode);
1399 goto err_scale_timer;
1400 }
1401
1402 if (qseecom.timer_running) {
1403 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1404 if (ret) {
1405 pr_err("Failed to decrease clk ref count.\n");
1406 goto err_scale_timer;
1407 }
1408 del_timer_sync(&(qseecom.bw_scale_down_timer));
1409 qseecom.timer_running = false;
1410 }
1411err_scale_timer:
1412 mutex_unlock(&qsee_bw_mutex);
1413 return ret;
1414}
1415
1416
1417static int qseecom_unregister_bus_bandwidth_needs(
1418 struct qseecom_dev_handle *data)
1419{
1420 int32_t ret = 0;
1421
1422 qseecom.cumulative_mode -= data->mode;
1423 data->mode = INACTIVE;
1424
1425 return ret;
1426}
1427
1428static int __qseecom_register_bus_bandwidth_needs(
1429 struct qseecom_dev_handle *data, uint32_t request_mode)
1430{
1431 int32_t ret = 0;
1432
1433 if (data->mode == INACTIVE) {
1434 qseecom.cumulative_mode += request_mode;
1435 data->mode = request_mode;
1436 } else {
1437 if (data->mode != request_mode) {
1438 qseecom.cumulative_mode -= data->mode;
1439 qseecom.cumulative_mode += request_mode;
1440 data->mode = request_mode;
1441 }
1442 }
1443 return ret;
1444}
1445
1446static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1447{
1448 int ret = 0;
1449
1450 ret = qsee_vote_for_clock(data, CLK_DFAB);
1451 if (ret) {
1452 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1453 goto perf_enable_exit;
1454 }
1455 ret = qsee_vote_for_clock(data, CLK_SFPB);
1456 if (ret) {
1457 qsee_disable_clock_vote(data, CLK_DFAB);
1458 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1459 goto perf_enable_exit;
1460 }
1461
1462perf_enable_exit:
1463 return ret;
1464}
1465
1466static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1467 void __user *argp)
1468{
1469 int32_t ret = 0;
1470 int32_t req_mode;
1471
1472 if (qseecom.no_clock_support)
1473 return 0;
1474
1475 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1476 if (ret) {
1477 pr_err("copy_from_user failed\n");
1478 return ret;
1479 }
1480 if (req_mode > HIGH) {
1481 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1482 return -EINVAL;
1483 }
1484
1485 /*
1486 * Register bus bandwidth needs if bus scaling feature is enabled;
1487 * otherwise, qseecom enable/disable clocks for the client directly.
1488 */
1489 if (qseecom.support_bus_scaling) {
1490 mutex_lock(&qsee_bw_mutex);
1491 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1492 mutex_unlock(&qsee_bw_mutex);
1493 } else {
1494 pr_debug("Bus scaling feature is NOT enabled\n");
1495 pr_debug("request bandwidth mode %d for the client\n",
1496 req_mode);
1497 if (req_mode != INACTIVE) {
1498 ret = qseecom_perf_enable(data);
1499 if (ret)
1500 pr_err("Failed to vote for clock with err %d\n",
1501 ret);
1502 } else {
1503 qsee_disable_clock_vote(data, CLK_DFAB);
1504 qsee_disable_clock_vote(data, CLK_SFPB);
1505 }
1506 }
1507 return ret;
1508}
1509
1510static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1511{
1512 if (qseecom.no_clock_support)
1513 return;
1514
1515 mutex_lock(&qsee_bw_mutex);
1516 qseecom.bw_scale_down_timer.expires = jiffies +
1517 msecs_to_jiffies(duration);
1518 mod_timer(&(qseecom.bw_scale_down_timer),
1519 qseecom.bw_scale_down_timer.expires);
1520 qseecom.timer_running = true;
1521 mutex_unlock(&qsee_bw_mutex);
1522}
1523
1524static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1525{
1526 if (!qseecom.support_bus_scaling)
1527 qsee_disable_clock_vote(data, CLK_SFPB);
1528 else
1529 __qseecom_add_bw_scale_down_timer(
1530 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1531}
1532
1533static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1534{
1535 int ret = 0;
1536
1537 if (qseecom.support_bus_scaling) {
1538 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1539 if (ret)
1540 pr_err("Failed to set bw MEDIUM.\n");
1541 } else {
1542 ret = qsee_vote_for_clock(data, CLK_SFPB);
1543 if (ret)
1544 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1545 }
1546 return ret;
1547}
1548
1549static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1550 void __user *argp)
1551{
1552 ion_phys_addr_t pa;
1553 int32_t ret;
1554 struct qseecom_set_sb_mem_param_req req;
1555 size_t len;
1556
1557 /* Copy the relevant information needed for loading the image */
1558 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1559 return -EFAULT;
1560
1561 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1562 (req.sb_len == 0)) {
1563 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1564 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1565 return -EFAULT;
1566 }
1567 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1568 req.sb_len))
1569 return -EFAULT;
1570
1571 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001572 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001573 req.ifd_data_fd);
1574 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1575 pr_err("Ion client could not retrieve the handle\n");
1576 return -ENOMEM;
1577 }
1578 /* Get the physical address of the ION BUF */
1579 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1580 if (ret) {
1581
1582 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1583 ret);
1584 return ret;
1585 }
1586
1587 if (len < req.sb_len) {
1588 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1589 req.sb_len, len);
1590 return -EINVAL;
1591 }
1592 /* Populate the structure for sending scm call to load image */
1593 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1594 data->client.ihandle);
1595 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1596 pr_err("ION memory mapping for client shared buf failed\n");
1597 return -ENOMEM;
1598 }
1599 data->client.sb_phys = (phys_addr_t)pa;
1600 data->client.sb_length = req.sb_len;
1601 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1602 return 0;
1603}
1604
1605static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data)
1606{
1607 int ret;
1608
1609 ret = (qseecom.send_resp_flag != 0);
1610 return ret || data->abort;
1611}
1612
1613static int __qseecom_reentrancy_listener_has_sent_rsp(
1614 struct qseecom_dev_handle *data,
1615 struct qseecom_registered_listener_list *ptr_svc)
1616{
1617 int ret;
1618
1619 ret = (ptr_svc->send_resp_flag != 0);
1620 return ret || data->abort;
1621}
1622
1623static int __qseecom_qseos_fail_return_resp_tz(struct qseecom_dev_handle *data,
1624 struct qseecom_command_scm_resp *resp,
1625 struct qseecom_client_listener_data_irsp *send_data_rsp,
1626 struct qseecom_registered_listener_list *ptr_svc,
1627 uint32_t lstnr) {
1628 int ret = 0;
1629
1630 send_data_rsp->status = QSEOS_RESULT_FAILURE;
1631 qseecom.send_resp_flag = 0;
1632 send_data_rsp->qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND;
1633 send_data_rsp->listener_id = lstnr;
1634 if (ptr_svc)
1635 pr_warn("listener_id:%x, lstnr: %x\n",
1636 ptr_svc->svc.listener_id, lstnr);
1637 if (ptr_svc && ptr_svc->ihandle) {
1638 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ptr_svc->ihandle,
1639 ptr_svc->sb_virt, ptr_svc->sb_length,
1640 ION_IOC_CLEAN_INV_CACHES);
1641 if (ret) {
1642 pr_err("cache operation failed %d\n", ret);
1643 return ret;
1644 }
1645 }
1646
1647 if (lstnr == RPMB_SERVICE) {
1648 ret = __qseecom_enable_clk(CLK_QSEE);
1649 if (ret)
1650 return ret;
1651 }
1652 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, send_data_rsp,
1653 sizeof(send_data_rsp), resp, sizeof(*resp));
1654 if (ret) {
1655 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1656 ret, data->client.app_id);
1657 if (lstnr == RPMB_SERVICE)
1658 __qseecom_disable_clk(CLK_QSEE);
1659 return ret;
1660 }
1661 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1662 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1663 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1664 resp->result, data->client.app_id, lstnr);
1665 ret = -EINVAL;
1666 }
1667 if (lstnr == RPMB_SERVICE)
1668 __qseecom_disable_clk(CLK_QSEE);
1669 return ret;
1670}
1671
1672static void __qseecom_clean_listener_sglistinfo(
1673 struct qseecom_registered_listener_list *ptr_svc)
1674{
1675 if (ptr_svc->sglist_cnt) {
1676 memset(ptr_svc->sglistinfo_ptr, 0,
1677 SGLISTINFO_TABLE_SIZE);
1678 ptr_svc->sglist_cnt = 0;
1679 }
1680}
1681
/*
 * Service a QSEOS_RESULT_INCOMPLETE response: QSEE is asking a listener
 * to handle a request. For each INCOMPLETE round: wake the target
 * listener, block (signals masked) until it responds or the client
 * aborts, then send the listener's status back to QSEE and re-examine
 * the new response. Loops until QSEE returns something other than
 * INCOMPLETE.
 */
static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp;
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
		/* resp->data carries the listener id QSEE wants. */
		lstnr = resp->data;
		/*
		 * Wake up blocking lsitener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		/*
		 * NOTE(review): after list_for_each_entry() the cursor is
		 * only NULL if it was never advanced; on a non-match it
		 * points at the list-head container, which the
		 * listener_id recheck below is meant to catch — verify.
		 */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -EINVAL;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, ptr_svc, lstnr);
			return -EINVAL;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_warn("Service requested does not exist\n");
			__qseecom_qseos_fail_return_resp_tz(data, resp,
					&send_data_rsp, NULL, lstnr);
			return -ERESTARTSYS;
		}
		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);
		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		do {
			/*
			 * When reentrancy is not supported, check global
			 * send_resp_flag; otherwise, check this listener's
			 * send_resp_flag.
			 */
			if (!qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_listener_has_sent_rsp(data))) {
				break;
			}

			if (qseecom.qsee_reentrancy_support &&
				!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		if (data->abort) {
			/* Client aborted: report FAILURE to QSEE but keep
			 * going so QSEE's state machine is unwound. */
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}

		qseecom.send_resp_flag = 0;
		ptr_svc->send_resp_flag = 0;
		table = ptr_svc->sglistinfo_ptr;
		/* Build the 32- or 64-bit response per QSEE version. */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id  = lstnr;
			send_data_rsp.status = status;
			send_data_rsp.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
			send_data_rsp.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id  = lstnr;
			send_data_rsp_64bit.status = status;
			send_data_rsp_64bit.sglistinfo_ptr =
				virt_to_phys(table);
			send_data_rsp_64bit.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* First field of either struct is the command id. */
		if (qseecom.whitelist_support == false)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		if (ptr_svc) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}

		/* RPMB/SSD listeners need the QSEE clock held for the call. */
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		ptr_svc->listener_in_use = false;
		__qseecom_clean_listener_sglistinfo(ptr_svc);
		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
				__qseecom_disable_clk(CLK_QSEE);
			return ret;
		}
		if ((resp->result != QSEOS_RESULT_SUCCESS) &&
			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
		}
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
			__qseecom_disable_clk(CLK_QSEE);

	}
	/* An abort (rc) takes precedence over the last SCM status. */
	if (rc)
		return rc;

	return ret;
}
1850
Zhen Kong2f60f492017-06-29 15:22:14 -07001851static int __qseecom_process_blocked_on_listener_legacy(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001852 struct qseecom_command_scm_resp *resp,
1853 struct qseecom_registered_app_list *ptr_app,
1854 struct qseecom_dev_handle *data)
1855{
1856 struct qseecom_registered_listener_list *list_ptr;
1857 int ret = 0;
1858 struct qseecom_continue_blocked_request_ireq ireq;
1859 struct qseecom_command_scm_resp continue_resp;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001860 bool found_app = false;
Zhen Kong2f60f492017-06-29 15:22:14 -07001861 unsigned long flags;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001862
1863 if (!resp || !data) {
1864 pr_err("invalid resp or data pointer\n");
1865 ret = -EINVAL;
1866 goto exit;
1867 }
1868
1869 /* find app_id & img_name from list */
1870 if (!ptr_app) {
1871 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
1872 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
1873 list) {
1874 if ((ptr_app->app_id == data->client.app_id) &&
1875 (!strcmp(ptr_app->app_name,
1876 data->client.app_name))) {
1877 found_app = true;
1878 break;
1879 }
1880 }
1881 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
1882 flags);
1883 if (!found_app) {
1884 pr_err("app_id %d (%s) is not found\n",
1885 data->client.app_id,
1886 (char *)data->client.app_name);
1887 ret = -ENOENT;
1888 goto exit;
1889 }
1890 }
1891
1892 list_ptr = __qseecom_find_svc(resp->data);
1893 if (!list_ptr) {
1894 pr_err("Invalid listener ID\n");
1895 ret = -ENODATA;
1896 goto exit;
1897 }
1898 pr_debug("lsntr %d in_use = %d\n",
1899 resp->data, list_ptr->listener_in_use);
1900 ptr_app->blocked_on_listener_id = resp->data;
Zhen Kong2f60f492017-06-29 15:22:14 -07001901
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001902 /* sleep until listener is available */
Zhen Kongd8cc0052017-11-13 15:13:31 -08001903 do {
1904 qseecom.app_block_ref_cnt++;
1905 ptr_app->app_blocked = true;
1906 mutex_unlock(&app_access_lock);
1907 if (wait_event_freezable(
Zhen Kong2f60f492017-06-29 15:22:14 -07001908 list_ptr->listener_block_app_wq,
1909 !list_ptr->listener_in_use)) {
Zhen Kongd8cc0052017-11-13 15:13:31 -08001910 pr_err("Interrupted: listener_id %d, app_id %d\n",
Zhen Kong2f60f492017-06-29 15:22:14 -07001911 resp->data, ptr_app->app_id);
Zhen Kongd8cc0052017-11-13 15:13:31 -08001912 ret = -ERESTARTSYS;
1913 goto exit;
1914 }
1915 mutex_lock(&app_access_lock);
1916 ptr_app->app_blocked = false;
1917 qseecom.app_block_ref_cnt--;
1918 } while (list_ptr->listener_in_use);
Zhen Kong2f60f492017-06-29 15:22:14 -07001919
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001920 ptr_app->blocked_on_listener_id = 0;
1921 /* notify the blocked app that listener is available */
1922 pr_warn("Lsntr %d is available, unblock app(%d) %s in TZ\n",
1923 resp->data, data->client.app_id,
1924 data->client.app_name);
1925 ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
Zhen Kong2f60f492017-06-29 15:22:14 -07001926 ireq.app_or_session_id = data->client.app_id;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001927 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1928 &ireq, sizeof(ireq),
1929 &continue_resp, sizeof(continue_resp));
1930 if (ret) {
1931 pr_err("scm_call for continue blocked req for app(%d) %s failed, ret %d\n",
1932 data->client.app_id,
1933 data->client.app_name, ret);
1934 goto exit;
1935 }
1936 /*
1937 * After TZ app is unblocked, then continue to next case
1938 * for incomplete request processing
1939 */
1940 resp->result = QSEOS_RESULT_INCOMPLETE;
1941exit:
1942 return ret;
1943}
1944
Zhen Kong2f60f492017-06-29 15:22:14 -07001945static int __qseecom_process_blocked_on_listener_smcinvoke(
Zhen Konge7f525f2017-12-01 18:26:25 -08001946 struct qseecom_command_scm_resp *resp, uint32_t app_id)
Zhen Kong2f60f492017-06-29 15:22:14 -07001947{
1948 struct qseecom_registered_listener_list *list_ptr;
1949 int ret = 0;
1950 struct qseecom_continue_blocked_request_ireq ireq;
1951 struct qseecom_command_scm_resp continue_resp;
1952 unsigned int session_id;
1953
1954 if (!resp) {
1955 pr_err("invalid resp pointer\n");
1956 ret = -EINVAL;
1957 goto exit;
1958 }
1959 session_id = resp->resp_type;
1960 list_ptr = __qseecom_find_svc(resp->data);
1961 if (!list_ptr) {
1962 pr_err("Invalid listener ID\n");
1963 ret = -ENODATA;
1964 goto exit;
1965 }
1966 pr_debug("lsntr %d in_use = %d\n",
1967 resp->data, list_ptr->listener_in_use);
1968 /* sleep until listener is available */
Zhen Kongd8cc0052017-11-13 15:13:31 -08001969 do {
1970 qseecom.app_block_ref_cnt++;
1971 mutex_unlock(&app_access_lock);
1972 if (wait_event_freezable(
Zhen Kong2f60f492017-06-29 15:22:14 -07001973 list_ptr->listener_block_app_wq,
1974 !list_ptr->listener_in_use)) {
Zhen Kongd8cc0052017-11-13 15:13:31 -08001975 pr_err("Interrupted: listener_id %d, session_id %d\n",
Zhen Kong2f60f492017-06-29 15:22:14 -07001976 resp->data, session_id);
Zhen Kongd8cc0052017-11-13 15:13:31 -08001977 ret = -ERESTARTSYS;
1978 goto exit;
1979 }
1980 mutex_lock(&app_access_lock);
1981 qseecom.app_block_ref_cnt--;
1982 } while (list_ptr->listener_in_use);
Zhen Kong2f60f492017-06-29 15:22:14 -07001983
1984 /* notify TZ that listener is available */
1985 pr_warn("Lsntr %d is available, unblock session(%d) in TZ\n",
1986 resp->data, session_id);
1987 ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
1988 ireq.app_or_session_id = session_id;
1989 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1990 &ireq, sizeof(ireq),
1991 &continue_resp, sizeof(continue_resp));
1992 if (ret) {
Zhen Konge7f525f2017-12-01 18:26:25 -08001993 /* retry with legacy cmd */
1994 qseecom.smcinvoke_support = false;
1995 ireq.app_or_session_id = app_id;
1996 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1997 &ireq, sizeof(ireq),
1998 &continue_resp, sizeof(continue_resp));
1999 qseecom.smcinvoke_support = true;
2000 if (ret) {
2001 pr_err("cont block req for app %d or session %d fail\n",
2002 app_id, session_id);
2003 goto exit;
2004 }
Zhen Kong2f60f492017-06-29 15:22:14 -07002005 }
2006 resp->result = QSEOS_RESULT_INCOMPLETE;
2007exit:
2008 return ret;
2009}
2010
2011static int __qseecom_process_reentrancy_blocked_on_listener(
2012 struct qseecom_command_scm_resp *resp,
2013 struct qseecom_registered_app_list *ptr_app,
2014 struct qseecom_dev_handle *data)
2015{
2016 if (!qseecom.smcinvoke_support)
2017 return __qseecom_process_blocked_on_listener_legacy(
2018 resp, ptr_app, data);
2019 else
2020 return __qseecom_process_blocked_on_listener_smcinvoke(
Zhen Konge7f525f2017-12-01 18:26:25 -08002021 resp, data->client.app_id);
Zhen Kong2f60f492017-06-29 15:22:14 -07002022}
/*
 * Process an INCOMPLETE response on the reentrant path: QSEE is asking
 * HLOS to service listener requests before the original command can
 * finish.  Each loop iteration wakes the requested listener, sleeps
 * (with app_access_lock dropped and all signals blocked) until the
 * listener responds, then resumes QSEE with a LISTENER_DATA_RSP scm
 * call; the loop repeats while QSEE keeps answering INCOMPLETE.
 *
 * Called with app_access_lock held; returns 0 on success, negative
 * errno on abort/scm failure.
 */
static int __qseecom_reentrancy_process_incomplete_cmd(
					struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp;
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (ret == 0 && rc == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up blocking listener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		/*
		 * NOTE(review): if the search above finds no match,
		 * list_for_each_entry leaves ptr_svc pointing at the list
		 * head container, not NULL, so this check cannot fire; the
		 * listener_id re-check below is the effective guard.
		 */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			return -EINVAL;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			return -EINVAL;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_warn("Service requested does not exist\n");
			return -ERESTARTSYS;
		}
		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);

		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		/* unlock mutex btw waking listener and sleep-wait */
		mutex_unlock(&app_access_lock);
		do {
			if (!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);
		/* lock mutex again after resp sent */
		mutex_lock(&app_access_lock);
		ptr_svc->send_resp_flag = 0;
		qseecom.send_resp_flag = 0;

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		if (data->abort) {
			/* client is tearing down; report failure to TZ */
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}
		/*
		 * Build the listener response for TZ; layout differs for
		 * QSEE < 4.0 (32-bit sglist pointer) vs >= 4.0 (64-bit).
		 */
		table = ptr_svc->sglistinfo_ptr;
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id = lstnr;
			send_data_rsp.status = status;
			send_data_rsp.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
			send_data_rsp.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id = lstnr;
			send_data_rsp_64bit.status = status;
			send_data_rsp_64bit.sglistinfo_ptr =
				virt_to_phys(table);
			send_data_rsp_64bit.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* first field of both layouts is the command id */
		if (qseecom.whitelist_support == false)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		if (ptr_svc) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}
		/* RPMB needs the QSEE clock held across the scm call */
		if (lstnr == RPMB_SERVICE) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		ptr_svc->listener_in_use = false;
		__qseecom_clean_listener_sglistinfo(ptr_svc);
		wake_up_interruptible(&ptr_svc->listener_block_app_wq);

		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			goto exit;
		}

		switch (resp->result) {
		case QSEOS_RESULT_BLOCKED_ON_LISTENER:
			pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
				lstnr, data->client.app_id, resp->data);
			if (lstnr == resp->data) {
				pr_err("lstnr %d should not be blocked!\n",
					lstnr);
				ret = -EINVAL;
				goto exit;
			}
			ret = __qseecom_process_reentrancy_blocked_on_listener(
					resp, NULL, data);
			if (ret) {
				pr_err("failed to process App(%d) %s blocked on listener %d\n",
					data->client.app_id,
					data->client.app_name, resp->data);
				goto exit;
			}
			/* fall through: unblocked, keep looping on resp */
		case QSEOS_RESULT_SUCCESS:
		case QSEOS_RESULT_INCOMPLETE:
			break;
		default:
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
			goto exit;
		}
/* note: label is inside the loop so the clock vote is dropped per pass */
exit:
		if (lstnr == RPMB_SERVICE)
			__qseecom_disable_clk(CLK_QSEE);

	}
	if (rc)
		return rc;

	return ret;
}
2199
/*
 * QSEE does not support reentrancy for OS-level commands until RE phase-3,
 * and QSEE OS-level scm_call commands will fail if any TZ app is blocked.
 * So, before sending an OS-level scm call, first check whether any app is
 * blocked, and if so wait until all apps are unblocked.
 */
2206static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2207{
2208 sigset_t new_sigset, old_sigset;
2209
2210 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2211 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2212 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2213 /* thread sleep until this app unblocked */
2214 while (qseecom.app_block_ref_cnt > 0) {
2215 sigfillset(&new_sigset);
2216 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2217 mutex_unlock(&app_access_lock);
2218 do {
2219 if (!wait_event_freezable(qseecom.app_block_wq,
2220 (qseecom.app_block_ref_cnt == 0)))
2221 break;
2222 } while (1);
2223 mutex_lock(&app_access_lock);
2224 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2225 }
2226 }
2227}
2228
/*
 * An scm_call to send data will fail if this TA is blocked, or if more
 * than one TA is requesting listener services; so first check whether we
 * need to wait.
 */
2234static void __qseecom_reentrancy_check_if_this_app_blocked(
2235 struct qseecom_registered_app_list *ptr_app)
2236{
2237 sigset_t new_sigset, old_sigset;
2238
2239 if (qseecom.qsee_reentrancy_support) {
2240 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2241 /* thread sleep until this app unblocked */
2242 sigfillset(&new_sigset);
2243 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2244 mutex_unlock(&app_access_lock);
2245 do {
2246 if (!wait_event_freezable(qseecom.app_block_wq,
2247 (!ptr_app->app_blocked &&
2248 qseecom.app_block_ref_cnt <= 1)))
2249 break;
2250 } while (1);
2251 mutex_lock(&app_access_lock);
2252 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2253 }
2254 }
2255}
2256
2257static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2258 uint32_t *app_id)
2259{
2260 int32_t ret;
2261 struct qseecom_command_scm_resp resp;
2262 bool found_app = false;
2263 struct qseecom_registered_app_list *entry = NULL;
2264 unsigned long flags = 0;
2265
2266 if (!app_id) {
2267 pr_err("Null pointer to app_id\n");
2268 return -EINVAL;
2269 }
2270 *app_id = 0;
2271
2272 /* check if app exists and has been registered locally */
2273 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2274 list_for_each_entry(entry,
2275 &qseecom.registered_app_list_head, list) {
2276 if (!strcmp(entry->app_name, req.app_name)) {
2277 found_app = true;
2278 break;
2279 }
2280 }
2281 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2282 if (found_app) {
2283 pr_debug("Found app with id %d\n", entry->app_id);
2284 *app_id = entry->app_id;
2285 return 0;
2286 }
2287
2288 memset((void *)&resp, 0, sizeof(resp));
2289
2290 /* SCM_CALL to check if app_id for the mentioned app exists */
2291 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2292 sizeof(struct qseecom_check_app_ireq),
2293 &resp, sizeof(resp));
2294 if (ret) {
2295 pr_err("scm_call to check if app is already loaded failed\n");
2296 return -EINVAL;
2297 }
2298
2299 if (resp.result == QSEOS_RESULT_FAILURE)
2300 return 0;
2301
2302 switch (resp.resp_type) {
2303 /*qsee returned listener type response */
2304 case QSEOS_LISTENER_ID:
2305 pr_err("resp type is of listener type instead of app");
2306 return -EINVAL;
2307 case QSEOS_APP_ID:
2308 *app_id = resp.data;
2309 return 0;
2310 default:
2311 pr_err("invalid resp type (%d) from qsee",
2312 resp.resp_type);
2313 return -ENODEV;
2314 }
2315}
2316
2317static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2318{
2319 struct qseecom_registered_app_list *entry = NULL;
2320 unsigned long flags = 0;
2321 u32 app_id = 0;
2322 struct ion_handle *ihandle; /* Ion handle */
2323 struct qseecom_load_img_req load_img_req;
2324 int32_t ret = 0;
2325 ion_phys_addr_t pa = 0;
2326 size_t len;
2327 struct qseecom_command_scm_resp resp;
2328 struct qseecom_check_app_ireq req;
2329 struct qseecom_load_app_ireq load_req;
2330 struct qseecom_load_app_64bit_ireq load_req_64bit;
2331 void *cmd_buf = NULL;
2332 size_t cmd_len;
2333 bool first_time = false;
2334
2335 /* Copy the relevant information needed for loading the image */
2336 if (copy_from_user(&load_img_req,
2337 (void __user *)argp,
2338 sizeof(struct qseecom_load_img_req))) {
2339 pr_err("copy_from_user failed\n");
2340 return -EFAULT;
2341 }
2342
2343 /* Check and load cmnlib */
2344 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2345 if (!qseecom.commonlib_loaded &&
2346 load_img_req.app_arch == ELFCLASS32) {
2347 ret = qseecom_load_commonlib_image(data, "cmnlib");
2348 if (ret) {
2349 pr_err("failed to load cmnlib\n");
2350 return -EIO;
2351 }
2352 qseecom.commonlib_loaded = true;
2353 pr_debug("cmnlib is loaded\n");
2354 }
2355
2356 if (!qseecom.commonlib64_loaded &&
2357 load_img_req.app_arch == ELFCLASS64) {
2358 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2359 if (ret) {
2360 pr_err("failed to load cmnlib64\n");
2361 return -EIO;
2362 }
2363 qseecom.commonlib64_loaded = true;
2364 pr_debug("cmnlib64 is loaded\n");
2365 }
2366 }
2367
2368 if (qseecom.support_bus_scaling) {
2369 mutex_lock(&qsee_bw_mutex);
2370 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2371 mutex_unlock(&qsee_bw_mutex);
2372 if (ret)
2373 return ret;
2374 }
2375
2376 /* Vote for the SFPB clock */
2377 ret = __qseecom_enable_clk_scale_up(data);
2378 if (ret)
2379 goto enable_clk_err;
2380
2381 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2382 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2383 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2384
2385 ret = __qseecom_check_app_exists(req, &app_id);
2386 if (ret < 0)
2387 goto loadapp_err;
2388
2389 if (app_id) {
2390 pr_debug("App id %d (%s) already exists\n", app_id,
2391 (char *)(req.app_name));
2392 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2393 list_for_each_entry(entry,
2394 &qseecom.registered_app_list_head, list){
2395 if (entry->app_id == app_id) {
2396 entry->ref_cnt++;
2397 break;
2398 }
2399 }
2400 spin_unlock_irqrestore(
2401 &qseecom.registered_app_list_lock, flags);
2402 ret = 0;
2403 } else {
2404 first_time = true;
2405 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2406 (char *)(load_img_req.img_name));
2407 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002408 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002409 load_img_req.ifd_data_fd);
2410 if (IS_ERR_OR_NULL(ihandle)) {
2411 pr_err("Ion client could not retrieve the handle\n");
2412 ret = -ENOMEM;
2413 goto loadapp_err;
2414 }
2415
2416 /* Get the physical address of the ION BUF */
2417 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2418 if (ret) {
2419 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2420 ret);
2421 goto loadapp_err;
2422 }
2423 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2424 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2425 len, load_img_req.mdt_len,
2426 load_img_req.img_len);
2427 ret = -EINVAL;
2428 goto loadapp_err;
2429 }
2430 /* Populate the structure for sending scm call to load image */
2431 if (qseecom.qsee_version < QSEE_VERSION_40) {
2432 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2433 load_req.mdt_len = load_img_req.mdt_len;
2434 load_req.img_len = load_img_req.img_len;
2435 strlcpy(load_req.app_name, load_img_req.img_name,
2436 MAX_APP_NAME_SIZE);
2437 load_req.phy_addr = (uint32_t)pa;
2438 cmd_buf = (void *)&load_req;
2439 cmd_len = sizeof(struct qseecom_load_app_ireq);
2440 } else {
2441 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2442 load_req_64bit.mdt_len = load_img_req.mdt_len;
2443 load_req_64bit.img_len = load_img_req.img_len;
2444 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2445 MAX_APP_NAME_SIZE);
2446 load_req_64bit.phy_addr = (uint64_t)pa;
2447 cmd_buf = (void *)&load_req_64bit;
2448 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2449 }
2450
2451 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2452 ION_IOC_CLEAN_INV_CACHES);
2453 if (ret) {
2454 pr_err("cache operation failed %d\n", ret);
2455 goto loadapp_err;
2456 }
2457
2458 /* SCM_CALL to load the app and get the app_id back */
2459 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2460 cmd_len, &resp, sizeof(resp));
2461 if (ret) {
2462 pr_err("scm_call to load app failed\n");
2463 if (!IS_ERR_OR_NULL(ihandle))
2464 ion_free(qseecom.ion_clnt, ihandle);
2465 ret = -EINVAL;
2466 goto loadapp_err;
2467 }
2468
2469 if (resp.result == QSEOS_RESULT_FAILURE) {
2470 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2471 if (!IS_ERR_OR_NULL(ihandle))
2472 ion_free(qseecom.ion_clnt, ihandle);
2473 ret = -EFAULT;
2474 goto loadapp_err;
2475 }
2476
2477 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2478 ret = __qseecom_process_incomplete_cmd(data, &resp);
2479 if (ret) {
2480 pr_err("process_incomplete_cmd failed err: %d\n",
2481 ret);
2482 if (!IS_ERR_OR_NULL(ihandle))
2483 ion_free(qseecom.ion_clnt, ihandle);
2484 ret = -EFAULT;
2485 goto loadapp_err;
2486 }
2487 }
2488
2489 if (resp.result != QSEOS_RESULT_SUCCESS) {
2490 pr_err("scm_call failed resp.result unknown, %d\n",
2491 resp.result);
2492 if (!IS_ERR_OR_NULL(ihandle))
2493 ion_free(qseecom.ion_clnt, ihandle);
2494 ret = -EFAULT;
2495 goto loadapp_err;
2496 }
2497
2498 app_id = resp.data;
2499
2500 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2501 if (!entry) {
2502 ret = -ENOMEM;
2503 goto loadapp_err;
2504 }
2505 entry->app_id = app_id;
2506 entry->ref_cnt = 1;
2507 entry->app_arch = load_img_req.app_arch;
2508 /*
2509 * keymaster app may be first loaded as "keymaste" by qseecomd,
2510 * and then used as "keymaster" on some targets. To avoid app
2511 * name checking error, register "keymaster" into app_list and
2512 * thread private data.
2513 */
2514 if (!strcmp(load_img_req.img_name, "keymaste"))
2515 strlcpy(entry->app_name, "keymaster",
2516 MAX_APP_NAME_SIZE);
2517 else
2518 strlcpy(entry->app_name, load_img_req.img_name,
2519 MAX_APP_NAME_SIZE);
2520 entry->app_blocked = false;
2521 entry->blocked_on_listener_id = 0;
2522
2523 /* Deallocate the handle */
2524 if (!IS_ERR_OR_NULL(ihandle))
2525 ion_free(qseecom.ion_clnt, ihandle);
2526
2527 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2528 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2529 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2530 flags);
2531
2532 pr_warn("App with id %u (%s) now loaded\n", app_id,
2533 (char *)(load_img_req.img_name));
2534 }
2535 data->client.app_id = app_id;
2536 data->client.app_arch = load_img_req.app_arch;
2537 if (!strcmp(load_img_req.img_name, "keymaste"))
2538 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2539 else
2540 strlcpy(data->client.app_name, load_img_req.img_name,
2541 MAX_APP_NAME_SIZE);
2542 load_img_req.app_id = app_id;
2543 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2544 pr_err("copy_to_user failed\n");
2545 ret = -EFAULT;
2546 if (first_time == true) {
2547 spin_lock_irqsave(
2548 &qseecom.registered_app_list_lock, flags);
2549 list_del(&entry->list);
2550 spin_unlock_irqrestore(
2551 &qseecom.registered_app_list_lock, flags);
2552 kzfree(entry);
2553 }
2554 }
2555
2556loadapp_err:
2557 __qseecom_disable_clk_scale_down(data);
2558enable_clk_err:
2559 if (qseecom.support_bus_scaling) {
2560 mutex_lock(&qsee_bw_mutex);
2561 qseecom_unregister_bus_bandwidth_needs(data);
2562 mutex_unlock(&qsee_bw_mutex);
2563 }
2564 return ret;
2565}
2566
2567static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2568{
2569 int ret = 1; /* Set unload app */
2570
2571 wake_up_all(&qseecom.send_resp_wq);
2572 if (qseecom.qsee_reentrancy_support)
2573 mutex_unlock(&app_access_lock);
2574 while (atomic_read(&data->ioctl_count) > 1) {
2575 if (wait_event_freezable(data->abort_wq,
2576 atomic_read(&data->ioctl_count) <= 1)) {
2577 pr_err("Interrupted from abort\n");
2578 ret = -ERESTARTSYS;
2579 break;
2580 }
2581 }
2582 if (qseecom.qsee_reentrancy_support)
2583 mutex_lock(&app_access_lock);
2584 return ret;
2585}
2586
2587static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2588{
2589 int ret = 0;
2590
2591 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2592 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2593 ion_free(qseecom.ion_clnt, data->client.ihandle);
2594 data->client.ihandle = NULL;
2595 }
2596 return ret;
2597}
2598
/*
 * Unload a trusted app (or just drop one local reference to it).
 *
 * @data:      client handle whose app is being released
 * @app_crash: true when the caller detected the app/client crashed;
 *             forces a TZ unload regardless of remaining references
 *             (unless the app is currently blocked on a listener).
 *
 * The keymaster app is deliberately never unloaded from TZ.  The shared
 * buffer mapping is released and the handle marked released on every
 * path, including errors.  Returns 0 on success, negative errno on
 * failure.
 */
static int qseecom_unload_app(struct qseecom_dev_handle *data,
				bool app_crash)
{
	unsigned long flags;
	unsigned long flags1;
	int ret = 0;
	struct qseecom_command_scm_resp resp;
	struct qseecom_registered_app_list *ptr_app = NULL;
	bool unload = false;
	bool found_app = false;
	bool found_dead_app = false;

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	/* matches both "keymaste" and "keymaster" spellings */
	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
		pr_debug("Do not unload keymaster app from tz\n");
		goto unload_exit;
	}

	/* wait out in-flight ioctls and any OS-level reentrancy block */
	__qseecom_cleanup_app(data);
	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);

	if (data->client.app_id > 0) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
									list) {
			if (ptr_app->app_id == data->client.app_id) {
				if (!strcmp((void *)ptr_app->app_name,
					(void *)data->client.app_name)) {
					found_app = true;
					/*
					 * don't force-unload an app that is
					 * blocked on a listener in TZ
					 */
					if (ptr_app->app_blocked)
						app_crash = false;
					if (app_crash || ptr_app->ref_cnt == 1)
						unload = true;
					break;
				}
				/* same id, different name: stale entry */
				found_dead_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags);
		if (found_app == false && found_dead_app == false) {
			pr_err("Cannot find app with id = %d (%s)\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -EINVAL;
			goto unload_exit;
		}
	}

	if (found_dead_app)
		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
			(char *)data->client.app_name);

	if (unload) {
		struct qseecom_unload_app_ireq req;
		/* Populate the structure for sending scm call to load image */
		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
		req.app_id = data->client.app_id;

		/* SCM_CALL to unload the app */
		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(struct qseecom_unload_app_ireq),
				&resp, sizeof(resp));
		if (ret) {
			pr_err("scm_call to unload app (id = %d) failed\n",
								req.app_id);
			ret = -EFAULT;
			goto unload_exit;
		} else {
			pr_warn("App id %d now unloaded\n", req.app_id);
		}
		if (resp.result == QSEOS_RESULT_FAILURE) {
			pr_err("app (%d) unload_failed!!\n",
					data->client.app_id);
			ret = -EFAULT;
			goto unload_exit;
		}
		if (resp.result == QSEOS_RESULT_SUCCESS)
			pr_debug("App (%d) is unloaded!!\n",
					data->client.app_id);
		/* TZ may need listener servicing before unload completes */
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd fail err: %d\n",
									ret);
				goto unload_exit;
			}
		}
	}

	/* update (or drop) the local registration entry */
	if (found_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
		if (app_crash) {
			ptr_app->ref_cnt = 0;
			pr_debug("app_crash: ref_count = 0\n");
		} else {
			if (ptr_app->ref_cnt == 1) {
				ptr_app->ref_cnt = 0;
				pr_debug("ref_count set to 0\n");
			} else {
				ptr_app->ref_cnt--;
				pr_debug("Can't unload app(%d) inuse\n",
					ptr_app->app_id);
			}
		}
		if (unload) {
			list_del(&ptr_app->list);
			kzfree(ptr_app);
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags1);
	}
unload_exit:
	qseecom_unmap_ion_allocated_memory(data);
	data->released = true;
	return ret;
}
2721
2722static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2723 unsigned long virt)
2724{
2725 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2726}
2727
2728static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2729 unsigned long virt)
2730{
2731 return (uintptr_t)data->client.sb_virt +
2732 (virt - data->client.user_virt_sb_base);
2733}
2734
2735int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2736 struct qseecom_send_svc_cmd_req *req_ptr,
2737 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2738{
2739 int ret = 0;
2740 void *req_buf = NULL;
2741
2742 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2743 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2744 req_ptr, send_svc_ireq_ptr);
2745 return -EINVAL;
2746 }
2747
2748 /* Clients need to ensure req_buf is at base offset of shared buffer */
2749 if ((uintptr_t)req_ptr->cmd_req_buf !=
2750 data_ptr->client.user_virt_sb_base) {
2751 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2752 return -EINVAL;
2753 }
2754
2755 if (data_ptr->client.sb_length <
2756 sizeof(struct qseecom_rpmb_provision_key)) {
2757 pr_err("shared buffer is too small to hold key type\n");
2758 return -EINVAL;
2759 }
2760 req_buf = data_ptr->client.sb_virt;
2761
2762 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2763 send_svc_ireq_ptr->key_type =
2764 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2765 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2766 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2767 data_ptr, (uintptr_t)req_ptr->resp_buf));
2768 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2769
2770 return ret;
2771}
2772
2773int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2774 struct qseecom_send_svc_cmd_req *req_ptr,
2775 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2776{
2777 int ret = 0;
2778 uint32_t reqd_len_sb_in = 0;
2779
2780 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2781 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2782 req_ptr, send_svc_ireq_ptr);
2783 return -EINVAL;
2784 }
2785
2786 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2787 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2788 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2789 pr_err("Required: %u, Available: %zu\n",
2790 reqd_len_sb_in, data_ptr->client.sb_length);
2791 return -ENOMEM;
2792 }
2793
2794 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2795 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2796 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2797 data_ptr, (uintptr_t)req_ptr->resp_buf));
2798 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2799
2800 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2801 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2802
2803
2804 return ret;
2805}
2806
/*
 * Validate a send-service-cmd request against the client's registered
 * shared buffer before it is handed to TZ.  Checks, in order: non-NULL
 * pointers, an initialized client and shared buffer, both user buffers
 * starting inside the shared buffer, sane non-zero lengths, absence of
 * integer overflow in every address/length sum used below, and both
 * buffers ending inside the shared buffer.
 * Returns 0 when the request is safe to process, -EINVAL or -ENOMEM
 * otherwise.
 */
static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_svc_cmd_req *req)
{
	if (!req || !req->resp_buf || !req->cmd_req_buf) {
		pr_err("req or cmd buffer or response buffer is null\n");
		return -EINVAL;
	}

	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	if (data->client.sb_virt == NULL) {
		pr_err("sb_virt null\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base == 0) {
		pr_err("user_virt_sb_base is null\n");
		return -EINVAL;
	}

	if (data->client.sb_length == 0) {
		pr_err("sb_length is 0\n");
		return -EINVAL;
	}

	/* request buffer must start inside the user-mapped shared buffer */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	/* ...and so must the response buffer */
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* guard the length sum below against 32-bit wrap-around */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
				data->client.sb_length);
		return -ENOMEM;
	}
	/* guard the end-address computations below against wrap-around */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* both buffers must also END inside the shared buffer */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
2891
/*
 * Handle the SEND_SVC_CMD ioctl: forward an RPMB or FSM key service
 * command to TZ using the client's shared buffer.  Validates the
 * request, builds the command-specific ireq, votes for bus bandwidth or
 * clocks, performs cache maintenance around the scm call, and
 * interprets the TZ response (including the INCOMPLETE continuation
 * path).  Returns 0 on success, negative errno on failure.
 */
static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	int ret = 0;
	struct qseecom_client_send_service_ireq send_svc_ireq;
	struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
	struct qseecom_command_scm_resp resp;
	struct qseecom_send_svc_cmd_req req;
	void *send_req_ptr;
	size_t req_buf_size;

	/*struct qseecom_command_scm_resp resp;*/

	if (copy_from_user(&req,
				(void __user *)argp,
				sizeof(req))) {
		pr_err("copy_from_user failed\n");
		return -EFAULT;
	}

	if (__validate_send_service_cmd_inputs(data, &req))
		return -EINVAL;

	data->type = QSEECOM_SECURE_SERVICE;

	/* select the ireq layout matching the requested service */
	switch (req.cmd_id) {
	case QSEOS_RPMB_PROVISION_KEY_COMMAND:
	case QSEOS_RPMB_ERASE_COMMAND:
	case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
		send_req_ptr = &send_svc_ireq;
		req_buf_size = sizeof(send_svc_ireq);
		if (__qseecom_process_rpmb_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	case QSEOS_FSM_LTEOTA_REQ_CMD:
	case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
	case QSEOS_FSM_IKE_REQ_CMD:
	case QSEOS_FSM_IKE_REQ_RSP_CMD:
	case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
	case QSEOS_FSM_OEM_FUSE_READ_ROW:
	case QSEOS_FSM_ENCFS_REQ_CMD:
	case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
		send_req_ptr = &send_fsm_key_svc_ireq;
		req_buf_size = sizeof(send_fsm_key_svc_ireq);
		if (__qseecom_process_fsm_key_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	default:
		pr_err("Unsupported cmd_id %d\n", req.cmd_id);
		return -EINVAL;
	}

	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(HIGH);
		if (ret) {
			pr_err("Fail to set bw HIGH\n");
			return ret;
		}
	} else {
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clocks with err %d\n", ret);
			goto exit;
		}
	}

	/* flush the shared buffer so TZ sees the request payload */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				(const void *)send_req_ptr,
				req_buf_size, &resp, sizeof(resp));
	if (ret) {
		pr_err("qseecom_scm_call failed with err: %d\n", ret);
		/* undo the clock/bw vote taken above before bailing out */
		if (!qseecom.support_bus_scaling) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		} else {
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		}
		goto exit;
	}
	/* invalidate so the CPU sees TZ's writes to the shared buffer */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		pr_debug("qseos_result_incomplete\n");
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd fail with result: %d\n",
				resp.result);
		}
		if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
			pr_warn("RPMB key status is 0x%x\n", resp.result);
			/* report the raw key status back via resp_buf */
			if (put_user(resp.result,
				(uint32_t __user *)req.resp_buf)) {
				ret = -EINVAL;
				goto exit;
			}
			ret = 0;
		}
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed with resp.result: %d\n", resp.result);
		ret = -EINVAL;
		break;
	default:
		pr_err("Response result %d not supported\n",
				resp.result);
		ret = -EINVAL;
		break;
	}
	if (!qseecom.support_bus_scaling) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	} else {
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
	}

exit:
	return ret;
}
3029
/*
 * Validate a client's send_cmd request against its shared buffer.
 *
 * Both cmd_req_buf and resp_buf are userspace virtual addresses that must
 * lie entirely inside the client's registered shared buffer
 * [user_virt_sb_base, user_virt_sb_base + sb_length).  Every arithmetic
 * combination used later (buf + len, cmd_len + resp_len, base + sb_length)
 * is first checked for integer overflow so the range comparisons below
 * cannot be defeated by wraparound.
 *
 * Returns 0 when the request is acceptable, -EINVAL on any bad pointer,
 * length or overflow, and -ENOMEM when both buffers cannot fit in the
 * shared buffer together.
 */
static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)

{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}
	/* resp_buf may be NULL only when resp_len is 0 (no response wanted) */
	if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
						(req->cmd_req_buf == NULL)) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* cmd buffer start must sit inside the shared buffer */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	/* response buffer start must sit inside the shared buffer */
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base)  ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	/* individual lengths must be non-zero (cmd) and within sb_length */
	if ((req->cmd_req_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	/* combined request + response must fit in the shared buffer */
	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	/* guard the (buf + len) additions used in the range checks below */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* both buffers must END inside the shared buffer as well */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
3099
/*
 * Handle the TZ response for a command sent while reentrancy support is
 * enabled.
 *
 * BLOCKED_ON_LISTENER: the app is waiting for a listener; unblock it and
 * then deliberately fall through to the INCOMPLETE path, since the resumed
 * command still has to be driven to completion.
 * INCOMPLETE: drive the pending listener round-trips; the app is marked
 * blocked for the duration and waiters are woken afterwards.
 */
int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
					struct qseecom_registered_app_list *ptr_app,
					struct qseecom_dev_handle *data)
{
	int ret = 0;

	switch (resp->result) {
	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
		pr_warn("App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name,
			resp->data);
		ret = __qseecom_process_reentrancy_blocked_on_listener(
					resp, ptr_app, data);
		if (ret) {
			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name, resp->data);
			return ret;
		}
		/* fall through: resumed cmd is completed as INCOMPLETE */

	case QSEOS_RESULT_INCOMPLETE:
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
		/* wake anyone waiting for the app-block refcount to drop */
		wake_up_interruptible(&qseecom.app_block_wq);
		if (ret)
			pr_err("process_incomplete_cmd failed err: %d\n",
				ret);
		return ret;
	case QSEOS_RESULT_SUCCESS:
		return ret;
	default:
		pr_err("Response result %d not supported\n",
						resp->result);
		return -EINVAL;
	}
}
3138
/*
 * Send a client command to its loaded TZ app via an SCM call.
 *
 * Flow: look up the registered app, build the 32- or 64-bit ireq (chosen
 * by QSEE version) with the physical addresses of the request/response
 * regions and the sglist info table, clean the shared buffer out of the
 * cache, issue the SCM call, process the response (reentrancy-aware or
 * legacy INCOMPLETE handling), and finally invalidate the shared buffer so
 * the client sees TZ's writes.  The invalidate runs on every path, which
 * is why failures jump to "exit" rather than returning directly.
 */
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)
{
	int ret = 0;
	int ret2 = 0;	/* cache-op status; must not clobber the cmd status */
	u32 reqd_len_sb_in = 0;
	struct qseecom_client_send_data_ireq send_data_req = {0};
	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
	struct qseecom_command_scm_resp resp;
	unsigned long flags;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;

	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
							list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);

	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* legacy 32-bit ireq: physical addresses must fit in u32 */
		send_data_req.app_id = data->client.app_id;
		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->cmd_req_buf));
		send_data_req.req_len = req->cmd_req_len;
		send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->resp_buf));
		send_data_req.rsp_len = req->resp_len;
		send_data_req.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* TZ reads the table via PA; push it out of the cache */
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req;
		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
	} else {
		send_data_req_64bit.app_id = data->client.app_id;
		send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->cmd_req_buf);
		send_data_req_64bit.req_len = req->cmd_req_len;
		send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->resp_buf);
		send_data_req_64bit.rsp_len = req->resp_len;
		/* check if 32bit app's phys_addr region is under 4GB.*/
		if ((data->client.app_arch == ELFCLASS32) &&
			((send_data_req_64bit.req_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
			(send_data_req_64bit.rsp_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
				data->client.app_name,
				send_data_req_64bit.req_ptr,
				send_data_req_64bit.req_len,
				send_data_req_64bit.rsp_ptr,
				send_data_req_64bit.rsp_len);
			return -EFAULT;
		}
		send_data_req_64bit.sglistinfo_ptr =
				(uint64_t)virt_to_phys(table);
		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req_64bit;
		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
	}

	/* cmd_id is the first u32 of both ireq layouts */
	if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
	else
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;

	/* make the client's request visible to TZ before the SCM call */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/* always invalidate so the client reads TZ's response data */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret2) {
		pr_err("cache operation failed %d\n", ret2);
		return ret2;
	}
	return ret;
}
3276
3277static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3278{
3279 int ret = 0;
3280 struct qseecom_send_cmd_req req;
3281
3282 ret = copy_from_user(&req, argp, sizeof(req));
3283 if (ret) {
3284 pr_err("copy_from_user failed\n");
3285 return ret;
3286 }
3287
3288 if (__validate_send_cmd_inputs(data, &req))
3289 return -EINVAL;
3290
3291 ret = __qseecom_send_cmd(data, &req);
3292
3293 if (ret)
3294 return ret;
3295
3296 return ret;
3297}
3298
3299int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3300 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3301 struct qseecom_dev_handle *data, int i) {
3302
3303 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3304 (req->ifd_data[i].fd > 0)) {
3305 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3306 (req->ifd_data[i].cmd_buf_offset >
3307 req->cmd_req_len - sizeof(uint32_t))) {
3308 pr_err("Invalid offset (req len) 0x%x\n",
3309 req->ifd_data[i].cmd_buf_offset);
3310 return -EINVAL;
3311 }
3312 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3313 (lstnr_resp->ifd_data[i].fd > 0)) {
3314 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3315 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3316 lstnr_resp->resp_len - sizeof(uint32_t))) {
3317 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3318 lstnr_resp->ifd_data[i].cmd_buf_offset);
3319 return -EINVAL;
3320 }
3321 }
3322 return 0;
3323}
3324
/*
 * Patch (or un-patch, when cleanup==true) 32-bit ion buffer physical
 * addresses into a modfd command/response buffer.
 *
 * For every ifd_data[] entry with a valid fd, the ion buffer's sg table
 * is resolved and either a single 32-bit PA (nents == 1) or an array of
 * qseecom_sg_entry {pa, len} records is written at cmd_buf_offset.  On
 * cleanup the same slots are zeroed and the buffer caches invalidated.
 * Per-fd sglist bookkeeping is recorded for whitelist-mode SCM calls.
 *
 * Returns 0 on success; every failure path returns -ENOMEM (historic
 * behavior callers rely on), after freeing the current ion handle.
 */
static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* msg is a listener response or an app cmd req, depending on type */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
						(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;	/* slot unused */
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_err("Num of scattered entries");
			pr_err(" (%d) is greater than max supported %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* single segment: write one 32-bit PA at 'field' */
			uint32_t *update;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				/*
				 * Check if sg list phy add region is under 4GB
				 */
				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
					(!cleanup) &&
					((uint64_t)sg_dma_address(sg_ptr->sgl)
					>= PHY_ADDR_4G - sg->length)) {
					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
						data->client.app_name,
						&(sg_dma_address(sg_ptr->sgl)),
						sg->length);
					goto err;
				}
				update = (uint32_t *) field;
				*update = cleanup ? 0 :
					(uint32_t)sg_dma_address(sg_ptr->sgl);
			} else {
				pr_err("QSEE app arch %u is not supported\n",
							data->client.app_arch);
				goto err;
			}
			len += (uint32_t)sg->length;
		} else {
			/* multiple segments: write a qseecom_sg_entry array */
			struct qseecom_sg_entry *update;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				/* whole entry array must fit in cmd buffer */
				if ((req->cmd_req_len <
					 SG_ENTRY_SZ * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
						(req->cmd_req_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				/* whole entry array must fit in resp buffer */
				if ((lstnr_resp->resp_len <
						SG_ENTRY_SZ * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					goto err;
				}
			}
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				update = (struct qseecom_sg_entry *)field;
				for (j = 0; j < sg_ptr->nents; j++) {
					/*
					 * Check if sg list PA is under 4GB
					 */
					if ((qseecom.qsee_version >=
						QSEE_VERSION_40) &&
						(!cleanup) &&
						((uint64_t)(sg_dma_address(sg))
						>= PHY_ADDR_4G - sg->length)) {
						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
							data->client.app_name,
							&(sg_dma_address(sg)),
							sg->length);
						goto err;
					}
					update->phys_addr = cleanup ? 0 :
						(uint32_t)sg_dma_address(sg);
					update->len = cleanup ? 0 : sg->length;
					update++;
					len += sg->length;
					sg = sg_next(sg);
				}
			} else {
				pr_err("QSEE app arch %u is not supported\n",
							data->client.app_arch);
				goto err;
			}
		}

		if (cleanup) {
			/* pull TZ's writes into the CPU's view */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* push the patched addresses out for TZ to read */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* record sglist info for whitelist-mode SCM calls */
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3541
3542static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3543 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3544{
3545 struct scatterlist *sg = sg_ptr->sgl;
3546 struct qseecom_sg_entry_64bit *sg_entry;
3547 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3548 void *buf;
3549 uint i;
3550 size_t size;
3551 dma_addr_t coh_pmem;
3552
3553 if (fd_idx >= MAX_ION_FD) {
3554 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3555 return -ENOMEM;
3556 }
3557 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3558 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3559 /* Allocate a contiguous kernel buffer */
3560 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3561 size = (size + PAGE_SIZE) & PAGE_MASK;
3562 buf = dma_alloc_coherent(qseecom.pdev,
3563 size, &coh_pmem, GFP_KERNEL);
3564 if (buf == NULL) {
3565 pr_err("failed to alloc memory for sg buf\n");
3566 return -ENOMEM;
3567 }
3568 /* update qseecom_sg_list_buf_hdr_64bit */
3569 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3570 buf_hdr->new_buf_phys_addr = coh_pmem;
3571 buf_hdr->nents_total = sg_ptr->nents;
3572 /* save the left sg entries into new allocated buf */
3573 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3574 for (i = 0; i < sg_ptr->nents; i++) {
3575 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3576 sg_entry->len = sg->length;
3577 sg_entry++;
3578 sg = sg_next(sg);
3579 }
3580
3581 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3582 data->client.sec_buf_fd[fd_idx].vbase = buf;
3583 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3584 data->client.sec_buf_fd[fd_idx].size = size;
3585
3586 return 0;
3587}
3588
/*
 * 64-bit variant of __qseecom_update_cmd_buf(): patch ion buffer physical
 * addresses into a modfd buffer using 64-bit PAs and qseecom_sg_entry_64bit
 * records, or zero them back out when cleanup==true.
 *
 * Unlike the 32-bit path, an sg list with more than QSEECOM_MAX_SG_ENTRY
 * entries is not an error: it is spilled into a separate coherent buffer
 * via __qseecom_allocate_sg_list_buffer() and referenced by a V2 header.
 *
 * Returns 0 on success; all failures return -ENOMEM after freeing any
 * spilled sec_buf_fd allocations and the current ion handle.
 */
static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* msg is a listener response or an app cmd req, depending on type */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
						(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;	/* slot unused */
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			/* too many entries: spill into a V2 side buffer */
			pr_warn("Num of scattered entries");
			pr_warn(" (%d) is greater than %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			if (cleanup) {
				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
					data->client.sec_buf_fd[i].vbase)
					dma_free_coherent(qseecom.pdev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			} else {
				ret = __qseecom_allocate_sg_list_buffer(data,
						field, i, sg_ptr);
				if (ret) {
					pr_err("Failed to allocate sg list buffer\n");
					goto err;
				}
			}
			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
			sg = sg_ptr->sgl;
			goto cleanup;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* single segment: write one 64-bit PA at 'field' */
			uint64_t *update_64bit;

			/*
			 * NOTE(review): __boundary_checks_offset() only
			 * guarantees sizeof(uint32_t) bytes at the offset,
			 * but a uint64_t is written here — verify the
			 * offset also leaves room for the extra 4 bytes.
			 */
			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			/* 64bit app uses 64bit address */
			update_64bit = (uint64_t *) field;
			*update_64bit = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg_ptr->sgl);
			len += (uint32_t)sg->length;
		} else {
			/* multiple segments: write 64-bit sg entry array */
			struct qseecom_sg_entry_64bit *update_64bit;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				/* whole entry array must fit in cmd buffer */
				if ((req->cmd_req_len <
					 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
					(req->cmd_req_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				/* whole entry array must fit in resp buffer */
				if ((lstnr_resp->resp_len <
					SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					goto err;
				}
			}
			/* 64bit app uses 64bit address */
			update_64bit = (struct qseecom_sg_entry_64bit *)field;
			for (j = 0; j < sg_ptr->nents; j++) {
				update_64bit->phys_addr = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg);
				update_64bit->len = cleanup ? 0 :
						(uint32_t)sg->length;
				update_64bit++;
				len += sg->length;
				sg = sg_next(sg);
			}
		}
cleanup:
		if (cleanup) {
			/* pull TZ's writes into the CPU's view */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* push the patched addresses out for TZ to read */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* record sglist info for whitelist-mode SCM calls */
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	/* free any spilled sg-list side buffers recorded so far */
	for (i = 0; i < MAX_ION_FD; i++)
		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
			data->client.sec_buf_fd[i].vbase)
			dma_free_coherent(qseecom.pdev,
				data->client.sec_buf_fd[i].size,
				data->client.sec_buf_fd[i].vbase,
				data->client.sec_buf_fd[i].pbase);
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3787
3788static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3789 void __user *argp,
3790 bool is_64bit_addr)
3791{
3792 int ret = 0;
3793 int i;
3794 struct qseecom_send_modfd_cmd_req req;
3795 struct qseecom_send_cmd_req send_cmd_req;
3796
3797 ret = copy_from_user(&req, argp, sizeof(req));
3798 if (ret) {
3799 pr_err("copy_from_user failed\n");
3800 return ret;
3801 }
3802
3803 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3804 send_cmd_req.cmd_req_len = req.cmd_req_len;
3805 send_cmd_req.resp_buf = req.resp_buf;
3806 send_cmd_req.resp_len = req.resp_len;
3807
3808 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3809 return -EINVAL;
3810
3811 /* validate offsets */
3812 for (i = 0; i < MAX_ION_FD; i++) {
3813 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3814 pr_err("Invalid offset %d = 0x%x\n",
3815 i, req.ifd_data[i].cmd_buf_offset);
3816 return -EINVAL;
3817 }
3818 }
3819 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3820 (uintptr_t)req.cmd_req_buf);
3821 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3822 (uintptr_t)req.resp_buf);
3823
3824 if (!is_64bit_addr) {
3825 ret = __qseecom_update_cmd_buf(&req, false, data);
3826 if (ret)
3827 return ret;
3828 ret = __qseecom_send_cmd(data, &send_cmd_req);
3829 if (ret)
3830 return ret;
3831 ret = __qseecom_update_cmd_buf(&req, true, data);
3832 if (ret)
3833 return ret;
3834 } else {
3835 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3836 if (ret)
3837 return ret;
3838 ret = __qseecom_send_cmd(data, &send_cmd_req);
3839 if (ret)
3840 return ret;
3841 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3842 if (ret)
3843 return ret;
3844 }
3845
3846 return ret;
3847}
3848
/* ioctl entry for modfd send-cmd using the 32-bit address layout. */
static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, false);
}
3854
/* ioctl entry for modfd send-cmd using the 64-bit address layout. */
static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, true);
}
3860
3861
3862
3863static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3864 struct qseecom_registered_listener_list *svc)
3865{
3866 int ret;
3867
3868 ret = (svc->rcv_req_flag != 0);
3869 return ret || data->abort;
3870}
3871
3872static int qseecom_receive_req(struct qseecom_dev_handle *data)
3873{
3874 int ret = 0;
3875 struct qseecom_registered_listener_list *this_lstnr;
3876
3877 this_lstnr = __qseecom_find_svc(data->listener.id);
3878 if (!this_lstnr) {
3879 pr_err("Invalid listener ID\n");
3880 return -ENODATA;
3881 }
3882
3883 while (1) {
3884 if (wait_event_freezable(this_lstnr->rcv_req_wq,
3885 __qseecom_listener_has_rcvd_req(data,
3886 this_lstnr))) {
3887 pr_debug("Interrupted: exiting Listener Service = %d\n",
3888 (uint32_t)data->listener.id);
3889 /* woken up for different reason */
3890 return -ERESTARTSYS;
3891 }
3892
3893 if (data->abort) {
3894 pr_err("Aborting Listener Service = %d\n",
3895 (uint32_t)data->listener.id);
3896 return -ENODEV;
3897 }
3898 this_lstnr->rcv_req_flag = 0;
3899 break;
3900 }
3901 return ret;
3902}
3903
3904static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
3905{
3906 unsigned char app_arch = 0;
3907 struct elf32_hdr *ehdr;
3908 struct elf64_hdr *ehdr64;
3909
3910 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3911
3912 switch (app_arch) {
3913 case ELFCLASS32: {
3914 ehdr = (struct elf32_hdr *)fw_entry->data;
3915 if (fw_entry->size < sizeof(*ehdr)) {
3916 pr_err("%s: Not big enough to be an elf32 header\n",
3917 qseecom.pdev->init_name);
3918 return false;
3919 }
3920 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
3921 pr_err("%s: Not an elf32 header\n",
3922 qseecom.pdev->init_name);
3923 return false;
3924 }
3925 if (ehdr->e_phnum == 0) {
3926 pr_err("%s: No loadable segments\n",
3927 qseecom.pdev->init_name);
3928 return false;
3929 }
3930 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
3931 sizeof(struct elf32_hdr) > fw_entry->size) {
3932 pr_err("%s: Program headers not within mdt\n",
3933 qseecom.pdev->init_name);
3934 return false;
3935 }
3936 break;
3937 }
3938 case ELFCLASS64: {
3939 ehdr64 = (struct elf64_hdr *)fw_entry->data;
3940 if (fw_entry->size < sizeof(*ehdr64)) {
3941 pr_err("%s: Not big enough to be an elf64 header\n",
3942 qseecom.pdev->init_name);
3943 return false;
3944 }
3945 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
3946 pr_err("%s: Not an elf64 header\n",
3947 qseecom.pdev->init_name);
3948 return false;
3949 }
3950 if (ehdr64->e_phnum == 0) {
3951 pr_err("%s: No loadable segments\n",
3952 qseecom.pdev->init_name);
3953 return false;
3954 }
3955 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
3956 sizeof(struct elf64_hdr) > fw_entry->size) {
3957 pr_err("%s: Program headers not within mdt\n",
3958 qseecom.pdev->init_name);
3959 return false;
3960 }
3961 break;
3962 }
3963 default: {
3964 pr_err("QSEE app arch %u is not supported\n", app_arch);
3965 return false;
3966 }
3967 }
3968 return true;
3969}
3970
/*
 * Compute the total size of a split TZ app firmware image and report its
 * ELF class.
 *
 * Loads "<appname>.mdt" to validate the image and count program headers,
 * then requests each "<appname>.bNN" segment in turn, summing the sizes
 * (with overflow protection) into *fw_size.  Each firmware blob is
 * released before the next request; the err path releases whichever one
 * is still held and zeroes *fw_size.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
					uint32_t *app_arch)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		pr_err("error with request_firmware\n");
		ret = -EIO;
		goto err;
	}
	if (!__qseecom_is_fw_image_valid(fw_entry)) {
		ret = -EIO;
		goto err;
	}
	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	*fw_size = fw_entry->size;
	/* program header count == number of .bNN split images */
	if (*app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (*app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
						appname, *app_arch);
		ret = -EIO;
		goto err;
	}
	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
	release_firmware(fw_entry);
	fw_entry = NULL;
	/* accumulate the sizes of all split segment files */
	for (i = 0; i < num_images; i++) {
		memset(fw_name, 0, sizeof(fw_name));
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret)
			goto err;
		if (*fw_size > U32_MAX - fw_entry->size) {
			pr_err("QSEE %s app file size overflow\n", appname);
			ret = -EINVAL;
			goto err;
		}
		*fw_size += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}

	return ret;
err:
	if (fw_entry)
		release_firmware(fw_entry);
	*fw_size = 0;
	return ret;
}
4033
4034static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
4035 uint32_t fw_size,
4036 struct qseecom_load_app_ireq *load_req)
4037{
4038 int ret = -1;
4039 int i = 0, rc = 0;
4040 const struct firmware *fw_entry = NULL;
4041 char fw_name[MAX_APP_NAME_SIZE];
4042 u8 *img_data_ptr = img_data;
4043 struct elf32_hdr *ehdr;
4044 struct elf64_hdr *ehdr64;
4045 int num_images = 0;
4046 unsigned char app_arch = 0;
4047
4048 snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
4049 rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4050 if (rc) {
4051 ret = -EIO;
4052 goto err;
4053 }
4054
4055 load_req->img_len = fw_entry->size;
4056 if (load_req->img_len > fw_size) {
4057 pr_err("app %s size %zu is larger than buf size %u\n",
4058 appname, fw_entry->size, fw_size);
4059 ret = -EINVAL;
4060 goto err;
4061 }
4062 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4063 img_data_ptr = img_data_ptr + fw_entry->size;
4064 load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/
4065
4066 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
4067 if (app_arch == ELFCLASS32) {
4068 ehdr = (struct elf32_hdr *)fw_entry->data;
4069 num_images = ehdr->e_phnum;
4070 } else if (app_arch == ELFCLASS64) {
4071 ehdr64 = (struct elf64_hdr *)fw_entry->data;
4072 num_images = ehdr64->e_phnum;
4073 } else {
4074 pr_err("QSEE %s app, arch %u is not supported\n",
4075 appname, app_arch);
4076 ret = -EIO;
4077 goto err;
4078 }
4079 release_firmware(fw_entry);
4080 fw_entry = NULL;
4081 for (i = 0; i < num_images; i++) {
4082 snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
4083 ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
4084 if (ret) {
4085 pr_err("Failed to locate blob %s\n", fw_name);
4086 goto err;
4087 }
4088 if ((fw_entry->size > U32_MAX - load_req->img_len) ||
4089 (fw_entry->size + load_req->img_len > fw_size)) {
4090 pr_err("Invalid file size for %s\n", fw_name);
4091 ret = -EINVAL;
4092 goto err;
4093 }
4094 memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
4095 img_data_ptr = img_data_ptr + fw_entry->size;
4096 load_req->img_len += fw_entry->size;
4097 release_firmware(fw_entry);
4098 fw_entry = NULL;
4099 }
4100 return ret;
4101err:
4102 release_firmware(fw_entry);
4103 return ret;
4104}
4105
4106static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
4107 u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
4108{
4109 size_t len = 0;
4110 int ret = 0;
4111 ion_phys_addr_t pa;
4112 struct ion_handle *ihandle = NULL;
4113 u8 *img_data = NULL;
Zhen Kong3dd92792017-12-08 09:47:15 -08004114 int retry = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004115
Zhen Kong3dd92792017-12-08 09:47:15 -08004116 do {
4117 if (retry++)
4118 msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
4119 ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
4120 SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), 0);
4121 } while (IS_ERR_OR_NULL(ihandle) &&
4122 (retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004123
4124 if (IS_ERR_OR_NULL(ihandle)) {
4125 pr_err("ION alloc failed\n");
4126 return -ENOMEM;
4127 }
4128 img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
4129 ihandle);
4130
4131 if (IS_ERR_OR_NULL(img_data)) {
4132 pr_err("ION memory mapping for image loading failed\n");
4133 ret = -ENOMEM;
4134 goto exit_ion_free;
4135 }
4136 /* Get the physical address of the ION BUF */
4137 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
4138 if (ret) {
4139 pr_err("physical memory retrieval failure\n");
4140 ret = -EIO;
4141 goto exit_ion_unmap_kernel;
4142 }
4143
4144 *pihandle = ihandle;
4145 *data = img_data;
4146 *paddr = pa;
4147 return ret;
4148
4149exit_ion_unmap_kernel:
4150 ion_unmap_kernel(qseecom.ion_clnt, ihandle);
4151exit_ion_free:
4152 ion_free(qseecom.ion_clnt, ihandle);
4153 ihandle = NULL;
4154 return ret;
4155}
4156
4157static void __qseecom_free_img_data(struct ion_handle **ihandle)
4158{
4159 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4160 ion_free(qseecom.ion_clnt, *ihandle);
4161 *ihandle = NULL;
4162}
4163
4164static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4165 uint32_t *app_id)
4166{
4167 int ret = -1;
4168 uint32_t fw_size = 0;
4169 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4170 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4171 struct qseecom_command_scm_resp resp;
4172 u8 *img_data = NULL;
4173 ion_phys_addr_t pa = 0;
4174 struct ion_handle *ihandle = NULL;
4175 void *cmd_buf = NULL;
4176 size_t cmd_len;
4177 uint32_t app_arch = 0;
4178
4179 if (!data || !appname || !app_id) {
4180 pr_err("Null pointer to data or appname or appid\n");
4181 return -EINVAL;
4182 }
4183 *app_id = 0;
4184 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4185 return -EIO;
4186 data->client.app_arch = app_arch;
4187
4188 /* Check and load cmnlib */
4189 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4190 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4191 ret = qseecom_load_commonlib_image(data, "cmnlib");
4192 if (ret) {
4193 pr_err("failed to load cmnlib\n");
4194 return -EIO;
4195 }
4196 qseecom.commonlib_loaded = true;
4197 pr_debug("cmnlib is loaded\n");
4198 }
4199
4200 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4201 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4202 if (ret) {
4203 pr_err("failed to load cmnlib64\n");
4204 return -EIO;
4205 }
4206 qseecom.commonlib64_loaded = true;
4207 pr_debug("cmnlib64 is loaded\n");
4208 }
4209 }
4210
4211 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4212 if (ret)
4213 return ret;
4214
4215 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4216 if (ret) {
4217 ret = -EIO;
4218 goto exit_free_img_data;
4219 }
4220
4221 /* Populate the load_req parameters */
4222 if (qseecom.qsee_version < QSEE_VERSION_40) {
4223 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4224 load_req.mdt_len = load_req.mdt_len;
4225 load_req.img_len = load_req.img_len;
4226 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4227 load_req.phy_addr = (uint32_t)pa;
4228 cmd_buf = (void *)&load_req;
4229 cmd_len = sizeof(struct qseecom_load_app_ireq);
4230 } else {
4231 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4232 load_req_64bit.mdt_len = load_req.mdt_len;
4233 load_req_64bit.img_len = load_req.img_len;
4234 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4235 load_req_64bit.phy_addr = (uint64_t)pa;
4236 cmd_buf = (void *)&load_req_64bit;
4237 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4238 }
4239
4240 if (qseecom.support_bus_scaling) {
4241 mutex_lock(&qsee_bw_mutex);
4242 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4243 mutex_unlock(&qsee_bw_mutex);
4244 if (ret) {
4245 ret = -EIO;
4246 goto exit_free_img_data;
4247 }
4248 }
4249
4250 ret = __qseecom_enable_clk_scale_up(data);
4251 if (ret) {
4252 ret = -EIO;
4253 goto exit_unregister_bus_bw_need;
4254 }
4255
4256 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4257 img_data, fw_size,
4258 ION_IOC_CLEAN_INV_CACHES);
4259 if (ret) {
4260 pr_err("cache operation failed %d\n", ret);
4261 goto exit_disable_clk_vote;
4262 }
4263
4264 /* SCM_CALL to load the image */
4265 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4266 &resp, sizeof(resp));
4267 if (ret) {
4268 pr_err("scm_call to load failed : ret %d\n", ret);
4269 ret = -EIO;
4270 goto exit_disable_clk_vote;
4271 }
4272
4273 switch (resp.result) {
4274 case QSEOS_RESULT_SUCCESS:
4275 *app_id = resp.data;
4276 break;
4277 case QSEOS_RESULT_INCOMPLETE:
4278 ret = __qseecom_process_incomplete_cmd(data, &resp);
4279 if (ret)
4280 pr_err("process_incomplete_cmd FAILED\n");
4281 else
4282 *app_id = resp.data;
4283 break;
4284 case QSEOS_RESULT_FAILURE:
4285 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4286 break;
4287 default:
4288 pr_err("scm call return unknown response %d\n", resp.result);
4289 ret = -EINVAL;
4290 break;
4291 }
4292
4293exit_disable_clk_vote:
4294 __qseecom_disable_clk_scale_down(data);
4295
4296exit_unregister_bus_bw_need:
4297 if (qseecom.support_bus_scaling) {
4298 mutex_lock(&qsee_bw_mutex);
4299 qseecom_unregister_bus_bandwidth_needs(data);
4300 mutex_unlock(&qsee_bw_mutex);
4301 }
4302
4303exit_free_img_data:
4304 __qseecom_free_img_data(&ihandle);
4305 return ret;
4306}
4307
/*
 * qseecom_load_commonlib_image() - load a QSEE common library into TZ.
 * @data:        client handle used for bus-bandwidth / clock voting.
 * @cmnlib_name: library base name ("cmnlib" or "cmnlib64").
 *
 * Reads the split firmware image, stages it in a contiguous ION buffer,
 * flushes the cache, and issues QSEOS_LOAD_SERV_IMAGE_COMMAND via SCM.
 * Returns 0 on success, -EINVAL on bad input or TZ rejection, and -EIO
 * on firmware/allocation/transport failures.
 */
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name)
{
	int ret = 0;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	ion_phys_addr_t pa = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	uint32_t app_arch = 0;
	struct ion_handle *cmnlib_ion_handle = NULL;

	if (!cmnlib_name) {
		pr_err("cmnlib_name is NULL\n");
		return -EINVAL;
	}
	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
			cmnlib_name, strlen(cmnlib_name));
		return -EINVAL;
	}

	/* Query the total size of all firmware segments first. */
	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
		return -EIO;

	ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
						&img_data, fw_size, &pa);
	if (ret)
		return -EIO;

	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}
	/* Pick the 32-bit or 64-bit request layout by QSEE version. */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		load_req.phy_addr = (uint32_t)pa;
		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		cmd_buf = (void *)&load_req;
		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
	} else {
		load_req_64bit.phy_addr = (uint64_t)pa;
		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		load_req_64bit.img_len = load_req.img_len;
		load_req_64bit.mdt_len = load_req.mdt_len;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	/* Make the staged image visible to TZ before the SCM call. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
				img_data, fw_size,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clk_vote;
	}

	/* SCM_CALL to load the image */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
							&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n", ret);
		ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed w/response result%d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener service before finishing the load. */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd failed err: %d\n", ret);
			goto exit_disable_clk_vote;
		}
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	}

exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	__qseecom_free_img_data(&cmnlib_ion_handle);
	return ret;
}
4428
4429static int qseecom_unload_commonlib_image(void)
4430{
4431 int ret = -EINVAL;
4432 struct qseecom_unload_lib_image_ireq unload_req = {0};
4433 struct qseecom_command_scm_resp resp;
4434
4435 /* Populate the remaining parameters */
4436 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4437
4438 /* SCM_CALL to load the image */
4439 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4440 sizeof(struct qseecom_unload_lib_image_ireq),
4441 &resp, sizeof(resp));
4442 if (ret) {
4443 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4444 ret = -EIO;
4445 } else {
4446 switch (resp.result) {
4447 case QSEOS_RESULT_SUCCESS:
4448 break;
4449 case QSEOS_RESULT_FAILURE:
4450 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4451 break;
4452 default:
4453 pr_err("scm call return unknown response %d\n",
4454 resp.result);
4455 ret = -EINVAL;
4456 break;
4457 }
4458 }
4459
4460 return ret;
4461}
4462
4463int qseecom_start_app(struct qseecom_handle **handle,
4464 char *app_name, uint32_t size)
4465{
4466 int32_t ret = 0;
4467 unsigned long flags = 0;
4468 struct qseecom_dev_handle *data = NULL;
4469 struct qseecom_check_app_ireq app_ireq;
4470 struct qseecom_registered_app_list *entry = NULL;
4471 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4472 bool found_app = false;
4473 size_t len;
4474 ion_phys_addr_t pa;
4475 uint32_t fw_size, app_arch;
4476 uint32_t app_id = 0;
4477
4478 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4479 pr_err("Not allowed to be called in %d state\n",
4480 atomic_read(&qseecom.qseecom_state));
4481 return -EPERM;
4482 }
4483 if (!app_name) {
4484 pr_err("failed to get the app name\n");
4485 return -EINVAL;
4486 }
4487
Zhen Kong64a6d7282017-06-16 11:55:07 -07004488 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004489 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004490 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004491 return -EINVAL;
4492 }
4493
4494 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4495 if (!(*handle))
4496 return -ENOMEM;
4497
4498 data = kzalloc(sizeof(*data), GFP_KERNEL);
4499 if (!data) {
4500 if (ret == 0) {
4501 kfree(*handle);
4502 *handle = NULL;
4503 }
4504 return -ENOMEM;
4505 }
4506 data->abort = 0;
4507 data->type = QSEECOM_CLIENT_APP;
4508 data->released = false;
4509 data->client.sb_length = size;
4510 data->client.user_virt_sb_base = 0;
4511 data->client.ihandle = NULL;
4512
4513 init_waitqueue_head(&data->abort_wq);
4514
4515 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4516 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4517 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4518 pr_err("Ion client could not retrieve the handle\n");
4519 kfree(data);
4520 kfree(*handle);
4521 *handle = NULL;
4522 return -EINVAL;
4523 }
4524 mutex_lock(&app_access_lock);
4525
4526 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4527 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4528 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4529 if (ret)
4530 goto err;
4531
4532 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4533 if (app_id) {
4534 pr_warn("App id %d for [%s] app exists\n", app_id,
4535 (char *)app_ireq.app_name);
4536 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4537 list_for_each_entry(entry,
4538 &qseecom.registered_app_list_head, list){
4539 if (entry->app_id == app_id) {
4540 entry->ref_cnt++;
4541 found_app = true;
4542 break;
4543 }
4544 }
4545 spin_unlock_irqrestore(
4546 &qseecom.registered_app_list_lock, flags);
4547 if (!found_app)
4548 pr_warn("App_id %d [%s] was loaded but not registered\n",
4549 ret, (char *)app_ireq.app_name);
4550 } else {
4551 /* load the app and get the app_id */
4552 pr_debug("%s: Loading app for the first time'\n",
4553 qseecom.pdev->init_name);
4554 ret = __qseecom_load_fw(data, app_name, &app_id);
4555 if (ret < 0)
4556 goto err;
4557 }
4558 data->client.app_id = app_id;
4559 if (!found_app) {
4560 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4561 if (!entry) {
4562 pr_err("kmalloc for app entry failed\n");
4563 ret = -ENOMEM;
4564 goto err;
4565 }
4566 entry->app_id = app_id;
4567 entry->ref_cnt = 1;
4568 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4569 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4570 ret = -EIO;
4571 kfree(entry);
4572 goto err;
4573 }
4574 entry->app_arch = app_arch;
4575 entry->app_blocked = false;
4576 entry->blocked_on_listener_id = 0;
4577 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4578 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4579 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4580 flags);
4581 }
4582
4583 /* Get the physical address of the ION BUF */
4584 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4585 if (ret) {
4586 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4587 ret);
4588 goto err;
4589 }
4590
4591 /* Populate the structure for sending scm call to load image */
4592 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4593 data->client.ihandle);
4594 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4595 pr_err("ION memory mapping for client shared buf failed\n");
4596 ret = -ENOMEM;
4597 goto err;
4598 }
4599 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4600 data->client.sb_phys = (phys_addr_t)pa;
4601 (*handle)->dev = (void *)data;
4602 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4603 (*handle)->sbuf_len = data->client.sb_length;
4604
4605 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4606 if (!kclient_entry) {
4607 ret = -ENOMEM;
4608 goto err;
4609 }
4610 kclient_entry->handle = *handle;
4611
4612 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4613 list_add_tail(&kclient_entry->list,
4614 &qseecom.registered_kclient_list_head);
4615 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4616
4617 mutex_unlock(&app_access_lock);
4618 return 0;
4619
4620err:
4621 kfree(data);
4622 kfree(*handle);
4623 *handle = NULL;
4624 mutex_unlock(&app_access_lock);
4625 return ret;
4626}
4627EXPORT_SYMBOL(qseecom_start_app);
4628
4629int qseecom_shutdown_app(struct qseecom_handle **handle)
4630{
4631 int ret = -EINVAL;
4632 struct qseecom_dev_handle *data;
4633
4634 struct qseecom_registered_kclient_list *kclient = NULL;
4635 unsigned long flags = 0;
4636 bool found_handle = false;
4637
4638 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4639 pr_err("Not allowed to be called in %d state\n",
4640 atomic_read(&qseecom.qseecom_state));
4641 return -EPERM;
4642 }
4643
4644 if ((handle == NULL) || (*handle == NULL)) {
4645 pr_err("Handle is not initialized\n");
4646 return -EINVAL;
4647 }
4648 data = (struct qseecom_dev_handle *) ((*handle)->dev);
4649 mutex_lock(&app_access_lock);
4650
4651 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4652 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
4653 list) {
4654 if (kclient->handle == (*handle)) {
4655 list_del(&kclient->list);
4656 found_handle = true;
4657 break;
4658 }
4659 }
4660 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4661 if (!found_handle)
4662 pr_err("Unable to find the handle, exiting\n");
4663 else
4664 ret = qseecom_unload_app(data, false);
4665
4666 mutex_unlock(&app_access_lock);
4667 if (ret == 0) {
4668 kzfree(data);
4669 kzfree(*handle);
4670 kzfree(kclient);
4671 *handle = NULL;
4672 }
4673
4674 return ret;
4675}
4676EXPORT_SYMBOL(qseecom_shutdown_app);
4677
/*
 * qseecom_send_command() - kernel-client API: send a command to a loaded
 * trusted app.
 * @handle:   handle returned by qseecom_start_app().
 * @send_buf: request buffer (validated by __validate_send_cmd_inputs()).
 * @sbuf_len: request length.
 * @resp_buf: response buffer.
 * @rbuf_len: response length.
 *
 * Serialized by app_access_lock.  Votes for bus bandwidth and/or crypto
 * clocks around the TZ call when the platform requires it, and drops any
 * vote it took on the caller's behalf before returning.
 */
int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
{
	int ret = 0;
	struct qseecom_send_cmd_req req = {0, 0, 0, 0};
	struct qseecom_dev_handle *data;
	bool perf_enabled = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
			atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if (handle == NULL) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = handle->dev;

	req.cmd_req_len = sbuf_len;
	req.resp_len = rbuf_len;
	req.cmd_req_buf = send_buf;
	req.resp_buf = resp_buf;

	if (__validate_send_cmd_inputs(data, &req))
		return -EINVAL;

	mutex_lock(&app_access_lock);
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
		if (ret) {
			pr_err("Failed to set bw.\n");
			mutex_unlock(&app_access_lock);
			return ret;
		}
	}
	/*
	 * On targets where crypto clock is handled by HLOS,
	 * if clk_access_cnt is zero and perf_enabled is false,
	 * then the crypto clock was not enabled before sending cmd
	 * to tz, qseecom will enable the clock to avoid service failure.
	 */
	if (!qseecom.no_clock_support &&
		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
		pr_debug("ce clock is not enabled!\n");
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clock with err %d\n",
				ret);
			mutex_unlock(&app_access_lock);
			return -EINVAL;
		}
		perf_enabled = true;
	}
	/* The "securemm" app still uses the legacy command format. */
	if (!strcmp(data->client.app_name, "securemm"))
		data->use_legacy_cmd = true;

	ret = __qseecom_send_cmd(data, &req);
	data->use_legacy_cmd = false;
	if (qseecom.support_bus_scaling)
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);

	/* Drop the clock vote taken above on the caller's behalf. */
	if (perf_enabled) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	}

	mutex_unlock(&app_access_lock);

	if (ret)
		return ret;

	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
			req.resp_len, req.resp_buf);
	return ret;
}
EXPORT_SYMBOL(qseecom_send_command);
4757
4758int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4759{
4760 int ret = 0;
4761
4762 if ((handle == NULL) || (handle->dev == NULL)) {
4763 pr_err("No valid kernel client\n");
4764 return -EINVAL;
4765 }
4766 if (high) {
4767 if (qseecom.support_bus_scaling) {
4768 mutex_lock(&qsee_bw_mutex);
4769 __qseecom_register_bus_bandwidth_needs(handle->dev,
4770 HIGH);
4771 mutex_unlock(&qsee_bw_mutex);
4772 } else {
4773 ret = qseecom_perf_enable(handle->dev);
4774 if (ret)
4775 pr_err("Failed to vote for clock with err %d\n",
4776 ret);
4777 }
4778 } else {
4779 if (!qseecom.support_bus_scaling) {
4780 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4781 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4782 } else {
4783 mutex_lock(&qsee_bw_mutex);
4784 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4785 mutex_unlock(&qsee_bw_mutex);
4786 }
4787 }
4788 return ret;
4789}
4790EXPORT_SYMBOL(qseecom_set_bandwidth);
4791
/*
 * qseecom_process_listener_from_smcinvoke() - service a listener request
 * that arrived via the smcinvoke driver instead of qseecom's own SCM
 * path.
 * @desc: SCM descriptor; on entry ret[0] = result/request command,
 *        ret[1] = response type (session id for blocked requests),
 *        ret[2] = listener id/data.  On return the processed response is
 *        written back into the same slots.
 *
 * Builds dummy app/client contexts around the response and runs the
 * standard reentrancy or incomplete-command processing under
 * app_access_lock.  Returns the processing result (0 on success).
 */
int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
{
	struct qseecom_registered_app_list dummy_app_entry = { {0} };
	struct qseecom_dev_handle dummy_private_data = {0};
	struct qseecom_command_scm_resp resp;
	int ret = 0;

	if (!desc) {
		pr_err("desc is NULL\n");
		return -EINVAL;
	}

	resp.result = desc->ret[0];	/*req_cmd*/
	resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/
	resp.data = desc->ret[2];	/*listener_id*/

	/* ret[1] doubles as the app/session id for the dummy contexts. */
	dummy_private_data.client.app_id = desc->ret[1];
	dummy_app_entry.app_id = desc->ret[1];

	mutex_lock(&app_access_lock);
	if (qseecom.qsee_reentrancy_support)
		ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
					&dummy_private_data);
	else
		ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
					&resp);
	mutex_unlock(&app_access_lock);
	if (ret)
		pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
			(int)desc->ret[0], (int)desc->ret[2],
			(int)desc->ret[1], ret);
	/* Hand the processed response back to the smcinvoke caller. */
	desc->ret[0] = resp.result;
	desc->ret[1] = resp.resp_type;
	desc->ret[2] = resp.data;
	return ret;
}
EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4829
/*
 * Mark a listener response as ready and wake whoever is waiting on
 * send_resp_wq.  Always returns 0.
 */
static int qseecom_send_resp(void)
{
	qseecom.send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4836
4837static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4838{
4839 struct qseecom_registered_listener_list *this_lstnr = NULL;
4840
4841 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4842 this_lstnr = __qseecom_find_svc(data->listener.id);
4843 if (this_lstnr == NULL)
4844 return -EINVAL;
4845 qseecom.send_resp_flag = 1;
4846 this_lstnr->send_resp_flag = 1;
4847 wake_up_interruptible(&qseecom.send_resp_wq);
4848 return 0;
4849}
4850
4851static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
4852 struct qseecom_send_modfd_listener_resp *resp,
4853 struct qseecom_registered_listener_list *this_lstnr)
4854{
4855 int i;
4856
4857 if (!data || !resp || !this_lstnr) {
4858 pr_err("listener handle or resp msg is null\n");
4859 return -EINVAL;
4860 }
4861
4862 if (resp->resp_buf_ptr == NULL) {
4863 pr_err("resp buffer is null\n");
4864 return -EINVAL;
4865 }
4866 /* validate resp buf length */
4867 if ((resp->resp_len == 0) ||
4868 (resp->resp_len > this_lstnr->sb_length)) {
4869 pr_err("resp buf length %d not valid\n", resp->resp_len);
4870 return -EINVAL;
4871 }
4872
4873 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
4874 pr_err("Integer overflow in resp_len & resp_buf\n");
4875 return -EINVAL;
4876 }
4877 if ((uintptr_t)this_lstnr->user_virt_sb_base >
4878 (ULONG_MAX - this_lstnr->sb_length)) {
4879 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
4880 return -EINVAL;
4881 }
4882 /* validate resp buf */
4883 if (((uintptr_t)resp->resp_buf_ptr <
4884 (uintptr_t)this_lstnr->user_virt_sb_base) ||
4885 ((uintptr_t)resp->resp_buf_ptr >=
4886 ((uintptr_t)this_lstnr->user_virt_sb_base +
4887 this_lstnr->sb_length)) ||
4888 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
4889 ((uintptr_t)this_lstnr->user_virt_sb_base +
4890 this_lstnr->sb_length))) {
4891 pr_err("resp buf is out of shared buffer region\n");
4892 return -EINVAL;
4893 }
4894
4895 /* validate offsets */
4896 for (i = 0; i < MAX_ION_FD; i++) {
4897 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
4898 pr_err("Invalid offset %d = 0x%x\n",
4899 i, resp->ifd_data[i].cmd_buf_offset);
4900 return -EINVAL;
4901 }
4902 }
4903
4904 return 0;
4905}
4906
4907static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
4908 void __user *argp, bool is_64bit_addr)
4909{
4910 struct qseecom_send_modfd_listener_resp resp;
4911 struct qseecom_registered_listener_list *this_lstnr = NULL;
4912
4913 if (copy_from_user(&resp, argp, sizeof(resp))) {
4914 pr_err("copy_from_user failed");
4915 return -EINVAL;
4916 }
4917
4918 this_lstnr = __qseecom_find_svc(data->listener.id);
4919 if (this_lstnr == NULL)
4920 return -EINVAL;
4921
4922 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
4923 return -EINVAL;
4924
4925 resp.resp_buf_ptr = this_lstnr->sb_virt +
4926 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
4927
4928 if (!is_64bit_addr)
4929 __qseecom_update_cmd_buf(&resp, false, data);
4930 else
4931 __qseecom_update_cmd_buf_64(&resp, false, data);
4932 qseecom.send_resp_flag = 1;
4933 this_lstnr->send_resp_flag = 1;
4934 wake_up_interruptible(&qseecom.send_resp_wq);
4935 return 0;
4936}
4937
/* Listener response with 32-bit address layout (is_64bit_addr = false). */
static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, false);
}
4943
/* Listener response with 64-bit address layout (is_64bit_addr = true). */
static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, true);
}
4949
4950static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
4951 void __user *argp)
4952{
4953 struct qseecom_qseos_version_req req;
4954
4955 if (copy_from_user(&req, argp, sizeof(req))) {
4956 pr_err("copy_from_user failed");
4957 return -EINVAL;
4958 }
4959 req.qseos_version = qseecom.qseos_version;
4960 if (copy_to_user(argp, &req, sizeof(req))) {
4961 pr_err("copy_to_user failed");
4962 return -EINVAL;
4963 }
4964 return 0;
4965}
4966
/*
 * __qseecom_enable_clk() - take a reference on the crypto-engine clocks.
 * @ce: CLK_QSEE or CLK_CE_DRV, selecting which clock bundle to enable.
 *
 * Reference-counted under clk_access_lock: only the 0 -> 1 transition
 * actually prepares/enables the core, interface and bus clocks; later
 * callers just bump the count.  Partially enabled clocks are rolled
 * back on failure.  Returns 0 on success, -EINVAL for an unknown @ce,
 * -EIO if any clock fails to enable or the count would overflow.
 */
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
{
	int rc = 0;
	struct qseecom_clk *qclk = NULL;

	/* Nothing to do when this target does not manage crypto clocks. */
	if (qseecom.no_clock_support)
		return 0;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	if (ce == CLK_CE_DRV)
		qclk = &qseecom.ce_drv;

	if (qclk == NULL) {
		pr_err("CLK type not supported\n");
		return -EINVAL;
	}
	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == ULONG_MAX) {
		pr_err("clk_access_cnt beyond limitation\n");
		goto err;
	}
	if (qclk->clk_access_cnt > 0) {
		/* Clocks already on; just take another reference. */
		qclk->clk_access_cnt++;
		mutex_unlock(&clk_access_lock);
		return rc;
	}

	/* Enable CE core clk */
	if (qclk->ce_core_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_core_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core clk\n");
			goto err;
		}
	}
	/* Enable CE clk */
	if (qclk->ce_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE iface clk\n");
			goto ce_clk_err;
		}
	}
	/* Enable AXI clk */
	if (qclk->ce_bus_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_bus_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE bus clk\n");
			goto ce_bus_clk_err;
		}
	}
	qclk->clk_access_cnt++;
	mutex_unlock(&clk_access_lock);
	return 0;

/* Roll back in reverse order of enabling. */
ce_bus_clk_err:
	if (qclk->ce_clk != NULL)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk != NULL)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	return -EIO;
}
5034
5035static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5036{
5037 struct qseecom_clk *qclk;
5038
5039 if (qseecom.no_clock_support)
5040 return;
5041
5042 if (ce == CLK_QSEE)
5043 qclk = &qseecom.qsee;
5044 else
5045 qclk = &qseecom.ce_drv;
5046
5047 mutex_lock(&clk_access_lock);
5048
5049 if (qclk->clk_access_cnt == 0) {
5050 mutex_unlock(&clk_access_lock);
5051 return;
5052 }
5053
5054 if (qclk->clk_access_cnt == 1) {
5055 if (qclk->ce_clk != NULL)
5056 clk_disable_unprepare(qclk->ce_clk);
5057 if (qclk->ce_core_clk != NULL)
5058 clk_disable_unprepare(qclk->ce_core_clk);
5059 if (qclk->ce_bus_clk != NULL)
5060 clk_disable_unprepare(qclk->ce_bus_clk);
5061 }
5062 qclk->clk_access_cnt--;
5063 mutex_unlock(&clk_access_lock);
5064}
5065
/*
 * qsee_vote_for_clock() - take a bandwidth/clock vote on behalf of a client.
 * @data: client handle; its perf_enabled/fast_load_enabled flags track
 *        which votes this client holds.
 * @clk_type: CLK_DFAB or CLK_SFPB.
 *
 * Bus votes are reference-counted per type under qsee_bw_mutex.  The bus
 * scale request levels appear to be: 0 = off, 1 = DFAB only, 2 = SFPB
 * only, 3 = both -- presumably matching the msm-bus vote table; confirm
 * against the platform's bus scaling data.
 *
 * Return: 0 on success or the msm_bus/clock error code.
 */
static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
						int32_t clk_type)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	if (qseecom.no_clock_support)
		return 0;

	qclk = &qseecom.qsee;
	if (!qseecom.qsee_perf_client)
		return ret;

	switch (clk_type) {
	case CLK_DFAB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_bw_count) {
			/* If SFPB is already voted, upgrade to "both" (3). */
			if (qseecom.qsee_sfpb_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				/*
				 * First vote of any kind: the CE source clock
				 * must be on before the bus request.
				 */
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 1);
					/* Roll the clock back on failure. */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret)
				pr_err("DFAB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_bw_count++;
				data->perf_enabled = true;
			}
		} else {
			/* Vote already held by someone: just refcount. */
			qseecom.qsee_bw_count++;
			data->perf_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	case CLK_SFPB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_sfpb_bw_count) {
			/* If DFAB is already voted, upgrade to "both" (3). */
			if (qseecom.qsee_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 2);
					/* Roll the clock back on failure. */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}

			if (ret)
				pr_err("SFPB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_sfpb_bw_count++;
				data->fast_load_enabled = true;
			}
		} else {
			/* Vote already held by someone: just refcount. */
			qseecom.qsee_sfpb_bw_count++;
			data->fast_load_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	default:
		pr_err("Clock type not defined\n");
		break;
	}
	return ret;
}
5149
5150static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5151 int32_t clk_type)
5152{
5153 int32_t ret = 0;
5154 struct qseecom_clk *qclk;
5155
5156 qclk = &qseecom.qsee;
5157
5158 if (qseecom.no_clock_support)
5159 return;
5160 if (!qseecom.qsee_perf_client)
5161 return;
5162
5163 switch (clk_type) {
5164 case CLK_DFAB:
5165 mutex_lock(&qsee_bw_mutex);
5166 if (qseecom.qsee_bw_count == 0) {
5167 pr_err("Client error.Extra call to disable DFAB clk\n");
5168 mutex_unlock(&qsee_bw_mutex);
5169 return;
5170 }
5171
5172 if (qseecom.qsee_bw_count == 1) {
5173 if (qseecom.qsee_sfpb_bw_count > 0)
5174 ret = msm_bus_scale_client_update_request(
5175 qseecom.qsee_perf_client, 2);
5176 else {
5177 ret = msm_bus_scale_client_update_request(
5178 qseecom.qsee_perf_client, 0);
5179 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5180 __qseecom_disable_clk(CLK_QSEE);
5181 }
5182 if (ret)
5183 pr_err("SFPB Bandwidth req fail (%d)\n",
5184 ret);
5185 else {
5186 qseecom.qsee_bw_count--;
5187 data->perf_enabled = false;
5188 }
5189 } else {
5190 qseecom.qsee_bw_count--;
5191 data->perf_enabled = false;
5192 }
5193 mutex_unlock(&qsee_bw_mutex);
5194 break;
5195 case CLK_SFPB:
5196 mutex_lock(&qsee_bw_mutex);
5197 if (qseecom.qsee_sfpb_bw_count == 0) {
5198 pr_err("Client error.Extra call to disable SFPB clk\n");
5199 mutex_unlock(&qsee_bw_mutex);
5200 return;
5201 }
5202 if (qseecom.qsee_sfpb_bw_count == 1) {
5203 if (qseecom.qsee_bw_count > 0)
5204 ret = msm_bus_scale_client_update_request(
5205 qseecom.qsee_perf_client, 1);
5206 else {
5207 ret = msm_bus_scale_client_update_request(
5208 qseecom.qsee_perf_client, 0);
5209 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5210 __qseecom_disable_clk(CLK_QSEE);
5211 }
5212 if (ret)
5213 pr_err("SFPB Bandwidth req fail (%d)\n",
5214 ret);
5215 else {
5216 qseecom.qsee_sfpb_bw_count--;
5217 data->fast_load_enabled = false;
5218 }
5219 } else {
5220 qseecom.qsee_sfpb_bw_count--;
5221 data->fast_load_enabled = false;
5222 }
5223 mutex_unlock(&qsee_bw_mutex);
5224 break;
5225 default:
5226 pr_err("Clock type not defined\n");
5227 break;
5228 }
5229
5230}
5231
5232static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5233 void __user *argp)
5234{
5235 struct ion_handle *ihandle; /* Ion handle */
5236 struct qseecom_load_img_req load_img_req;
5237 int uret = 0;
5238 int ret;
5239 ion_phys_addr_t pa = 0;
5240 size_t len;
5241 struct qseecom_load_app_ireq load_req;
5242 struct qseecom_load_app_64bit_ireq load_req_64bit;
5243 struct qseecom_command_scm_resp resp;
5244 void *cmd_buf = NULL;
5245 size_t cmd_len;
5246 /* Copy the relevant information needed for loading the image */
5247 if (copy_from_user(&load_img_req,
5248 (void __user *)argp,
5249 sizeof(struct qseecom_load_img_req))) {
5250 pr_err("copy_from_user failed\n");
5251 return -EFAULT;
5252 }
5253
5254 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005255 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005256 load_img_req.ifd_data_fd);
5257 if (IS_ERR_OR_NULL(ihandle)) {
5258 pr_err("Ion client could not retrieve the handle\n");
5259 return -ENOMEM;
5260 }
5261
5262 /* Get the physical address of the ION BUF */
5263 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5264 if (ret) {
5265 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5266 ret);
5267 return ret;
5268 }
5269 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5270 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5271 len, load_img_req.mdt_len,
5272 load_img_req.img_len);
5273 return ret;
5274 }
5275 /* Populate the structure for sending scm call to load image */
5276 if (qseecom.qsee_version < QSEE_VERSION_40) {
5277 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5278 load_req.mdt_len = load_img_req.mdt_len;
5279 load_req.img_len = load_img_req.img_len;
5280 load_req.phy_addr = (uint32_t)pa;
5281 cmd_buf = (void *)&load_req;
5282 cmd_len = sizeof(struct qseecom_load_app_ireq);
5283 } else {
5284 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5285 load_req_64bit.mdt_len = load_img_req.mdt_len;
5286 load_req_64bit.img_len = load_img_req.img_len;
5287 load_req_64bit.phy_addr = (uint64_t)pa;
5288 cmd_buf = (void *)&load_req_64bit;
5289 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5290 }
5291
5292 if (qseecom.support_bus_scaling) {
5293 mutex_lock(&qsee_bw_mutex);
5294 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5295 mutex_unlock(&qsee_bw_mutex);
5296 if (ret) {
5297 ret = -EIO;
5298 goto exit_cpu_restore;
5299 }
5300 }
5301
5302 /* Vote for the SFPB clock */
5303 ret = __qseecom_enable_clk_scale_up(data);
5304 if (ret) {
5305 ret = -EIO;
5306 goto exit_register_bus_bandwidth_needs;
5307 }
5308 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5309 ION_IOC_CLEAN_INV_CACHES);
5310 if (ret) {
5311 pr_err("cache operation failed %d\n", ret);
5312 goto exit_disable_clock;
5313 }
5314 /* SCM_CALL to load the external elf */
5315 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5316 &resp, sizeof(resp));
5317 if (ret) {
5318 pr_err("scm_call to load failed : ret %d\n",
5319 ret);
5320 ret = -EFAULT;
5321 goto exit_disable_clock;
5322 }
5323
5324 switch (resp.result) {
5325 case QSEOS_RESULT_SUCCESS:
5326 break;
5327 case QSEOS_RESULT_INCOMPLETE:
5328 pr_err("%s: qseos result incomplete\n", __func__);
5329 ret = __qseecom_process_incomplete_cmd(data, &resp);
5330 if (ret)
5331 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5332 break;
5333 case QSEOS_RESULT_FAILURE:
5334 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5335 ret = -EFAULT;
5336 break;
5337 default:
5338 pr_err("scm_call response result %d not supported\n",
5339 resp.result);
5340 ret = -EFAULT;
5341 break;
5342 }
5343
5344exit_disable_clock:
5345 __qseecom_disable_clk_scale_down(data);
5346
5347exit_register_bus_bandwidth_needs:
5348 if (qseecom.support_bus_scaling) {
5349 mutex_lock(&qsee_bw_mutex);
5350 uret = qseecom_unregister_bus_bandwidth_needs(data);
5351 mutex_unlock(&qsee_bw_mutex);
5352 if (uret)
5353 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5354 uret, ret);
5355 }
5356
5357exit_cpu_restore:
5358 /* Deallocate the handle */
5359 if (!IS_ERR_OR_NULL(ihandle))
5360 ion_free(qseecom.ion_clnt, ihandle);
5361 return ret;
5362}
5363
5364static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5365{
5366 int ret = 0;
5367 struct qseecom_command_scm_resp resp;
5368 struct qseecom_unload_app_ireq req;
5369
5370 /* unavailable client app */
5371 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5372
5373 /* Populate the structure for sending scm call to unload image */
5374 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5375
5376 /* SCM_CALL to unload the external elf */
5377 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5378 sizeof(struct qseecom_unload_app_ireq),
5379 &resp, sizeof(resp));
5380 if (ret) {
5381 pr_err("scm_call to unload failed : ret %d\n",
5382 ret);
5383 ret = -EFAULT;
5384 goto qseecom_unload_external_elf_scm_err;
5385 }
5386 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5387 ret = __qseecom_process_incomplete_cmd(data, &resp);
5388 if (ret)
5389 pr_err("process_incomplete_cmd fail err: %d\n",
5390 ret);
5391 } else {
5392 if (resp.result != QSEOS_RESULT_SUCCESS) {
5393 pr_err("scm_call to unload image failed resp.result =%d\n",
5394 resp.result);
5395 ret = -EFAULT;
5396 }
5397 }
5398
5399qseecom_unload_external_elf_scm_err:
5400
5401 return ret;
5402}
5403
5404static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5405 void __user *argp)
5406{
5407
5408 int32_t ret;
5409 struct qseecom_qseos_app_load_query query_req;
5410 struct qseecom_check_app_ireq req;
5411 struct qseecom_registered_app_list *entry = NULL;
5412 unsigned long flags = 0;
5413 uint32_t app_arch = 0, app_id = 0;
5414 bool found_app = false;
5415
5416 /* Copy the relevant information needed for loading the image */
5417 if (copy_from_user(&query_req,
5418 (void __user *)argp,
5419 sizeof(struct qseecom_qseos_app_load_query))) {
5420 pr_err("copy_from_user failed\n");
5421 return -EFAULT;
5422 }
5423
5424 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5425 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5426 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5427
5428 ret = __qseecom_check_app_exists(req, &app_id);
5429 if (ret) {
5430 pr_err(" scm call to check if app is loaded failed");
5431 return ret; /* scm call failed */
5432 }
5433 if (app_id) {
5434 pr_debug("App id %d (%s) already exists\n", app_id,
5435 (char *)(req.app_name));
5436 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5437 list_for_each_entry(entry,
5438 &qseecom.registered_app_list_head, list){
5439 if (entry->app_id == app_id) {
5440 app_arch = entry->app_arch;
5441 entry->ref_cnt++;
5442 found_app = true;
5443 break;
5444 }
5445 }
5446 spin_unlock_irqrestore(
5447 &qseecom.registered_app_list_lock, flags);
5448 data->client.app_id = app_id;
5449 query_req.app_id = app_id;
5450 if (app_arch) {
5451 data->client.app_arch = app_arch;
5452 query_req.app_arch = app_arch;
5453 } else {
5454 data->client.app_arch = 0;
5455 query_req.app_arch = 0;
5456 }
5457 strlcpy(data->client.app_name, query_req.app_name,
5458 MAX_APP_NAME_SIZE);
5459 /*
5460 * If app was loaded by appsbl before and was not registered,
5461 * regiser this app now.
5462 */
5463 if (!found_app) {
5464 pr_debug("Register app %d [%s] which was loaded before\n",
5465 ret, (char *)query_req.app_name);
5466 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5467 if (!entry) {
5468 pr_err("kmalloc for app entry failed\n");
5469 return -ENOMEM;
5470 }
5471 entry->app_id = app_id;
5472 entry->ref_cnt = 1;
5473 entry->app_arch = data->client.app_arch;
5474 strlcpy(entry->app_name, data->client.app_name,
5475 MAX_APP_NAME_SIZE);
5476 entry->app_blocked = false;
5477 entry->blocked_on_listener_id = 0;
5478 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5479 flags);
5480 list_add_tail(&entry->list,
5481 &qseecom.registered_app_list_head);
5482 spin_unlock_irqrestore(
5483 &qseecom.registered_app_list_lock, flags);
5484 }
5485 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5486 pr_err("copy_to_user failed\n");
5487 return -EFAULT;
5488 }
5489 return -EEXIST; /* app already loaded */
5490 } else {
5491 return 0; /* app not loaded */
5492 }
5493}
5494
5495static int __qseecom_get_ce_pipe_info(
5496 enum qseecom_key_management_usage_type usage,
5497 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5498{
5499 int ret = -EINVAL;
5500 int i, j;
5501 struct qseecom_ce_info_use *p = NULL;
5502 int total = 0;
5503 struct qseecom_ce_pipe_entry *pcepipe;
5504
5505 switch (usage) {
5506 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5507 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5508 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5509 if (qseecom.support_fde) {
5510 p = qseecom.ce_info.fde;
5511 total = qseecom.ce_info.num_fde;
5512 } else {
5513 pr_err("system does not support fde\n");
5514 return -EINVAL;
5515 }
5516 break;
5517 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5518 if (qseecom.support_pfe) {
5519 p = qseecom.ce_info.pfe;
5520 total = qseecom.ce_info.num_pfe;
5521 } else {
5522 pr_err("system does not support pfe\n");
5523 return -EINVAL;
5524 }
5525 break;
5526 default:
5527 pr_err("unsupported usage %d\n", usage);
5528 return -EINVAL;
5529 }
5530
5531 for (j = 0; j < total; j++) {
5532 if (p->unit_num == unit) {
5533 pcepipe = p->ce_pipe_entry;
5534 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5535 (*ce_hw)[i] = pcepipe->ce_num;
5536 *pipe = pcepipe->ce_pipe_pair;
5537 pcepipe++;
5538 }
5539 ret = 0;
5540 break;
5541 }
5542 p++;
5543 }
5544 return ret;
5545}
5546
/*
 * __qseecom_generate_and_save_key() - ask TZ to generate and store a key.
 * @data:  client handle (used for INCOMPLETE-command processing).
 * @usage: key-management usage; must be a valid QSEOS_KM_USAGE_* value.
 * @ireq:  prepared QSEOS_GENERATE_KEY request.
 *
 * A QSEOS_RESULT_FAIL_KEY_ID_EXISTS response (directly, via -EINVAL from
 * the scm layer, or after an INCOMPLETE round-trip) is treated as
 * success: the key is already there.  CE clocks are held for the
 * duration of the call.
 *
 * Return: 0 on success/key-exists, -EFAULT or -EINVAL on failure.
 */
static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_generate_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_generate_ireq),
		&resp, sizeof(resp));
	if (ret) {
		/* scm may map KEY_ID_EXISTS to -EINVAL; not an error. */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
			pr_debug("Key ID exists.\n");
			ret = 0;
		} else {
			pr_err("scm call to generate key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto generate_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
		pr_debug("Key ID exists.\n");
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
				pr_debug("Key ID exists.\n");
				ret = 0;
			} else {
				pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			}
		}
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("gen key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}
generate_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5606
/*
 * __qseecom_delete_saved_key() - ask TZ to delete a stored key.
 * @data:  client handle (used for INCOMPLETE-command processing).
 * @usage: key-management usage; must be a valid QSEOS_KM_USAGE_* value.
 * @ireq:  prepared QSEOS_DELETE_KEY request.
 *
 * QSEOS_RESULT_FAIL_MAX_ATTEMPT (password retry limit reached, directly
 * or after an INCOMPLETE round-trip) is reported as -ERANGE so callers
 * can distinguish it from hard failures.  CE clocks are held for the
 * duration of the call.
 *
 * Return: 0 on success, -ERANGE on max attempts, -EFAULT/-EINVAL on
 * failure.
 */
static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_delete_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_delete_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/* scm may map MAX_ATTEMPT to -EINVAL; translate to -ERANGE. */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
			pr_debug("Max attempts to input password reached.\n");
			ret = -ERANGE;
		} else {
			pr_err("scm call to delete key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto del_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
				pr_debug("Max attempts to input password reached.\n");
				ret = -ERANGE;
			}
		}
		break;
	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
		pr_debug("Max attempts to input password reached.\n");
		ret = -ERANGE;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Delete key scm call failed resp.result %d\n",
			resp.result);
		ret = -EINVAL;
		break;
	}
del_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5667
5668static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5669 enum qseecom_key_management_usage_type usage,
5670 struct qseecom_key_select_ireq *ireq)
5671{
5672 struct qseecom_command_scm_resp resp;
5673 int ret;
5674
5675 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5676 usage >= QSEOS_KM_USAGE_MAX) {
5677 pr_err("Error:: unsupported usage %d\n", usage);
5678 return -EFAULT;
5679 }
5680 ret = __qseecom_enable_clk(CLK_QSEE);
5681 if (ret)
5682 return ret;
5683
5684 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5685 ret = __qseecom_enable_clk(CLK_CE_DRV);
5686 if (ret)
5687 return ret;
5688 }
5689
5690 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5691 ireq, sizeof(struct qseecom_key_select_ireq),
5692 &resp, sizeof(struct qseecom_command_scm_resp));
5693 if (ret) {
5694 if (ret == -EINVAL &&
5695 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5696 pr_debug("Max attempts to input password reached.\n");
5697 ret = -ERANGE;
5698 } else if (ret == -EINVAL &&
5699 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5700 pr_debug("Set Key operation under processing...\n");
5701 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5702 } else {
5703 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5704 ret);
5705 ret = -EFAULT;
5706 }
5707 goto set_key_exit;
5708 }
5709
5710 switch (resp.result) {
5711 case QSEOS_RESULT_SUCCESS:
5712 break;
5713 case QSEOS_RESULT_INCOMPLETE:
5714 ret = __qseecom_process_incomplete_cmd(data, &resp);
5715 if (ret) {
5716 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5717 resp.result);
5718 if (resp.result ==
5719 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5720 pr_debug("Set Key operation under processing...\n");
5721 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5722 }
5723 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5724 pr_debug("Max attempts to input password reached.\n");
5725 ret = -ERANGE;
5726 }
5727 }
5728 break;
5729 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5730 pr_debug("Max attempts to input password reached.\n");
5731 ret = -ERANGE;
5732 break;
5733 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5734 pr_debug("Set Key operation under processing...\n");
5735 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5736 break;
5737 case QSEOS_RESULT_FAILURE:
5738 default:
5739 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5740 ret = -EINVAL;
5741 break;
5742 }
5743set_key_exit:
5744 __qseecom_disable_clk(CLK_QSEE);
5745 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5746 __qseecom_disable_clk(CLK_CE_DRV);
5747 return ret;
5748}
5749
/*
 * __qseecom_update_current_key_user_info() - update the hash bound to a key.
 * @data:  client handle (used for INCOMPLETE-command processing).
 * @usage: key-management usage; must be a valid QSEOS_KM_USAGE_* value.
 * @ireq:  prepared QSEOS_UPDATE_KEY_USERINFO request.
 *
 * QSEOS_RESULT_FAIL_PENDING_OPERATION is returned as-is (positive) so
 * the caller can retry.  CE clocks are held for the duration.
 *
 * NOTE(review): when the scm call fails with the PENDING result, control
 * deliberately falls through to the resp.result switch below, which
 * re-derives the same return value -- order-sensitive, do not reshuffle.
 *
 * Return: 0 on success, -EFAULT/-EINVAL on failure, or the positive
 * pending-operation code.
 */
static int __qseecom_update_current_key_user_info(
		struct qseecom_dev_handle *data,
		enum qseecom_key_management_usage_type usage,
		struct qseecom_key_userinfo_update_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		} else {
			pr_err("scm call to update key userinfo failed: %d\n",
					ret);
			__qseecom_disable_clk(CLK_QSEE);
			return -EFAULT;
		}
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		/* PENDING takes precedence over the incomplete-cmd result. */
		if (resp.result ==
			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		}
		if (ret)
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
		break;
	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
		pr_debug("Update Key operation under processing...\n");
		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Set key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}

	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5811
5812
5813static int qseecom_enable_ice_setup(int usage)
5814{
5815 int ret = 0;
5816
5817 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5818 ret = qcom_ice_setup_ice_hw("ufs", true);
5819 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5820 ret = qcom_ice_setup_ice_hw("sdcc", true);
5821
5822 return ret;
5823}
5824
5825static int qseecom_disable_ice_setup(int usage)
5826{
5827 int ret = 0;
5828
5829 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5830 ret = qcom_ice_setup_ice_hw("ufs", false);
5831 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5832 ret = qcom_ice_setup_ice_hw("sdcc", false);
5833
5834 return ret;
5835}
5836
5837static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5838{
5839 struct qseecom_ce_info_use *pce_info_use, *p;
5840 int total = 0;
5841 int i;
5842
5843 switch (usage) {
5844 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5845 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5846 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5847 p = qseecom.ce_info.fde;
5848 total = qseecom.ce_info.num_fde;
5849 break;
5850 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5851 p = qseecom.ce_info.pfe;
5852 total = qseecom.ce_info.num_pfe;
5853 break;
5854 default:
5855 pr_err("unsupported usage %d\n", usage);
5856 return -EINVAL;
5857 }
5858
5859 pce_info_use = NULL;
5860
5861 for (i = 0; i < total; i++) {
5862 if (p->unit_num == unit) {
5863 pce_info_use = p;
5864 break;
5865 }
5866 p++;
5867 }
5868 if (!pce_info_use) {
5869 pr_err("can not find %d\n", unit);
5870 return -EINVAL;
5871 }
5872 return pce_info_use->num_ce_pipe_entries;
5873}
5874
5875static int qseecom_create_key(struct qseecom_dev_handle *data,
5876 void __user *argp)
5877{
5878 int i;
5879 uint32_t *ce_hw = NULL;
5880 uint32_t pipe = 0;
5881 int ret = 0;
5882 uint32_t flags = 0;
5883 struct qseecom_create_key_req create_key_req;
5884 struct qseecom_key_generate_ireq generate_key_ireq;
5885 struct qseecom_key_select_ireq set_key_ireq;
5886 uint32_t entries = 0;
5887
5888 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
5889 if (ret) {
5890 pr_err("copy_from_user failed\n");
5891 return ret;
5892 }
5893
5894 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5895 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5896 pr_err("unsupported usage %d\n", create_key_req.usage);
5897 ret = -EFAULT;
5898 return ret;
5899 }
5900 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5901 create_key_req.usage);
5902 if (entries <= 0) {
5903 pr_err("no ce instance for usage %d instance %d\n",
5904 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
5905 ret = -EINVAL;
5906 return ret;
5907 }
5908
5909 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5910 if (!ce_hw) {
5911 ret = -ENOMEM;
5912 return ret;
5913 }
5914 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
5915 DEFAULT_CE_INFO_UNIT);
5916 if (ret) {
5917 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5918 ret = -EINVAL;
5919 goto free_buf;
5920 }
5921
5922 if (qseecom.fde_key_size)
5923 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
5924 else
5925 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
5926
5927 generate_key_ireq.flags = flags;
5928 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
5929 memset((void *)generate_key_ireq.key_id,
5930 0, QSEECOM_KEY_ID_SIZE);
5931 memset((void *)generate_key_ireq.hash32,
5932 0, QSEECOM_HASH_SIZE);
5933 memcpy((void *)generate_key_ireq.key_id,
5934 (void *)key_id_array[create_key_req.usage].desc,
5935 QSEECOM_KEY_ID_SIZE);
5936 memcpy((void *)generate_key_ireq.hash32,
5937 (void *)create_key_req.hash32,
5938 QSEECOM_HASH_SIZE);
5939
5940 ret = __qseecom_generate_and_save_key(data,
5941 create_key_req.usage, &generate_key_ireq);
5942 if (ret) {
5943 pr_err("Failed to generate key on storage: %d\n", ret);
5944 goto free_buf;
5945 }
5946
5947 for (i = 0; i < entries; i++) {
5948 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
5949 if (create_key_req.usage ==
5950 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
5951 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
5952 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5953
5954 } else if (create_key_req.usage ==
5955 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
5956 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
5957 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5958
5959 } else {
5960 set_key_ireq.ce = ce_hw[i];
5961 set_key_ireq.pipe = pipe;
5962 }
5963 set_key_ireq.flags = flags;
5964
5965 /* set both PIPE_ENC and PIPE_ENC_XTS*/
5966 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
5967 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
5968 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5969 memcpy((void *)set_key_ireq.key_id,
5970 (void *)key_id_array[create_key_req.usage].desc,
5971 QSEECOM_KEY_ID_SIZE);
5972 memcpy((void *)set_key_ireq.hash32,
5973 (void *)create_key_req.hash32,
5974 QSEECOM_HASH_SIZE);
5975 /*
5976 * It will return false if it is GPCE based crypto instance or
5977 * ICE is setup properly
5978 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07005979 ret = qseecom_enable_ice_setup(create_key_req.usage);
5980 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005981 goto free_buf;
5982
5983 do {
5984 ret = __qseecom_set_clear_ce_key(data,
5985 create_key_req.usage,
5986 &set_key_ireq);
5987 /*
5988 * wait a little before calling scm again to let other
5989 * processes run
5990 */
5991 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
5992 msleep(50);
5993
5994 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
5995
5996 qseecom_disable_ice_setup(create_key_req.usage);
5997
5998 if (ret) {
5999 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
6000 pipe, ce_hw[i], ret);
6001 goto free_buf;
6002 } else {
6003 pr_err("Set the key successfully\n");
6004 if ((create_key_req.usage ==
6005 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
6006 (create_key_req.usage ==
6007 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
6008 goto free_buf;
6009 }
6010 }
6011
6012free_buf:
6013 kzfree(ce_hw);
6014 return ret;
6015}
6016
6017static int qseecom_wipe_key(struct qseecom_dev_handle *data,
6018 void __user *argp)
6019{
6020 uint32_t *ce_hw = NULL;
6021 uint32_t pipe = 0;
6022 int ret = 0;
6023 uint32_t flags = 0;
6024 int i, j;
6025 struct qseecom_wipe_key_req wipe_key_req;
6026 struct qseecom_key_delete_ireq delete_key_ireq;
6027 struct qseecom_key_select_ireq clear_key_ireq;
6028 uint32_t entries = 0;
6029
6030 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
6031 if (ret) {
6032 pr_err("copy_from_user failed\n");
6033 return ret;
6034 }
6035
6036 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6037 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6038 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6039 ret = -EFAULT;
6040 return ret;
6041 }
6042
6043 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6044 wipe_key_req.usage);
6045 if (entries <= 0) {
6046 pr_err("no ce instance for usage %d instance %d\n",
6047 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6048 ret = -EINVAL;
6049 return ret;
6050 }
6051
6052 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6053 if (!ce_hw) {
6054 ret = -ENOMEM;
6055 return ret;
6056 }
6057
6058 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6059 DEFAULT_CE_INFO_UNIT);
6060 if (ret) {
6061 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6062 ret = -EINVAL;
6063 goto free_buf;
6064 }
6065
6066 if (wipe_key_req.wipe_key_flag) {
6067 delete_key_ireq.flags = flags;
6068 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6069 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6070 memcpy((void *)delete_key_ireq.key_id,
6071 (void *)key_id_array[wipe_key_req.usage].desc,
6072 QSEECOM_KEY_ID_SIZE);
6073 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6074
6075 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6076 &delete_key_ireq);
6077 if (ret) {
6078 pr_err("Failed to delete key from ssd storage: %d\n",
6079 ret);
6080 ret = -EFAULT;
6081 goto free_buf;
6082 }
6083 }
6084
6085 for (j = 0; j < entries; j++) {
6086 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6087 if (wipe_key_req.usage ==
6088 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6089 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6090 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6091 } else if (wipe_key_req.usage ==
6092 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6093 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6094 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6095 } else {
6096 clear_key_ireq.ce = ce_hw[j];
6097 clear_key_ireq.pipe = pipe;
6098 }
6099 clear_key_ireq.flags = flags;
6100 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6101 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6102 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6103 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6104
6105 /*
6106 * It will return false if it is GPCE based crypto instance or
6107 * ICE is setup properly
6108 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006109 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6110 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006111 goto free_buf;
6112
6113 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6114 &clear_key_ireq);
6115
6116 qseecom_disable_ice_setup(wipe_key_req.usage);
6117
6118 if (ret) {
6119 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6120 pipe, ce_hw[j], ret);
6121 ret = -EFAULT;
6122 goto free_buf;
6123 }
6124 }
6125
6126free_buf:
6127 kzfree(ce_hw);
6128 return ret;
6129}
6130
6131static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6132 void __user *argp)
6133{
6134 int ret = 0;
6135 uint32_t flags = 0;
6136 struct qseecom_update_key_userinfo_req update_key_req;
6137 struct qseecom_key_userinfo_update_ireq ireq;
6138
6139 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6140 if (ret) {
6141 pr_err("copy_from_user failed\n");
6142 return ret;
6143 }
6144
6145 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6146 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6147 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6148 return -EFAULT;
6149 }
6150
6151 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6152
6153 if (qseecom.fde_key_size)
6154 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6155 else
6156 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6157
6158 ireq.flags = flags;
6159 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6160 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6161 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6162 memcpy((void *)ireq.key_id,
6163 (void *)key_id_array[update_key_req.usage].desc,
6164 QSEECOM_KEY_ID_SIZE);
6165 memcpy((void *)ireq.current_hash32,
6166 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6167 memcpy((void *)ireq.new_hash32,
6168 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6169
6170 do {
6171 ret = __qseecom_update_current_key_user_info(data,
6172 update_key_req.usage,
6173 &ireq);
6174 /*
6175 * wait a little before calling scm again to let other
6176 * processes run
6177 */
6178 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6179 msleep(50);
6180
6181 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6182 if (ret) {
6183 pr_err("Failed to update key info: %d\n", ret);
6184 return ret;
6185 }
6186 return ret;
6187
6188}
6189static int qseecom_is_es_activated(void __user *argp)
6190{
6191 struct qseecom_is_es_activated_req req;
6192 struct qseecom_command_scm_resp resp;
6193 int ret;
6194
6195 if (qseecom.qsee_version < QSEE_VERSION_04) {
6196 pr_err("invalid qsee version\n");
6197 return -ENODEV;
6198 }
6199
6200 if (argp == NULL) {
6201 pr_err("arg is null\n");
6202 return -EINVAL;
6203 }
6204
6205 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6206 &req, sizeof(req), &resp, sizeof(resp));
6207 if (ret) {
6208 pr_err("scm_call failed\n");
6209 return ret;
6210 }
6211
6212 req.is_activated = resp.result;
6213 ret = copy_to_user(argp, &req, sizeof(req));
6214 if (ret) {
6215 pr_err("copy_to_user failed\n");
6216 return ret;
6217 }
6218
6219 return 0;
6220}
6221
6222static int qseecom_save_partition_hash(void __user *argp)
6223{
6224 struct qseecom_save_partition_hash_req req;
6225 struct qseecom_command_scm_resp resp;
6226 int ret;
6227
6228 memset(&resp, 0x00, sizeof(resp));
6229
6230 if (qseecom.qsee_version < QSEE_VERSION_04) {
6231 pr_err("invalid qsee version\n");
6232 return -ENODEV;
6233 }
6234
6235 if (argp == NULL) {
6236 pr_err("arg is null\n");
6237 return -EINVAL;
6238 }
6239
6240 ret = copy_from_user(&req, argp, sizeof(req));
6241 if (ret) {
6242 pr_err("copy_from_user failed\n");
6243 return ret;
6244 }
6245
6246 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6247 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6248 if (ret) {
6249 pr_err("qseecom_scm_call failed\n");
6250 return ret;
6251 }
6252
6253 return 0;
6254}
6255
6256static int qseecom_mdtp_cipher_dip(void __user *argp)
6257{
6258 struct qseecom_mdtp_cipher_dip_req req;
6259 u32 tzbuflenin, tzbuflenout;
6260 char *tzbufin = NULL, *tzbufout = NULL;
6261 struct scm_desc desc = {0};
6262 int ret;
6263
6264 do {
6265 /* Copy the parameters from userspace */
6266 if (argp == NULL) {
6267 pr_err("arg is null\n");
6268 ret = -EINVAL;
6269 break;
6270 }
6271
6272 ret = copy_from_user(&req, argp, sizeof(req));
6273 if (ret) {
6274 pr_err("copy_from_user failed, ret= %d\n", ret);
6275 break;
6276 }
6277
6278 if (req.in_buf == NULL || req.out_buf == NULL ||
6279 req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
6280 req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
6281 req.direction > 1) {
6282 pr_err("invalid parameters\n");
6283 ret = -EINVAL;
6284 break;
6285 }
6286
6287 /* Copy the input buffer from userspace to kernel space */
6288 tzbuflenin = PAGE_ALIGN(req.in_buf_size);
6289 tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
6290 if (!tzbufin) {
6291 pr_err("error allocating in buffer\n");
6292 ret = -ENOMEM;
6293 break;
6294 }
6295
6296 ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
6297 if (ret) {
6298 pr_err("copy_from_user failed, ret=%d\n", ret);
6299 break;
6300 }
6301
6302 dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
6303
6304 /* Prepare the output buffer in kernel space */
6305 tzbuflenout = PAGE_ALIGN(req.out_buf_size);
6306 tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
6307 if (!tzbufout) {
6308 pr_err("error allocating out buffer\n");
6309 ret = -ENOMEM;
6310 break;
6311 }
6312
6313 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6314
6315 /* Send the command to TZ */
6316 desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
6317 desc.args[0] = virt_to_phys(tzbufin);
6318 desc.args[1] = req.in_buf_size;
6319 desc.args[2] = virt_to_phys(tzbufout);
6320 desc.args[3] = req.out_buf_size;
6321 desc.args[4] = req.direction;
6322
6323 ret = __qseecom_enable_clk(CLK_QSEE);
6324 if (ret)
6325 break;
6326
6327 ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);
6328
6329 __qseecom_disable_clk(CLK_QSEE);
6330
6331 if (ret) {
6332 pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
6333 ret);
6334 break;
6335 }
6336
6337 /* Copy the output buffer from kernel space to userspace */
6338 dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
6339 ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
6340 if (ret) {
6341 pr_err("copy_to_user failed, ret=%d\n", ret);
6342 break;
6343 }
6344 } while (0);
6345
6346 kzfree(tzbufin);
6347 kzfree(tzbufout);
6348
6349 return ret;
6350}
6351
6352static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
6353 struct qseecom_qteec_req *req)
6354{
6355 if (!data || !data->client.ihandle) {
6356 pr_err("Client or client handle is not initialized\n");
6357 return -EINVAL;
6358 }
6359
6360 if (data->type != QSEECOM_CLIENT_APP)
6361 return -EFAULT;
6362
6363 if (req->req_len > UINT_MAX - req->resp_len) {
6364 pr_err("Integer overflow detected in req_len & rsp_len\n");
6365 return -EINVAL;
6366 }
6367
6368 if (req->req_len + req->resp_len > data->client.sb_length) {
6369 pr_debug("Not enough memory to fit cmd_buf.\n");
6370 pr_debug("resp_buf. Required: %u, Available: %zu\n",
6371 (req->req_len + req->resp_len), data->client.sb_length);
6372 return -ENOMEM;
6373 }
6374
6375 if (req->req_ptr == NULL || req->resp_ptr == NULL) {
6376 pr_err("cmd buffer or response buffer is null\n");
6377 return -EINVAL;
6378 }
6379 if (((uintptr_t)req->req_ptr <
6380 data->client.user_virt_sb_base) ||
6381 ((uintptr_t)req->req_ptr >=
6382 (data->client.user_virt_sb_base + data->client.sb_length))) {
6383 pr_err("cmd buffer address not within shared bufffer\n");
6384 return -EINVAL;
6385 }
6386
6387 if (((uintptr_t)req->resp_ptr <
6388 data->client.user_virt_sb_base) ||
6389 ((uintptr_t)req->resp_ptr >=
6390 (data->client.user_virt_sb_base + data->client.sb_length))) {
6391 pr_err("response buffer address not within shared bufffer\n");
6392 return -EINVAL;
6393 }
6394
6395 if ((req->req_len == 0) || (req->resp_len == 0)) {
6396 pr_err("cmd buf lengtgh/response buf length not valid\n");
6397 return -EINVAL;
6398 }
6399
6400 if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
6401 pr_err("Integer overflow in req_len & req_ptr\n");
6402 return -EINVAL;
6403 }
6404
6405 if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
6406 pr_err("Integer overflow in resp_len & resp_ptr\n");
6407 return -EINVAL;
6408 }
6409
6410 if (data->client.user_virt_sb_base >
6411 (ULONG_MAX - data->client.sb_length)) {
6412 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
6413 return -EINVAL;
6414 }
6415 if ((((uintptr_t)req->req_ptr + req->req_len) >
6416 ((uintptr_t)data->client.user_virt_sb_base +
6417 data->client.sb_length)) ||
6418 (((uintptr_t)req->resp_ptr + req->resp_len) >
6419 ((uintptr_t)data->client.user_virt_sb_base +
6420 data->client.sb_length))) {
6421 pr_err("cmd buf or resp buf is out of shared buffer region\n");
6422 return -EINVAL;
6423 }
6424 return 0;
6425}
6426
6427static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6428 uint32_t fd_idx, struct sg_table *sg_ptr)
6429{
6430 struct scatterlist *sg = sg_ptr->sgl;
6431 struct qseecom_sg_entry *sg_entry;
6432 void *buf;
6433 uint i;
6434 size_t size;
6435 dma_addr_t coh_pmem;
6436
6437 if (fd_idx >= MAX_ION_FD) {
6438 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6439 return -ENOMEM;
6440 }
6441 /*
6442 * Allocate a buffer, populate it with number of entry plus
6443 * each sg entry's phy addr and length; then return the
6444 * phy_addr of the buffer.
6445 */
6446 size = sizeof(uint32_t) +
6447 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6448 size = (size + PAGE_SIZE) & PAGE_MASK;
6449 buf = dma_alloc_coherent(qseecom.pdev,
6450 size, &coh_pmem, GFP_KERNEL);
6451 if (buf == NULL) {
6452 pr_err("failed to alloc memory for sg buf\n");
6453 return -ENOMEM;
6454 }
6455 *(uint32_t *)buf = sg_ptr->nents;
6456 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6457 for (i = 0; i < sg_ptr->nents; i++) {
6458 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6459 sg_entry->len = sg->length;
6460 sg_entry++;
6461 sg = sg_next(sg);
6462 }
6463 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6464 data->client.sec_buf_fd[fd_idx].vbase = buf;
6465 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6466 data->client.sec_buf_fd[fd_idx].size = size;
6467 return 0;
6468}
6469
6470static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6471 struct qseecom_dev_handle *data, bool cleanup)
6472{
6473 struct ion_handle *ihandle;
6474 int ret = 0;
6475 int i = 0;
6476 uint32_t *update;
6477 struct sg_table *sg_ptr = NULL;
6478 struct scatterlist *sg;
6479 struct qseecom_param_memref *memref;
6480
6481 if (req == NULL) {
6482 pr_err("Invalid address\n");
6483 return -EINVAL;
6484 }
6485 for (i = 0; i < MAX_ION_FD; i++) {
6486 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006487 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006488 req->ifd_data[i].fd);
6489 if (IS_ERR_OR_NULL(ihandle)) {
6490 pr_err("Ion client can't retrieve the handle\n");
6491 return -ENOMEM;
6492 }
6493 if ((req->req_len < sizeof(uint32_t)) ||
6494 (req->ifd_data[i].cmd_buf_offset >
6495 req->req_len - sizeof(uint32_t))) {
6496 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6497 req->req_len,
6498 req->ifd_data[i].cmd_buf_offset);
6499 return -EINVAL;
6500 }
6501 update = (uint32_t *)((char *) req->req_ptr +
6502 req->ifd_data[i].cmd_buf_offset);
6503 if (!update) {
6504 pr_err("update pointer is NULL\n");
6505 return -EINVAL;
6506 }
6507 } else {
6508 continue;
6509 }
6510 /* Populate the cmd data structure with the phys_addr */
6511 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6512 if (IS_ERR_OR_NULL(sg_ptr)) {
6513 pr_err("IOn client could not retrieve sg table\n");
6514 goto err;
6515 }
6516 sg = sg_ptr->sgl;
6517 if (sg == NULL) {
6518 pr_err("sg is NULL\n");
6519 goto err;
6520 }
6521 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6522 pr_err("Num of scat entr (%d)or length(%d) invalid\n",
6523 sg_ptr->nents, sg->length);
6524 goto err;
6525 }
6526 /* clean up buf for pre-allocated fd */
6527 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6528 (*update)) {
6529 if (data->client.sec_buf_fd[i].vbase)
6530 dma_free_coherent(qseecom.pdev,
6531 data->client.sec_buf_fd[i].size,
6532 data->client.sec_buf_fd[i].vbase,
6533 data->client.sec_buf_fd[i].pbase);
6534 memset((void *)update, 0,
6535 sizeof(struct qseecom_param_memref));
6536 memset(&(data->client.sec_buf_fd[i]), 0,
6537 sizeof(struct qseecom_sec_buf_fd_info));
6538 goto clean;
6539 }
6540
6541 if (*update == 0) {
6542 /* update buf for pre-allocated fd from secure heap*/
6543 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6544 sg_ptr);
6545 if (ret) {
6546 pr_err("Failed to handle buf for fd[%d]\n", i);
6547 goto err;
6548 }
6549 memref = (struct qseecom_param_memref *)update;
6550 memref->buffer =
6551 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6552 memref->size =
6553 (uint32_t)(data->client.sec_buf_fd[i].size);
6554 } else {
6555 /* update buf for fd from non-secure qseecom heap */
6556 if (sg_ptr->nents != 1) {
6557 pr_err("Num of scat entr (%d) invalid\n",
6558 sg_ptr->nents);
6559 goto err;
6560 }
6561 if (cleanup)
6562 *update = 0;
6563 else
6564 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6565 }
6566clean:
6567 if (cleanup) {
6568 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6569 ihandle, NULL, sg->length,
6570 ION_IOC_INV_CACHES);
6571 if (ret) {
6572 pr_err("cache operation failed %d\n", ret);
6573 goto err;
6574 }
6575 } else {
6576 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6577 ihandle, NULL, sg->length,
6578 ION_IOC_CLEAN_INV_CACHES);
6579 if (ret) {
6580 pr_err("cache operation failed %d\n", ret);
6581 goto err;
6582 }
6583 data->sglistinfo_ptr[i].indexAndFlags =
6584 SGLISTINFO_SET_INDEX_FLAG(
6585 (sg_ptr->nents == 1), 0,
6586 req->ifd_data[i].cmd_buf_offset);
6587 data->sglistinfo_ptr[i].sizeOrCount =
6588 (sg_ptr->nents == 1) ?
6589 sg->length : sg_ptr->nents;
6590 data->sglist_cnt = i + 1;
6591 }
6592 /* Deallocate the handle */
6593 if (!IS_ERR_OR_NULL(ihandle))
6594 ion_free(qseecom.ion_clnt, ihandle);
6595 }
6596 return ret;
6597err:
6598 if (!IS_ERR_OR_NULL(ihandle))
6599 ion_free(qseecom.ion_clnt, ihandle);
6600 return -ENOMEM;
6601}
6602
6603static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
6604 struct qseecom_qteec_req *req, uint32_t cmd_id)
6605{
6606 struct qseecom_command_scm_resp resp;
6607 struct qseecom_qteec_ireq ireq;
6608 struct qseecom_qteec_64bit_ireq ireq_64bit;
6609 struct qseecom_registered_app_list *ptr_app;
6610 bool found_app = false;
6611 unsigned long flags;
6612 int ret = 0;
Zhen Kong4af480e2017-09-19 14:34:16 -07006613 int ret2 = 0;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006614 uint32_t reqd_len_sb_in = 0;
6615 void *cmd_buf = NULL;
6616 size_t cmd_len;
6617 struct sglist_info *table = data->sglistinfo_ptr;
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306618 void *req_ptr = NULL;
6619 void *resp_ptr = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006620
6621 ret = __qseecom_qteec_validate_msg(data, req);
6622 if (ret)
6623 return ret;
6624
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306625 req_ptr = req->req_ptr;
6626 resp_ptr = req->resp_ptr;
6627
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006628 /* find app_id & img_name from list */
6629 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6630 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6631 list) {
6632 if ((ptr_app->app_id == data->client.app_id) &&
6633 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6634 found_app = true;
6635 break;
6636 }
6637 }
6638 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6639 if (!found_app) {
6640 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6641 (char *)data->client.app_name);
6642 return -ENOENT;
6643 }
6644
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306645 req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6646 (uintptr_t)req->req_ptr);
6647 req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6648 (uintptr_t)req->resp_ptr);
6649
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006650 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6651 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
6652 ret = __qseecom_update_qteec_req_buf(
6653 (struct qseecom_qteec_modfd_req *)req, data, false);
6654 if (ret)
6655 return ret;
6656 }
6657
6658 if (qseecom.qsee_version < QSEE_VERSION_40) {
6659 ireq.app_id = data->client.app_id;
6660 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306661 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006662 ireq.req_len = req->req_len;
6663 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306664 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006665 ireq.resp_len = req->resp_len;
6666 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6667 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6668 dmac_flush_range((void *)table,
6669 (void *)table + SGLISTINFO_TABLE_SIZE);
6670 cmd_buf = (void *)&ireq;
6671 cmd_len = sizeof(struct qseecom_qteec_ireq);
6672 } else {
6673 ireq_64bit.app_id = data->client.app_id;
6674 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306675 (uintptr_t)req_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006676 ireq_64bit.req_len = req->req_len;
6677 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
Brahmaji Kb33e26e2017-06-01 17:20:10 +05306678 (uintptr_t)resp_ptr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006679 ireq_64bit.resp_len = req->resp_len;
6680 if ((data->client.app_arch == ELFCLASS32) &&
6681 ((ireq_64bit.req_ptr >=
6682 PHY_ADDR_4G - ireq_64bit.req_len) ||
6683 (ireq_64bit.resp_ptr >=
6684 PHY_ADDR_4G - ireq_64bit.resp_len))){
6685 pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
6686 data->client.app_name, data->client.app_id);
6687 pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
6688 ireq_64bit.req_ptr, ireq_64bit.req_len,
6689 ireq_64bit.resp_ptr, ireq_64bit.resp_len);
6690 return -EFAULT;
6691 }
6692 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6693 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6694 dmac_flush_range((void *)table,
6695 (void *)table + SGLISTINFO_TABLE_SIZE);
6696 cmd_buf = (void *)&ireq_64bit;
6697 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6698 }
6699 if (qseecom.whitelist_support == true
6700 && cmd_id == QSEOS_TEE_OPEN_SESSION)
6701 *(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
6702 else
6703 *(uint32_t *)cmd_buf = cmd_id;
6704
6705 reqd_len_sb_in = req->req_len + req->resp_len;
6706 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6707 data->client.sb_virt,
6708 reqd_len_sb_in,
6709 ION_IOC_CLEAN_INV_CACHES);
6710 if (ret) {
6711 pr_err("cache operation failed %d\n", ret);
6712 return ret;
6713 }
6714
6715 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6716
6717 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6718 cmd_buf, cmd_len,
6719 &resp, sizeof(resp));
6720 if (ret) {
6721 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6722 ret, data->client.app_id);
Zhen Kong4af480e2017-09-19 14:34:16 -07006723 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006724 }
6725
6726 if (qseecom.qsee_reentrancy_support) {
6727 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
Zhen Kong4af480e2017-09-19 14:34:16 -07006728 if (ret)
6729 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006730 } else {
6731 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6732 ret = __qseecom_process_incomplete_cmd(data, &resp);
6733 if (ret) {
6734 pr_err("process_incomplete_cmd failed err: %d\n",
6735 ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07006736 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006737 }
6738 } else {
6739 if (resp.result != QSEOS_RESULT_SUCCESS) {
6740 pr_err("Response result %d not supported\n",
6741 resp.result);
6742 ret = -EINVAL;
Zhen Kong4af480e2017-09-19 14:34:16 -07006743 goto exit;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006744 }
6745 }
6746 }
Zhen Kong4af480e2017-09-19 14:34:16 -07006747exit:
6748 ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006749 data->client.sb_virt, data->client.sb_length,
6750 ION_IOC_INV_CACHES);
Zhen Kong4af480e2017-09-19 14:34:16 -07006751 if (ret2) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006752 pr_err("cache operation failed %d\n", ret);
Zhen Kong4af480e2017-09-19 14:34:16 -07006753 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006754 }
6755
6756 if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
6757 (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
Zhen Kong4af480e2017-09-19 14:34:16 -07006758 ret2 = __qseecom_update_qteec_req_buf(
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006759 (struct qseecom_qteec_modfd_req *)req, data, true);
Zhen Kong4af480e2017-09-19 14:34:16 -07006760 if (ret2)
6761 return ret2;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006762 }
Zhen Kong4af480e2017-09-19 14:34:16 -07006763 return ret;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006764}
6765
6766static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6767 void __user *argp)
6768{
6769 struct qseecom_qteec_modfd_req req;
6770 int ret = 0;
6771
6772 ret = copy_from_user(&req, argp,
6773 sizeof(struct qseecom_qteec_modfd_req));
6774 if (ret) {
6775 pr_err("copy_from_user failed\n");
6776 return ret;
6777 }
6778 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6779 QSEOS_TEE_OPEN_SESSION);
6780
6781 return ret;
6782}
6783
6784static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6785 void __user *argp)
6786{
6787 struct qseecom_qteec_req req;
6788 int ret = 0;
6789
6790 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6791 if (ret) {
6792 pr_err("copy_from_user failed\n");
6793 return ret;
6794 }
6795 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6796 return ret;
6797}
6798
6799static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
6800 void __user *argp)
6801{
6802 struct qseecom_qteec_modfd_req req;
6803 struct qseecom_command_scm_resp resp;
6804 struct qseecom_qteec_ireq ireq;
6805 struct qseecom_qteec_64bit_ireq ireq_64bit;
6806 struct qseecom_registered_app_list *ptr_app;
6807 bool found_app = false;
6808 unsigned long flags;
6809 int ret = 0;
6810 int i = 0;
6811 uint32_t reqd_len_sb_in = 0;
6812 void *cmd_buf = NULL;
6813 size_t cmd_len;
6814 struct sglist_info *table = data->sglistinfo_ptr;
6815 void *req_ptr = NULL;
6816 void *resp_ptr = NULL;
6817
6818 ret = copy_from_user(&req, argp,
6819 sizeof(struct qseecom_qteec_modfd_req));
6820 if (ret) {
6821 pr_err("copy_from_user failed\n");
6822 return ret;
6823 }
6824 ret = __qseecom_qteec_validate_msg(data,
6825 (struct qseecom_qteec_req *)(&req));
6826 if (ret)
6827 return ret;
6828 req_ptr = req.req_ptr;
6829 resp_ptr = req.resp_ptr;
6830
6831 /* find app_id & img_name from list */
6832 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
6833 list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
6834 list) {
6835 if ((ptr_app->app_id == data->client.app_id) &&
6836 (!strcmp(ptr_app->app_name, data->client.app_name))) {
6837 found_app = true;
6838 break;
6839 }
6840 }
6841 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
6842 if (!found_app) {
6843 pr_err("app_id %d (%s) is not found\n", data->client.app_id,
6844 (char *)data->client.app_name);
6845 return -ENOENT;
6846 }
6847
6848 /* validate offsets */
6849 for (i = 0; i < MAX_ION_FD; i++) {
6850 if (req.ifd_data[i].fd) {
6851 if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
6852 return -EINVAL;
6853 }
6854 }
6855 req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6856 (uintptr_t)req.req_ptr);
6857 req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
6858 (uintptr_t)req.resp_ptr);
6859 ret = __qseecom_update_qteec_req_buf(&req, data, false);
6860 if (ret)
6861 return ret;
6862
6863 if (qseecom.qsee_version < QSEE_VERSION_40) {
6864 ireq.app_id = data->client.app_id;
6865 ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6866 (uintptr_t)req_ptr);
6867 ireq.req_len = req.req_len;
6868 ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
6869 (uintptr_t)resp_ptr);
6870 ireq.resp_len = req.resp_len;
6871 cmd_buf = (void *)&ireq;
6872 cmd_len = sizeof(struct qseecom_qteec_ireq);
6873 ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
6874 ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6875 dmac_flush_range((void *)table,
6876 (void *)table + SGLISTINFO_TABLE_SIZE);
6877 } else {
6878 ireq_64bit.app_id = data->client.app_id;
6879 ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6880 (uintptr_t)req_ptr);
6881 ireq_64bit.req_len = req.req_len;
6882 ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
6883 (uintptr_t)resp_ptr);
6884 ireq_64bit.resp_len = req.resp_len;
6885 cmd_buf = (void *)&ireq_64bit;
6886 cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
6887 ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
6888 ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
6889 dmac_flush_range((void *)table,
6890 (void *)table + SGLISTINFO_TABLE_SIZE);
6891 }
6892 reqd_len_sb_in = req.req_len + req.resp_len;
6893 if (qseecom.whitelist_support == true)
6894 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
6895 else
6896 *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
6897
6898 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6899 data->client.sb_virt,
6900 reqd_len_sb_in,
6901 ION_IOC_CLEAN_INV_CACHES);
6902 if (ret) {
6903 pr_err("cache operation failed %d\n", ret);
6904 return ret;
6905 }
6906
6907 __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
6908
6909 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
6910 cmd_buf, cmd_len,
6911 &resp, sizeof(resp));
6912 if (ret) {
6913 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
6914 ret, data->client.app_id);
6915 return ret;
6916 }
6917
6918 if (qseecom.qsee_reentrancy_support) {
6919 ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
6920 } else {
6921 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
6922 ret = __qseecom_process_incomplete_cmd(data, &resp);
6923 if (ret) {
6924 pr_err("process_incomplete_cmd failed err: %d\n",
6925 ret);
6926 return ret;
6927 }
6928 } else {
6929 if (resp.result != QSEOS_RESULT_SUCCESS) {
6930 pr_err("Response result %d not supported\n",
6931 resp.result);
6932 ret = -EINVAL;
6933 }
6934 }
6935 }
6936 ret = __qseecom_update_qteec_req_buf(&req, data, true);
6937 if (ret)
6938 return ret;
6939
6940 ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
6941 data->client.sb_virt, data->client.sb_length,
6942 ION_IOC_INV_CACHES);
6943 if (ret) {
6944 pr_err("cache operation failed %d\n", ret);
6945 return ret;
6946 }
6947 return 0;
6948}
6949
6950static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
6951 void __user *argp)
6952{
6953 struct qseecom_qteec_modfd_req req;
6954 int ret = 0;
6955
6956 ret = copy_from_user(&req, argp,
6957 sizeof(struct qseecom_qteec_modfd_req));
6958 if (ret) {
6959 pr_err("copy_from_user failed\n");
6960 return ret;
6961 }
6962 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6963 QSEOS_TEE_REQUEST_CANCELLATION);
6964
6965 return ret;
6966}
6967
6968static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
6969{
6970 if (data->sglist_cnt) {
6971 memset(data->sglistinfo_ptr, 0,
6972 SGLISTINFO_TABLE_SIZE);
6973 data->sglist_cnt = 0;
6974 }
6975}
6976
6977static inline long qseecom_ioctl(struct file *file,
6978 unsigned int cmd, unsigned long arg)
6979{
6980 int ret = 0;
6981 struct qseecom_dev_handle *data = file->private_data;
6982 void __user *argp = (void __user *) arg;
6983 bool perf_enabled = false;
6984
6985 if (!data) {
6986 pr_err("Invalid/uninitialized device handle\n");
6987 return -EINVAL;
6988 }
6989
6990 if (data->abort) {
6991 pr_err("Aborting qseecom driver\n");
6992 return -ENODEV;
6993 }
6994
6995 switch (cmd) {
6996 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
6997 if (data->type != QSEECOM_GENERIC) {
6998 pr_err("reg lstnr req: invalid handle (%d)\n",
6999 data->type);
7000 ret = -EINVAL;
7001 break;
7002 }
7003 pr_debug("ioctl register_listener_req()\n");
7004 mutex_lock(&app_access_lock);
7005 atomic_inc(&data->ioctl_count);
7006 data->type = QSEECOM_LISTENER_SERVICE;
7007 ret = qseecom_register_listener(data, argp);
7008 atomic_dec(&data->ioctl_count);
7009 wake_up_all(&data->abort_wq);
7010 mutex_unlock(&app_access_lock);
7011 if (ret)
7012 pr_err("failed qseecom_register_listener: %d\n", ret);
7013 break;
7014 }
Neeraj Sonib30ac1f2018-04-17 14:48:42 +05307015 case QSEECOM_IOCTL_SET_ICE_INFO: {
7016 struct qseecom_ice_data_t ice_data;
7017
7018 ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
7019 if (ret) {
7020 pr_err("copy_from_user failed\n");
7021 return -EFAULT;
7022 }
7023 qcom_ice_set_fde_flag(ice_data.flag);
7024 break;
7025 }
7026
7027 case QSEECOM_IOCTL_SET_ENCDEC_INFO: {
7028 struct qseecom_encdec_conf_t conf;
7029
7030 ret = copy_from_user(&conf, argp, sizeof(conf));
7031 if (ret) {
7032 pr_err("copy_from_user failed\n");
7033 return -EFAULT;
7034 }
7035 ret = qcom_ice_set_fde_conf(conf.start_sector, conf.fs_size,
7036 conf.index, conf.mode);
7037 break;
7038 }
7039
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007040 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
7041 if ((data->listener.id == 0) ||
7042 (data->type != QSEECOM_LISTENER_SERVICE)) {
7043 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
7044 data->type, data->listener.id);
7045 ret = -EINVAL;
7046 break;
7047 }
7048 pr_debug("ioctl unregister_listener_req()\n");
7049 mutex_lock(&app_access_lock);
7050 atomic_inc(&data->ioctl_count);
7051 ret = qseecom_unregister_listener(data);
7052 atomic_dec(&data->ioctl_count);
7053 wake_up_all(&data->abort_wq);
7054 mutex_unlock(&app_access_lock);
7055 if (ret)
7056 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7057 break;
7058 }
7059 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7060 if ((data->client.app_id == 0) ||
7061 (data->type != QSEECOM_CLIENT_APP)) {
7062 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7063 data->type, data->client.app_id);
7064 ret = -EINVAL;
7065 break;
7066 }
7067 /* Only one client allowed here at a time */
7068 mutex_lock(&app_access_lock);
7069 if (qseecom.support_bus_scaling) {
7070 /* register bus bw in case the client doesn't do it */
7071 if (!data->mode) {
7072 mutex_lock(&qsee_bw_mutex);
7073 __qseecom_register_bus_bandwidth_needs(
7074 data, HIGH);
7075 mutex_unlock(&qsee_bw_mutex);
7076 }
7077 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7078 if (ret) {
7079 pr_err("Failed to set bw.\n");
7080 ret = -EINVAL;
7081 mutex_unlock(&app_access_lock);
7082 break;
7083 }
7084 }
7085 /*
7086 * On targets where crypto clock is handled by HLOS,
7087 * if clk_access_cnt is zero and perf_enabled is false,
7088 * then the crypto clock was not enabled before sending cmd to
7089 * tz, qseecom will enable the clock to avoid service failure.
7090 */
7091 if (!qseecom.no_clock_support &&
7092 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7093 pr_debug("ce clock is not enabled!\n");
7094 ret = qseecom_perf_enable(data);
7095 if (ret) {
7096 pr_err("Failed to vote for clock with err %d\n",
7097 ret);
7098 mutex_unlock(&app_access_lock);
7099 ret = -EINVAL;
7100 break;
7101 }
7102 perf_enabled = true;
7103 }
7104 atomic_inc(&data->ioctl_count);
7105 ret = qseecom_send_cmd(data, argp);
7106 if (qseecom.support_bus_scaling)
7107 __qseecom_add_bw_scale_down_timer(
7108 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7109 if (perf_enabled) {
7110 qsee_disable_clock_vote(data, CLK_DFAB);
7111 qsee_disable_clock_vote(data, CLK_SFPB);
7112 }
7113 atomic_dec(&data->ioctl_count);
7114 wake_up_all(&data->abort_wq);
7115 mutex_unlock(&app_access_lock);
7116 if (ret)
7117 pr_err("failed qseecom_send_cmd: %d\n", ret);
7118 break;
7119 }
7120 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7121 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7122 if ((data->client.app_id == 0) ||
7123 (data->type != QSEECOM_CLIENT_APP)) {
7124 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7125 data->type, data->client.app_id);
7126 ret = -EINVAL;
7127 break;
7128 }
7129 /* Only one client allowed here at a time */
7130 mutex_lock(&app_access_lock);
7131 if (qseecom.support_bus_scaling) {
7132 if (!data->mode) {
7133 mutex_lock(&qsee_bw_mutex);
7134 __qseecom_register_bus_bandwidth_needs(
7135 data, HIGH);
7136 mutex_unlock(&qsee_bw_mutex);
7137 }
7138 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7139 if (ret) {
7140 pr_err("Failed to set bw.\n");
7141 mutex_unlock(&app_access_lock);
7142 ret = -EINVAL;
7143 break;
7144 }
7145 }
7146 /*
7147 * On targets where crypto clock is handled by HLOS,
7148 * if clk_access_cnt is zero and perf_enabled is false,
7149 * then the crypto clock was not enabled before sending cmd to
7150 * tz, qseecom will enable the clock to avoid service failure.
7151 */
7152 if (!qseecom.no_clock_support &&
7153 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7154 pr_debug("ce clock is not enabled!\n");
7155 ret = qseecom_perf_enable(data);
7156 if (ret) {
7157 pr_err("Failed to vote for clock with err %d\n",
7158 ret);
7159 mutex_unlock(&app_access_lock);
7160 ret = -EINVAL;
7161 break;
7162 }
7163 perf_enabled = true;
7164 }
7165 atomic_inc(&data->ioctl_count);
7166 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7167 ret = qseecom_send_modfd_cmd(data, argp);
7168 else
7169 ret = qseecom_send_modfd_cmd_64(data, argp);
7170 if (qseecom.support_bus_scaling)
7171 __qseecom_add_bw_scale_down_timer(
7172 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7173 if (perf_enabled) {
7174 qsee_disable_clock_vote(data, CLK_DFAB);
7175 qsee_disable_clock_vote(data, CLK_SFPB);
7176 }
7177 atomic_dec(&data->ioctl_count);
7178 wake_up_all(&data->abort_wq);
7179 mutex_unlock(&app_access_lock);
7180 if (ret)
7181 pr_err("failed qseecom_send_cmd: %d\n", ret);
7182 __qseecom_clean_data_sglistinfo(data);
7183 break;
7184 }
7185 case QSEECOM_IOCTL_RECEIVE_REQ: {
7186 if ((data->listener.id == 0) ||
7187 (data->type != QSEECOM_LISTENER_SERVICE)) {
7188 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7189 data->type, data->listener.id);
7190 ret = -EINVAL;
7191 break;
7192 }
7193 atomic_inc(&data->ioctl_count);
7194 ret = qseecom_receive_req(data);
7195 atomic_dec(&data->ioctl_count);
7196 wake_up_all(&data->abort_wq);
7197 if (ret && (ret != -ERESTARTSYS))
7198 pr_err("failed qseecom_receive_req: %d\n", ret);
7199 break;
7200 }
7201 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7202 if ((data->listener.id == 0) ||
7203 (data->type != QSEECOM_LISTENER_SERVICE)) {
7204 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7205 data->type, data->listener.id);
7206 ret = -EINVAL;
7207 break;
7208 }
7209 atomic_inc(&data->ioctl_count);
7210 if (!qseecom.qsee_reentrancy_support)
7211 ret = qseecom_send_resp();
7212 else
7213 ret = qseecom_reentrancy_send_resp(data);
7214 atomic_dec(&data->ioctl_count);
7215 wake_up_all(&data->abort_wq);
7216 if (ret)
7217 pr_err("failed qseecom_send_resp: %d\n", ret);
7218 break;
7219 }
7220 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7221 if ((data->type != QSEECOM_CLIENT_APP) &&
7222 (data->type != QSEECOM_GENERIC) &&
7223 (data->type != QSEECOM_SECURE_SERVICE)) {
7224 pr_err("set mem param req: invalid handle (%d)\n",
7225 data->type);
7226 ret = -EINVAL;
7227 break;
7228 }
7229 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7230 mutex_lock(&app_access_lock);
7231 atomic_inc(&data->ioctl_count);
7232 ret = qseecom_set_client_mem_param(data, argp);
7233 atomic_dec(&data->ioctl_count);
7234 mutex_unlock(&app_access_lock);
7235 if (ret)
7236 pr_err("failed Qqseecom_set_mem_param request: %d\n",
7237 ret);
7238 break;
7239 }
7240 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7241 if ((data->type != QSEECOM_GENERIC) &&
7242 (data->type != QSEECOM_CLIENT_APP)) {
7243 pr_err("load app req: invalid handle (%d)\n",
7244 data->type);
7245 ret = -EINVAL;
7246 break;
7247 }
7248 data->type = QSEECOM_CLIENT_APP;
7249 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7250 mutex_lock(&app_access_lock);
7251 atomic_inc(&data->ioctl_count);
7252 ret = qseecom_load_app(data, argp);
7253 atomic_dec(&data->ioctl_count);
7254 mutex_unlock(&app_access_lock);
7255 if (ret)
7256 pr_err("failed load_app request: %d\n", ret);
7257 break;
7258 }
7259 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7260 if ((data->client.app_id == 0) ||
7261 (data->type != QSEECOM_CLIENT_APP)) {
7262 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7263 data->type, data->client.app_id);
7264 ret = -EINVAL;
7265 break;
7266 }
7267 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7268 mutex_lock(&app_access_lock);
7269 atomic_inc(&data->ioctl_count);
7270 ret = qseecom_unload_app(data, false);
7271 atomic_dec(&data->ioctl_count);
7272 mutex_unlock(&app_access_lock);
7273 if (ret)
7274 pr_err("failed unload_app request: %d\n", ret);
7275 break;
7276 }
7277 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7278 atomic_inc(&data->ioctl_count);
7279 ret = qseecom_get_qseos_version(data, argp);
7280 if (ret)
7281 pr_err("qseecom_get_qseos_version: %d\n", ret);
7282 atomic_dec(&data->ioctl_count);
7283 break;
7284 }
7285 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7286 if ((data->type != QSEECOM_GENERIC) &&
7287 (data->type != QSEECOM_CLIENT_APP)) {
7288 pr_err("perf enable req: invalid handle (%d)\n",
7289 data->type);
7290 ret = -EINVAL;
7291 break;
7292 }
7293 if ((data->type == QSEECOM_CLIENT_APP) &&
7294 (data->client.app_id == 0)) {
7295 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7296 data->type, data->client.app_id);
7297 ret = -EINVAL;
7298 break;
7299 }
7300 atomic_inc(&data->ioctl_count);
7301 if (qseecom.support_bus_scaling) {
7302 mutex_lock(&qsee_bw_mutex);
7303 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7304 mutex_unlock(&qsee_bw_mutex);
7305 } else {
7306 ret = qseecom_perf_enable(data);
7307 if (ret)
7308 pr_err("Fail to vote for clocks %d\n", ret);
7309 }
7310 atomic_dec(&data->ioctl_count);
7311 break;
7312 }
7313 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7314 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7315 (data->type != QSEECOM_CLIENT_APP)) {
7316 pr_err("perf disable req: invalid handle (%d)\n",
7317 data->type);
7318 ret = -EINVAL;
7319 break;
7320 }
7321 if ((data->type == QSEECOM_CLIENT_APP) &&
7322 (data->client.app_id == 0)) {
7323 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7324 data->type, data->client.app_id);
7325 ret = -EINVAL;
7326 break;
7327 }
7328 atomic_inc(&data->ioctl_count);
7329 if (!qseecom.support_bus_scaling) {
7330 qsee_disable_clock_vote(data, CLK_DFAB);
7331 qsee_disable_clock_vote(data, CLK_SFPB);
7332 } else {
7333 mutex_lock(&qsee_bw_mutex);
7334 qseecom_unregister_bus_bandwidth_needs(data);
7335 mutex_unlock(&qsee_bw_mutex);
7336 }
7337 atomic_dec(&data->ioctl_count);
7338 break;
7339 }
7340
7341 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7342 /* If crypto clock is not handled by HLOS, return directly. */
7343 if (qseecom.no_clock_support) {
7344 pr_debug("crypto clock is not handled by HLOS\n");
7345 break;
7346 }
7347 if ((data->client.app_id == 0) ||
7348 (data->type != QSEECOM_CLIENT_APP)) {
7349 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7350 data->type, data->client.app_id);
7351 ret = -EINVAL;
7352 break;
7353 }
7354 atomic_inc(&data->ioctl_count);
7355 ret = qseecom_scale_bus_bandwidth(data, argp);
7356 atomic_dec(&data->ioctl_count);
7357 break;
7358 }
7359 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7360 if (data->type != QSEECOM_GENERIC) {
7361 pr_err("load ext elf req: invalid client handle (%d)\n",
7362 data->type);
7363 ret = -EINVAL;
7364 break;
7365 }
7366 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7367 data->released = true;
7368 mutex_lock(&app_access_lock);
7369 atomic_inc(&data->ioctl_count);
7370 ret = qseecom_load_external_elf(data, argp);
7371 atomic_dec(&data->ioctl_count);
7372 mutex_unlock(&app_access_lock);
7373 if (ret)
7374 pr_err("failed load_external_elf request: %d\n", ret);
7375 break;
7376 }
7377 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7378 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7379 pr_err("unload ext elf req: invalid handle (%d)\n",
7380 data->type);
7381 ret = -EINVAL;
7382 break;
7383 }
7384 data->released = true;
7385 mutex_lock(&app_access_lock);
7386 atomic_inc(&data->ioctl_count);
7387 ret = qseecom_unload_external_elf(data);
7388 atomic_dec(&data->ioctl_count);
7389 mutex_unlock(&app_access_lock);
7390 if (ret)
7391 pr_err("failed unload_app request: %d\n", ret);
7392 break;
7393 }
7394 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7395 data->type = QSEECOM_CLIENT_APP;
7396 mutex_lock(&app_access_lock);
7397 atomic_inc(&data->ioctl_count);
7398 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7399 ret = qseecom_query_app_loaded(data, argp);
7400 atomic_dec(&data->ioctl_count);
7401 mutex_unlock(&app_access_lock);
7402 break;
7403 }
7404 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7405 if (data->type != QSEECOM_GENERIC) {
7406 pr_err("send cmd svc req: invalid handle (%d)\n",
7407 data->type);
7408 ret = -EINVAL;
7409 break;
7410 }
7411 data->type = QSEECOM_SECURE_SERVICE;
7412 if (qseecom.qsee_version < QSEE_VERSION_03) {
7413 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7414 qseecom.qsee_version);
7415 return -EINVAL;
7416 }
7417 mutex_lock(&app_access_lock);
7418 atomic_inc(&data->ioctl_count);
7419 ret = qseecom_send_service_cmd(data, argp);
7420 atomic_dec(&data->ioctl_count);
7421 mutex_unlock(&app_access_lock);
7422 break;
7423 }
7424 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7425 if (!(qseecom.support_pfe || qseecom.support_fde))
7426 pr_err("Features requiring key init not supported\n");
7427 if (data->type != QSEECOM_GENERIC) {
7428 pr_err("create key req: invalid handle (%d)\n",
7429 data->type);
7430 ret = -EINVAL;
7431 break;
7432 }
7433 if (qseecom.qsee_version < QSEE_VERSION_05) {
7434 pr_err("Create Key feature unsupported: qsee ver %u\n",
7435 qseecom.qsee_version);
7436 return -EINVAL;
7437 }
7438 data->released = true;
7439 mutex_lock(&app_access_lock);
7440 atomic_inc(&data->ioctl_count);
7441 ret = qseecom_create_key(data, argp);
7442 if (ret)
7443 pr_err("failed to create encryption key: %d\n", ret);
7444
7445 atomic_dec(&data->ioctl_count);
7446 mutex_unlock(&app_access_lock);
7447 break;
7448 }
7449 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7450 if (!(qseecom.support_pfe || qseecom.support_fde))
7451 pr_err("Features requiring key init not supported\n");
7452 if (data->type != QSEECOM_GENERIC) {
7453 pr_err("wipe key req: invalid handle (%d)\n",
7454 data->type);
7455 ret = -EINVAL;
7456 break;
7457 }
7458 if (qseecom.qsee_version < QSEE_VERSION_05) {
7459 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7460 qseecom.qsee_version);
7461 return -EINVAL;
7462 }
7463 data->released = true;
7464 mutex_lock(&app_access_lock);
7465 atomic_inc(&data->ioctl_count);
7466 ret = qseecom_wipe_key(data, argp);
7467 if (ret)
7468 pr_err("failed to wipe encryption key: %d\n", ret);
7469 atomic_dec(&data->ioctl_count);
7470 mutex_unlock(&app_access_lock);
7471 break;
7472 }
7473 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7474 if (!(qseecom.support_pfe || qseecom.support_fde))
7475 pr_err("Features requiring key init not supported\n");
7476 if (data->type != QSEECOM_GENERIC) {
7477 pr_err("update key req: invalid handle (%d)\n",
7478 data->type);
7479 ret = -EINVAL;
7480 break;
7481 }
7482 if (qseecom.qsee_version < QSEE_VERSION_05) {
7483 pr_err("Update Key feature unsupported in qsee ver %u\n",
7484 qseecom.qsee_version);
7485 return -EINVAL;
7486 }
7487 data->released = true;
7488 mutex_lock(&app_access_lock);
7489 atomic_inc(&data->ioctl_count);
7490 ret = qseecom_update_key_user_info(data, argp);
7491 if (ret)
7492 pr_err("failed to update key user info: %d\n", ret);
7493 atomic_dec(&data->ioctl_count);
7494 mutex_unlock(&app_access_lock);
7495 break;
7496 }
7497 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7498 if (data->type != QSEECOM_GENERIC) {
7499 pr_err("save part hash req: invalid handle (%d)\n",
7500 data->type);
7501 ret = -EINVAL;
7502 break;
7503 }
7504 data->released = true;
7505 mutex_lock(&app_access_lock);
7506 atomic_inc(&data->ioctl_count);
7507 ret = qseecom_save_partition_hash(argp);
7508 atomic_dec(&data->ioctl_count);
7509 mutex_unlock(&app_access_lock);
7510 break;
7511 }
7512 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7513 if (data->type != QSEECOM_GENERIC) {
7514 pr_err("ES activated req: invalid handle (%d)\n",
7515 data->type);
7516 ret = -EINVAL;
7517 break;
7518 }
7519 data->released = true;
7520 mutex_lock(&app_access_lock);
7521 atomic_inc(&data->ioctl_count);
7522 ret = qseecom_is_es_activated(argp);
7523 atomic_dec(&data->ioctl_count);
7524 mutex_unlock(&app_access_lock);
7525 break;
7526 }
7527 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7528 if (data->type != QSEECOM_GENERIC) {
7529 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7530 data->type);
7531 ret = -EINVAL;
7532 break;
7533 }
7534 data->released = true;
7535 mutex_lock(&app_access_lock);
7536 atomic_inc(&data->ioctl_count);
7537 ret = qseecom_mdtp_cipher_dip(argp);
7538 atomic_dec(&data->ioctl_count);
7539 mutex_unlock(&app_access_lock);
7540 break;
7541 }
7542 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7543 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7544 if ((data->listener.id == 0) ||
7545 (data->type != QSEECOM_LISTENER_SERVICE)) {
7546 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7547 data->type, data->listener.id);
7548 ret = -EINVAL;
7549 break;
7550 }
7551 atomic_inc(&data->ioctl_count);
7552 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7553 ret = qseecom_send_modfd_resp(data, argp);
7554 else
7555 ret = qseecom_send_modfd_resp_64(data, argp);
7556 atomic_dec(&data->ioctl_count);
7557 wake_up_all(&data->abort_wq);
7558 if (ret)
7559 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7560 __qseecom_clean_data_sglistinfo(data);
7561 break;
7562 }
7563 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7564 if ((data->client.app_id == 0) ||
7565 (data->type != QSEECOM_CLIENT_APP)) {
7566 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7567 data->type, data->client.app_id);
7568 ret = -EINVAL;
7569 break;
7570 }
7571 if (qseecom.qsee_version < QSEE_VERSION_40) {
7572 pr_err("GP feature unsupported: qsee ver %u\n",
7573 qseecom.qsee_version);
7574 return -EINVAL;
7575 }
7576 /* Only one client allowed here at a time */
7577 mutex_lock(&app_access_lock);
7578 atomic_inc(&data->ioctl_count);
7579 ret = qseecom_qteec_open_session(data, argp);
7580 atomic_dec(&data->ioctl_count);
7581 wake_up_all(&data->abort_wq);
7582 mutex_unlock(&app_access_lock);
7583 if (ret)
7584 pr_err("failed open_session_cmd: %d\n", ret);
7585 __qseecom_clean_data_sglistinfo(data);
7586 break;
7587 }
7588 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7589 if ((data->client.app_id == 0) ||
7590 (data->type != QSEECOM_CLIENT_APP)) {
7591 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7592 data->type, data->client.app_id);
7593 ret = -EINVAL;
7594 break;
7595 }
7596 if (qseecom.qsee_version < QSEE_VERSION_40) {
7597 pr_err("GP feature unsupported: qsee ver %u\n",
7598 qseecom.qsee_version);
7599 return -EINVAL;
7600 }
7601 /* Only one client allowed here at a time */
7602 mutex_lock(&app_access_lock);
7603 atomic_inc(&data->ioctl_count);
7604 ret = qseecom_qteec_close_session(data, argp);
7605 atomic_dec(&data->ioctl_count);
7606 wake_up_all(&data->abort_wq);
7607 mutex_unlock(&app_access_lock);
7608 if (ret)
7609 pr_err("failed close_session_cmd: %d\n", ret);
7610 break;
7611 }
7612 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7613 if ((data->client.app_id == 0) ||
7614 (data->type != QSEECOM_CLIENT_APP)) {
7615 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7616 data->type, data->client.app_id);
7617 ret = -EINVAL;
7618 break;
7619 }
7620 if (qseecom.qsee_version < QSEE_VERSION_40) {
7621 pr_err("GP feature unsupported: qsee ver %u\n",
7622 qseecom.qsee_version);
7623 return -EINVAL;
7624 }
7625 /* Only one client allowed here at a time */
7626 mutex_lock(&app_access_lock);
7627 atomic_inc(&data->ioctl_count);
7628 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7629 atomic_dec(&data->ioctl_count);
7630 wake_up_all(&data->abort_wq);
7631 mutex_unlock(&app_access_lock);
7632 if (ret)
7633 pr_err("failed Invoke cmd: %d\n", ret);
7634 __qseecom_clean_data_sglistinfo(data);
7635 break;
7636 }
7637 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7638 if ((data->client.app_id == 0) ||
7639 (data->type != QSEECOM_CLIENT_APP)) {
7640 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7641 data->type, data->client.app_id);
7642 ret = -EINVAL;
7643 break;
7644 }
7645 if (qseecom.qsee_version < QSEE_VERSION_40) {
7646 pr_err("GP feature unsupported: qsee ver %u\n",
7647 qseecom.qsee_version);
7648 return -EINVAL;
7649 }
7650 /* Only one client allowed here at a time */
7651 mutex_lock(&app_access_lock);
7652 atomic_inc(&data->ioctl_count);
7653 ret = qseecom_qteec_request_cancellation(data, argp);
7654 atomic_dec(&data->ioctl_count);
7655 wake_up_all(&data->abort_wq);
7656 mutex_unlock(&app_access_lock);
7657 if (ret)
7658 pr_err("failed request_cancellation: %d\n", ret);
7659 break;
7660 }
7661 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7662 atomic_inc(&data->ioctl_count);
7663 ret = qseecom_get_ce_info(data, argp);
7664 if (ret)
7665 pr_err("failed get fde ce pipe info: %d\n", ret);
7666 atomic_dec(&data->ioctl_count);
7667 break;
7668 }
7669 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7670 atomic_inc(&data->ioctl_count);
7671 ret = qseecom_free_ce_info(data, argp);
7672 if (ret)
7673 pr_err("failed get fde ce pipe info: %d\n", ret);
7674 atomic_dec(&data->ioctl_count);
7675 break;
7676 }
7677 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7678 atomic_inc(&data->ioctl_count);
7679 ret = qseecom_query_ce_info(data, argp);
7680 if (ret)
7681 pr_err("failed get fde ce pipe info: %d\n", ret);
7682 atomic_dec(&data->ioctl_count);
7683 break;
7684 }
7685 default:
7686 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7687 return -EINVAL;
7688 }
7689 return ret;
7690}
7691
7692static int qseecom_open(struct inode *inode, struct file *file)
7693{
7694 int ret = 0;
7695 struct qseecom_dev_handle *data;
7696
7697 data = kzalloc(sizeof(*data), GFP_KERNEL);
7698 if (!data)
7699 return -ENOMEM;
7700 file->private_data = data;
7701 data->abort = 0;
7702 data->type = QSEECOM_GENERIC;
7703 data->released = false;
7704 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7705 data->mode = INACTIVE;
7706 init_waitqueue_head(&data->abort_wq);
7707 atomic_set(&data->ioctl_count, 0);
7708 return ret;
7709}
7710
7711static int qseecom_release(struct inode *inode, struct file *file)
7712{
7713 struct qseecom_dev_handle *data = file->private_data;
7714 int ret = 0;
7715
7716 if (data->released == false) {
7717 pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
7718 data->type, data->mode, data);
7719 switch (data->type) {
7720 case QSEECOM_LISTENER_SERVICE:
7721 mutex_lock(&app_access_lock);
7722 ret = qseecom_unregister_listener(data);
7723 mutex_unlock(&app_access_lock);
7724 break;
7725 case QSEECOM_CLIENT_APP:
7726 mutex_lock(&app_access_lock);
7727 ret = qseecom_unload_app(data, true);
7728 mutex_unlock(&app_access_lock);
7729 break;
7730 case QSEECOM_SECURE_SERVICE:
7731 case QSEECOM_GENERIC:
7732 ret = qseecom_unmap_ion_allocated_memory(data);
7733 if (ret)
7734 pr_err("Ion Unmap failed\n");
7735 break;
7736 case QSEECOM_UNAVAILABLE_CLIENT_APP:
7737 break;
7738 default:
7739 pr_err("Unsupported clnt_handle_type %d",
7740 data->type);
7741 break;
7742 }
7743 }
7744
7745 if (qseecom.support_bus_scaling) {
7746 mutex_lock(&qsee_bw_mutex);
7747 if (data->mode != INACTIVE) {
7748 qseecom_unregister_bus_bandwidth_needs(data);
7749 if (qseecom.cumulative_mode == INACTIVE) {
7750 ret = __qseecom_set_msm_bus_request(INACTIVE);
7751 if (ret)
7752 pr_err("Fail to scale down bus\n");
7753 }
7754 }
7755 mutex_unlock(&qsee_bw_mutex);
7756 } else {
7757 if (data->fast_load_enabled == true)
7758 qsee_disable_clock_vote(data, CLK_SFPB);
7759 if (data->perf_enabled == true)
7760 qsee_disable_clock_vote(data, CLK_DFAB);
7761 }
7762 kfree(data);
7763
7764 return ret;
7765}
7766
7767#ifdef CONFIG_COMPAT
7768#include "compat_qseecom.c"
7769#else
7770#define compat_qseecom_ioctl NULL
7771#endif
7772
/*
 * Character-device entry points for /dev/qseecom.  compat_qseecom_ioctl
 * is NULL when CONFIG_COMPAT is not set (see the #ifdef block above).
 */
static const struct file_operations qseecom_fops = {
		.owner = THIS_MODULE,
		.unlocked_ioctl = qseecom_ioctl,
		.compat_ioctl = compat_qseecom_ioctl,
		.open = qseecom_open,
		.release = qseecom_release
};
7780
7781static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7782{
7783 int rc = 0;
7784 struct device *pdev;
7785 struct qseecom_clk *qclk;
7786 char *core_clk_src = NULL;
7787 char *core_clk = NULL;
7788 char *iface_clk = NULL;
7789 char *bus_clk = NULL;
7790
7791 switch (ce) {
7792 case CLK_QSEE: {
7793 core_clk_src = "core_clk_src";
7794 core_clk = "core_clk";
7795 iface_clk = "iface_clk";
7796 bus_clk = "bus_clk";
7797 qclk = &qseecom.qsee;
7798 qclk->instance = CLK_QSEE;
7799 break;
7800 };
7801 case CLK_CE_DRV: {
7802 core_clk_src = "ce_drv_core_clk_src";
7803 core_clk = "ce_drv_core_clk";
7804 iface_clk = "ce_drv_iface_clk";
7805 bus_clk = "ce_drv_bus_clk";
7806 qclk = &qseecom.ce_drv;
7807 qclk->instance = CLK_CE_DRV;
7808 break;
7809 };
7810 default:
7811 pr_err("Invalid ce hw instance: %d!\n", ce);
7812 return -EIO;
7813 }
7814
7815 if (qseecom.no_clock_support) {
7816 qclk->ce_core_clk = NULL;
7817 qclk->ce_clk = NULL;
7818 qclk->ce_bus_clk = NULL;
7819 qclk->ce_core_src_clk = NULL;
7820 return 0;
7821 }
7822
7823 pdev = qseecom.pdev;
7824
7825 /* Get CE3 src core clk. */
7826 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7827 if (!IS_ERR(qclk->ce_core_src_clk)) {
7828 rc = clk_set_rate(qclk->ce_core_src_clk,
7829 qseecom.ce_opp_freq_hz);
7830 if (rc) {
7831 clk_put(qclk->ce_core_src_clk);
7832 qclk->ce_core_src_clk = NULL;
7833 pr_err("Unable to set the core src clk @%uMhz.\n",
7834 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7835 return -EIO;
7836 }
7837 } else {
7838 pr_warn("Unable to get CE core src clk, set to NULL\n");
7839 qclk->ce_core_src_clk = NULL;
7840 }
7841
7842 /* Get CE core clk */
7843 qclk->ce_core_clk = clk_get(pdev, core_clk);
7844 if (IS_ERR(qclk->ce_core_clk)) {
7845 rc = PTR_ERR(qclk->ce_core_clk);
7846 pr_err("Unable to get CE core clk\n");
7847 if (qclk->ce_core_src_clk != NULL)
7848 clk_put(qclk->ce_core_src_clk);
7849 return -EIO;
7850 }
7851
7852 /* Get CE Interface clk */
7853 qclk->ce_clk = clk_get(pdev, iface_clk);
7854 if (IS_ERR(qclk->ce_clk)) {
7855 rc = PTR_ERR(qclk->ce_clk);
7856 pr_err("Unable to get CE interface clk\n");
7857 if (qclk->ce_core_src_clk != NULL)
7858 clk_put(qclk->ce_core_src_clk);
7859 clk_put(qclk->ce_core_clk);
7860 return -EIO;
7861 }
7862
7863 /* Get CE AXI clk */
7864 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
7865 if (IS_ERR(qclk->ce_bus_clk)) {
7866 rc = PTR_ERR(qclk->ce_bus_clk);
7867 pr_err("Unable to get CE BUS interface clk\n");
7868 if (qclk->ce_core_src_clk != NULL)
7869 clk_put(qclk->ce_core_src_clk);
7870 clk_put(qclk->ce_core_clk);
7871 clk_put(qclk->ce_clk);
7872 return -EIO;
7873 }
7874
7875 return rc;
7876}
7877
7878static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
7879{
7880 struct qseecom_clk *qclk;
7881
7882 if (ce == CLK_QSEE)
7883 qclk = &qseecom.qsee;
7884 else
7885 qclk = &qseecom.ce_drv;
7886
7887 if (qclk->ce_clk != NULL) {
7888 clk_put(qclk->ce_clk);
7889 qclk->ce_clk = NULL;
7890 }
7891 if (qclk->ce_core_clk != NULL) {
7892 clk_put(qclk->ce_core_clk);
7893 qclk->ce_core_clk = NULL;
7894 }
7895 if (qclk->ce_bus_clk != NULL) {
7896 clk_put(qclk->ce_bus_clk);
7897 qclk->ce_bus_clk = NULL;
7898 }
7899 if (qclk->ce_core_src_clk != NULL) {
7900 clk_put(qclk->ce_core_src_clk);
7901 qclk->ce_core_src_clk = NULL;
7902 }
7903 qclk->instance = CLK_INVALID;
7904}
7905
7906static int qseecom_retrieve_ce_data(struct platform_device *pdev)
7907{
7908 int rc = 0;
7909 uint32_t hlos_num_ce_hw_instances;
7910 uint32_t disk_encrypt_pipe;
7911 uint32_t file_encrypt_pipe;
Zhen Kongffec45c2017-10-18 14:05:53 -07007912 uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007913 int i;
7914 const int *tbl;
7915 int size;
7916 int entry;
7917 struct qseecom_crypto_info *pfde_tbl = NULL;
7918 struct qseecom_crypto_info *p;
7919 int tbl_size;
7920 int j;
7921 bool old_db = true;
7922 struct qseecom_ce_info_use *pce_info_use;
7923 uint32_t *unit_tbl = NULL;
7924 int total_units = 0;
7925 struct qseecom_ce_pipe_entry *pce_entry;
7926
7927 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
7928 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
7929
7930 if (of_property_read_u32((&pdev->dev)->of_node,
7931 "qcom,qsee-ce-hw-instance",
7932 &qseecom.ce_info.qsee_ce_hw_instance)) {
7933 pr_err("Fail to get qsee ce hw instance information.\n");
7934 rc = -EINVAL;
7935 goto out;
7936 } else {
7937 pr_debug("qsee-ce-hw-instance=0x%x\n",
7938 qseecom.ce_info.qsee_ce_hw_instance);
7939 }
7940
7941 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
7942 "qcom,support-fde");
7943 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
7944 "qcom,support-pfe");
7945
7946 if (!qseecom.support_pfe && !qseecom.support_fde) {
7947 pr_warn("Device does not support PFE/FDE");
7948 goto out;
7949 }
7950
7951 if (qseecom.support_fde)
7952 tbl = of_get_property((&pdev->dev)->of_node,
7953 "qcom,full-disk-encrypt-info", &size);
7954 else
7955 tbl = NULL;
7956 if (tbl) {
7957 old_db = false;
7958 if (size % sizeof(struct qseecom_crypto_info)) {
7959 pr_err("full-disk-encrypt-info tbl size(%d)\n",
7960 size);
7961 rc = -EINVAL;
7962 goto out;
7963 }
7964 tbl_size = size / sizeof
7965 (struct qseecom_crypto_info);
7966
7967 pfde_tbl = kzalloc(size, GFP_KERNEL);
7968 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
7969 total_units = 0;
7970
7971 if (!pfde_tbl || !unit_tbl) {
7972 pr_err("failed to alloc memory\n");
7973 rc = -ENOMEM;
7974 goto out;
7975 }
7976 if (of_property_read_u32_array((&pdev->dev)->of_node,
7977 "qcom,full-disk-encrypt-info",
7978 (u32 *)pfde_tbl, size/sizeof(u32))) {
7979 pr_err("failed to read full-disk-encrypt-info tbl\n");
7980 rc = -EINVAL;
7981 goto out;
7982 }
7983
7984 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7985 for (j = 0; j < total_units; j++) {
7986 if (p->unit_num == *(unit_tbl + j))
7987 break;
7988 }
7989 if (j == total_units) {
7990 *(unit_tbl + total_units) = p->unit_num;
7991 total_units++;
7992 }
7993 }
7994
7995 qseecom.ce_info.num_fde = total_units;
7996 pce_info_use = qseecom.ce_info.fde = kcalloc(
7997 total_units, sizeof(struct qseecom_ce_info_use),
7998 GFP_KERNEL);
7999 if (!pce_info_use) {
8000 pr_err("failed to alloc memory\n");
8001 rc = -ENOMEM;
8002 goto out;
8003 }
8004
8005 for (j = 0; j < total_units; j++, pce_info_use++) {
8006 pce_info_use->unit_num = *(unit_tbl + j);
8007 pce_info_use->alloc = false;
8008 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8009 pce_info_use->num_ce_pipe_entries = 0;
8010 pce_info_use->ce_pipe_entry = NULL;
8011 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8012 if (p->unit_num == pce_info_use->unit_num)
8013 pce_info_use->num_ce_pipe_entries++;
8014 }
8015
8016 entry = pce_info_use->num_ce_pipe_entries;
8017 pce_entry = pce_info_use->ce_pipe_entry =
8018 kcalloc(entry,
8019 sizeof(struct qseecom_ce_pipe_entry),
8020 GFP_KERNEL);
8021 if (pce_entry == NULL) {
8022 pr_err("failed to alloc memory\n");
8023 rc = -ENOMEM;
8024 goto out;
8025 }
8026
8027 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8028 if (p->unit_num == pce_info_use->unit_num) {
8029 pce_entry->ce_num = p->ce;
8030 pce_entry->ce_pipe_pair =
8031 p->pipe_pair;
8032 pce_entry->valid = true;
8033 pce_entry++;
8034 }
8035 }
8036 }
8037 kfree(unit_tbl);
8038 unit_tbl = NULL;
8039 kfree(pfde_tbl);
8040 pfde_tbl = NULL;
8041 }
8042
8043 if (qseecom.support_pfe)
8044 tbl = of_get_property((&pdev->dev)->of_node,
8045 "qcom,per-file-encrypt-info", &size);
8046 else
8047 tbl = NULL;
8048 if (tbl) {
8049 old_db = false;
8050 if (size % sizeof(struct qseecom_crypto_info)) {
8051 pr_err("per-file-encrypt-info tbl size(%d)\n",
8052 size);
8053 rc = -EINVAL;
8054 goto out;
8055 }
8056 tbl_size = size / sizeof
8057 (struct qseecom_crypto_info);
8058
8059 pfde_tbl = kzalloc(size, GFP_KERNEL);
8060 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8061 total_units = 0;
8062 if (!pfde_tbl || !unit_tbl) {
8063 pr_err("failed to alloc memory\n");
8064 rc = -ENOMEM;
8065 goto out;
8066 }
8067 if (of_property_read_u32_array((&pdev->dev)->of_node,
8068 "qcom,per-file-encrypt-info",
8069 (u32 *)pfde_tbl, size/sizeof(u32))) {
8070 pr_err("failed to read per-file-encrypt-info tbl\n");
8071 rc = -EINVAL;
8072 goto out;
8073 }
8074
8075 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8076 for (j = 0; j < total_units; j++) {
8077 if (p->unit_num == *(unit_tbl + j))
8078 break;
8079 }
8080 if (j == total_units) {
8081 *(unit_tbl + total_units) = p->unit_num;
8082 total_units++;
8083 }
8084 }
8085
8086 qseecom.ce_info.num_pfe = total_units;
8087 pce_info_use = qseecom.ce_info.pfe = kcalloc(
8088 total_units, sizeof(struct qseecom_ce_info_use),
8089 GFP_KERNEL);
8090 if (!pce_info_use) {
8091 pr_err("failed to alloc memory\n");
8092 rc = -ENOMEM;
8093 goto out;
8094 }
8095
8096 for (j = 0; j < total_units; j++, pce_info_use++) {
8097 pce_info_use->unit_num = *(unit_tbl + j);
8098 pce_info_use->alloc = false;
8099 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8100 pce_info_use->num_ce_pipe_entries = 0;
8101 pce_info_use->ce_pipe_entry = NULL;
8102 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8103 if (p->unit_num == pce_info_use->unit_num)
8104 pce_info_use->num_ce_pipe_entries++;
8105 }
8106
8107 entry = pce_info_use->num_ce_pipe_entries;
8108 pce_entry = pce_info_use->ce_pipe_entry =
8109 kcalloc(entry,
8110 sizeof(struct qseecom_ce_pipe_entry),
8111 GFP_KERNEL);
8112 if (pce_entry == NULL) {
8113 pr_err("failed to alloc memory\n");
8114 rc = -ENOMEM;
8115 goto out;
8116 }
8117
8118 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8119 if (p->unit_num == pce_info_use->unit_num) {
8120 pce_entry->ce_num = p->ce;
8121 pce_entry->ce_pipe_pair =
8122 p->pipe_pair;
8123 pce_entry->valid = true;
8124 pce_entry++;
8125 }
8126 }
8127 }
8128 kfree(unit_tbl);
8129 unit_tbl = NULL;
8130 kfree(pfde_tbl);
8131 pfde_tbl = NULL;
8132 }
8133
8134 if (!old_db)
8135 goto out1;
8136
8137 if (of_property_read_bool((&pdev->dev)->of_node,
8138 "qcom,support-multiple-ce-hw-instance")) {
8139 if (of_property_read_u32((&pdev->dev)->of_node,
8140 "qcom,hlos-num-ce-hw-instances",
8141 &hlos_num_ce_hw_instances)) {
8142 pr_err("Fail: get hlos number of ce hw instance\n");
8143 rc = -EINVAL;
8144 goto out;
8145 }
8146 } else {
8147 hlos_num_ce_hw_instances = 1;
8148 }
8149
8150 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8151 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8152 MAX_CE_PIPE_PAIR_PER_UNIT);
8153 rc = -EINVAL;
8154 goto out;
8155 }
8156
8157 if (of_property_read_u32_array((&pdev->dev)->of_node,
8158 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8159 hlos_num_ce_hw_instances)) {
8160 pr_err("Fail: get hlos ce hw instance info\n");
8161 rc = -EINVAL;
8162 goto out;
8163 }
8164
8165 if (qseecom.support_fde) {
8166 pce_info_use = qseecom.ce_info.fde =
8167 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8168 if (!pce_info_use) {
8169 pr_err("failed to alloc memory\n");
8170 rc = -ENOMEM;
8171 goto out;
8172 }
8173 /* by default for old db */
8174 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8175 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8176 pce_info_use->alloc = false;
8177 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8178 pce_info_use->ce_pipe_entry = NULL;
8179 if (of_property_read_u32((&pdev->dev)->of_node,
8180 "qcom,disk-encrypt-pipe-pair",
8181 &disk_encrypt_pipe)) {
8182 pr_err("Fail to get FDE pipe information.\n");
8183 rc = -EINVAL;
8184 goto out;
8185 } else {
8186 pr_debug("disk-encrypt-pipe-pair=0x%x",
8187 disk_encrypt_pipe);
8188 }
8189 entry = pce_info_use->num_ce_pipe_entries =
8190 hlos_num_ce_hw_instances;
8191 pce_entry = pce_info_use->ce_pipe_entry =
8192 kcalloc(entry,
8193 sizeof(struct qseecom_ce_pipe_entry),
8194 GFP_KERNEL);
8195 if (pce_entry == NULL) {
8196 pr_err("failed to alloc memory\n");
8197 rc = -ENOMEM;
8198 goto out;
8199 }
8200 for (i = 0; i < entry; i++) {
8201 pce_entry->ce_num = hlos_ce_hw_instance[i];
8202 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8203 pce_entry->valid = 1;
8204 pce_entry++;
8205 }
8206 } else {
8207 pr_warn("Device does not support FDE");
8208 disk_encrypt_pipe = 0xff;
8209 }
8210 if (qseecom.support_pfe) {
8211 pce_info_use = qseecom.ce_info.pfe =
8212 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8213 if (!pce_info_use) {
8214 pr_err("failed to alloc memory\n");
8215 rc = -ENOMEM;
8216 goto out;
8217 }
8218 /* by default for old db */
8219 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8220 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8221 pce_info_use->alloc = false;
8222 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8223 pce_info_use->ce_pipe_entry = NULL;
8224
8225 if (of_property_read_u32((&pdev->dev)->of_node,
8226 "qcom,file-encrypt-pipe-pair",
8227 &file_encrypt_pipe)) {
8228 pr_err("Fail to get PFE pipe information.\n");
8229 rc = -EINVAL;
8230 goto out;
8231 } else {
8232 pr_debug("file-encrypt-pipe-pair=0x%x",
8233 file_encrypt_pipe);
8234 }
8235 entry = pce_info_use->num_ce_pipe_entries =
8236 hlos_num_ce_hw_instances;
8237 pce_entry = pce_info_use->ce_pipe_entry =
8238 kcalloc(entry,
8239 sizeof(struct qseecom_ce_pipe_entry),
8240 GFP_KERNEL);
8241 if (pce_entry == NULL) {
8242 pr_err("failed to alloc memory\n");
8243 rc = -ENOMEM;
8244 goto out;
8245 }
8246 for (i = 0; i < entry; i++) {
8247 pce_entry->ce_num = hlos_ce_hw_instance[i];
8248 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8249 pce_entry->valid = 1;
8250 pce_entry++;
8251 }
8252 } else {
8253 pr_warn("Device does not support PFE");
8254 file_encrypt_pipe = 0xff;
8255 }
8256
8257out1:
8258 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8259 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8260out:
8261 if (rc) {
8262 if (qseecom.ce_info.fde) {
8263 pce_info_use = qseecom.ce_info.fde;
8264 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8265 pce_entry = pce_info_use->ce_pipe_entry;
8266 kfree(pce_entry);
8267 pce_info_use++;
8268 }
8269 }
8270 kfree(qseecom.ce_info.fde);
8271 qseecom.ce_info.fde = NULL;
8272 if (qseecom.ce_info.pfe) {
8273 pce_info_use = qseecom.ce_info.pfe;
8274 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8275 pce_entry = pce_info_use->ce_pipe_entry;
8276 kfree(pce_entry);
8277 pce_info_use++;
8278 }
8279 }
8280 kfree(qseecom.ce_info.pfe);
8281 qseecom.ce_info.pfe = NULL;
8282 }
8283 kfree(unit_tbl);
8284 kfree(pfde_tbl);
8285 return rc;
8286}
8287
8288static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8289 void __user *argp)
8290{
8291 struct qseecom_ce_info_req req;
8292 struct qseecom_ce_info_req *pinfo = &req;
8293 int ret = 0;
8294 int i;
8295 unsigned int entries;
8296 struct qseecom_ce_info_use *pce_info_use, *p;
8297 int total = 0;
8298 bool found = false;
8299 struct qseecom_ce_pipe_entry *pce_entry;
8300
8301 ret = copy_from_user(pinfo, argp,
8302 sizeof(struct qseecom_ce_info_req));
8303 if (ret) {
8304 pr_err("copy_from_user failed\n");
8305 return ret;
8306 }
8307
8308 switch (pinfo->usage) {
8309 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8310 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8311 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8312 if (qseecom.support_fde) {
8313 p = qseecom.ce_info.fde;
8314 total = qseecom.ce_info.num_fde;
8315 } else {
8316 pr_err("system does not support fde\n");
8317 return -EINVAL;
8318 }
8319 break;
8320 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8321 if (qseecom.support_pfe) {
8322 p = qseecom.ce_info.pfe;
8323 total = qseecom.ce_info.num_pfe;
8324 } else {
8325 pr_err("system does not support pfe\n");
8326 return -EINVAL;
8327 }
8328 break;
8329 default:
8330 pr_err("unsupported usage %d\n", pinfo->usage);
8331 return -EINVAL;
8332 }
8333
8334 pce_info_use = NULL;
8335 for (i = 0; i < total; i++) {
8336 if (!p->alloc)
8337 pce_info_use = p;
8338 else if (!memcmp(p->handle, pinfo->handle,
8339 MAX_CE_INFO_HANDLE_SIZE)) {
8340 pce_info_use = p;
8341 found = true;
8342 break;
8343 }
8344 p++;
8345 }
8346
8347 if (pce_info_use == NULL)
8348 return -EBUSY;
8349
8350 pinfo->unit_num = pce_info_use->unit_num;
8351 if (!pce_info_use->alloc) {
8352 pce_info_use->alloc = true;
8353 memcpy(pce_info_use->handle,
8354 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8355 }
8356 if (pce_info_use->num_ce_pipe_entries >
8357 MAX_CE_PIPE_PAIR_PER_UNIT)
8358 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8359 else
8360 entries = pce_info_use->num_ce_pipe_entries;
8361 pinfo->num_ce_pipe_entries = entries;
8362 pce_entry = pce_info_use->ce_pipe_entry;
8363 for (i = 0; i < entries; i++, pce_entry++)
8364 pinfo->ce_pipe_entry[i] = *pce_entry;
8365 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8366 pinfo->ce_pipe_entry[i].valid = 0;
8367
8368 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8369 pr_err("copy_to_user failed\n");
8370 ret = -EFAULT;
8371 }
8372 return ret;
8373}
8374
8375static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8376 void __user *argp)
8377{
8378 struct qseecom_ce_info_req req;
8379 struct qseecom_ce_info_req *pinfo = &req;
8380 int ret = 0;
8381 struct qseecom_ce_info_use *p;
8382 int total = 0;
8383 int i;
8384 bool found = false;
8385
8386 ret = copy_from_user(pinfo, argp,
8387 sizeof(struct qseecom_ce_info_req));
8388 if (ret)
8389 return ret;
8390
8391 switch (pinfo->usage) {
8392 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8393 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8394 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8395 if (qseecom.support_fde) {
8396 p = qseecom.ce_info.fde;
8397 total = qseecom.ce_info.num_fde;
8398 } else {
8399 pr_err("system does not support fde\n");
8400 return -EINVAL;
8401 }
8402 break;
8403 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8404 if (qseecom.support_pfe) {
8405 p = qseecom.ce_info.pfe;
8406 total = qseecom.ce_info.num_pfe;
8407 } else {
8408 pr_err("system does not support pfe\n");
8409 return -EINVAL;
8410 }
8411 break;
8412 default:
8413 pr_err("unsupported usage %d\n", pinfo->usage);
8414 return -EINVAL;
8415 }
8416
8417 for (i = 0; i < total; i++) {
8418 if (p->alloc &&
8419 !memcmp(p->handle, pinfo->handle,
8420 MAX_CE_INFO_HANDLE_SIZE)) {
8421 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8422 p->alloc = false;
8423 found = true;
8424 break;
8425 }
8426 p++;
8427 }
8428 return ret;
8429}
8430
8431static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8432 void __user *argp)
8433{
8434 struct qseecom_ce_info_req req;
8435 struct qseecom_ce_info_req *pinfo = &req;
8436 int ret = 0;
8437 int i;
8438 unsigned int entries;
8439 struct qseecom_ce_info_use *pce_info_use, *p;
8440 int total = 0;
8441 bool found = false;
8442 struct qseecom_ce_pipe_entry *pce_entry;
8443
8444 ret = copy_from_user(pinfo, argp,
8445 sizeof(struct qseecom_ce_info_req));
8446 if (ret)
8447 return ret;
8448
8449 switch (pinfo->usage) {
8450 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8451 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8452 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8453 if (qseecom.support_fde) {
8454 p = qseecom.ce_info.fde;
8455 total = qseecom.ce_info.num_fde;
8456 } else {
8457 pr_err("system does not support fde\n");
8458 return -EINVAL;
8459 }
8460 break;
8461 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8462 if (qseecom.support_pfe) {
8463 p = qseecom.ce_info.pfe;
8464 total = qseecom.ce_info.num_pfe;
8465 } else {
8466 pr_err("system does not support pfe\n");
8467 return -EINVAL;
8468 }
8469 break;
8470 default:
8471 pr_err("unsupported usage %d\n", pinfo->usage);
8472 return -EINVAL;
8473 }
8474
8475 pce_info_use = NULL;
8476 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8477 pinfo->num_ce_pipe_entries = 0;
8478 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8479 pinfo->ce_pipe_entry[i].valid = 0;
8480
8481 for (i = 0; i < total; i++) {
8482
8483 if (p->alloc && !memcmp(p->handle,
8484 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8485 pce_info_use = p;
8486 found = true;
8487 break;
8488 }
8489 p++;
8490 }
8491 if (!pce_info_use)
8492 goto out;
8493 pinfo->unit_num = pce_info_use->unit_num;
8494 if (pce_info_use->num_ce_pipe_entries >
8495 MAX_CE_PIPE_PAIR_PER_UNIT)
8496 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8497 else
8498 entries = pce_info_use->num_ce_pipe_entries;
8499 pinfo->num_ce_pipe_entries = entries;
8500 pce_entry = pce_info_use->ce_pipe_entry;
8501 for (i = 0; i < entries; i++, pce_entry++)
8502 pinfo->ce_pipe_entry[i] = *pce_entry;
8503 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8504 pinfo->ce_pipe_entry[i].valid = 0;
8505out:
8506 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8507 pr_err("copy_to_user failed\n");
8508 ret = -EFAULT;
8509 }
8510 return ret;
8511}
8512
8513/*
8514 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8515 * then whitelist feature is not supported.
8516 */
8517static int qseecom_check_whitelist_feature(void)
8518{
8519 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8520
8521 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8522}
8523
/*
 * qseecom_probe() - driver init: create the /dev/qseecom char device,
 * query the QSEE version, parse device-tree properties, set up CE
 * clocks, optionally notify TZ of the secure-app memory region, and
 * register the bus-scaling client.
 *
 * Unwinds in strict reverse order via the goto labels at the bottom.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qseecom_probe(struct platform_device *pdev)
{
	int rc;
	int i;
	/* Payload for the initial "get QSEE version" SCM call below. */
	uint32_t feature = 10;
	struct device *class_dev;
	struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
	struct qseecom_command_scm_resp resp;
	struct qseecom_ce_info_use *pce_info_use = NULL;

	/* Reset all global driver state before any hardware access. */
	qseecom.qsee_bw_count = 0;
	qseecom.qsee_perf_client = 0;
	qseecom.qsee_sfpb_bw_count = 0;

	qseecom.qsee.ce_core_clk = NULL;
	qseecom.qsee.ce_clk = NULL;
	qseecom.qsee.ce_core_src_clk = NULL;
	qseecom.qsee.ce_bus_clk = NULL;

	qseecom.cumulative_mode = 0;
	qseecom.current_mode = INACTIVE;
	qseecom.support_bus_scaling = false;
	qseecom.support_fde = false;
	qseecom.support_pfe = false;

	qseecom.ce_drv.ce_core_clk = NULL;
	qseecom.ce_drv.ce_clk = NULL;
	qseecom.ce_drv.ce_core_src_clk = NULL;
	qseecom.ce_drv.ce_bus_clk = NULL;
	/* Ioctls are rejected until probe completes successfully. */
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);

	qseecom.app_block_ref_cnt = 0;
	init_waitqueue_head(&qseecom.app_block_wq);
	qseecom.whitelist_support = true;

	/* Char-device plumbing: region -> class -> device -> cdev. */
	rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
	if (rc < 0) {
		pr_err("alloc_chrdev_region failed %d\n", rc);
		return rc;
	}

	driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
	if (IS_ERR(driver_class)) {
		rc = -ENOMEM;
		pr_err("class_create failed %d\n", rc);
		goto exit_unreg_chrdev_region;
	}

	class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
			QSEECOM_DEV);
	if (IS_ERR(class_dev)) {
		/* NOTE(review): rc is still the stale alloc_chrdev_region
		 * value (0) when this pr_err prints it — confirm/fix order.
		 */
		pr_err("class_device_create failed %d\n", rc);
		rc = -ENOMEM;
		goto exit_destroy_class;
	}

	cdev_init(&qseecom.cdev, &qseecom_fops);
	qseecom.cdev.owner = THIS_MODULE;

	rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
	if (rc < 0) {
		pr_err("cdev_add failed %d\n", rc);
		goto exit_destroy_device;
	}

	/* Listener / app / kernel-client bookkeeping lists and locks. */
	INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
	spin_lock_init(&qseecom.registered_listener_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
	spin_lock_init(&qseecom.registered_app_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
	spin_lock_init(&qseecom.registered_kclient_list_lock);
	init_waitqueue_head(&qseecom.send_resp_wq);
	qseecom.send_resp_flag = 0;

	/* Ask TZ which QSEE version is running (SCM svc 6, cmd 3). */
	qseecom.qsee_version = QSEEE_VERSION_00;
	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
		&resp, sizeof(resp));
	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
	if (rc) {
		pr_err("Failed to get QSEE version info %d\n", rc);
		goto exit_del_cdev;
	}
	qseecom.qsee_version = resp.result;
	qseecom.qseos_version = QSEOS_VERSION_14;
	qseecom.commonlib_loaded = false;
	qseecom.commonlib64_loaded = false;
	qseecom.pdev = class_dev;
	/* Create ION msm client */
	qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
	if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
		pr_err("Ion client cannot be created\n");
		rc = -ENOMEM;
		goto exit_del_cdev;
	}

	/* DT path: parse properties, init clocks, notify secure region. */
	if (pdev->dev.of_node) {
		qseecom.pdev->of_node = pdev->dev.of_node;
		qseecom.support_bus_scaling =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,support-bus-scaling");
		/* Builds the FDE/PFE CE pipe tables freed below on error. */
		rc = qseecom_retrieve_ce_data(pdev);
		if (rc)
			goto exit_destroy_ion_client;
		qseecom.appsbl_qseecom_support =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,appsbl-qseecom-support");
		pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
				qseecom.appsbl_qseecom_support);

		qseecom.commonlib64_loaded =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,commonlib64-loaded-by-uefi");
		pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
				qseecom.commonlib64_loaded);
		qseecom.fde_key_size =
			of_property_read_bool((&pdev->dev)->of_node,
						"qcom,fde-key-size");
		qseecom.no_clock_support =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,no-clock-support");
		/*
		 * NOTE(review): these log branches look inverted — the
		 * "handled by other subsystem" message prints when
		 * no_clock_support is false. Logging only; confirm intent.
		 */
		if (!qseecom.no_clock_support) {
			pr_info("qseecom clocks handled by other subsystem\n");
		} else {
			pr_info("no-clock-support=0x%x",
					qseecom.no_clock_support);
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,qsee-reentrancy-support",
					&qseecom.qsee_reentrancy_support)) {
			pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
			qseecom.qsee_reentrancy_support = 0;
		} else {
			pr_warn("qseecom.qsee_reentrancy_support = %d\n",
				qseecom.qsee_reentrancy_support);
		}

		/*
		 * The qseecom bus scaling flag can not be enabled when
		 * crypto clock is not handled by HLOS.
		 */
		if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
			pr_err("support_bus_scaling flag can not be enabled.\n");
			rc = -EINVAL;
			goto exit_destroy_ion_client;
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,ce-opp-freq",
					&qseecom.ce_opp_freq_hz)) {
			pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
			qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
		}
		rc = __qseecom_init_clk(CLK_QSEE);
		if (rc)
			goto exit_destroy_ion_client;

		/*
		 * Separate CE_DRV clocks only when the HLOS CE instance
		 * differs from the QSEE one; otherwise share handles.
		 */
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
				(qseecom.support_pfe || qseecom.support_fde)) {
			rc = __qseecom_init_clk(CLK_CE_DRV);
			if (rc) {
				__qseecom_deinit_clk(CLK_QSEE);
				goto exit_destroy_ion_client;
			}
		} else {
			struct qseecom_clk *qclk;

			qclk = &qseecom.qsee;
			qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
			qseecom.ce_drv.ce_clk = qclk->ce_clk;
			qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
			qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
		}

		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						msm_bus_cl_get_pdata(pdev);
		/*
		 * Tell TZ where the secure-app region lives, unless the
		 * bootloader already protected/notified it.
		 */
		if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
			(!qseecom.is_apps_region_protected &&
			!qseecom.appsbl_qseecom_support)) {
			struct resource *resource = NULL;
			struct qsee_apps_region_info_ireq req;
			struct qsee_apps_region_info_64bit_ireq req_64bit;
			struct qseecom_command_scm_resp resp;
			void *cmd_buf = NULL;
			size_t cmd_len;

			resource = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "secapp-region");
			if (resource) {
				/* 32- vs 64-bit request layout by version. */
				if (qseecom.qsee_version < QSEE_VERSION_40) {
					req.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req.addr = (uint32_t)resource->start;
					req.size = resource_size(resource);
					cmd_buf = (void *)&req;
					cmd_len = sizeof(struct
						qsee_apps_region_info_ireq);
					pr_warn("secure app region addr=0x%x size=0x%x",
							req.addr, req.size);
				} else {
					req_64bit.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req_64bit.addr = resource->start;
					req_64bit.size = resource_size(
							resource);
					cmd_buf = (void *)&req_64bit;
					cmd_len = sizeof(struct
						qsee_apps_region_info_64bit_ireq);
					pr_warn("secure app region addr=0x%llx size=0x%x",
						req_64bit.addr, req_64bit.size);
				}
			} else {
				pr_err("Fail to get secure app region info\n");
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
			/* CE clock must be on for the SCM notification. */
			rc = __qseecom_enable_clk(CLK_QSEE);
			if (rc) {
				pr_err("CLK_QSEE enabling failed (%d)\n", rc);
				rc = -EIO;
				goto exit_deinit_clock;
			}
			rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len,
					&resp, sizeof(resp));
			__qseecom_disable_clk(CLK_QSEE);
			if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
				pr_err("send secapp reg fail %d resp.res %d\n",
					rc, resp.result);
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
		}
		/*
		 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
		 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
		 * Pls add "qseecom.commonlib64_loaded = true" here too.
		 */
		if (qseecom.is_apps_region_protected ||
					qseecom.appsbl_qseecom_support)
			qseecom.commonlib_loaded = true;
	} else {
		/* Non-DT path: bus data comes from board platform_data. */
		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						pdev->dev.platform_data;
	}
	/* Deferred bandwidth scale-down: work item + one-shot timer. */
	if (qseecom.support_bus_scaling) {
		init_timer(&(qseecom.bw_scale_down_timer));
		INIT_WORK(&qseecom.bw_inactive_req_ws,
				qseecom_bw_inactive_req_work);
		qseecom.bw_scale_down_timer.function =
				qseecom_scale_bus_bandwidth_timer_callback;
	}
	qseecom.timer_running = false;
	qseecom.qsee_perf_client = msm_bus_scale_register_client(
					qseecom_platform_support);

	qseecom.whitelist_support = qseecom_check_whitelist_feature();
	pr_warn("qseecom.whitelist_support = %d\n",
				qseecom.whitelist_support);

	/* Non-fatal: driver works without a bus-scaling client. */
	if (!qseecom.qsee_perf_client)
		pr_err("Unable to register bus client\n");

	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return 0;

/* Error unwind, strictly reverse order of the setup above. */
exit_deinit_clock:
	__qseecom_deinit_clk(CLK_QSEE);
	if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
		(qseecom.support_pfe || qseecom.support_fde))
		__qseecom_deinit_clk(CLK_CE_DRV);
exit_destroy_ion_client:
	/* Free CE pipe tables built by qseecom_retrieve_ce_data(). */
	if (qseecom.ce_info.fde) {
		pce_info_use = qseecom.ce_info.fde;
		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.fde);
	}
	if (qseecom.ce_info.pfe) {
		pce_info_use = qseecom.ce_info.pfe;
		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.pfe);
	}
	ion_client_destroy(qseecom.ion_clnt);
exit_del_cdev:
	cdev_del(&qseecom.cdev);
exit_destroy_device:
	device_destroy(driver_class, qseecom_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(qseecom_device_no, 1);
	return rc;
}
8824
/*
 * qseecom_remove() - driver teardown: unload remaining kernel-client
 * apps, unload commonlib, release bus/clock/timer resources, free the
 * CE info tables, and destroy the char-device plumbing.
 *
 * Return: last status from qseecom_unload_app() (0 if none failed).
 */
static int qseecom_remove(struct platform_device *pdev)
{
	struct qseecom_registered_kclient_list *kclient = NULL;
	unsigned long flags = 0;
	int ret = 0;
	int i;
	struct qseecom_ce_pipe_entry *pce_entry;
	struct qseecom_ce_info_use *pce_info_use;

	/* Reject any new ioctl activity while tearing down. */
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);

	/*
	 * NOTE(review): several hazards in this walk to confirm against
	 * a newer upstream revision:
	 *  - list_for_each_entry() is not deletion-safe, yet the body
	 *    may kzfree(kclient) and then the iterator dereferences the
	 *    freed node to advance (list_for_each_entry_safe() is the
	 *    usual fix);
	 *  - mutex_lock(&app_access_lock) can sleep while we hold a
	 *    spinlock with IRQs disabled;
	 *  - normal loop completion falls through into the
	 *    exit_free_kc_handle label below, kzfree-ing through an
	 *    invalid head-derived pointer.
	 */
	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
		list) {
		if (!kclient)
			goto exit_irqrestore;

		/* Break the loop if client handle is NULL */
		if (!kclient->handle)
			goto exit_free_kclient;

		if (list_empty(&kclient->list))
			goto exit_free_kc_handle;

		/* Detach the client, then unload its TZ app. */
		list_del(&kclient->list);
		mutex_lock(&app_access_lock);
		ret = qseecom_unload_app(kclient->handle->dev, false);
		mutex_unlock(&app_access_lock);
		if (!ret) {
			kzfree(kclient->handle->dev);
			kzfree(kclient->handle);
			kzfree(kclient);
		}
	}

/* Cleanup targets for partially-initialized nodes found above. */
exit_free_kc_handle:
	kzfree(kclient->handle);
exit_free_kclient:
	kzfree(kclient);
exit_irqrestore:
	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);

	if (qseecom.qseos_version > QSEEE_VERSION_00)
		qseecom_unload_commonlib_image();

	/* Drop the bus vote, then the client registration. */
	if (qseecom.qsee_perf_client)
		msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
			0);
	if (pdev->dev.platform_data != NULL)
		msm_bus_scale_unregister_client(qseecom.qsee_perf_client);

	/* Stop the deferred bandwidth scale-down machinery. */
	if (qseecom.support_bus_scaling) {
		cancel_work_sync(&qseecom.bw_inactive_req_ws);
		del_timer_sync(&qseecom.bw_scale_down_timer);
	}

	/* Free the per-unit CE pipe tables, then the unit arrays. */
	if (qseecom.ce_info.fde) {
		pce_info_use = qseecom.ce_info.fde;
		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
			pce_entry = pce_info_use->ce_pipe_entry;
			kfree(pce_entry);
			pce_info_use++;
		}
	}
	kfree(qseecom.ce_info.fde);
	if (qseecom.ce_info.pfe) {
		pce_info_use = qseecom.ce_info.pfe;
		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
			pce_entry = pce_info_use->ce_pipe_entry;
			kfree(pce_entry);
			pce_info_use++;
		}
	}
	kfree(qseecom.ce_info.pfe);

	/* Clocks were only set up on the DT probe path. */
	if (pdev->dev.of_node) {
		__qseecom_deinit_clk(CLK_QSEE);
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
			(qseecom.support_pfe || qseecom.support_fde))
			__qseecom_deinit_clk(CLK_CE_DRV);
	}

	ion_client_destroy(qseecom.ion_clnt);

	cdev_del(&qseecom.cdev);

	device_destroy(driver_class, qseecom_device_no);

	class_destroy(driver_class);

	unregister_chrdev_region(qseecom_device_no, 1);

	return ret;
}
8920
8921static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
8922{
8923 int ret = 0;
8924 struct qseecom_clk *qclk;
8925
8926 qclk = &qseecom.qsee;
8927 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
8928 if (qseecom.no_clock_support)
8929 return 0;
8930
8931 mutex_lock(&qsee_bw_mutex);
8932 mutex_lock(&clk_access_lock);
8933
8934 if (qseecom.current_mode != INACTIVE) {
8935 ret = msm_bus_scale_client_update_request(
8936 qseecom.qsee_perf_client, INACTIVE);
8937 if (ret)
8938 pr_err("Fail to scale down bus\n");
8939 else
8940 qseecom.current_mode = INACTIVE;
8941 }
8942
8943 if (qclk->clk_access_cnt) {
8944 if (qclk->ce_clk != NULL)
8945 clk_disable_unprepare(qclk->ce_clk);
8946 if (qclk->ce_core_clk != NULL)
8947 clk_disable_unprepare(qclk->ce_core_clk);
8948 if (qclk->ce_bus_clk != NULL)
8949 clk_disable_unprepare(qclk->ce_bus_clk);
8950 }
8951
8952 del_timer_sync(&(qseecom.bw_scale_down_timer));
8953 qseecom.timer_running = false;
8954
8955 mutex_unlock(&clk_access_lock);
8956 mutex_unlock(&qsee_bw_mutex);
8957 cancel_work_sync(&qseecom.bw_inactive_req_ws);
8958
8959 return 0;
8960}
8961
/*
 * qseecom_resume() - legacy platform resume hook: restore the bus vote
 * recorded in cumulative_mode (capped at HIGH), re-enable any CE clocks
 * that were held at suspend, and re-arm the scale-down timer.
 *
 * On a clock failure the already-enabled clocks are unwound in reverse
 * via the goto chain and -EIO is returned; the driver state is set to
 * READY in all cases.
 */
static int qseecom_resume(struct platform_device *pdev)
{
	int mode = 0;
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qseecom.no_clock_support)
		goto exit;

	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);
	/* Restore the accumulated vote, capped at HIGH. */
	if (qseecom.cumulative_mode >= HIGH)
		mode = HIGH;
	else
		mode = qseecom.cumulative_mode;

	if (qseecom.cumulative_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, mode);
		if (ret)
			pr_err("Fail to scale up bus to %d\n", mode);
		else
			qseecom.current_mode = mode;
	}

	/*
	 * Re-enable clocks in core -> iface -> bus order; each failure
	 * unwinds only the clocks enabled before it.
	 */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_core_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_core_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE core clk\n");
				qclk->clk_access_cnt = 0;
				goto err;
			}
		}
		if (qclk->ce_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE iface clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_clk_err;
			}
		}
		if (qclk->ce_bus_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_bus_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE bus clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_bus_clk_err;
			}
		}
	}

	/* Re-arm the deferred scale-down timer if anything is active. */
	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
		qseecom.bw_scale_down_timer.expires = jiffies +
			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		mod_timer(&(qseecom.bw_scale_down_timer),
				qseecom.bw_scale_down_timer.expires);
		qseecom.timer_running = true;
	}

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	goto exit;

/* Reverse-order unwind of the clock enables above. */
ce_bus_clk_err:
	if (qclk->ce_clk)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	ret = -EIO;
exit:
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return ret;
}
9041
/* Device-tree match table: binds this driver to "qcom,qseecom" nodes. */
static const struct of_device_id qseecom_match[] = {
	{
		.compatible = "qcom,qseecom",
	},
	{}
};
9048
/*
 * Platform driver descriptor: probe/remove plus legacy (non-dev_pm_ops)
 * suspend/resume callbacks, matched via the DT table above.
 */
static struct platform_driver qseecom_plat_driver = {
	.probe = qseecom_probe,
	.remove = qseecom_remove,
	.suspend = qseecom_suspend,
	.resume = qseecom_resume,
	.driver = {
		.name = "qseecom",
		.owner = THIS_MODULE,
		.of_match_table = qseecom_match,
	},
};
9060
/* Module entry point: register the platform driver with the core. */
static int qseecom_init(void)
{
	return platform_driver_register(&qseecom_plat_driver);
}
9065
/* Module exit point: unregister the platform driver. */
static void qseecom_exit(void)
{
	platform_driver_unregister(&qseecom_plat_driver);
}
9070
9071MODULE_LICENSE("GPL v2");
9072MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
9073
9074module_init(qseecom_init);
9075module_exit(qseecom_exit);