blob: 07ae56f4efb514362c01068dbbe1eb48a5ccba20 [file] [log] [blame]
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001/*
2 * QTI Secure Execution Environment Communicator (QSEECOM) driver
3 *
Zhen Kong3d1d92f2018-02-02 17:21:04 -08004 * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/platform_device.h>
23#include <linux/debugfs.h>
24#include <linux/cdev.h>
25#include <linux/uaccess.h>
26#include <linux/sched.h>
27#include <linux/list.h>
28#include <linux/mutex.h>
29#include <linux/io.h>
30#include <linux/msm_ion.h>
31#include <linux/types.h>
32#include <linux/clk.h>
33#include <linux/qseecom.h>
34#include <linux/elf.h>
35#include <linux/firmware.h>
36#include <linux/freezer.h>
37#include <linux/scatterlist.h>
38#include <linux/regulator/consumer.h>
39#include <linux/dma-mapping.h>
40#include <soc/qcom/subsystem_restart.h>
41#include <soc/qcom/scm.h>
42#include <soc/qcom/socinfo.h>
43#include <linux/msm-bus.h>
44#include <linux/msm-bus-board.h>
45#include <soc/qcom/qseecomi.h>
46#include <asm/cacheflush.h>
47#include "qseecom_kernel.h"
48#include <crypto/ice.h>
49#include <linux/delay.h>
50
51#include <linux/compat.h>
52#include "compat_qseecom.h"
53
54#define QSEECOM_DEV "qseecom"
55#define QSEOS_VERSION_14 0x14
56#define QSEEE_VERSION_00 0x400000
57#define QSEE_VERSION_01 0x401000
58#define QSEE_VERSION_02 0x402000
59#define QSEE_VERSION_03 0x403000
60#define QSEE_VERSION_04 0x404000
61#define QSEE_VERSION_05 0x405000
62#define QSEE_VERSION_20 0x800000
63#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
64
65#define QSEE_CE_CLK_100MHZ 100000000
66#define CE_CLK_DIV 1000000
67
68#define QSEECOM_MAX_SG_ENTRY 512
69#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
70 (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
71
72#define QSEECOM_INVALID_KEY_ID 0xff
73
74/* Save partition image hash for authentication check */
75#define SCM_SAVE_PARTITION_HASH_ID 0x01
76
/* Check if enterprise security is activated */
78#define SCM_IS_ACTIVATED_ID 0x02
79
80/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
81#define SCM_MDTP_CIPHER_DIP 0x01
82
83/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
84#define MAX_DIP 0x20000
85
86#define RPMB_SERVICE 0x2000
87#define SSD_SERVICE 0x3000
88
89#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
90#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
91#define TWO 2
92#define QSEECOM_UFS_ICE_CE_NUM 10
93#define QSEECOM_SDCC_ICE_CE_NUM 20
94#define QSEECOM_ICE_FDE_KEY_INDEX 0
95
96#define PHY_ADDR_4G (1ULL<<32)
97
98#define QSEECOM_STATE_NOT_READY 0
99#define QSEECOM_STATE_SUSPEND 1
100#define QSEECOM_STATE_READY 2
101#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
102
/*
 * Default CE info unit is 0 for services that support only a single
 * instance; most services fall into this category.
 */
109#define DEFAULT_CE_INFO_UNIT 0
110#define DEFAULT_NUM_CE_INFO_UNIT 1
111
/* Clock/bandwidth vote identifiers (presumably DFAB/SFPB fabric paths —
 * used as the int32_t argument to qsee_vote_for_clock(); verify there).
 */
enum qseecom_clk_definitions {
	CLK_DFAB = 0,
	CLK_SFPB,
};
116
/*
 * ICE full-disk-encryption key-size flags, encoded at bit position
 * QSEECOM_ICE_FDE_KEY_SIZE_MASK (bit 2) so they can be OR-ed into flag words.
 */
enum qseecom_ice_key_size_type {
	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
};
125
/* Role of a qseecom_dev_handle (its `type` field): TZ app client,
 * listener service, secure service, or not-yet-classified/unavailable.
 */
enum qseecom_client_handle_type {
	QSEECOM_CLIENT_APP = 1,
	QSEECOM_LISTENER_SERVICE,
	QSEECOM_SECURE_SERVICE,
	QSEECOM_GENERIC,
	QSEECOM_UNAVAILABLE_CLIENT_APP,
};
133
/* Crypto-engine clock instance selector (struct qseecom_clk.instance):
 * the QSEE-owned CE or the CE driver's own instance.
 */
enum qseecom_ce_hw_instance {
	CLK_QSEE = 0,
	CLK_CE_DRV,
	CLK_INVALID,
};
139
140static struct class *driver_class;
141static dev_t qseecom_device_no;
142
143static DEFINE_MUTEX(qsee_bw_mutex);
144static DEFINE_MUTEX(app_access_lock);
145static DEFINE_MUTEX(clk_access_lock);
146
/*
 * One shared-memory descriptor passed to TZ. Layout is fixed by the TZ
 * interface; see the bit-layout comment following this struct.
 */
struct sglist_info {
	uint32_t indexAndFlags;	/* flags in bits 31:30, buffer offset in 29:0 */
	uint32_t sizeOrCount;	/* region size, or number of s/g entries */
};
151
/*
 * Bit 31 of `indexAndFlags` indicates whether the request buffer holds one
 * physical address or several. When set, the index locates a single physical
 * address inside the request buffer, and `sizeOrCount` is the size of the
 * memory being shared at that physical address. When clear, the index locates
 * an array of {start, len} pairs (a "scatter/gather list"), and `sizeOrCount`
 * gives the number of entries in that array.
 *
 * Bit 30 selects 64- or 32-bit addressing: when set, physical addresses and
 * scatter/gather entry sizes are 64-bit values; otherwise they are 32-bit.
 *
 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
 */
166#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
167 ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
168
169#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
170
171#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
172
173#define MAKE_WHITELIST_VERSION(major, minor, patch) \
174 (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
175
/*
 * Per-listener bookkeeping; nodes live on
 * qseecom.registered_listener_list_head under registered_listener_list_lock.
 */
struct qseecom_registered_listener_list {
	struct list_head list;
	struct qseecom_register_listener_req svc;	/* registration request as received */
	void *user_virt_sb_base;	/* userspace base of the shared buffer */
	u8 *sb_virt;			/* kernel mapping of the shared buffer */
	phys_addr_t sb_phys;		/* physical address of the shared buffer */
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	wait_queue_head_t rcv_req_wq;	/* woken when rcv_req_flag is set */
	int rcv_req_flag;
	int send_resp_flag;
	bool listener_in_use;
	/* wq for threads blocked on this listener */
	wait_queue_head_t listener_block_app_wq;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;
};
193
/*
 * One loaded TZ application; nodes live on
 * qseecom.registered_app_list_head under registered_app_list_lock.
 */
struct qseecom_registered_app_list {
	struct list_head list;
	u32 app_id;
	u32 ref_cnt;		/* number of clients sharing this loaded app */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;		/* ELF class of the app image (32/64-bit) */
	bool app_blocked;	/* app is blocked waiting on a listener */
	u32 blocked_on_listener_id;
};
203
/* In-kernel client entry; nodes live on qseecom.registered_kclient_list_head. */
struct qseecom_registered_kclient_list {
	struct list_head list;
	struct qseecom_handle *handle;
};
208
/* Usage record for one crypto-engine info unit (FDE or PFE). */
struct qseecom_ce_info_use {
	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
	unsigned int unit_num;			/* DEFAULT_CE_INFO_UNIT for single-instance services */
	unsigned int num_ce_pipe_entries;
	struct qseecom_ce_pipe_entry *ce_pipe_entry;
	bool alloc;				/* entry currently handed out to a client */
	uint32_t type;
};
217
/* Crypto-engine topology: FDE and PFE usage tables for the QSEE CE instance. */
struct ce_hw_usage_info {
	uint32_t qsee_ce_hw_instance;
	uint32_t num_fde;			/* entries in fde[] (full disk encryption) */
	struct qseecom_ce_info_use *fde;
	uint32_t num_pfe;			/* entries in pfe[] (per file encryption) */
	struct qseecom_ce_info_use *pfe;
};
225
/* Clock handles for one CE hardware instance, plus its enable refcount. */
struct qseecom_clk {
	enum qseecom_ce_hw_instance instance;
	struct clk *ce_core_clk;
	struct clk *ce_clk;
	struct clk *ce_core_src_clk;
	struct clk *ce_bus_clk;
	uint32_t clk_access_cnt;	/* guarded by clk_access_lock */
};
234
/*
 * Global driver state; there is a single instance (`qseecom`, below).
 * Each registered-object list is protected by its adjacent spinlock.
 */
struct qseecom_control {
	struct ion_client *ion_clnt; /* Ion client */
	struct list_head registered_listener_list_head;
	spinlock_t registered_listener_list_lock;

	struct list_head registered_app_list_head;
	spinlock_t registered_app_list_lock;

	struct list_head registered_kclient_list_head;
	spinlock_t registered_kclient_list_lock;

	wait_queue_head_t send_resp_wq;
	int send_resp_flag;

	uint32_t qseos_version;
	uint32_t qsee_version;		/* selects 32- vs 64-bit ireq layouts (< QSEE_VERSION_40) */
	struct device *pdev;
	bool whitelist_support;
	bool commonlib_loaded;
	bool commonlib64_loaded;
	struct ce_hw_usage_info ce_info;

	int qsee_bw_count;
	int qsee_sfpb_bw_count;

	uint32_t qsee_perf_client;
	struct qseecom_clk qsee;	/* CLK_QSEE instance */
	struct qseecom_clk ce_drv;	/* CLK_CE_DRV instance */

	bool support_bus_scaling;
	bool support_fde;
	bool support_pfe;
	bool fde_key_size;
	uint32_t cumulative_mode;
	enum qseecom_bandwidth_request_mode current_mode;
	struct timer_list bw_scale_down_timer;
	struct work_struct bw_inactive_req_ws;
	struct cdev cdev;
	bool timer_running;
	bool no_clock_support;
	unsigned int ce_opp_freq_hz;
	bool appsbl_qseecom_support;
	uint32_t qsee_reentrancy_support;

	uint32_t app_block_ref_cnt;
	wait_queue_head_t app_block_wq;
	atomic_t qseecom_state;		/* QSEECOM_STATE_* lifecycle */
	int is_apps_region_protected;	/* parsed from androidboot.keymaster= */
	bool smcinvoke_support;		/* set once TZ accepts the smcinvoke listener SMC */
};
285
/* DMA allocation backing one secure-buffer ion fd of a client. */
struct qseecom_sec_buf_fd_info {
	bool is_sec_buf_fd;
	size_t size;
	void *vbase;		/* kernel virtual address of the DMA buffer */
	dma_addr_t pbase;	/* bus/physical address of the DMA buffer */
};
292
/* 32-bit {buffer, size} memory reference (layout presumably fixed by the
 * TZ interface — verify against the TZ-side definition before changing).
 */
struct qseecom_param_memref {
	uint32_t buffer;
	uint32_t size;
};
297
/* Client-app side of a qseecom_dev_handle: loaded app identity plus the
 * client's shared buffer mappings.
 */
struct qseecom_client_handle {
	u32 app_id;
	u8 *sb_virt;			/* kernel mapping of the shared buffer */
	phys_addr_t sb_phys;		/* physical address of the shared buffer */
	unsigned long user_virt_sb_base; /* userspace base of the shared buffer */
	size_t sb_length;
	struct ion_handle *ihandle; /* Retrieve phy addr */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;
	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
};
309
/* Listener side of a qseecom_dev_handle: just the listener service id. */
struct qseecom_listener_handle {
	u32 id;
};
313
314static struct qseecom_control qseecom;
315
/*
 * Per-open driver state, one per open of the qseecom char device (or per
 * kernel client). The union carries client- or listener-specific data
 * depending on `type`.
 */
struct qseecom_dev_handle {
	enum qseecom_client_handle_type type;
	union {
		struct qseecom_client_handle client;
		struct qseecom_listener_handle listener;
	};
	bool released;
	int abort;
	wait_queue_head_t abort_wq;
	atomic_t ioctl_count;		/* in-flight ioctls on this handle */
	bool perf_enabled;
	bool fast_load_enabled;
	enum qseecom_bandwidth_request_mode mode;
	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
	uint32_t sglist_cnt;		/* valid entries in sglistinfo_ptr */
	bool use_legacy_cmd;
};
333
/* Fixed-size human-readable label for one key-id usage. */
struct qseecom_key_id_usage_desc {
	uint8_t desc[QSEECOM_KEY_ID_SIZE];
};
337
/* Crypto-engine selection triple: info unit, CE number, and pipe pair. */
struct qseecom_crypto_info {
	unsigned int unit_num;
	unsigned int ce;
	unsigned int pipe_pair;
};
343
/*
 * Descriptions for key usages, indexed by usage id (presumably matching
 * the key-management usage enum declared outside this file — verify
 * against that enum before reordering).
 */
static struct qseecom_key_id_usage_desc key_id_array[] = {
	{
		.desc = "Undefined Usage Index",
	},

	{
		.desc = "Full Disk Encryption",
	},

	{
		.desc = "Per File Encryption",
	},

	{
		.desc = "UFS ICE Full Disk Encryption",
	},

	{
		.desc = "SDCC ICE Full Disk Encryption",
	},
};
365
366/* Function proto types */
367static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
368static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
369static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
370static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
371static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
372static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
373 char *cmnlib_name);
374static int qseecom_enable_ice_setup(int usage);
375static int qseecom_disable_ice_setup(int usage);
376static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
377static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
378 void __user *argp);
379static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
380 void __user *argp);
381static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
382 void __user *argp);
383
/*
 * Boot-argument parser for "androidboot.keymaster=": stores the parsed
 * integer in qseecom.is_apps_region_protected. Returns 1 so the option
 * is treated as handled (__setup handler convention).
 */
static int get_qseecom_keymaster_status(char *str)
{
	get_option(&str, &qseecom.is_apps_region_protected);
	return 1;
}
__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
390
/*
 * qseecom_scm_call2() - marshal a legacy QSEECOM request into an ARMv8
 * scm_call2() invocation.
 * @svc_id:	legacy SCM service id (6, SCM_SVC_ES or SCM_SVC_TZSCHEDULER)
 * @tz_cmd_id:	legacy SCM command id (consulted only for svc 6 / SCM_SVC_ES)
 * @req_buf:	request structure; its first uint32_t is the QSEOS command id
 * @resp_buf:	filled from desc.ret[0..2] as a struct qseecom_command_scm_resp
 *
 * Maps each (svc_id, tz_cmd_id / qseos_cmd_id) pair onto the corresponding
 * smc_id and argument descriptor, then issues the SMC. For commands whose
 * payload TZ reads from memory, the data is copied into a freshly allocated
 * bounce buffer that is cache-flushed before the call and freed afterwards.
 *
 * Return: 0 or the scm_call2() status; -EINVAL for NULL buffers or
 * unsupported ids; -ENOMEM if a bounce-buffer allocation fails (note the
 * -ENOMEM paths return without filling *resp_buf).
 */
static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
			const void *req_buf, void *resp_buf)
{
	int ret = 0;
	uint32_t smc_id = 0;
	uint32_t qseos_cmd_id = 0;
	struct scm_desc desc = {0};
	struct qseecom_command_scm_resp *scm_resp = NULL;

	if (!req_buf || !resp_buf) {
		pr_err("Invalid buffer pointer\n");
		return -EINVAL;
	}
	/* Every QSEOS request begins with its command id. */
	qseos_cmd_id = *(uint32_t *)req_buf;
	scm_resp = (struct qseecom_command_scm_resp *)resp_buf;

	switch (svc_id) {
	case 6: {
		/* TZ feature-version query; tz_cmd_id 3 is the only supported op. */
		if (tz_cmd_id == 3) {
			smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
			desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
			desc.args[0] = *(uint32_t *)req_buf;
		} else {
			pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
				svc_id, tz_cmd_id);
			return -EINVAL;
		}
		ret = scm_call2(smc_id, &desc);
		break;
	}
	case SCM_SVC_ES: {
		/* Enterprise-security service: only partition-hash save supported. */
		switch (tz_cmd_id) {
		case SCM_SAVE_PARTITION_HASH_ID: {
			u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
			struct qseecom_save_partition_hash_req *p_hash_req =
				(struct qseecom_save_partition_hash_req *)
				req_buf;
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			/* NOTE(review): memset is redundant after kzalloc; kept as-is. */
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, p_hash_req->digest,
				SHA256_DIGEST_LENGTH);
			/* TZ reads the digest from memory: flush cache before the call. */
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
			desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
			desc.args[0] = p_hash_req->partition_id;
			desc.args[1] = virt_to_phys(tzbuf);
			desc.args[2] = SHA256_DIGEST_LENGTH;
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		default: {
			pr_err("tz_cmd_id %d is not supported by scm_call2\n",
				tz_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /* end of switch (tz_cmd_id) */
		break;
	} /* end of case SCM_SVC_ES */
	case SCM_SVC_TZSCHEDULER: {
		/*
		 * QSEE OS commands. Each case picks the smc id and packs the
		 * descriptor; 32- vs 64-bit request layouts are selected by
		 * qseecom.qsee_version (< QSEE_VERSION_40 means 32-bit).
		 */
		switch (qseos_cmd_id) {
		case QSEOS_APP_START_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_START_ID;
			desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
					(struct qseecom_load_app_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_SHUTDOWN_COMMAND: {
			struct qseecom_unload_app_ireq *req;

			req = (struct qseecom_unload_app_ireq *)req_buf;
			smc_id = TZ_OS_APP_SHUTDOWN_ID;
			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
			desc.args[0] = req->app_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_APP_LOOKUP_COMMAND: {
			struct qseecom_check_app_ireq *req;
			u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			req = (struct qseecom_check_app_ireq *)req_buf;
			pr_debug("Lookup app_name = %s\n", req->app_name);
			/* App name goes to TZ via a flushed bounce buffer. */
			strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_APP_LOOKUP_ID;
			desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = strlen(req->app_name);
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_APP_REGION_NOTIFICATION: {
			struct qsee_apps_region_info_ireq *req;
			struct qsee_apps_region_info_64bit_ireq *req_64bit;

			smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
			desc.arginfo =
				TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qsee_apps_region_info_ireq *)
					req_buf;
				desc.args[0] = req->addr;
				desc.args[1] = req->size;
			} else {
				req_64bit =
				(struct qsee_apps_region_info_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->addr;
				desc.args[1] = req_64bit->size;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
			struct qseecom_load_lib_image_ireq *req;
			struct qseecom_load_lib_image_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_lib_image_ireq *)
					req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_lib_image_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_REGISTER_LISTENER: {
			struct qseecom_register_listener_ireq *req;
			struct qseecom_register_listener_64bit_ireq *req_64bit;

			desc.arginfo =
				TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_register_listener_ireq *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->sb_ptr;
				desc.args[2] = req->sb_len;
			} else {
				req_64bit =
				(struct qseecom_register_listener_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->listener_id;
				desc.args[1] = req_64bit->sb_ptr;
				desc.args[2] = req_64bit->sb_len;
			}
			/*
			 * Try the smcinvoke-capable registration first; on
			 * failure fall back to the legacy id and remember that
			 * smcinvoke is unsupported (read later by the
			 * CONTINUE_BLOCKED_REQ path).
			 */
			qseecom.smcinvoke_support = true;
			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			if (ret) {
				qseecom.smcinvoke_support = false;
				smc_id = TZ_OS_REGISTER_LISTENER_ID;
				__qseecom_reentrancy_check_if_no_app_blocked(
					smc_id);
				ret = scm_call2(smc_id, &desc);
			}
			break;
		}
		case QSEOS_DEREGISTER_LISTENER: {
			struct qseecom_unregister_listener_ireq *req;

			req = (struct qseecom_unregister_listener_ireq *)
				req_buf;
			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
			struct qseecom_client_listener_data_irsp *req;

			req = (struct qseecom_client_listener_data_irsp *)
				req_buf;
			smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
			desc.arginfo =
				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
			desc.args[0] = req->listener_id;
			desc.args[1] = req->status;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
			/* As above, but also passes the whitelist s/g table. */
			struct qseecom_client_listener_data_irsp *req;
			struct qseecom_client_listener_data_64bit_irsp *req_64;

			smc_id =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req =
				(struct qseecom_client_listener_data_irsp *)
					req_buf;
				desc.args[0] = req->listener_id;
				desc.args[1] = req->status;
				desc.args[2] = req->sglistinfo_ptr;
				desc.args[3] = req->sglistinfo_len;
			} else {
				req_64 =
			(struct qseecom_client_listener_data_64bit_irsp *)
					req_buf;
				desc.args[0] = req_64->listener_id;
				desc.args[1] = req_64->status;
				desc.args[2] = req_64->sglistinfo_ptr;
				desc.args[3] = req_64->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
			struct qseecom_load_app_ireq *req;
			struct qseecom_load_app_64bit_ireq *req_64bit;

			smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
			/* Shares the services-image param id (same arg layout). */
			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_load_app_ireq *)req_buf;
				desc.args[0] = req->mdt_len;
				desc.args[1] = req->img_len;
				desc.args[2] = req->phy_addr;
			} else {
				req_64bit =
				(struct qseecom_load_app_64bit_ireq *)req_buf;
				desc.args[0] = req_64bit->mdt_len;
				desc.args[1] = req_64bit->img_len;
				desc.args[2] = req_64bit->phy_addr;
			}
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}

		case QSEOS_CLIENT_SEND_DATA_COMMAND: {
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
			desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
			/* send-data variant carrying the whitelist s/g table. */
			struct qseecom_client_send_data_ireq *req;
			struct qseecom_client_send_data_64bit_ireq *req_64bit;

			smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_client_send_data_ireq *)
					req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->rsp_ptr;
				desc.args[4] = req->rsp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit =
				(struct qseecom_client_send_data_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->rsp_ptr;
				desc.args[4] = req_64bit->rsp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
			struct qseecom_client_send_service_ireq *req;

			req = (struct qseecom_client_send_service_ireq *)
				req_buf;
			smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
			desc.args[0] = req->key_type;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_ERASE_COMMAND: {
			smc_id = TZ_OS_RPMB_ERASE_ID;
			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			break;
		}
		/*
		 * The four key-service commands below share one pattern: copy
		 * the request body (minus the leading command id) into a
		 * flushed bounce buffer and pass {phys, len} to TZ.
		 */
		case QSEOS_GENERATE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_generate_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_GEN_KEY_ID;
			desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_DELETE_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_delete_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_DEL_KEY_ID;
			desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_SET_KEY: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_select_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t),
				(sizeof(struct qseecom_key_select_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
			desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		case QSEOS_UPDATE_KEY_USERINFO: {
			u32 tzbuflen = PAGE_ALIGN(sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t));
			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);

			if (!tzbuf)
				return -ENOMEM;
			memset(tzbuf, 0, tzbuflen);
			memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
				(struct qseecom_key_userinfo_update_ireq) -
				sizeof(uint32_t)));
			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
			smc_id = TZ_OS_KS_UPDATE_KEY_ID;
			desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
			desc.args[0] = virt_to_phys(tzbuf);
			desc.args[1] = tzbuflen;
			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
			ret = scm_call2(smc_id, &desc);
			kzfree(tzbuf);
			break;
		}
		/* GlobalPlatform TEE client API commands follow. */
		case QSEOS_TEE_OPEN_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
			desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
			desc.arginfo =
			TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
				desc.args[5] = req->sglistinfo_ptr;
				desc.args[6] = req->sglistinfo_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
				desc.args[5] = req_64bit->sglistinfo_ptr;
				desc.args[6] = req_64bit->sglistinfo_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_CLOSE_SESSION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
			desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_TEE_REQUEST_CANCELLATION: {
			struct qseecom_qteec_ireq *req;
			struct qseecom_qteec_64bit_ireq *req_64bit;

			smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
			desc.arginfo =
				TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
			if (qseecom.qsee_version < QSEE_VERSION_40) {
				req = (struct qseecom_qteec_ireq *)req_buf;
				desc.args[0] = req->app_id;
				desc.args[1] = req->req_ptr;
				desc.args[2] = req->req_len;
				desc.args[3] = req->resp_ptr;
				desc.args[4] = req->resp_len;
			} else {
				req_64bit = (struct qseecom_qteec_64bit_ireq *)
					req_buf;
				desc.args[0] = req_64bit->app_id;
				desc.args[1] = req_64bit->req_ptr;
				desc.args[2] = req_64bit->req_len;
				desc.args[3] = req_64bit->resp_ptr;
				desc.args[4] = req_64bit->resp_len;
			}
			ret = scm_call2(smc_id, &desc);
			break;
		}
		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
			struct qseecom_continue_blocked_request_ireq *req =
				(struct qseecom_continue_blocked_request_ireq *)
				req_buf;
			/* smc id depends on whether TZ took the smcinvoke
			 * listener registration (see QSEOS_REGISTER_LISTENER).
			 */
			if (qseecom.smcinvoke_support)
				smc_id =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
			else
				smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
			desc.arginfo =
				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
			desc.args[0] = req->app_or_session_id;
			ret = scm_call2(smc_id, &desc);
			break;
		}
		default: {
			pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
				qseos_cmd_id);
			ret = -EINVAL;
			break;
		}
		} /*end of switch (qsee_cmd_id)  */
		break;
	} /*end of case SCM_SVC_TZSCHEDULER*/
	default: {
		pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
			svc_id);
		ret = -EINVAL;
		break;
	}
	} /*end of switch svc_id */
	/* TZ return values arrive in desc.ret[]; mirror them to the caller. */
	scm_resp->result = desc.ret[0];
	scm_resp->resp_type = desc.ret[1];
	scm_resp->data = desc.ret[2];
	pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
		svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
	pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
		scm_resp->result, scm_resp->resp_type, scm_resp->data);
	return ret;
}
1053
1054
1055static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
1056 size_t cmd_len, void *resp_buf, size_t resp_len)
1057{
1058 if (!is_scm_armv8())
1059 return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
1060 resp_buf, resp_len);
1061 else
1062 return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
1063}
1064
1065static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
1066 struct qseecom_register_listener_req *svc)
1067{
1068 struct qseecom_registered_listener_list *ptr;
1069 int unique = 1;
1070 unsigned long flags;
1071
1072 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1073 list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
1074 if (ptr->svc.listener_id == svc->listener_id) {
1075 pr_err("Service id: %u is already registered\n",
1076 ptr->svc.listener_id);
1077 unique = 0;
1078 break;
1079 }
1080 }
1081 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1082 return unique;
1083}
1084
1085static struct qseecom_registered_listener_list *__qseecom_find_svc(
1086 int32_t listener_id)
1087{
1088 struct qseecom_registered_listener_list *entry = NULL;
1089 unsigned long flags;
1090
1091 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1092 list_for_each_entry(entry,
1093 &qseecom.registered_listener_list_head, list) {
1094 if (entry->svc.listener_id == listener_id)
1095 break;
1096 }
1097 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1098
1099 if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
1100 pr_err("Service id: %u is not found\n", listener_id);
1101 return NULL;
1102 }
1103
1104 return entry;
1105}
1106
1107static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
1108 struct qseecom_dev_handle *handle,
1109 struct qseecom_register_listener_req *listener)
1110{
1111 int ret = 0;
1112 struct qseecom_register_listener_ireq req;
1113 struct qseecom_register_listener_64bit_ireq req_64bit;
1114 struct qseecom_command_scm_resp resp;
1115 ion_phys_addr_t pa;
1116 void *cmd_buf = NULL;
1117 size_t cmd_len;
1118
1119 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001120 svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001121 listener->ifd_data_fd);
1122 if (IS_ERR_OR_NULL(svc->ihandle)) {
1123 pr_err("Ion client could not retrieve the handle\n");
1124 return -ENOMEM;
1125 }
1126
1127 /* Get the physical address of the ION BUF */
1128 ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
1129 if (ret) {
1130 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1131 ret);
1132 return ret;
1133 }
1134 /* Populate the structure for sending scm call to load image */
1135 svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
1136 if (IS_ERR_OR_NULL(svc->sb_virt)) {
1137 pr_err("ION memory mapping for listener shared buffer failed\n");
1138 return -ENOMEM;
1139 }
1140 svc->sb_phys = (phys_addr_t)pa;
1141
1142 if (qseecom.qsee_version < QSEE_VERSION_40) {
1143 req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1144 req.listener_id = svc->svc.listener_id;
1145 req.sb_len = svc->sb_length;
1146 req.sb_ptr = (uint32_t)svc->sb_phys;
1147 cmd_buf = (void *)&req;
1148 cmd_len = sizeof(struct qseecom_register_listener_ireq);
1149 } else {
1150 req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
1151 req_64bit.listener_id = svc->svc.listener_id;
1152 req_64bit.sb_len = svc->sb_length;
1153 req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
1154 cmd_buf = (void *)&req_64bit;
1155 cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
1156 }
1157
1158 resp.result = QSEOS_RESULT_INCOMPLETE;
1159
1160 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
1161 &resp, sizeof(resp));
1162 if (ret) {
1163 pr_err("qseecom_scm_call failed with err: %d\n", ret);
1164 return -EINVAL;
1165 }
1166
1167 if (resp.result != QSEOS_RESULT_SUCCESS) {
1168 pr_err("Error SB registration req: resp.result = %d\n",
1169 resp.result);
1170 return -EPERM;
1171 }
1172 return 0;
1173}
1174
1175static int qseecom_register_listener(struct qseecom_dev_handle *data,
1176 void __user *argp)
1177{
1178 int ret = 0;
1179 unsigned long flags;
1180 struct qseecom_register_listener_req rcvd_lstnr;
1181 struct qseecom_registered_listener_list *new_entry;
1182
1183 ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
1184 if (ret) {
1185 pr_err("copy_from_user failed\n");
1186 return ret;
1187 }
1188 if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
1189 rcvd_lstnr.sb_size))
1190 return -EFAULT;
1191
1192 data->listener.id = 0;
1193 if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
1194 pr_err("Service is not unique and is already registered\n");
1195 data->released = true;
1196 return -EBUSY;
1197 }
1198
1199 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
1200 if (!new_entry)
1201 return -ENOMEM;
1202 memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
1203 new_entry->rcv_req_flag = 0;
1204
1205 new_entry->svc.listener_id = rcvd_lstnr.listener_id;
1206 new_entry->sb_length = rcvd_lstnr.sb_size;
1207 new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
1208 if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
1209 pr_err("qseecom_set_sb_memoryfailed\n");
1210 kzfree(new_entry);
1211 return -ENOMEM;
1212 }
1213
1214 data->listener.id = rcvd_lstnr.listener_id;
1215 init_waitqueue_head(&new_entry->rcv_req_wq);
1216 init_waitqueue_head(&new_entry->listener_block_app_wq);
1217 new_entry->send_resp_flag = 0;
1218 new_entry->listener_in_use = false;
1219 spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
1220 list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
1221 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
1222
1223 return ret;
1224}
1225
/*
 * Ioctl backend: deregister this client's listener service.
 *
 * Order matters here: (1) tell QSEOS to deregister, (2) abort and wake
 * any thread blocked receiving requests for this listener, (3) wait for
 * all other in-flight ioctls on this handle to drain, (4) unlink and
 * free the listener entry, (5) release the shared-buffer ION resources.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
{
	int ret = 0;
	unsigned long flags;
	uint32_t unmap_mem = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct qseecom_command_scm_resp resp;
	struct ion_handle *ihandle = NULL; /* Retrieve phy addr */

	/* Ask TZ to deregister the listener first */
	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
	req.listener_id = data->listener.id;
	resp.result = QSEOS_RESULT_INCOMPLETE;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
					sizeof(req), &resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
				ret, data->listener.id);
		return ret;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
				resp.result, data->listener.id);
		return -EPERM;
	}

	/* Abort the handle and wake any blocked receive on this listener */
	data->abort = 1;
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head,
			list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			wake_up_all(&ptr_svc->rcv_req_wq);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/* Wait for all other in-flight ioctls on this handle to finish */
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_freezable(data->abort_wq,
				atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
			return ret;
		}
	}

	/*
	 * Unlink and free the entry; remember the ION handle so the
	 * mapping can be released outside the spinlock.
	 */
	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
	list_for_each_entry(ptr_svc,
			&qseecom.registered_listener_list_head, list) {
		if (ptr_svc->svc.listener_id == data->listener.id) {
			if (ptr_svc->sb_virt) {
				unmap_mem = 1;
				ihandle = ptr_svc->ihandle;
			}
			list_del(&ptr_svc->list);
			kzfree(ptr_svc);
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);

	/* Unmap the memory */
	if (unmap_mem) {
		if (!IS_ERR_OR_NULL(ihandle)) {
			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
			ion_free(qseecom.ion_clnt, ihandle);
		}
	}
	data->released = true;
	return ret;
}
1299
/*
 * Vote the QSEE bus bandwidth to @mode, enabling or disabling the QSEE
 * core clock first when one is present (INACTIVE disables, anything
 * else enables). On a failed bus request the clock change is rolled
 * back.
 */
static int __qseecom_set_msm_bus_request(uint32_t mode)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qclk->ce_core_src_clk != NULL) {
		if (mode == INACTIVE) {
			__qseecom_disable_clk(CLK_QSEE);
		} else {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				pr_err("CLK enabling failed (%d) MODE (%d)\n",
							ret, mode);
		}
	}

	/* Only issue a bus request when the clock step succeeded */
	if ((!ret) && (qseecom.current_mode != mode)) {
		ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, mode);
		if (ret) {
			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
							ret, mode);
			/* Undo the clock transition performed above */
			if (qclk->ce_core_src_clk != NULL) {
				if (mode == INACTIVE) {
					ret = __qseecom_enable_clk(CLK_QSEE);
					if (ret)
						pr_err("CLK enable failed\n");
				} else
					__qseecom_disable_clk(CLK_QSEE);
			}
		}
		/*
		 * NOTE(review): current_mode is updated to @mode even when
		 * the bus request above failed — verify this is intentional,
		 * as it leaves the cached mode out of sync with the bus.
		 */
		qseecom.current_mode = mode;
	}
	return ret;
}
1336
/*
 * Workqueue handler scheduled by the bandwidth scale-down timer: drops
 * the bus vote to INACTIVE if the timer is still marked running, then
 * clears the running flag. Lock order (app_access_lock before
 * qsee_bw_mutex) must match the rest of the driver.
 */
static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
	mutex_lock(&app_access_lock);
	mutex_lock(&qsee_bw_mutex);
	if (qseecom.timer_running)
		__qseecom_set_msm_bus_request(INACTIVE);
	pr_debug("current_mode = %d, cumulative_mode = %d\n",
				qseecom.current_mode, qseecom.cumulative_mode);
	qseecom.timer_running = false;
	mutex_unlock(&qsee_bw_mutex);
	mutex_unlock(&app_access_lock);
}
1349
/*
 * Timer callback for bw_scale_down_timer: defers the actual INACTIVE
 * bus vote to process context (bw_inactive_req_ws), since the work
 * handler takes mutexes that cannot be acquired in timer context.
 */
static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}
1354
1355static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
1356{
1357 struct qseecom_clk *qclk;
1358 int ret = 0;
1359
1360 mutex_lock(&clk_access_lock);
1361 if (ce == CLK_QSEE)
1362 qclk = &qseecom.qsee;
1363 else
1364 qclk = &qseecom.ce_drv;
1365
1366 if (qclk->clk_access_cnt > 2) {
1367 pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
1368 ret = -EINVAL;
1369 goto err_dec_ref_cnt;
1370 }
1371 if (qclk->clk_access_cnt == 2)
1372 qclk->clk_access_cnt--;
1373
1374err_dec_ref_cnt:
1375 mutex_unlock(&clk_access_lock);
1376 return ret;
1377}
1378
1379
1380static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
1381{
1382 int32_t ret = 0;
1383 int32_t request_mode = INACTIVE;
1384
1385 mutex_lock(&qsee_bw_mutex);
1386 if (mode == 0) {
1387 if (qseecom.cumulative_mode > MEDIUM)
1388 request_mode = HIGH;
1389 else
1390 request_mode = qseecom.cumulative_mode;
1391 } else {
1392 request_mode = mode;
1393 }
1394
1395 ret = __qseecom_set_msm_bus_request(request_mode);
1396 if (ret) {
1397 pr_err("set msm bus request failed (%d),request_mode (%d)\n",
1398 ret, request_mode);
1399 goto err_scale_timer;
1400 }
1401
1402 if (qseecom.timer_running) {
1403 ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
1404 if (ret) {
1405 pr_err("Failed to decrease clk ref count.\n");
1406 goto err_scale_timer;
1407 }
1408 del_timer_sync(&(qseecom.bw_scale_down_timer));
1409 qseecom.timer_running = false;
1410 }
1411err_scale_timer:
1412 mutex_unlock(&qsee_bw_mutex);
1413 return ret;
1414}
1415
1416
1417static int qseecom_unregister_bus_bandwidth_needs(
1418 struct qseecom_dev_handle *data)
1419{
1420 int32_t ret = 0;
1421
1422 qseecom.cumulative_mode -= data->mode;
1423 data->mode = INACTIVE;
1424
1425 return ret;
1426}
1427
1428static int __qseecom_register_bus_bandwidth_needs(
1429 struct qseecom_dev_handle *data, uint32_t request_mode)
1430{
1431 int32_t ret = 0;
1432
1433 if (data->mode == INACTIVE) {
1434 qseecom.cumulative_mode += request_mode;
1435 data->mode = request_mode;
1436 } else {
1437 if (data->mode != request_mode) {
1438 qseecom.cumulative_mode -= data->mode;
1439 qseecom.cumulative_mode += request_mode;
1440 data->mode = request_mode;
1441 }
1442 }
1443 return ret;
1444}
1445
1446static int qseecom_perf_enable(struct qseecom_dev_handle *data)
1447{
1448 int ret = 0;
1449
1450 ret = qsee_vote_for_clock(data, CLK_DFAB);
1451 if (ret) {
1452 pr_err("Failed to vote for DFAB clock with err %d\n", ret);
1453 goto perf_enable_exit;
1454 }
1455 ret = qsee_vote_for_clock(data, CLK_SFPB);
1456 if (ret) {
1457 qsee_disable_clock_vote(data, CLK_DFAB);
1458 pr_err("Failed to vote for SFPB clock with err %d\n", ret);
1459 goto perf_enable_exit;
1460 }
1461
1462perf_enable_exit:
1463 return ret;
1464}
1465
1466static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
1467 void __user *argp)
1468{
1469 int32_t ret = 0;
1470 int32_t req_mode;
1471
1472 if (qseecom.no_clock_support)
1473 return 0;
1474
1475 ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
1476 if (ret) {
1477 pr_err("copy_from_user failed\n");
1478 return ret;
1479 }
1480 if (req_mode > HIGH) {
1481 pr_err("Invalid bandwidth mode (%d)\n", req_mode);
1482 return -EINVAL;
1483 }
1484
1485 /*
1486 * Register bus bandwidth needs if bus scaling feature is enabled;
1487 * otherwise, qseecom enable/disable clocks for the client directly.
1488 */
1489 if (qseecom.support_bus_scaling) {
1490 mutex_lock(&qsee_bw_mutex);
1491 ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
1492 mutex_unlock(&qsee_bw_mutex);
1493 } else {
1494 pr_debug("Bus scaling feature is NOT enabled\n");
1495 pr_debug("request bandwidth mode %d for the client\n",
1496 req_mode);
1497 if (req_mode != INACTIVE) {
1498 ret = qseecom_perf_enable(data);
1499 if (ret)
1500 pr_err("Failed to vote for clock with err %d\n",
1501 ret);
1502 } else {
1503 qsee_disable_clock_vote(data, CLK_DFAB);
1504 qsee_disable_clock_vote(data, CLK_SFPB);
1505 }
1506 }
1507 return ret;
1508}
1509
1510static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
1511{
1512 if (qseecom.no_clock_support)
1513 return;
1514
1515 mutex_lock(&qsee_bw_mutex);
1516 qseecom.bw_scale_down_timer.expires = jiffies +
1517 msecs_to_jiffies(duration);
1518 mod_timer(&(qseecom.bw_scale_down_timer),
1519 qseecom.bw_scale_down_timer.expires);
1520 qseecom.timer_running = true;
1521 mutex_unlock(&qsee_bw_mutex);
1522}
1523
1524static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
1525{
1526 if (!qseecom.support_bus_scaling)
1527 qsee_disable_clock_vote(data, CLK_SFPB);
1528 else
1529 __qseecom_add_bw_scale_down_timer(
1530 QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
1531}
1532
1533static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
1534{
1535 int ret = 0;
1536
1537 if (qseecom.support_bus_scaling) {
1538 ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
1539 if (ret)
1540 pr_err("Failed to set bw MEDIUM.\n");
1541 } else {
1542 ret = qsee_vote_for_clock(data, CLK_SFPB);
1543 if (ret)
1544 pr_err("Fail vote for clk SFPB ret %d\n", ret);
1545 }
1546 return ret;
1547}
1548
1549static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
1550 void __user *argp)
1551{
1552 ion_phys_addr_t pa;
1553 int32_t ret;
1554 struct qseecom_set_sb_mem_param_req req;
1555 size_t len;
1556
1557 /* Copy the relevant information needed for loading the image */
1558 if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
1559 return -EFAULT;
1560
1561 if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
1562 (req.sb_len == 0)) {
1563 pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
1564 req.ifd_data_fd, req.sb_len, req.virt_sb_base);
1565 return -EFAULT;
1566 }
1567 if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
1568 req.sb_len))
1569 return -EFAULT;
1570
1571 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07001572 data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001573 req.ifd_data_fd);
1574 if (IS_ERR_OR_NULL(data->client.ihandle)) {
1575 pr_err("Ion client could not retrieve the handle\n");
1576 return -ENOMEM;
1577 }
1578 /* Get the physical address of the ION BUF */
1579 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
1580 if (ret) {
1581
1582 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
1583 ret);
1584 return ret;
1585 }
1586
1587 if (len < req.sb_len) {
1588 pr_err("Requested length (0x%x) is > allocated (%zu)\n",
1589 req.sb_len, len);
1590 return -EINVAL;
1591 }
1592 /* Populate the structure for sending scm call to load image */
1593 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
1594 data->client.ihandle);
1595 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
1596 pr_err("ION memory mapping for client shared buf failed\n");
1597 return -ENOMEM;
1598 }
1599 data->client.sb_phys = (phys_addr_t)pa;
1600 data->client.sb_length = req.sb_len;
1601 data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
1602 return 0;
1603}
1604
1605static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data)
1606{
1607 int ret;
1608
1609 ret = (qseecom.send_resp_flag != 0);
1610 return ret || data->abort;
1611}
1612
1613static int __qseecom_reentrancy_listener_has_sent_rsp(
1614 struct qseecom_dev_handle *data,
1615 struct qseecom_registered_listener_list *ptr_svc)
1616{
1617 int ret;
1618
1619 ret = (ptr_svc->send_resp_flag != 0);
1620 return ret || data->abort;
1621}
1622
1623static int __qseecom_qseos_fail_return_resp_tz(struct qseecom_dev_handle *data,
1624 struct qseecom_command_scm_resp *resp,
1625 struct qseecom_client_listener_data_irsp *send_data_rsp,
1626 struct qseecom_registered_listener_list *ptr_svc,
1627 uint32_t lstnr) {
1628 int ret = 0;
1629
1630 send_data_rsp->status = QSEOS_RESULT_FAILURE;
1631 qseecom.send_resp_flag = 0;
1632 send_data_rsp->qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND;
1633 send_data_rsp->listener_id = lstnr;
1634 if (ptr_svc)
1635 pr_warn("listener_id:%x, lstnr: %x\n",
1636 ptr_svc->svc.listener_id, lstnr);
1637 if (ptr_svc && ptr_svc->ihandle) {
1638 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ptr_svc->ihandle,
1639 ptr_svc->sb_virt, ptr_svc->sb_length,
1640 ION_IOC_CLEAN_INV_CACHES);
1641 if (ret) {
1642 pr_err("cache operation failed %d\n", ret);
1643 return ret;
1644 }
1645 }
1646
1647 if (lstnr == RPMB_SERVICE) {
1648 ret = __qseecom_enable_clk(CLK_QSEE);
1649 if (ret)
1650 return ret;
1651 }
1652 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, send_data_rsp,
1653 sizeof(send_data_rsp), resp, sizeof(*resp));
1654 if (ret) {
1655 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1656 ret, data->client.app_id);
1657 if (lstnr == RPMB_SERVICE)
1658 __qseecom_disable_clk(CLK_QSEE);
1659 return ret;
1660 }
1661 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1662 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1663 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1664 resp->result, data->client.app_id, lstnr);
1665 ret = -EINVAL;
1666 }
1667 if (lstnr == RPMB_SERVICE)
1668 __qseecom_disable_clk(CLK_QSEE);
1669 return ret;
1670}
1671
1672static void __qseecom_clean_listener_sglistinfo(
1673 struct qseecom_registered_listener_list *ptr_svc)
1674{
1675 if (ptr_svc->sglist_cnt) {
1676 memset(ptr_svc->sglistinfo_ptr, 0,
1677 SGLISTINFO_TABLE_SIZE);
1678 ptr_svc->sglist_cnt = 0;
1679 }
1680}
1681
1682static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
1683 struct qseecom_command_scm_resp *resp)
1684{
1685 int ret = 0;
1686 int rc = 0;
1687 uint32_t lstnr;
1688 unsigned long flags;
1689 struct qseecom_client_listener_data_irsp send_data_rsp;
1690 struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
1691 struct qseecom_registered_listener_list *ptr_svc = NULL;
1692 sigset_t new_sigset;
1693 sigset_t old_sigset;
1694 uint32_t status;
1695 void *cmd_buf = NULL;
1696 size_t cmd_len;
1697 struct sglist_info *table = NULL;
1698
1699 while (resp->result == QSEOS_RESULT_INCOMPLETE) {
1700 lstnr = resp->data;
1701 /*
1702 * Wake up blocking lsitener service with the lstnr id
1703 */
1704 spin_lock_irqsave(&qseecom.registered_listener_list_lock,
1705 flags);
1706 list_for_each_entry(ptr_svc,
1707 &qseecom.registered_listener_list_head, list) {
1708 if (ptr_svc->svc.listener_id == lstnr) {
1709 ptr_svc->listener_in_use = true;
1710 ptr_svc->rcv_req_flag = 1;
1711 wake_up_interruptible(&ptr_svc->rcv_req_wq);
1712 break;
1713 }
1714 }
1715 spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
1716 flags);
1717
1718 if (ptr_svc == NULL) {
1719 pr_err("Listener Svc %d does not exist\n", lstnr);
1720 __qseecom_qseos_fail_return_resp_tz(data, resp,
1721 &send_data_rsp, ptr_svc, lstnr);
1722 return -EINVAL;
1723 }
1724
1725 if (!ptr_svc->ihandle) {
1726 pr_err("Client handle is not initialized\n");
1727 __qseecom_qseos_fail_return_resp_tz(data, resp,
1728 &send_data_rsp, ptr_svc, lstnr);
1729 return -EINVAL;
1730 }
1731
1732 if (ptr_svc->svc.listener_id != lstnr) {
1733 pr_warn("Service requested does not exist\n");
1734 __qseecom_qseos_fail_return_resp_tz(data, resp,
Zhen Kongad83f302017-12-09 12:51:36 -08001735 &send_data_rsp, NULL, lstnr);
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07001736 return -ERESTARTSYS;
1737 }
1738 pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
1739
1740 /* initialize the new signal mask with all signals*/
1741 sigfillset(&new_sigset);
1742 /* block all signals */
1743 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
1744
1745 do {
1746 /*
1747 * When reentrancy is not supported, check global
1748 * send_resp_flag; otherwise, check this listener's
1749 * send_resp_flag.
1750 */
1751 if (!qseecom.qsee_reentrancy_support &&
1752 !wait_event_freezable(qseecom.send_resp_wq,
1753 __qseecom_listener_has_sent_rsp(data))) {
1754 break;
1755 }
1756
1757 if (qseecom.qsee_reentrancy_support &&
1758 !wait_event_freezable(qseecom.send_resp_wq,
1759 __qseecom_reentrancy_listener_has_sent_rsp(
1760 data, ptr_svc))) {
1761 break;
1762 }
1763 } while (1);
1764
1765 /* restore signal mask */
1766 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
1767 if (data->abort) {
1768 pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
1769 data->client.app_id, lstnr, ret);
1770 rc = -ENODEV;
1771 status = QSEOS_RESULT_FAILURE;
1772 } else {
1773 status = QSEOS_RESULT_SUCCESS;
1774 }
1775
1776 qseecom.send_resp_flag = 0;
1777 ptr_svc->send_resp_flag = 0;
1778 table = ptr_svc->sglistinfo_ptr;
1779 if (qseecom.qsee_version < QSEE_VERSION_40) {
1780 send_data_rsp.listener_id = lstnr;
1781 send_data_rsp.status = status;
1782 send_data_rsp.sglistinfo_ptr =
1783 (uint32_t)virt_to_phys(table);
1784 send_data_rsp.sglistinfo_len =
1785 SGLISTINFO_TABLE_SIZE;
1786 dmac_flush_range((void *)table,
1787 (void *)table + SGLISTINFO_TABLE_SIZE);
1788 cmd_buf = (void *)&send_data_rsp;
1789 cmd_len = sizeof(send_data_rsp);
1790 } else {
1791 send_data_rsp_64bit.listener_id = lstnr;
1792 send_data_rsp_64bit.status = status;
1793 send_data_rsp_64bit.sglistinfo_ptr =
1794 virt_to_phys(table);
1795 send_data_rsp_64bit.sglistinfo_len =
1796 SGLISTINFO_TABLE_SIZE;
1797 dmac_flush_range((void *)table,
1798 (void *)table + SGLISTINFO_TABLE_SIZE);
1799 cmd_buf = (void *)&send_data_rsp_64bit;
1800 cmd_len = sizeof(send_data_rsp_64bit);
1801 }
1802 if (qseecom.whitelist_support == false)
1803 *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
1804 else
1805 *(uint32_t *)cmd_buf =
1806 QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
1807 if (ptr_svc) {
1808 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
1809 ptr_svc->ihandle,
1810 ptr_svc->sb_virt, ptr_svc->sb_length,
1811 ION_IOC_CLEAN_INV_CACHES);
1812 if (ret) {
1813 pr_err("cache operation failed %d\n", ret);
1814 return ret;
1815 }
1816 }
1817
1818 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
1819 ret = __qseecom_enable_clk(CLK_QSEE);
1820 if (ret)
1821 return ret;
1822 }
1823
1824 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
1825 cmd_buf, cmd_len, resp, sizeof(*resp));
1826 ptr_svc->listener_in_use = false;
1827 __qseecom_clean_listener_sglistinfo(ptr_svc);
1828 if (ret) {
1829 pr_err("scm_call() failed with err: %d (app_id = %d)\n",
1830 ret, data->client.app_id);
1831 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1832 __qseecom_disable_clk(CLK_QSEE);
1833 return ret;
1834 }
1835 if ((resp->result != QSEOS_RESULT_SUCCESS) &&
1836 (resp->result != QSEOS_RESULT_INCOMPLETE)) {
1837 pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
1838 resp->result, data->client.app_id, lstnr);
1839 ret = -EINVAL;
1840 }
1841 if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
1842 __qseecom_disable_clk(CLK_QSEE);
1843
1844 }
1845 if (rc)
1846 return rc;
1847
1848 return ret;
1849}
1850
/*
 * Handle QSEOS_RESULT_BLOCKED_ON_LISTENER for a reentrant TA session:
 * the app/session identified by @data is blocked in TZ on a listener
 * that is busy. Sleep (signals masked, app_access_lock dropped) until
 * that listener is free, then tell TZ to continue the blocked request,
 * repeating while TZ keeps reporting BLOCKED_ON_LISTENER. On exit
 * *resp holds TZ's latest response.
 *
 * @resp:    in/out TZ response; resp->data carries the listener id and
 *           resp->resp_type the session id on entry.
 * @ptr_app: registered-app entry, or NULL to look it up from @data.
 * @data:    client device handle.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int __qseecom_process_reentrancy_blocked_on_listener(
				struct qseecom_command_scm_resp *resp,
				struct qseecom_registered_app_list *ptr_app,
				struct qseecom_dev_handle *data)
{
	struct qseecom_registered_listener_list *list_ptr;
	int ret = 0;
	struct qseecom_continue_blocked_request_ireq ireq;
	struct qseecom_command_scm_resp continue_resp;
	unsigned int session_id;
	sigset_t new_sigset;
	sigset_t old_sigset;
	unsigned long flags;
	bool found_app = false;

	if (!resp || !data) {
		pr_err("invalid resp or data pointer\n");
		ret = -EINVAL;
		goto exit;
	}

	/* find app_id & img_name from list */
	if (!ptr_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
					list) {
			if ((ptr_app->app_id == data->client.app_id) &&
				(!strcmp(ptr_app->app_name,
					data->client.app_name))) {
				found_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
					flags);
		if (!found_app) {
			pr_err("app_id %d (%s) is not found\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -ENOENT;
			goto exit;
		}
	}

	do {
		session_id = resp->resp_type;
		list_ptr = __qseecom_find_svc(resp->data);
		if (!list_ptr) {
			pr_err("Invalid listener ID %d\n", resp->data);
			ret = -ENODATA;
			goto exit;
		}
		ptr_app->blocked_on_listener_id = resp->data;

		pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
			resp->data, list_ptr->listener_in_use,
			session_id, data->client.app_id);

		/* sleep until listener is available */
		sigfillset(&new_sigset);
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		/*
		 * app_access_lock is dropped around the freezable wait so
		 * the listener can make progress; the blocked-app refcount
		 * and app_blocked flag bracket the unlocked window.
		 */
		do {
			qseecom.app_block_ref_cnt++;
			ptr_app->app_blocked = true;
			mutex_unlock(&app_access_lock);
			wait_event_freezable(
				list_ptr->listener_block_app_wq,
				!list_ptr->listener_in_use);
			mutex_lock(&app_access_lock);
			ptr_app->app_blocked = false;
			qseecom.app_block_ref_cnt--;
		} while (list_ptr->listener_in_use);

		sigprocmask(SIG_SETMASK, &old_sigset, NULL);

		ptr_app->blocked_on_listener_id = 0;
		pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
			resp->data, session_id, data->client.app_id);

		/* notify TZ that listener is available */
		ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;

		/* smcinvoke firmware unblocks by session, legacy by app id */
		if (qseecom.smcinvoke_support)
			ireq.app_or_session_id = session_id;
		else
			ireq.app_or_session_id = data->client.app_id;

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
			&ireq, sizeof(ireq),
			&continue_resp, sizeof(continue_resp));
		/*
		 * NOTE(review): when the first call fails and
		 * smcinvoke_support is false, execution falls through and
		 * copies a continue_resp that was never written — verify
		 * this error path is intended.
		 */
		if (ret && qseecom.smcinvoke_support) {
			/* retry with legacy cmd */
			qseecom.smcinvoke_support = false;
			ireq.app_or_session_id = data->client.app_id;
			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				&ireq, sizeof(ireq),
				&continue_resp, sizeof(continue_resp));
			qseecom.smcinvoke_support = true;
			if (ret) {
				pr_err("unblock app %d or session %d fail\n",
					data->client.app_id, session_id);
				goto exit;
			}
		}
		resp->result = continue_resp.result;
		resp->resp_type = continue_resp.resp_type;
		resp->data = continue_resp.data;
		pr_debug("unblock resp = %d\n", resp->result);
	} while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);

	if (resp->result != QSEOS_RESULT_INCOMPLETE) {
		pr_err("Unexpected unblock resp %d\n", resp->result);
		ret = -EINVAL;
	}
exit:
	return ret;
}
1969
/*
 * Reentrancy variant of __qseecom_process_incomplete_cmd(): while QSEE keeps
 * returning QSEOS_RESULT_INCOMPLETE, service the listener request carried in
 * resp->data — wake the matching listener, wait for its response (with
 * app_access_lock dropped and all signals blocked), then resume QSEE with
 * that response via scm_call.
 *
 * Called with app_access_lock held; the lock is held again on return.
 * Returns 0 on success, negative errno on scm failure or client abort.
 */
static int __qseecom_reentrancy_process_incomplete_cmd(
		struct qseecom_dev_handle *data,
		struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	unsigned long flags;
	struct qseecom_client_listener_data_irsp send_data_rsp;
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	sigset_t old_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (ret == 0 && rc == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up the blocking listener service with the lstnr id
		 */
		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
					flags);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
				flags);

		/*
		 * NOTE(review): after a full walk with no match,
		 * list_for_each_entry() leaves ptr_svc pointing at the list
		 * head container rather than NULL, so this check cannot
		 * trigger; the listener_id re-check below is what actually
		 * catches the "listener not registered" case.
		 */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			return -EINVAL;
		}

		if (!ptr_svc->ihandle) {
			pr_err("Client handle is not initialized\n");
			return -EINVAL;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_warn("Service requested does not exist\n");
			return -ERESTARTSYS;
		}
		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);

		/* block all signals */
		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);

		/* unlock mutex btw waking listener and sleep-wait */
		mutex_unlock(&app_access_lock);
		do {
			if (!wait_event_freezable(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);
		/* lock mutex again after resp sent */
		mutex_lock(&app_access_lock);
		ptr_svc->send_resp_flag = 0;
		qseecom.send_resp_flag = 0;

		/* restore signal mask */
		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
		if (data->abort) {
			/* Client is being torn down: report failure to QSEE */
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}
		/* Build the listener response in the layout the current
		 * QSEE version expects (32-bit vs 64-bit ireq).
		 */
		table = ptr_svc->sglistinfo_ptr;
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id = lstnr;
			send_data_rsp.status = status;
			send_data_rsp.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
			send_data_rsp.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id = lstnr;
			send_data_rsp_64bit.status = status;
			send_data_rsp_64bit.sglistinfo_ptr =
				virt_to_phys(table);
			send_data_rsp_64bit.sglistinfo_len =
				SGLISTINFO_TABLE_SIZE;
			dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		if (qseecom.whitelist_support == false)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
		if (ptr_svc) {
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ptr_svc->ihandle,
					ptr_svc->sb_virt, ptr_svc->sb_length,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				return ret;
			}
		}
		if (lstnr == RPMB_SERVICE) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				return ret;
		}

		/* Resume QSEE with the listener's response */
		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
		ptr_svc->listener_in_use = false;
		__qseecom_clean_listener_sglistinfo(ptr_svc);
		wake_up_interruptible(&ptr_svc->listener_block_app_wq);

		if (ret) {
			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
				ret, data->client.app_id);
			goto exit;
		}

		switch (resp->result) {
		case QSEOS_RESULT_BLOCKED_ON_LISTENER:
			/* App got blocked on another listener; unblock it */
			pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
				lstnr, data->client.app_id, resp->data);
			if (lstnr == resp->data) {
				pr_err("lstnr %d should not be blocked!\n",
					lstnr);
				ret = -EINVAL;
				goto exit;
			}
			ret = __qseecom_process_reentrancy_blocked_on_listener(
				resp, NULL, data);
			if (ret) {
				pr_err("failed to process App(%d) %s blocked on listener %d\n",
					data->client.app_id,
					data->client.app_name, resp->data);
				goto exit;
			}
			/* fall through: resp was refreshed by the unblock */
		case QSEOS_RESULT_SUCCESS:
		case QSEOS_RESULT_INCOMPLETE:
			break;
		default:
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
			goto exit;
		}
exit:
		/* 'exit' is inside the loop on purpose: the RPMB clock vote
		 * taken above must be dropped on every iteration/error path.
		 */
		if (lstnr == RPMB_SERVICE)
			__qseecom_disable_clk(CLK_QSEE);

	}
	if (rc)
		return rc;

	return ret;
}
2146
/*
 * QSEE does not support reentrancy of OS-level commands until reentrancy
 * phase 3, and QSEE OS-level scm_call commands fail if any TZ app is
 * blocked.  So, before sending an OS-level scm call, first check whether
 * any app is blocked and, if so, wait until all apps are unblocked.
 */
2153static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
2154{
2155 sigset_t new_sigset, old_sigset;
2156
2157 if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
2158 qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
2159 IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
2160 /* thread sleep until this app unblocked */
2161 while (qseecom.app_block_ref_cnt > 0) {
2162 sigfillset(&new_sigset);
2163 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2164 mutex_unlock(&app_access_lock);
2165 do {
2166 if (!wait_event_freezable(qseecom.app_block_wq,
2167 (qseecom.app_block_ref_cnt == 0)))
2168 break;
2169 } while (1);
2170 mutex_lock(&app_access_lock);
2171 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2172 }
2173 }
2174}
2175
/*
 * The scm_call that sends data will fail if this TA is blocked, or if more
 * than one TA is requesting listener services; so first check whether we
 * need to wait.
 */
2181static void __qseecom_reentrancy_check_if_this_app_blocked(
2182 struct qseecom_registered_app_list *ptr_app)
2183{
2184 sigset_t new_sigset, old_sigset;
2185
2186 if (qseecom.qsee_reentrancy_support) {
2187 while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
2188 /* thread sleep until this app unblocked */
2189 sigfillset(&new_sigset);
2190 sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
2191 mutex_unlock(&app_access_lock);
2192 do {
2193 if (!wait_event_freezable(qseecom.app_block_wq,
2194 (!ptr_app->app_blocked &&
2195 qseecom.app_block_ref_cnt <= 1)))
2196 break;
2197 } while (1);
2198 mutex_lock(&app_access_lock);
2199 sigprocmask(SIG_SETMASK, &old_sigset, NULL);
2200 }
2201 }
2202}
2203
2204static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
2205 uint32_t *app_id)
2206{
2207 int32_t ret;
2208 struct qseecom_command_scm_resp resp;
2209 bool found_app = false;
2210 struct qseecom_registered_app_list *entry = NULL;
2211 unsigned long flags = 0;
2212
2213 if (!app_id) {
2214 pr_err("Null pointer to app_id\n");
2215 return -EINVAL;
2216 }
2217 *app_id = 0;
2218
2219 /* check if app exists and has been registered locally */
2220 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2221 list_for_each_entry(entry,
2222 &qseecom.registered_app_list_head, list) {
2223 if (!strcmp(entry->app_name, req.app_name)) {
2224 found_app = true;
2225 break;
2226 }
2227 }
2228 spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
2229 if (found_app) {
2230 pr_debug("Found app with id %d\n", entry->app_id);
2231 *app_id = entry->app_id;
2232 return 0;
2233 }
2234
2235 memset((void *)&resp, 0, sizeof(resp));
2236
2237 /* SCM_CALL to check if app_id for the mentioned app exists */
2238 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
2239 sizeof(struct qseecom_check_app_ireq),
2240 &resp, sizeof(resp));
2241 if (ret) {
2242 pr_err("scm_call to check if app is already loaded failed\n");
2243 return -EINVAL;
2244 }
2245
2246 if (resp.result == QSEOS_RESULT_FAILURE)
2247 return 0;
2248
2249 switch (resp.resp_type) {
2250 /*qsee returned listener type response */
2251 case QSEOS_LISTENER_ID:
2252 pr_err("resp type is of listener type instead of app");
2253 return -EINVAL;
2254 case QSEOS_APP_ID:
2255 *app_id = resp.data;
2256 return 0;
2257 default:
2258 pr_err("invalid resp type (%d) from qsee",
2259 resp.resp_type);
2260 return -ENODEV;
2261 }
2262}
2263
2264static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
2265{
2266 struct qseecom_registered_app_list *entry = NULL;
2267 unsigned long flags = 0;
2268 u32 app_id = 0;
2269 struct ion_handle *ihandle; /* Ion handle */
2270 struct qseecom_load_img_req load_img_req;
2271 int32_t ret = 0;
2272 ion_phys_addr_t pa = 0;
2273 size_t len;
2274 struct qseecom_command_scm_resp resp;
2275 struct qseecom_check_app_ireq req;
2276 struct qseecom_load_app_ireq load_req;
2277 struct qseecom_load_app_64bit_ireq load_req_64bit;
2278 void *cmd_buf = NULL;
2279 size_t cmd_len;
2280 bool first_time = false;
2281
2282 /* Copy the relevant information needed for loading the image */
2283 if (copy_from_user(&load_img_req,
2284 (void __user *)argp,
2285 sizeof(struct qseecom_load_img_req))) {
2286 pr_err("copy_from_user failed\n");
2287 return -EFAULT;
2288 }
2289
2290 /* Check and load cmnlib */
2291 if (qseecom.qsee_version > QSEEE_VERSION_00) {
2292 if (!qseecom.commonlib_loaded &&
2293 load_img_req.app_arch == ELFCLASS32) {
2294 ret = qseecom_load_commonlib_image(data, "cmnlib");
2295 if (ret) {
2296 pr_err("failed to load cmnlib\n");
2297 return -EIO;
2298 }
2299 qseecom.commonlib_loaded = true;
2300 pr_debug("cmnlib is loaded\n");
2301 }
2302
2303 if (!qseecom.commonlib64_loaded &&
2304 load_img_req.app_arch == ELFCLASS64) {
2305 ret = qseecom_load_commonlib_image(data, "cmnlib64");
2306 if (ret) {
2307 pr_err("failed to load cmnlib64\n");
2308 return -EIO;
2309 }
2310 qseecom.commonlib64_loaded = true;
2311 pr_debug("cmnlib64 is loaded\n");
2312 }
2313 }
2314
2315 if (qseecom.support_bus_scaling) {
2316 mutex_lock(&qsee_bw_mutex);
2317 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
2318 mutex_unlock(&qsee_bw_mutex);
2319 if (ret)
2320 return ret;
2321 }
2322
2323 /* Vote for the SFPB clock */
2324 ret = __qseecom_enable_clk_scale_up(data);
2325 if (ret)
2326 goto enable_clk_err;
2327
2328 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
2329 load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
2330 strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
2331
2332 ret = __qseecom_check_app_exists(req, &app_id);
2333 if (ret < 0)
2334 goto loadapp_err;
2335
2336 if (app_id) {
2337 pr_debug("App id %d (%s) already exists\n", app_id,
2338 (char *)(req.app_name));
2339 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2340 list_for_each_entry(entry,
2341 &qseecom.registered_app_list_head, list){
2342 if (entry->app_id == app_id) {
2343 entry->ref_cnt++;
2344 break;
2345 }
2346 }
2347 spin_unlock_irqrestore(
2348 &qseecom.registered_app_list_lock, flags);
2349 ret = 0;
2350 } else {
2351 first_time = true;
2352 pr_warn("App (%s) does'nt exist, loading apps for first time\n",
2353 (char *)(load_img_req.img_name));
2354 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07002355 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07002356 load_img_req.ifd_data_fd);
2357 if (IS_ERR_OR_NULL(ihandle)) {
2358 pr_err("Ion client could not retrieve the handle\n");
2359 ret = -ENOMEM;
2360 goto loadapp_err;
2361 }
2362
2363 /* Get the physical address of the ION BUF */
2364 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
2365 if (ret) {
2366 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
2367 ret);
2368 goto loadapp_err;
2369 }
2370 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
2371 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
2372 len, load_img_req.mdt_len,
2373 load_img_req.img_len);
2374 ret = -EINVAL;
2375 goto loadapp_err;
2376 }
2377 /* Populate the structure for sending scm call to load image */
2378 if (qseecom.qsee_version < QSEE_VERSION_40) {
2379 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2380 load_req.mdt_len = load_img_req.mdt_len;
2381 load_req.img_len = load_img_req.img_len;
2382 strlcpy(load_req.app_name, load_img_req.img_name,
2383 MAX_APP_NAME_SIZE);
2384 load_req.phy_addr = (uint32_t)pa;
2385 cmd_buf = (void *)&load_req;
2386 cmd_len = sizeof(struct qseecom_load_app_ireq);
2387 } else {
2388 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
2389 load_req_64bit.mdt_len = load_img_req.mdt_len;
2390 load_req_64bit.img_len = load_img_req.img_len;
2391 strlcpy(load_req_64bit.app_name, load_img_req.img_name,
2392 MAX_APP_NAME_SIZE);
2393 load_req_64bit.phy_addr = (uint64_t)pa;
2394 cmd_buf = (void *)&load_req_64bit;
2395 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
2396 }
2397
2398 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
2399 ION_IOC_CLEAN_INV_CACHES);
2400 if (ret) {
2401 pr_err("cache operation failed %d\n", ret);
2402 goto loadapp_err;
2403 }
2404
2405 /* SCM_CALL to load the app and get the app_id back */
2406 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
2407 cmd_len, &resp, sizeof(resp));
2408 if (ret) {
2409 pr_err("scm_call to load app failed\n");
2410 if (!IS_ERR_OR_NULL(ihandle))
2411 ion_free(qseecom.ion_clnt, ihandle);
2412 ret = -EINVAL;
2413 goto loadapp_err;
2414 }
2415
2416 if (resp.result == QSEOS_RESULT_FAILURE) {
2417 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
2418 if (!IS_ERR_OR_NULL(ihandle))
2419 ion_free(qseecom.ion_clnt, ihandle);
2420 ret = -EFAULT;
2421 goto loadapp_err;
2422 }
2423
2424 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
2425 ret = __qseecom_process_incomplete_cmd(data, &resp);
2426 if (ret) {
2427 pr_err("process_incomplete_cmd failed err: %d\n",
2428 ret);
2429 if (!IS_ERR_OR_NULL(ihandle))
2430 ion_free(qseecom.ion_clnt, ihandle);
2431 ret = -EFAULT;
2432 goto loadapp_err;
2433 }
2434 }
2435
2436 if (resp.result != QSEOS_RESULT_SUCCESS) {
2437 pr_err("scm_call failed resp.result unknown, %d\n",
2438 resp.result);
2439 if (!IS_ERR_OR_NULL(ihandle))
2440 ion_free(qseecom.ion_clnt, ihandle);
2441 ret = -EFAULT;
2442 goto loadapp_err;
2443 }
2444
2445 app_id = resp.data;
2446
2447 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2448 if (!entry) {
2449 ret = -ENOMEM;
2450 goto loadapp_err;
2451 }
2452 entry->app_id = app_id;
2453 entry->ref_cnt = 1;
2454 entry->app_arch = load_img_req.app_arch;
2455 /*
2456 * keymaster app may be first loaded as "keymaste" by qseecomd,
2457 * and then used as "keymaster" on some targets. To avoid app
2458 * name checking error, register "keymaster" into app_list and
2459 * thread private data.
2460 */
2461 if (!strcmp(load_img_req.img_name, "keymaste"))
2462 strlcpy(entry->app_name, "keymaster",
2463 MAX_APP_NAME_SIZE);
2464 else
2465 strlcpy(entry->app_name, load_img_req.img_name,
2466 MAX_APP_NAME_SIZE);
2467 entry->app_blocked = false;
2468 entry->blocked_on_listener_id = 0;
2469
2470 /* Deallocate the handle */
2471 if (!IS_ERR_OR_NULL(ihandle))
2472 ion_free(qseecom.ion_clnt, ihandle);
2473
2474 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
2475 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
2476 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
2477 flags);
2478
2479 pr_warn("App with id %u (%s) now loaded\n", app_id,
2480 (char *)(load_img_req.img_name));
2481 }
2482 data->client.app_id = app_id;
2483 data->client.app_arch = load_img_req.app_arch;
2484 if (!strcmp(load_img_req.img_name, "keymaste"))
2485 strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
2486 else
2487 strlcpy(data->client.app_name, load_img_req.img_name,
2488 MAX_APP_NAME_SIZE);
2489 load_img_req.app_id = app_id;
2490 if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
2491 pr_err("copy_to_user failed\n");
2492 ret = -EFAULT;
2493 if (first_time == true) {
2494 spin_lock_irqsave(
2495 &qseecom.registered_app_list_lock, flags);
2496 list_del(&entry->list);
2497 spin_unlock_irqrestore(
2498 &qseecom.registered_app_list_lock, flags);
2499 kzfree(entry);
2500 }
2501 }
2502
2503loadapp_err:
2504 __qseecom_disable_clk_scale_down(data);
2505enable_clk_err:
2506 if (qseecom.support_bus_scaling) {
2507 mutex_lock(&qsee_bw_mutex);
2508 qseecom_unregister_bus_bandwidth_needs(data);
2509 mutex_unlock(&qsee_bw_mutex);
2510 }
2511 return ret;
2512}
2513
2514static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
2515{
2516 int ret = 1; /* Set unload app */
2517
2518 wake_up_all(&qseecom.send_resp_wq);
2519 if (qseecom.qsee_reentrancy_support)
2520 mutex_unlock(&app_access_lock);
2521 while (atomic_read(&data->ioctl_count) > 1) {
2522 if (wait_event_freezable(data->abort_wq,
2523 atomic_read(&data->ioctl_count) <= 1)) {
2524 pr_err("Interrupted from abort\n");
2525 ret = -ERESTARTSYS;
2526 break;
2527 }
2528 }
2529 if (qseecom.qsee_reentrancy_support)
2530 mutex_lock(&app_access_lock);
2531 return ret;
2532}
2533
2534static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
2535{
2536 int ret = 0;
2537
2538 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
2539 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
2540 ion_free(qseecom.ion_clnt, data->client.ihandle);
2541 data->client.ihandle = NULL;
2542 }
2543 return ret;
2544}
2545
/*
 * qseecom_unload_app() - drop @data's reference on its trusted app and,
 * when this is the last reference (or the client crashed), shut the app
 * down in QSEE via the APP_SHUTDOWN scm call and remove it from the
 * local registered-app list.  The keymaster app is never unloaded from
 * TZ.  Always releases the client's ION shared buffer and marks @data
 * released.  Returns 0 on success, negative errno on failure.
 */
static int qseecom_unload_app(struct qseecom_dev_handle *data,
				bool app_crash)
{
	unsigned long flags;
	unsigned long flags1;
	int ret = 0;
	struct qseecom_command_scm_resp resp;
	struct qseecom_registered_app_list *ptr_app = NULL;
	bool unload = false;
	bool found_app = false;
	bool found_dead_app = false;

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	/* keymaster stays resident in TZ; only clean up local state */
	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
		pr_debug("Do not unload keymaster app from tz\n");
		goto unload_exit;
	}

	/* Drain other ioctls, then wait until no app blocks OS-level calls */
	__qseecom_cleanup_app(data);
	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);

	if (data->client.app_id > 0) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
									list) {
			if (ptr_app->app_id == data->client.app_id) {
				if (!strcmp((void *)ptr_app->app_name,
					(void *)data->client.app_name)) {
					found_app = true;
					/* Don't force-unload a blocked app */
					if (ptr_app->app_blocked)
						app_crash = false;
					if (app_crash || ptr_app->ref_cnt == 1)
						unload = true;
					break;
				}
				/* Same id, different name: stale entry */
				found_dead_app = true;
				break;
			}
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags);
		if (found_app == false && found_dead_app == false) {
			pr_err("Cannot find app with id = %d (%s)\n",
				data->client.app_id,
				(char *)data->client.app_name);
			ret = -EINVAL;
			goto unload_exit;
		}
	}

	if (found_dead_app)
		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
			(char *)data->client.app_name);

	if (unload) {
		struct qseecom_unload_app_ireq req;
		/* Populate the structure for the app shutdown scm call */
		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
		req.app_id = data->client.app_id;

		/* SCM_CALL to unload the app */
		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(struct qseecom_unload_app_ireq),
				&resp, sizeof(resp));
		if (ret) {
			pr_err("scm_call to unload app (id = %d) failed\n",
								req.app_id);
			ret = -EFAULT;
			goto unload_exit;
		} else {
			pr_warn("App id %d now unloaded\n", req.app_id);
		}
		if (resp.result == QSEOS_RESULT_FAILURE) {
			pr_err("app (%d) unload_failed!!\n",
					data->client.app_id);
			ret = -EFAULT;
			goto unload_exit;
		}
		if (resp.result == QSEOS_RESULT_SUCCESS)
			pr_debug("App (%d) is unloaded!!\n",
					data->client.app_id);
		/* INCOMPLETE: QSEE needs listener service before finishing */
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd fail err: %d\n",
									ret);
				goto unload_exit;
			}
		}
	}

	/* Update (or remove) the local bookkeeping entry */
	if (found_app) {
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
		if (app_crash) {
			ptr_app->ref_cnt = 0;
			pr_debug("app_crash: ref_count = 0\n");
		} else {
			if (ptr_app->ref_cnt == 1) {
				ptr_app->ref_cnt = 0;
				pr_debug("ref_count set to 0\n");
			} else {
				ptr_app->ref_cnt--;
				pr_debug("Can't unload app(%d) inuse\n",
					ptr_app->app_id);
			}
		}
		if (unload) {
			list_del(&ptr_app->list);
			kzfree(ptr_app);
		}
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags1);
	}
unload_exit:
	qseecom_unmap_ion_allocated_memory(data);
	data->released = true;
	return ret;
}
2668
2669static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
2670 unsigned long virt)
2671{
2672 return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
2673}
2674
2675static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
2676 unsigned long virt)
2677{
2678 return (uintptr_t)data->client.sb_virt +
2679 (virt - data->client.user_virt_sb_base);
2680}
2681
2682int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
2683 struct qseecom_send_svc_cmd_req *req_ptr,
2684 struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
2685{
2686 int ret = 0;
2687 void *req_buf = NULL;
2688
2689 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2690 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2691 req_ptr, send_svc_ireq_ptr);
2692 return -EINVAL;
2693 }
2694
2695 /* Clients need to ensure req_buf is at base offset of shared buffer */
2696 if ((uintptr_t)req_ptr->cmd_req_buf !=
2697 data_ptr->client.user_virt_sb_base) {
2698 pr_err("cmd buf not pointing to base offset of shared buffer\n");
2699 return -EINVAL;
2700 }
2701
2702 if (data_ptr->client.sb_length <
2703 sizeof(struct qseecom_rpmb_provision_key)) {
2704 pr_err("shared buffer is too small to hold key type\n");
2705 return -EINVAL;
2706 }
2707 req_buf = data_ptr->client.sb_virt;
2708
2709 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2710 send_svc_ireq_ptr->key_type =
2711 ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
2712 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2713 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2714 data_ptr, (uintptr_t)req_ptr->resp_buf));
2715 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2716
2717 return ret;
2718}
2719
2720int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
2721 struct qseecom_send_svc_cmd_req *req_ptr,
2722 struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
2723{
2724 int ret = 0;
2725 uint32_t reqd_len_sb_in = 0;
2726
2727 if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
2728 pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
2729 req_ptr, send_svc_ireq_ptr);
2730 return -EINVAL;
2731 }
2732
2733 reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
2734 if (reqd_len_sb_in > data_ptr->client.sb_length) {
2735 pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
2736 pr_err("Required: %u, Available: %zu\n",
2737 reqd_len_sb_in, data_ptr->client.sb_length);
2738 return -ENOMEM;
2739 }
2740
2741 send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
2742 send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
2743 send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2744 data_ptr, (uintptr_t)req_ptr->resp_buf));
2745 send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
2746
2747 send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
2748 data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
2749
2750
2751 return ret;
2752}
2753
/*
 * Validate a SEND_SVC_CMD request from user space: the client must have
 * an initialized shared buffer, both the command and response buffers
 * must lie entirely inside it with non-zero lengths, and none of the
 * pointer/length arithmetic may overflow.
 * Returns 0 if the request is safe to process, -EINVAL or -ENOMEM
 * otherwise.
 */
static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_svc_cmd_req *req)
{
	if (!req || !req->resp_buf || !req->cmd_req_buf) {
		pr_err("req or cmd buffer or response buffer is null\n");
		return -EINVAL;
	}

	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}

	if (data->client.sb_virt == NULL) {
		pr_err("sb_virt null\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base == 0) {
		pr_err("user_virt_sb_base is null\n");
		return -EINVAL;
	}

	if (data->client.sb_length == 0) {
		pr_err("sb_length is 0\n");
		return -EINVAL;
	}

	/* Both buffers must start inside the shared buffer */
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base)  ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* Reject arithmetic overflow before doing the sum/range checks */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* Both buffers must also END inside the shared buffer */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
2838
/*
 * qseecom_send_service_cmd() - handle the SEND_SVC_CMD ioctl: forward an
 * RPMB or FSM service command held in the client's shared buffer to the
 * matching QSEE service via scm call, and surface the result back to the
 * client.  Votes for bus bandwidth / clocks around the call and performs
 * the required cache maintenance on the shared buffer before and after.
 * Returns 0 on success, negative errno otherwise.
 */
static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	int ret = 0;
	struct qseecom_client_send_service_ireq send_svc_ireq;
	struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
	struct qseecom_command_scm_resp resp;
	struct qseecom_send_svc_cmd_req req;
	void *send_req_ptr;
	size_t req_buf_size;

	if (copy_from_user(&req,
				(void __user *)argp,
				sizeof(req))) {
		pr_err("copy_from_user failed\n");
		return -EFAULT;
	}

	if (__validate_send_service_cmd_inputs(data, &req))
		return -EINVAL;

	data->type = QSEECOM_SECURE_SERVICE;

	/* Build the QSEE request that matches the requested service */
	switch (req.cmd_id) {
	case QSEOS_RPMB_PROVISION_KEY_COMMAND:
	case QSEOS_RPMB_ERASE_COMMAND:
	case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
		send_req_ptr = &send_svc_ireq;
		req_buf_size = sizeof(send_svc_ireq);
		if (__qseecom_process_rpmb_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	case QSEOS_FSM_LTEOTA_REQ_CMD:
	case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
	case QSEOS_FSM_IKE_REQ_CMD:
	case QSEOS_FSM_IKE_REQ_RSP_CMD:
	case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
	case QSEOS_FSM_OEM_FUSE_READ_ROW:
	case QSEOS_FSM_ENCFS_REQ_CMD:
	case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
		send_req_ptr = &send_fsm_key_svc_ireq;
		req_buf_size = sizeof(send_fsm_key_svc_ireq);
		if (__qseecom_process_fsm_key_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	default:
		pr_err("Unsupported cmd_id %d\n", req.cmd_id);
		return -EINVAL;
	}

	/* Vote for bus bandwidth / clocks before the scm call */
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(HIGH);
		if (ret) {
			pr_err("Fail to set bw HIGH\n");
			return ret;
		}
	} else {
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clocks with err %d\n", ret);
			goto exit;
		}
	}

	/* Flush the request out of the CPU caches so QSEE sees it */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				(const void *)send_req_ptr,
				req_buf_size, &resp, sizeof(resp));
	if (ret) {
		pr_err("qseecom_scm_call failed with err: %d\n", ret);
		if (!qseecom.support_bus_scaling) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		} else {
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		}
		goto exit;
	}
	/* Invalidate so the CPU sees the response QSEE wrote */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}
	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		pr_debug("qseos_result_incomplete\n");
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd fail with result: %d\n",
				resp.result);
		}
		if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
			/* Report the RPMB key status back to the caller */
			pr_warn("RPMB key status is 0x%x\n", resp.result);
			if (put_user(resp.result,
				(uint32_t __user *)req.resp_buf)) {
				ret = -EINVAL;
				goto exit;
			}
			ret = 0;
		}
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed with resp.result: %d\n", resp.result);
		ret = -EINVAL;
		break;
	default:
		pr_err("Response result %d not supported\n",
				resp.result);
		ret = -EINVAL;
		break;
	}
	/* Drop the clock/bandwidth votes taken above */
	if (!qseecom.support_bus_scaling) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	} else {
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
	}

exit:
	return ret;
}
2976
/*
 * Validate a SEND_CMD request from user space: both the command and
 * response buffers must lie entirely inside the client's shared buffer
 * and none of the pointer/length arithmetic may overflow.  Unlike the
 * service-cmd validator, resp_len == 0 is tolerated by the first check
 * (NOTE(review): a NULL resp_buf is still rejected by the later range
 * check — confirm whether that combination is ever legal for callers).
 * Returns 0 if the request is safe to process, -EINVAL or -ENOMEM
 * otherwise.
 */
static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)

{
	if (!data || !data->client.ihandle) {
		pr_err("Client or client handle is not initialized\n");
		return -EINVAL;
	}
	if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
						(req->cmd_req_buf == NULL)) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* Both buffers must start inside the shared buffer */
	if (((uintptr_t)req->cmd_req_buf <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
			data->client.user_virt_sb_base)  ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	/* Reject arithmetic overflow before doing the sum/range checks */
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* Both buffers must also END inside the shared buffer */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
3046
/*
 * Handle the SCM response for a command sent while QSEE reentrancy support
 * is enabled.  If the TA is blocked on a listener, wait for it to unblock
 * and then resume the (still-incomplete) command; plain INCOMPLETE results
 * are resumed directly.  Returns 0 on success or a negative errno.
 */
int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
			struct qseecom_registered_app_list *ptr_app,
			struct qseecom_dev_handle *data)
{
	int ret = 0;

	switch (resp->result) {
	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
		pr_warn("App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name,
			resp->data);
		ret = __qseecom_process_reentrancy_blocked_on_listener(
			resp, ptr_app, data);
		if (ret) {
			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name, resp->data);
			return ret;
		}
		/*
		 * Intentional fall through: once the listener unblocks the
		 * app, the original command is still INCOMPLETE and must be
		 * resumed exactly like the case below.
		 */

	case QSEOS_RESULT_INCOMPLETE:
		/* Mark the app blocked while the incomplete cmd is resumed,
		 * so concurrent senders to the same app wait on app_block_wq.
		 */
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
		wake_up_interruptible(&qseecom.app_block_wq);
		if (ret)
			pr_err("process_incomplete_cmd failed err: %d\n",
				ret);
		return ret;
	case QSEOS_RESULT_SUCCESS:
		return ret;
	default:
		pr_err("Response result %d not supported\n",
						resp->result);
		return -EINVAL;
	}
}
3085
/*
 * Send a client command to its loaded TA via an SCM call.
 *
 * Builds the 32-bit or 64-bit ireq depending on the QSEE version, flushes
 * the sglistinfo table and the shared buffer to memory, issues the SCM
 * call, then (on every path after the call) invalidates the shared buffer
 * so the CPU sees the TA's response.  Returns 0 on success or a negative
 * errno.  The caller must have validated @req with
 * __validate_send_cmd_inputs().
 */
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)
{
	int ret = 0;
	int ret2 = 0;
	u32 reqd_len_sb_in = 0;
	struct qseecom_client_send_data_ireq send_data_req = {0};
	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
	struct qseecom_command_scm_resp resp;
	unsigned long flags;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;

	/* Overflow-checked by __validate_send_cmd_inputs() beforehand. */
	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
							list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);

	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* Legacy QSEE: 32-bit ireq with physical addresses. */
		send_data_req.app_id = data->client.app_id;
		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->cmd_req_buf));
		send_data_req.req_len = req->cmd_req_len;
		send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
					data, (uintptr_t)req->resp_buf));
		send_data_req.rsp_len = req->resp_len;
		send_data_req.sglistinfo_ptr =
				(uint32_t)virt_to_phys(table);
		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* TZ reads the table by physical address; flush it out. */
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req;
		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
	} else {
		send_data_req_64bit.app_id = data->client.app_id;
		send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->cmd_req_buf);
		send_data_req_64bit.req_len = req->cmd_req_len;
		send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
					(uintptr_t)req->resp_buf);
		send_data_req_64bit.rsp_len = req->resp_len;
		/* check if 32bit app's phys_addr region is under 4GB.*/
		if ((data->client.app_arch == ELFCLASS32) &&
			((send_data_req_64bit.req_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
			(send_data_req_64bit.rsp_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
				data->client.app_name,
				send_data_req_64bit.req_ptr,
				send_data_req_64bit.req_len,
				send_data_req_64bit.rsp_ptr,
				send_data_req_64bit.rsp_len);
			return -EFAULT;
		}
		send_data_req_64bit.sglistinfo_ptr =
				(uint64_t)virt_to_phys(table);
		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&send_data_req_64bit;
		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
	}

	/* The command id is the first uint32_t field of either ireq. */
	if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
	else
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;

	/* Make the client's request visible to TZ before the SCM call. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
			ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
					ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/*
	 * Invalidate the whole shared buffer even on error so the CPU never
	 * reads stale cache lines over the TA's response area.
	 */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret2) {
		pr_err("cache operation failed %d\n", ret2);
		return ret2;
	}
	return ret;
}
3223
3224static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
3225{
3226 int ret = 0;
3227 struct qseecom_send_cmd_req req;
3228
3229 ret = copy_from_user(&req, argp, sizeof(req));
3230 if (ret) {
3231 pr_err("copy_from_user failed\n");
3232 return ret;
3233 }
3234
3235 if (__validate_send_cmd_inputs(data, &req))
3236 return -EINVAL;
3237
3238 ret = __qseecom_send_cmd(data, &req);
3239
3240 if (ret)
3241 return ret;
3242
3243 return ret;
3244}
3245
3246int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
3247 struct qseecom_send_modfd_listener_resp *lstnr_resp,
3248 struct qseecom_dev_handle *data, int i) {
3249
3250 if ((data->type != QSEECOM_LISTENER_SERVICE) &&
3251 (req->ifd_data[i].fd > 0)) {
3252 if ((req->cmd_req_len < sizeof(uint32_t)) ||
3253 (req->ifd_data[i].cmd_buf_offset >
3254 req->cmd_req_len - sizeof(uint32_t))) {
3255 pr_err("Invalid offset (req len) 0x%x\n",
3256 req->ifd_data[i].cmd_buf_offset);
3257 return -EINVAL;
3258 }
3259 } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
3260 (lstnr_resp->ifd_data[i].fd > 0)) {
3261 if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
3262 (lstnr_resp->ifd_data[i].cmd_buf_offset >
3263 lstnr_resp->resp_len - sizeof(uint32_t))) {
3264 pr_err("Invalid offset (lstnr resp len) 0x%x\n",
3265 lstnr_resp->ifd_data[i].cmd_buf_offset);
3266 return -EINVAL;
3267 }
3268 }
3269 return 0;
3270}
3271
/*
 * Patch (or, when @cleanup is true, un-patch) ion-fd backed physical
 * addresses into a modfd command/response buffer, 32-bit sg-entry format.
 *
 * For every used ifd_data slot: import the ion buffer, write either a
 * single 32-bit phys addr (nents == 1) or an array of qseecom_sg_entry
 * at cmd_buf_offset, perform the matching cache operation, and record the
 * slot in the sglistinfo table for the whitelist SCM command.
 * Returns 0 on success, -ENOMEM on any per-fd failure.
 */
static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* @msg is a modfd request for apps, a modfd response for listeners. */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* Resolve the fd and the patch location for this slot. */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
				(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_err("Num of scattered entries");
			pr_err(" (%d) is greater than max supported %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: patch one 32-bit phys addr. */
			uint32_t *update;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				/*
				 * Check if sg list phy add region is under 4GB
				 */
				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
					(!cleanup) &&
					((uint64_t)sg_dma_address(sg_ptr->sgl)
					>= PHY_ADDR_4G - sg->length)) {
					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
						data->client.app_name,
						&(sg_dma_address(sg_ptr->sgl)),
						sg->length);
					goto err;
				}
				update = (uint32_t *) field;
				*update = cleanup ? 0 :
					(uint32_t)sg_dma_address(sg_ptr->sgl);
			} else {
				pr_err("QSEE app arch %u is not supported\n",
					data->client.app_arch);
				goto err;
			}
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: patch an sg-entry array. */
			struct qseecom_sg_entry *update;
			int j = 0;

			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
					(req->cmd_req_len -
					SG_ENTRY_SZ * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
						SG_ENTRY_SZ * sg_ptr->nents) ||
				(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
						SG_ENTRY_SZ * sg_ptr->nents))) {
					goto err;
				}
			}
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				update = (struct qseecom_sg_entry *)field;
				for (j = 0; j < sg_ptr->nents; j++) {
					/*
					 * Check if sg list PA is under 4GB
					 */
					if ((qseecom.qsee_version >=
						QSEE_VERSION_40) &&
						(!cleanup) &&
						((uint64_t)(sg_dma_address(sg))
						>= PHY_ADDR_4G - sg->length)) {
						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
							data->client.app_name,
							&(sg_dma_address(sg)),
							sg->length);
						goto err;
					}
					update->phys_addr = cleanup ? 0 :
						(uint32_t)sg_dma_address(sg);
					update->len = cleanup ? 0 : sg->length;
					update++;
					len += sg->length;
					sg = sg_next(sg);
				}
			} else {
				pr_err("QSEE app arch %u is not supported\n",
					data->client.app_arch);
				goto err;
			}
		}

		if (cleanup) {
			/* Post-command: invalidate so CPU sees TA writes. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Pre-command: flush so the TA sees CPU writes. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* Record this slot for the whitelist sglist table. */
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				/* Offset is relative to the listener's
				 * shared buffer, not the response pointer.
				 */
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3488
3489static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
3490 char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
3491{
3492 struct scatterlist *sg = sg_ptr->sgl;
3493 struct qseecom_sg_entry_64bit *sg_entry;
3494 struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
3495 void *buf;
3496 uint i;
3497 size_t size;
3498 dma_addr_t coh_pmem;
3499
3500 if (fd_idx >= MAX_ION_FD) {
3501 pr_err("fd_idx [%d] is invalid\n", fd_idx);
3502 return -ENOMEM;
3503 }
3504 buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
3505 memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
3506 /* Allocate a contiguous kernel buffer */
3507 size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
3508 size = (size + PAGE_SIZE) & PAGE_MASK;
3509 buf = dma_alloc_coherent(qseecom.pdev,
3510 size, &coh_pmem, GFP_KERNEL);
3511 if (buf == NULL) {
3512 pr_err("failed to alloc memory for sg buf\n");
3513 return -ENOMEM;
3514 }
3515 /* update qseecom_sg_list_buf_hdr_64bit */
3516 buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
3517 buf_hdr->new_buf_phys_addr = coh_pmem;
3518 buf_hdr->nents_total = sg_ptr->nents;
3519 /* save the left sg entries into new allocated buf */
3520 sg_entry = (struct qseecom_sg_entry_64bit *)buf;
3521 for (i = 0; i < sg_ptr->nents; i++) {
3522 sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
3523 sg_entry->len = sg->length;
3524 sg_entry++;
3525 sg = sg_next(sg);
3526 }
3527
3528 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
3529 data->client.sec_buf_fd[fd_idx].vbase = buf;
3530 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
3531 data->client.sec_buf_fd[fd_idx].size = size;
3532
3533 return 0;
3534}
3535
/*
 * 64-bit variant of __qseecom_update_cmd_buf(): patch (or un-patch when
 * @cleanup is true) ion-fd backed physical addresses into a modfd buffer
 * using 64-bit sg entries.  Oversized sg lists (> QSEECOM_MAX_SG_ENTRY)
 * are spilled into a DMA-coherent side buffer via
 * __qseecom_allocate_sg_list_buffer().  Returns 0 on success, -ENOMEM on
 * any per-fd failure (the err path also frees any spill buffers).
 */
static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	struct ion_handle *ihandle;
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	/* @msg is a modfd request for apps, a modfd response for listeners. */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* Resolve the fd and the patch location for this slot. */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
				(req->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
					req->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
						lstnr_resp->ifd_data[i].fd);
			if (IS_ERR_OR_NULL(ihandle)) {
				pr_err("Ion client can't retrieve the handle\n");
				return -ENOMEM;
			}
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
		if (IS_ERR_OR_NULL(sg_ptr)) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_warn("Num of scattered entries");
			pr_warn(" (%d) is greater than %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			/* Too many entries for the inline format: spill the
			 * full list into a coherent side buffer (or free it
			 * again on the cleanup pass).
			 */
			if (cleanup) {
				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
					data->client.sec_buf_fd[i].vbase)
					dma_free_coherent(qseecom.pdev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			} else {
				ret = __qseecom_allocate_sg_list_buffer(data,
						field, i, sg_ptr);
				if (ret) {
					pr_err("Failed to allocate sg list buffer\n");
					goto err;
				}
			}
			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
			sg = sg_ptr->sgl;
			goto cleanup;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			uint64_t *update_64bit;

			if (__boundary_checks_offset(req, lstnr_resp, data, i))
				goto err;
			/* 64bit app uses 64bit address */
			update_64bit = (uint64_t *) field;
			*update_64bit = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg_ptr->sgl);
			len += (uint32_t)sg->length;
		} else {
			struct qseecom_sg_entry_64bit *update_64bit;
			int j = 0;

			/* The sg-entry array must fit inside the buffer. */
			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
					(req->ifd_data[i].fd > 0)) {

				if ((req->cmd_req_len <
					 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
					(req->ifd_data[i].cmd_buf_offset >
					(req->cmd_req_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					pr_err("Invalid offset = 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					goto err;
				}

			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {

				if ((lstnr_resp->resp_len <
					SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
				(lstnr_resp->ifd_data[i].cmd_buf_offset >
						(lstnr_resp->resp_len -
					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
					goto err;
				}
			}
			/* 64bit app uses 64bit address */
			update_64bit = (struct qseecom_sg_entry_64bit *)field;
			for (j = 0; j < sg_ptr->nents; j++) {
				update_64bit->phys_addr = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg);
				update_64bit->len = cleanup ? 0 :
						(uint32_t)sg->length;
				update_64bit++;
				len += sg->length;
				sg = sg_next(sg);
			}
		}
cleanup:
		if (cleanup) {
			/* Post-command: invalidate so CPU sees TA writes. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Pre-command: flush so the TA sees CPU writes. */
			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
					ihandle, NULL, len,
					ION_IOC_CLEAN_INV_CACHES);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* Record this slot for the whitelist sglist table
			 * (flag 1 = 64-bit entry format).
			 */
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				/* Offset is relative to the listener's
				 * shared buffer, not the response pointer.
				 */
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the handle */
		if (!IS_ERR_OR_NULL(ihandle))
			ion_free(qseecom.ion_clnt, ihandle);
	}
	return ret;
err:
	/* Free every spill buffer recorded so far, then the current handle. */
	for (i = 0; i < MAX_ION_FD; i++)
		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
			data->client.sec_buf_fd[i].vbase)
			dma_free_coherent(qseecom.pdev,
				data->client.sec_buf_fd[i].size,
				data->client.sec_buf_fd[i].vbase,
				data->client.sec_buf_fd[i].pbase);
	if (!IS_ERR_OR_NULL(ihandle))
		ion_free(qseecom.ion_clnt, ihandle);
	return -ENOMEM;
}
3734
3735static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
3736 void __user *argp,
3737 bool is_64bit_addr)
3738{
3739 int ret = 0;
3740 int i;
3741 struct qseecom_send_modfd_cmd_req req;
3742 struct qseecom_send_cmd_req send_cmd_req;
3743
3744 ret = copy_from_user(&req, argp, sizeof(req));
3745 if (ret) {
3746 pr_err("copy_from_user failed\n");
3747 return ret;
3748 }
3749
3750 send_cmd_req.cmd_req_buf = req.cmd_req_buf;
3751 send_cmd_req.cmd_req_len = req.cmd_req_len;
3752 send_cmd_req.resp_buf = req.resp_buf;
3753 send_cmd_req.resp_len = req.resp_len;
3754
3755 if (__validate_send_cmd_inputs(data, &send_cmd_req))
3756 return -EINVAL;
3757
3758 /* validate offsets */
3759 for (i = 0; i < MAX_ION_FD; i++) {
3760 if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
3761 pr_err("Invalid offset %d = 0x%x\n",
3762 i, req.ifd_data[i].cmd_buf_offset);
3763 return -EINVAL;
3764 }
3765 }
3766 req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3767 (uintptr_t)req.cmd_req_buf);
3768 req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
3769 (uintptr_t)req.resp_buf);
3770
3771 if (!is_64bit_addr) {
3772 ret = __qseecom_update_cmd_buf(&req, false, data);
3773 if (ret)
3774 return ret;
3775 ret = __qseecom_send_cmd(data, &send_cmd_req);
3776 if (ret)
3777 return ret;
3778 ret = __qseecom_update_cmd_buf(&req, true, data);
3779 if (ret)
3780 return ret;
3781 } else {
3782 ret = __qseecom_update_cmd_buf_64(&req, false, data);
3783 if (ret)
3784 return ret;
3785 ret = __qseecom_send_cmd(data, &send_cmd_req);
3786 if (ret)
3787 return ret;
3788 ret = __qseecom_update_cmd_buf_64(&req, true, data);
3789 if (ret)
3790 return ret;
3791 }
3792
3793 return ret;
3794}
3795
/* Ioctl entry: send a modfd command using the 32-bit sg-entry format. */
static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, false);
}
3801
/* Ioctl entry: send a modfd command using the 64-bit sg-entry format. */
static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
					void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, true);
}
3807
3808
3809
3810static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
3811 struct qseecom_registered_listener_list *svc)
3812{
3813 int ret;
3814
3815 ret = (svc->rcv_req_flag != 0);
3816 return ret || data->abort;
3817}
3818
/*
 * Block a listener service until a request arrives from TZ (rcv_req_flag
 * set) or the client is aborted.  The wait is freezable so suspend is not
 * blocked.  Returns 0 when a request is pending, -ERESTARTSYS if the wait
 * was interrupted, -ENODEV on abort, -ENODATA for an unknown listener id.
 */
static int qseecom_receive_req(struct qseecom_dev_handle *data)
{
	int ret = 0;
	struct qseecom_registered_listener_list *this_lstnr;

	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (!this_lstnr) {
		pr_err("Invalid listener ID\n");
		return -ENODATA;
	}

	while (1) {
		/* Non-zero return means a signal woke us, not the condition. */
		if (wait_event_freezable(this_lstnr->rcv_req_wq,
				__qseecom_listener_has_rcvd_req(data,
				this_lstnr))) {
			pr_debug("Interrupted: exiting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			/* woken up for different reason */
			return -ERESTARTSYS;
		}

		/* The condition is also true on abort; check it first. */
		if (data->abort) {
			pr_err("Aborting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			return -ENODEV;
		}
		/* Consume the pending-request flag before returning. */
		this_lstnr->rcv_req_flag = 0;
		break;
	}
	return ret;
}
3850
3851static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
3852{
3853 unsigned char app_arch = 0;
3854 struct elf32_hdr *ehdr;
3855 struct elf64_hdr *ehdr64;
3856
3857 app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
3858
3859 switch (app_arch) {
3860 case ELFCLASS32: {
3861 ehdr = (struct elf32_hdr *)fw_entry->data;
3862 if (fw_entry->size < sizeof(*ehdr)) {
3863 pr_err("%s: Not big enough to be an elf32 header\n",
3864 qseecom.pdev->init_name);
3865 return false;
3866 }
3867 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
3868 pr_err("%s: Not an elf32 header\n",
3869 qseecom.pdev->init_name);
3870 return false;
3871 }
3872 if (ehdr->e_phnum == 0) {
3873 pr_err("%s: No loadable segments\n",
3874 qseecom.pdev->init_name);
3875 return false;
3876 }
3877 if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
3878 sizeof(struct elf32_hdr) > fw_entry->size) {
3879 pr_err("%s: Program headers not within mdt\n",
3880 qseecom.pdev->init_name);
3881 return false;
3882 }
3883 break;
3884 }
3885 case ELFCLASS64: {
3886 ehdr64 = (struct elf64_hdr *)fw_entry->data;
3887 if (fw_entry->size < sizeof(*ehdr64)) {
3888 pr_err("%s: Not big enough to be an elf64 header\n",
3889 qseecom.pdev->init_name);
3890 return false;
3891 }
3892 if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
3893 pr_err("%s: Not an elf64 header\n",
3894 qseecom.pdev->init_name);
3895 return false;
3896 }
3897 if (ehdr64->e_phnum == 0) {
3898 pr_err("%s: No loadable segments\n",
3899 qseecom.pdev->init_name);
3900 return false;
3901 }
3902 if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
3903 sizeof(struct elf64_hdr) > fw_entry->size) {
3904 pr_err("%s: Program headers not within mdt\n",
3905 qseecom.pdev->init_name);
3906 return false;
3907 }
3908 break;
3909 }
3910 default: {
3911 pr_err("QSEE app arch %u is not supported\n", app_arch);
3912 return false;
3913 }
3914 }
3915 return true;
3916}
3917
/*
 * Compute the total size of a TA's split firmware image (<app>.mdt plus
 * one <app>.bNN blob per program header) and report its ELF class.
 *
 * On success *fw_size holds the summed size and *app_arch the EI_CLASS
 * byte; on failure *fw_size is zeroed.  Returns 0 on success or a
 * negative errno.  NOTE(review): ret only becomes 0 via the last
 * request_firmware() in the loop; validation guarantees e_phnum != 0, so
 * the loop always runs at least once.
 */
static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
					uint32_t *app_arch)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		pr_err("error with request_firmware\n");
		ret = -EIO;
		goto err;
	}
	if (!__qseecom_is_fw_image_valid(fw_entry)) {
		ret = -EIO;
		goto err;
	}
	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	*fw_size = fw_entry->size;
	/* One .bNN blob exists per ELF program header. */
	if (*app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (*app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
					appname, *app_arch);
		ret = -EIO;
		goto err;
	}
	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
	release_firmware(fw_entry);
	fw_entry = NULL;
	for (i = 0; i < num_images; i++) {
		memset(fw_name, 0, sizeof(fw_name));
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret)
			goto err;
		/* Guard the 32-bit accumulator before adding. */
		if (*fw_size > U32_MAX - fw_entry->size) {
			pr_err("QSEE %s app file size overflow\n", appname);
			ret = -EINVAL;
			goto err;
		}
		*fw_size += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}

	return ret;
err:
	if (fw_entry)
		release_firmware(fw_entry);
	*fw_size = 0;
	return ret;
}
3980
/*
 * Copy a TA's split firmware image (<app>.mdt plus each <app>.bNN blob)
 * into @img_data, which the caller allocated with capacity @fw_size as
 * returned by __qseecom_get_fw_size().  Fills load_req->mdt_len (size of
 * the .mdt) and load_req->img_len (total bytes copied).  Returns 0 on
 * success or a negative errno.
 */
static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
				uint32_t fw_size,
				struct qseecom_load_app_ireq *load_req)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	u8 *img_data_ptr = img_data;
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;
	unsigned char app_arch = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		ret = -EIO;
		goto err;
	}

	load_req->img_len = fw_entry->size;
	/* The destination buffer was sized by __qseecom_get_fw_size(). */
	if (load_req->img_len > fw_size) {
		pr_err("app %s size %zu is larger than buf size %u\n",
			appname, fw_entry->size, fw_size);
		ret = -EINVAL;
		goto err;
	}
	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
	img_data_ptr = img_data_ptr + fw_entry->size;
	load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/

	/* One .bNN blob exists per ELF program header. */
	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	if (app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
					appname, app_arch);
		ret = -EIO;
		goto err;
	}
	release_firmware(fw_entry);
	fw_entry = NULL;
	for (i = 0; i < num_images; i++) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
		if (ret) {
			pr_err("Failed to locate blob %s\n", fw_name);
			goto err;
		}
		/* Guard both u32 overflow and buffer capacity. */
		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
			(fw_entry->size + load_req->img_len > fw_size)) {
			pr_err("Invalid file size for %s\n", fw_name);
			ret = -EINVAL;
			goto err;
		}
		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
		img_data_ptr = img_data_ptr + fw_entry->size;
		load_req->img_len += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	return ret;
err:
	release_firmware(fw_entry);
	return ret;
}
4052
/*
 * __qseecom_allocate_img_data() - allocate an ION buffer for a TA image.
 *
 * Allocates @fw_size bytes (4K aligned) from the QSECOM TA heap, maps it
 * into the kernel, and retrieves its physical address.
 *
 * @pihandle: out - ION handle of the allocated buffer
 * @data:     out - kernel virtual address of the mapping
 * @fw_size:  requested size in bytes
 * @paddr:    out - physical address of the buffer
 *
 * Returns 0 on success, -ENOMEM or -EIO on failure; out-parameters are
 * only written on success.
 *
 * The retry loop below unlocks and relocks app_access_lock, so this
 * must be called with app_access_lock held and with no state that could
 * be invalidated while the lock is dropped.
 */
static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
			u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
{
	size_t len = 0;
	int ret = 0;
	ion_phys_addr_t pa;
	struct ion_handle *ihandle = NULL;
	u8 *img_data = NULL;
	int retry = 0;

	do {
		/*
		 * On every retry (not the first attempt), drop
		 * app_access_lock while sleeping so other clients can
		 * make progress and possibly free ION memory.
		 */
		if (retry++) {
			mutex_unlock(&app_access_lock);
			msleep(QSEECOM_TA_ION_ALLOCATE_DELAY);
			mutex_lock(&app_access_lock);
		}
		ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
			SZ_4K, ION_HEAP(ION_QSECOM_TA_HEAP_ID), 0);
	} while (IS_ERR_OR_NULL(ihandle) &&
		(retry <= QSEECOM_TA_ION_ALLOCATE_MAX_ATTEMP));

	if (IS_ERR_OR_NULL(ihandle)) {
		pr_err("ION alloc failed\n");
		return -ENOMEM;
	}
	img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
					ihandle);

	if (IS_ERR_OR_NULL(img_data)) {
		pr_err("ION memory mapping for image loading failed\n");
		ret = -ENOMEM;
		goto exit_ion_free;
	}
	/* Get the physical address of the ION BUF */
	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
	if (ret) {
		pr_err("physical memory retrieval failure\n");
		ret = -EIO;
		goto exit_ion_unmap_kernel;
	}

	*pihandle = ihandle;
	*data = img_data;
	*paddr = pa;
	return ret;

exit_ion_unmap_kernel:
	ion_unmap_kernel(qseecom.ion_clnt, ihandle);
exit_ion_free:
	ion_free(qseecom.ion_clnt, ihandle);
	ihandle = NULL;
	return ret;
}
4106
4107static void __qseecom_free_img_data(struct ion_handle **ihandle)
4108{
4109 ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
4110 ion_free(qseecom.ion_clnt, *ihandle);
4111 *ihandle = NULL;
4112}
4113
4114static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
4115 uint32_t *app_id)
4116{
4117 int ret = -1;
4118 uint32_t fw_size = 0;
4119 struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
4120 struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
4121 struct qseecom_command_scm_resp resp;
4122 u8 *img_data = NULL;
4123 ion_phys_addr_t pa = 0;
4124 struct ion_handle *ihandle = NULL;
4125 void *cmd_buf = NULL;
4126 size_t cmd_len;
4127 uint32_t app_arch = 0;
4128
4129 if (!data || !appname || !app_id) {
4130 pr_err("Null pointer to data or appname or appid\n");
4131 return -EINVAL;
4132 }
4133 *app_id = 0;
4134 if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
4135 return -EIO;
4136 data->client.app_arch = app_arch;
4137
4138 /* Check and load cmnlib */
4139 if (qseecom.qsee_version > QSEEE_VERSION_00) {
4140 if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
4141 ret = qseecom_load_commonlib_image(data, "cmnlib");
4142 if (ret) {
4143 pr_err("failed to load cmnlib\n");
4144 return -EIO;
4145 }
4146 qseecom.commonlib_loaded = true;
4147 pr_debug("cmnlib is loaded\n");
4148 }
4149
4150 if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
4151 ret = qseecom_load_commonlib_image(data, "cmnlib64");
4152 if (ret) {
4153 pr_err("failed to load cmnlib64\n");
4154 return -EIO;
4155 }
4156 qseecom.commonlib64_loaded = true;
4157 pr_debug("cmnlib64 is loaded\n");
4158 }
4159 }
4160
4161 ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
4162 if (ret)
4163 return ret;
4164
4165 ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
4166 if (ret) {
4167 ret = -EIO;
4168 goto exit_free_img_data;
4169 }
4170
4171 /* Populate the load_req parameters */
4172 if (qseecom.qsee_version < QSEE_VERSION_40) {
4173 load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4174 load_req.mdt_len = load_req.mdt_len;
4175 load_req.img_len = load_req.img_len;
4176 strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
4177 load_req.phy_addr = (uint32_t)pa;
4178 cmd_buf = (void *)&load_req;
4179 cmd_len = sizeof(struct qseecom_load_app_ireq);
4180 } else {
4181 load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
4182 load_req_64bit.mdt_len = load_req.mdt_len;
4183 load_req_64bit.img_len = load_req.img_len;
4184 strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
4185 load_req_64bit.phy_addr = (uint64_t)pa;
4186 cmd_buf = (void *)&load_req_64bit;
4187 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
4188 }
4189
4190 if (qseecom.support_bus_scaling) {
4191 mutex_lock(&qsee_bw_mutex);
4192 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
4193 mutex_unlock(&qsee_bw_mutex);
4194 if (ret) {
4195 ret = -EIO;
4196 goto exit_free_img_data;
4197 }
4198 }
4199
4200 ret = __qseecom_enable_clk_scale_up(data);
4201 if (ret) {
4202 ret = -EIO;
4203 goto exit_unregister_bus_bw_need;
4204 }
4205
4206 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
4207 img_data, fw_size,
4208 ION_IOC_CLEAN_INV_CACHES);
4209 if (ret) {
4210 pr_err("cache operation failed %d\n", ret);
4211 goto exit_disable_clk_vote;
4212 }
4213
4214 /* SCM_CALL to load the image */
4215 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
4216 &resp, sizeof(resp));
4217 if (ret) {
Zhen Kong5d02be92018-05-29 16:17:29 -07004218 pr_err("scm_call to load failed : ret %d, result %x\n",
4219 ret, resp.result);
4220 if (resp.result == QSEOS_RESULT_FAIL_APP_ALREADY_LOADED)
4221 ret = -EEXIST;
4222 else
4223 ret = -EIO;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004224 goto exit_disable_clk_vote;
4225 }
4226
4227 switch (resp.result) {
4228 case QSEOS_RESULT_SUCCESS:
4229 *app_id = resp.data;
4230 break;
4231 case QSEOS_RESULT_INCOMPLETE:
4232 ret = __qseecom_process_incomplete_cmd(data, &resp);
4233 if (ret)
4234 pr_err("process_incomplete_cmd FAILED\n");
4235 else
4236 *app_id = resp.data;
4237 break;
4238 case QSEOS_RESULT_FAILURE:
4239 pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
4240 break;
4241 default:
4242 pr_err("scm call return unknown response %d\n", resp.result);
4243 ret = -EINVAL;
4244 break;
4245 }
4246
4247exit_disable_clk_vote:
4248 __qseecom_disable_clk_scale_down(data);
4249
4250exit_unregister_bus_bw_need:
4251 if (qseecom.support_bus_scaling) {
4252 mutex_lock(&qsee_bw_mutex);
4253 qseecom_unregister_bus_bandwidth_needs(data);
4254 mutex_unlock(&qsee_bw_mutex);
4255 }
4256
4257exit_free_img_data:
4258 __qseecom_free_img_data(&ihandle);
4259 return ret;
4260}
4261
/*
 * qseecom_load_commonlib_image() - load the QSEE common services library
 * ("cmnlib" for 32-bit TAs, "cmnlib64" for 64-bit) into the secure
 * environment via QSEOS_LOAD_SERV_IMAGE_COMMAND.
 *
 * Returns 0 on success, negative errno otherwise. Caller is expected to
 * hold app_access_lock (allocation helper below may drop/retake it) and
 * to set qseecom.commonlib{,64}_loaded on success.
 */
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name)
{
	int ret = 0;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	ion_phys_addr_t pa = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	uint32_t app_arch = 0;
	struct ion_handle *cmnlib_ion_handle = NULL;

	if (!cmnlib_name) {
		pr_err("cmnlib_name is NULL\n");
		return -EINVAL;
	}
	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
			cmnlib_name, strlen(cmnlib_name));
		return -EINVAL;
	}

	/* Size the ION buffer from the firmware blobs on disk */
	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
		return -EIO;

	ret = __qseecom_allocate_img_data(&cmnlib_ion_handle,
						&img_data, fw_size, &pa);
	if (ret)
		return -EIO;

	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}
	/* Pick the 32-bit or 64-bit request layout per QSEE version */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		load_req.phy_addr = (uint32_t)pa;
		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		cmd_buf = (void *)&load_req;
		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
	} else {
		load_req_64bit.phy_addr = (uint64_t)pa;
		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		load_req_64bit.img_len = load_req.img_len;
		load_req_64bit.mdt_len = load_req.mdt_len;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	/* Flush the image out of the cache so TZ sees coherent memory */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle,
				img_data, fw_size,
				ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clk_vote;
	}

	/* SCM_CALL to load the image */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
							&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n", ret);
		ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed w/response result%d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener service before the load can finish */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd failed err: %d\n", ret);
			goto exit_disable_clk_vote;
		}
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	}

exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	__qseecom_free_img_data(&cmnlib_ion_handle);
	return ret;
}
4382
4383static int qseecom_unload_commonlib_image(void)
4384{
4385 int ret = -EINVAL;
4386 struct qseecom_unload_lib_image_ireq unload_req = {0};
4387 struct qseecom_command_scm_resp resp;
4388
4389 /* Populate the remaining parameters */
4390 unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
4391
4392 /* SCM_CALL to load the image */
4393 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
4394 sizeof(struct qseecom_unload_lib_image_ireq),
4395 &resp, sizeof(resp));
4396 if (ret) {
4397 pr_err("scm_call to unload lib failed : ret %d\n", ret);
4398 ret = -EIO;
4399 } else {
4400 switch (resp.result) {
4401 case QSEOS_RESULT_SUCCESS:
4402 break;
4403 case QSEOS_RESULT_FAILURE:
4404 pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
4405 break;
4406 default:
4407 pr_err("scm call return unknown response %d\n",
4408 resp.result);
4409 ret = -EINVAL;
4410 break;
4411 }
4412 }
4413
4414 return ret;
4415}
4416
4417int qseecom_start_app(struct qseecom_handle **handle,
4418 char *app_name, uint32_t size)
4419{
4420 int32_t ret = 0;
4421 unsigned long flags = 0;
4422 struct qseecom_dev_handle *data = NULL;
4423 struct qseecom_check_app_ireq app_ireq;
4424 struct qseecom_registered_app_list *entry = NULL;
4425 struct qseecom_registered_kclient_list *kclient_entry = NULL;
4426 bool found_app = false;
4427 size_t len;
4428 ion_phys_addr_t pa;
4429 uint32_t fw_size, app_arch;
4430 uint32_t app_id = 0;
4431
4432 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4433 pr_err("Not allowed to be called in %d state\n",
4434 atomic_read(&qseecom.qseecom_state));
4435 return -EPERM;
4436 }
4437 if (!app_name) {
4438 pr_err("failed to get the app name\n");
4439 return -EINVAL;
4440 }
4441
Zhen Kong64a6d7282017-06-16 11:55:07 -07004442 if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004443 pr_err("The app_name (%s) with length %zu is not valid\n",
Zhen Kong64a6d7282017-06-16 11:55:07 -07004444 app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004445 return -EINVAL;
4446 }
4447
4448 *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
4449 if (!(*handle))
4450 return -ENOMEM;
4451
4452 data = kzalloc(sizeof(*data), GFP_KERNEL);
4453 if (!data) {
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304454 ret = -ENOMEM;
4455 goto exit_handle_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004456 }
4457 data->abort = 0;
4458 data->type = QSEECOM_CLIENT_APP;
4459 data->released = false;
4460 data->client.sb_length = size;
4461 data->client.user_virt_sb_base = 0;
4462 data->client.ihandle = NULL;
4463
4464 init_waitqueue_head(&data->abort_wq);
4465
4466 data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
4467 ION_HEAP(ION_QSECOM_HEAP_ID), 0);
4468 if (IS_ERR_OR_NULL(data->client.ihandle)) {
4469 pr_err("Ion client could not retrieve the handle\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304470 ret = -ENOMEM;
4471 goto exit_data_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004472 }
4473 mutex_lock(&app_access_lock);
4474
Zhen Kong5d02be92018-05-29 16:17:29 -07004475recheck:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004476 app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
4477 strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
4478 ret = __qseecom_check_app_exists(app_ireq, &app_id);
4479 if (ret)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304480 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004481
4482 strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
4483 if (app_id) {
4484 pr_warn("App id %d for [%s] app exists\n", app_id,
4485 (char *)app_ireq.app_name);
4486 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4487 list_for_each_entry(entry,
4488 &qseecom.registered_app_list_head, list){
4489 if (entry->app_id == app_id) {
4490 entry->ref_cnt++;
4491 found_app = true;
4492 break;
4493 }
4494 }
4495 spin_unlock_irqrestore(
4496 &qseecom.registered_app_list_lock, flags);
4497 if (!found_app)
4498 pr_warn("App_id %d [%s] was loaded but not registered\n",
4499 ret, (char *)app_ireq.app_name);
4500 } else {
4501 /* load the app and get the app_id */
4502 pr_debug("%s: Loading app for the first time'\n",
4503 qseecom.pdev->init_name);
4504 ret = __qseecom_load_fw(data, app_name, &app_id);
Zhen Kong5d02be92018-05-29 16:17:29 -07004505 if (ret == -EEXIST) {
4506 pr_err("recheck if TA %s is loaded\n", app_name);
4507 goto recheck;
4508 } else if (ret < 0)
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304509 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004510 }
4511 data->client.app_id = app_id;
4512 if (!found_app) {
4513 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
4514 if (!entry) {
4515 pr_err("kmalloc for app entry failed\n");
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304516 ret = -ENOMEM;
4517 goto exit_ion_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004518 }
4519 entry->app_id = app_id;
4520 entry->ref_cnt = 1;
4521 strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
4522 if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
4523 ret = -EIO;
Zhen Konga6e3f512017-01-20 12:22:23 -08004524 kfree(entry);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304525 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004526 }
4527 entry->app_arch = app_arch;
4528 entry->app_blocked = false;
4529 entry->blocked_on_listener_id = 0;
4530 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
4531 list_add_tail(&entry->list, &qseecom.registered_app_list_head);
4532 spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
4533 flags);
4534 }
4535
4536 /* Get the physical address of the ION BUF */
4537 ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
4538 if (ret) {
4539 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
4540 ret);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304541 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004542 }
4543
4544 /* Populate the structure for sending scm call to load image */
4545 data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
4546 data->client.ihandle);
4547 if (IS_ERR_OR_NULL(data->client.sb_virt)) {
4548 pr_err("ION memory mapping for client shared buf failed\n");
4549 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304550 goto exit_entry_free;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004551 }
4552 data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
4553 data->client.sb_phys = (phys_addr_t)pa;
4554 (*handle)->dev = (void *)data;
4555 (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
4556 (*handle)->sbuf_len = data->client.sb_length;
4557
4558 kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
4559 if (!kclient_entry) {
4560 ret = -ENOMEM;
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304561 goto exit_ion_unmap_kernel;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004562 }
4563 kclient_entry->handle = *handle;
4564
4565 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4566 list_add_tail(&kclient_entry->list,
4567 &qseecom.registered_kclient_list_head);
4568 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4569
4570 mutex_unlock(&app_access_lock);
4571 return 0;
4572
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304573exit_ion_unmap_kernel:
4574 if (!IS_ERR_OR_NULL(data->client.ihandle))
4575 ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
4576exit_entry_free:
4577 kfree(entry);
4578exit_ion_free:
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004579 mutex_unlock(&app_access_lock);
AnilKumar Chimatafe888722018-04-05 23:16:59 +05304580 if (!IS_ERR_OR_NULL(data->client.ihandle)) {
4581 ion_free(qseecom.ion_clnt, data->client.ihandle);
4582 data->client.ihandle = NULL;
4583 }
4584exit_data_free:
4585 kfree(data);
4586exit_handle_free:
4587 if (*handle) {
4588 kfree(*handle);
4589 *handle = NULL;
4590 }
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07004591 return ret;
4592}
4593EXPORT_SYMBOL(qseecom_start_app);
4594
4595int qseecom_shutdown_app(struct qseecom_handle **handle)
4596{
4597 int ret = -EINVAL;
4598 struct qseecom_dev_handle *data;
4599
4600 struct qseecom_registered_kclient_list *kclient = NULL;
4601 unsigned long flags = 0;
4602 bool found_handle = false;
4603
4604 if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
4605 pr_err("Not allowed to be called in %d state\n",
4606 atomic_read(&qseecom.qseecom_state));
4607 return -EPERM;
4608 }
4609
4610 if ((handle == NULL) || (*handle == NULL)) {
4611 pr_err("Handle is not initialized\n");
4612 return -EINVAL;
4613 }
4614 data = (struct qseecom_dev_handle *) ((*handle)->dev);
4615 mutex_lock(&app_access_lock);
4616
4617 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
4618 list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
4619 list) {
4620 if (kclient->handle == (*handle)) {
4621 list_del(&kclient->list);
4622 found_handle = true;
4623 break;
4624 }
4625 }
4626 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
4627 if (!found_handle)
4628 pr_err("Unable to find the handle, exiting\n");
4629 else
4630 ret = qseecom_unload_app(data, false);
4631
4632 mutex_unlock(&app_access_lock);
4633 if (ret == 0) {
4634 kzfree(data);
4635 kzfree(*handle);
4636 kzfree(kclient);
4637 *handle = NULL;
4638 }
4639
4640 return ret;
4641}
4642EXPORT_SYMBOL(qseecom_shutdown_app);
4643
/*
 * qseecom_send_command() - kernel-client API: send @send_buf
 * (@sbuf_len bytes) to the trusted app behind @handle and receive the
 * response in @resp_buf (@rbuf_len bytes).
 *
 * Both buffers must lie inside the handle's shared buffer (enforced by
 * __validate_send_cmd_inputs()). Serializes against all other qseecom
 * operations via app_access_lock. Returns 0 on success, negative errno
 * on failure.
 */
int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
{
	int ret = 0;
	struct qseecom_send_cmd_req req = {0, 0, 0, 0};
	struct qseecom_dev_handle *data;
	bool perf_enabled = false;

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
			atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if (handle == NULL) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = handle->dev;

	req.cmd_req_len = sbuf_len;
	req.resp_len = rbuf_len;
	req.cmd_req_buf = send_buf;
	req.resp_buf = resp_buf;

	if (__validate_send_cmd_inputs(data, &req))
		return -EINVAL;

	mutex_lock(&app_access_lock);
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
		if (ret) {
			pr_err("Failed to set bw.\n");
			mutex_unlock(&app_access_lock);
			return ret;
		}
	}
	/*
	 * On targets where crypto clock is handled by HLOS,
	 * if clk_access_cnt is zero and perf_enabled is false,
	 * then the crypto clock was not enabled before sending cmd
	 * to tz, qseecom will enable the clock to avoid service failure.
	 */
	if (!qseecom.no_clock_support &&
		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
		pr_debug("ce clock is not enabled!\n");
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clock with err %d\n",
						ret);
			mutex_unlock(&app_access_lock);
			return -EINVAL;
		}
		perf_enabled = true;
	}
	/* the "securemm" app only understands the legacy command format */
	if (!strcmp(data->client.app_name, "securemm"))
		data->use_legacy_cmd = true;

	ret = __qseecom_send_cmd(data, &req);
	data->use_legacy_cmd = false;
	/* arm the deferred bandwidth scale-down after the command */
	if (qseecom.support_bus_scaling)
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);

	/* drop the clock votes taken above, if any */
	if (perf_enabled) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	}

	mutex_unlock(&app_access_lock);

	if (ret)
		return ret;

	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
			req.resp_len, req.resp_buf);
	return ret;
}
4722EXPORT_SYMBOL(qseecom_send_command);
4723
4724int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
4725{
4726 int ret = 0;
4727
4728 if ((handle == NULL) || (handle->dev == NULL)) {
4729 pr_err("No valid kernel client\n");
4730 return -EINVAL;
4731 }
4732 if (high) {
4733 if (qseecom.support_bus_scaling) {
4734 mutex_lock(&qsee_bw_mutex);
4735 __qseecom_register_bus_bandwidth_needs(handle->dev,
4736 HIGH);
4737 mutex_unlock(&qsee_bw_mutex);
4738 } else {
4739 ret = qseecom_perf_enable(handle->dev);
4740 if (ret)
4741 pr_err("Failed to vote for clock with err %d\n",
4742 ret);
4743 }
4744 } else {
4745 if (!qseecom.support_bus_scaling) {
4746 qsee_disable_clock_vote(handle->dev, CLK_DFAB);
4747 qsee_disable_clock_vote(handle->dev, CLK_SFPB);
4748 } else {
4749 mutex_lock(&qsee_bw_mutex);
4750 qseecom_unregister_bus_bandwidth_needs(handle->dev);
4751 mutex_unlock(&qsee_bw_mutex);
4752 }
4753 }
4754 return ret;
4755}
4756EXPORT_SYMBOL(qseecom_set_bandwidth);
4757
/*
 * qseecom_process_listener_from_smcinvoke() - bridge for the smcinvoke
 * driver: process a TZ return that needs listener service (or reports
 * an incomplete/blocked command) using dummy app/client records, then
 * write the final TZ status back into @desc->ret[0..2].
 *
 * Returns 0 on success, negative errno if listener processing failed;
 * desc->ret[] is updated in either case.
 */
int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
{
	/* Placeholder records: smcinvoke calls have no real qseecom client */
	struct qseecom_registered_app_list dummy_app_entry = { {0} };
	struct qseecom_dev_handle dummy_private_data = {0};
	struct qseecom_command_scm_resp resp;
	int ret = 0;

	if (!desc) {
		pr_err("desc is NULL\n");
		return -EINVAL;
	}

	/* Unpack the TZ return registers into a scm response */
	resp.result = desc->ret[0];	/*req_cmd*/
	resp.resp_type = desc->ret[1];	/*incomplete:unused;blocked:session_id*/
	resp.data = desc->ret[2];	/*listener_id*/

	dummy_private_data.client.app_id = desc->ret[1];
	dummy_app_entry.app_id = desc->ret[1];

	mutex_lock(&app_access_lock);
	if (qseecom.qsee_reentrancy_support)
		ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
					&dummy_private_data);
	else
		ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
					&resp);
	mutex_unlock(&app_access_lock);
	if (ret)
		pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
			(int)desc->ret[0], (int)desc->ret[2],
			(int)desc->ret[1], ret);
	/* Propagate the final status back to the smcinvoke caller */
	desc->ret[0] = resp.result;
	desc->ret[1] = resp.resp_type;
	desc->ret[2] = resp.data;
	return ret;
}
4794EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
4795
/*
 * Signal that a (non-reentrant) listener response is ready and wake the
 * waiter on the global response wait queue. Always returns 0.
 */
static int qseecom_send_resp(void)
{
	qseecom.send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
4802
4803static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
4804{
4805 struct qseecom_registered_listener_list *this_lstnr = NULL;
4806
4807 pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
4808 this_lstnr = __qseecom_find_svc(data->listener.id);
4809 if (this_lstnr == NULL)
4810 return -EINVAL;
4811 qseecom.send_resp_flag = 1;
4812 this_lstnr->send_resp_flag = 1;
4813 wake_up_interruptible(&qseecom.send_resp_wq);
4814 return 0;
4815}
4816
4817static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
4818 struct qseecom_send_modfd_listener_resp *resp,
4819 struct qseecom_registered_listener_list *this_lstnr)
4820{
4821 int i;
4822
4823 if (!data || !resp || !this_lstnr) {
4824 pr_err("listener handle or resp msg is null\n");
4825 return -EINVAL;
4826 }
4827
4828 if (resp->resp_buf_ptr == NULL) {
4829 pr_err("resp buffer is null\n");
4830 return -EINVAL;
4831 }
4832 /* validate resp buf length */
4833 if ((resp->resp_len == 0) ||
4834 (resp->resp_len > this_lstnr->sb_length)) {
4835 pr_err("resp buf length %d not valid\n", resp->resp_len);
4836 return -EINVAL;
4837 }
4838
4839 if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
4840 pr_err("Integer overflow in resp_len & resp_buf\n");
4841 return -EINVAL;
4842 }
4843 if ((uintptr_t)this_lstnr->user_virt_sb_base >
4844 (ULONG_MAX - this_lstnr->sb_length)) {
4845 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
4846 return -EINVAL;
4847 }
4848 /* validate resp buf */
4849 if (((uintptr_t)resp->resp_buf_ptr <
4850 (uintptr_t)this_lstnr->user_virt_sb_base) ||
4851 ((uintptr_t)resp->resp_buf_ptr >=
4852 ((uintptr_t)this_lstnr->user_virt_sb_base +
4853 this_lstnr->sb_length)) ||
4854 (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
4855 ((uintptr_t)this_lstnr->user_virt_sb_base +
4856 this_lstnr->sb_length))) {
4857 pr_err("resp buf is out of shared buffer region\n");
4858 return -EINVAL;
4859 }
4860
4861 /* validate offsets */
4862 for (i = 0; i < MAX_ION_FD; i++) {
4863 if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
4864 pr_err("Invalid offset %d = 0x%x\n",
4865 i, resp->ifd_data[i].cmd_buf_offset);
4866 return -EINVAL;
4867 }
4868 }
4869
4870 return 0;
4871}
4872
4873static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
4874 void __user *argp, bool is_64bit_addr)
4875{
4876 struct qseecom_send_modfd_listener_resp resp;
4877 struct qseecom_registered_listener_list *this_lstnr = NULL;
4878
4879 if (copy_from_user(&resp, argp, sizeof(resp))) {
4880 pr_err("copy_from_user failed");
4881 return -EINVAL;
4882 }
4883
4884 this_lstnr = __qseecom_find_svc(data->listener.id);
4885 if (this_lstnr == NULL)
4886 return -EINVAL;
4887
4888 if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
4889 return -EINVAL;
4890
4891 resp.resp_buf_ptr = this_lstnr->sb_virt +
4892 (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
4893
4894 if (!is_64bit_addr)
4895 __qseecom_update_cmd_buf(&resp, false, data);
4896 else
4897 __qseecom_update_cmd_buf_64(&resp, false, data);
4898 qseecom.send_resp_flag = 1;
4899 this_lstnr->send_resp_flag = 1;
4900 wake_up_interruptible(&qseecom.send_resp_wq);
4901 return 0;
4902}
4903
/* ioctl helper: listener response carrying 32-bit user-space addresses. */
static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, false);
}
4909
/* ioctl helper: listener response carrying 64-bit user-space addresses. */
static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, true);
}
4915
4916static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
4917 void __user *argp)
4918{
4919 struct qseecom_qseos_version_req req;
4920
4921 if (copy_from_user(&req, argp, sizeof(req))) {
4922 pr_err("copy_from_user failed");
4923 return -EINVAL;
4924 }
4925 req.qseos_version = qseecom.qseos_version;
4926 if (copy_to_user(argp, &req, sizeof(req))) {
4927 pr_err("copy_to_user failed");
4928 return -EINVAL;
4929 }
4930 return 0;
4931}
4932
/*
 * __qseecom_enable_clk() - take a reference on the crypto-engine clocks
 * for CE instance @ce (CLK_QSEE or CLK_CE_DRV).
 *
 * First caller actually enables core, interface and bus clocks (in that
 * order); subsequent callers just bump clk_access_cnt. Protected by
 * clk_access_lock. Returns 0 on success, -EINVAL for an unknown CE,
 * -EIO if any clock fails to enable (partially enabled clocks are
 * unwound in reverse order).
 */
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
{
	int rc = 0;
	struct qseecom_clk *qclk = NULL;

	if (qseecom.no_clock_support)
		return 0;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	if (ce == CLK_CE_DRV)
		qclk = &qseecom.ce_drv;

	if (qclk == NULL) {
		pr_err("CLK type not supported\n");
		return -EINVAL;
	}
	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == ULONG_MAX) {
		pr_err("clk_access_cnt beyond limitation\n");
		goto err;
	}
	/* clocks already running: only bump the refcount */
	if (qclk->clk_access_cnt > 0) {
		qclk->clk_access_cnt++;
		mutex_unlock(&clk_access_lock);
		return rc;
	}

	/* Enable CE core clk */
	if (qclk->ce_core_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_core_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core clk\n");
			goto err;
		}
	}
	/* Enable CE clk */
	if (qclk->ce_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE iface clk\n");
			goto ce_clk_err;
		}
	}
	/* Enable AXI clk */
	if (qclk->ce_bus_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_bus_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE bus clk\n");
			goto ce_bus_clk_err;
		}
	}
	qclk->clk_access_cnt++;
	mutex_unlock(&clk_access_lock);
	return 0;

/* unwind partially enabled clocks in reverse order */
ce_bus_clk_err:
	if (qclk->ce_clk != NULL)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk != NULL)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	return -EIO;
}
4999}
5000
5001static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
5002{
5003 struct qseecom_clk *qclk;
5004
5005 if (qseecom.no_clock_support)
5006 return;
5007
5008 if (ce == CLK_QSEE)
5009 qclk = &qseecom.qsee;
5010 else
5011 qclk = &qseecom.ce_drv;
5012
5013 mutex_lock(&clk_access_lock);
5014
5015 if (qclk->clk_access_cnt == 0) {
5016 mutex_unlock(&clk_access_lock);
5017 return;
5018 }
5019
5020 if (qclk->clk_access_cnt == 1) {
5021 if (qclk->ce_clk != NULL)
5022 clk_disable_unprepare(qclk->ce_clk);
5023 if (qclk->ce_core_clk != NULL)
5024 clk_disable_unprepare(qclk->ce_core_clk);
5025 if (qclk->ce_bus_clk != NULL)
5026 clk_disable_unprepare(qclk->ce_bus_clk);
5027 }
5028 qclk->clk_access_cnt--;
5029 mutex_unlock(&clk_access_lock);
5030}
5031
/*
 * qsee_vote_for_clock() - take one bandwidth vote of the given class on
 * behalf of a client.
 * @data:     client handle; its perf_enabled / fast_load_enabled flag is
 *            set once the vote is accounted.
 * @clk_type: CLK_DFAB or CLK_SFPB.
 *
 * Bus vote indices used with msm_bus_scale_client_update_request():
 * 1 = DFAB only, 2 = SFPB only, 3 = both classes active (see the matching
 * values in qsee_disable_clock_vote()).  The first voter of either class
 * also enables the QSEE core clock when this driver owns ce_core_src_clk.
 * Counts and votes are serialized by qsee_bw_mutex.
 *
 * Returns 0 on success or the bus-scale error code.
 */
static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
						int32_t clk_type)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	if (qseecom.no_clock_support)
		return 0;

	qclk = &qseecom.qsee;
	/* No bus-scale client registered: voting is a no-op. */
	if (!qseecom.qsee_perf_client)
		return ret;

	switch (clk_type) {
	case CLK_DFAB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_bw_count) {
			/* First DFAB vote: upgrade to "both" (3) if SFPB is
			 * already active, else bring up the clock and vote
			 * DFAB-only (1).
			 */
			if (qseecom.qsee_sfpb_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 1);
					/* Roll back the clock on vote failure. */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret)
				pr_err("DFAB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_bw_count++;
				data->perf_enabled = true;
			}
		} else {
			/* Vote already in place: just take a reference. */
			qseecom.qsee_bw_count++;
			data->perf_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	case CLK_SFPB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_sfpb_bw_count) {
			/* First SFPB vote: upgrade to "both" (3) if DFAB is
			 * already active, else bring up the clock and vote
			 * SFPB-only (2).
			 */
			if (qseecom.qsee_bw_count > 0)
				ret = msm_bus_scale_client_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					msm_bus_scale_client_update_request(
						qseecom.qsee_perf_client, 2);
					/* Roll back the clock on vote failure. */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}

			if (ret)
				pr_err("SFPB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_sfpb_bw_count++;
				data->fast_load_enabled = true;
			}
		} else {
			/* Vote already in place: just take a reference. */
			qseecom.qsee_sfpb_bw_count++;
			data->fast_load_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	default:
		pr_err("Clock type not defined\n");
		break;
	}
	return ret;
}
5115
5116static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
5117 int32_t clk_type)
5118{
5119 int32_t ret = 0;
5120 struct qseecom_clk *qclk;
5121
5122 qclk = &qseecom.qsee;
5123
5124 if (qseecom.no_clock_support)
5125 return;
5126 if (!qseecom.qsee_perf_client)
5127 return;
5128
5129 switch (clk_type) {
5130 case CLK_DFAB:
5131 mutex_lock(&qsee_bw_mutex);
5132 if (qseecom.qsee_bw_count == 0) {
5133 pr_err("Client error.Extra call to disable DFAB clk\n");
5134 mutex_unlock(&qsee_bw_mutex);
5135 return;
5136 }
5137
5138 if (qseecom.qsee_bw_count == 1) {
5139 if (qseecom.qsee_sfpb_bw_count > 0)
5140 ret = msm_bus_scale_client_update_request(
5141 qseecom.qsee_perf_client, 2);
5142 else {
5143 ret = msm_bus_scale_client_update_request(
5144 qseecom.qsee_perf_client, 0);
5145 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5146 __qseecom_disable_clk(CLK_QSEE);
5147 }
5148 if (ret)
5149 pr_err("SFPB Bandwidth req fail (%d)\n",
5150 ret);
5151 else {
5152 qseecom.qsee_bw_count--;
5153 data->perf_enabled = false;
5154 }
5155 } else {
5156 qseecom.qsee_bw_count--;
5157 data->perf_enabled = false;
5158 }
5159 mutex_unlock(&qsee_bw_mutex);
5160 break;
5161 case CLK_SFPB:
5162 mutex_lock(&qsee_bw_mutex);
5163 if (qseecom.qsee_sfpb_bw_count == 0) {
5164 pr_err("Client error.Extra call to disable SFPB clk\n");
5165 mutex_unlock(&qsee_bw_mutex);
5166 return;
5167 }
5168 if (qseecom.qsee_sfpb_bw_count == 1) {
5169 if (qseecom.qsee_bw_count > 0)
5170 ret = msm_bus_scale_client_update_request(
5171 qseecom.qsee_perf_client, 1);
5172 else {
5173 ret = msm_bus_scale_client_update_request(
5174 qseecom.qsee_perf_client, 0);
5175 if ((!ret) && (qclk->ce_core_src_clk != NULL))
5176 __qseecom_disable_clk(CLK_QSEE);
5177 }
5178 if (ret)
5179 pr_err("SFPB Bandwidth req fail (%d)\n",
5180 ret);
5181 else {
5182 qseecom.qsee_sfpb_bw_count--;
5183 data->fast_load_enabled = false;
5184 }
5185 } else {
5186 qseecom.qsee_sfpb_bw_count--;
5187 data->fast_load_enabled = false;
5188 }
5189 mutex_unlock(&qsee_bw_mutex);
5190 break;
5191 default:
5192 pr_err("Clock type not defined\n");
5193 break;
5194 }
5195
5196}
5197
5198static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
5199 void __user *argp)
5200{
5201 struct ion_handle *ihandle; /* Ion handle */
5202 struct qseecom_load_img_req load_img_req;
5203 int uret = 0;
5204 int ret;
5205 ion_phys_addr_t pa = 0;
5206 size_t len;
5207 struct qseecom_load_app_ireq load_req;
5208 struct qseecom_load_app_64bit_ireq load_req_64bit;
5209 struct qseecom_command_scm_resp resp;
5210 void *cmd_buf = NULL;
5211 size_t cmd_len;
5212 /* Copy the relevant information needed for loading the image */
5213 if (copy_from_user(&load_img_req,
5214 (void __user *)argp,
5215 sizeof(struct qseecom_load_img_req))) {
5216 pr_err("copy_from_user failed\n");
5217 return -EFAULT;
5218 }
5219
5220 /* Get the handle of the shared fd */
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07005221 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005222 load_img_req.ifd_data_fd);
5223 if (IS_ERR_OR_NULL(ihandle)) {
5224 pr_err("Ion client could not retrieve the handle\n");
5225 return -ENOMEM;
5226 }
5227
5228 /* Get the physical address of the ION BUF */
5229 ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
5230 if (ret) {
5231 pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
5232 ret);
5233 return ret;
5234 }
5235 if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
5236 pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
5237 len, load_img_req.mdt_len,
5238 load_img_req.img_len);
5239 return ret;
5240 }
5241 /* Populate the structure for sending scm call to load image */
5242 if (qseecom.qsee_version < QSEE_VERSION_40) {
5243 load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5244 load_req.mdt_len = load_img_req.mdt_len;
5245 load_req.img_len = load_img_req.img_len;
5246 load_req.phy_addr = (uint32_t)pa;
5247 cmd_buf = (void *)&load_req;
5248 cmd_len = sizeof(struct qseecom_load_app_ireq);
5249 } else {
5250 load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
5251 load_req_64bit.mdt_len = load_img_req.mdt_len;
5252 load_req_64bit.img_len = load_img_req.img_len;
5253 load_req_64bit.phy_addr = (uint64_t)pa;
5254 cmd_buf = (void *)&load_req_64bit;
5255 cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
5256 }
5257
5258 if (qseecom.support_bus_scaling) {
5259 mutex_lock(&qsee_bw_mutex);
5260 ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
5261 mutex_unlock(&qsee_bw_mutex);
5262 if (ret) {
5263 ret = -EIO;
5264 goto exit_cpu_restore;
5265 }
5266 }
5267
5268 /* Vote for the SFPB clock */
5269 ret = __qseecom_enable_clk_scale_up(data);
5270 if (ret) {
5271 ret = -EIO;
5272 goto exit_register_bus_bandwidth_needs;
5273 }
5274 ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
5275 ION_IOC_CLEAN_INV_CACHES);
5276 if (ret) {
5277 pr_err("cache operation failed %d\n", ret);
5278 goto exit_disable_clock;
5279 }
5280 /* SCM_CALL to load the external elf */
5281 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
5282 &resp, sizeof(resp));
5283 if (ret) {
5284 pr_err("scm_call to load failed : ret %d\n",
5285 ret);
5286 ret = -EFAULT;
5287 goto exit_disable_clock;
5288 }
5289
5290 switch (resp.result) {
5291 case QSEOS_RESULT_SUCCESS:
5292 break;
5293 case QSEOS_RESULT_INCOMPLETE:
5294 pr_err("%s: qseos result incomplete\n", __func__);
5295 ret = __qseecom_process_incomplete_cmd(data, &resp);
5296 if (ret)
5297 pr_err("process_incomplete_cmd failed: err: %d\n", ret);
5298 break;
5299 case QSEOS_RESULT_FAILURE:
5300 pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
5301 ret = -EFAULT;
5302 break;
5303 default:
5304 pr_err("scm_call response result %d not supported\n",
5305 resp.result);
5306 ret = -EFAULT;
5307 break;
5308 }
5309
5310exit_disable_clock:
5311 __qseecom_disable_clk_scale_down(data);
5312
5313exit_register_bus_bandwidth_needs:
5314 if (qseecom.support_bus_scaling) {
5315 mutex_lock(&qsee_bw_mutex);
5316 uret = qseecom_unregister_bus_bandwidth_needs(data);
5317 mutex_unlock(&qsee_bw_mutex);
5318 if (uret)
5319 pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
5320 uret, ret);
5321 }
5322
5323exit_cpu_restore:
5324 /* Deallocate the handle */
5325 if (!IS_ERR_OR_NULL(ihandle))
5326 ion_free(qseecom.ion_clnt, ihandle);
5327 return ret;
5328}
5329
5330static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
5331{
5332 int ret = 0;
5333 struct qseecom_command_scm_resp resp;
5334 struct qseecom_unload_app_ireq req;
5335
5336 /* unavailable client app */
5337 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
5338
5339 /* Populate the structure for sending scm call to unload image */
5340 req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
5341
5342 /* SCM_CALL to unload the external elf */
5343 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
5344 sizeof(struct qseecom_unload_app_ireq),
5345 &resp, sizeof(resp));
5346 if (ret) {
5347 pr_err("scm_call to unload failed : ret %d\n",
5348 ret);
5349 ret = -EFAULT;
5350 goto qseecom_unload_external_elf_scm_err;
5351 }
5352 if (resp.result == QSEOS_RESULT_INCOMPLETE) {
5353 ret = __qseecom_process_incomplete_cmd(data, &resp);
5354 if (ret)
5355 pr_err("process_incomplete_cmd fail err: %d\n",
5356 ret);
5357 } else {
5358 if (resp.result != QSEOS_RESULT_SUCCESS) {
5359 pr_err("scm_call to unload image failed resp.result =%d\n",
5360 resp.result);
5361 ret = -EFAULT;
5362 }
5363 }
5364
5365qseecom_unload_external_elf_scm_err:
5366
5367 return ret;
5368}
5369
5370static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
5371 void __user *argp)
5372{
5373
5374 int32_t ret;
5375 struct qseecom_qseos_app_load_query query_req;
5376 struct qseecom_check_app_ireq req;
5377 struct qseecom_registered_app_list *entry = NULL;
5378 unsigned long flags = 0;
5379 uint32_t app_arch = 0, app_id = 0;
5380 bool found_app = false;
5381
5382 /* Copy the relevant information needed for loading the image */
5383 if (copy_from_user(&query_req,
5384 (void __user *)argp,
5385 sizeof(struct qseecom_qseos_app_load_query))) {
5386 pr_err("copy_from_user failed\n");
5387 return -EFAULT;
5388 }
5389
5390 req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
5391 query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
5392 strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
5393
5394 ret = __qseecom_check_app_exists(req, &app_id);
5395 if (ret) {
5396 pr_err(" scm call to check if app is loaded failed");
5397 return ret; /* scm call failed */
5398 }
5399 if (app_id) {
5400 pr_debug("App id %d (%s) already exists\n", app_id,
5401 (char *)(req.app_name));
5402 spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
5403 list_for_each_entry(entry,
5404 &qseecom.registered_app_list_head, list){
5405 if (entry->app_id == app_id) {
5406 app_arch = entry->app_arch;
5407 entry->ref_cnt++;
5408 found_app = true;
5409 break;
5410 }
5411 }
5412 spin_unlock_irqrestore(
5413 &qseecom.registered_app_list_lock, flags);
5414 data->client.app_id = app_id;
5415 query_req.app_id = app_id;
5416 if (app_arch) {
5417 data->client.app_arch = app_arch;
5418 query_req.app_arch = app_arch;
5419 } else {
5420 data->client.app_arch = 0;
5421 query_req.app_arch = 0;
5422 }
5423 strlcpy(data->client.app_name, query_req.app_name,
5424 MAX_APP_NAME_SIZE);
5425 /*
5426 * If app was loaded by appsbl before and was not registered,
5427 * regiser this app now.
5428 */
5429 if (!found_app) {
5430 pr_debug("Register app %d [%s] which was loaded before\n",
5431 ret, (char *)query_req.app_name);
5432 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5433 if (!entry) {
5434 pr_err("kmalloc for app entry failed\n");
5435 return -ENOMEM;
5436 }
5437 entry->app_id = app_id;
5438 entry->ref_cnt = 1;
5439 entry->app_arch = data->client.app_arch;
5440 strlcpy(entry->app_name, data->client.app_name,
5441 MAX_APP_NAME_SIZE);
5442 entry->app_blocked = false;
5443 entry->blocked_on_listener_id = 0;
5444 spin_lock_irqsave(&qseecom.registered_app_list_lock,
5445 flags);
5446 list_add_tail(&entry->list,
5447 &qseecom.registered_app_list_head);
5448 spin_unlock_irqrestore(
5449 &qseecom.registered_app_list_lock, flags);
5450 }
5451 if (copy_to_user(argp, &query_req, sizeof(query_req))) {
5452 pr_err("copy_to_user failed\n");
5453 return -EFAULT;
5454 }
5455 return -EEXIST; /* app already loaded */
5456 } else {
5457 return 0; /* app not loaded */
5458 }
5459}
5460
5461static int __qseecom_get_ce_pipe_info(
5462 enum qseecom_key_management_usage_type usage,
5463 uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
5464{
5465 int ret = -EINVAL;
5466 int i, j;
5467 struct qseecom_ce_info_use *p = NULL;
5468 int total = 0;
5469 struct qseecom_ce_pipe_entry *pcepipe;
5470
5471 switch (usage) {
5472 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5473 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5474 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5475 if (qseecom.support_fde) {
5476 p = qseecom.ce_info.fde;
5477 total = qseecom.ce_info.num_fde;
5478 } else {
5479 pr_err("system does not support fde\n");
5480 return -EINVAL;
5481 }
5482 break;
5483 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5484 if (qseecom.support_pfe) {
5485 p = qseecom.ce_info.pfe;
5486 total = qseecom.ce_info.num_pfe;
5487 } else {
5488 pr_err("system does not support pfe\n");
5489 return -EINVAL;
5490 }
5491 break;
5492 default:
5493 pr_err("unsupported usage %d\n", usage);
5494 return -EINVAL;
5495 }
5496
5497 for (j = 0; j < total; j++) {
5498 if (p->unit_num == unit) {
5499 pcepipe = p->ce_pipe_entry;
5500 for (i = 0; i < p->num_ce_pipe_entries; i++) {
5501 (*ce_hw)[i] = pcepipe->ce_num;
5502 *pipe = pcepipe->ce_pipe_pair;
5503 pcepipe++;
5504 }
5505 ret = 0;
5506 break;
5507 }
5508 p++;
5509 }
5510 return ret;
5511}
5512
5513static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
5514 enum qseecom_key_management_usage_type usage,
5515 struct qseecom_key_generate_ireq *ireq)
5516{
5517 struct qseecom_command_scm_resp resp;
5518 int ret;
5519
5520 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5521 usage >= QSEOS_KM_USAGE_MAX) {
5522 pr_err("Error:: unsupported usage %d\n", usage);
5523 return -EFAULT;
5524 }
5525 ret = __qseecom_enable_clk(CLK_QSEE);
5526 if (ret)
5527 return ret;
5528
5529 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5530 ireq, sizeof(struct qseecom_key_generate_ireq),
5531 &resp, sizeof(resp));
5532 if (ret) {
5533 if (ret == -EINVAL &&
5534 resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5535 pr_debug("Key ID exists.\n");
5536 ret = 0;
5537 } else {
5538 pr_err("scm call to generate key failed : %d\n", ret);
5539 ret = -EFAULT;
5540 }
5541 goto generate_key_exit;
5542 }
5543
5544 switch (resp.result) {
5545 case QSEOS_RESULT_SUCCESS:
5546 break;
5547 case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
5548 pr_debug("Key ID exists.\n");
5549 break;
5550 case QSEOS_RESULT_INCOMPLETE:
5551 ret = __qseecom_process_incomplete_cmd(data, &resp);
5552 if (ret) {
5553 if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
5554 pr_debug("Key ID exists.\n");
5555 ret = 0;
5556 } else {
5557 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5558 resp.result);
5559 }
5560 }
5561 break;
5562 case QSEOS_RESULT_FAILURE:
5563 default:
5564 pr_err("gen key scm call failed resp.result %d\n", resp.result);
5565 ret = -EINVAL;
5566 break;
5567 }
5568generate_key_exit:
5569 __qseecom_disable_clk(CLK_QSEE);
5570 return ret;
5571}
5572
5573static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
5574 enum qseecom_key_management_usage_type usage,
5575 struct qseecom_key_delete_ireq *ireq)
5576{
5577 struct qseecom_command_scm_resp resp;
5578 int ret;
5579
5580 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5581 usage >= QSEOS_KM_USAGE_MAX) {
5582 pr_err("Error:: unsupported usage %d\n", usage);
5583 return -EFAULT;
5584 }
5585 ret = __qseecom_enable_clk(CLK_QSEE);
5586 if (ret)
5587 return ret;
5588
5589 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5590 ireq, sizeof(struct qseecom_key_delete_ireq),
5591 &resp, sizeof(struct qseecom_command_scm_resp));
5592 if (ret) {
5593 if (ret == -EINVAL &&
5594 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5595 pr_debug("Max attempts to input password reached.\n");
5596 ret = -ERANGE;
5597 } else {
5598 pr_err("scm call to delete key failed : %d\n", ret);
5599 ret = -EFAULT;
5600 }
5601 goto del_key_exit;
5602 }
5603
5604 switch (resp.result) {
5605 case QSEOS_RESULT_SUCCESS:
5606 break;
5607 case QSEOS_RESULT_INCOMPLETE:
5608 ret = __qseecom_process_incomplete_cmd(data, &resp);
5609 if (ret) {
5610 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5611 resp.result);
5612 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5613 pr_debug("Max attempts to input password reached.\n");
5614 ret = -ERANGE;
5615 }
5616 }
5617 break;
5618 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5619 pr_debug("Max attempts to input password reached.\n");
5620 ret = -ERANGE;
5621 break;
5622 case QSEOS_RESULT_FAILURE:
5623 default:
5624 pr_err("Delete key scm call failed resp.result %d\n",
5625 resp.result);
5626 ret = -EINVAL;
5627 break;
5628 }
5629del_key_exit:
5630 __qseecom_disable_clk(CLK_QSEE);
5631 return ret;
5632}
5633
5634static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
5635 enum qseecom_key_management_usage_type usage,
5636 struct qseecom_key_select_ireq *ireq)
5637{
5638 struct qseecom_command_scm_resp resp;
5639 int ret;
5640
5641 if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5642 usage >= QSEOS_KM_USAGE_MAX) {
5643 pr_err("Error:: unsupported usage %d\n", usage);
5644 return -EFAULT;
5645 }
5646 ret = __qseecom_enable_clk(CLK_QSEE);
5647 if (ret)
5648 return ret;
5649
5650 if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
5651 ret = __qseecom_enable_clk(CLK_CE_DRV);
5652 if (ret)
5653 return ret;
5654 }
5655
5656 ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
5657 ireq, sizeof(struct qseecom_key_select_ireq),
5658 &resp, sizeof(struct qseecom_command_scm_resp));
5659 if (ret) {
5660 if (ret == -EINVAL &&
5661 resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5662 pr_debug("Max attempts to input password reached.\n");
5663 ret = -ERANGE;
5664 } else if (ret == -EINVAL &&
5665 resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5666 pr_debug("Set Key operation under processing...\n");
5667 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5668 } else {
5669 pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
5670 ret);
5671 ret = -EFAULT;
5672 }
5673 goto set_key_exit;
5674 }
5675
5676 switch (resp.result) {
5677 case QSEOS_RESULT_SUCCESS:
5678 break;
5679 case QSEOS_RESULT_INCOMPLETE:
5680 ret = __qseecom_process_incomplete_cmd(data, &resp);
5681 if (ret) {
5682 pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
5683 resp.result);
5684 if (resp.result ==
5685 QSEOS_RESULT_FAIL_PENDING_OPERATION) {
5686 pr_debug("Set Key operation under processing...\n");
5687 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5688 }
5689 if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
5690 pr_debug("Max attempts to input password reached.\n");
5691 ret = -ERANGE;
5692 }
5693 }
5694 break;
5695 case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
5696 pr_debug("Max attempts to input password reached.\n");
5697 ret = -ERANGE;
5698 break;
5699 case QSEOS_RESULT_FAIL_PENDING_OPERATION:
5700 pr_debug("Set Key operation under processing...\n");
5701 ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
5702 break;
5703 case QSEOS_RESULT_FAILURE:
5704 default:
5705 pr_err("Set key scm call failed resp.result %d\n", resp.result);
5706 ret = -EINVAL;
5707 break;
5708 }
5709set_key_exit:
5710 __qseecom_disable_clk(CLK_QSEE);
5711 if (qseecom.qsee.instance != qseecom.ce_drv.instance)
5712 __qseecom_disable_clk(CLK_CE_DRV);
5713 return ret;
5714}
5715
/*
 * __qseecom_update_current_key_user_info() - issue the QSEOS
 * update-key-userinfo scm call.
 * @data:  client handle (needed to complete INCOMPLETE responses).
 * @usage: key usage class; must be within the supported range.
 * @ireq:  fully populated userinfo-update request.
 *
 * Holds a CLK_QSEE vote around the scm call.  Returns 0 on success, the
 * positive value QSEOS_RESULT_FAIL_PENDING_OPERATION when the caller
 * should retry (qseecom_update_key_user_info() loops on this), -EFAULT on
 * scm failure, -EINVAL on generic TZ failure.
 */
static int __qseecom_update_current_key_user_info(
		struct qseecom_dev_handle *data,
		enum qseecom_key_management_usage_type usage,
		struct qseecom_key_userinfo_update_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			/* Retryable: fall through to the switch below, which
			 * maps the pending result to the retry return code.
			 */
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		} else {
			pr_err("scm call to update key userinfo failed: %d\n",
					ret);
			__qseecom_disable_clk(CLK_QSEE);
			return -EFAULT;
		}
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		/* resp.result may have been updated by the incomplete-cmd
		 * handler; a pending result takes precedence over ret here.
		 */
		if (resp.result ==
			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		}
		if (ret)
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
		break;
	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
		pr_debug("Update Key operation under processing...\n");
		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Set key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}

	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
5777
5778
5779static int qseecom_enable_ice_setup(int usage)
5780{
5781 int ret = 0;
5782
5783 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5784 ret = qcom_ice_setup_ice_hw("ufs", true);
5785 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5786 ret = qcom_ice_setup_ice_hw("sdcc", true);
5787
5788 return ret;
5789}
5790
5791static int qseecom_disable_ice_setup(int usage)
5792{
5793 int ret = 0;
5794
5795 if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
5796 ret = qcom_ice_setup_ice_hw("ufs", false);
5797 else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
5798 ret = qcom_ice_setup_ice_hw("sdcc", false);
5799
5800 return ret;
5801}
5802
5803static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
5804{
5805 struct qseecom_ce_info_use *pce_info_use, *p;
5806 int total = 0;
5807 int i;
5808
5809 switch (usage) {
5810 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
5811 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
5812 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
5813 p = qseecom.ce_info.fde;
5814 total = qseecom.ce_info.num_fde;
5815 break;
5816 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
5817 p = qseecom.ce_info.pfe;
5818 total = qseecom.ce_info.num_pfe;
5819 break;
5820 default:
5821 pr_err("unsupported usage %d\n", usage);
5822 return -EINVAL;
5823 }
5824
5825 pce_info_use = NULL;
5826
5827 for (i = 0; i < total; i++) {
5828 if (p->unit_num == unit) {
5829 pce_info_use = p;
5830 break;
5831 }
5832 p++;
5833 }
5834 if (!pce_info_use) {
5835 pr_err("can not find %d\n", unit);
5836 return -EINVAL;
5837 }
5838 return pce_info_use->num_ce_pipe_entries;
5839}
5840
5841static int qseecom_create_key(struct qseecom_dev_handle *data,
5842 void __user *argp)
5843{
5844 int i;
5845 uint32_t *ce_hw = NULL;
5846 uint32_t pipe = 0;
5847 int ret = 0;
5848 uint32_t flags = 0;
5849 struct qseecom_create_key_req create_key_req;
5850 struct qseecom_key_generate_ireq generate_key_ireq;
5851 struct qseecom_key_select_ireq set_key_ireq;
5852 uint32_t entries = 0;
5853
5854 ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
5855 if (ret) {
5856 pr_err("copy_from_user failed\n");
5857 return ret;
5858 }
5859
5860 if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
5861 create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
5862 pr_err("unsupported usage %d\n", create_key_req.usage);
5863 ret = -EFAULT;
5864 return ret;
5865 }
5866 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
5867 create_key_req.usage);
5868 if (entries <= 0) {
5869 pr_err("no ce instance for usage %d instance %d\n",
5870 DEFAULT_CE_INFO_UNIT, create_key_req.usage);
5871 ret = -EINVAL;
5872 return ret;
5873 }
5874
5875 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
5876 if (!ce_hw) {
5877 ret = -ENOMEM;
5878 return ret;
5879 }
5880 ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
5881 DEFAULT_CE_INFO_UNIT);
5882 if (ret) {
5883 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
5884 ret = -EINVAL;
5885 goto free_buf;
5886 }
5887
5888 if (qseecom.fde_key_size)
5889 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
5890 else
5891 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
5892
5893 generate_key_ireq.flags = flags;
5894 generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
5895 memset((void *)generate_key_ireq.key_id,
5896 0, QSEECOM_KEY_ID_SIZE);
5897 memset((void *)generate_key_ireq.hash32,
5898 0, QSEECOM_HASH_SIZE);
5899 memcpy((void *)generate_key_ireq.key_id,
5900 (void *)key_id_array[create_key_req.usage].desc,
5901 QSEECOM_KEY_ID_SIZE);
5902 memcpy((void *)generate_key_ireq.hash32,
5903 (void *)create_key_req.hash32,
5904 QSEECOM_HASH_SIZE);
5905
5906 ret = __qseecom_generate_and_save_key(data,
5907 create_key_req.usage, &generate_key_ireq);
5908 if (ret) {
5909 pr_err("Failed to generate key on storage: %d\n", ret);
5910 goto free_buf;
5911 }
5912
5913 for (i = 0; i < entries; i++) {
5914 set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
5915 if (create_key_req.usage ==
5916 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
5917 set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
5918 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5919
5920 } else if (create_key_req.usage ==
5921 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
5922 set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
5923 set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
5924
5925 } else {
5926 set_key_ireq.ce = ce_hw[i];
5927 set_key_ireq.pipe = pipe;
5928 }
5929 set_key_ireq.flags = flags;
5930
5931 /* set both PIPE_ENC and PIPE_ENC_XTS*/
5932 set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
5933 memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
5934 memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
5935 memcpy((void *)set_key_ireq.key_id,
5936 (void *)key_id_array[create_key_req.usage].desc,
5937 QSEECOM_KEY_ID_SIZE);
5938 memcpy((void *)set_key_ireq.hash32,
5939 (void *)create_key_req.hash32,
5940 QSEECOM_HASH_SIZE);
5941 /*
5942 * It will return false if it is GPCE based crypto instance or
5943 * ICE is setup properly
5944 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07005945 ret = qseecom_enable_ice_setup(create_key_req.usage);
5946 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07005947 goto free_buf;
5948
5949 do {
5950 ret = __qseecom_set_clear_ce_key(data,
5951 create_key_req.usage,
5952 &set_key_ireq);
5953 /*
5954 * wait a little before calling scm again to let other
5955 * processes run
5956 */
5957 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
5958 msleep(50);
5959
5960 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
5961
5962 qseecom_disable_ice_setup(create_key_req.usage);
5963
5964 if (ret) {
5965 pr_err("Failed to create key: pipe %d, ce %d: %d\n",
5966 pipe, ce_hw[i], ret);
5967 goto free_buf;
5968 } else {
5969 pr_err("Set the key successfully\n");
5970 if ((create_key_req.usage ==
5971 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
5972 (create_key_req.usage ==
5973 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
5974 goto free_buf;
5975 }
5976 }
5977
5978free_buf:
5979 kzfree(ce_hw);
5980 return ret;
5981}
5982
5983static int qseecom_wipe_key(struct qseecom_dev_handle *data,
5984 void __user *argp)
5985{
5986 uint32_t *ce_hw = NULL;
5987 uint32_t pipe = 0;
5988 int ret = 0;
5989 uint32_t flags = 0;
5990 int i, j;
5991 struct qseecom_wipe_key_req wipe_key_req;
5992 struct qseecom_key_delete_ireq delete_key_ireq;
5993 struct qseecom_key_select_ireq clear_key_ireq;
5994 uint32_t entries = 0;
5995
5996 ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
5997 if (ret) {
5998 pr_err("copy_from_user failed\n");
5999 return ret;
6000 }
6001
6002 if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6003 wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6004 pr_err("unsupported usage %d\n", wipe_key_req.usage);
6005 ret = -EFAULT;
6006 return ret;
6007 }
6008
6009 entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
6010 wipe_key_req.usage);
6011 if (entries <= 0) {
6012 pr_err("no ce instance for usage %d instance %d\n",
6013 DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
6014 ret = -EINVAL;
6015 return ret;
6016 }
6017
6018 ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
6019 if (!ce_hw) {
6020 ret = -ENOMEM;
6021 return ret;
6022 }
6023
6024 ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
6025 DEFAULT_CE_INFO_UNIT);
6026 if (ret) {
6027 pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
6028 ret = -EINVAL;
6029 goto free_buf;
6030 }
6031
6032 if (wipe_key_req.wipe_key_flag) {
6033 delete_key_ireq.flags = flags;
6034 delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
6035 memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6036 memcpy((void *)delete_key_ireq.key_id,
6037 (void *)key_id_array[wipe_key_req.usage].desc,
6038 QSEECOM_KEY_ID_SIZE);
6039 memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6040
6041 ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
6042 &delete_key_ireq);
6043 if (ret) {
6044 pr_err("Failed to delete key from ssd storage: %d\n",
6045 ret);
6046 ret = -EFAULT;
6047 goto free_buf;
6048 }
6049 }
6050
6051 for (j = 0; j < entries; j++) {
6052 clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
6053 if (wipe_key_req.usage ==
6054 QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
6055 clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
6056 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6057 } else if (wipe_key_req.usage ==
6058 QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
6059 clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
6060 clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
6061 } else {
6062 clear_key_ireq.ce = ce_hw[j];
6063 clear_key_ireq.pipe = pipe;
6064 }
6065 clear_key_ireq.flags = flags;
6066 clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
6067 for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
6068 clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
6069 memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
6070
6071 /*
6072 * It will return false if it is GPCE based crypto instance or
6073 * ICE is setup properly
6074 */
AnilKumar Chimata4e210f92017-04-28 14:31:25 -07006075 ret = qseecom_enable_ice_setup(wipe_key_req.usage);
6076 if (ret)
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006077 goto free_buf;
6078
6079 ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
6080 &clear_key_ireq);
6081
6082 qseecom_disable_ice_setup(wipe_key_req.usage);
6083
6084 if (ret) {
6085 pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
6086 pipe, ce_hw[j], ret);
6087 ret = -EFAULT;
6088 goto free_buf;
6089 }
6090 }
6091
6092free_buf:
6093 kzfree(ce_hw);
6094 return ret;
6095}
6096
6097static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
6098 void __user *argp)
6099{
6100 int ret = 0;
6101 uint32_t flags = 0;
6102 struct qseecom_update_key_userinfo_req update_key_req;
6103 struct qseecom_key_userinfo_update_ireq ireq;
6104
6105 ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
6106 if (ret) {
6107 pr_err("copy_from_user failed\n");
6108 return ret;
6109 }
6110
6111 if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
6112 update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
6113 pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
6114 return -EFAULT;
6115 }
6116
6117 ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
6118
6119 if (qseecom.fde_key_size)
6120 flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
6121 else
6122 flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
6123
6124 ireq.flags = flags;
6125 memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
6126 memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
6127 memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
6128 memcpy((void *)ireq.key_id,
6129 (void *)key_id_array[update_key_req.usage].desc,
6130 QSEECOM_KEY_ID_SIZE);
6131 memcpy((void *)ireq.current_hash32,
6132 (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
6133 memcpy((void *)ireq.new_hash32,
6134 (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
6135
6136 do {
6137 ret = __qseecom_update_current_key_user_info(data,
6138 update_key_req.usage,
6139 &ireq);
6140 /*
6141 * wait a little before calling scm again to let other
6142 * processes run
6143 */
6144 if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
6145 msleep(50);
6146
6147 } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
6148 if (ret) {
6149 pr_err("Failed to update key info: %d\n", ret);
6150 return ret;
6151 }
6152 return ret;
6153
6154}
6155static int qseecom_is_es_activated(void __user *argp)
6156{
6157 struct qseecom_is_es_activated_req req;
6158 struct qseecom_command_scm_resp resp;
6159 int ret;
6160
6161 if (qseecom.qsee_version < QSEE_VERSION_04) {
6162 pr_err("invalid qsee version\n");
6163 return -ENODEV;
6164 }
6165
6166 if (argp == NULL) {
6167 pr_err("arg is null\n");
6168 return -EINVAL;
6169 }
6170
6171 ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
6172 &req, sizeof(req), &resp, sizeof(resp));
6173 if (ret) {
6174 pr_err("scm_call failed\n");
6175 return ret;
6176 }
6177
6178 req.is_activated = resp.result;
6179 ret = copy_to_user(argp, &req, sizeof(req));
6180 if (ret) {
6181 pr_err("copy_to_user failed\n");
6182 return ret;
6183 }
6184
6185 return 0;
6186}
6187
6188static int qseecom_save_partition_hash(void __user *argp)
6189{
6190 struct qseecom_save_partition_hash_req req;
6191 struct qseecom_command_scm_resp resp;
6192 int ret;
6193
6194 memset(&resp, 0x00, sizeof(resp));
6195
6196 if (qseecom.qsee_version < QSEE_VERSION_04) {
6197 pr_err("invalid qsee version\n");
6198 return -ENODEV;
6199 }
6200
6201 if (argp == NULL) {
6202 pr_err("arg is null\n");
6203 return -EINVAL;
6204 }
6205
6206 ret = copy_from_user(&req, argp, sizeof(req));
6207 if (ret) {
6208 pr_err("copy_from_user failed\n");
6209 return ret;
6210 }
6211
6212 ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
6213 (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
6214 if (ret) {
6215 pr_err("qseecom_scm_call failed\n");
6216 return ret;
6217 }
6218
6219 return 0;
6220}
6221
/*
 * Cipher a Device Integrity Partition (DIP) buffer for MDTP via TZ.
 *
 * Copies the user's input buffer into a kernel bounce buffer, invokes
 * the TZ_MDTP_CIPHER_DIP service with physical addresses of the in/out
 * buffers, and copies the result back to userspace.
 *
 * The do { ... } while (0) acts as a single-exit block: any failure
 * breaks out and falls through to the common kzfree() cleanup.
 *
 * Returns 0 on success, -EINVAL/-ENOMEM for bad input or allocation
 * failure, or the copy/clock/scm failure code.
 */
static int qseecom_mdtp_cipher_dip(void __user *argp)
{
	struct qseecom_mdtp_cipher_dip_req req;
	u32 tzbuflenin, tzbuflenout;
	char *tzbufin = NULL, *tzbufout = NULL;
	struct scm_desc desc = {0};
	int ret;

	do {
		/* Copy the parameters from userspace */
		if (argp == NULL) {
			pr_err("arg is null\n");
			ret = -EINVAL;
			break;
		}

		ret = copy_from_user(&req, argp, sizeof(req));
		if (ret) {
			pr_err("copy_from_user failed, ret= %d\n", ret);
			break;
		}

		/* Bound both sizes by MAX_DIP; direction is a 0/1 flag. */
		if (req.in_buf == NULL || req.out_buf == NULL ||
			req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
			req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
			req.direction > 1) {
			pr_err("invalid parameters\n");
			ret = -EINVAL;
			break;
		}

		/* Copy the input buffer from userspace to kernel space */
		tzbuflenin = PAGE_ALIGN(req.in_buf_size);
		tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
		if (!tzbufin) {
			pr_err("error allocating in buffer\n");
			ret = -ENOMEM;
			break;
		}

		ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
		if (ret) {
			pr_err("copy_from_user failed, ret=%d\n", ret);
			break;
		}

		/* Flush to DRAM so TZ sees the freshly written input. */
		dmac_flush_range(tzbufin, tzbufin + tzbuflenin);

		/* Prepare the output buffer in kernel space */
		tzbuflenout = PAGE_ALIGN(req.out_buf_size);
		tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
		if (!tzbufout) {
			pr_err("error allocating out buffer\n");
			ret = -ENOMEM;
			break;
		}

		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);

		/* Send the command to TZ */
		desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
		desc.args[0] = virt_to_phys(tzbufin);
		desc.args[1] = req.in_buf_size;
		desc.args[2] = virt_to_phys(tzbufout);
		desc.args[3] = req.out_buf_size;
		desc.args[4] = req.direction;

		/* QSEE clock must be on across the scm call. */
		ret = __qseecom_enable_clk(CLK_QSEE);
		if (ret)
			break;

		ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);

		__qseecom_disable_clk(CLK_QSEE);

		if (ret) {
			pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
				ret);
			break;
		}

		/* Copy the output buffer from kernel space to userspace */
		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
		ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
		if (ret) {
			pr_err("copy_to_user failed, ret=%d\n", ret);
			break;
		}
	} while (0);

	/* kzfree(NULL) is a no-op, so both paths are safe here. */
	kzfree(tzbufin);
	kzfree(tzbufout);

	return ret;
}
6317
6318static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
6319 struct qseecom_qteec_req *req)
6320{
6321 if (!data || !data->client.ihandle) {
6322 pr_err("Client or client handle is not initialized\n");
6323 return -EINVAL;
6324 }
6325
6326 if (data->type != QSEECOM_CLIENT_APP)
6327 return -EFAULT;
6328
6329 if (req->req_len > UINT_MAX - req->resp_len) {
6330 pr_err("Integer overflow detected in req_len & rsp_len\n");
6331 return -EINVAL;
6332 }
6333
6334 if (req->req_len + req->resp_len > data->client.sb_length) {
6335 pr_debug("Not enough memory to fit cmd_buf.\n");
6336 pr_debug("resp_buf. Required: %u, Available: %zu\n",
6337 (req->req_len + req->resp_len), data->client.sb_length);
6338 return -ENOMEM;
6339 }
6340
6341 if (req->req_ptr == NULL || req->resp_ptr == NULL) {
6342 pr_err("cmd buffer or response buffer is null\n");
6343 return -EINVAL;
6344 }
6345 if (((uintptr_t)req->req_ptr <
6346 data->client.user_virt_sb_base) ||
6347 ((uintptr_t)req->req_ptr >=
6348 (data->client.user_virt_sb_base + data->client.sb_length))) {
6349 pr_err("cmd buffer address not within shared bufffer\n");
6350 return -EINVAL;
6351 }
6352
6353 if (((uintptr_t)req->resp_ptr <
6354 data->client.user_virt_sb_base) ||
6355 ((uintptr_t)req->resp_ptr >=
6356 (data->client.user_virt_sb_base + data->client.sb_length))) {
6357 pr_err("response buffer address not within shared bufffer\n");
6358 return -EINVAL;
6359 }
6360
6361 if ((req->req_len == 0) || (req->resp_len == 0)) {
6362 pr_err("cmd buf lengtgh/response buf length not valid\n");
6363 return -EINVAL;
6364 }
6365
6366 if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
6367 pr_err("Integer overflow in req_len & req_ptr\n");
6368 return -EINVAL;
6369 }
6370
6371 if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
6372 pr_err("Integer overflow in resp_len & resp_ptr\n");
6373 return -EINVAL;
6374 }
6375
6376 if (data->client.user_virt_sb_base >
6377 (ULONG_MAX - data->client.sb_length)) {
6378 pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
6379 return -EINVAL;
6380 }
6381 if ((((uintptr_t)req->req_ptr + req->req_len) >
6382 ((uintptr_t)data->client.user_virt_sb_base +
6383 data->client.sb_length)) ||
6384 (((uintptr_t)req->resp_ptr + req->resp_len) >
6385 ((uintptr_t)data->client.user_virt_sb_base +
6386 data->client.sb_length))) {
6387 pr_err("cmd buf or resp buf is out of shared buffer region\n");
6388 return -EINVAL;
6389 }
6390 return 0;
6391}
6392
6393static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
6394 uint32_t fd_idx, struct sg_table *sg_ptr)
6395{
6396 struct scatterlist *sg = sg_ptr->sgl;
6397 struct qseecom_sg_entry *sg_entry;
6398 void *buf;
6399 uint i;
6400 size_t size;
6401 dma_addr_t coh_pmem;
6402
6403 if (fd_idx >= MAX_ION_FD) {
6404 pr_err("fd_idx [%d] is invalid\n", fd_idx);
6405 return -ENOMEM;
6406 }
6407 /*
6408 * Allocate a buffer, populate it with number of entry plus
6409 * each sg entry's phy addr and length; then return the
6410 * phy_addr of the buffer.
6411 */
6412 size = sizeof(uint32_t) +
6413 sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
6414 size = (size + PAGE_SIZE) & PAGE_MASK;
6415 buf = dma_alloc_coherent(qseecom.pdev,
6416 size, &coh_pmem, GFP_KERNEL);
6417 if (buf == NULL) {
6418 pr_err("failed to alloc memory for sg buf\n");
6419 return -ENOMEM;
6420 }
6421 *(uint32_t *)buf = sg_ptr->nents;
6422 sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
6423 for (i = 0; i < sg_ptr->nents; i++) {
6424 sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
6425 sg_entry->len = sg->length;
6426 sg_entry++;
6427 sg = sg_next(sg);
6428 }
6429 data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
6430 data->client.sec_buf_fd[fd_idx].vbase = buf;
6431 data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
6432 data->client.sec_buf_fd[fd_idx].size = size;
6433 return 0;
6434}
6435
6436static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
6437 struct qseecom_dev_handle *data, bool cleanup)
6438{
6439 struct ion_handle *ihandle;
6440 int ret = 0;
6441 int i = 0;
6442 uint32_t *update;
6443 struct sg_table *sg_ptr = NULL;
6444 struct scatterlist *sg;
6445 struct qseecom_param_memref *memref;
6446
6447 if (req == NULL) {
6448 pr_err("Invalid address\n");
6449 return -EINVAL;
6450 }
6451 for (i = 0; i < MAX_ION_FD; i++) {
6452 if (req->ifd_data[i].fd > 0) {
AnilKumar Chimata04d60cf2017-04-09 11:43:10 -07006453 ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006454 req->ifd_data[i].fd);
6455 if (IS_ERR_OR_NULL(ihandle)) {
6456 pr_err("Ion client can't retrieve the handle\n");
6457 return -ENOMEM;
6458 }
6459 if ((req->req_len < sizeof(uint32_t)) ||
6460 (req->ifd_data[i].cmd_buf_offset >
6461 req->req_len - sizeof(uint32_t))) {
6462 pr_err("Invalid offset/req len 0x%x/0x%x\n",
6463 req->req_len,
6464 req->ifd_data[i].cmd_buf_offset);
6465 return -EINVAL;
6466 }
6467 update = (uint32_t *)((char *) req->req_ptr +
6468 req->ifd_data[i].cmd_buf_offset);
6469 if (!update) {
6470 pr_err("update pointer is NULL\n");
6471 return -EINVAL;
6472 }
6473 } else {
6474 continue;
6475 }
6476 /* Populate the cmd data structure with the phys_addr */
6477 sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
6478 if (IS_ERR_OR_NULL(sg_ptr)) {
6479 pr_err("IOn client could not retrieve sg table\n");
6480 goto err;
6481 }
6482 sg = sg_ptr->sgl;
6483 if (sg == NULL) {
6484 pr_err("sg is NULL\n");
6485 goto err;
6486 }
6487 if ((sg_ptr->nents == 0) || (sg->length == 0)) {
6488 pr_err("Num of scat entr (%d)or length(%d) invalid\n",
6489 sg_ptr->nents, sg->length);
6490 goto err;
6491 }
6492 /* clean up buf for pre-allocated fd */
6493 if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
6494 (*update)) {
6495 if (data->client.sec_buf_fd[i].vbase)
6496 dma_free_coherent(qseecom.pdev,
6497 data->client.sec_buf_fd[i].size,
6498 data->client.sec_buf_fd[i].vbase,
6499 data->client.sec_buf_fd[i].pbase);
6500 memset((void *)update, 0,
6501 sizeof(struct qseecom_param_memref));
6502 memset(&(data->client.sec_buf_fd[i]), 0,
6503 sizeof(struct qseecom_sec_buf_fd_info));
6504 goto clean;
6505 }
6506
6507 if (*update == 0) {
6508 /* update buf for pre-allocated fd from secure heap*/
6509 ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
6510 sg_ptr);
6511 if (ret) {
6512 pr_err("Failed to handle buf for fd[%d]\n", i);
6513 goto err;
6514 }
6515 memref = (struct qseecom_param_memref *)update;
6516 memref->buffer =
6517 (uint32_t)(data->client.sec_buf_fd[i].pbase);
6518 memref->size =
6519 (uint32_t)(data->client.sec_buf_fd[i].size);
6520 } else {
6521 /* update buf for fd from non-secure qseecom heap */
6522 if (sg_ptr->nents != 1) {
6523 pr_err("Num of scat entr (%d) invalid\n",
6524 sg_ptr->nents);
6525 goto err;
6526 }
6527 if (cleanup)
6528 *update = 0;
6529 else
6530 *update = (uint32_t)sg_dma_address(sg_ptr->sgl);
6531 }
6532clean:
6533 if (cleanup) {
6534 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6535 ihandle, NULL, sg->length,
6536 ION_IOC_INV_CACHES);
6537 if (ret) {
6538 pr_err("cache operation failed %d\n", ret);
6539 goto err;
6540 }
6541 } else {
6542 ret = msm_ion_do_cache_op(qseecom.ion_clnt,
6543 ihandle, NULL, sg->length,
6544 ION_IOC_CLEAN_INV_CACHES);
6545 if (ret) {
6546 pr_err("cache operation failed %d\n", ret);
6547 goto err;
6548 }
6549 data->sglistinfo_ptr[i].indexAndFlags =
6550 SGLISTINFO_SET_INDEX_FLAG(
6551 (sg_ptr->nents == 1), 0,
6552 req->ifd_data[i].cmd_buf_offset);
6553 data->sglistinfo_ptr[i].sizeOrCount =
6554 (sg_ptr->nents == 1) ?
6555 sg->length : sg_ptr->nents;
6556 data->sglist_cnt = i + 1;
6557 }
6558 /* Deallocate the handle */
6559 if (!IS_ERR_OR_NULL(ihandle))
6560 ion_free(qseecom.ion_clnt, ihandle);
6561 }
6562 return ret;
6563err:
6564 if (!IS_ERR_OR_NULL(ihandle))
6565 ion_free(qseecom.ion_clnt, ihandle);
6566 return -ENOMEM;
6567}
6568
/*
 * Common QTEEC command path: validate the request, translate its
 * user-space buffer pointers to kernel addresses, build the 32- or
 * 64-bit TZ request, and issue the scm call.
 *
 * OPEN_SESSION and REQUEST_CANCELLATION commands may carry ion fds
 * (the req is really a qseecom_qteec_modfd_req), so their buffers are
 * patched before the call and restored afterwards.
 *
 * Returns 0 on success or a negative errno; cache restore and buffer
 * cleanup run on the exit path even when the scm call fails.
 */
static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req, uint32_t cmd_id)
{
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	int ret2 = 0;	/* exit-path status; must not clobber ret */
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = __qseecom_qteec_validate_msg(data, req);
	if (ret)
		return ret;

	/*
	 * Keep the original user-space addresses; req->req_ptr/resp_ptr
	 * are rewritten to kernel virtual addresses below, while the TZ
	 * request needs physical addresses derived from the user ones.
	 */
	req_ptr = req->req_ptr;
	resp_ptr = req->resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* Translate to kernel VAs so modfd patching can write the buffer. */
	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->req_ptr);
	req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->resp_ptr);

	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, false);
		if (ret)
			return ret;
	}

	/* Build the TZ request in the layout this QSEE version expects. */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req->req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req->resp_len;
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req->req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req->resp_len;
		/* A 32-bit TA cannot address buffers at or above 4G. */
		if ((data->client.app_arch == ELFCLASS32) &&
			((ireq_64bit.req_ptr >=
				PHY_ADDR_4G - ireq_64bit.req_len) ||
			(ireq_64bit.resp_ptr >=
				PHY_ADDR_4G - ireq_64bit.resp_len))){
			pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
				data->client.app_name, data->client.app_id);
			pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
				ireq_64bit.req_ptr, ireq_64bit.req_len,
				ireq_64bit.resp_ptr, ireq_64bit.resp_len);
			return -EFAULT;
		}
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
	}
	/* The command id is the first word of either request layout. */
	if (qseecom.whitelist_support == true
		&& cmd_id == QSEOS_TEE_OPEN_SESSION)
		*(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
	else
		*(uint32_t *)cmd_buf = cmd_id;

	/* Clean the shared buffer so TZ reads what we just wrote. */
	reqd_len_sb_in = req->req_len + req->resp_len;
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
					ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
						resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/* Invalidate so the CPU sees TZ's writes to the shared buffer. */
	ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret2) {
		pr_err("cache operation failed %d\n", ret);
		return ret2;
	}

	/* Undo the modfd buffer patching done before the call. */
	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret2 = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, true);
		if (ret2)
			return ret2;
	}
	return ret;
}
6731
6732static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
6733 void __user *argp)
6734{
6735 struct qseecom_qteec_modfd_req req;
6736 int ret = 0;
6737
6738 ret = copy_from_user(&req, argp,
6739 sizeof(struct qseecom_qteec_modfd_req));
6740 if (ret) {
6741 pr_err("copy_from_user failed\n");
6742 return ret;
6743 }
6744 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6745 QSEOS_TEE_OPEN_SESSION);
6746
6747 return ret;
6748}
6749
6750static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
6751 void __user *argp)
6752{
6753 struct qseecom_qteec_req req;
6754 int ret = 0;
6755
6756 ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
6757 if (ret) {
6758 pr_err("copy_from_user failed\n");
6759 return ret;
6760 }
6761 ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
6762 return ret;
6763}
6764
/*
 * ioctl backend for QTEEC "invoke command" with ion fd support.
 *
 * Copies the modfd request from userspace, validates it, patches its
 * buffer with each fd's physical address, builds the 32- or 64-bit TZ
 * request, issues the scm call, and finally restores the buffer and
 * invalidates the shared-buffer cache.
 *
 * Returns 0 on success or a negative errno.
 */
static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	struct qseecom_qteec_modfd_req req;
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	int i = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = copy_from_user(&req, argp,
			sizeof(struct qseecom_qteec_modfd_req));
	if (ret) {
		pr_err("copy_from_user failed\n");
		return ret;
	}
	ret = __qseecom_qteec_validate_msg(data,
					(struct qseecom_qteec_req *)(&req));
	if (ret)
		return ret;
	/*
	 * Keep the original user-space addresses; req.req_ptr/resp_ptr
	 * are rewritten to kernel virtual addresses below, while the TZ
	 * request needs physical addresses derived from the user ones.
	 */
	req_ptr = req.req_ptr;
	resp_ptr = req.resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* validate offsets */
	for (i = 0; i < MAX_ION_FD; i++) {
		if (req.ifd_data[i].fd) {
			if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
				return -EINVAL;
		}
	}
	/* Translate to kernel VAs so the fd patching can write the buffer. */
	req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req.req_ptr);
	req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req.resp_ptr);
	ret = __qseecom_update_qteec_req_buf(&req, data, false);
	if (ret)
		return ret;

	/* Build the TZ request in the layout this QSEE version expects. */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req.req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req.resp_len;
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req.req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req.resp_len;
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		dmac_flush_range((void *)table,
				(void *)table + SGLISTINFO_TABLE_SIZE);
	}
	reqd_len_sb_in = req.req_len + req.resp_len;
	/* The command id is the first word of either request layout. */
	if (qseecom.whitelist_support == true)
		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
	else
		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;

	/* Clean the shared buffer so TZ reads what we just wrote. */
	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
					data->client.sb_virt,
					reqd_len_sb_in,
					ION_IOC_CLEAN_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		return ret;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				return ret;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
						resp.result);
				ret = -EINVAL;
			}
		}
	}
	/* Undo the fd patching, then invalidate to see TZ's writes. */
	ret = __qseecom_update_qteec_req_buf(&req, data, true);
	if (ret)
		return ret;

	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
				data->client.sb_virt, data->client.sb_length,
				ION_IOC_INV_CACHES);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}
	return 0;
}
6915
6916static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
6917 void __user *argp)
6918{
6919 struct qseecom_qteec_modfd_req req;
6920 int ret = 0;
6921
6922 ret = copy_from_user(&req, argp,
6923 sizeof(struct qseecom_qteec_modfd_req));
6924 if (ret) {
6925 pr_err("copy_from_user failed\n");
6926 return ret;
6927 }
6928 ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
6929 QSEOS_TEE_REQUEST_CANCELLATION);
6930
6931 return ret;
6932}
6933
6934static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
6935{
6936 if (data->sglist_cnt) {
6937 memset(data->sglistinfo_ptr, 0,
6938 SGLISTINFO_TABLE_SIZE);
6939 data->sglist_cnt = 0;
6940 }
6941}
6942
6943static inline long qseecom_ioctl(struct file *file,
6944 unsigned int cmd, unsigned long arg)
6945{
6946 int ret = 0;
6947 struct qseecom_dev_handle *data = file->private_data;
6948 void __user *argp = (void __user *) arg;
6949 bool perf_enabled = false;
6950
6951 if (!data) {
6952 pr_err("Invalid/uninitialized device handle\n");
6953 return -EINVAL;
6954 }
6955
6956 if (data->abort) {
6957 pr_err("Aborting qseecom driver\n");
6958 return -ENODEV;
6959 }
6960
6961 switch (cmd) {
6962 case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
6963 if (data->type != QSEECOM_GENERIC) {
6964 pr_err("reg lstnr req: invalid handle (%d)\n",
6965 data->type);
6966 ret = -EINVAL;
6967 break;
6968 }
6969 pr_debug("ioctl register_listener_req()\n");
6970 mutex_lock(&app_access_lock);
6971 atomic_inc(&data->ioctl_count);
6972 data->type = QSEECOM_LISTENER_SERVICE;
6973 ret = qseecom_register_listener(data, argp);
6974 atomic_dec(&data->ioctl_count);
6975 wake_up_all(&data->abort_wq);
6976 mutex_unlock(&app_access_lock);
6977 if (ret)
6978 pr_err("failed qseecom_register_listener: %d\n", ret);
6979 break;
6980 }
Neeraj Sonib30ac1f2018-04-17 14:48:42 +05306981 case QSEECOM_IOCTL_SET_ICE_INFO: {
6982 struct qseecom_ice_data_t ice_data;
6983
6984 ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
6985 if (ret) {
6986 pr_err("copy_from_user failed\n");
6987 return -EFAULT;
6988 }
6989 qcom_ice_set_fde_flag(ice_data.flag);
6990 break;
6991 }
6992
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07006993 case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
6994 if ((data->listener.id == 0) ||
6995 (data->type != QSEECOM_LISTENER_SERVICE)) {
6996 pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
6997 data->type, data->listener.id);
6998 ret = -EINVAL;
6999 break;
7000 }
7001 pr_debug("ioctl unregister_listener_req()\n");
7002 mutex_lock(&app_access_lock);
7003 atomic_inc(&data->ioctl_count);
7004 ret = qseecom_unregister_listener(data);
7005 atomic_dec(&data->ioctl_count);
7006 wake_up_all(&data->abort_wq);
7007 mutex_unlock(&app_access_lock);
7008 if (ret)
7009 pr_err("failed qseecom_unregister_listener: %d\n", ret);
7010 break;
7011 }
7012 case QSEECOM_IOCTL_SEND_CMD_REQ: {
7013 if ((data->client.app_id == 0) ||
7014 (data->type != QSEECOM_CLIENT_APP)) {
7015 pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
7016 data->type, data->client.app_id);
7017 ret = -EINVAL;
7018 break;
7019 }
7020 /* Only one client allowed here at a time */
7021 mutex_lock(&app_access_lock);
7022 if (qseecom.support_bus_scaling) {
7023 /* register bus bw in case the client doesn't do it */
7024 if (!data->mode) {
7025 mutex_lock(&qsee_bw_mutex);
7026 __qseecom_register_bus_bandwidth_needs(
7027 data, HIGH);
7028 mutex_unlock(&qsee_bw_mutex);
7029 }
7030 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7031 if (ret) {
7032 pr_err("Failed to set bw.\n");
7033 ret = -EINVAL;
7034 mutex_unlock(&app_access_lock);
7035 break;
7036 }
7037 }
7038 /*
7039 * On targets where crypto clock is handled by HLOS,
7040 * if clk_access_cnt is zero and perf_enabled is false,
7041 * then the crypto clock was not enabled before sending cmd to
7042 * tz, qseecom will enable the clock to avoid service failure.
7043 */
7044 if (!qseecom.no_clock_support &&
7045 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7046 pr_debug("ce clock is not enabled!\n");
7047 ret = qseecom_perf_enable(data);
7048 if (ret) {
7049 pr_err("Failed to vote for clock with err %d\n",
7050 ret);
7051 mutex_unlock(&app_access_lock);
7052 ret = -EINVAL;
7053 break;
7054 }
7055 perf_enabled = true;
7056 }
7057 atomic_inc(&data->ioctl_count);
7058 ret = qseecom_send_cmd(data, argp);
7059 if (qseecom.support_bus_scaling)
7060 __qseecom_add_bw_scale_down_timer(
7061 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7062 if (perf_enabled) {
7063 qsee_disable_clock_vote(data, CLK_DFAB);
7064 qsee_disable_clock_vote(data, CLK_SFPB);
7065 }
7066 atomic_dec(&data->ioctl_count);
7067 wake_up_all(&data->abort_wq);
7068 mutex_unlock(&app_access_lock);
7069 if (ret)
7070 pr_err("failed qseecom_send_cmd: %d\n", ret);
7071 break;
7072 }
7073 case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
7074 case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
7075 if ((data->client.app_id == 0) ||
7076 (data->type != QSEECOM_CLIENT_APP)) {
7077 pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
7078 data->type, data->client.app_id);
7079 ret = -EINVAL;
7080 break;
7081 }
7082 /* Only one client allowed here at a time */
7083 mutex_lock(&app_access_lock);
7084 if (qseecom.support_bus_scaling) {
7085 if (!data->mode) {
7086 mutex_lock(&qsee_bw_mutex);
7087 __qseecom_register_bus_bandwidth_needs(
7088 data, HIGH);
7089 mutex_unlock(&qsee_bw_mutex);
7090 }
7091 ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
7092 if (ret) {
7093 pr_err("Failed to set bw.\n");
7094 mutex_unlock(&app_access_lock);
7095 ret = -EINVAL;
7096 break;
7097 }
7098 }
7099 /*
7100 * On targets where crypto clock is handled by HLOS,
7101 * if clk_access_cnt is zero and perf_enabled is false,
7102 * then the crypto clock was not enabled before sending cmd to
7103 * tz, qseecom will enable the clock to avoid service failure.
7104 */
7105 if (!qseecom.no_clock_support &&
7106 !qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
7107 pr_debug("ce clock is not enabled!\n");
7108 ret = qseecom_perf_enable(data);
7109 if (ret) {
7110 pr_err("Failed to vote for clock with err %d\n",
7111 ret);
7112 mutex_unlock(&app_access_lock);
7113 ret = -EINVAL;
7114 break;
7115 }
7116 perf_enabled = true;
7117 }
7118 atomic_inc(&data->ioctl_count);
7119 if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
7120 ret = qseecom_send_modfd_cmd(data, argp);
7121 else
7122 ret = qseecom_send_modfd_cmd_64(data, argp);
7123 if (qseecom.support_bus_scaling)
7124 __qseecom_add_bw_scale_down_timer(
7125 QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
7126 if (perf_enabled) {
7127 qsee_disable_clock_vote(data, CLK_DFAB);
7128 qsee_disable_clock_vote(data, CLK_SFPB);
7129 }
7130 atomic_dec(&data->ioctl_count);
7131 wake_up_all(&data->abort_wq);
7132 mutex_unlock(&app_access_lock);
7133 if (ret)
7134 pr_err("failed qseecom_send_cmd: %d\n", ret);
7135 __qseecom_clean_data_sglistinfo(data);
7136 break;
7137 }
7138 case QSEECOM_IOCTL_RECEIVE_REQ: {
7139 if ((data->listener.id == 0) ||
7140 (data->type != QSEECOM_LISTENER_SERVICE)) {
7141 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7142 data->type, data->listener.id);
7143 ret = -EINVAL;
7144 break;
7145 }
7146 atomic_inc(&data->ioctl_count);
7147 ret = qseecom_receive_req(data);
7148 atomic_dec(&data->ioctl_count);
7149 wake_up_all(&data->abort_wq);
7150 if (ret && (ret != -ERESTARTSYS))
7151 pr_err("failed qseecom_receive_req: %d\n", ret);
7152 break;
7153 }
7154 case QSEECOM_IOCTL_SEND_RESP_REQ: {
7155 if ((data->listener.id == 0) ||
7156 (data->type != QSEECOM_LISTENER_SERVICE)) {
7157 pr_err("send resp req: invalid handle (%d), lid(%d)\n",
7158 data->type, data->listener.id);
7159 ret = -EINVAL;
7160 break;
7161 }
7162 atomic_inc(&data->ioctl_count);
7163 if (!qseecom.qsee_reentrancy_support)
7164 ret = qseecom_send_resp();
7165 else
7166 ret = qseecom_reentrancy_send_resp(data);
7167 atomic_dec(&data->ioctl_count);
7168 wake_up_all(&data->abort_wq);
7169 if (ret)
7170 pr_err("failed qseecom_send_resp: %d\n", ret);
7171 break;
7172 }
7173 case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
7174 if ((data->type != QSEECOM_CLIENT_APP) &&
7175 (data->type != QSEECOM_GENERIC) &&
7176 (data->type != QSEECOM_SECURE_SERVICE)) {
7177 pr_err("set mem param req: invalid handle (%d)\n",
7178 data->type);
7179 ret = -EINVAL;
7180 break;
7181 }
7182 pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
7183 mutex_lock(&app_access_lock);
7184 atomic_inc(&data->ioctl_count);
7185 ret = qseecom_set_client_mem_param(data, argp);
7186 atomic_dec(&data->ioctl_count);
7187 mutex_unlock(&app_access_lock);
7188 if (ret)
7189 pr_err("failed Qqseecom_set_mem_param request: %d\n",
7190 ret);
7191 break;
7192 }
7193 case QSEECOM_IOCTL_LOAD_APP_REQ: {
7194 if ((data->type != QSEECOM_GENERIC) &&
7195 (data->type != QSEECOM_CLIENT_APP)) {
7196 pr_err("load app req: invalid handle (%d)\n",
7197 data->type);
7198 ret = -EINVAL;
7199 break;
7200 }
7201 data->type = QSEECOM_CLIENT_APP;
7202 pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
7203 mutex_lock(&app_access_lock);
7204 atomic_inc(&data->ioctl_count);
7205 ret = qseecom_load_app(data, argp);
7206 atomic_dec(&data->ioctl_count);
7207 mutex_unlock(&app_access_lock);
7208 if (ret)
7209 pr_err("failed load_app request: %d\n", ret);
7210 break;
7211 }
7212 case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
7213 if ((data->client.app_id == 0) ||
7214 (data->type != QSEECOM_CLIENT_APP)) {
7215 pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
7216 data->type, data->client.app_id);
7217 ret = -EINVAL;
7218 break;
7219 }
7220 pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
7221 mutex_lock(&app_access_lock);
7222 atomic_inc(&data->ioctl_count);
7223 ret = qseecom_unload_app(data, false);
7224 atomic_dec(&data->ioctl_count);
7225 mutex_unlock(&app_access_lock);
7226 if (ret)
7227 pr_err("failed unload_app request: %d\n", ret);
7228 break;
7229 }
7230 case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
7231 atomic_inc(&data->ioctl_count);
7232 ret = qseecom_get_qseos_version(data, argp);
7233 if (ret)
7234 pr_err("qseecom_get_qseos_version: %d\n", ret);
7235 atomic_dec(&data->ioctl_count);
7236 break;
7237 }
7238 case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
7239 if ((data->type != QSEECOM_GENERIC) &&
7240 (data->type != QSEECOM_CLIENT_APP)) {
7241 pr_err("perf enable req: invalid handle (%d)\n",
7242 data->type);
7243 ret = -EINVAL;
7244 break;
7245 }
7246 if ((data->type == QSEECOM_CLIENT_APP) &&
7247 (data->client.app_id == 0)) {
7248 pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
7249 data->type, data->client.app_id);
7250 ret = -EINVAL;
7251 break;
7252 }
7253 atomic_inc(&data->ioctl_count);
7254 if (qseecom.support_bus_scaling) {
7255 mutex_lock(&qsee_bw_mutex);
7256 __qseecom_register_bus_bandwidth_needs(data, HIGH);
7257 mutex_unlock(&qsee_bw_mutex);
7258 } else {
7259 ret = qseecom_perf_enable(data);
7260 if (ret)
7261 pr_err("Fail to vote for clocks %d\n", ret);
7262 }
7263 atomic_dec(&data->ioctl_count);
7264 break;
7265 }
7266 case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
7267 if ((data->type != QSEECOM_SECURE_SERVICE) &&
7268 (data->type != QSEECOM_CLIENT_APP)) {
7269 pr_err("perf disable req: invalid handle (%d)\n",
7270 data->type);
7271 ret = -EINVAL;
7272 break;
7273 }
7274 if ((data->type == QSEECOM_CLIENT_APP) &&
7275 (data->client.app_id == 0)) {
7276 pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
7277 data->type, data->client.app_id);
7278 ret = -EINVAL;
7279 break;
7280 }
7281 atomic_inc(&data->ioctl_count);
7282 if (!qseecom.support_bus_scaling) {
7283 qsee_disable_clock_vote(data, CLK_DFAB);
7284 qsee_disable_clock_vote(data, CLK_SFPB);
7285 } else {
7286 mutex_lock(&qsee_bw_mutex);
7287 qseecom_unregister_bus_bandwidth_needs(data);
7288 mutex_unlock(&qsee_bw_mutex);
7289 }
7290 atomic_dec(&data->ioctl_count);
7291 break;
7292 }
7293
7294 case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
7295 /* If crypto clock is not handled by HLOS, return directly. */
7296 if (qseecom.no_clock_support) {
7297 pr_debug("crypto clock is not handled by HLOS\n");
7298 break;
7299 }
7300 if ((data->client.app_id == 0) ||
7301 (data->type != QSEECOM_CLIENT_APP)) {
7302 pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
7303 data->type, data->client.app_id);
7304 ret = -EINVAL;
7305 break;
7306 }
7307 atomic_inc(&data->ioctl_count);
7308 ret = qseecom_scale_bus_bandwidth(data, argp);
7309 atomic_dec(&data->ioctl_count);
7310 break;
7311 }
7312 case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
7313 if (data->type != QSEECOM_GENERIC) {
7314 pr_err("load ext elf req: invalid client handle (%d)\n",
7315 data->type);
7316 ret = -EINVAL;
7317 break;
7318 }
7319 data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
7320 data->released = true;
7321 mutex_lock(&app_access_lock);
7322 atomic_inc(&data->ioctl_count);
7323 ret = qseecom_load_external_elf(data, argp);
7324 atomic_dec(&data->ioctl_count);
7325 mutex_unlock(&app_access_lock);
7326 if (ret)
7327 pr_err("failed load_external_elf request: %d\n", ret);
7328 break;
7329 }
7330 case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
7331 if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
7332 pr_err("unload ext elf req: invalid handle (%d)\n",
7333 data->type);
7334 ret = -EINVAL;
7335 break;
7336 }
7337 data->released = true;
7338 mutex_lock(&app_access_lock);
7339 atomic_inc(&data->ioctl_count);
7340 ret = qseecom_unload_external_elf(data);
7341 atomic_dec(&data->ioctl_count);
7342 mutex_unlock(&app_access_lock);
7343 if (ret)
7344 pr_err("failed unload_app request: %d\n", ret);
7345 break;
7346 }
7347 case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
7348 data->type = QSEECOM_CLIENT_APP;
7349 mutex_lock(&app_access_lock);
7350 atomic_inc(&data->ioctl_count);
7351 pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
7352 ret = qseecom_query_app_loaded(data, argp);
7353 atomic_dec(&data->ioctl_count);
7354 mutex_unlock(&app_access_lock);
7355 break;
7356 }
7357 case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
7358 if (data->type != QSEECOM_GENERIC) {
7359 pr_err("send cmd svc req: invalid handle (%d)\n",
7360 data->type);
7361 ret = -EINVAL;
7362 break;
7363 }
7364 data->type = QSEECOM_SECURE_SERVICE;
7365 if (qseecom.qsee_version < QSEE_VERSION_03) {
7366 pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
7367 qseecom.qsee_version);
7368 return -EINVAL;
7369 }
7370 mutex_lock(&app_access_lock);
7371 atomic_inc(&data->ioctl_count);
7372 ret = qseecom_send_service_cmd(data, argp);
7373 atomic_dec(&data->ioctl_count);
7374 mutex_unlock(&app_access_lock);
7375 break;
7376 }
7377 case QSEECOM_IOCTL_CREATE_KEY_REQ: {
7378 if (!(qseecom.support_pfe || qseecom.support_fde))
7379 pr_err("Features requiring key init not supported\n");
7380 if (data->type != QSEECOM_GENERIC) {
7381 pr_err("create key req: invalid handle (%d)\n",
7382 data->type);
7383 ret = -EINVAL;
7384 break;
7385 }
7386 if (qseecom.qsee_version < QSEE_VERSION_05) {
7387 pr_err("Create Key feature unsupported: qsee ver %u\n",
7388 qseecom.qsee_version);
7389 return -EINVAL;
7390 }
7391 data->released = true;
7392 mutex_lock(&app_access_lock);
7393 atomic_inc(&data->ioctl_count);
7394 ret = qseecom_create_key(data, argp);
7395 if (ret)
7396 pr_err("failed to create encryption key: %d\n", ret);
7397
7398 atomic_dec(&data->ioctl_count);
7399 mutex_unlock(&app_access_lock);
7400 break;
7401 }
7402 case QSEECOM_IOCTL_WIPE_KEY_REQ: {
7403 if (!(qseecom.support_pfe || qseecom.support_fde))
7404 pr_err("Features requiring key init not supported\n");
7405 if (data->type != QSEECOM_GENERIC) {
7406 pr_err("wipe key req: invalid handle (%d)\n",
7407 data->type);
7408 ret = -EINVAL;
7409 break;
7410 }
7411 if (qseecom.qsee_version < QSEE_VERSION_05) {
7412 pr_err("Wipe Key feature unsupported in qsee ver %u\n",
7413 qseecom.qsee_version);
7414 return -EINVAL;
7415 }
7416 data->released = true;
7417 mutex_lock(&app_access_lock);
7418 atomic_inc(&data->ioctl_count);
7419 ret = qseecom_wipe_key(data, argp);
7420 if (ret)
7421 pr_err("failed to wipe encryption key: %d\n", ret);
7422 atomic_dec(&data->ioctl_count);
7423 mutex_unlock(&app_access_lock);
7424 break;
7425 }
7426 case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
7427 if (!(qseecom.support_pfe || qseecom.support_fde))
7428 pr_err("Features requiring key init not supported\n");
7429 if (data->type != QSEECOM_GENERIC) {
7430 pr_err("update key req: invalid handle (%d)\n",
7431 data->type);
7432 ret = -EINVAL;
7433 break;
7434 }
7435 if (qseecom.qsee_version < QSEE_VERSION_05) {
7436 pr_err("Update Key feature unsupported in qsee ver %u\n",
7437 qseecom.qsee_version);
7438 return -EINVAL;
7439 }
7440 data->released = true;
7441 mutex_lock(&app_access_lock);
7442 atomic_inc(&data->ioctl_count);
7443 ret = qseecom_update_key_user_info(data, argp);
7444 if (ret)
7445 pr_err("failed to update key user info: %d\n", ret);
7446 atomic_dec(&data->ioctl_count);
7447 mutex_unlock(&app_access_lock);
7448 break;
7449 }
7450 case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
7451 if (data->type != QSEECOM_GENERIC) {
7452 pr_err("save part hash req: invalid handle (%d)\n",
7453 data->type);
7454 ret = -EINVAL;
7455 break;
7456 }
7457 data->released = true;
7458 mutex_lock(&app_access_lock);
7459 atomic_inc(&data->ioctl_count);
7460 ret = qseecom_save_partition_hash(argp);
7461 atomic_dec(&data->ioctl_count);
7462 mutex_unlock(&app_access_lock);
7463 break;
7464 }
7465 case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
7466 if (data->type != QSEECOM_GENERIC) {
7467 pr_err("ES activated req: invalid handle (%d)\n",
7468 data->type);
7469 ret = -EINVAL;
7470 break;
7471 }
7472 data->released = true;
7473 mutex_lock(&app_access_lock);
7474 atomic_inc(&data->ioctl_count);
7475 ret = qseecom_is_es_activated(argp);
7476 atomic_dec(&data->ioctl_count);
7477 mutex_unlock(&app_access_lock);
7478 break;
7479 }
7480 case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
7481 if (data->type != QSEECOM_GENERIC) {
7482 pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
7483 data->type);
7484 ret = -EINVAL;
7485 break;
7486 }
7487 data->released = true;
7488 mutex_lock(&app_access_lock);
7489 atomic_inc(&data->ioctl_count);
7490 ret = qseecom_mdtp_cipher_dip(argp);
7491 atomic_dec(&data->ioctl_count);
7492 mutex_unlock(&app_access_lock);
7493 break;
7494 }
7495 case QSEECOM_IOCTL_SEND_MODFD_RESP:
7496 case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
7497 if ((data->listener.id == 0) ||
7498 (data->type != QSEECOM_LISTENER_SERVICE)) {
7499 pr_err("receive req: invalid handle (%d), lid(%d)\n",
7500 data->type, data->listener.id);
7501 ret = -EINVAL;
7502 break;
7503 }
7504 atomic_inc(&data->ioctl_count);
7505 if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
7506 ret = qseecom_send_modfd_resp(data, argp);
7507 else
7508 ret = qseecom_send_modfd_resp_64(data, argp);
7509 atomic_dec(&data->ioctl_count);
7510 wake_up_all(&data->abort_wq);
7511 if (ret)
7512 pr_err("failed qseecom_send_mod_resp: %d\n", ret);
7513 __qseecom_clean_data_sglistinfo(data);
7514 break;
7515 }
7516 case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
7517 if ((data->client.app_id == 0) ||
7518 (data->type != QSEECOM_CLIENT_APP)) {
7519 pr_err("Open session: invalid handle (%d) appid(%d)\n",
7520 data->type, data->client.app_id);
7521 ret = -EINVAL;
7522 break;
7523 }
7524 if (qseecom.qsee_version < QSEE_VERSION_40) {
7525 pr_err("GP feature unsupported: qsee ver %u\n",
7526 qseecom.qsee_version);
7527 return -EINVAL;
7528 }
7529 /* Only one client allowed here at a time */
7530 mutex_lock(&app_access_lock);
7531 atomic_inc(&data->ioctl_count);
7532 ret = qseecom_qteec_open_session(data, argp);
7533 atomic_dec(&data->ioctl_count);
7534 wake_up_all(&data->abort_wq);
7535 mutex_unlock(&app_access_lock);
7536 if (ret)
7537 pr_err("failed open_session_cmd: %d\n", ret);
7538 __qseecom_clean_data_sglistinfo(data);
7539 break;
7540 }
7541 case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
7542 if ((data->client.app_id == 0) ||
7543 (data->type != QSEECOM_CLIENT_APP)) {
7544 pr_err("Close session: invalid handle (%d) appid(%d)\n",
7545 data->type, data->client.app_id);
7546 ret = -EINVAL;
7547 break;
7548 }
7549 if (qseecom.qsee_version < QSEE_VERSION_40) {
7550 pr_err("GP feature unsupported: qsee ver %u\n",
7551 qseecom.qsee_version);
7552 return -EINVAL;
7553 }
7554 /* Only one client allowed here at a time */
7555 mutex_lock(&app_access_lock);
7556 atomic_inc(&data->ioctl_count);
7557 ret = qseecom_qteec_close_session(data, argp);
7558 atomic_dec(&data->ioctl_count);
7559 wake_up_all(&data->abort_wq);
7560 mutex_unlock(&app_access_lock);
7561 if (ret)
7562 pr_err("failed close_session_cmd: %d\n", ret);
7563 break;
7564 }
7565 case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
7566 if ((data->client.app_id == 0) ||
7567 (data->type != QSEECOM_CLIENT_APP)) {
7568 pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
7569 data->type, data->client.app_id);
7570 ret = -EINVAL;
7571 break;
7572 }
7573 if (qseecom.qsee_version < QSEE_VERSION_40) {
7574 pr_err("GP feature unsupported: qsee ver %u\n",
7575 qseecom.qsee_version);
7576 return -EINVAL;
7577 }
7578 /* Only one client allowed here at a time */
7579 mutex_lock(&app_access_lock);
7580 atomic_inc(&data->ioctl_count);
7581 ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
7582 atomic_dec(&data->ioctl_count);
7583 wake_up_all(&data->abort_wq);
7584 mutex_unlock(&app_access_lock);
7585 if (ret)
7586 pr_err("failed Invoke cmd: %d\n", ret);
7587 __qseecom_clean_data_sglistinfo(data);
7588 break;
7589 }
7590 case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
7591 if ((data->client.app_id == 0) ||
7592 (data->type != QSEECOM_CLIENT_APP)) {
7593 pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
7594 data->type, data->client.app_id);
7595 ret = -EINVAL;
7596 break;
7597 }
7598 if (qseecom.qsee_version < QSEE_VERSION_40) {
7599 pr_err("GP feature unsupported: qsee ver %u\n",
7600 qseecom.qsee_version);
7601 return -EINVAL;
7602 }
7603 /* Only one client allowed here at a time */
7604 mutex_lock(&app_access_lock);
7605 atomic_inc(&data->ioctl_count);
7606 ret = qseecom_qteec_request_cancellation(data, argp);
7607 atomic_dec(&data->ioctl_count);
7608 wake_up_all(&data->abort_wq);
7609 mutex_unlock(&app_access_lock);
7610 if (ret)
7611 pr_err("failed request_cancellation: %d\n", ret);
7612 break;
7613 }
7614 case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
7615 atomic_inc(&data->ioctl_count);
7616 ret = qseecom_get_ce_info(data, argp);
7617 if (ret)
7618 pr_err("failed get fde ce pipe info: %d\n", ret);
7619 atomic_dec(&data->ioctl_count);
7620 break;
7621 }
7622 case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
7623 atomic_inc(&data->ioctl_count);
7624 ret = qseecom_free_ce_info(data, argp);
7625 if (ret)
7626 pr_err("failed get fde ce pipe info: %d\n", ret);
7627 atomic_dec(&data->ioctl_count);
7628 break;
7629 }
7630 case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
7631 atomic_inc(&data->ioctl_count);
7632 ret = qseecom_query_ce_info(data, argp);
7633 if (ret)
7634 pr_err("failed get fde ce pipe info: %d\n", ret);
7635 atomic_dec(&data->ioctl_count);
7636 break;
7637 }
7638 default:
7639 pr_err("Invalid IOCTL: 0x%x\n", cmd);
7640 return -EINVAL;
7641 }
7642 return ret;
7643}
7644
7645static int qseecom_open(struct inode *inode, struct file *file)
7646{
7647 int ret = 0;
7648 struct qseecom_dev_handle *data;
7649
7650 data = kzalloc(sizeof(*data), GFP_KERNEL);
7651 if (!data)
7652 return -ENOMEM;
7653 file->private_data = data;
7654 data->abort = 0;
7655 data->type = QSEECOM_GENERIC;
7656 data->released = false;
7657 memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
7658 data->mode = INACTIVE;
7659 init_waitqueue_head(&data->abort_wq);
7660 atomic_set(&data->ioctl_count, 0);
7661 return ret;
7662}
7663
/*
 * release() handler: undo whatever state this fd accumulated, based on the
 * client type it was promoted to, then drop any bus-bandwidth or clock
 * votes still held, and free the handle.
 *
 * Returns the status of the last cleanup step that reported one; earlier
 * failures are logged but may be overwritten by a later step's result.
 */
static int qseecom_release(struct inode *inode, struct file *file)
{
	struct qseecom_dev_handle *data = file->private_data;
	int ret = 0;

	/*
	 * data->released is set by ioctls (e.g. key ops, external-elf load)
	 * that already performed their own teardown; skip type cleanup then.
	 */
	if (data->released == false) {
		pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
			data->type, data->mode, data);
		switch (data->type) {
		case QSEECOM_LISTENER_SERVICE:
			/* Listener deregistration races with send/receive;
			 * serialize against other TZ traffic. */
			mutex_lock(&app_access_lock);
			ret = qseecom_unregister_listener(data);
			mutex_unlock(&app_access_lock);
			break;
		case QSEECOM_CLIENT_APP:
			/* true => app unload triggered from release path */
			mutex_lock(&app_access_lock);
			ret = qseecom_unload_app(data, true);
			mutex_unlock(&app_access_lock);
			break;
		case QSEECOM_SECURE_SERVICE:
		case QSEECOM_GENERIC:
			ret = qseecom_unmap_ion_allocated_memory(data);
			if (ret)
				pr_err("Ion Unmap failed\n");
			break;
		case QSEECOM_UNAVAILABLE_CLIENT_APP:
			/* Nothing to unload for an unavailable client app. */
			break;
		default:
			pr_err("Unsupported clnt_handle_type %d",
				data->type);
			break;
		}
	}

	if (qseecom.support_bus_scaling) {
		/* Drop this fd's bandwidth request; scale the bus down only
		 * when no other client still holds an active request. */
		mutex_lock(&qsee_bw_mutex);
		if (data->mode != INACTIVE) {
			qseecom_unregister_bus_bandwidth_needs(data);
			if (qseecom.cumulative_mode == INACTIVE) {
				ret = __qseecom_set_msm_bus_request(INACTIVE);
				if (ret)
					pr_err("Fail to scale down bus\n");
			}
		}
		mutex_unlock(&qsee_bw_mutex);
	} else {
		/* No bus scaling: release the raw clock votes this fd took. */
		if (data->fast_load_enabled == true)
			qsee_disable_clock_vote(data, CLK_SFPB);
		if (data->perf_enabled == true)
			qsee_disable_clock_vote(data, CLK_DFAB);
	}
	kfree(data);

	return ret;
}
7719
7720#ifdef CONFIG_COMPAT
7721#include "compat_qseecom.c"
7722#else
7723#define compat_qseecom_ioctl NULL
7724#endif
7725
7726static const struct file_operations qseecom_fops = {
7727 .owner = THIS_MODULE,
7728 .unlocked_ioctl = qseecom_ioctl,
7729 .compat_ioctl = compat_qseecom_ioctl,
7730 .open = qseecom_open,
7731 .release = qseecom_release
7732};
7733
7734static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
7735{
7736 int rc = 0;
7737 struct device *pdev;
7738 struct qseecom_clk *qclk;
7739 char *core_clk_src = NULL;
7740 char *core_clk = NULL;
7741 char *iface_clk = NULL;
7742 char *bus_clk = NULL;
7743
7744 switch (ce) {
7745 case CLK_QSEE: {
7746 core_clk_src = "core_clk_src";
7747 core_clk = "core_clk";
7748 iface_clk = "iface_clk";
7749 bus_clk = "bus_clk";
7750 qclk = &qseecom.qsee;
7751 qclk->instance = CLK_QSEE;
7752 break;
7753 };
7754 case CLK_CE_DRV: {
7755 core_clk_src = "ce_drv_core_clk_src";
7756 core_clk = "ce_drv_core_clk";
7757 iface_clk = "ce_drv_iface_clk";
7758 bus_clk = "ce_drv_bus_clk";
7759 qclk = &qseecom.ce_drv;
7760 qclk->instance = CLK_CE_DRV;
7761 break;
7762 };
7763 default:
7764 pr_err("Invalid ce hw instance: %d!\n", ce);
7765 return -EIO;
7766 }
7767
7768 if (qseecom.no_clock_support) {
7769 qclk->ce_core_clk = NULL;
7770 qclk->ce_clk = NULL;
7771 qclk->ce_bus_clk = NULL;
7772 qclk->ce_core_src_clk = NULL;
7773 return 0;
7774 }
7775
7776 pdev = qseecom.pdev;
7777
7778 /* Get CE3 src core clk. */
7779 qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
7780 if (!IS_ERR(qclk->ce_core_src_clk)) {
7781 rc = clk_set_rate(qclk->ce_core_src_clk,
7782 qseecom.ce_opp_freq_hz);
7783 if (rc) {
7784 clk_put(qclk->ce_core_src_clk);
7785 qclk->ce_core_src_clk = NULL;
7786 pr_err("Unable to set the core src clk @%uMhz.\n",
7787 qseecom.ce_opp_freq_hz/CE_CLK_DIV);
7788 return -EIO;
7789 }
7790 } else {
7791 pr_warn("Unable to get CE core src clk, set to NULL\n");
7792 qclk->ce_core_src_clk = NULL;
7793 }
7794
7795 /* Get CE core clk */
7796 qclk->ce_core_clk = clk_get(pdev, core_clk);
7797 if (IS_ERR(qclk->ce_core_clk)) {
7798 rc = PTR_ERR(qclk->ce_core_clk);
7799 pr_err("Unable to get CE core clk\n");
7800 if (qclk->ce_core_src_clk != NULL)
7801 clk_put(qclk->ce_core_src_clk);
7802 return -EIO;
7803 }
7804
7805 /* Get CE Interface clk */
7806 qclk->ce_clk = clk_get(pdev, iface_clk);
7807 if (IS_ERR(qclk->ce_clk)) {
7808 rc = PTR_ERR(qclk->ce_clk);
7809 pr_err("Unable to get CE interface clk\n");
7810 if (qclk->ce_core_src_clk != NULL)
7811 clk_put(qclk->ce_core_src_clk);
7812 clk_put(qclk->ce_core_clk);
7813 return -EIO;
7814 }
7815
7816 /* Get CE AXI clk */
7817 qclk->ce_bus_clk = clk_get(pdev, bus_clk);
7818 if (IS_ERR(qclk->ce_bus_clk)) {
7819 rc = PTR_ERR(qclk->ce_bus_clk);
7820 pr_err("Unable to get CE BUS interface clk\n");
7821 if (qclk->ce_core_src_clk != NULL)
7822 clk_put(qclk->ce_core_src_clk);
7823 clk_put(qclk->ce_core_clk);
7824 clk_put(qclk->ce_clk);
7825 return -EIO;
7826 }
7827
7828 return rc;
7829}
7830
7831static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
7832{
7833 struct qseecom_clk *qclk;
7834
7835 if (ce == CLK_QSEE)
7836 qclk = &qseecom.qsee;
7837 else
7838 qclk = &qseecom.ce_drv;
7839
7840 if (qclk->ce_clk != NULL) {
7841 clk_put(qclk->ce_clk);
7842 qclk->ce_clk = NULL;
7843 }
7844 if (qclk->ce_core_clk != NULL) {
7845 clk_put(qclk->ce_core_clk);
7846 qclk->ce_core_clk = NULL;
7847 }
7848 if (qclk->ce_bus_clk != NULL) {
7849 clk_put(qclk->ce_bus_clk);
7850 qclk->ce_bus_clk = NULL;
7851 }
7852 if (qclk->ce_core_src_clk != NULL) {
7853 clk_put(qclk->ce_core_src_clk);
7854 qclk->ce_core_src_clk = NULL;
7855 }
7856 qclk->instance = CLK_INVALID;
7857}
7858
7859static int qseecom_retrieve_ce_data(struct platform_device *pdev)
7860{
7861 int rc = 0;
7862 uint32_t hlos_num_ce_hw_instances;
7863 uint32_t disk_encrypt_pipe;
7864 uint32_t file_encrypt_pipe;
Zhen Kongffec45c2017-10-18 14:05:53 -07007865 uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07007866 int i;
7867 const int *tbl;
7868 int size;
7869 int entry;
7870 struct qseecom_crypto_info *pfde_tbl = NULL;
7871 struct qseecom_crypto_info *p;
7872 int tbl_size;
7873 int j;
7874 bool old_db = true;
7875 struct qseecom_ce_info_use *pce_info_use;
7876 uint32_t *unit_tbl = NULL;
7877 int total_units = 0;
7878 struct qseecom_ce_pipe_entry *pce_entry;
7879
7880 qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
7881 qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
7882
7883 if (of_property_read_u32((&pdev->dev)->of_node,
7884 "qcom,qsee-ce-hw-instance",
7885 &qseecom.ce_info.qsee_ce_hw_instance)) {
7886 pr_err("Fail to get qsee ce hw instance information.\n");
7887 rc = -EINVAL;
7888 goto out;
7889 } else {
7890 pr_debug("qsee-ce-hw-instance=0x%x\n",
7891 qseecom.ce_info.qsee_ce_hw_instance);
7892 }
7893
7894 qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
7895 "qcom,support-fde");
7896 qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
7897 "qcom,support-pfe");
7898
7899 if (!qseecom.support_pfe && !qseecom.support_fde) {
7900 pr_warn("Device does not support PFE/FDE");
7901 goto out;
7902 }
7903
7904 if (qseecom.support_fde)
7905 tbl = of_get_property((&pdev->dev)->of_node,
7906 "qcom,full-disk-encrypt-info", &size);
7907 else
7908 tbl = NULL;
7909 if (tbl) {
7910 old_db = false;
7911 if (size % sizeof(struct qseecom_crypto_info)) {
7912 pr_err("full-disk-encrypt-info tbl size(%d)\n",
7913 size);
7914 rc = -EINVAL;
7915 goto out;
7916 }
7917 tbl_size = size / sizeof
7918 (struct qseecom_crypto_info);
7919
7920 pfde_tbl = kzalloc(size, GFP_KERNEL);
7921 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
7922 total_units = 0;
7923
7924 if (!pfde_tbl || !unit_tbl) {
7925 pr_err("failed to alloc memory\n");
7926 rc = -ENOMEM;
7927 goto out;
7928 }
7929 if (of_property_read_u32_array((&pdev->dev)->of_node,
7930 "qcom,full-disk-encrypt-info",
7931 (u32 *)pfde_tbl, size/sizeof(u32))) {
7932 pr_err("failed to read full-disk-encrypt-info tbl\n");
7933 rc = -EINVAL;
7934 goto out;
7935 }
7936
7937 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7938 for (j = 0; j < total_units; j++) {
7939 if (p->unit_num == *(unit_tbl + j))
7940 break;
7941 }
7942 if (j == total_units) {
7943 *(unit_tbl + total_units) = p->unit_num;
7944 total_units++;
7945 }
7946 }
7947
7948 qseecom.ce_info.num_fde = total_units;
7949 pce_info_use = qseecom.ce_info.fde = kcalloc(
7950 total_units, sizeof(struct qseecom_ce_info_use),
7951 GFP_KERNEL);
7952 if (!pce_info_use) {
7953 pr_err("failed to alloc memory\n");
7954 rc = -ENOMEM;
7955 goto out;
7956 }
7957
7958 for (j = 0; j < total_units; j++, pce_info_use++) {
7959 pce_info_use->unit_num = *(unit_tbl + j);
7960 pce_info_use->alloc = false;
7961 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
7962 pce_info_use->num_ce_pipe_entries = 0;
7963 pce_info_use->ce_pipe_entry = NULL;
7964 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7965 if (p->unit_num == pce_info_use->unit_num)
7966 pce_info_use->num_ce_pipe_entries++;
7967 }
7968
7969 entry = pce_info_use->num_ce_pipe_entries;
7970 pce_entry = pce_info_use->ce_pipe_entry =
7971 kcalloc(entry,
7972 sizeof(struct qseecom_ce_pipe_entry),
7973 GFP_KERNEL);
7974 if (pce_entry == NULL) {
7975 pr_err("failed to alloc memory\n");
7976 rc = -ENOMEM;
7977 goto out;
7978 }
7979
7980 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
7981 if (p->unit_num == pce_info_use->unit_num) {
7982 pce_entry->ce_num = p->ce;
7983 pce_entry->ce_pipe_pair =
7984 p->pipe_pair;
7985 pce_entry->valid = true;
7986 pce_entry++;
7987 }
7988 }
7989 }
7990 kfree(unit_tbl);
7991 unit_tbl = NULL;
7992 kfree(pfde_tbl);
7993 pfde_tbl = NULL;
7994 }
7995
7996 if (qseecom.support_pfe)
7997 tbl = of_get_property((&pdev->dev)->of_node,
7998 "qcom,per-file-encrypt-info", &size);
7999 else
8000 tbl = NULL;
8001 if (tbl) {
8002 old_db = false;
8003 if (size % sizeof(struct qseecom_crypto_info)) {
8004 pr_err("per-file-encrypt-info tbl size(%d)\n",
8005 size);
8006 rc = -EINVAL;
8007 goto out;
8008 }
8009 tbl_size = size / sizeof
8010 (struct qseecom_crypto_info);
8011
8012 pfde_tbl = kzalloc(size, GFP_KERNEL);
8013 unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
8014 total_units = 0;
8015 if (!pfde_tbl || !unit_tbl) {
8016 pr_err("failed to alloc memory\n");
8017 rc = -ENOMEM;
8018 goto out;
8019 }
8020 if (of_property_read_u32_array((&pdev->dev)->of_node,
8021 "qcom,per-file-encrypt-info",
8022 (u32 *)pfde_tbl, size/sizeof(u32))) {
8023 pr_err("failed to read per-file-encrypt-info tbl\n");
8024 rc = -EINVAL;
8025 goto out;
8026 }
8027
8028 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8029 for (j = 0; j < total_units; j++) {
8030 if (p->unit_num == *(unit_tbl + j))
8031 break;
8032 }
8033 if (j == total_units) {
8034 *(unit_tbl + total_units) = p->unit_num;
8035 total_units++;
8036 }
8037 }
8038
8039 qseecom.ce_info.num_pfe = total_units;
8040 pce_info_use = qseecom.ce_info.pfe = kcalloc(
8041 total_units, sizeof(struct qseecom_ce_info_use),
8042 GFP_KERNEL);
8043 if (!pce_info_use) {
8044 pr_err("failed to alloc memory\n");
8045 rc = -ENOMEM;
8046 goto out;
8047 }
8048
8049 for (j = 0; j < total_units; j++, pce_info_use++) {
8050 pce_info_use->unit_num = *(unit_tbl + j);
8051 pce_info_use->alloc = false;
8052 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8053 pce_info_use->num_ce_pipe_entries = 0;
8054 pce_info_use->ce_pipe_entry = NULL;
8055 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8056 if (p->unit_num == pce_info_use->unit_num)
8057 pce_info_use->num_ce_pipe_entries++;
8058 }
8059
8060 entry = pce_info_use->num_ce_pipe_entries;
8061 pce_entry = pce_info_use->ce_pipe_entry =
8062 kcalloc(entry,
8063 sizeof(struct qseecom_ce_pipe_entry),
8064 GFP_KERNEL);
8065 if (pce_entry == NULL) {
8066 pr_err("failed to alloc memory\n");
8067 rc = -ENOMEM;
8068 goto out;
8069 }
8070
8071 for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
8072 if (p->unit_num == pce_info_use->unit_num) {
8073 pce_entry->ce_num = p->ce;
8074 pce_entry->ce_pipe_pair =
8075 p->pipe_pair;
8076 pce_entry->valid = true;
8077 pce_entry++;
8078 }
8079 }
8080 }
8081 kfree(unit_tbl);
8082 unit_tbl = NULL;
8083 kfree(pfde_tbl);
8084 pfde_tbl = NULL;
8085 }
8086
8087 if (!old_db)
8088 goto out1;
8089
8090 if (of_property_read_bool((&pdev->dev)->of_node,
8091 "qcom,support-multiple-ce-hw-instance")) {
8092 if (of_property_read_u32((&pdev->dev)->of_node,
8093 "qcom,hlos-num-ce-hw-instances",
8094 &hlos_num_ce_hw_instances)) {
8095 pr_err("Fail: get hlos number of ce hw instance\n");
8096 rc = -EINVAL;
8097 goto out;
8098 }
8099 } else {
8100 hlos_num_ce_hw_instances = 1;
8101 }
8102
8103 if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
8104 pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
8105 MAX_CE_PIPE_PAIR_PER_UNIT);
8106 rc = -EINVAL;
8107 goto out;
8108 }
8109
8110 if (of_property_read_u32_array((&pdev->dev)->of_node,
8111 "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
8112 hlos_num_ce_hw_instances)) {
8113 pr_err("Fail: get hlos ce hw instance info\n");
8114 rc = -EINVAL;
8115 goto out;
8116 }
8117
8118 if (qseecom.support_fde) {
8119 pce_info_use = qseecom.ce_info.fde =
8120 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8121 if (!pce_info_use) {
8122 pr_err("failed to alloc memory\n");
8123 rc = -ENOMEM;
8124 goto out;
8125 }
8126 /* by default for old db */
8127 qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
8128 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8129 pce_info_use->alloc = false;
8130 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
8131 pce_info_use->ce_pipe_entry = NULL;
8132 if (of_property_read_u32((&pdev->dev)->of_node,
8133 "qcom,disk-encrypt-pipe-pair",
8134 &disk_encrypt_pipe)) {
8135 pr_err("Fail to get FDE pipe information.\n");
8136 rc = -EINVAL;
8137 goto out;
8138 } else {
8139 pr_debug("disk-encrypt-pipe-pair=0x%x",
8140 disk_encrypt_pipe);
8141 }
8142 entry = pce_info_use->num_ce_pipe_entries =
8143 hlos_num_ce_hw_instances;
8144 pce_entry = pce_info_use->ce_pipe_entry =
8145 kcalloc(entry,
8146 sizeof(struct qseecom_ce_pipe_entry),
8147 GFP_KERNEL);
8148 if (pce_entry == NULL) {
8149 pr_err("failed to alloc memory\n");
8150 rc = -ENOMEM;
8151 goto out;
8152 }
8153 for (i = 0; i < entry; i++) {
8154 pce_entry->ce_num = hlos_ce_hw_instance[i];
8155 pce_entry->ce_pipe_pair = disk_encrypt_pipe;
8156 pce_entry->valid = 1;
8157 pce_entry++;
8158 }
8159 } else {
8160 pr_warn("Device does not support FDE");
8161 disk_encrypt_pipe = 0xff;
8162 }
8163 if (qseecom.support_pfe) {
8164 pce_info_use = qseecom.ce_info.pfe =
8165 kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
8166 if (!pce_info_use) {
8167 pr_err("failed to alloc memory\n");
8168 rc = -ENOMEM;
8169 goto out;
8170 }
8171 /* by default for old db */
8172 qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
8173 pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
8174 pce_info_use->alloc = false;
8175 pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
8176 pce_info_use->ce_pipe_entry = NULL;
8177
8178 if (of_property_read_u32((&pdev->dev)->of_node,
8179 "qcom,file-encrypt-pipe-pair",
8180 &file_encrypt_pipe)) {
8181 pr_err("Fail to get PFE pipe information.\n");
8182 rc = -EINVAL;
8183 goto out;
8184 } else {
8185 pr_debug("file-encrypt-pipe-pair=0x%x",
8186 file_encrypt_pipe);
8187 }
8188 entry = pce_info_use->num_ce_pipe_entries =
8189 hlos_num_ce_hw_instances;
8190 pce_entry = pce_info_use->ce_pipe_entry =
8191 kcalloc(entry,
8192 sizeof(struct qseecom_ce_pipe_entry),
8193 GFP_KERNEL);
8194 if (pce_entry == NULL) {
8195 pr_err("failed to alloc memory\n");
8196 rc = -ENOMEM;
8197 goto out;
8198 }
8199 for (i = 0; i < entry; i++) {
8200 pce_entry->ce_num = hlos_ce_hw_instance[i];
8201 pce_entry->ce_pipe_pair = file_encrypt_pipe;
8202 pce_entry->valid = 1;
8203 pce_entry++;
8204 }
8205 } else {
8206 pr_warn("Device does not support PFE");
8207 file_encrypt_pipe = 0xff;
8208 }
8209
8210out1:
8211 qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
8212 qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
8213out:
8214 if (rc) {
8215 if (qseecom.ce_info.fde) {
8216 pce_info_use = qseecom.ce_info.fde;
8217 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8218 pce_entry = pce_info_use->ce_pipe_entry;
8219 kfree(pce_entry);
8220 pce_info_use++;
8221 }
8222 }
8223 kfree(qseecom.ce_info.fde);
8224 qseecom.ce_info.fde = NULL;
8225 if (qseecom.ce_info.pfe) {
8226 pce_info_use = qseecom.ce_info.pfe;
8227 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8228 pce_entry = pce_info_use->ce_pipe_entry;
8229 kfree(pce_entry);
8230 pce_info_use++;
8231 }
8232 }
8233 kfree(qseecom.ce_info.pfe);
8234 qseecom.ce_info.pfe = NULL;
8235 }
8236 kfree(unit_tbl);
8237 kfree(pfde_tbl);
8238 return rc;
8239}
8240
8241static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
8242 void __user *argp)
8243{
8244 struct qseecom_ce_info_req req;
8245 struct qseecom_ce_info_req *pinfo = &req;
8246 int ret = 0;
8247 int i;
8248 unsigned int entries;
8249 struct qseecom_ce_info_use *pce_info_use, *p;
8250 int total = 0;
8251 bool found = false;
8252 struct qseecom_ce_pipe_entry *pce_entry;
8253
8254 ret = copy_from_user(pinfo, argp,
8255 sizeof(struct qseecom_ce_info_req));
8256 if (ret) {
8257 pr_err("copy_from_user failed\n");
8258 return ret;
8259 }
8260
8261 switch (pinfo->usage) {
8262 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8263 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8264 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8265 if (qseecom.support_fde) {
8266 p = qseecom.ce_info.fde;
8267 total = qseecom.ce_info.num_fde;
8268 } else {
8269 pr_err("system does not support fde\n");
8270 return -EINVAL;
8271 }
8272 break;
8273 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8274 if (qseecom.support_pfe) {
8275 p = qseecom.ce_info.pfe;
8276 total = qseecom.ce_info.num_pfe;
8277 } else {
8278 pr_err("system does not support pfe\n");
8279 return -EINVAL;
8280 }
8281 break;
8282 default:
8283 pr_err("unsupported usage %d\n", pinfo->usage);
8284 return -EINVAL;
8285 }
8286
8287 pce_info_use = NULL;
8288 for (i = 0; i < total; i++) {
8289 if (!p->alloc)
8290 pce_info_use = p;
8291 else if (!memcmp(p->handle, pinfo->handle,
8292 MAX_CE_INFO_HANDLE_SIZE)) {
8293 pce_info_use = p;
8294 found = true;
8295 break;
8296 }
8297 p++;
8298 }
8299
8300 if (pce_info_use == NULL)
8301 return -EBUSY;
8302
8303 pinfo->unit_num = pce_info_use->unit_num;
8304 if (!pce_info_use->alloc) {
8305 pce_info_use->alloc = true;
8306 memcpy(pce_info_use->handle,
8307 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
8308 }
8309 if (pce_info_use->num_ce_pipe_entries >
8310 MAX_CE_PIPE_PAIR_PER_UNIT)
8311 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8312 else
8313 entries = pce_info_use->num_ce_pipe_entries;
8314 pinfo->num_ce_pipe_entries = entries;
8315 pce_entry = pce_info_use->ce_pipe_entry;
8316 for (i = 0; i < entries; i++, pce_entry++)
8317 pinfo->ce_pipe_entry[i] = *pce_entry;
8318 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8319 pinfo->ce_pipe_entry[i].valid = 0;
8320
8321 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8322 pr_err("copy_to_user failed\n");
8323 ret = -EFAULT;
8324 }
8325 return ret;
8326}
8327
8328static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
8329 void __user *argp)
8330{
8331 struct qseecom_ce_info_req req;
8332 struct qseecom_ce_info_req *pinfo = &req;
8333 int ret = 0;
8334 struct qseecom_ce_info_use *p;
8335 int total = 0;
8336 int i;
8337 bool found = false;
8338
8339 ret = copy_from_user(pinfo, argp,
8340 sizeof(struct qseecom_ce_info_req));
8341 if (ret)
8342 return ret;
8343
8344 switch (pinfo->usage) {
8345 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8346 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8347 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8348 if (qseecom.support_fde) {
8349 p = qseecom.ce_info.fde;
8350 total = qseecom.ce_info.num_fde;
8351 } else {
8352 pr_err("system does not support fde\n");
8353 return -EINVAL;
8354 }
8355 break;
8356 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8357 if (qseecom.support_pfe) {
8358 p = qseecom.ce_info.pfe;
8359 total = qseecom.ce_info.num_pfe;
8360 } else {
8361 pr_err("system does not support pfe\n");
8362 return -EINVAL;
8363 }
8364 break;
8365 default:
8366 pr_err("unsupported usage %d\n", pinfo->usage);
8367 return -EINVAL;
8368 }
8369
8370 for (i = 0; i < total; i++) {
8371 if (p->alloc &&
8372 !memcmp(p->handle, pinfo->handle,
8373 MAX_CE_INFO_HANDLE_SIZE)) {
8374 memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
8375 p->alloc = false;
8376 found = true;
8377 break;
8378 }
8379 p++;
8380 }
8381 return ret;
8382}
8383
8384static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
8385 void __user *argp)
8386{
8387 struct qseecom_ce_info_req req;
8388 struct qseecom_ce_info_req *pinfo = &req;
8389 int ret = 0;
8390 int i;
8391 unsigned int entries;
8392 struct qseecom_ce_info_use *pce_info_use, *p;
8393 int total = 0;
8394 bool found = false;
8395 struct qseecom_ce_pipe_entry *pce_entry;
8396
8397 ret = copy_from_user(pinfo, argp,
8398 sizeof(struct qseecom_ce_info_req));
8399 if (ret)
8400 return ret;
8401
8402 switch (pinfo->usage) {
8403 case QSEOS_KM_USAGE_DISK_ENCRYPTION:
8404 case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
8405 case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
8406 if (qseecom.support_fde) {
8407 p = qseecom.ce_info.fde;
8408 total = qseecom.ce_info.num_fde;
8409 } else {
8410 pr_err("system does not support fde\n");
8411 return -EINVAL;
8412 }
8413 break;
8414 case QSEOS_KM_USAGE_FILE_ENCRYPTION:
8415 if (qseecom.support_pfe) {
8416 p = qseecom.ce_info.pfe;
8417 total = qseecom.ce_info.num_pfe;
8418 } else {
8419 pr_err("system does not support pfe\n");
8420 return -EINVAL;
8421 }
8422 break;
8423 default:
8424 pr_err("unsupported usage %d\n", pinfo->usage);
8425 return -EINVAL;
8426 }
8427
8428 pce_info_use = NULL;
8429 pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
8430 pinfo->num_ce_pipe_entries = 0;
8431 for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8432 pinfo->ce_pipe_entry[i].valid = 0;
8433
8434 for (i = 0; i < total; i++) {
8435
8436 if (p->alloc && !memcmp(p->handle,
8437 pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
8438 pce_info_use = p;
8439 found = true;
8440 break;
8441 }
8442 p++;
8443 }
8444 if (!pce_info_use)
8445 goto out;
8446 pinfo->unit_num = pce_info_use->unit_num;
8447 if (pce_info_use->num_ce_pipe_entries >
8448 MAX_CE_PIPE_PAIR_PER_UNIT)
8449 entries = MAX_CE_PIPE_PAIR_PER_UNIT;
8450 else
8451 entries = pce_info_use->num_ce_pipe_entries;
8452 pinfo->num_ce_pipe_entries = entries;
8453 pce_entry = pce_info_use->ce_pipe_entry;
8454 for (i = 0; i < entries; i++, pce_entry++)
8455 pinfo->ce_pipe_entry[i] = *pce_entry;
8456 for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
8457 pinfo->ce_pipe_entry[i].valid = 0;
8458out:
8459 if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
8460 pr_err("copy_to_user failed\n");
8461 ret = -EFAULT;
8462 }
8463 return ret;
8464}
8465
8466/*
8467 * Check whitelist feature, and if TZ feature version is < 1.0.0,
8468 * then whitelist feature is not supported.
8469 */
8470static int qseecom_check_whitelist_feature(void)
8471{
8472 int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
8473
8474 return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
8475}
8476
/*
 * qseecom_probe() - platform-driver probe: create the qseecom char device,
 * query the QSEE version, parse device-tree properties (CE clocks, bus
 * scaling, FDE/PFE pipe info), register the secure-app memory region with
 * TZ, and set up bus-bandwidth scaling.
 *
 * Return: 0 on success, negative errno on failure (all partially-acquired
 * resources are unwound via the exit_* labels at the bottom).
 */
static int qseecom_probe(struct platform_device *pdev)
{
	int rc;
	int i;
	uint32_t feature = 10;
	struct device *class_dev;
	struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
	struct qseecom_command_scm_resp resp;
	struct qseecom_ce_info_use *pce_info_use = NULL;

	/* Reset all global driver state before touching any hardware. */
	qseecom.qsee_bw_count = 0;
	qseecom.qsee_perf_client = 0;
	qseecom.qsee_sfpb_bw_count = 0;

	qseecom.qsee.ce_core_clk = NULL;
	qseecom.qsee.ce_clk = NULL;
	qseecom.qsee.ce_core_src_clk = NULL;
	qseecom.qsee.ce_bus_clk = NULL;

	qseecom.cumulative_mode = 0;
	qseecom.current_mode = INACTIVE;
	qseecom.support_bus_scaling = false;
	qseecom.support_fde = false;
	qseecom.support_pfe = false;

	qseecom.ce_drv.ce_core_clk = NULL;
	qseecom.ce_drv.ce_clk = NULL;
	qseecom.ce_drv.ce_core_src_clk = NULL;
	qseecom.ce_drv.ce_bus_clk = NULL;
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);

	qseecom.app_block_ref_cnt = 0;
	init_waitqueue_head(&qseecom.app_block_wq);
	qseecom.whitelist_support = true;

	/* Create the /dev/qseecom character device. */
	rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
	if (rc < 0) {
		pr_err("alloc_chrdev_region failed %d\n", rc);
		return rc;
	}

	driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
	if (IS_ERR(driver_class)) {
		rc = -ENOMEM;
		pr_err("class_create failed %d\n", rc);
		goto exit_unreg_chrdev_region;
	}

	class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
			QSEECOM_DEV);
	if (IS_ERR(class_dev)) {
		/*
		 * NOTE(review): rc is still 0 from alloc_chrdev_region here,
		 * so this log prints a stale value; rc is set just below.
		 */
		pr_err("class_device_create failed %d\n", rc);
		rc = -ENOMEM;
		goto exit_destroy_class;
	}

	cdev_init(&qseecom.cdev, &qseecom_fops);
	qseecom.cdev.owner = THIS_MODULE;

	rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
	if (rc < 0) {
		pr_err("cdev_add failed %d\n", rc);
		goto exit_destroy_device;
	}

	/* Listener/app/kernel-client bookkeeping lists and wait queues. */
	INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
	spin_lock_init(&qseecom.registered_listener_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
	spin_lock_init(&qseecom.registered_app_list_lock);
	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
	spin_lock_init(&qseecom.registered_kclient_list_lock);
	init_waitqueue_head(&qseecom.send_resp_wq);
	qseecom.send_resp_flag = 0;

	/* Ask TZ for the QSEE version (SCM svc 6, cmd 3). */
	qseecom.qsee_version = QSEEE_VERSION_00;
	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
		&resp, sizeof(resp));
	/*
	 * NOTE(review): resp.result is logged before rc is checked; on SCM
	 * failure this may print an uninitialized value.
	 */
	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
	if (rc) {
		pr_err("Failed to get QSEE version info %d\n", rc);
		goto exit_del_cdev;
	}
	qseecom.qsee_version = resp.result;
	qseecom.qseos_version = QSEOS_VERSION_14;
	qseecom.commonlib_loaded = false;
	qseecom.commonlib64_loaded = false;
	qseecom.pdev = class_dev;
	/* Create ION msm client */
	qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
	if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
		pr_err("Ion client cannot be created\n");
		rc = -ENOMEM;
		goto exit_del_cdev;
	}

	/* register client for bus scaling */
	if (pdev->dev.of_node) {
		qseecom.pdev->of_node = pdev->dev.of_node;
		qseecom.support_bus_scaling =
			of_property_read_bool((&pdev->dev)->of_node,
					"qcom,support-bus-scaling");
		/* Parse CE pipe-pair / key-management info from DT. */
		rc = qseecom_retrieve_ce_data(pdev);
		if (rc)
			goto exit_destroy_ion_client;
		qseecom.appsbl_qseecom_support =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,appsbl-qseecom-support");
		pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
				qseecom.appsbl_qseecom_support);

		qseecom.commonlib64_loaded =
				of_property_read_bool((&pdev->dev)->of_node,
						"qcom,commonlib64-loaded-by-uefi");
		pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
				qseecom.commonlib64_loaded);
		qseecom.fde_key_size =
			of_property_read_bool((&pdev->dev)->of_node,
						"qcom,fde-key-size");
		qseecom.no_clock_support =
			of_property_read_bool((&pdev->dev)->of_node,
						"qcom,no-clock-support");
		/*
		 * NOTE(review): these two log branches look inverted —
		 * "clocks handled by other subsystem" is printed when
		 * no_clock_support is FALSE (i.e. when this driver does
		 * manage clocks). Log-only; confirm intent before changing.
		 */
		if (!qseecom.no_clock_support) {
			pr_info("qseecom clocks handled by other subsystem\n");
		} else {
			pr_info("no-clock-support=0x%x",
			qseecom.no_clock_support);
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,qsee-reentrancy-support",
					&qseecom.qsee_reentrancy_support)) {
			pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
			qseecom.qsee_reentrancy_support = 0;
		} else {
			pr_warn("qseecom.qsee_reentrancy_support = %d\n",
				qseecom.qsee_reentrancy_support);
		}

		/*
		 * The qseecom bus scaling flag can not be enabled when
		 * crypto clock is not handled by HLOS.
		 */
		if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
			pr_err("support_bus_scaling flag can not be enabled.\n");
			rc = -EINVAL;
			goto exit_destroy_ion_client;
		}

		if (of_property_read_u32((&pdev->dev)->of_node,
					"qcom,ce-opp-freq",
					&qseecom.ce_opp_freq_hz)) {
			pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
			qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
		}
		rc = __qseecom_init_clk(CLK_QSEE);
		if (rc)
			goto exit_destroy_ion_client;

		/*
		 * When the QSEE and CE-driver instances differ and FDE/PFE
		 * is in use, the CE driver needs its own clocks; otherwise
		 * share the QSEE clock handles.
		 */
		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
				(qseecom.support_pfe || qseecom.support_fde)) {
			rc = __qseecom_init_clk(CLK_CE_DRV);
			if (rc) {
				__qseecom_deinit_clk(CLK_QSEE);
				goto exit_destroy_ion_client;
			}
		} else {
			struct qseecom_clk *qclk;

			qclk = &qseecom.qsee;
			qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
			qseecom.ce_drv.ce_clk = qclk->ce_clk;
			qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
			qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
		}

		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						msm_bus_cl_get_pdata(pdev);
		/*
		 * Notify TZ of the secure-app memory region unless the
		 * bootloader already did (protected region or appsbl
		 * qseecom support).
		 */
		if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
			(!qseecom.is_apps_region_protected &&
			!qseecom.appsbl_qseecom_support)) {
			struct resource *resource = NULL;
			struct qsee_apps_region_info_ireq req;
			struct qsee_apps_region_info_64bit_ireq req_64bit;
			struct qseecom_command_scm_resp resp;
			void *cmd_buf = NULL;
			size_t cmd_len;

			resource = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "secapp-region");
			if (resource) {
				/* 32- vs 64-bit request layout by version. */
				if (qseecom.qsee_version < QSEE_VERSION_40) {
					req.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req.addr = (uint32_t)resource->start;
					req.size = resource_size(resource);
					cmd_buf = (void *)&req;
					cmd_len = sizeof(struct
						qsee_apps_region_info_ireq);
					pr_warn("secure app region addr=0x%x size=0x%x",
							req.addr, req.size);
				} else {
					req_64bit.qsee_cmd_id =
						QSEOS_APP_REGION_NOTIFICATION;
					req_64bit.addr = resource->start;
					req_64bit.size = resource_size(
							resource);
					cmd_buf = (void *)&req_64bit;
					cmd_len = sizeof(struct
					qsee_apps_region_info_64bit_ireq);
					pr_warn("secure app region addr=0x%llx size=0x%x",
						req_64bit.addr, req_64bit.size);
				}
			} else {
				pr_err("Fail to get secure app region info\n");
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
			/* Clocks must be running while TZ handles the SCM. */
			rc = __qseecom_enable_clk(CLK_QSEE);
			if (rc) {
				pr_err("CLK_QSEE enabling failed (%d)\n", rc);
				rc = -EIO;
				goto exit_deinit_clock;
			}
			rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len,
					&resp, sizeof(resp));
			__qseecom_disable_clk(CLK_QSEE);
			if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
				pr_err("send secapp reg fail %d resp.res %d\n",
							rc, resp.result);
				rc = -EINVAL;
				goto exit_deinit_clock;
			}
		}
	/*
	 * By default, appsbl only loads cmnlib. If OEM changes appsbl to
	 * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
	 * Pls add "qseecom.commonlib64_loaded = true" here too.
	 */
		if (qseecom.is_apps_region_protected ||
					qseecom.appsbl_qseecom_support)
			qseecom.commonlib_loaded = true;
	} else {
		/* No DT node: fall back to legacy platform data. */
		qseecom_platform_support = (struct msm_bus_scale_pdata *)
						pdev->dev.platform_data;
	}
	if (qseecom.support_bus_scaling) {
		/* Timer + work that drop the bus vote after inactivity. */
		init_timer(&(qseecom.bw_scale_down_timer));
		INIT_WORK(&qseecom.bw_inactive_req_ws,
				qseecom_bw_inactive_req_work);
		qseecom.bw_scale_down_timer.function =
				qseecom_scale_bus_bandwidth_timer_callback;
	}
	qseecom.timer_running = false;
	qseecom.qsee_perf_client = msm_bus_scale_register_client(
					qseecom_platform_support);

	qseecom.whitelist_support = qseecom_check_whitelist_feature();
	pr_warn("qseecom.whitelist_support = %d\n",
				qseecom.whitelist_support);

	if (!qseecom.qsee_perf_client)
		pr_err("Unable to register bus client\n");

	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return 0;

/* Error unwind: release resources in reverse order of acquisition. */
exit_deinit_clock:
	__qseecom_deinit_clk(CLK_QSEE);
	if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
		(qseecom.support_pfe || qseecom.support_fde))
		__qseecom_deinit_clk(CLK_CE_DRV);
exit_destroy_ion_client:
	if (qseecom.ce_info.fde) {
		pce_info_use = qseecom.ce_info.fde;
		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.fde);
	}
	if (qseecom.ce_info.pfe) {
		pce_info_use = qseecom.ce_info.pfe;
		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
			kzfree(pce_info_use->ce_pipe_entry);
			pce_info_use++;
		}
		kfree(qseecom.ce_info.pfe);
	}
	ion_client_destroy(qseecom.ion_clnt);
exit_del_cdev:
	cdev_del(&qseecom.cdev);
exit_destroy_device:
	device_destroy(driver_class, qseecom_device_no);
exit_destroy_class:
	class_destroy(driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(qseecom_device_no, 1);
	return rc;
}
8777
8778static int qseecom_remove(struct platform_device *pdev)
8779{
8780 struct qseecom_registered_kclient_list *kclient = NULL;
Monika Singhe711b162018-04-24 09:54:50 +05308781 struct qseecom_registered_kclient_list *kclient_tmp = NULL;
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008782 unsigned long flags = 0;
8783 int ret = 0;
8784 int i;
8785 struct qseecom_ce_pipe_entry *pce_entry;
8786 struct qseecom_ce_info_use *pce_info_use;
8787
8788 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
8789 spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
8790
Monika Singhe711b162018-04-24 09:54:50 +05308791 list_for_each_entry_safe(kclient, kclient_tmp,
8792 &qseecom.registered_kclient_list_head, list) {
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008793
8794 /* Break the loop if client handle is NULL */
8795 if (!kclient->handle)
8796 goto exit_free_kclient;
8797
8798 if (list_empty(&kclient->list))
8799 goto exit_free_kc_handle;
8800
8801 list_del(&kclient->list);
8802 mutex_lock(&app_access_lock);
8803 ret = qseecom_unload_app(kclient->handle->dev, false);
8804 mutex_unlock(&app_access_lock);
8805 if (!ret) {
8806 kzfree(kclient->handle->dev);
8807 kzfree(kclient->handle);
8808 kzfree(kclient);
8809 }
8810 }
8811
8812exit_free_kc_handle:
8813 kzfree(kclient->handle);
8814exit_free_kclient:
8815 kzfree(kclient);
Monika Singhe711b162018-04-24 09:54:50 +05308816
AnilKumar Chimata20c6b2f2017-04-07 12:18:46 -07008817 spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
8818
8819 if (qseecom.qseos_version > QSEEE_VERSION_00)
8820 qseecom_unload_commonlib_image();
8821
8822 if (qseecom.qsee_perf_client)
8823 msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
8824 0);
8825 if (pdev->dev.platform_data != NULL)
8826 msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
8827
8828 if (qseecom.support_bus_scaling) {
8829 cancel_work_sync(&qseecom.bw_inactive_req_ws);
8830 del_timer_sync(&qseecom.bw_scale_down_timer);
8831 }
8832
8833 if (qseecom.ce_info.fde) {
8834 pce_info_use = qseecom.ce_info.fde;
8835 for (i = 0; i < qseecom.ce_info.num_fde; i++) {
8836 pce_entry = pce_info_use->ce_pipe_entry;
8837 kfree(pce_entry);
8838 pce_info_use++;
8839 }
8840 }
8841 kfree(qseecom.ce_info.fde);
8842 if (qseecom.ce_info.pfe) {
8843 pce_info_use = qseecom.ce_info.pfe;
8844 for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
8845 pce_entry = pce_info_use->ce_pipe_entry;
8846 kfree(pce_entry);
8847 pce_info_use++;
8848 }
8849 }
8850 kfree(qseecom.ce_info.pfe);
8851
8852 /* register client for bus scaling */
8853 if (pdev->dev.of_node) {
8854 __qseecom_deinit_clk(CLK_QSEE);
8855 if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
8856 (qseecom.support_pfe || qseecom.support_fde))
8857 __qseecom_deinit_clk(CLK_CE_DRV);
8858 }
8859
8860 ion_client_destroy(qseecom.ion_clnt);
8861
8862 cdev_del(&qseecom.cdev);
8863
8864 device_destroy(driver_class, qseecom_device_no);
8865
8866 class_destroy(driver_class);
8867
8868 unregister_chrdev_region(qseecom_device_no, 1);
8869
8870 return ret;
8871}
8872
8873static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
8874{
8875 int ret = 0;
8876 struct qseecom_clk *qclk;
8877
8878 qclk = &qseecom.qsee;
8879 atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
8880 if (qseecom.no_clock_support)
8881 return 0;
8882
8883 mutex_lock(&qsee_bw_mutex);
8884 mutex_lock(&clk_access_lock);
8885
8886 if (qseecom.current_mode != INACTIVE) {
8887 ret = msm_bus_scale_client_update_request(
8888 qseecom.qsee_perf_client, INACTIVE);
8889 if (ret)
8890 pr_err("Fail to scale down bus\n");
8891 else
8892 qseecom.current_mode = INACTIVE;
8893 }
8894
8895 if (qclk->clk_access_cnt) {
8896 if (qclk->ce_clk != NULL)
8897 clk_disable_unprepare(qclk->ce_clk);
8898 if (qclk->ce_core_clk != NULL)
8899 clk_disable_unprepare(qclk->ce_core_clk);
8900 if (qclk->ce_bus_clk != NULL)
8901 clk_disable_unprepare(qclk->ce_bus_clk);
8902 }
8903
8904 del_timer_sync(&(qseecom.bw_scale_down_timer));
8905 qseecom.timer_running = false;
8906
8907 mutex_unlock(&clk_access_lock);
8908 mutex_unlock(&qsee_bw_mutex);
8909 cancel_work_sync(&qseecom.bw_inactive_req_ws);
8910
8911 return 0;
8912}
8913
/*
 * qseecom_resume() - PM resume callback: restore the pre-suspend bus vote
 * and re-enable the CE clocks that were gated in qseecom_suspend().
 *
 * On a clock re-enable failure the already-enabled clocks are unwound via
 * the goto chain and -EIO is returned; the driver state is still marked
 * READY on exit either way.
 */
static int qseecom_resume(struct platform_device *pdev)
{
	int mode = 0;
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qseecom.no_clock_support)
		goto exit;

	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);
	/* Restore the cumulative vote, capped at HIGH. */
	if (qseecom.cumulative_mode >= HIGH)
		mode = HIGH;
	else
		mode = qseecom.cumulative_mode;

	if (qseecom.cumulative_mode != INACTIVE) {
		ret = msm_bus_scale_client_update_request(
			qseecom.qsee_perf_client, mode);
		if (ret)
			pr_err("Fail to scale up bus to %d\n", mode);
		else
			qseecom.current_mode = mode;
	}

	/*
	 * Re-enable core, iface, then bus clocks; on failure unwind the
	 * ones already enabled (reverse order via the labels below).
	 */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_core_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_core_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE core clk\n");
				qclk->clk_access_cnt = 0;
				goto err;
			}
		}
		if (qclk->ce_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE iface clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_clk_err;
			}
		}
		if (qclk->ce_bus_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_bus_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE bus clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_bus_clk_err;
			}
		}
	}

	/* Restart the inactivity timer if clocks or votes are active. */
	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
		qseecom.bw_scale_down_timer.expires = jiffies +
			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		mod_timer(&(qseecom.bw_scale_down_timer),
				qseecom.bw_scale_down_timer.expires);
		qseecom.timer_running = true;
	}

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	goto exit;

ce_bus_clk_err:
	if (qclk->ce_clk)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	ret = -EIO;
exit:
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return ret;
}
8993
/* Device-tree match table: binds this driver to "qcom,qseecom" nodes. */
static const struct of_device_id qseecom_match[] = {
	{
		.compatible = "qcom,qseecom",
	},
	{}
};

/* Platform driver descriptor wiring probe/remove and legacy PM hooks. */
static struct platform_driver qseecom_plat_driver = {
	.probe = qseecom_probe,
	.remove = qseecom_remove,
	.suspend = qseecom_suspend,
	.resume = qseecom_resume,
	.driver = {
		.name = "qseecom",
		.owner = THIS_MODULE,
		.of_match_table = qseecom_match,
	},
};

/* Module entry point: register the platform driver. */
static int qseecom_init(void)
{
	return platform_driver_register(&qseecom_plat_driver);
}

/* Module exit point: unregister the platform driver. */
static void qseecom_exit(void)
{
	platform_driver_unregister(&qseecom_plat_driver);
}

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");

module_init(qseecom_init);
module_exit(qseecom_exit);