blob: e359dc5dc9c37308acfe206c1b6fa4ca8f7e4536 [file] [log] [blame]
Jens Wiklander18ebb2f2015-04-14 14:33:20 +02001/*
2 * Copyright (c) 2015, Linaro Limited
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/arm-smccc.h>
18#include <linux/errno.h>
19#include <linux/io.h>
20#include <linux/module.h>
21#include <linux/of.h>
22#include <linux/of_platform.h>
23#include <linux/platform_device.h>
24#include <linux/slab.h>
25#include <linux/string.h>
26#include <linux/tee_drv.h>
27#include <linux/types.h>
28#include <linux/uaccess.h>
29#include "optee_private.h"
30#include "optee_smc.h"
31
#define DRIVER_NAME "optee"

/*
 * Number of pages carved off the start of the shared memory area for
 * driver-private allocations (OPTEE_MSG arguments); the remainder is
 * exposed to user space as dma-buf backed memory (see
 * optee_config_shm_memremap()).
 */
#define OPTEE_SHM_NUM_PRIV_PAGES 1
35
/**
 * optee_from_msg_param() - convert from OPTEE_MSG parameters to
 *			    struct tee_param
 * @params:	subsystem internal parameter representation
 * @num_params:	number of elements in the parameter arrays
 * @msg_params:	OPTEE_MSG parameters
 * Returns 0 on success or <0 on failure
 */
int optee_from_msg_param(struct tee_param *params, size_t num_params,
			 const struct optee_msg_param *msg_params)
{
	int rc;
	size_t n;
	struct tee_shm *shm;
	phys_addr_t pa;

	for (n = 0; n < num_params; n++) {
		struct tee_param *p = params + n;
		const struct optee_msg_param *mp = msg_params + n;
		/* Strip meta/cache bits, keep only the parameter type */
		u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;

		switch (attr) {
		case OPTEE_MSG_ATTR_TYPE_NONE:
			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
			memset(&p->u, 0, sizeof(p->u));
			break;
		case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
			/*
			 * INPUT/OUTPUT/INOUT are consecutive in both the
			 * OPTEE_MSG and TEE_IOCTL namespaces, so the
			 * direction is carried over with offset arithmetic.
			 */
			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT +
				  attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
			p->u.value.a = mp->u.value.a;
			p->u.value.b = mp->u.value.b;
			p->u.value.c = mp->u.value.c;
			break;
		case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
				  attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
			p->u.memref.size = mp->u.tmem.size;
			/*
			 * shm_ref round-trips the struct tee_shm pointer this
			 * driver stored when composing the request (see
			 * to_msg_param_tmp_mem()).
			 */
			shm = (struct tee_shm *)(unsigned long)
				mp->u.tmem.shm_ref;
			if (!shm) {
				p->u.memref.shm_offs = 0;
				p->u.memref.shm = NULL;
				break;
			}
			/* Recover the offset from the returned physical addr */
			rc = tee_shm_get_pa(shm, 0, &pa);
			if (rc)
				return rc;
			p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
			p->u.memref.shm = shm;

			/* Check that the memref is covered by the shm object */
			if (p->u.memref.size) {
				size_t o = p->u.memref.shm_offs +
					   p->u.memref.size - 1;

				/* Probe the last byte; NULL pa = check only */
				rc = tee_shm_get_pa(shm, o, NULL);
				if (rc)
					return rc;
			}
			break;
		case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
			/* Registered memory: passed by shm reference+offset */
			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
				  attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
			p->u.memref.size = mp->u.rmem.size;
			shm = (struct tee_shm *)(unsigned long)
				mp->u.rmem.shm_ref;

			if (!shm) {
				p->u.memref.shm_offs = 0;
				p->u.memref.shm = NULL;
				break;
			}
			p->u.memref.shm_offs = mp->u.rmem.offs;
			p->u.memref.shm = shm;

			break;

		default:
			return -EINVAL;
		}
	}
	return 0;
}
125
Volodymyr Babchuk7ec482b2017-11-29 14:48:32 +0200126static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
127 const struct tee_param *p)
128{
129 int rc;
130 phys_addr_t pa;
131
132 mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
133 TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
134
135 mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
136 mp->u.tmem.size = p->u.memref.size;
137
138 if (!p->u.memref.shm) {
139 mp->u.tmem.buf_ptr = 0;
140 return 0;
141 }
142
143 rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
144 if (rc)
145 return rc;
146
147 mp->u.tmem.buf_ptr = pa;
148 mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
149 OPTEE_MSG_ATTR_CACHE_SHIFT;
150
151 return 0;
152}
153
154static int to_msg_param_reg_mem(struct optee_msg_param *mp,
155 const struct tee_param *p)
156{
157 mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
158 TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
159
160 mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
161 mp->u.rmem.size = p->u.memref.size;
162 mp->u.rmem.offs = p->u.memref.shm_offs;
163 return 0;
164}
165
Jens Wiklander18ebb2f2015-04-14 14:33:20 +0200166/**
167 * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
168 * @msg_params: OPTEE_MSG parameters
169 * @num_params: number of elements in the parameter arrays
170 * @params: subsystem itnernal parameter representation
171 * Returns 0 on success or <0 on failure
172 */
173int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
174 const struct tee_param *params)
175{
176 int rc;
177 size_t n;
Jens Wiklander18ebb2f2015-04-14 14:33:20 +0200178
179 for (n = 0; n < num_params; n++) {
180 const struct tee_param *p = params + n;
181 struct optee_msg_param *mp = msg_params + n;
182
183 switch (p->attr) {
184 case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
185 mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
186 memset(&mp->u, 0, sizeof(mp->u));
187 break;
188 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
189 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
190 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
191 mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr -
192 TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
193 mp->u.value.a = p->u.value.a;
194 mp->u.value.b = p->u.value.b;
195 mp->u.value.c = p->u.value.c;
196 break;
197 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
198 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
199 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
Volodymyr Babchuk7ec482b2017-11-29 14:48:32 +0200200 if (tee_shm_is_registered(p->u.memref.shm))
201 rc = to_msg_param_reg_mem(mp, p);
202 else
203 rc = to_msg_param_tmp_mem(mp, p);
Jens Wiklander18ebb2f2015-04-14 14:33:20 +0200204 if (rc)
205 return rc;
Jens Wiklander18ebb2f2015-04-14 14:33:20 +0200206 break;
207 default:
208 return -EINVAL;
209 }
210 }
211 return 0;
212}
213
214static void optee_get_version(struct tee_device *teedev,
215 struct tee_ioctl_version_data *vers)
216{
217 struct tee_ioctl_version_data v = {
218 .impl_id = TEE_IMPL_ID_OPTEE,
219 .impl_caps = TEE_OPTEE_CAP_TZ,
220 .gen_caps = TEE_GEN_CAP_GP,
221 };
222 *vers = v;
223}
224
225static int optee_open(struct tee_context *ctx)
226{
227 struct optee_context_data *ctxdata;
228 struct tee_device *teedev = ctx->teedev;
229 struct optee *optee = tee_get_drvdata(teedev);
230
231 ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
232 if (!ctxdata)
233 return -ENOMEM;
234
235 if (teedev == optee->supp_teedev) {
236 bool busy = true;
237
Jens Wiklanderad675fa2016-12-23 13:13:39 +0100238 mutex_lock(&optee->supp.mutex);
Jens Wiklander18ebb2f2015-04-14 14:33:20 +0200239 if (!optee->supp.ctx) {
240 busy = false;
241 optee->supp.ctx = ctx;
242 }
Jens Wiklanderad675fa2016-12-23 13:13:39 +0100243 mutex_unlock(&optee->supp.mutex);
Jens Wiklander18ebb2f2015-04-14 14:33:20 +0200244 if (busy) {
245 kfree(ctxdata);
246 return -EBUSY;
247 }
248 }
249
250 mutex_init(&ctxdata->mutex);
251 INIT_LIST_HEAD(&ctxdata->sess_list);
252
253 ctx->data = ctxdata;
254 return 0;
255}
256
/*
 * Tear down a context: best-effort close of any sessions still open in
 * secure world, then free the per-context data.
 */
static void optee_release(struct tee_context *ctx)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct tee_shm *shm;
	struct optee_msg_arg *arg = NULL;
	phys_addr_t parg;
	struct optee_session *sess;
	struct optee_session *sess_tmp;

	if (!ctxdata)
		return;

	/* One message buffer is reused for every CLOSE_SESSION call below */
	shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED);
	if (!IS_ERR(shm)) {
		arg = tee_shm_get_va(shm, 0);
		/*
		 * If va2pa fails for some reason, we can't call into
		 * secure world, only free the memory. Secure OS will leak
		 * sessions and finally refuse more sessions, but we will
		 * at least let normal world reclaim its memory.
		 */
		if (!IS_ERR(arg))
			if (tee_shm_va2pa(shm, arg, &parg))
				arg = NULL; /* prevent usage of parg below */
	}

	list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
				 list_node) {
		list_del(&sess->list_node);
		/* arg is NULL/ERR when the buffer setup above failed */
		if (!IS_ERR_OR_NULL(arg)) {
			memset(arg, 0, sizeof(*arg));
			arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
			arg->session = sess->session_id;
			optee_do_call_with_arg(ctx, parg);
		}
		kfree(sess);
	}
	kfree(ctxdata);

	if (!IS_ERR(shm))
		tee_shm_free(shm);

	ctx->data = NULL;

	/* NOTE(review): presumably releases the supplicant slot claimed in
	 * optee_open() — see optee_supp_release() for the details. */
	if (teedev == optee->supp_teedev)
		optee_supp_release(&optee->supp);
}
306
/* Operations exposed on the client ("-clnt") device */
static const struct tee_driver_ops optee_ops = {
	.get_version = optee_get_version,
	.open = optee_open,
	.release = optee_release,
	.open_session = optee_open_session,
	.close_session = optee_close_session,
	.invoke_func = optee_invoke_func,
	.cancel_req = optee_cancel_req,
	.shm_register = optee_shm_register,
	.shm_unregister = optee_shm_unregister,
};

/* Descriptor for the client device */
static const struct tee_desc optee_desc = {
	.name = DRIVER_NAME "-clnt",
	.ops = &optee_ops,
	.owner = THIS_MODULE,
};

/* Operations exposed on the supplicant ("-supp") device */
static const struct tee_driver_ops optee_supp_ops = {
	.get_version = optee_get_version,
	.open = optee_open,
	.release = optee_release,
	.supp_recv = optee_supp_recv,
	.supp_send = optee_supp_send,
	.shm_register = optee_shm_register_supp,
	.shm_unregister = optee_shm_unregister_supp,
};

/* Descriptor for the privileged supplicant device */
static const struct tee_desc optee_supp_desc = {
	.name = DRIVER_NAME "-supp",
	.ops = &optee_supp_ops,
	.owner = THIS_MODULE,
	.flags = TEE_DESC_PRIVILEGED,
};
341
342static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
343{
344 struct arm_smccc_res res;
345
346 invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
347
348 if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
349 res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
350 return true;
351 return false;
352}
353
354static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
355{
356 union {
357 struct arm_smccc_res smccc;
358 struct optee_smc_calls_revision_result result;
359 } res;
360
361 invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
362
363 if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
364 (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
365 return true;
366 return false;
367}
368
369static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
370 u32 *sec_caps)
371{
372 union {
373 struct arm_smccc_res smccc;
374 struct optee_smc_exchange_capabilities_result result;
375 } res;
376 u32 a1 = 0;
377
378 /*
379 * TODO This isn't enough to tell if it's UP system (from kernel
380 * point of view) or not, is_smp() returns the the information
381 * needed, but can't be called directly from here.
382 */
383 if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
384 a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
385
386 invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
387 &res.smccc);
388
389 if (res.result.status != OPTEE_SMC_RETURN_OK)
390 return false;
391
392 *sec_caps = res.result.capabilities;
393 return true;
394}
395
/*
 * Ask secure world for its reserved shared-memory range, map it, and carve
 * it into a private region (for OPTEE_MSG arguments) and a dma-buf region.
 * On success *memremaped_shm holds the mapping so it can be unmapped at
 * driver removal; returns the shm pool or an ERR_PTR.
 */
static struct tee_shm_pool *
optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_get_shm_config_result result;
	} res;
	struct tee_shm_pool *pool;
	unsigned long vaddr;
	phys_addr_t paddr;
	size_t size;
	phys_addr_t begin;
	phys_addr_t end;
	void *va;
	struct tee_shm_pool_mem_info priv_info;
	struct tee_shm_pool_mem_info dmabuf_info;

	invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
	if (res.result.status != OPTEE_SMC_RETURN_OK) {
		pr_info("shm service not available\n");
		return ERR_PTR(-ENOENT);
	}

	if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
		pr_err("only normal cached shared memory supported\n");
		return ERR_PTR(-EINVAL);
	}

	/* Trim the advertised range to whole pages */
	begin = roundup(res.result.start, PAGE_SIZE);
	end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
	paddr = begin;
	size = end - begin;

	/* Need room for the private pages plus at least as much for dma-buf */
	if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
		pr_err("too small shared memory area\n");
		return ERR_PTR(-EINVAL);
	}

	va = memremap(paddr, size, MEMREMAP_WB);
	if (!va) {
		pr_err("shared memory ioremap failed\n");
		return ERR_PTR(-EINVAL);
	}
	vaddr = (unsigned long)va;

	/* First OPTEE_SHM_NUM_PRIV_PAGES pages are driver-private ... */
	priv_info.vaddr = vaddr;
	priv_info.paddr = paddr;
	priv_info.size = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
	/* ... the remainder backs dma-buf allocations */
	dmabuf_info.vaddr = vaddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
	dmabuf_info.paddr = paddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
	dmabuf_info.size = size - OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;

	pool = tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info);
	if (IS_ERR(pool)) {
		/* Pool creation failed: drop the mapping before returning */
		memunmap(va);
		goto out;
	}

	*memremaped_shm = va;
out:
	return pool;
}
458
/* Simple wrapper functions to be able to use a function pointer */

/* SMC conduit: trap to the secure monitor (EL3). */
static void optee_smccc_smc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}
468
/* HVC conduit: trap to the hypervisor (EL2). */
static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}
477
478static optee_invoke_fn *get_invoke_func(struct device_node *np)
479{
480 const char *method;
481
482 pr_info("probing for conduit method from DT.\n");
483
484 if (of_property_read_string(np, "method", &method)) {
485 pr_warn("missing \"method\" property\n");
486 return ERR_PTR(-ENXIO);
487 }
488
489 if (!strcmp("hvc", method))
490 return optee_smccc_hvc;
491 else if (!strcmp("smc", method))
492 return optee_smccc_smc;
493
494 pr_warn("invalid \"method\" property: %s\n", method);
495 return ERR_PTR(-EINVAL);
496}
497
/*
 * Probe the OP-TEE firmware node: validate the secure-world API, set up the
 * shared memory pool, and register the client and supplicant tee devices.
 * Returns the new struct optee or an ERR_PTR on failure.
 */
static struct optee *optee_probe(struct device_node *np)
{
	optee_invoke_fn *invoke_fn;
	struct tee_shm_pool *pool;
	struct optee *optee = NULL;
	void *memremaped_shm = NULL;
	struct tee_device *teedev;
	u32 sec_caps;
	int rc;

	invoke_fn = get_invoke_func(np);
	if (IS_ERR(invoke_fn))
		return (void *)invoke_fn;

	if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
		pr_warn("api uid mismatch\n");
		return ERR_PTR(-EINVAL);
	}

	if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
		pr_warn("api revision mismatch\n");
		return ERR_PTR(-EINVAL);
	}

	if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
		pr_warn("capabilities mismatch\n");
		return ERR_PTR(-EINVAL);
	}

	/*
	 * We have no other option for shared memory, if secure world
	 * doesn't have any reserved memory we can use we can't continue.
	 */
	if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
		return ERR_PTR(-EINVAL);

	pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
	if (IS_ERR(pool))
		return (void *)pool;

	optee = kzalloc(sizeof(*optee), GFP_KERNEL);
	if (!optee) {
		rc = -ENOMEM;
		goto err;
	}

	optee->invoke_fn = invoke_fn;
	optee->sec_caps = sec_caps;

	/* Both devices share the same shm pool and driver data */
	teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err;
	}
	optee->teedev = teedev;

	teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err;
	}
	optee->supp_teedev = teedev;

	rc = tee_device_register(optee->teedev);
	if (rc)
		goto err;

	rc = tee_device_register(optee->supp_teedev);
	if (rc)
		goto err;

	mutex_init(&optee->call_queue.mutex);
	INIT_LIST_HEAD(&optee->call_queue.waiters);
	optee_wait_queue_init(&optee->wait_queue);
	optee_supp_init(&optee->supp);
	optee->memremaped_shm = memremaped_shm;
	optee->pool = pool;

	/* Tell OP-TEE it may cache shm objects between calls */
	optee_enable_shm_cache(optee);

	pr_info("initialized driver\n");
	return optee;
err:
	if (optee) {
		/*
		 * tee_device_unregister() is safe to call even if the
		 * devices hasn't been registered with
		 * tee_device_register() yet.
		 */
		tee_device_unregister(optee->supp_teedev);
		tee_device_unregister(optee->teedev);
		kfree(optee);
	}
	if (pool)
		tee_shm_pool_free(pool);
	if (memremaped_shm)
		memunmap(memremaped_shm);
	return ERR_PTR(rc);
}
597
/*
 * Undo optee_probe(): flush secure world's shm cache, unregister both tee
 * devices, then release the pool, the mapping and the remaining state.
 * The ordering below matters: devices must go before the resources they use.
 */
static void optee_remove(struct optee *optee)
{
	/*
	 * Ask OP-TEE to free all cached shared memory objects to decrease
	 * reference counters and also avoid wild pointers in secure world
	 * into the old shared memory range.
	 */
	optee_disable_shm_cache(optee);

	/*
	 * The two devices has to be unregistered before we can free the
	 * other resources.
	 */
	tee_device_unregister(optee->supp_teedev);
	tee_device_unregister(optee->teedev);

	tee_shm_pool_free(optee->pool);
	if (optee->memremaped_shm)
		memunmap(optee->memremaped_shm);
	optee_wait_queue_exit(&optee->wait_queue);
	optee_supp_uninit(&optee->supp);
	mutex_destroy(&optee->call_queue.mutex);

	kfree(optee);
}
623
/* Device tree nodes this driver binds to */
static const struct of_device_id optee_match[] = {
	{ .compatible = "linaro,optee-tz" },
	{},
};

/* Singleton driver instance, set by optee_driver_init() */
static struct optee *optee_svc;
630
/*
 * Module entry point: locate the OP-TEE node under /firmware and probe it.
 */
static int __init optee_driver_init(void)
{
	struct device_node *fw_np;
	struct device_node *np;
	struct optee *optee;

	/* Node is supposed to be below /firmware */
	fw_np = of_find_node_by_name(NULL, "firmware");
	if (!fw_np)
		return -ENODEV;

	/*
	 * NOTE(review): of_find_matching_node() consumes the reference on
	 * fw_np per the OF iterator contract, so no of_node_put(fw_np) is
	 * needed here — confirm against the of_find_* documentation.
	 */
	np = of_find_matching_node(fw_np, optee_match);
	if (!np)
		return -ENODEV;

	optee = optee_probe(np);
	of_node_put(np); /* probe is done with the node */

	if (IS_ERR(optee))
		return PTR_ERR(optee);

	optee_svc = optee;

	return 0;
}
module_init(optee_driver_init);
657
658static void __exit optee_driver_exit(void)
659{
660 struct optee *optee = optee_svc;
661
662 optee_svc = NULL;
663 if (optee)
664 optee_remove(optee);
665}
666module_exit(optee_driver_exit);
667
/* Module metadata */
MODULE_AUTHOR("Linaro");
MODULE_DESCRIPTION("OP-TEE driver");
MODULE_SUPPORTED_DEVICE("");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");