1/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/slab.h>
14#include <linux/io.h>
15#include <linux/module.h>
16#include <linux/mutex.h>
17#include <linux/errno.h>
18#include <linux/err.h>
19#include <linux/init.h>
20#include <linux/delay.h>
21
22#include <asm/cacheflush.h>
23#include <asm/compiler.h>
24
25#include <soc/qcom/scm.h>
26
27#define CREATE_TRACE_POINTS
28#include <trace/events/scm.h>
29
30#define SCM_ENOMEM -5
31#define SCM_EOPNOTSUPP -4
32#define SCM_EINVAL_ADDR -3
33#define SCM_EINVAL_ARG -2
34#define SCM_ERROR -1
35#define SCM_INTERRUPTED 1
36#define SCM_EBUSY -55
37#define SCM_V2_EBUSY -12
38
39static DEFINE_MUTEX(scm_lock);
40
41/*
42 * MSM8996 V2 requires a lock to protect against
43 * concurrent accesses between the limits management
44 * driver and the clock controller
45 */
46DEFINE_MUTEX(scm_lmh_lock);
47
48#define SCM_EBUSY_WAIT_MS 30
49#define SCM_EBUSY_MAX_RETRY 67
50
51#define N_EXT_SCM_ARGS 7
52#define FIRST_EXT_ARG_IDX 3
53#define SMC_ATOMIC_SYSCALL 31
54#define N_REGISTER_ARGS (MAX_SCM_ARGS - N_EXT_SCM_ARGS + 1)
55#define SMC64_MASK 0x40000000
56#define SMC_ATOMIC_MASK 0x80000000
57#define IS_CALL_AVAIL_CMD 1
58
59#define SCM_BUF_LEN(__cmd_size, __resp_size) ({ \
60 size_t x = __cmd_size + __resp_size; \
61 size_t y = sizeof(struct scm_command) + sizeof(struct scm_response); \
62 size_t result; \
63 if (x < __cmd_size || (x + y) < x) \
64 result = 0; \
65 else \
66 result = x + y; \
67 result; \
68 })
69/**
70 * struct scm_command - one SCM command buffer
71 * @len: total available memory for command and response
72 * @buf_offset: start of command buffer
73 * @resp_hdr_offset: start of response buffer
74 * @id: command to be executed
75 * @buf: buffer returned from scm_get_command_buffer()
76 *
77 * An SCM command is laid out in memory as follows:
78 *
79 * ------------------- <--- struct scm_command
80 * | command header |
81 * ------------------- <--- scm_get_command_buffer()
82 * | command buffer |
83 * ------------------- <--- struct scm_response and
84 * | response header | scm_command_to_response()
85 * ------------------- <--- scm_get_response_buffer()
86 * | response buffer |
87 * -------------------
88 *
89 * There can be arbitrary padding between the headers and buffers so
90 * you should always use the appropriate scm_get_*_buffer() routines
91 * to access the buffers in a safe manner.
92 */
93struct scm_command {
94 u32 len;
95 u32 buf_offset;
96 u32 resp_hdr_offset;
97 u32 id;
98 u32 buf[0];
99};
100
101/**
102 * struct scm_response - one SCM response buffer
103 * @len: total available memory for response
104 * @buf_offset: start of response data relative to start of scm_response
105 * @is_complete: indicates if the command has finished processing
106 */
107struct scm_response {
108 u32 len;
109 u32 buf_offset;
110 u32 is_complete;
111};
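
/*
 * Editorial sketch of how SCM_BUF_LEN() above sizes one allocation holding all
 * four regions of the layout documented for struct scm_command.  With the
 * 4-byte fields above and no padding:
 *
 *	sizeof(struct scm_command)  = 16	command header
 *	cmd_len                     = command payload from the caller
 *	sizeof(struct scm_response) = 12	response header
 *	resp_len                    = response payload copied back
 *
 * so, for example, SCM_BUF_LEN(8, 4) = 8 + 4 + 16 + 12 = 40 bytes, and
 * scm_call_common() then sets buf_offset = 16 and resp_hdr_offset = 16 + 8.
 * The macro returns 0 if either sum overflows, which callers treat as -EINVAL.
 */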
112
113#ifdef CONFIG_ARM64
114
115#define R0_STR "x0"
116#define R1_STR "x1"
117#define R2_STR "x2"
118#define R3_STR "x3"
119#define R4_STR "x4"
120#define R5_STR "x5"
121#define R6_STR "x6"
122
123/* Outer caches unsupported on ARM64 platforms */
124#define outer_inv_range(x, y)
125#define outer_flush_range(x, y)
126
127#define __cpuc_flush_dcache_area __flush_dcache_area
128
129#else
130
131#define R0_STR "r0"
132#define R1_STR "r1"
133#define R2_STR "r2"
134#define R3_STR "r3"
135#define R4_STR "r4"
136#define R5_STR "r5"
137#define R6_STR "r6"
138
139#endif
140
141/**
142 * scm_command_to_response() - Get a pointer to a scm_response
143 * @cmd: command
144 *
145 * Returns a pointer to a response for a command.
146 */
147static inline struct scm_response *scm_command_to_response(
148 const struct scm_command *cmd)
149{
150 return (void *)cmd + cmd->resp_hdr_offset;
151}
152
153/**
154 * scm_get_command_buffer() - Get a pointer to a command buffer
155 * @cmd: command
156 *
157 * Returns a pointer to the command buffer of a command.
158 */
159static inline void *scm_get_command_buffer(const struct scm_command *cmd)
160{
161 return (void *)cmd->buf;
162}
163
164/**
165 * scm_get_response_buffer() - Get a pointer to a response buffer
166 * @rsp: response
167 *
168 * Returns a pointer to a response buffer of a response.
169 */
170static inline void *scm_get_response_buffer(const struct scm_response *rsp)
171{
172 return (void *)rsp + rsp->buf_offset;
173}
174
175static int scm_remap_error(int err)
176{
177 switch (err) {
178 case SCM_ERROR:
179 return -EIO;
180 case SCM_EINVAL_ADDR:
181 case SCM_EINVAL_ARG:
182 return -EINVAL;
183 case SCM_EOPNOTSUPP:
184 return -EOPNOTSUPP;
185 case SCM_ENOMEM:
186 return -ENOMEM;
187 case SCM_EBUSY:
188 return SCM_EBUSY;
189 case SCM_V2_EBUSY:
190 return SCM_V2_EBUSY;
191 }
192 return -EINVAL;
193}
194
195static u32 smc(u32 cmd_addr)
196{
197 int context_id;
198 register u32 r0 asm("r0") = 1;
199 register u32 r1 asm("r1") = (uintptr_t)&context_id;
200 register u32 r2 asm("r2") = cmd_addr;
201 do {
202 asm volatile(
203 __asmeq("%0", R0_STR)
204 __asmeq("%1", R0_STR)
205 __asmeq("%2", R1_STR)
206 __asmeq("%3", R2_STR)
207#ifdef REQUIRES_SEC
208 ".arch_extension sec\n"
209#endif
210 "smc #0\n"
211 : "=r" (r0)
212 : "r" (r0), "r" (r1), "r" (r2)
213 : "r3");
214 } while (r0 == SCM_INTERRUPTED);
215
216 return r0;
217}
218
219static int __scm_call(const struct scm_command *cmd)
220{
221 int ret;
222 u32 cmd_addr = virt_to_phys(cmd);
223
224 /*
225 * Flush the command buffer so that the secure world sees
226 * the correct data.
227 */
228 __cpuc_flush_dcache_area((void *)cmd, cmd->len);
229 outer_flush_range(cmd_addr, cmd_addr + cmd->len);
230
231 ret = smc(cmd_addr);
232 if (ret < 0) {
233 if (ret != SCM_EBUSY)
234 pr_err("scm_call failed with error code %d\n", ret);
235 ret = scm_remap_error(ret);
236 }
237 return ret;
238}
239
240#ifndef CONFIG_ARM64
241static void scm_inv_range(unsigned long start, unsigned long end)
242{
243 u32 cacheline_size, ctr;
244
245 asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
246 cacheline_size = 4 << ((ctr >> 16) & 0xf);
247
248 start = round_down(start, cacheline_size);
249 end = round_up(end, cacheline_size);
250 outer_inv_range(start, end);
251 while (start < end) {
252 asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
253 : "memory");
254 start += cacheline_size;
255 }
256 dsb();
257 isb();
258}
259#else
260
261static void scm_inv_range(unsigned long start, unsigned long end)
262{
263 dmac_inv_range((void *)start, (void *)end);
264}
265#endif
266
267/**
268 * scm_call_common() - Send an SCM command
269 * @svc_id: service identifier
270 * @cmd_id: command identifier
271 * @cmd_buf: command buffer
272 * @cmd_len: length of the command buffer
273 * @resp_buf: response buffer
274 * @resp_len: length of the response buffer
275 * @scm_buf: internal scm structure used for passing data
276 * @scm_buf_len: length of the internal scm structure
277 *
278 * Core function to scm call. Initializes the given cmd structure with
279 * appropriate values and makes the actual scm call. Validation of cmd
280 * pointer and length must occur in the calling function.
281 *
282 * Returns the appropriate error code from the scm call
283 */
284
285static int scm_call_common(u32 svc_id, u32 cmd_id, const void *cmd_buf,
286 size_t cmd_len, void *resp_buf, size_t resp_len,
287 struct scm_command *scm_buf,
288 size_t scm_buf_length)
289{
290 int ret;
291 struct scm_response *rsp;
292 unsigned long start, end;
293
294 scm_buf->len = scm_buf_length;
295 scm_buf->buf_offset = offsetof(struct scm_command, buf);
296 scm_buf->resp_hdr_offset = scm_buf->buf_offset + cmd_len;
297 scm_buf->id = (svc_id << 10) | cmd_id;
298
299 if (cmd_buf)
300 memcpy(scm_get_command_buffer(scm_buf), cmd_buf, cmd_len);
301
302 mutex_lock(&scm_lock);
303 ret = __scm_call(scm_buf);
304 mutex_unlock(&scm_lock);
305 if (ret)
306 return ret;
307
308 rsp = scm_command_to_response(scm_buf);
309 start = (unsigned long)rsp;
310
311 do {
312 scm_inv_range(start, start + sizeof(*rsp));
313 } while (!rsp->is_complete);
314
315 end = (unsigned long)scm_get_response_buffer(rsp) + resp_len;
316 scm_inv_range(start, end);
317
318 if (resp_buf)
319 memcpy(resp_buf, scm_get_response_buffer(rsp), resp_len);
320
321 return ret;
322}
323
324/*
325 * Sometimes the secure world may be busy waiting for a particular resource.
326 * In those situations, it is expected that the secure world returns a special
327 * error code (SCM_EBUSY). Retry any scm_call that fails with this error code,
328 * but with a timeout in place. Also, don't move this into scm_call_common,
329 * since we want the first attempt to be the "fastpath".
330 */
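/*
 * Editorial note on the timing below: each retry sleeps SCM_EBUSY_WAIT_MS
 * (30 ms), so the warning at retry_count == 33 fires after roughly
 * 33 * 30 ms ~= 1 s, and SCM_EBUSY_MAX_RETRY (67) bounds the whole loop at
 * about 67 * 30 ms ~= 2 s before giving up with SCM_EBUSY.
 */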
331static int _scm_call_retry(u32 svc_id, u32 cmd_id, const void *cmd_buf,
332 size_t cmd_len, void *resp_buf, size_t resp_len,
333 struct scm_command *cmd,
334 size_t len)
335{
336 int ret, retry_count = 0;
337
338 do {
339 ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len,
340 resp_buf, resp_len, cmd, len);
341 if (ret == SCM_EBUSY)
342 msleep(SCM_EBUSY_WAIT_MS);
343 if (retry_count == 33)
344 pr_warn("scm: secure world has been busy for 1 second!\n");
345 } while (ret == SCM_EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));
346
347 if (ret == SCM_EBUSY)
348 pr_err("scm: secure world busy (rc = SCM_EBUSY)\n");
349
350 return ret;
351}
352
353/**
354 * scm_call_noalloc - Send an SCM command
355 *
356 * Same as scm_call except clients pass in a buffer (@scm_buf) to be used for
357 * scm internal structures. The buffer should be allocated with
358 * DEFINE_SCM_BUFFER to account for the proper alignment and size.
359 */
360int scm_call_noalloc(u32 svc_id, u32 cmd_id, const void *cmd_buf,
361 size_t cmd_len, void *resp_buf, size_t resp_len,
362 void *scm_buf, size_t scm_buf_len)
363{
364 int ret;
365 size_t len = SCM_BUF_LEN(cmd_len, resp_len);
366
367 if (len == 0)
368 return -EINVAL;
369
370 if (!IS_ALIGNED((unsigned long)scm_buf, PAGE_SIZE))
371 return -EINVAL;
372
373 memset(scm_buf, 0, scm_buf_len);
374
375 ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf,
376 resp_len, scm_buf, len);
377 return ret;
378
379}
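
/*
 * Illustrative sketch (editorial addition, not part of the original driver):
 * a caller of scm_call_noalloc().  It assumes DEFINE_SCM_BUFFER() from
 * <soc/qcom/scm.h> declares a page-aligned buffer named by its argument, as
 * the comment above requires; EXAMPLE_SVC_ID/EXAMPLE_CMD_ID and the payload
 * are placeholders.
 */
#if 0
DEFINE_SCM_BUFFER(example_scm_buf);

static int example_noalloc_call(void)
{
	u32 req = 1;	/* hypothetical command payload */
	u32 resp = 0;
	int ret;

	ret = scm_call_noalloc(EXAMPLE_SVC_ID, EXAMPLE_CMD_ID,
			       &req, sizeof(req), &resp, sizeof(resp),
			       example_scm_buf, sizeof(example_scm_buf));
	if (ret)
		return ret;

	return resp;	/* response payload copied back by scm_call_noalloc() */
}
#endif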
380
381#ifdef CONFIG_ARM64
382
383static int __scm_call_armv8_64(u64 x0, u64 x1, u64 x2, u64 x3, u64 x4, u64 x5,
384 u64 *ret1, u64 *ret2, u64 *ret3)
385{
386 register u64 r0 asm("r0") = x0;
387 register u64 r1 asm("r1") = x1;
388 register u64 r2 asm("r2") = x2;
389 register u64 r3 asm("r3") = x3;
390 register u64 r4 asm("r4") = x4;
391 register u64 r5 asm("r5") = x5;
392 register u64 r6 asm("r6") = 0;
393
394 do {
395 asm volatile(
396 __asmeq("%0", R0_STR)
397 __asmeq("%1", R1_STR)
398 __asmeq("%2", R2_STR)
399 __asmeq("%3", R3_STR)
400 __asmeq("%4", R4_STR)
401 __asmeq("%5", R5_STR)
402 __asmeq("%6", R6_STR)
403 __asmeq("%7", R0_STR)
404 __asmeq("%8", R1_STR)
405 __asmeq("%9", R2_STR)
406 __asmeq("%10", R3_STR)
407 __asmeq("%11", R4_STR)
408 __asmeq("%12", R5_STR)
409 __asmeq("%13", R6_STR)
410#ifdef REQUIRES_SEC
411 ".arch_extension sec\n"
412#endif
413 "smc #0\n"
414 : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3),
415 "=r" (r4), "=r" (r5), "=r" (r6)
416 : "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
417 "r" (r5), "r" (r6)
418 : "x7", "x8", "x9", "x10", "x11", "x12", "x13",
419 "x14", "x15", "x16", "x17");
420 } while (r0 == SCM_INTERRUPTED);
421
422 if (ret1)
423 *ret1 = r1;
424 if (ret2)
425 *ret2 = r2;
426 if (ret3)
427 *ret3 = r3;
428
429 return r0;
430}
431
432static int __scm_call_armv8_32(u32 w0, u32 w1, u32 w2, u32 w3, u32 w4, u32 w5,
433 u64 *ret1, u64 *ret2, u64 *ret3)
434{
435 register u32 r0 asm("r0") = w0;
436 register u32 r1 asm("r1") = w1;
437 register u32 r2 asm("r2") = w2;
438 register u32 r3 asm("r3") = w3;
439 register u32 r4 asm("r4") = w4;
440 register u32 r5 asm("r5") = w5;
441 register u32 r6 asm("r6") = 0;
442
443 do {
444 asm volatile(
445 __asmeq("%0", R0_STR)
446 __asmeq("%1", R1_STR)
447 __asmeq("%2", R2_STR)
448 __asmeq("%3", R3_STR)
449 __asmeq("%4", R4_STR)
450 __asmeq("%5", R5_STR)
451 __asmeq("%6", R6_STR)
452 __asmeq("%7", R0_STR)
453 __asmeq("%8", R1_STR)
454 __asmeq("%9", R2_STR)
455 __asmeq("%10", R3_STR)
456 __asmeq("%11", R4_STR)
457 __asmeq("%12", R5_STR)
458 __asmeq("%13", R6_STR)
459#ifdef REQUIRES_SEC
460 ".arch_extension sec\n"
461#endif
462 "smc #0\n"
463 : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3),
464 "=r" (r4), "=r" (r5), "=r" (r6)
465 : "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
466 "r" (r5), "r" (r6)
467 : "x7", "x8", "x9", "x10", "x11", "x12", "x13",
468 "x14", "x15", "x16", "x17");
469
470 } while (r0 == SCM_INTERRUPTED);
471
472 if (ret1)
473 *ret1 = r1;
474 if (ret2)
475 *ret2 = r2;
476 if (ret3)
477 *ret3 = r3;
478
479 return r0;
480}
481
482#else
483
484static int __scm_call_armv8_32(u32 w0, u32 w1, u32 w2, u32 w3, u32 w4, u32 w5,
485 u64 *ret1, u64 *ret2, u64 *ret3)
486{
487 register u32 r0 asm("r0") = w0;
488 register u32 r1 asm("r1") = w1;
489 register u32 r2 asm("r2") = w2;
490 register u32 r3 asm("r3") = w3;
491 register u32 r4 asm("r4") = w4;
492 register u32 r5 asm("r5") = w5;
493 register u32 r6 asm("r6") = 0;
494
495 do {
496 asm volatile(
497 __asmeq("%0", R0_STR)
498 __asmeq("%1", R1_STR)
499 __asmeq("%2", R2_STR)
500 __asmeq("%3", R3_STR)
501 __asmeq("%4", R4_STR)
502 __asmeq("%5", R5_STR)
503 __asmeq("%6", R6_STR)
504 __asmeq("%7", R0_STR)
505 __asmeq("%8", R1_STR)
506 __asmeq("%9", R2_STR)
507 __asmeq("%10", R3_STR)
508 __asmeq("%11", R4_STR)
509 __asmeq("%12", R5_STR)
510 __asmeq("%13", R6_STR)
511#ifdef REQUIRES_SEC
512 ".arch_extension sec\n"
513#endif
514 "smc #0\n"
515 : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3),
516 "=r" (r4), "=r" (r5), "=r" (r6)
517 : "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
518 "r" (r5), "r" (r6));
519
520 } while (r0 == SCM_INTERRUPTED);
521
522 if (ret1)
523 *ret1 = r1;
524 if (ret2)
525 *ret2 = r2;
526 if (ret3)
527 *ret3 = r3;
528
529 return r0;
530}
531
532static int __scm_call_armv8_64(u64 x0, u64 x1, u64 x2, u64 x3, u64 x4, u64 x5,
533 u64 *ret1, u64 *ret2, u64 *ret3)
534{
535 return 0;
536}
537#endif
538
539struct scm_extra_arg {
540 union {
541 u32 args32[N_EXT_SCM_ARGS];
542 u64 args64[N_EXT_SCM_ARGS];
543 };
544};
545
546static enum scm_interface_version {
547 SCM_UNKNOWN,
548 SCM_LEGACY,
549 SCM_ARMV8_32,
550 SCM_ARMV8_64,
551} scm_version = SCM_UNKNOWN;
552
553/* This will be set to specify SMC32 or SMC64 */
554static u32 scm_version_mask;
555
556bool is_scm_armv8(void)
557{
558 int ret;
559 u64 ret1, x0;
560
561 if (likely(scm_version != SCM_UNKNOWN))
562 return (scm_version == SCM_ARMV8_32) ||
563 (scm_version == SCM_ARMV8_64);
564 /*
565 * This is a one time check that runs on the first ever
566 * invocation of is_scm_armv8. We might be called in atomic
567 * context so no mutexes etc. Also, we can't use the scm_call2
568 * or scm_call2_APIs directly since they depend on this init.
569 */
570
571 /* First try a SMC64 call */
572 scm_version = SCM_ARMV8_64;
573 ret1 = 0;
574 x0 = SCM_SIP_FNID(SCM_SVC_INFO, IS_CALL_AVAIL_CMD) | SMC_ATOMIC_MASK;
575 ret = __scm_call_armv8_64(x0 | SMC64_MASK, SCM_ARGS(1), x0, 0, 0, 0,
576 &ret1, NULL, NULL);
577 if (ret || !ret1) {
578 /* Try SMC32 call */
579 ret1 = 0;
580 ret = __scm_call_armv8_32(x0, SCM_ARGS(1), x0, 0, 0, 0,
581 &ret1, NULL, NULL);
582 if (ret || !ret1)
583 scm_version = SCM_LEGACY;
584 else
585 scm_version = SCM_ARMV8_32;
586 } else
587 scm_version_mask = SMC64_MASK;
588
589 pr_debug("scm_call: scm version is %x, mask is %x\n", scm_version,
590 scm_version_mask);
591
592 return (scm_version == SCM_ARMV8_32) ||
593 (scm_version == SCM_ARMV8_64);
594}
595EXPORT_SYMBOL(is_scm_armv8);
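
/*
 * Illustrative sketch (editorial addition): the dual-convention pattern that
 * scm_io_read()/scm_io_write() near the end of this file also follow -- probe
 * the calling convention once with is_scm_armv8() and use either the legacy
 * or the ARMv8 entry point.  "svc", "cmd" and "arg" are placeholders.
 */
#if 0
static int example_dual_convention_call(u32 svc, u32 cmd, u32 arg)
{
	struct scm_desc desc = {
		.args[0] = arg,
		.arginfo = SCM_ARGS(1),
	};

	if (!is_scm_armv8())
		return scm_call(svc, cmd, &arg, sizeof(arg), NULL, 0);

	return scm_call2(SCM_SIP_FNID(svc, cmd), &desc);
}
#endif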
596
597/*
598 * If there are more than N_REGISTER_ARGS, allocate a buffer and place
599 * the additional arguments in it. The extra argument buffer will be
600 * pointed to by X5.
601 */
602static int allocate_extra_arg_buffer(struct scm_desc *desc, gfp_t flags)
603{
604 int i, j;
605 struct scm_extra_arg *argbuf;
606 int arglen = desc->arginfo & 0xf;
607 size_t argbuflen = PAGE_ALIGN(sizeof(struct scm_extra_arg));
608
609 desc->x5 = desc->args[FIRST_EXT_ARG_IDX];
610
611 if (likely(arglen <= N_REGISTER_ARGS)) {
612 desc->extra_arg_buf = NULL;
613 return 0;
614 }
615
616 argbuf = kzalloc(argbuflen, flags);
617 if (!argbuf) {
618 pr_err("scm_call: failed to alloc mem for extended argument buffer\n");
619 return -ENOMEM;
620 }
621
622 desc->extra_arg_buf = argbuf;
623
624 j = FIRST_EXT_ARG_IDX;
625 if (scm_version == SCM_ARMV8_64)
626 for (i = 0; i < N_EXT_SCM_ARGS; i++)
627 argbuf->args64[i] = desc->args[j++];
628 else
629 for (i = 0; i < N_EXT_SCM_ARGS; i++)
630 argbuf->args32[i] = desc->args[j++];
631 desc->x5 = virt_to_phys(argbuf);
632 __cpuc_flush_dcache_area(argbuf, argbuflen);
633 outer_flush_range(virt_to_phys(argbuf),
634 virt_to_phys(argbuf) + argbuflen);
635
636 return 0;
637}
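
/*
 * Illustrative sketch (editorial addition): a descriptor that spills into the
 * extra-argument buffer.  With the usual MAX_SCM_ARGS of 10, N_REGISTER_ARGS
 * is 4, so a six-argument call keeps args[0..2] in registers x2..x4 while
 * args[3..] are copied into a flushed scm_extra_arg whose physical address
 * travels in x5.  SCM_ARGS(6) is assumed to accept just the argument count,
 * as SCM_ARGS(1)/SCM_ARGS(2) do elsewhere in this file; the IDs are
 * placeholders.
 */
#if 0
static int example_six_arg_call(u64 a0, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
{
	struct scm_desc desc = {
		.args = { a0, a1, a2, a3, a4, a5 },
		.arginfo = SCM_ARGS(6),
	};

	return scm_call2(SCM_SIP_FNID(EXAMPLE_SVC_ID, EXAMPLE_CMD_ID), &desc);
}
#endif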
638
639/**
640 * scm_call2() - Invoke a syscall in the secure world
641 * @fn_id: The function ID for this syscall
642 * @desc: Descriptor structure containing arguments and return values
643 *
644 * Sends a command to the SCM and waits for the command to finish processing.
645 * This should *only* be called in pre-emptible context.
646 *
647 * A note on cache maintenance:
648 * Note that any buffers that are expected to be accessed by the secure world
649 * must be flushed before invoking scm_call and invalidated in the cache
650 * immediately after scm_call returns. An important point that must be noted
651 * is that on ARMV8 architectures, invalidation actually also causes a dirty
652 * cache line to be cleaned (flushed + unset-dirty-bit). Therefore it is of
653 * paramount importance that the buffer be flushed before invoking scm_call2,
654 * even if you don't care about the contents of that buffer.
655 *
656 * Note that cache maintenance on the argument buffer (desc->args) is taken care
657 * of by scm_call2; however, callers are responsible for any other cached
658 * buffers passed over to the secure world.
659*/
660int scm_call2(u32 fn_id, struct scm_desc *desc)
661{
662 int arglen = desc->arginfo & 0xf;
663 int ret, retry_count = 0;
664 u64 x0;
665
666 if (unlikely(!is_scm_armv8()))
667 return -ENODEV;
668
669 ret = allocate_extra_arg_buffer(desc, GFP_KERNEL);
670 if (ret)
671 return ret;
672
673 x0 = fn_id | scm_version_mask;
674
675 do {
676 mutex_lock(&scm_lock);
677
678 if (SCM_SVC_ID(fn_id) == SCM_SVC_LMH)
679 mutex_lock(&scm_lmh_lock);
680
681 desc->ret[0] = desc->ret[1] = desc->ret[2] = 0;
682
683 trace_scm_call_start(x0, desc);
684
685 if (scm_version == SCM_ARMV8_64)
686 ret = __scm_call_armv8_64(x0, desc->arginfo,
687 desc->args[0], desc->args[1],
688 desc->args[2], desc->x5,
689 &desc->ret[0], &desc->ret[1],
690 &desc->ret[2]);
691 else
692 ret = __scm_call_armv8_32(x0, desc->arginfo,
693 desc->args[0], desc->args[1],
694 desc->args[2], desc->x5,
695 &desc->ret[0], &desc->ret[1],
696 &desc->ret[2]);
697
698 trace_scm_call_end(desc);
699
700 if (SCM_SVC_ID(fn_id) == SCM_SVC_LMH)
701 mutex_unlock(&scm_lmh_lock);
702
703 mutex_unlock(&scm_lock);
704
705 if (ret == SCM_V2_EBUSY)
706 msleep(SCM_EBUSY_WAIT_MS);
707 if (retry_count == 33)
708 pr_warn("scm: secure world has been busy for 1 second!\n");
709 } while (ret == SCM_V2_EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));
710
711 if (ret < 0)
712 pr_err("scm_call failed: func id %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
713 x0, ret, desc->ret[0], desc->ret[1], desc->ret[2]);
714
715 if (arglen > N_REGISTER_ARGS)
716 kfree(desc->extra_arg_buf);
717 if (ret < 0)
718 return scm_remap_error(ret);
719 return 0;
720}
721EXPORT_SYMBOL(scm_call2);
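
/*
 * Illustrative sketch (editorial addition) of the cache-maintenance contract
 * described above.  dmac_flush_range()/dmac_inv_range() from
 * <asm/cacheflush.h> are assumed available (dmac_inv_range() is already used
 * by scm_inv_range() above); EXAMPLE_SVC_ID/EXAMPLE_CMD_ID and the payload
 * meaning are placeholders.
 */
#if 0
static int example_scm_call2(void *payload, size_t size)
{
	int ret;
	struct scm_desc desc = {
		.args[0] = virt_to_phys(payload),
		.args[1] = size,
		.arginfo = SCM_ARGS(2),
	};

	/* Flush so the secure world sees current data. */
	dmac_flush_range(payload, payload + size);

	ret = scm_call2(SCM_SIP_FNID(EXAMPLE_SVC_ID, EXAMPLE_CMD_ID), &desc);

	/* Invalidate before reading anything the secure world wrote back. */
	dmac_inv_range(payload, payload + size);

	/* Secure-world return values, if any, are in desc.ret[0..2]. */
	return ret;
}
#endif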
722
723/**
724 * scm_call2_atomic() - Invoke a syscall in the secure world
725 *
726 * Similar to scm_call2 except that this can be invoked in atomic context.
727 * There is also no retry mechanism implemented. Please ensure that the
728 * secure world syscall can be executed in such a context and can complete
729 * in a timely manner.
730 */
731int scm_call2_atomic(u32 fn_id, struct scm_desc *desc)
732{
733 int arglen = desc->arginfo & 0xf;
734 int ret;
735 u64 x0;
736
737 if (unlikely(!is_scm_armv8()))
738 return -ENODEV;
739
740 ret = allocate_extra_arg_buffer(desc, GFP_ATOMIC);
741 if (ret)
742 return ret;
743
744 x0 = fn_id | BIT(SMC_ATOMIC_SYSCALL) | scm_version_mask;
745
746 if (scm_version == SCM_ARMV8_64)
747 ret = __scm_call_armv8_64(x0, desc->arginfo, desc->args[0],
748 desc->args[1], desc->args[2],
749 desc->x5, &desc->ret[0],
750 &desc->ret[1], &desc->ret[2]);
751 else
752 ret = __scm_call_armv8_32(x0, desc->arginfo, desc->args[0],
753 desc->args[1], desc->args[2],
754 desc->x5, &desc->ret[0],
755 &desc->ret[1], &desc->ret[2]);
756 if (ret < 0)
757 pr_err("scm_call failed: func id %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
758 x0, ret, desc->ret[0],
759 desc.ret[1], desc.ret[2]);
760
761 if (arglen > N_REGISTER_ARGS)
762 kfree(desc->extra_arg_buf);
763 if (ret < 0)
764 return scm_remap_error(ret);
765 return ret;
766}
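
/*
 * Illustrative sketch (editorial addition): an atomic-context call, mirroring
 * scm_io_read() near the end of this file.  SCM_SVC_IO/SCM_IO_READ are the
 * IDs defined further below.
 */
#if 0
static u32 example_atomic_read(phys_addr_t addr)
{
	struct scm_desc desc = {
		.args[0] = addr,
		.arginfo = SCM_ARGS(1),
	};

	/* No retry loop and no sleeping allocation, so safe in atomic context. */
	scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_IO, SCM_IO_READ), &desc);
	return desc.ret[0];
}
#endif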
767
768/**
769 * scm_call() - Send an SCM command
770 * @svc_id: service identifier
771 * @cmd_id: command identifier
772 * @cmd_buf: command buffer
773 * @cmd_len: length of the command buffer
774 * @resp_buf: response buffer
775 * @resp_len: length of the response buffer
776 *
777 * Sends a command to the SCM and waits for the command to finish processing.
778 *
779 * A note on cache maintenance:
780 * Note that any buffers that are expected to be accessed by the secure world
781 * must be flushed before invoking scm_call and invalidated in the cache
782 * immediately after scm_call returns. Cache maintenance on the command and
783 * response buffers is taken care of by scm_call; however, callers are
784 * responsible for any other cached buffers passed over to the secure world.
785 */
786int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
787 void *resp_buf, size_t resp_len)
788{
789 struct scm_command *cmd;
790 int ret;
791 size_t len = SCM_BUF_LEN(cmd_len, resp_len);
792
793 if (len == 0 || PAGE_ALIGN(len) < len)
794 return -EINVAL;
795
796 cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL);
797 if (!cmd)
798 return -ENOMEM;
799
800 ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf,
801 resp_len, cmd, len);
802 if (unlikely(ret == SCM_EBUSY))
803 ret = _scm_call_retry(svc_id, cmd_id, cmd_buf, cmd_len,
804 resp_buf, resp_len, cmd, PAGE_ALIGN(len));
805 kfree(cmd);
806 return ret;
807}
808EXPORT_SYMBOL(scm_call);
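
/*
 * Illustrative sketch (editorial addition) of a legacy scm_call() user; it
 * mirrors scm_get_feat_version() further below, which issues the same
 * request/response pair.
 */
#if 0
static u32 example_legacy_feat_version(u32 feat)
{
	u32 version = 0;

	/* scm_call() copies the request in and the response back out. */
	if (scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD, &feat, sizeof(feat),
		     &version, sizeof(version)))
		return 0;

	return version;
}
#endif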
809
810#define SCM_CLASS_REGISTER (0x2 << 8)
811#define SCM_MASK_IRQS BIT(5)
812#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
813 SCM_CLASS_REGISTER | \
814 SCM_MASK_IRQS | \
815 (n & 0xf))
816
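/*
 * Editorial note on the encoding above: SCM_ATOMIC(svc, cmd, n) packs an
 * atomic request into a single register value --
 *
 *	bits [31:22]	svc		(svc << 10, shifted up by 12)
 *	bits [21:12]	cmd & 0x3ff
 *	bit       9	SCM_CLASS_REGISTER	(0x2 << 8)
 *	bit       5	SCM_MASK_IRQS
 *	bits  [3:0]	n, the argument count
 *
 * e.g. SCM_ATOMIC(svc, cmd, 1) == (((svc << 10) | (cmd & 0x3ff)) << 12)
 *				    | 0x200 | 0x20 | 0x1.
 */
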
817/**
818 * scm_call_atomic1() - Send an atomic SCM command with one argument
819 * @svc_id: service identifier
820 * @cmd_id: command identifier
821 * @arg1: first argument
822 *
823 * This shall only be used with commands that are guaranteed to be
824 * uninterruptible, atomic and SMP safe.
825 */
826s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
827{
828 int context_id;
829 register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
830 register u32 r1 asm("r1") = (uintptr_t)&context_id;
831 register u32 r2 asm("r2") = arg1;
832
833 asm volatile(
834 __asmeq("%0", R0_STR)
835 __asmeq("%1", R0_STR)
836 __asmeq("%2", R1_STR)
837 __asmeq("%3", R2_STR)
838#ifdef REQUIRES_SEC
839 ".arch_extension sec\n"
840#endif
841 "smc #0\n"
842 : "=r" (r0)
843 : "r" (r0), "r" (r1), "r" (r2)
844 : "r3");
845 return r0;
846}
847EXPORT_SYMBOL(scm_call_atomic1);
848
849/**
850 * scm_call_atomic1_1() - SCM command with one argument and one return value
851 * @svc_id: service identifier
852 * @cmd_id: command identifier
853 * @arg1: first argument
854 * @ret1: first return value
855 *
856 * This shall only be used with commands that are guaranteed to be
857 * uninterruptible, atomic and SMP safe.
858 */
859s32 scm_call_atomic1_1(u32 svc, u32 cmd, u32 arg1, u32 *ret1)
860{
861 int context_id;
862 register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
863 register u32 r1 asm("r1") = (uintptr_t)&context_id;
864 register u32 r2 asm("r2") = arg1;
865
866 asm volatile(
867 __asmeq("%0", R0_STR)
868 __asmeq("%1", R1_STR)
869 __asmeq("%2", R0_STR)
870 __asmeq("%3", R1_STR)
871 __asmeq("%4", R2_STR)
872#ifdef REQUIRES_SEC
873 ".arch_extension sec\n"
874#endif
875 "smc #0\n"
876 : "=r" (r0), "=r" (r1)
877 : "r" (r0), "r" (r1), "r" (r2)
878 : "r3");
879 if (ret1)
880 *ret1 = r1;
881 return r0;
882}
883EXPORT_SYMBOL(scm_call_atomic1_1);
884
885/**
886 * scm_call_atomic2() - Send an atomic SCM command with two arguments
887 * @svc_id: service identifier
888 * @cmd_id: command identifier
889 * @arg1: first argument
890 * @arg2: second argument
891 *
892 * This shall only be used with commands that are guaranteed to be
893 * uninterruptible, atomic and SMP safe.
894 */
895s32 scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
896{
897 int context_id;
898 register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2);
899 register u32 r1 asm("r1") = (uintptr_t)&context_id;
900 register u32 r2 asm("r2") = arg1;
901 register u32 r3 asm("r3") = arg2;
902
903 asm volatile(
904 __asmeq("%0", R0_STR)
905 __asmeq("%1", R0_STR)
906 __asmeq("%2", R1_STR)
907 __asmeq("%3", R2_STR)
908 __asmeq("%4", R3_STR)
909#ifdef REQUIRES_SEC
910 ".arch_extension sec\n"
911#endif
912 "smc #0\n"
913 : "=r" (r0)
914 : "r" (r0), "r" (r1), "r" (r2), "r" (r3));
915 return r0;
916}
917EXPORT_SYMBOL(scm_call_atomic2);
918
919/**
920 * scm_call_atomic3() - Send an atomic SCM command with three arguments
921 * @svc_id: service identifier
922 * @cmd_id: command identifier
923 * @arg1: first argument
924 * @arg2: second argument
925 * @arg3: third argument
926 *
927 * This shall only be used with commands that are guaranteed to be
928 * uninterruptible, atomic and SMP safe.
929 */
930s32 scm_call_atomic3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3)
931{
932 int context_id;
933 register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 3);
934 register u32 r1 asm("r1") = (uintptr_t)&context_id;
935 register u32 r2 asm("r2") = arg1;
936 register u32 r3 asm("r3") = arg2;
937 register u32 r4 asm("r4") = arg3;
938
939 asm volatile(
940 __asmeq("%0", R0_STR)
941 __asmeq("%1", R0_STR)
942 __asmeq("%2", R1_STR)
943 __asmeq("%3", R2_STR)
944 __asmeq("%4", R3_STR)
945 __asmeq("%5", R4_STR)
946#ifdef REQUIRES_SEC
947 ".arch_extension sec\n"
948#endif
949 "smc #0\n"
950 : "=r" (r0)
951 : "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4));
952 return r0;
953}
954EXPORT_SYMBOL(scm_call_atomic3);
955
956s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
957 u32 arg3, u32 arg4, u32 *ret1, u32 *ret2)
958{
959 int ret;
960 int context_id;
961 register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 4);
962 register u32 r1 asm("r1") = (uintptr_t)&context_id;
963 register u32 r2 asm("r2") = arg1;
964 register u32 r3 asm("r3") = arg2;
965 register u32 r4 asm("r4") = arg3;
966 register u32 r5 asm("r5") = arg4;
967
968 asm volatile(
969 __asmeq("%0", R0_STR)
970 __asmeq("%1", R1_STR)
971 __asmeq("%2", R2_STR)
972 __asmeq("%3", R0_STR)
973 __asmeq("%4", R1_STR)
974 __asmeq("%5", R2_STR)
975 __asmeq("%6", R3_STR)
976#ifdef REQUIRES_SEC
977 ".arch_extension sec\n"
978#endif
979 "smc #0\n"
980 : "=r" (r0), "=r" (r1), "=r" (r2)
981 : "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5));
982 ret = r0;
983 if (ret1)
984 *ret1 = r1;
985 if (ret2)
986 *ret2 = r2;
987 return r0;
988}
989EXPORT_SYMBOL(scm_call_atomic4_3);
990
991/**
992 * scm_call_atomic5_3() - SCM command with five arguments and three return values
993 * @svc_id: service identifier
994 * @cmd_id: command identifier
995 * @arg1: first argument
996 * @arg2: second argument
997 * @arg3: third argument
998 * @arg4: fourth argument
999 * @arg5: fifth argument
1000 * @ret1: first return value
1001 * @ret2: second return value
1002 * @ret3: third return value
1003 *
1004 * This shall only be used with commands that are guaranteed to be
1005 * uninterruptible, atomic and SMP safe.
1006 */
1007s32 scm_call_atomic5_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
1008 u32 arg3, u32 arg4, u32 arg5, u32 *ret1, u32 *ret2, u32 *ret3)
1009{
1010 int ret;
1011 int context_id;
1012 register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 5);
1013 register u32 r1 asm("r1") = (uintptr_t)&context_id;
1014 register u32 r2 asm("r2") = arg1;
1015 register u32 r3 asm("r3") = arg2;
1016 register u32 r4 asm("r4") = arg3;
1017 register u32 r5 asm("r5") = arg4;
1018 register u32 r6 asm("r6") = arg5;
1019
1020 asm volatile(
1021 __asmeq("%0", R0_STR)
1022 __asmeq("%1", R1_STR)
1023 __asmeq("%2", R2_STR)
1024 __asmeq("%3", R3_STR)
1025 __asmeq("%4", R0_STR)
1026 __asmeq("%5", R1_STR)
1027 __asmeq("%6", R2_STR)
1028 __asmeq("%7", R3_STR)
1029#ifdef REQUIRES_SEC
1030 ".arch_extension sec\n"
1031#endif
1032 "smc #0\n"
1033 : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
1034 : "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5),
1035 "r" (r6));
1036 ret = r0;
1037
1038 if (ret1)
1039 *ret1 = r1;
1040 if (ret2)
1041 *ret2 = r2;
1042 if (ret3)
1043 *ret3 = r3;
1044 return r0;
1045}
1046EXPORT_SYMBOL(scm_call_atomic5_3);
1047
1048u32 scm_get_version(void)
1049{
1050 int context_id;
1051 static u32 version = -1;
1052 register u32 r0 asm("r0");
1053 register u32 r1 asm("r1");
1054
1055 if (version != -1)
1056 return version;
1057
1058 mutex_lock(&scm_lock);
1059
1060 r0 = 0x1 << 8;
1061 r1 = (uintptr_t)&context_id;
1062 do {
1063 asm volatile(
1064 __asmeq("%0", R0_STR)
1065 __asmeq("%1", R1_STR)
1066 __asmeq("%2", R0_STR)
1067 __asmeq("%3", R1_STR)
1068#ifdef REQUIRES_SEC
1069 ".arch_extension sec\n"
1070#endif
1071 "smc #0\n"
1072 : "=r" (r0), "=r" (r1)
1073 : "r" (r0), "r" (r1)
1074 : "r2", "r3");
1075 } while (r0 == SCM_INTERRUPTED);
1076
1077 version = r1;
1078 mutex_unlock(&scm_lock);
1079
1080 return version;
1081}
1082EXPORT_SYMBOL(scm_get_version);
1083
1084#define SCM_IO_READ 0x1
1085#define SCM_IO_WRITE 0x2
1086
1087u32 scm_io_read(phys_addr_t address)
1088{
1089 if (!is_scm_armv8()) {
1090 return scm_call_atomic1(SCM_SVC_IO, SCM_IO_READ, address);
1091 } else {
1092 struct scm_desc desc = {
1093 .args[0] = address,
1094 .arginfo = SCM_ARGS(1),
1095 };
1096 scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_IO, SCM_IO_READ), &desc);
1097 return desc.ret[0];
1098 }
1099}
1100EXPORT_SYMBOL(scm_io_read);
1101
1102int scm_io_write(phys_addr_t address, u32 val)
1103{
1104 int ret;
1105
1106 if (!is_scm_armv8()) {
1107 ret = scm_call_atomic2(SCM_SVC_IO, SCM_IO_WRITE, address, val);
1108 } else {
1109 struct scm_desc desc = {
1110 .args[0] = address,
1111 .args[1] = val,
1112 .arginfo = SCM_ARGS(2),
1113 };
1114 ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_IO, SCM_IO_WRITE),
1115 &desc);
1116 }
1117 return ret;
1118}
1119EXPORT_SYMBOL(scm_io_write);
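
/*
 * Illustrative sketch (editorial addition): a read-modify-write of a register
 * that only the secure world may access, using the two wrappers above.
 * EXAMPLE_REG_PHYS is a placeholder physical address.
 */
#if 0
static void example_secure_register_update(void)
{
	u32 val;

	val = scm_io_read(EXAMPLE_REG_PHYS);
	val |= BIT(0);
	if (scm_io_write(EXAMPLE_REG_PHYS, val))
		pr_err("scm: secure register write failed\n");
}
#endif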
1120
1121int scm_is_call_available(u32 svc_id, u32 cmd_id)
1122{
1123 int ret;
1124 struct scm_desc desc = {0};
1125
1126 if (!is_scm_armv8()) {
1127 u32 ret_val = 0;
1128 u32 svc_cmd = (svc_id << 10) | cmd_id;
1129
1130 ret = scm_call(SCM_SVC_INFO, IS_CALL_AVAIL_CMD, &svc_cmd,
1131 sizeof(svc_cmd), &ret_val, sizeof(ret_val));
1132 if (ret)
1133 return ret;
1134
1135 return ret_val;
1136 }
1137 desc.arginfo = SCM_ARGS(1);
1138 desc.args[0] = SCM_SIP_FNID(svc_id, cmd_id);
1139 ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, IS_CALL_AVAIL_CMD), &desc);
1140 if (ret)
1141 return ret;
1142
1143 return desc.ret[0];
1144}
1145EXPORT_SYMBOL(scm_is_call_available);
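
/*
 * Illustrative sketch (editorial addition): probing before use, as
 * scm_get_feat_version() below does.  A positive return value means the
 * secure world implements the call, zero means it does not, and negative
 * values are probe errors.  The IDs are placeholders.
 */
#if 0
static bool example_call_supported(void)
{
	return scm_is_call_available(EXAMPLE_SVC_ID, EXAMPLE_CMD_ID) > 0;
}
#endif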
1146
1147#define GET_FEAT_VERSION_CMD 3
1148int scm_get_feat_version(u32 feat)
1149{
1150 struct scm_desc desc = {0};
1151 int ret;
1152
1153 if (!is_scm_armv8()) {
1154 if (scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD)) {
1155 u32 version;
1156 if (!scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD, &feat,
1157 sizeof(feat), &version, sizeof(version)))
1158 return version;
1159 }
1160 return 0;
1161 }
1162
1163 ret = scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD);
1164 if (ret <= 0)
1165 return 0;
1166
1167 desc.args[0] = feat;
1168 desc.arginfo = SCM_ARGS(1);
1169 ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, GET_FEAT_VERSION_CMD),
1170 &desc);
1171 if (!ret)
1172 return desc.ret[0];
1173
1174 return 0;
1175}
1176EXPORT_SYMBOL(scm_get_feat_version);
1177
1178#define RESTORE_SEC_CFG 2
1179int scm_restore_sec_cfg(u32 device_id, u32 spare, int *scm_ret)
1180{
1181 struct scm_desc desc = {0};
1182 int ret;
1183 struct restore_sec_cfg {
1184 u32 device_id;
1185 u32 spare;
1186 } cfg;
1187
1188 cfg.device_id = device_id;
1189 cfg.spare = spare;
1190
1191 if (IS_ERR_OR_NULL(scm_ret))
1192 return -EINVAL;
1193
1194 if (!is_scm_armv8())
1195 return scm_call(SCM_SVC_MP, RESTORE_SEC_CFG, &cfg, sizeof(cfg),
1196 scm_ret, sizeof(*scm_ret));
1197
1198 desc.args[0] = device_id;
1199 desc.args[1] = spare;
1200 desc.arginfo = SCM_ARGS(2);
1201
1202 ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP, RESTORE_SEC_CFG), &desc);
1203 if (ret)
1204 return ret;
1205
1206 *scm_ret = desc.ret[0];
1207 return 0;
1208}
1209EXPORT_SYMBOL(scm_restore_sec_cfg);
1210
1211/*
1212 * SCM call command ID to check secure mode
1213 * Return zero for a secure device.
1214 * Return one for a non-secure device, or for a secure
1215 * device with debug enabled.
1216 */
1217#define TZ_INFO_GET_SECURE_STATE 0x4
1218bool scm_is_secure_device(void)
1219{
1220 struct scm_desc desc = {0};
1221 int ret = 0, resp;
1222
1223 desc.args[0] = 0;
1224 desc.arginfo = 0;
1225 if (!is_scm_armv8()) {
1226 ret = scm_call(SCM_SVC_INFO, TZ_INFO_GET_SECURE_STATE, NULL,
1227 0, &resp, sizeof(resp));
1228 } else {
1229 ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO,
1230 TZ_INFO_GET_SECURE_STATE),
1231 &desc);
1232 resp = desc.ret[0];
1233 }
1234
1235 if (ret) {
1236 pr_err("%s: SCM call failed\n", __func__);
1237 return false;
1238 }
1239
1240 if ((resp & BIT(0)) || (resp & BIT(2)))
1241 return true;
1242 else
1243 return false;
1244}
1245EXPORT_SYMBOL(scm_is_secure_device);