/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
 *     * Neither the name of The Linux Foundation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <reg.h>
#include <debug.h>
#include <endian.h>
#include <stdlib.h>
#include <arch/ops.h>
#include <platform.h>
#include <platform/iomap.h>
#include <clock.h>
#include <platform/clock.h>
#include <crypto5_eng.h>

#define CLEAR_STATUS(dev) crypto_write_reg(&dev->bam, CRYPTO_STATUS(dev->base), 0, BAM_DESC_UNLOCK_FLAG)
#define CONFIG_WRITE(dev, val) crypto_write_reg(&dev->bam, CRYPTO_CONFIG(dev->base), val, BAM_DESC_LOCK_FLAG)
#define REG_WRITE(dev, addr, val) crypto_write_reg(&dev->bam, addr, val, 0)

#define ADD_WRITE_CE(dev, addr, val) crypto_add_cmd_element(dev, addr, val)

#define ADD_CMD_DESC(dev, flags) crypto_add_cmd_desc(dev, flags)
#define ADD_READ_DESC(bam, buf_addr, buf_size, flags) bam_add_desc(bam, CRYPTO_READ_PIPE_INDEX, buf_addr, buf_size, flags)
#define ADD_WRITE_DESC(bam, buf_addr, buf_size, flags) bam_add_desc(bam, CRYPTO_WRITE_PIPE_INDEX, buf_addr, buf_size, flags)

/* Wrapped in do { } while (0) so the two statements stay together
 * even if the macro is used in an unbraced if/else body.
 */
#define CE_INIT(dev) do { dev->ce_array_index = 0; dev->cd_start = 0; } while (0)

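/* Descriptor FIFOs are DMA'd by the BAM and cache-maintained at
 * CACHE_LINE granularity, so the buffer is aligned to the LCM of the
 * cache line size and the descriptor size, and its length is rounded
 * up to a whole number of cache lines.
 */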
static struct bam_desc *crypto_allocate_fifo(uint32_t size)
{
	struct bam_desc *ptr;

	ptr = (struct bam_desc *) memalign(lcm(CACHE_LINE, BAM_DESC_SIZE),
					   ROUNDUP(size * BAM_DESC_SIZE, CACHE_LINE));

	if (ptr == NULL)
		dprintf(CRITICAL, "Could not allocate fifo buffer\n");

	return ptr;
}

static struct output_dump *crypto_allocate_dump_buffer(void)
{
	struct output_dump *ptr;

	ptr = (struct output_dump *) memalign(lcm(CACHE_LINE, CRYPTO_BURST_LEN),
					      ROUNDUP(sizeof(struct output_dump), CACHE_LINE));

	if (ptr == NULL)
		dprintf(CRITICAL, "Could not allocate output dump buffer\n");

	return ptr;
}

static struct cmd_element *crypto_allocate_ce_array(uint32_t size)
{
	struct cmd_element *ptr;

	ptr = (struct cmd_element *) memalign(CACHE_LINE,
					      ROUNDUP(size * sizeof(struct cmd_element), CACHE_LINE));

	if (ptr == NULL)
		dprintf(CRITICAL, "Could not allocate ce array buffer\n");

	return ptr;
}

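/* Kick the pipe to notify the BAM of newly queued descriptors, block
 * until they have been processed, then sync the software read offset
 * of the circular FIFO with the hardware.
 */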
static void crypto_wait_for_cmd_exec(struct bam_instance *bam_core,
				     uint32_t num_desc,
				     uint8_t pipe)
{
	/* Create a read/write event to notify the peripheral of the added desc. */
	bam_sys_gen_event(bam_core, pipe, num_desc);

	/* Wait for the descriptors to be processed */
	bam_wait_for_interrupt(bam_core, pipe, P_PRCSD_DESC_EN_MASK);

	/* Read offset update for the circular FIFO */
	bam_read_offset_update(bam_core, pipe);
}

static void crypto_wait_for_data(struct bam_instance *bam, uint32_t pipe_num)
{
	/* Wait for the descriptors to be processed */
	bam_wait_for_interrupt(bam, pipe_num, P_PRCSD_DESC_EN_MASK);

	/* Read offset update for the circular FIFO */
	bam_read_offset_update(bam, pipe_num);
}

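/* Write a single CE register through the BAM command pipe: build one
 * write-type command element on the stack, queue it as a command
 * descriptor, and wait for it to execute. Returns the value written;
 * a descriptor failure is only reported on the console.
 */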
static uint32_t crypto_write_reg(struct bam_instance *bam_core,
				 uint32_t reg_addr,
				 uint32_t val,
				 uint8_t flags)
{
	uint32_t ret = 0;
	struct cmd_element cmd_list_ptr;

	bam_add_cmd_element(&cmd_list_ptr, reg_addr, val, CE_WRITE_TYPE);

	/* Enqueue the desc for the above command */
	ret = bam_add_one_desc(bam_core,
			       CRYPTO_WRITE_PIPE_INDEX,
			       (unsigned char *)PA((addr_t)&cmd_list_ptr),
			       BAM_CE_SIZE,
			       BAM_DESC_CMD_FLAG | BAM_DESC_INT_FLAG | flags);

	if (ret)
	{
		dprintf(CRITICAL,
			"CRYPTO_WRITE_REG: Reg write failed. reg addr = %x\n",
			reg_addr);
		goto crypto_write_reg_err;
	}

	crypto_wait_for_cmd_exec(bam_core, 1, CRYPTO_WRITE_PIPE_INDEX);

crypto_write_reg_err:
	return val;
}

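/* Append one write-type command element to the device's CE array and
 * flush it to memory so the BAM sees it. The element is not queued
 * until crypto_add_cmd_desc() wraps the accumulated elements in a
 * command descriptor.
 */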
static void crypto_add_cmd_element(struct crypto_dev *dev,
				   uint32_t addr,
				   uint32_t val)
{
	struct cmd_element *ptr = dev->ce_array;

	bam_add_cmd_element(&(ptr[dev->ce_array_index]), addr, val, CE_WRITE_TYPE);

	arch_clean_invalidate_cache_range((addr_t) &(ptr[dev->ce_array_index]), sizeof(struct cmd_element));

	dev->ce_array_index++;
}

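/* Queue every command element added since the last call (from
 * cd_start up to ce_array_index) as one command descriptor on the
 * write pipe, then advance cd_start past them.
 */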
static void crypto_add_cmd_desc(struct crypto_dev *dev, uint8_t flags)
{
	uint32_t ce_size;
	uint32_t start = (uint32_t)&(dev->ce_array[dev->cd_start]);
	uint32_t ret;

	ce_size = (uint32_t)&(dev->ce_array[dev->ce_array_index]) - start;

	ret = bam_add_one_desc(&dev->bam,
			       CRYPTO_WRITE_PIPE_INDEX,
			       (unsigned char *)start,
			       ce_size,
			       BAM_DESC_CMD_FLAG | flags);

	if (ret)
	{
		dprintf(CRITICAL, "CRYPTO_ADD_DESC: Adding desc failed\n");
	}

	/* Update the CD ptr. */
	dev->cd_start = dev->ce_array_index;
}

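/* Bring up the BAM and both system pipes. The read pipe returns
 * results (BAM-to-system) and the write pipe carries commands and
 * input data (system-to-BAM); each uses the descriptor FIFO allocated
 * for it in crypto5_init_params().
 */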
static int crypto_bam_init(struct crypto_dev *dev)
{
	uint32_t bam_ret;

	/* BAM Init. */
	bam_init(&dev->bam);

	/* Initialize BAM CRYPTO read pipe */
	bam_sys_pipe_init(&dev->bam, CRYPTO_READ_PIPE_INDEX);

	/* Init read fifo */
	bam_ret = bam_pipe_fifo_init(&dev->bam, CRYPTO_READ_PIPE_INDEX);

	if (bam_ret)
	{
		dprintf(CRITICAL, "CRYPTO: BAM Read FIFO init error\n");
		bam_ret = CRYPTO_ERR_FAIL;
		goto crypto_bam_init_err;
	}

	/* Initialize BAM CRYPTO write pipe */
	bam_sys_pipe_init(&dev->bam, CRYPTO_WRITE_PIPE_INDEX);

	/* Init write fifo */
	bam_ret = bam_pipe_fifo_init(&dev->bam, CRYPTO_WRITE_PIPE_INDEX);

	if (bam_ret)
	{
		dprintf(CRITICAL, "CRYPTO: BAM Write FIFO init error\n");
		bam_ret = CRYPTO_ERR_FAIL;
		goto crypto_bam_init_err;
	}

	bam_ret = CRYPTO_ERR_NONE;

crypto_bam_init_err:
	return bam_ret;
}

static void crypto_reset(struct crypto_dev *dev)
{
	clock_config_ce(dev->instance);
}

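/* Fill in the device state from the platform-supplied parameters:
 * register bases, BAM pipe numbers and execution environment, FIFO
 * and CE-array allocations, and the maximum descriptor length, which
 * is rounded down to the burst length because of the Crypto 5.0.0
 * alignment bug noted below.
 */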
void crypto5_init_params(struct crypto_dev *dev, struct crypto_init_params *params)
{
	dev->base = params->crypto_base;
	dev->instance = params->crypto_instance;

	dev->bam.base = params->bam_base;

	/* Set Read pipe params. */
	dev->bam.pipe[CRYPTO_READ_PIPE_INDEX].pipe_num = params->pipes.read_pipe;
	/* System consumer */
	dev->bam.pipe[CRYPTO_READ_PIPE_INDEX].trans_type = BAM2SYS;
	dev->bam.pipe[CRYPTO_READ_PIPE_INDEX].fifo.size = params->read_fifo_size;
	dev->bam.pipe[CRYPTO_READ_PIPE_INDEX].fifo.head = crypto_allocate_fifo(params->read_fifo_size);

	/* Set Write pipe params. */
	dev->bam.pipe[CRYPTO_WRITE_PIPE_INDEX].pipe_num = params->pipes.write_pipe;
	/* System producer */
	dev->bam.pipe[CRYPTO_WRITE_PIPE_INDEX].trans_type = SYS2BAM;
	dev->bam.pipe[CRYPTO_WRITE_PIPE_INDEX].fifo.size = params->write_fifo_size;
	dev->bam.pipe[CRYPTO_WRITE_PIPE_INDEX].fifo.head = crypto_allocate_fifo(params->write_fifo_size);

	dev->bam.threshold = CRYPTO_MAX_THRESHOLD;

	dev->bam.ee = params->bam_ee;

	/* A H/W bug on Crypto 5.0.0 enforces a rule that the desc lengths must be burst aligned. */
	dev->bam.max_desc_len = ROUNDDOWN(BAM_NDP_MAX_DESC_DATA_LEN, CRYPTO_BURST_LEN);

	dev->dump = crypto_allocate_dump_buffer();
	dev->ce_array = crypto_allocate_ce_array(params->num_ce);
	dev->ce_array_index = 0;
	dev->cd_start = 0;
}

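/* One-time engine bring-up: configure the CE clocks, initialize the
 * BAM, then program CRYPTO_CONFIG twice — first with a reset value
 * that selects the pipe set, then with the working configuration (all
 * irqs masked since the driver polls, BAM-specific settings, and
 * little-endian mode for selected registers).
 */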
void crypto5_init(struct crypto_dev *dev)
{
	uint32_t config = CRYPTO_RESET_CONFIG
			| (dev->bam.pipe[CRYPTO_READ_PIPE_INDEX].pipe_num >> 1) << PIPE_SET_SELECT_SHIFT;

	/* Configure CE clocks. */
	clock_config_ce(dev->instance);

	/* Setup BAM */
	if (crypto_bam_init(dev) != CRYPTO_ERR_NONE)
	{
		dprintf(CRITICAL, "CRYPTO: BAM init error\n");
		goto crypto_init_err;
	}

	/* Write basic config to CE.
	 * Note: This setting will be changed to be set from TZ.
	 */
	writel(config, CRYPTO_CONFIG(dev->base));

	config = 0;

	/* Setup config reg. */
	/* Mask all irqs. */
	config |= MASK_ERR_INTR | MASK_OP_DONE_INTR |
		  MASK_DIN_INTR | MASK_DOUT_INTR;
	/* Program BAM specific crypto settings. */
	config |= HIGH_SPD_IN_EN_N
		| ((dev->bam.pipe[CRYPTO_WRITE_PIPE_INDEX].pipe_num >> 1) << PIPE_SET_SELECT_SHIFT)
		| MAX_QUEUED_REQS
		| REQ_SIZE;
	/* Use a few registers in little endian mode. */
	config |= LITTLE_ENDIAN_MODE;

	CONFIG_WRITE(dev, config);

crypto_init_err:
	return;
}

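/* Translate the auth algorithm and chunk flags into an AUTH_SEG_CFG
 * value. Only SHA-256 is handled here; anything else logs an error
 * and returns 0, which callers treat as failure.
 */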
static uint32_t crypto5_get_sha_cfg(void *ctx_ptr, crypto_auth_alg_type auth_alg)
{
	crypto_SHA256_ctx *sha256_ctx = (crypto_SHA256_ctx *) ctx_ptr;
	uint32_t seg_cfg_val;

	seg_cfg_val = SEG_CFG_AUTH_ALG_SHA;

	if (auth_alg == CRYPTO_AUTH_ALG_SHA256)
	{
		seg_cfg_val |= SEG_CFG_AUTH_SIZE_SHA256;

		if (sha256_ctx->flags & CRYPTO_LAST_CHUNK)
		{
			seg_cfg_val |= SEG_CFG_LAST;
		}
	}
	else
	{
		dprintf(CRITICAL, "crypto_set_sha_ctx invalid auth algorithm\n");
		return 0;
	}

	return seg_cfg_val;
}

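/* Program the hash context into the engine as a batch of command
 * elements: segment config, the IV (byte-swapped for the first chunk,
 * raw intermediate state afterwards), the running byte counts, the
 * segment sizes padded up to a burst multiple, and finally the GO
 * bit. The descriptor is queued with the pipe lock flag and executed
 * before returning.
 */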
void crypto5_set_ctx(struct crypto_dev *dev,
		     void *ctx_ptr,
		     crypto_auth_alg_type auth_alg)
{
	crypto_SHA256_ctx *sha256_ctx = (crypto_SHA256_ctx *) ctx_ptr;
	uint32_t i = 0;
	uint32_t iv_len = SHA256_INIT_VECTOR_SIZE;
	uint32_t *auth_iv = sha256_ctx->auth_iv;
	uint32_t seg_cfg_val;
	uint32_t total_bytes_to_write = sha256_ctx->bytes_to_write;
	uint32_t bytes_to_write = total_bytes_to_write;
	uint32_t burst_mask;

	seg_cfg_val = crypto5_get_sha_cfg(ctx_ptr, auth_alg);

	if (!seg_cfg_val)
	{
		dprintf(CRITICAL, "Authentication alg config failed.\n");
		return;
	}

	/* Initialize CE pointers. */
	CE_INIT(dev);

	ADD_WRITE_CE(dev, CRYPTO_AUTH_SEG_CFG(dev->base), seg_cfg_val);

	for (i = 0; i < iv_len; i++)
	{
		if (sha256_ctx->flags & CRYPTO_FIRST_CHUNK)
			ADD_WRITE_CE(dev, CRYPTO_AUTH_IVn(dev->base, i), BE32(*(auth_iv + i)));
		else
			ADD_WRITE_CE(dev, CRYPTO_AUTH_IVn(dev->base, i), (*(auth_iv + i)));
	}

	/* Check if the transfer length is an 8-beat burst multiple. */
	burst_mask = CRYPTO_BURST_LEN - 1;
	if (bytes_to_write & burst_mask)
	{
		/* Add trailer to make it a burst multiple. */
		total_bytes_to_write = (bytes_to_write + burst_mask) & (~burst_mask);
	}

	sha256_ctx->bytes_to_write = total_bytes_to_write;

	/* Typecast with crypto_SHA1_ctx because the offset of auth_bytecnt
	 * is the same in both crypto_SHA1_ctx and crypto_SHA256_ctx.
	 */
	ADD_WRITE_CE(dev, CRYPTO_AUTH_BYTECNTn(dev->base, 0), ((crypto_SHA1_ctx *) ctx_ptr)->auth_bytecnt[0]);
	ADD_WRITE_CE(dev, CRYPTO_AUTH_BYTECNTn(dev->base, 1), ((crypto_SHA1_ctx *) ctx_ptr)->auth_bytecnt[1]);

	/* Assume no header, always. */
	ADD_WRITE_CE(dev, CRYPTO_AUTH_SEG_START(dev->base), 0);

	ADD_WRITE_CE(dev, CRYPTO_AUTH_SEG_SIZE(dev->base), bytes_to_write);
	ADD_WRITE_CE(dev, CRYPTO_SEG_SIZE(dev->base), total_bytes_to_write);
	ADD_WRITE_CE(dev, CRYPTO_GOPROC(dev->base), GOPROC_GO);

	ADD_CMD_DESC(dev, BAM_DESC_LOCK_FLAG | BAM_DESC_INT_FLAG);

	crypto_wait_for_cmd_exec(&dev->bam, 1, CRYPTO_WRITE_PIPE_INDEX);
}

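/* Stream one data chunk through the engine: queue the (burst-aligned)
 * input buffer on the write pipe and the output dump buffer on the
 * read pipe, then wait for both pipes to drain. The caches are
 * cleaned/invalidated around the DMA so the results later read from
 * dev->dump are coherent.
 */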
uint32_t crypto5_send_data(struct crypto_dev *dev,
			   void *ctx_ptr,
			   uint8_t *data_ptr)
{
	uint32_t bam_status;
	crypto_SHA256_ctx *sha256_ctx = (crypto_SHA256_ctx *) ctx_ptr;
	uint32_t wr_flags = BAM_DESC_NWD_FLAG | BAM_DESC_INT_FLAG | BAM_DESC_EOT_FLAG;
	uint32_t ret_status;

	/* A H/W bug on Crypto 5.0.0 enforces a rule that the desc lengths must be burst aligned. */
	if ((uint32_t) data_ptr & (CRYPTO_BURST_LEN - 1))
	{
		dprintf(CRITICAL, "Crypto send data failed\n");
		dprintf(CRITICAL, "Data start not aligned at burst length.\n");
		ret_status = CRYPTO_ERR_FAIL;
		goto CRYPTO_SEND_DATA_ERR;
	}

	arch_clean_invalidate_cache_range((addr_t) data_ptr, sha256_ctx->bytes_to_write);

	bam_status = ADD_WRITE_DESC(&dev->bam, data_ptr, sha256_ctx->bytes_to_write, wr_flags);

	if (bam_status)
	{
		dprintf(CRITICAL, "Crypto send data failed\n");
		ret_status = CRYPTO_ERR_FAIL;
		goto CRYPTO_SEND_DATA_ERR;
	}

	arch_clean_invalidate_cache_range((addr_t) (dev->dump), sizeof(struct output_dump));

	bam_status = ADD_READ_DESC(&dev->bam,
				   (unsigned char *)PA((addr_t)(dev->dump)),
				   sizeof(struct output_dump),
				   BAM_DESC_INT_FLAG);

	if (bam_status)
	{
		dprintf(CRITICAL, "Crypto send data failed\n");
		ret_status = CRYPTO_ERR_FAIL;
		goto CRYPTO_SEND_DATA_ERR;
	}

	crypto_wait_for_data(&dev->bam, CRYPTO_WRITE_PIPE_INDEX);

	crypto_wait_for_data(&dev->bam, CRYPTO_READ_PIPE_INDEX);

	arch_clean_invalidate_cache_range((addr_t) (dev->dump), sizeof(struct output_dump));

	ret_status = CRYPTO_ERR_NONE;

CRYPTO_SEND_DATA_ERR:

	return ret_status;
}

void crypto5_cleanup(struct crypto_dev *dev)
{
	CLEAR_STATUS(dev);

	/* Free all related memory. */
	free(dev->dump);
	free(dev->ce_array);
	free(dev->bam.pipe[CRYPTO_READ_PIPE_INDEX].fifo.head);
	free(dev->bam.pipe[CRYPTO_WRITE_PIPE_INDEX].fifo.head);
}

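/* Check the engine status registers for AXI/SW/HSD errors before
 * copying the digest words out of the dump buffer; on error the CE
 * clocks are reconfigured via crypto_reset() and CRYPTO_ERR_FAIL is
 * returned. The digest words are copied without byte-swapping.
 */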
uint32_t crypto5_get_digest(struct crypto_dev *dev,
			    uint8_t *digest_ptr,
			    crypto_auth_alg_type auth_alg)
{
	uint32_t ce_status = 0;
	uint32_t ce_status2 = 0;
	uint32_t ce_err_bmsk = 0;
	uint32_t i = 0;
	uint32_t digest_len = 0;
	uint32_t auth_iv;

	/* Check both status registers for errors. */
	ce_err_bmsk = (AXI_ERR | SW_ERR | HSD_ERR);
	ce_status = BE32(dev->dump->status);
	ce_status2 = BE32(dev->dump->status2);

	if ((ce_status & ce_err_bmsk) || (ce_status2 & AXI_EXTRA))
	{
		crypto_reset(dev);
		dprintf(CRITICAL, "crypto_get_digest status error\n");
		dprintf(CRITICAL, "status = %x status2 = %x\n", ce_status, ce_status2);
		return CRYPTO_ERR_FAIL;
	}

	/* Digest length depends on auth_alg */
	if (auth_alg == CRYPTO_AUTH_ALG_SHA1)
	{
		digest_len = SHA1_INIT_VECTOR_SIZE;
	}
	else if (auth_alg == CRYPTO_AUTH_ALG_SHA256)
	{
		digest_len = SHA256_INIT_VECTOR_SIZE;
	}

	/* Retrieve digest from CRYPTO */
	for (i = 0; i < digest_len; i++)
	{
		auth_iv = (dev->dump->auth_iv[i]);

		*((unsigned int *)digest_ptr + i) = auth_iv;
	}

	return CRYPTO_ERR_NONE;
}

void crypto5_get_ctx(struct crypto_dev *dev, void *ctx_ptr)
{
	/* Save the intermediate byte counts so hashing can resume with
	 * the next chunk.
	 */
	((crypto_SHA1_ctx *) ctx_ptr)->auth_bytecnt[0] = BE32(dev->dump->auth_bytcnt[0]);
	((crypto_SHA1_ctx *) ctx_ptr)->auth_bytecnt[1] = BE32(dev->dump->auth_bytcnt[1]);
}

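/* Upper bound on the data that can be queued in a single request:
 * each descriptor carries at most max_desc_len bytes, across the
 * write FIFO's usable slots (two slots are held back, presumably for
 * the command descriptor and the ring's one-empty-slot rule).
 */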
uint32_t crypto5_get_max_auth_blk_size(struct crypto_dev *dev)
{
	return (dev->bam.max_desc_len * (dev->bam.pipe[CRYPTO_WRITE_PIPE_INDEX].fifo.size - 2));
}