/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *   * Neither the name of The Linux Foundation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <reg.h>
#include <debug.h>
#include <endian.h>
#include <stdlib.h>
#include <arch/ops.h>
#include <platform.h>
#include <platform/iomap.h>
#include <clock.h>
#include <platform/clock.h>
#include <crypto5_eng.h>

#define CLEAR_STATUS(dev) crypto_write_reg(&dev->bam, CRYPTO_STATUS(dev->base), 0, BAM_DESC_UNLOCK_FLAG)
#define CONFIG_WRITE(dev, val) crypto_write_reg(&dev->bam, CRYPTO_CONFIG(dev->base), val, BAM_DESC_LOCK_FLAG)
#define REG_WRITE(dev, addr, val) crypto_write_reg(&dev->bam, addr, val, 0)

#ifndef CRYPTO_REG_ACCESS
#define CE_INIT(dev) do { dev->ce_array_index = 0; dev->cd_start = 0; } while (0)
#define ADD_WRITE_CE(dev, addr, val) crypto_add_cmd_element(dev, addr, val)
#define ADD_CMD_DESC(dev, flags) crypto_add_cmd_desc(dev, flags)
#define CMD_EXEC(bam, num_desc, pipe) crypto_wait_for_cmd_exec(bam, num_desc, pipe)

#define REG_WRITE_QUEUE_INIT(dev) CE_INIT(dev)
#define REG_WRITE_QUEUE(dev, addr, val) ADD_WRITE_CE(dev, addr, val)
#define REG_WRITE_QUEUE_DONE(dev, flags) ADD_CMD_DESC(dev, flags)
#define REG_WRITE_EXEC(bam, num_desc, pipe) CMD_EXEC(bam, num_desc, pipe)
#else
#define REG_WRITE_QUEUE_INIT(dev) /* nop */
#define REG_WRITE_QUEUE(dev, addr, val) writel(val, addr)
#define REG_WRITE_QUEUE_DONE(dev, flags) /* nop */
#define REG_WRITE_EXEC(bam, num_desc, pipe) /* nop */
#endif

#define ADD_READ_DESC(bam, buf_addr, buf_size, flags) bam_add_desc(bam, CRYPTO_READ_PIPE_INDEX, buf_addr, buf_size, flags)
#define ADD_WRITE_DESC(bam, buf_addr, buf_size, flags) bam_add_desc(bam, CRYPTO_WRITE_PIPE_INDEX, buf_addr, buf_size, flags)
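
/*
 * Illustrative note (an editor's sketch, not part of the original driver):
 * with CRYPTO_REG_ACCESS undefined, register programming is batched into
 * command elements and pushed to the crypto core as one BAM command
 * descriptor. This is the pattern used by crypto5_set_ctx() and
 * crypto5_set_auth_cfg() below:
 *
 *   REG_WRITE_QUEUE_INIT(dev);                  // reset the CE array index
 *   REG_WRITE_QUEUE(dev, CRYPTO_GOPROC(dev->base), GOPROC_GO);
 *   REG_WRITE_QUEUE_DONE(dev, BAM_DESC_LOCK_FLAG | BAM_DESC_INT_FLAG);
 *   REG_WRITE_EXEC(&dev->bam, 1, CRYPTO_WRITE_PIPE_INDEX);
 *
 * With CRYPTO_REG_ACCESS defined, REG_WRITE_QUEUE() degenerates to a direct
 * writel() per register and the other three macros compile to nothing.
 */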

static struct bam_desc *crypto_allocate_fifo(uint32_t size)
{
	struct bam_desc *ptr;

	ptr = (struct bam_desc *) memalign(lcm(CACHE_LINE, BAM_DESC_SIZE),
					   ROUNDUP(size * BAM_DESC_SIZE, CACHE_LINE));

	if (ptr == NULL)
		dprintf(CRITICAL, "Could not allocate fifo buffer\n");

	return ptr;
}

static struct output_dump *crypto_allocate_dump_buffer(void)
{
	struct output_dump *ptr;

	ptr = (struct output_dump *) memalign(lcm(CACHE_LINE, CRYPTO_BURST_LEN),
					      ROUNDUP(sizeof(struct output_dump), CACHE_LINE));

	if (ptr == NULL)
		dprintf(CRITICAL, "Could not allocate output dump buffer\n");

	return ptr;
}

static struct cmd_element *crypto_allocate_ce_array(uint32_t size)
{
	struct cmd_element *ptr = NULL;

#ifndef CRYPTO_REG_ACCESS
	ptr = (struct cmd_element*) memalign(CACHE_LINE,
					     ROUNDUP(size * sizeof(struct cmd_element), CACHE_LINE));

	if (ptr == NULL)
		dprintf(CRITICAL, "Could not allocate ce array buffer\n");
#endif

	return ptr;
}

static void crypto_wait_for_cmd_exec(struct bam_instance *bam_core,
				     uint32_t num_desc,
				     uint8_t pipe)
{
	/* Create a read/write event to notify the peripheral of the added desc. */
	bam_sys_gen_event(bam_core, pipe, num_desc);

	/* Wait for the descriptors to be processed. */
	bam_wait_for_interrupt(bam_core, pipe, P_PRCSD_DESC_EN_MASK);

	/* Read offset update for the circular FIFO. */
	bam_read_offset_update(bam_core, pipe);
}

static void crypto_wait_for_data(struct bam_instance *bam, uint32_t pipe_num)
{
	/* Wait for the descriptors to be processed. */
	bam_wait_for_interrupt(bam, pipe_num, P_PRCSD_DESC_EN_MASK);

	/* Read offset update for the circular FIFO. */
	bam_read_offset_update(bam, pipe_num);
}

static uint32_t crypto_write_reg(struct bam_instance *bam_core,
				 uint32_t reg_addr,
				 uint32_t val,
				 uint8_t flags)
{
	uint32_t ret = 0;
	struct cmd_element cmd_list_ptr;

#ifdef CRYPTO_REG_ACCESS
	writel(val, reg_addr);
#else
	ret = (uint32_t)bam_add_cmd_element(&cmd_list_ptr, reg_addr, val, CE_WRITE_TYPE);

	arch_clean_invalidate_cache_range((addr_t)&cmd_list_ptr, sizeof(struct cmd_element));

	/* Enqueue the desc for the above command. */
	ret = bam_add_one_desc(bam_core,
			       CRYPTO_WRITE_PIPE_INDEX,
			       (unsigned char*)PA((addr_t)&cmd_list_ptr),
			       BAM_CE_SIZE,
			       BAM_DESC_CMD_FLAG | BAM_DESC_INT_FLAG | flags);

	if (ret)
	{
		dprintf(CRITICAL,
			"CRYPTO_WRITE_REG: Reg write failed. reg addr = %x\n",
			reg_addr);
		goto crypto_write_reg_err;
	}

	crypto_wait_for_cmd_exec(bam_core, 1, CRYPTO_WRITE_PIPE_INDEX);
#endif

crypto_write_reg_err:
	return ret;
}

static void crypto_add_cmd_element(struct crypto_dev *dev,
				   uint32_t addr,
				   uint32_t val)
{
	struct cmd_element *ptr = dev->ce_array;

	bam_add_cmd_element(&(ptr[dev->ce_array_index]), addr, val, CE_WRITE_TYPE);

	arch_clean_invalidate_cache_range((addr_t) &(ptr[dev->ce_array_index]), sizeof(struct cmd_element));

	dev->ce_array_index++;
}

static void crypto_add_cmd_desc(struct crypto_dev *dev, uint8_t flags)
{
	uint32_t ce_size;
	uint32_t start = (uint32_t)&(dev->ce_array[dev->cd_start]);
	uint32_t ret;

	ce_size = (uint32_t)&(dev->ce_array[dev->ce_array_index]) - start;

	ret = bam_add_one_desc(&dev->bam,
			       CRYPTO_WRITE_PIPE_INDEX,
			       (unsigned char*)start,
			       ce_size,
			       BAM_DESC_CMD_FLAG | flags);

	if (ret)
	{
		dprintf(CRITICAL, "CRYPTO_ADD_DESC: Adding desc failed\n");
	}

	/* Update the CD ptr. */
	dev->cd_start = dev->ce_array_index;
}

static int crypto_bam_init(struct crypto_dev *dev)
{
	uint32_t bam_ret;

	/* Do BAM init only if required. */
	if (dev->do_bam_init)
		bam_init(&dev->bam);

	/* Initialize BAM CRYPTO read pipe. */
	bam_sys_pipe_init(&dev->bam, CRYPTO_READ_PIPE_INDEX);

	/* Init read fifo. */
	bam_ret = bam_pipe_fifo_init(&dev->bam, CRYPTO_READ_PIPE_INDEX);

	if (bam_ret)
	{
		dprintf(CRITICAL, "CRYPTO: BAM Read FIFO init error\n");
		bam_ret = CRYPTO_ERR_FAIL;
		goto crypto_bam_init_err;
	}

	/* Initialize BAM CRYPTO write pipe. */
	bam_sys_pipe_init(&dev->bam, CRYPTO_WRITE_PIPE_INDEX);
	/* Init write fifo. */
	bam_ret = bam_pipe_fifo_init(&dev->bam, CRYPTO_WRITE_PIPE_INDEX);

	if (bam_ret)
	{
		dprintf(CRITICAL, "CRYPTO: BAM Write FIFO init error\n");
		bam_ret = CRYPTO_ERR_FAIL;
		goto crypto_bam_init_err;
	}

	bam_ret = CRYPTO_ERR_NONE;

crypto_bam_init_err:
	return bam_ret;
}

static void crypto_reset(struct crypto_dev *dev)
{
	clock_config_ce(dev->instance);
}

void crypto5_init_params(struct crypto_dev *dev, struct crypto_init_params *params)
{
	dev->base = params->crypto_base;
	dev->instance = params->crypto_instance;

	dev->bam.base = params->bam_base;
	dev->do_bam_init = params->do_bam_init;

	/* Set read pipe params. */
	dev->bam.pipe[CRYPTO_READ_PIPE_INDEX].pipe_num = params->pipes.read_pipe;
	/* System consumer */
	dev->bam.pipe[CRYPTO_READ_PIPE_INDEX].trans_type = BAM2SYS;
	dev->bam.pipe[CRYPTO_READ_PIPE_INDEX].fifo.size = params->read_fifo_size;
	dev->bam.pipe[CRYPTO_READ_PIPE_INDEX].fifo.head = crypto_allocate_fifo(params->read_fifo_size);
	dev->bam.pipe[CRYPTO_READ_PIPE_INDEX].lock_grp = params->pipes.read_pipe_grp;

	/* Set write pipe params. */
	dev->bam.pipe[CRYPTO_WRITE_PIPE_INDEX].pipe_num = params->pipes.write_pipe;
	/* System producer */
	dev->bam.pipe[CRYPTO_WRITE_PIPE_INDEX].trans_type = SYS2BAM;
	dev->bam.pipe[CRYPTO_WRITE_PIPE_INDEX].fifo.size = params->write_fifo_size;
	dev->bam.pipe[CRYPTO_WRITE_PIPE_INDEX].fifo.head = crypto_allocate_fifo(params->write_fifo_size);
	dev->bam.pipe[CRYPTO_WRITE_PIPE_INDEX].lock_grp = params->pipes.write_pipe_grp;

	dev->bam.threshold = CRYPTO_MAX_THRESHOLD;

	dev->bam.ee = params->bam_ee;

	/* A H/W bug on Crypto 5.0.0 enforces a rule that the desc lengths must be burst aligned. */
	dev->bam.max_desc_len = ROUNDDOWN(BAM_NDP_MAX_DESC_DATA_LEN, CRYPTO_BURST_LEN);

	dev->dump = crypto_allocate_dump_buffer();
	dev->ce_array = crypto_allocate_ce_array(params->num_ce);
	dev->ce_array_index = 0;
	dev->cd_start = 0;
}
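
/*
 * Illustrative board-side setup (an editor's sketch, not part of the
 * original driver; every constant below is a hypothetical placeholder,
 * since real values come from the SoC's iomap and BAM pipe assignment).
 * It fills in every field that crypto5_init_params() above reads.
 */
static void crypto5_setup_example(struct crypto_dev *dev)
{
	struct crypto_init_params params = {0};

	params.crypto_base = 0x73A000;      /* hypothetical CE register base */
	params.crypto_instance = 1;         /* instance fed to clock_config_ce() */
	params.bam_base = 0x704000;         /* hypothetical BAM register base */
	params.do_bam_init = 1;             /* this image owns BAM init */
	params.pipes.read_pipe = 3;         /* hypothetical pipe numbers */
	params.pipes.write_pipe = 2;
	params.pipes.read_pipe_grp = 0;
	params.pipes.write_pipe_grp = 0;
	params.read_fifo_size = 32;         /* descriptors per FIFO */
	params.write_fifo_size = 32;
	params.bam_ee = 0;                  /* execution environment index */
	params.num_ce = 64;                 /* command elements to allocate */

	crypto5_init_params(dev, &params);
	crypto5_init(dev);
}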

void crypto5_init(struct crypto_dev *dev)
{
	uint32_t config = CRYPTO_RESET_CONFIG
			  | (dev->bam.pipe[CRYPTO_READ_PIPE_INDEX].pipe_num >> 1) << PIPE_SET_SELECT_SHIFT;

	/* Setup BAM. */
	if (crypto_bam_init(dev) != CRYPTO_ERR_NONE)
	{
		dprintf(CRITICAL, "CRYPTO: BAM init error\n");
		goto crypto_init_err;
	}

	/* Write basic config to CE.
	 * Note: eventually this setting will be programmed from TZ.
	 */
	writel(config, CRYPTO_CONFIG(dev->base));

	config = 0;

	/* Setup config reg. */
	/* Mask all irqs. */
	config |= MASK_ERR_INTR | MASK_OP_DONE_INTR |
		  MASK_DIN_INTR | MASK_DOUT_INTR;
	/* Program BAM specific crypto settings. */
	config |= HIGH_SPD_IN_EN_N
		| ((dev->bam.pipe[CRYPTO_WRITE_PIPE_INDEX].pipe_num >> 1) << PIPE_SET_SELECT_SHIFT)
		| MAX_QUEUED_REQS
		| REQ_SIZE;
	/* Use a few registers in little endian mode. */
	config |= LITTLE_ENDIAN_MODE;

	CONFIG_WRITE(dev, config);

crypto_init_err:
	return;
}

static uint32_t crypto5_get_sha_cfg(void *ctx_ptr, crypto_auth_alg_type auth_alg)
{
	crypto_SHA256_ctx *sha256_ctx = (crypto_SHA256_ctx *) ctx_ptr;
	crypto_SHA1_ctx *sha1_ctx = (crypto_SHA1_ctx *) ctx_ptr;
	uint32_t seg_cfg_val;

	seg_cfg_val = SEG_CFG_AUTH_ALG_SHA;

	if (auth_alg == CRYPTO_AUTH_ALG_SHA1)
	{
		seg_cfg_val |= SEG_CFG_AUTH_SIZE_SHA1;

		if (sha1_ctx->flags & CRYPTO_LAST_CHUNK)
		{
			seg_cfg_val |= SEG_CFG_LAST;
		}
	}
	else if (auth_alg == CRYPTO_AUTH_ALG_SHA256)
	{
		seg_cfg_val |= SEG_CFG_AUTH_SIZE_SHA256;

		if (sha256_ctx->flags & CRYPTO_LAST_CHUNK)
		{
			seg_cfg_val |= SEG_CFG_LAST;
		}
	}
	else
	{
		dprintf(CRITICAL, "crypto5_get_sha_cfg: invalid auth algorithm\n");
		return 0;
	}

	return seg_cfg_val;
}

void crypto5_set_ctx(struct crypto_dev *dev,
		     void *ctx_ptr,
		     crypto_auth_alg_type auth_alg)
{
	crypto_SHA256_ctx *sha256_ctx = (crypto_SHA256_ctx *) ctx_ptr;
	crypto_SHA1_ctx *sha1_ctx = (crypto_SHA1_ctx *) ctx_ptr;
	uint32_t i = 0;
	uint32_t iv_len = 0;
	uint32_t *auth_iv = sha1_ctx->auth_iv;
	uint32_t seg_cfg_val;

	if (auth_alg == CRYPTO_AUTH_ALG_SHA1)
	{
		iv_len = SHA1_INIT_VECTOR_SIZE;
	}
	else if (auth_alg == CRYPTO_AUTH_ALG_SHA256)
	{
		iv_len = SHA256_INIT_VECTOR_SIZE;
	}

	seg_cfg_val = crypto5_get_sha_cfg(ctx_ptr, auth_alg);

	if (!seg_cfg_val)
	{
		dprintf(CRITICAL, "Authentication alg config failed.\n");
		return;
	}

	/* Initialize CE pointers. */
	REG_WRITE_QUEUE_INIT(dev);

	/* For authentication operation set the encryption cfg reg to 0 as per HPG. */
	REG_WRITE_QUEUE(dev, CRYPTO_ENCR_SEG_CFG(dev->base), 0);
	REG_WRITE_QUEUE(dev, CRYPTO_AUTH_SEG_CFG(dev->base), seg_cfg_val);

	for (i = 0; i < iv_len; i++)
	{
		if (sha256_ctx->flags & CRYPTO_FIRST_CHUNK)
			REG_WRITE_QUEUE(dev, CRYPTO_AUTH_IVn(dev->base, i), BE32(*(auth_iv + i)));
		else
			REG_WRITE_QUEUE(dev, CRYPTO_AUTH_IVn(dev->base, i), (*(auth_iv + i)));
	}

	/* Typecast with crypto_SHA1_ctx because the offset of auth_bytecnt
	 * is the same in both crypto_SHA1_ctx and crypto_SHA256_ctx.
	 */
	REG_WRITE_QUEUE(dev, CRYPTO_AUTH_BYTECNTn(dev->base, 0), ((crypto_SHA1_ctx *) ctx_ptr)->auth_bytecnt[0]);
	REG_WRITE_QUEUE(dev, CRYPTO_AUTH_BYTECNTn(dev->base, 1), ((crypto_SHA1_ctx *) ctx_ptr)->auth_bytecnt[1]);
}

/* Function: crypto5_set_auth_cfg
 * Arg     : dev, out ptr for the aligned buffer, ptr to the data buffer,
 *           burst mask for alignment, bytes to write, out ptr for the
 *           total byte count
 * Return  : an aligned buffer (via *buffer) in case of an unaligned
 *           data_ptr, and the total number of bytes passed to the crypto
 *           HW (via *total_bytes_to_write; includes header and trailer
 *           size).
 * Flow    : If the data buffer is aligned, we just configure the crypto
 *           auth registers for start, size of data, etc. If the buffer is
 *           unaligned, we align it to a burst (64-byte) boundary, make the
 *           number of bytes a multiple of 64 for BAM, and then configure
 *           the registers for the header/trailer settings.
 */

static void crypto5_set_auth_cfg(struct crypto_dev *dev, uint8_t **buffer,
				 uint8_t *data_ptr,
				 uint32_t burst_mask,
				 uint32_t bytes_to_write,
				 uint32_t *total_bytes_to_write)
{
	uint32_t minor_ver = 0;
	uint32_t auth_seg_start = 0;

	/* Bits 23:16 - minor version. */
	minor_ver = (readl(CRYPTO_VERSION(dev->base)) & 0x00FF0000) >> 16;

	/* A H/W bug on Crypto 5.0.0 enforces a rule that the desc lengths must
	 * be burst aligned. Here we use the header/trailer crypto register settings.
	 * buffer                : The previous 64-byte aligned address for data_ptr.
	 * CRYPTO_AUTH_SEG_START : Number of bytes to skip to reach the address data_ptr.
	 * CRYPTO_AUTH_SEG_SIZE  : Number of bytes to be sent to crypto HW.
	 * CRYPTO_SEG_SIZE       : CRYPTO_AUTH_SEG_START + CRYPTO_AUTH_SEG_SIZE.
	 * Function: We pick the previous 64-byte aligned address as buffer, and
	 * tell crypto to skip (data_ptr - buffer) number of bytes.
	 * This bug is fixed from 5.1.0 onwards.
	 */

	if (minor_ver == 0)
	{
		if ((uint32_t) data_ptr & (CRYPTO_BURST_LEN - 1))
		{
			dprintf(CRITICAL, "Data start not aligned to burst length.\n");

			*buffer = (uint8_t *)ROUNDDOWN((uint32_t)data_ptr, CRYPTO_BURST_LEN);

			/* Header & trailer. */
			*total_bytes_to_write = ((bytes_to_write + (data_ptr - *buffer) + burst_mask) & (~burst_mask));

			auth_seg_start = (data_ptr - *buffer);
		}
		else
		{
			/* No header. */
			/* Add a trailer to make it a burst multiple, as 5.0.x HW mandates data to be a multiple of 64. */
			*total_bytes_to_write = (bytes_to_write + burst_mask) & (~burst_mask);
		}
	}
	else
	{
		/* No header. 5.1 crypto HW doesn't require alignment, as partial reads and writes are possible. */
		*total_bytes_to_write = bytes_to_write;
	}

	REG_WRITE_QUEUE(dev, CRYPTO_AUTH_SEG_START(dev->base), auth_seg_start);
	REG_WRITE_QUEUE(dev, CRYPTO_AUTH_SEG_SIZE(dev->base), bytes_to_write);
	REG_WRITE_QUEUE(dev, CRYPTO_SEG_SIZE(dev->base), *total_bytes_to_write);
	REG_WRITE_QUEUE(dev, CRYPTO_GOPROC(dev->base), GOPROC_GO);
	REG_WRITE_QUEUE_DONE(dev, BAM_DESC_LOCK_FLAG | BAM_DESC_INT_FLAG);
	REG_WRITE_EXEC(&dev->bam, 1, CRYPTO_WRITE_PIPE_INDEX);
}
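
/*
 * Worked example (an editor's sketch, not from the original source),
 * assuming CRYPTO_BURST_LEN == 64 and a 5.0.0 core (minor_ver == 0):
 * for data_ptr = 0x1010 and bytes_to_write = 100,
 *
 *   *buffer               = ROUNDDOWN(0x1010, 64)  = 0x1000
 *   auth_seg_start        = 0x1010 - 0x1000        = 16
 *   *total_bytes_to_write = (100 + 16 + 63) & ~63  = 128
 *
 * The core skips the 16 header bytes, hashes the 100 payload bytes, and
 * the 12 trailer bytes only pad the BAM transfer to a burst multiple.
 */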

uint32_t crypto5_send_data(struct crypto_dev *dev,
			   void *ctx_ptr,
			   uint8_t *data_ptr)
{
	uint32_t bam_status;
	crypto_SHA256_ctx *sha256_ctx = (crypto_SHA256_ctx *) ctx_ptr;
	uint32_t wr_flags = BAM_DESC_NWD_FLAG | BAM_DESC_INT_FLAG | BAM_DESC_EOT_FLAG;
	uint32_t ret_status;
	uint8_t *buffer = NULL;
	uint32_t total_bytes_to_write = 0;

	crypto5_set_auth_cfg(dev, &buffer, data_ptr, CRYPTO_BURST_LEN - 1, sha256_ctx->bytes_to_write,
			     &total_bytes_to_write);

	if (buffer)
	{
		arch_clean_invalidate_cache_range((addr_t) buffer, total_bytes_to_write);

		bam_status = ADD_WRITE_DESC(&dev->bam, buffer, total_bytes_to_write, wr_flags);
	}
	else
	{
		arch_clean_invalidate_cache_range((addr_t) data_ptr, total_bytes_to_write);
		bam_status = ADD_WRITE_DESC(&dev->bam, data_ptr, total_bytes_to_write, wr_flags);
	}

	if (bam_status)
	{
		dprintf(CRITICAL, "Crypto send data failed\n");
		ret_status = CRYPTO_ERR_FAIL;
		goto CRYPTO_SEND_DATA_ERR;
	}

	arch_clean_invalidate_cache_range((addr_t) (dev->dump), sizeof(struct output_dump));

	bam_status = ADD_READ_DESC(&dev->bam,
				   (unsigned char *)PA((addr_t)(dev->dump)),
				   sizeof(struct output_dump),
				   BAM_DESC_INT_FLAG);

	if (bam_status)
	{
		dprintf(CRITICAL, "Crypto send data failed\n");
		ret_status = CRYPTO_ERR_FAIL;
		goto CRYPTO_SEND_DATA_ERR;
	}

	crypto_wait_for_data(&dev->bam, CRYPTO_WRITE_PIPE_INDEX);

	crypto_wait_for_data(&dev->bam, CRYPTO_READ_PIPE_INDEX);

	arch_clean_invalidate_cache_range((addr_t) (dev->dump), sizeof(struct output_dump));

	ret_status = CRYPTO_ERR_NONE;

CRYPTO_SEND_DATA_ERR:

	crypto5_unlock_pipes(dev);

	return ret_status;
}

void crypto5_unlock_pipes(struct crypto_dev *dev)
{
	CLEAR_STATUS(dev);
}

void crypto5_cleanup(struct crypto_dev *dev)
{
	CLEAR_STATUS(dev);

	/* Reset the pipes. */
	bam_pipe_reset(&(dev->bam), CRYPTO_READ_PIPE_INDEX);
	bam_pipe_reset(&(dev->bam), CRYPTO_WRITE_PIPE_INDEX);

	/* Free all related memory. */
	free(dev->dump);
	free(dev->ce_array);
	free(dev->bam.pipe[CRYPTO_READ_PIPE_INDEX].fifo.head);
	free(dev->bam.pipe[CRYPTO_WRITE_PIPE_INDEX].fifo.head);
}

uint32_t crypto5_get_digest(struct crypto_dev *dev,
			    uint8_t *digest_ptr,
			    crypto_auth_alg_type auth_alg)
{
	uint32_t ce_status = 0;
	uint32_t ce_status2 = 0;
	uint32_t ce_err_bmsk = 0;
	uint32_t i = 0;
	uint32_t digest_len = 0;
	uint32_t auth_iv;

	/* Check the status register for errors. */
	ce_err_bmsk = (AXI_ERR | SW_ERR | HSD_ERR);
	ce_status = BE32(dev->dump->status);

	/* Check the status2 register for errors. */
	ce_status2 = BE32(dev->dump->status2);

	if ((ce_status & ce_err_bmsk) || (ce_status2 & AXI_EXTRA))
	{
		crypto_reset(dev);
		dprintf(CRITICAL, "crypto_get_digest status error\n");
		dprintf(CRITICAL, "status = %x status2 = %x\n", ce_status, ce_status2);
		return CRYPTO_ERR_FAIL;
	}

	/* Digest length depends on auth_alg. */
	if (auth_alg == CRYPTO_AUTH_ALG_SHA1)
	{
		digest_len = SHA1_INIT_VECTOR_SIZE;
	}
	else if (auth_alg == CRYPTO_AUTH_ALG_SHA256)
	{
		digest_len = SHA256_INIT_VECTOR_SIZE;
	}

	/* Retrieve the digest from CRYPTO. */
	for (i = 0; i < digest_len; i++)
	{
		auth_iv = (dev->dump->auth_iv[i]);

		*((unsigned int *)digest_ptr + i) = auth_iv;
	}

	return CRYPTO_ERR_NONE;
}

void crypto5_get_ctx(struct crypto_dev *dev, void *ctx_ptr)
{
	((crypto_SHA1_ctx *) ctx_ptr)->auth_bytecnt[0] = BE32(dev->dump->auth_bytcnt[0]);
	((crypto_SHA1_ctx *) ctx_ptr)->auth_bytecnt[1] = BE32(dev->dump->auth_bytcnt[1]);
}

uint32_t crypto5_get_max_auth_blk_size(struct crypto_dev *dev)
{
	return (dev->bam.max_desc_len * (dev->bam.pipe[CRYPTO_WRITE_PIPE_INDEX].fifo.size - 2));
}
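
/*
 * Minimal usage sketch (an editor's illustration, not part of the driver):
 * a single-shot SHA-256 digest over one buffer. The crypto_SHA256_ctx
 * fields (flags, bytes_to_write, auth_iv) are assumed from their use in
 * this file, and dev is assumed to have been set up via
 * crypto5_init_params() and crypto5_init(). The hardware applies the
 * SHA-256 padding itself when CRYPTO_LAST_CHUNK is set.
 */
static uint32_t crypto5_sha256_oneshot_example(struct crypto_dev *dev,
					       uint8_t *buf,
					       uint32_t len,
					       uint8_t *digest)
{
	/* Standard SHA-256 initial hash values (FIPS 180-4). */
	uint32_t sha256_iv[SHA256_INIT_VECTOR_SIZE] = {
		0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
		0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
	};
	crypto_SHA256_ctx ctx = {0};
	uint32_t i;

	/* A single chunk carries the whole message: both first and last. */
	ctx.flags = CRYPTO_FIRST_CHUNK | CRYPTO_LAST_CHUNK;
	ctx.bytes_to_write = len;
	for (i = 0; i < SHA256_INIT_VECTOR_SIZE; i++)
		ctx.auth_iv[i] = sha256_iv[i];

	/* Queue the auth segment config and program the IV registers. */
	crypto5_set_ctx(dev, &ctx, CRYPTO_AUTH_ALG_SHA256);

	/* Stream the data through BAM and wait for completion. */
	if (crypto5_send_data(dev, &ctx, buf) != CRYPTO_ERR_NONE)
		return CRYPTO_ERR_FAIL;

	/* Copy the digest out of the output dump region. */
	return crypto5_get_digest(dev, digest, CRYPTO_AUTH_ALG_SHA256);
}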