/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <crypto/internal/hash.h>

#include "common.h"
#include "core.h"
#include "sha.h"

/* crypto hw padding constant for first operation */
#define SHA_PADDING		64
#define SHA_PADDING_MASK	(SHA_PADDING - 1)

static LIST_HEAD(ahash_algs);

static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
};

static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
};

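/*
 * DMA completion callback: unmap the source and result scatterlists, copy
 * the digest and authentication byte counts out of the result dump buffer,
 * restore the original request fields and complete the async request.
 */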
static void qce_ahash_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result = qce->dma.result_buf;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	int error;
	u32 status;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);

	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);

	memcpy(rctx->digest, result->auth_iv, digestsize);
	if (req->result)
		memcpy(req->result, result->auth_iv, digestsize);

	rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]);
	rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "ahash operation error (%x)\n", status);

	req->src = rctx->src_orig;
	req->nbytes = rctx->nbytes_orig;
	rctx->last_blk = false;
	rctx->first_blk = false;

	qce->async_req_done(tmpl->qce, error);
}

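/*
 * Prepare one hash operation for the crypto engine: map the source and
 * result scatterlists for DMA, set up the DMA descriptors with
 * qce_ahash_done() as the completion callback, and start the hardware.
 */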
static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	unsigned long flags = rctx->flags;
	int ret;

	if (IS_SHA_HMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = QCE_SHA_HMAC_KEY_SIZE;
	} else if (IS_CMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = AES_KEYSIZE_128;
	}

	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid number of src SG entries\n");
		return rctx->src_nents;
	}

	/* dma_map_sg() returns the number of mapped entries, 0 on failure */
	ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	if (!ret)
		return -EIO;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
	if (!ret) {
		ret = -EIO;
		goto error_unmap_src;
	}

	ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
			       &rctx->result_sg, 1, qce_ahash_done, async_req);
	if (ret)
		goto error_unmap_dst;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_dst:
	dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE);
error_unmap_src:
	dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
	return ret;
}

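/* Reset the request context and load the standard IV for this algorithm. */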
static int qce_ahash_init(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	const u32 *std_iv = tmpl->std_iv;

	memset(rctx, 0, sizeof(*rctx));
	rctx->first_blk = true;
	rctx->last_blk = false;
	rctx->flags = tmpl->alg_flags;
	memcpy(rctx->digest, std_iv, sizeof(rctx->digest));

	return 0;
}

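/*
 * Export the current hash state (message count, intermediate digest and
 * buffered partial block) into the generic sha1_state/sha256_state layout.
 */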
static int qce_ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned long flags = rctx->flags;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
		struct sha1_state *out_state = out;

		out_state->count = rctx->count;
		qce_cpu_to_be32p_array((__be32 *)out_state->state,
				       rctx->digest, digestsize);
		memcpy(out_state->buffer, rctx->buf, blocksize);
	} else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
		struct sha256_state *out_state = out;

		out_state->count = rctx->count;
		qce_cpu_to_be32p_array((__be32 *)out_state->state,
				       rctx->digest, digestsize);
		memcpy(out_state->buf, rctx->buf, blocksize);
	} else {
		return -EINVAL;
	}

	return 0;
}

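/*
 * Common import helper: restore the byte count, intermediate digest and
 * buffered partial block from a previously exported state.
 */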
static int qce_import_common(struct ahash_request *req, u64 in_count,
			     const u32 *state, const u8 *buffer, bool hmac)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	u64 count = in_count;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
	rctx->count = in_count;
	memcpy(rctx->buf, buffer, blocksize);

	if (in_count <= blocksize) {
		rctx->first_blk = 1;
	} else {
		rctx->first_blk = 0;
		/*
		 * For HMAC, the hardware adds padding when the first block
		 * flag is set. Therefore the byte_count must be incremented
		 * by 64 after the first block operation.
		 */
		if (hmac)
			count += SHA_PADDING;
	}

	rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
	rctx->byte_count[1] = (__force __be32)(count >> 32);
	qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
			       digestsize);
	rctx->buflen = (unsigned int)(in_count & (blocksize - 1));

	return 0;
}

static int qce_ahash_import(struct ahash_request *req, const void *in)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned long flags = rctx->flags;
	bool hmac = IS_SHA_HMAC(flags);
	int ret = -EINVAL;

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
		const struct sha1_state *state = in;

		ret = qce_import_common(req, state->count, state->state,
					state->buffer, hmac);
	} else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
		const struct sha256_state *state = in;

		ret = qce_import_common(req, state->count, state->state,
					state->buf, hmac);
	}

	return ret;
}

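/*
 * Buffer sub-block-sized input and submit only whole blocks to the
 * hardware; any remainder is kept in rctx->buf for the next update or the
 * final operation.
 */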
static int qce_ahash_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;
	struct scatterlist *sg_last, *sg;
	unsigned int total, len;
	unsigned int hash_later;
	unsigned int nbytes;
	unsigned int blocksize;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	rctx->count += req->nbytes;

	/* check for buffer from previous updates and append it */
	total = req->nbytes + rctx->buflen;

	if (total <= blocksize) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src,
					 0, req->nbytes, 0);
		rctx->buflen += req->nbytes;
		return 0;
	}

	/* save the original req structure fields */
	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;

	/*
	 * If there is data buffered from a previous update, copy it to the
	 * temporary buffer so it can be combined with the current request
	 * bytes.
	 */
	if (rctx->buflen)
		memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);

	/* calculate how many bytes will be hashed later */
	hash_later = total % blocksize;
	if (hash_later) {
		unsigned int src_offset = req->nbytes - hash_later;
		scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
					 hash_later, 0);
	}

	/* here nbytes is a multiple of blocksize */
	nbytes = total - hash_later;

	len = rctx->buflen;
	sg = sg_last = req->src;

	while (len < nbytes && sg) {
		if (len + sg_dma_len(sg) > nbytes)
			break;
		len += sg_dma_len(sg);
		sg_last = sg;
		sg = sg_next(sg);
	}

	if (!sg_last)
		return -EINVAL;

	sg_mark_end(sg_last);

	if (rctx->buflen) {
		sg_init_table(rctx->sg, 2);
		sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
		sg_chain(rctx->sg, 2, req->src);
		req->src = rctx->sg;
	}

	req->nbytes = nbytes;
	rctx->buflen = hash_later;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_ahash_final(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;

	if (!rctx->buflen)
		return 0;

	rctx->last_blk = true;

	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;

	memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
	sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen);

	req->src = rctx->sg;
	req->nbytes = rctx->buflen;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}

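/* One-shot digest: initialize the context and hash the whole request in a single pass. */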
static int qce_ahash_digest(struct ahash_request *req)
{
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
	struct qce_device *qce = tmpl->qce;
	int ret;

	ret = qce_ahash_init(req);
	if (ret)
		return ret;

	rctx->src_orig = req->src;
	rctx->nbytes_orig = req->nbytes;
	rctx->first_blk = true;
	rctx->last_blk = true;

	return qce->async_req_enqueue(tmpl->qce, &req->base);
}

struct qce_ahash_result {
	struct completion completion;
	int error;
};

static void qce_digest_complete(struct crypto_async_request *req, int error)
{
	struct qce_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

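/*
 * HMAC setkey: keys up to one block in size are used as-is (zero padded);
 * longer keys are first digested with the corresponding qce ahash
 * algorithm and the digest is used as the authentication key.
 */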
static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
				 unsigned int keylen)
{
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	struct qce_ahash_result result;
	struct ahash_request *req;
	struct scatterlist sg;
	unsigned int blocksize;
	struct crypto_ahash *ahash_tfm;
	u8 *buf;
	int ret;
	const char *alg_name;

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	memset(ctx->authkey, 0, sizeof(ctx->authkey));

	if (keylen <= blocksize) {
		memcpy(ctx->authkey, key, keylen);
		return 0;
	}

	if (digestsize == SHA1_DIGEST_SIZE)
		alg_name = "sha1-qce";
	else if (digestsize == SHA256_DIGEST_SIZE)
		alg_name = "sha256-qce";
	else
		return -EINVAL;

	ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH,
				       CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(ahash_tfm))
		return PTR_ERR(ahash_tfm);

	req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_free_ahash;
	}

	init_completion(&result.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   qce_digest_complete, &result);
	crypto_ahash_clear_flags(ahash_tfm, ~0);

	buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_free_req;
	}

	memcpy(buf, key, keylen);
	sg_init_one(&sg, buf, keylen);
	ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible(&result.completion);
		if (!ret)
			ret = result.error;
	}

	if (ret)
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

	kfree(buf);
err_free_req:
	ahash_request_free(req);
err_free_ahash:
	crypto_free_ahash(ahash_tfm);
	return ret;
}

static int qce_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(ahash, sizeof(struct qce_sha_reqctx));
	memset(ctx, 0, sizeof(*ctx));
	return 0;
}

struct qce_ahash_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int digestsize;
	unsigned int blocksize;
	unsigned int statesize;
	const u32 *std_iv;
};

static const struct qce_ahash_def ahash_def[] = {
	{
		.flags = QCE_HASH_SHA1,
		.name = "sha1",
		.drv_name = "sha1-qce",
		.digestsize = SHA1_DIGEST_SIZE,
		.blocksize = SHA1_BLOCK_SIZE,
		.statesize = sizeof(struct sha1_state),
		.std_iv = std_iv_sha1,
	},
	{
		.flags = QCE_HASH_SHA256,
		.name = "sha256",
		.drv_name = "sha256-qce",
		.digestsize = SHA256_DIGEST_SIZE,
		.blocksize = SHA256_BLOCK_SIZE,
		.statesize = sizeof(struct sha256_state),
		.std_iv = std_iv_sha256,
	},
	{
		.flags = QCE_HASH_SHA1_HMAC,
		.name = "hmac(sha1)",
		.drv_name = "hmac-sha1-qce",
		.digestsize = SHA1_DIGEST_SIZE,
		.blocksize = SHA1_BLOCK_SIZE,
		.statesize = sizeof(struct sha1_state),
		.std_iv = std_iv_sha1,
	},
	{
		.flags = QCE_HASH_SHA256_HMAC,
		.name = "hmac(sha256)",
		.drv_name = "hmac-sha256-qce",
		.digestsize = SHA256_DIGEST_SIZE,
		.blocksize = SHA256_BLOCK_SIZE,
		.statesize = sizeof(struct sha256_state),
		.std_iv = std_iv_sha256,
	},
};

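/*
 * Allocate a template for one algorithm definition, fill in the ahash
 * callbacks and crypto_alg fields, and register it with the crypto API.
 */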
static int qce_ahash_register_one(const struct qce_ahash_def *def,
				  struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct ahash_alg *alg;
	struct crypto_alg *base;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	tmpl->std_iv = def->std_iv;

	alg = &tmpl->alg.ahash;
	alg->init = qce_ahash_init;
	alg->update = qce_ahash_update;
	alg->final = qce_ahash_final;
	alg->digest = qce_ahash_digest;
	alg->export = qce_ahash_export;
	alg->import = qce_ahash_import;
	if (IS_SHA_HMAC(def->flags))
		alg->setkey = qce_ahash_hmac_setkey;
	alg->halg.digestsize = def->digestsize;
	alg->halg.statesize = def->statesize;

	base = &alg->halg.base;
	base->cra_blocksize = def->blocksize;
	base->cra_priority = 300;
	base->cra_flags = CRYPTO_ALG_ASYNC;
	base->cra_ctxsize = sizeof(struct qce_sha_ctx);
	base->cra_alignmask = 0;
	base->cra_module = THIS_MODULE;
	base->cra_init = qce_ahash_cra_init;
	INIT_LIST_HEAD(&base->cra_list);

	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_ahash(alg);
	if (ret) {
		kfree(tmpl);
		dev_err(qce->dev, "%s registration failed\n", base->cra_name);
		return ret;
	}

	list_add_tail(&tmpl->entry, &ahash_algs);
	dev_dbg(qce->dev, "%s is registered\n", base->cra_name);
	return 0;
}

static void qce_ahash_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &ahash_algs, entry) {
		crypto_unregister_ahash(&tmpl->alg.ahash);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_ahash_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(ahash_def); i++) {
		ret = qce_ahash_register_one(&ahash_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_ahash_unregister(qce);
	return ret;
}

const struct qce_algo_ops ahash_ops = {
	.type = CRYPTO_ALG_TYPE_AHASH,
	.register_algs = qce_ahash_register,
	.unregister_algs = qce_ahash_unregister,
	.async_req_handle = qce_ahash_async_req_handle,
};