/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->hmacctx;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return ctx->dev->u_ctx;
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
}

/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

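/*
 * Example: sgl_len(4) = (3 * 3) / 2 + (3 & 1) + 2 = 7 flits.  The ULPTX
 * command word plus the first address/length pair take two flits; every
 * further pair of entries adds three more (two 8-byte addresses and two
 * 4-byte lengths), and an odd trailing entry rounds up to whole flits.
 */
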
/*
 * chcr_handle_resp - handle a completion from the hardware: copy the
 * result (final IV or digest) back into the request and release the
 * resources that were set up when the work request was sent.
 * @req: crypto request
 * @input: the CPL_FW6_PLD response payload
 * @error_status: non-zero if the hardware reported an error
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int error_status)
{
	struct crypto_tfm *tfm = req->tfm;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_req_ctx ctx_req;
	struct cpl_fw6_pld *fw6_pld;
	unsigned int digestsize, updated_digestsize;

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
		ctx_req.ctx.ablk_ctx =
			ablkcipher_request_ctx(ctx_req.req.ablk_req);
		if (!error_status) {
			fw6_pld = (struct cpl_fw6_pld *)input;
			memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
			       AES_BLOCK_SIZE);
		}
		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
			     ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE);
		if (ctx_req.ctx.ablk_ctx->skb) {
			kfree_skb(ctx_req.ctx.ablk_ctx->skb);
			ctx_req.ctx.ablk_ctx->skb = NULL;
		}
		break;

	case CRYPTO_ALG_TYPE_AHASH:
		ctx_req.req.ahash_req = (struct ahash_request *)req;
		ctx_req.ctx.ahash_ctx =
			ahash_request_ctx(ctx_req.req.ahash_req);
		digestsize =
			crypto_ahash_digestsize(crypto_ahash_reqtfm(
							ctx_req.req.ahash_req));
		updated_digestsize = digestsize;
		if (digestsize == SHA224_DIGEST_SIZE)
			updated_digestsize = SHA256_DIGEST_SIZE;
		else if (digestsize == SHA384_DIGEST_SIZE)
			updated_digestsize = SHA512_DIGEST_SIZE;
		if (ctx_req.ctx.ahash_ctx->skb)
			ctx_req.ctx.ahash_ctx->skb = NULL;
		if (ctx_req.ctx.ahash_ctx->result == 1) {
			ctx_req.ctx.ahash_ctx->result = 0;
			memcpy(ctx_req.req.ahash_req->result, input +
			       sizeof(struct cpl_fw6_pld),
			       digestsize);
		} else {
			memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
			       sizeof(struct cpl_fw6_pld),
			       updated_digestsize);
		}
		kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr);
		ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL;
		break;
	}
	return 0;
}

/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);

	flits = skb_transport_offset(skb) / 8;	/* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}

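/*
 * get_aes_decrypt_key - derive the reverse-round (decryption) key.
 * Runs the forward AES key schedule over @key and writes out the last
 * nk schedule words, which is what the hardware expects as the starting
 * round key for decryption.
 */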
static inline void get_aes_decrypt_key(unsigned char *dec_key,
				       const unsigned char *key,
				       unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord(temp) */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		*((u32 *)dec_key + k) = htonl(w_ring[j]);
		j--;
		if (j < 0)
			j += nk;
	}
}

static struct shash_desc *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = NULL;
	struct shash_desc *desc;

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1-generic", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224-generic", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256-generic", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384-generic", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512-generic", 0, 0);
		break;
	}
	if (IS_ERR(base_hash)) {
		pr_err("Cannot allocate sha-generic algo.\n");
		return (void *)base_hash;
	}

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(base_hash),
		       GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);
	desc->tfm = base_hash;
	desc->flags = crypto_shash_get_flags(base_hash);
	return desc;
}

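/*
 * chcr_compute_partial_hash - hash a single block (the HMAC ipad or opad)
 * with the software transform and export the raw internal state.  The
 * exported state is later loaded into the hardware as the initial digest.
 */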
static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}

static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);
	if ((chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK) ==
	    CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
		return 1;
	return 0;
}

static inline unsigned int ch_nents(struct scatterlist *sg,
				    unsigned int *total_size)
{
	unsigned int nents;

	for (nents = 0, *total_size = 0; sg; sg = sg_next(sg)) {
		nents++;
		*total_size += sg->length;
	}
	return nents;
}

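/*
 * write_phys_cpl - build the CPL_RX_PHYS_DSGL destination gather list from
 * a DMA-mapped scatterlist.  Entries are packed eight to a phys_sge_pairs
 * block, and the final entry is clamped at sg_param->obsize so the
 * hardware never writes past the response buffer.
 */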
static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
			   struct scatterlist *sg,
			   struct phys_sge_parm *sg_param)
{
	struct phys_sge_pairs *to;
	unsigned int out_buf_size = sg_param->obsize;
	unsigned int nents = sg_param->nents, i, j, tot_len = 0;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
				       sizeof(struct cpl_rx_phys_dsgl));

	for (i = 0; nents; to++) {
		for (j = i; (nents && (j < (8 + i))); j++, nents--) {
			to->len[j] = htons(sg->length);
			to->addr[j] = cpu_to_be64(sg_dma_address(sg));
			if (out_buf_size) {
				if (tot_len + sg_dma_len(sg) >= out_buf_size) {
					to->len[j] = htons(out_buf_size -
							   tot_len);
					return;
				}
				tot_len += sg_dma_len(sg);
			}
			sg = sg_next(sg);
		}
	}
}

static inline unsigned
int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
			 struct scatterlist *sg, struct phys_sge_parm *sg_param)
{
	if (!sg || !sg_param->nents)
		return 0;

	sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
	if (sg_param->nents == 0) {
		pr_err("CHCR : DMA mapping failed\n");
		return -EINVAL;
	}
	write_phys_cpl(phys_cpl, sg, sg_param);
	return 0;
}

static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.crypto);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static inline void write_buffer_to_skb(struct sk_buff *skb,
				       unsigned int *frags,
				       char *bfr,
				       u8 bfr_len)
{
	skb->len += bfr_len;
	skb->data_len += bfr_len;
	skb->truesize += bfr_len;
	get_page(virt_to_page(bfr));
	skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
			   offset_in_page(bfr), bfr_len);
	(*frags)++;
}

static inline void
write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
		struct scatterlist *sg, unsigned int count)
{
	struct page *spage;
	unsigned int page_len;

	skb->len += count;
	skb->data_len += count;
	skb->truesize += count;

	while (count > 0) {
		if (!sg || (!(sg->length)))
			break;
		spage = sg_page(sg);
		get_page(spage);
		page_len = min(sg->length, count);
		skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
		(*frags)++;
		count -= page_len;
		sg = sg_next(sg);
	}
}

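/*
 * generate_copy_rrkey - fill the key context for a decrypt operation.
 * CBC stores the derived reverse-round key; XTS copies the second half
 * of the double-length key unchanged and derives the reverse-round key
 * for the first half.
 */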
static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		get_aes_decrypt_key(key_ctx->key, ablkctx->key,
				    ablkctx->enckey_len << 3);
		memset(key_ctx->key + ablkctx->enckey_len, 0,
		       CHCR_AES_MAX_KEY_LEN - ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		get_aes_decrypt_key(key_ctx->key + (ablkctx->enckey_len >> 1),
				    ablkctx->key, ablkctx->enckey_len << 2);
	}
	return 0;
}

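/*
 * create_wreq - fill in the FW_CRYPTO_LOOKASIDE_WR header, the ULPTX
 * command and the immediate-data header that are common to the cipher
 * and hash work requests built below.
 */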
static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       void *req, struct sk_buff *skb,
			       int kctx_len, int hash_sz,
			       unsigned int phys_dsgl)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	int iv_loc = IV_DSGL;
	int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
	unsigned int immdatalen = 0, nr_frags = 0;

	if (is_ofld_imm(skb)) {
		immdatalen = skb->data_len;
		iv_loc = IV_IMMEDIATE;
	} else {
		nr_frags = skb_shinfo(skb)->nr_frags;
	}

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
				((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
		      FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
				    (calc_tx_flits_ofld(skb) * 8), 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id =
		FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
				(hash_sz) ? IV_NOP : iv_loc);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
					16) - ((sizeof(chcr_req->wreq)) >> 4)));

	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) +
					   kctx_len +
					   ((hash_sz) ? DUMMY_BYTES :
					   (sizeof(struct cpl_rx_phys_dsgl) +
					   phys_dsgl)) + immdatalen);
}

/**
 * create_cipher_wr - form the WR for cipher operations
 * @req: cipher request
 * @qid: ingress qid where the response to this WR should be received
 * @op_type: encryption or decryption
 */
static struct sk_buff
*create_cipher_wr(struct ablkcipher_request *req,
		  unsigned short qid,
		  unsigned short op_type)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct phys_sge_parm sg_param;
	unsigned int frags = 0, transhdr_len, phys_dsgl, dst_bufsize = 0;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
			GFP_ATOMIC;

	if (!req->info)
		return ERR_PTR(-EINVAL);
	ablkctx->dst_nents = ch_nents(req->dst, &dst_bufsize);
	ablkctx->enc = op_type;

	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) {
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->nbytes, ivsize);
		return ERR_PTR(-EINVAL);
	}

	phys_dsgl = get_space_for_phys_dsgl(ablkctx->dst_nents);

	kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
	memset(chcr_req, 0, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
							 ablkctx->ciph_mode,
							 0, 0, ivsize >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
							  0, 1, phys_dsgl);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if (op_type == CHCR_DECRYPT_OP) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	sg_param.nents = ablkctx->dst_nents;
	sg_param.obsize = req->nbytes;
	sg_param.qid = qid;
	sg_param.align = 1;
	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
				 &sg_param))
		goto map_fail1;

	skb_set_transport_header(skb, transhdr_len);
	memcpy(ablkctx->iv, req->info, ivsize);
	write_buffer_to_skb(skb, &frags, ablkctx->iv, ivsize);
	write_sg_to_skb(skb, &frags, req->src, req->nbytes);
	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, phys_dsgl);
	req_ctx->skb = skb;
	skb_get(skb);
	return skb;
map_fail1:
	kfree_skb(skb);
	return ERR_PTR(-ENOMEM);
}

static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct ablkcipher_alg *alg = crypto_ablkcipher_alg(tfm);
	unsigned int ck_size, context_size;
	u16 alignment = 0;

	if ((keylen < alg->min_keysize) || (keylen > alg->max_keysize))
		goto badkey_err;

	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		alignment = 8;
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		goto badkey_err;
	}

	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;
	return -EINVAL;
}

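/*
 * cxgb4_is_crypto_q_full - check whether the offload tx queue backing
 * this context is already blocked, so that callers can bail out with
 * -EBUSY instead of queueing behind a full hardware queue.
 */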
static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	int ret = 0;
	struct sge_ofld_txq *q;
	struct adapter *adap = netdev2adap(dev);

	local_bh_disable();
	q = &adap->sge.ofldtxq[idx];
	spin_lock(&q->sendq.lock);
	if (q->full)
		ret = -1;
	spin_unlock(&q->sendq.lock);
	local_bh_enable();
	return ret;
}

static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
			       CHCR_ENCRYPT_OP);
	if (IS_ERR(skb)) {
		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
		return PTR_ERR(skb);
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[0],
			       CHCR_DECRYPT_OP);
	if (IS_ERR(skb)) {
		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
		return PTR_ERR(skb);
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

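/*
 * chcr_device_init - bind this tfm context to a crypto device and pick
 * the queue it will use, spreading contexts across the per-channel
 * receive queues by CPU id.
 */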
static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx;
	unsigned int id;
	int err = 0, rxq_perchan, rxq_idx;

	id = smp_processor_id();
	if (!ctx->dev) {
		err = assign_chcr_device(&ctx->dev);
		if (err) {
			pr_err("chcr device assignment fails\n");
			goto out;
		}
		u_ctx = ULD_CTX(ctx);
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		ctx->dev->tx_channel_id = 0;
		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
		rxq_idx += id % rxq_perchan;
		spin_lock(&ctx->dev->lock_chcr_dev);
		ctx->tx_channel_id = rxq_idx;
		spin_unlock(&ctx->dev->lock_chcr_dev);
	}
out:
	return err;
}

static int chcr_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("chcr : ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}

/**
 * create_hash_wr - Create hash work request
 * @req: hash request
 * @param: work request parameters (opad/buffer/sg lengths, scmd1 value)
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int kctx_len = 0;
	u8 hash_size_in_response = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;

	iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
	kctx_len = param->alg_prm.result_size + iopad_alignment;
	if (param->opad_needed)
		kctx_len += param->alg_prm.result_size + iopad_alignment;

	if (req_ctx->result)
		hash_size_in_response = digestsize;
	else
		hash_size_in_response = param->alg_prm.result_size;
	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
	memset(chcr_req, 0, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0);
	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
					    param->alg_prm.mk_size, 0,
					    param->opad_needed,
					    ((kctx_len +
					     sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);

	skb_set_transport_header(skb, transhdr_len);
	if (param->bfr_len != 0)
		write_buffer_to_skb(skb, &frags, req_ctx->bfr,
				    param->bfr_len);
	if (param->sg_len != 0)
		write_sg_to_skb(skb, &frags, req->src, param->sg_len);

	create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response,
		    0);
	req_ctx->skb = skb;
	skb_get(skb);
	return skb;
}

static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	u_ctx = ULD_CTX(ctx);
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	if (nbytes + req_ctx->bfr_len >= bs) {
		remainder = (nbytes + req_ctx->bfr_len) % bs;
		nbytes = nbytes + req_ctx->bfr_len - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->bfr +
				   req_ctx->bfr_len, nbytes, 0);
		req_ctx->bfr_len += nbytes;
		return 0;
	}

	params.opad_needed = 0;
	params.more = 1;
	params.last = 0;
	params.sg_len = nbytes - req_ctx->bfr_len;
	params.bfr_len = req_ctx->bfr_len;
	params.scmd1 = 0;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->result = 0;
	req_ctx->data_len += params.sg_len + params.bfr_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	req_ctx->bfr_len = remainder;
	if (remainder)
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   req_ctx->bfr, remainder, req->nbytes -
				   remainder);
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);

	return -EINPROGRESS;
}

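/*
 * create_last_hash_block - write a final SHA padding block: a 0x80 byte
 * followed by zeroes, with the total message length in bits stored
 * big-endian in the last eight bytes of the block.
 */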
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
	memset(bfr_ptr, 0, bs);
	*bfr_ptr = 0x80;
	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}

static int chcr_ahash_final(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct hash_wr_param params;
	struct sk_buff *skb;
	struct uld_ctx *u_ctx = NULL;
	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	u_ctx = ULD_CTX(ctx);
	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;
	params.sg_len = 0;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->result = 1;
	params.bfr_len = req_ctx->bfr_len;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	if (req_ctx->bfr && (req_ctx->bfr_len == 0)) {
		create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_ahash_finup(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(ctx);

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;

	params.sg_len = req->nbytes;
	params.bfr_len = req_ctx->bfr_len;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->result = 1;
	if (req_ctx->bfr && (req_ctx->bfr_len + req->nbytes) == 0) {
		create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}

	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);

	return -EINPROGRESS;
}

static int chcr_ahash_digest(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;

	rtfm->init(req);
	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	u_ctx = ULD_CTX(ctx);
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;

	params.last = 0;
	params.more = 0;
	params.sg_len = req->nbytes;
	params.bfr_len = 0;
	params.scmd1 = 0;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->result = 1;
	req_ctx->data_len += params.bfr_len + params.sg_len;

	if (req_ctx->bfr && req->nbytes == 0) {
		create_last_hash_block(req_ctx->bfr, bs, 0);
		params.more = 1;
		params.bfr_len = bs;
	}

	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

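/*
 * ->export()/->import() copy the whole software request state (buffered
 * data, running length and partial hash) so a hash can be suspended and
 * resumed; the skb and dummy-payload pointers are deliberately not
 * carried across.
 */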
static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = out;

	state->bfr_len = req_ctx->bfr_len;
	state->data_len = req_ctx->data_len;
	memcpy(state->bfr, req_ctx->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
	memcpy(state->partial_hash, req_ctx->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	return 0;
}

static int chcr_ahash_import(struct ahash_request *areq, const void *in)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;

	req_ctx->bfr_len = state->bfr_len;
	req_ctx->data_len = state->data_len;
	req_ctx->dummy_payload_ptr = NULL;
	memcpy(req_ctx->bfr, state->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
	memcpy(req_ctx->partial_hash, state->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	return 0;
}

static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int i, err = 0, updated_digestsize;

	/*
	 * Use the key to calculate the ipad and opad.  The ipad will be
	 * sent with the first request's data; the opad will be sent with
	 * the final hash result.  They live in hmacctx->ipad and
	 * hmacctx->opad respectively.
	 */
	if (!hmacctx->desc)
		return -EINVAL;
	if (keylen > bs) {
		err = crypto_shash_digest(hmacctx->desc, key, keylen,
					  hmacctx->ipad);
		if (err)
			goto out;
		keylen = digestsize;
	} else {
		memcpy(hmacctx->ipad, key, keylen);
	}
	memset(hmacctx->ipad + keylen, 0, bs - keylen);
	memcpy(hmacctx->opad, hmacctx->ipad, bs);

	for (i = 0; i < bs / sizeof(int); i++) {
		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
	}

	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;
	err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->ipad,
					hmacctx->ipad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->ipad, updated_digestsize);

	err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->opad,
					hmacctx->opad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->opad, updated_digestsize);
out:
	return err;
}

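/*
 * XTS consumes two AES keys back to back, so the only valid key lengths
 * are 32 bytes (2 x AES-128) and 64 bytes (2 x AES-256).
 */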
static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			       unsigned int key_len)
{
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	int status = 0;
	unsigned short context_size = 0;

	if ((key_len == (AES_KEYSIZE_128 << 1)) ||
	    (key_len == (AES_KEYSIZE_256 << 1))) {
		memcpy(ablkctx->key, key, key_len);
		ablkctx->enckey_len = key_len;
		context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
		ablkctx->key_ctx_hdr =
			FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
					 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
					 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
					 CHCR_KEYCTX_NO_KEY, 1,
					 0, context_size);
		ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
	} else {
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		ablkctx->enckey_len = 0;
		status = -EINVAL;
	}
	return status;
}

static int chcr_sha_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	int digestsize = crypto_ahash_digestsize(tfm);

	req_ctx->data_len = 0;
	req_ctx->dummy_payload_ptr = NULL;
	req_ctx->bfr_len = 0;
	req_ctx->skb = NULL;
	req_ctx->result = 0;
	copy_hash_init_values(req_ctx->partial_hash, digestsize);
	return 0;
}

static int chcr_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int chcr_hmac_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	chcr_sha_init(areq);
	req_ctx->data_len = bs;
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		if (digestsize == SHA224_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA256_DIGEST_SIZE);
		else if (digestsize == SHA384_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA512_DIGEST_SIZE);
		else
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       digestsize);
	}
	return 0;
}

static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize =
		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	hmacctx->desc = chcr_alloc_shash(digestsize);
	if (IS_ERR(hmacctx->desc))
		return PTR_ERR(hmacctx->desc);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static void chcr_free_shash(struct shash_desc *desc)
{
	crypto_free_shash(desc->tfm);
	kfree(desc);
}

static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);

	if (hmacctx->desc) {
		chcr_free_shash(hmacctx->desc);
		hmacctx->desc = NULL;
	}
}

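/*
 * Table of algorithms offloaded by this driver.  The common ahash entry
 * points are filled in at registration time by chcr_register_alg().
 */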
static struct chcr_alg_template driver_algs[] = {
	/* AES-CBC */
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc(aes-chcr)",
			.cra_priority = CHCR_CRA_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct chcr_context)
				+ sizeof(struct ablk_ctx),
			.cra_alignmask = 0,
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_init = chcr_cra_init,
			.cra_exit = NULL,
			.cra_u.ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
				.setkey = chcr_aes_cbc_setkey,
				.encrypt = chcr_aes_encrypt,
				.decrypt = chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "xts(aes-chcr)",
			.cra_priority = CHCR_CRA_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct chcr_context) +
				       sizeof(struct ablk_ctx),
			.cra_alignmask = 0,
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_init = chcr_cra_init,
			.cra_exit = NULL,
			.cra_u = {
				.ablkcipher = {
					.min_keysize = 2 * AES_MIN_KEY_SIZE,
					.max_keysize = 2 * AES_MAX_KEY_SIZE,
					.ivsize = AES_BLOCK_SIZE,
					.setkey = chcr_aes_xts_setkey,
					.encrypt = chcr_aes_encrypt,
					.decrypt = chcr_aes_decrypt,
				}
			}
		}
	},
	/* SHA */
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* HMAC */
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac(sha1-chcr)",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac(sha224-chcr)",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac(sha256-chcr)",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac(sha384-chcr)",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac(sha512-chcr)",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
};

/*
 * chcr_unregister_alg - Deregister crypto algorithms with the kernel
 * framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_alg(
						&driver_algs[i].alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
		driver_algs[i].is_registered = 0;
	}
	return 0;
}

#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)

/*
 * chcr_register_alg - Register crypto algorithms with kernel framework.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			err = crypto_register_alg(&driver_algs[i].alg.crypto);
			name = driver_algs[i].alg.crypto.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;
			a_hash->halg.base.cra_type = &crypto_ahash_type;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("chcr : %s : Algorithm registration failed\n",
			       name);
			goto register_err;
		} else {
			driver_algs[i].is_registered = 1;
		}
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}

/*
 * start_crypto - Register the crypto algorithms.
 * This should be called once, when the first device comes up. After this
 * the kernel will start calling the driver's APIs for crypto operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 * stop_crypto - Deregister all the crypto algorithms with the kernel.
 * This should be called once, when the last device goes down. After this
 * the kernel will no longer call the driver for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}