/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->hmacctx;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return ctx->dev->u_ctx;
}

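/*
 * is_ofld_imm - check whether a packet is small enough to be carried as
 * immediate data inside the work request itself rather than via DMA.
 */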
static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN);
}

/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
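/*
 * Worked examples of the flit count above: sgl_len(1) == 2 (the two
 * mandatory flits); each further pair of entries adds three flits and
 * an unpaired trailing entry adds two, so sgl_len(2) == 4,
 * sgl_len(3) == 5 and sgl_len(4) == 7.
 */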

/*
 * chcr_handle_resp - Unmap the DMA buffers associated with the request
 * @req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int error_status)
{
	struct crypto_tfm *tfm = req->tfm;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_req_ctx ctx_req;
	struct cpl_fw6_pld *fw6_pld;
	unsigned int digestsize, updated_digestsize;

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
		ctx_req.ctx.ablk_ctx =
			ablkcipher_request_ctx(ctx_req.req.ablk_req);
		if (!error_status) {
			fw6_pld = (struct cpl_fw6_pld *)input;
			memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
			       AES_BLOCK_SIZE);
		}
		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
			     ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE);
		if (ctx_req.ctx.ablk_ctx->skb) {
			kfree_skb(ctx_req.ctx.ablk_ctx->skb);
			ctx_req.ctx.ablk_ctx->skb = NULL;
		}
		break;

	case CRYPTO_ALG_TYPE_AHASH:
		ctx_req.req.ahash_req = (struct ahash_request *)req;
		ctx_req.ctx.ahash_ctx =
			ahash_request_ctx(ctx_req.req.ahash_req);
		digestsize =
			crypto_ahash_digestsize(crypto_ahash_reqtfm(
							ctx_req.req.ahash_req));
		updated_digestsize = digestsize;
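		/*
		 * SHA-224 and SHA-384 are truncations of SHA-256 and
		 * SHA-512, so the hardware works with the full-width
		 * internal state; partial hashes are therefore copied
		 * at the wider digest size below.
		 */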
		if (digestsize == SHA224_DIGEST_SIZE)
			updated_digestsize = SHA256_DIGEST_SIZE;
		else if (digestsize == SHA384_DIGEST_SIZE)
			updated_digestsize = SHA512_DIGEST_SIZE;
		if (ctx_req.ctx.ahash_ctx->skb)
			ctx_req.ctx.ahash_ctx->skb = NULL;
		if (ctx_req.ctx.ahash_ctx->result == 1) {
			ctx_req.ctx.ahash_ctx->result = 0;
			memcpy(ctx_req.req.ahash_req->result, input +
			       sizeof(struct cpl_fw6_pld),
			       digestsize);
		} else {
			memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input +
			       sizeof(struct cpl_fw6_pld),
			       updated_digestsize);
		}
		break;
	}
	return 0;
}

/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (is_ofld_imm(skb))
		return DIV_ROUND_UP(skb->len, 8);

	flits = skb_transport_offset(skb) / 8;	/* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits + sgl_len(cnt);
}

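/*
 * get_aes_decrypt_key - run the AES key schedule forward and emit the
 * final round-key words in reverse order; this is the "reverse round"
 * key form the hardware expects for decryption.
 */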
static inline void get_aes_decrypt_key(unsigned char *dec_key,
				       const unsigned char *key,
				       unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord(temp) */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		*((u32 *)dec_key + k) = htonl(w_ring[j]);
		j--;
		if (j < 0)
			j += nk;
	}
}

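/*
 * chcr_alloc_shash - allocate a software "-generic" shash matching the
 * given digest size; used to precompute the HMAC ipad/opad partial
 * hashes on the host.
 */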
static struct shash_desc *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = NULL;
	struct shash_desc *desc;

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1-generic", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224-generic", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256-generic", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384-generic", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512-generic", 0, 0);
		break;
	}
	if (IS_ERR(base_hash)) {
		pr_err("Cannot allocate sha-generic algo.\n");
		return (void *)base_hash;
	}

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(base_hash),
		       GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);
	desc->tfm = base_hash;
	desc->flags = crypto_shash_get_flags(base_hash);
	return desc;
}

static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);

	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}

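/*
 * chcr_change_order - convert the partial-hash state to the big-endian
 * layout the hardware expects: 64-bit words for the SHA-512 state size
 * (also used for SHA-384), 32-bit words otherwise.
 */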
static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);

	if ((chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK) ==
	    CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
		return 1;
	return 0;
}

static inline unsigned int ch_nents(struct scatterlist *sg,
				    unsigned int *total_size)
{
	unsigned int nents;

	for (nents = 0, *total_size = 0; sg; sg = sg_next(sg)) {
		nents++;
		*total_size += sg->length;
	}
	return nents;
}

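/*
 * write_phys_cpl - build the CPL_RX_PHYS_DSGL message that tells the
 * hardware where to DMA its response; address/length pairs are packed
 * eight to a phys_sge_pairs block and the list is clamped at obsize.
 */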
static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
			   struct scatterlist *sg,
			   struct phys_sge_parm *sg_param)
{
	struct phys_sge_pairs *to;
	unsigned int out_buf_size = sg_param->obsize;
	unsigned int nents = sg_param->nents, i, j, tot_len = 0;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
				       sizeof(struct cpl_rx_phys_dsgl));

	for (i = 0; nents; to++) {
		for (j = i; (nents && (j < (8 + i))); j++, nents--) {
			to->len[j] = htons(sg->length);
			to->addr[j] = cpu_to_be64(sg_dma_address(sg));
			if (out_buf_size) {
				if (tot_len + sg_dma_len(sg) >= out_buf_size) {
					to->len[j] = htons(out_buf_size -
							   tot_len);
					return;
				}
				tot_len += sg_dma_len(sg);
			}
			sg = sg_next(sg);
		}
	}
}

static inline unsigned int
map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
		     struct scatterlist *sg, struct phys_sge_parm *sg_param)
{
	if (!sg || !sg_param->nents)
		return 0;

	sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
	if (sg_param->nents == 0) {
		pr_err("CHCR : DMA mapping failed\n");
		return -EINVAL;
	}
	write_phys_cpl(phys_cpl, sg, sg_param);
	return 0;
}

static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.crypto);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static inline void write_buffer_to_skb(struct sk_buff *skb,
				       unsigned int *frags,
				       char *bfr,
				       u8 bfr_len)
{
	skb->len += bfr_len;
	skb->data_len += bfr_len;
	skb->truesize += bfr_len;
	get_page(virt_to_page(bfr));
	skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
			   offset_in_page(bfr), bfr_len);
	(*frags)++;
}

static inline void
write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
		struct scatterlist *sg, unsigned int count)
{
	struct page *spage;
	unsigned int page_len;

	skb->len += count;
	skb->data_len += count;
	skb->truesize += count;

	while (count > 0) {
		if (!sg || (!(sg->length)))
			break;
		spage = sg_page(sg);
		get_page(spage);
		page_len = min(sg->length, count);
		skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len);
		(*frags)++;
		count -= page_len;
		sg = sg_next(sg);
	}
}

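/*
 * generate_copy_rrkey - fill the key context for decryption: the
 * reverse-round key for CBC, or (for XTS) the tweak-key half followed
 * by the reverse-round key derived from the cipher-key half.
 */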
static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		get_aes_decrypt_key(key_ctx->key, ablkctx->key,
				    ablkctx->enckey_len << 3);
		memset(key_ctx->key + ablkctx->enckey_len, 0,
		       CHCR_AES_MAX_KEY_LEN - ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		get_aes_decrypt_key(key_ctx->key + (ablkctx->enckey_len >> 1),
				    ablkctx->key, ablkctx->enckey_len << 2);
	}
	return 0;
}

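/*
 * create_wreq - fill the FW_CRYPTO_LOOKASIDE_WR header and the ULPTX
 * fields common to cipher and hash work requests; small payloads ride
 * in the WR as immediate data, larger ones go out via the SGL.
 */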
static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       void *req, struct sk_buff *skb,
			       int kctx_len, int hash_sz,
			       unsigned int phys_dsgl)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	int iv_loc = IV_DSGL;
	int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
	unsigned int immdatalen = 0, nr_frags = 0;

	if (is_ofld_imm(skb)) {
		immdatalen = skb->data_len;
		iv_loc = IV_IMMEDIATE;
	} else {
		nr_frags = skb_shinfo(skb)->nr_frags;
	}

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
				((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
		      FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
				    (calc_tx_flits_ofld(skb) * 8), 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id =
		FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
				(hash_sz) ? IV_NOP : iv_loc);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
					16) - ((sizeof(chcr_req->wreq)) >> 4)));

	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) +
					   kctx_len +
					   ((hash_sz) ? DUMMY_BYTES :
					   (sizeof(struct cpl_rx_phys_dsgl) +
					   phys_dsgl)) + immdatalen);
}

/**
 * create_cipher_wr - form the WR for cipher operations
 * @req: cipher request
 * @qid: ingress qid where response of this WR should be received
 * @op_type: encryption or decryption
 */
static struct sk_buff
*create_cipher_wr(struct ablkcipher_request *req,
		  unsigned short qid,
		  unsigned short op_type)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct phys_sge_parm sg_param;
	unsigned int frags = 0, transhdr_len, phys_dsgl, dst_bufsize = 0;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
			GFP_ATOMIC;

	if (!req->info)
		return ERR_PTR(-EINVAL);
	ablkctx->dst_nents = ch_nents(req->dst, &dst_bufsize);
	ablkctx->enc = op_type;

	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) {
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->nbytes, ivsize);
		return ERR_PTR(-EINVAL);
	}

	phys_dsgl = get_space_for_phys_dsgl(ablkctx->dst_nents);

	kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
	memset(chcr_req, 0, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
							 ablkctx->ciph_mode,
							 0, 0, ivsize >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
							  0, 1, phys_dsgl);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if (op_type == CHCR_DECRYPT_OP) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	sg_param.nents = ablkctx->dst_nents;
	sg_param.obsize = req->nbytes;
	sg_param.qid = qid;
	sg_param.align = 1;
	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
				 &sg_param))
		goto map_fail1;

	skb_set_transport_header(skb, transhdr_len);
	memcpy(ablkctx->iv, req->info, ivsize);
	write_buffer_to_skb(skb, &frags, ablkctx->iv, ivsize);
	write_sg_to_skb(skb, &frags, req->src, req->nbytes);
	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, phys_dsgl);
	req_ctx->skb = skb;
	skb_get(skb);
	return skb;
map_fail1:
	kfree_skb(skb);
	return ERR_PTR(-ENOMEM);
}

static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct ablkcipher_alg *alg = crypto_ablkcipher_alg(tfm);
	unsigned int ck_size, context_size;
	u16 alignment = 0;

	if ((keylen < alg->min_keysize) || (keylen > alg->max_keysize))
		goto badkey_err;

	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		alignment = 8;
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		goto badkey_err;
	}

	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;
	return -EINVAL;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	int ret = 0;
	struct sge_ofld_txq *q;
	struct adapter *adap = netdev2adap(dev);

	local_bh_disable();
	q = &adap->sge.ofldtxq[idx];
	spin_lock(&q->sendq.lock);
	if (q->full)
		ret = -1;
	spin_unlock(&q->sendq.lock);
	local_bh_enable();
	return ret;
}

static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
			       CHCR_ENCRYPT_OP);
	if (IS_ERR(skb)) {
		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
		return PTR_ERR(skb);
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[0],
			       CHCR_DECRYPT_OP);
	if (IS_ERR(skb)) {
		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
		return PTR_ERR(skb);
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx;
	unsigned int id;
	int err = 0, rxq_perchan, rxq_idx;

	id = smp_processor_id();
	if (!ctx->dev) {
		err = assign_chcr_device(&ctx->dev);
		if (err) {
			pr_err("chcr device assignment fails\n");
			goto out;
		}
		u_ctx = ULD_CTX(ctx);
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		ctx->dev->tx_channel_id = 0;
		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
		rxq_idx += id % rxq_perchan;
		spin_lock(&ctx->dev->lock_chcr_dev);
		ctx->tx_channel_id = rxq_idx;
		spin_unlock(&ctx->dev->lock_chcr_dev);
	}
out:
	return err;
}

static int chcr_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("chcr : ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}

/**
 * create_hash_wr - Create hash work request
 * @req: ahash request
 * @param: hash work request parameters
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int kctx_len = 0;
	u8 hash_size_in_response = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;

	iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
	kctx_len = param->alg_prm.result_size + iopad_alignment;
	if (param->opad_needed)
		kctx_len += param->alg_prm.result_size + iopad_alignment;

	if (req_ctx->result)
		hash_size_in_response = digestsize;
	else
		hash_size_in_response = param->alg_prm.result_size;
	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
	if (!skb)
		return skb;

	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
	memset(chcr_req, 0, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0);
	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
					    param->alg_prm.mk_size, 0,
					    param->opad_needed,
					    ((kctx_len +
					     sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);

	skb_set_transport_header(skb, transhdr_len);
	if (param->bfr_len != 0)
		write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
				    param->bfr_len);
	if (param->sg_len != 0)
		write_sg_to_skb(skb, &frags, req->src, param->sg_len);

	create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response,
		    0);
	req_ctx->skb = skb;
	skb_get(skb);
	return skb;
}

static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	u_ctx = ULD_CTX(ctx);
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

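	/*
	 * The hardware hashes only whole blocks: send down the largest
	 * block-aligned chunk and carry any sub-block remainder in
	 * reqbfr until more data arrives.
	 */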
	if (nbytes + req_ctx->reqlen >= bs) {
		remainder = (nbytes + req_ctx->reqlen) % bs;
		nbytes = nbytes + req_ctx->reqlen - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
				   + req_ctx->reqlen, nbytes, 0);
		req_ctx->reqlen += nbytes;
		return 0;
	}

	params.opad_needed = 0;
	params.more = 1;
	params.last = 0;
	params.sg_len = nbytes - req_ctx->reqlen;
	params.bfr_len = req_ctx->reqlen;
	params.scmd1 = 0;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->result = 0;
	req_ctx->data_len += params.sg_len + params.bfr_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	if (remainder) {
		u8 *temp;
		/* Swap buffers */
		temp = req_ctx->reqbfr;
		req_ctx->reqbfr = req_ctx->skbfr;
		req_ctx->skbfr = temp;
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   req_ctx->reqbfr, remainder, req->nbytes -
				   remainder);
	}
	req_ctx->reqlen = remainder;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);

	return -EINPROGRESS;
}

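/*
 * create_last_hash_block - build the final padding block by hand: a
 * 0x80 byte followed by zeros, with the message length in bits stored
 * big-endian in the last eight bytes of the 64- or 128-byte block.
 */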
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
	memset(bfr_ptr, 0, bs);
	*bfr_ptr = 0x80;
	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}

static int chcr_ahash_final(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct hash_wr_param params;
	struct sk_buff *skb;
	struct uld_ctx *u_ctx = NULL;
	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	u_ctx = ULD_CTX(ctx);
	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;
	params.sg_len = 0;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->result = 1;
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	if (req_ctx->reqlen == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;

	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_ahash_finup(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(ctx);

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;

	params.sg_len = req->nbytes;
	params.bfr_len = req_ctx->reqlen;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->result = 1;
	if ((req_ctx->reqlen + req->nbytes) == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}

	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);

	return -EINPROGRESS;
}

static int chcr_ahash_digest(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;

	rtfm->init(req);
	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	u_ctx = ULD_CTX(ctx);
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    ctx->tx_channel_id))) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;

	params.last = 0;
	params.more = 0;
	params.sg_len = req->nbytes;
	params.bfr_len = 0;
	params.scmd1 = 0;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	req_ctx->result = 1;
	req_ctx->data_len += params.bfr_len + params.sg_len;

	if (req->nbytes == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, 0);
		params.more = 1;
		params.bfr_len = bs;
	}

	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}

static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = out;

	state->reqlen = req_ctx->reqlen;
	state->data_len = req_ctx->data_len;
	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
	memcpy(state->partial_hash, req_ctx->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	return 0;
}

static int chcr_ahash_import(struct ahash_request *areq, const void *in)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;

	req_ctx->reqlen = state->reqlen;
	req_ctx->data_len = state->data_len;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
	memcpy(req_ctx->partial_hash, state->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	return 0;
}

static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int i, err = 0, updated_digestsize;

	/*
	 * Use the key to calculate the ipad and opad. The ipad will be
	 * sent with the first request's data and the opad with the final
	 * hash result; they are kept in hmacctx->ipad and hmacctx->opad
	 * respectively.
	 */
	if (!hmacctx->desc)
		return -EINVAL;
	if (keylen > bs) {
		err = crypto_shash_digest(hmacctx->desc, key, keylen,
					  hmacctx->ipad);
		if (err)
			goto out;
		keylen = digestsize;
	} else {
		memcpy(hmacctx->ipad, key, keylen);
	}
	memset(hmacctx->ipad + keylen, 0, bs - keylen);
	memcpy(hmacctx->opad, hmacctx->ipad, bs);

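	/*
	 * Per RFC 2104, ipad = key ^ 0x36... and opad = key ^ 0x5c...;
	 * IPAD_DATA and OPAD_DATA are assumed here to be the 32-bit
	 * repetitions of those pad bytes.
	 */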
	for (i = 0; i < bs / sizeof(int); i++) {
		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
	}

	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;
	err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->ipad,
					hmacctx->ipad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->ipad, updated_digestsize);

	err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->opad,
					hmacctx->opad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->opad, updated_digestsize);
out:
	return err;
}

static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			       unsigned int key_len)
{
	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	int status = 0;
	unsigned short context_size = 0;

	if ((key_len == (AES_KEYSIZE_128 << 1)) ||
	    (key_len == (AES_KEYSIZE_256 << 1))) {
		memcpy(ablkctx->key, key, key_len);
		ablkctx->enckey_len = key_len;
		context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
		ablkctx->key_ctx_hdr =
			FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
					 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
					 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
					 CHCR_KEYCTX_NO_KEY, 1,
					 0, context_size);
		ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
	} else {
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		ablkctx->enckey_len = 0;
		status = -EINVAL;
	}
	return status;
}

static int chcr_sha_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	int digestsize = crypto_ahash_digestsize(tfm);

	req_ctx->data_len = 0;
	req_ctx->reqlen = 0;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	req_ctx->skb = NULL;
	req_ctx->result = 0;
	copy_hash_init_values(req_ctx->partial_hash, digestsize);
	return 0;
}

static int chcr_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int chcr_hmac_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm));
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	chcr_sha_init(areq);
	req_ctx->data_len = bs;
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		if (digestsize == SHA224_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA256_DIGEST_SIZE);
		else if (digestsize == SHA384_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA512_DIGEST_SIZE);
		else
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       digestsize);
	}
	return 0;
}

static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize =
		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	hmacctx->desc = chcr_alloc_shash(digestsize);
	if (IS_ERR(hmacctx->desc))
		return PTR_ERR(hmacctx->desc);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static void chcr_free_shash(struct shash_desc *desc)
{
	crypto_free_shash(desc->tfm);
	kfree(desc);
}

static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);

	if (hmacctx->desc) {
		chcr_free_shash(hmacctx->desc);
		hmacctx->desc = NULL;
	}
}

static struct chcr_alg_template driver_algs[] = {
	/* AES-CBC */
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc(aes-chcr)",
			.cra_priority = CHCR_CRA_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct chcr_context)
				       + sizeof(struct ablk_ctx),
			.cra_alignmask = 0,
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_init = chcr_cra_init,
			.cra_exit = NULL,
			.cra_u.ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
				.setkey = chcr_aes_cbc_setkey,
				.encrypt = chcr_aes_encrypt,
				.decrypt = chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "xts(aes-chcr)",
			.cra_priority = CHCR_CRA_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct chcr_context) +
				       sizeof(struct ablk_ctx),
			.cra_alignmask = 0,
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_init = chcr_cra_init,
			.cra_exit = NULL,
			.cra_u = {
				.ablkcipher = {
					.min_keysize = 2 * AES_MIN_KEY_SIZE,
					.max_keysize = 2 * AES_MAX_KEY_SIZE,
					.ivsize = AES_BLOCK_SIZE,
					.setkey = chcr_aes_xts_setkey,
					.encrypt = chcr_aes_encrypt,
					.decrypt = chcr_aes_decrypt,
				}
			}
		}
	},
	/* SHA */
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* HMAC */
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac(sha1-chcr)",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac(sha224-chcr)",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac(sha256-chcr)",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac(sha384-chcr)",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac(sha512-chcr)",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
};

/*
 * chcr_unregister_alg - Deregister crypto algorithms with
 * kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_alg(
						&driver_algs[i].alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
		driver_algs[i].is_registered = 0;
	}
	return 0;
}

#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)

/*
 * chcr_register_alg - Register crypto algorithms with kernel framework.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			err = crypto_register_alg(&driver_algs[i].alg.crypto);
			name = driver_algs[i].alg.crypto.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;
			a_hash->halg.base.cra_type = &crypto_ahash_type;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("chcr : %s : Algorithm registration failed\n",
			       name);
			goto register_err;
		} else {
			driver_algs[i].is_registered = 1;
		}
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}

/*
 * start_crypto - Register the crypto algorithms.
 * This should be called once when the first device comes up. After this
 * the kernel will start calling driver APIs for crypto operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 * stop_crypto - Deregister all the crypto algorithms with the kernel.
 * This should be called once when the last device goes down. After this
 * the kernel will not call the driver API for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}