/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/rng.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

static atomic_t active_dev;

struct qat_alg_buf {
	uint32_t len;
	uint32_t resrvd;
	uint64_t addr;
} __packed;

struct qat_alg_buf_list {
	uint64_t resrvd;
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);

/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);

#define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)

struct qat_auth_state {
	uint8_t data[MAX_AUTH_STATE_SIZE + 64];
} __aligned(64);

struct qat_alg_aead_ctx {
	struct qat_alg_cd *enc_cd;
	struct qat_alg_cd *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	uint8_t salt[AES_BLOCK_SIZE];
	spinlock_t lock;	/* protects qat_alg_aead_ctx struct */
};

struct qat_alg_ablkcipher_ctx {
	struct icp_qat_hw_cipher_algo_blk *enc_cd;
	struct icp_qat_hw_cipher_algo_blk *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	spinlock_t lock;	/* protects qat_alg_ablkcipher_ctx struct */
};

static int get_current_node(void)
{
	return cpu_data(current_thread_info()->cpu).phys_proc_id;
}

static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	};
	return -EFAULT;
}

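/*
 * Compute the HMAC ipad/opad partial hash states for the session's auth
 * key and store them, byte-swapped to big endian, in the hardware auth
 * block (hash->sha.state1) so the firmware can use them directly.
 */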
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	struct qat_auth_state auth_state;
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	struct sha1_state sha1;
	struct sha256_state sha256;
	struct sha512_state sha512;
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	uint8_t *ipad = auth_state.data;
	uint8_t *opad = ipad + block_size;
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memzero_explicit(auth_state.data, MAX_AUTH_STATE_SIZE + 64);
	shash->tfm = ctx->hash_tfm;
	shash->flags = 0x0;

	if (auth_keylen > block_size) {
		char buff[SHA512_BLOCK_SIZE];
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, buff);
		if (ret)
			return ret;

		memcpy(ipad, buff, digest_size);
		memcpy(opad, buff, digest_size);
		memzero_explicit(ipad + digest_size, block_size - digest_size);
		memzero_explicit(opad + digest_size, block_size - digest_size);
	} else {
		memcpy(ipad, auth_key, auth_keylen);
		memcpy(opad, auth_key, auth_keylen);
		memzero_explicit(ipad + auth_keylen, block_size - auth_keylen);
		memzero_explicit(opad + auth_keylen, block_size - auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ipad + i;
		char *opad_ptr = opad + i;
		*ipad_ptr ^= 0x36;
		*opad_ptr ^= 0x5C;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ipad, block_size);
	memzero_explicit(opad, block_size);
	return 0;
}

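/*
 * Fill in the request header fields common to every LA request built by
 * this driver: SGL pointer type, 64-bit content descriptor address,
 * 16-byte cipher IV field, no partials, no protocol, no state update.
 */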
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
					 int alg,
					 struct crypto_authenc_keys *keys)
{
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			  round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}

static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
					 int alg,
					 struct crypto_authenc_keys *keys)
{
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			  round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}

static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
					struct icp_qat_fw_la_bulk_req *req,
					struct icp_qat_hw_cipher_algo_blk *cd,
					const uint8_t *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

	memcpy(cd->aes.key, key, keylen);
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
}

static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
}

static int qat_alg_validate_key(int key_len, int *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
		break;
	case AES_KEYSIZE_192:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
		break;
	case AES_KEYSIZE_256:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx,
				      const uint8_t *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
		return -EFAULT;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(ctx, alg, &keys))
		goto error;

	if (qat_alg_aead_init_dec_session(ctx, alg, &keys))
		goto error;

	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
error:
	return -EFAULT;
}

static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
					    const uint8_t *key,
					    unsigned int keylen)
{
	int alg;

	if (qat_alg_validate_key(keylen, &alg))
		goto bad_key;

	qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen);
	qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen);
	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memzero_explicit(ctx->enc_cd, sizeof(*ctx->enc_cd));
		memzero_explicit(ctx->dec_cd, sizeof(*ctx->dec_cd));
		memzero_explicit(&ctx->enc_fw_req, sizeof(ctx->enc_fw_req));
		memzero_explicit(&ctx->dec_fw_req, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
	}
	spin_unlock(&ctx->lock);
	if (qat_alg_aead_init_sessions(ctx, key, keylen))
		goto out_free_all;

	return 0;

out_free_all:
	memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}

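/*
 * Release the DMA mappings and allocations set up by qat_alg_sgl_to_bufl().
 * For out-of-place requests only the data entries of the destination list
 * are unmapped, since the assoc/IV mappings are shared with the source list.
 */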
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	size_t sz_out = qat_req->buf.sz_out;
	int i;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* If out of place operation dma unmap only data */
		int bufless = blout->num_bufs - blout->num_mapped_bufs;

		for (i = bufless; i < blout->num_bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
		kfree(blout);
	}
}

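/*
 * Translate the assoc, src and dst scatterlists (plus an optional IV) into
 * the flat qat_alg_buf_list format, DMA-map each buffer and the lists
 * themselves, and record the results in qat_req->buf.  For out-of-place
 * requests a separate destination list is built that reuses the assoc and
 * IV mappings from the source list.
 */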
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *assoc,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout, uint8_t *iv,
			       uint8_t ivlen,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, bufs = 0, sg_nctr = 0;
	int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;

	bufl = kzalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err;

	for_each_sg(assoc, sg, assoc_n, i) {
		if (!sg->length)
			continue;
		bufl->bufers[bufs].addr = dma_map_single(dev,
							 sg_virt(sg),
							 sg->length,
							 DMA_BIDIRECTIONAL);
		bufl->bufers[bufs].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
			goto err;
		bufs++;
	}
	if (ivlen) {
		bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
							 DMA_BIDIRECTIONAL);
		bufl->bufers[bufs].len = ivlen;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
			goto err;
		bufs++;
	}

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr + bufs;

		if (!sg->length)
			continue;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr + bufs;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		n = sg_nents(sglout);
		sz_out = sizeof(struct qat_alg_buf_list) +
			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
		sg_nctr = 0;
		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
				       dev_to_node(&GET_DEV(inst->accel_dev)));
		if (unlikely(!buflout))
			goto err;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err;
		bufers = buflout->bufers;
		/* For out of place operation dma map only data and
		 * reuse assoc mapping and iv */
		for (i = 0; i < bufs; i++) {
			bufers[i].len = bufl->bufers[i].len;
			bufers[i].addr = bufl->bufers[i].addr;
		}
		for_each_sg(sglout, sg, n, i) {
			int y = sg_nctr + bufs;

			if (!sg->length)
				continue;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err;
			bufers[y].len = sg->length;
			sg_nctr++;
		}
		buflout->num_bufs = sg_nctr + bufs;
		buflout->num_mapped_bufs = sg_nctr;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
		qat_req->buf.sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
		qat_req->buf.sz_out = 0;
	}
	return 0;
err:
	dev_err(dev, "Failed to map buf for dma\n");
	sg_nctr = 0;
	for (i = 0; i < n + bufs; i++)
		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);

	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);
	if (sgl != sglout && buflout) {
		n = sg_nents(sglout);
		for (i = bufs; i < n + bufs; i++)
			if (!dma_mapping_error(dev, buflout->bufers[i].addr))
				dma_unmap_single(dev, buflout->bufers[i].addr,
						 buflout->bufers[i].len,
						 DMA_BIDIRECTIONAL);
		if (!dma_mapping_error(dev, bloutp))
			dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
		kfree(buflout);
	}
	return -ENOMEM;
}

static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				  struct qat_crypto_request *qat_req)
{
	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->aead_req;
	uint8_t stat_filed = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}

static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
					struct qat_crypto_request *qat_req)
{
	struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct ablkcipher_request *areq = qat_req->ablkcipher_req;
	uint8_t stat_filed = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EINVAL;
	areq->base.complete(&areq->base, res);
}

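/*
 * Response ring handler: recover the originating request from the opaque
 * data carried in the firmware response and run its completion callback.
 */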
void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
				(void *)(__force long)qat_resp->opaque_data;

	qat_req->cb(qat_resp, qat_req);
}

static int qat_alg_aead_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_crt(aead_tfm)->authsize;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
				  areq->iv, AES_BLOCK_SIZE, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = areq->cryptlen - digst_size;
	cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen +
				cipher_param->cipher_length + AES_BLOCK_SIZE;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
				     int enc_iv)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
				  iv, AES_BLOCK_SIZE, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	if (enc_iv) {
		cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
		cipher_param->cipher_offset = areq->assoclen;
	} else {
		memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
		cipher_param->cipher_length = areq->cryptlen;
		cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
	}
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_aead_enc(struct aead_request *areq)
{
	return qat_alg_aead_enc_internal(areq, areq->iv, 0);
}

static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	__be64 seq;

	memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
	       &seq, sizeof(uint64_t));
	return qat_alg_aead_enc_internal(&req->areq, req->giv, 1);
}

static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
				     const uint8_t *key,
				     unsigned int keylen)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memzero_explicit(ctx->enc_cd, sizeof(*ctx->enc_cd));
		memzero_explicit(ctx->dec_cd, sizeof(*ctx->dec_cd));
		memzero_explicit(&ctx->enc_fw_req, sizeof(ctx->enc_fw_req));
		memzero_explicit(&ctx->dec_fw_req, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
	}
	spin_unlock(&ctx->lock);
	if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen))
		goto out_free_all;

	return 0;

out_free_all:
	memzero_explicit(ctx->dec_cd, sizeof(*ctx->enc_cd));
	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memzero_explicit(ctx->enc_cd, sizeof(*ctx->dec_cd));
	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}

static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst,
				  NULL, 0, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst,
				  NULL, 0, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_aead_init(struct crypto_tfm *tfm,
			     enum icp_qat_hw_auth_algo hash,
			     const char *hash_name)
{
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	memzero_explicit(ctx, sizeof(*ctx));
	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return -EFAULT;
	spin_lock_init(&ctx->lock);
	ctx->qat_hash_alg = hash;
	tfm->crt_aead.reqsize = sizeof(struct aead_request) +
				sizeof(struct qat_crypto_request);
	ctx->tfm = tfm;
	return 0;
}

static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_aead_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!IS_ERR(ctx->hash_tfm))
		crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	memzero_explicit(ctx, sizeof(*ctx));
	spin_lock_init(&ctx->lock);
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
					sizeof(struct qat_crypto_request);
	ctx->tfm = tfm;
	return 0;
}

static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memzero_explicit(ctx->enc_cd,
				 sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memzero_explicit(ctx->dec_cd,
				 sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static struct crypto_alg qat_algs[] = { {
	.cra_name = "authenc(hmac(sha1),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha1",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_aead_sha1_init,
	.cra_exit = qat_alg_aead_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_aead_setkey,
			.decrypt = qat_alg_aead_dec,
			.encrypt = qat_alg_aead_enc,
			.givencrypt = qat_alg_aead_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
	},
}, {
	.cra_name = "authenc(hmac(sha256),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha256",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_aead_sha256_init,
	.cra_exit = qat_alg_aead_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_aead_setkey,
			.decrypt = qat_alg_aead_dec,
			.encrypt = qat_alg_aead_enc,
			.givencrypt = qat_alg_aead_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
	},
}, {
	.cra_name = "authenc(hmac(sha512),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha512",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_aead_sha512_init,
	.cra_exit = qat_alg_aead_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_aead_setkey,
			.decrypt = qat_alg_aead_dec,
			.encrypt = qat_alg_aead_enc,
			.givencrypt = qat_alg_aead_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
	},
}, {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "qat_aes_cbc",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
} };

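/*
 * Registration is reference counted on active_dev: the qat_algs array is
 * registered with the crypto API on the first qat_algs_register() call and
 * removed again when the matching final qat_algs_unregister() call drops
 * the count back to zero.
 */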
int qat_algs_register(void)
{
	if (atomic_add_return(1, &active_dev) == 1) {
		int i;

		for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
			qat_algs[i].cra_flags =
				(qat_algs[i].cra_type == &crypto_aead_type) ?
				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;

		return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
	}
	return 0;
}

int qat_algs_unregister(void)
{
	if (atomic_sub_return(1, &active_dev) == 0)
		return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
	return 0;
}

int qat_algs_init(void)
{
	atomic_set(&active_dev, 0);
	crypto_get_default_rng();
	return 0;
}

void qat_algs_exit(void)
{
	crypto_put_default_rng();
}