/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/rng.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#define QAT_AES_HW_CONFIG_ENC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

static atomic_t active_dev;

struct qat_alg_buf {
	uint32_t len;
	uint32_t resrvd;
	uint64_t addr;
} __packed;

struct qat_alg_buf_list {
	uint64_t resrvd;
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);

/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);

#define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)

struct qat_auth_state {
	uint8_t data[MAX_AUTH_STATE_SIZE + 64];
} __aligned(64);

struct qat_alg_session_ctx {
	struct qat_alg_cd *enc_cd;
	dma_addr_t enc_cd_paddr;
	struct qat_alg_cd *dec_cd;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
	struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	uint8_t salt[AES_BLOCK_SIZE];
	spinlock_t lock;	/* protects qat_alg_session_ctx struct */
};

static int get_current_node(void)
{
	return cpu_data(current_thread_info()->cpu).phys_proc_id;
}

static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
	return -EFAULT;
}

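/*
 * Precompute the inner and outer HMAC states for the session key:
 * the key is XORed with the ipad/opad constants, one block of each is
 * hashed with the software shash, and the exported partial digests are
 * byte-swapped into the hardware content descriptor so the accelerator
 * can complete the HMAC on its own.
 */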
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_session_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	struct qat_auth_state auth_state;
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	struct sha1_state sha1;
	struct sha256_state sha256;
	struct sha512_state sha512;
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	uint8_t *ipad = auth_state.data;
	uint8_t *opad = ipad + block_size;
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memzero_explicit(auth_state.data, MAX_AUTH_STATE_SIZE + 64);
	shash->tfm = ctx->hash_tfm;
	shash->flags = 0x0;

	if (auth_keylen > block_size) {
		char buff[SHA512_BLOCK_SIZE];
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, buff);
		if (ret)
			return ret;

		memcpy(ipad, buff, digest_size);
		memcpy(opad, buff, digest_size);
		memzero_explicit(ipad + digest_size, block_size - digest_size);
		memzero_explicit(opad + digest_size, block_size - digest_size);
	} else {
		memcpy(ipad, auth_key, auth_keylen);
		memcpy(opad, auth_key, auth_keylen);
		memzero_explicit(ipad + auth_keylen, block_size - auth_keylen);
		memzero_explicit(opad + auth_keylen, block_size - auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ipad + i;
		char *opad_ptr = opad + i;
		*ipad_ptr ^= 0x36;
		*opad_ptr ^= 0x5C;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ipad, block_size);
	memzero_explicit(opad, block_size);
	return 0;
}

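/* Fill in the request header fields that are common to the encrypt and
 * decrypt firmware request templates. */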
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

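/* Build the encrypt content descriptor and firmware request template.
 * The cipher slice runs first and its output is then authenticated
 * (ICP_QAT_FW_LA_CMD_CIPHER_HASH). */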
static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
				    int alg, struct crypto_authenc_keys *keys)
{
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}

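/* Build the decrypt content descriptor and firmware request template.
 * The auth slice checks the digest first and the cipher slice then
 * decrypts the payload (ICP_QAT_FW_LA_CMD_HASH_CIPHER). */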
static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
				    int alg, struct crypto_authenc_keys *keys)
{
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}

static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
				 const uint8_t *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
		return -EFAULT;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	switch (keys.enckeylen) {
	case AES_KEYSIZE_128:
		alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
		break;
	case AES_KEYSIZE_192:
		alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
		break;
	case AES_KEYSIZE_256:
		alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
		break;
	default:
		goto bad_key;
	}

	if (qat_alg_init_enc_session(ctx, alg, &keys))
		goto error;

	if (qat_alg_init_dec_session(ctx, alg, &keys))
		goto error;

	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
error:
	return -EFAULT;
}

static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
			  unsigned int keylen)
{
	struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
		memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
		memzero_explicit(&ctx->enc_fw_req_tmpl,
				 sizeof(struct icp_qat_fw_la_bulk_req));
		memzero_explicit(&ctx->dec_fw_req_tmpl,
				 sizeof(struct icp_qat_fw_la_bulk_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev,
						  sizeof(struct qat_alg_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev,
						  sizeof(struct qat_alg_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
	}
	spin_unlock(&ctx->lock);
	if (qat_alg_init_sessions(ctx, key, keylen))
		goto out_free_all;

	return 0;

out_free_all:
	memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}

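/* Unmap and free the scatterlist buffer tables built by
 * qat_alg_sgl_to_bufl() once a request has completed. */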
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	size_t sz_out = qat_req->buf.sz_out;
	int i;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* If out of place operation dma unmap only data */
		int bufless = blout->num_bufs - blout->num_mapped_bufs;

		for (i = bufless; i < blout->num_bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
		kfree(blout);
	}
}

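/* Flatten the assoc/src/dst scatterlists plus the IV into DMA-mapped
 * qat_alg_buf_list tables that the firmware can walk.  For out of place
 * operations a second table is built for the destination, reusing the
 * assoc and IV mappings from the source table. */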
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *assoc,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout, uint8_t *iv,
			       uint8_t ivlen,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, bufs = 0, sg_nctr = 0;
	int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;

	bufl = kzalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err;

	for_each_sg(assoc, sg, assoc_n, i) {
		if (!sg->length)
			continue;
		bufl->bufers[bufs].addr = dma_map_single(dev,
							 sg_virt(sg),
							 sg->length,
							 DMA_BIDIRECTIONAL);
		bufl->bufers[bufs].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
			goto err;
		bufs++;
	}
	if (ivlen) {
		bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
							 DMA_BIDIRECTIONAL);
		bufl->bufers[bufs].len = ivlen;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
			goto err;
		bufs++;
	}

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr + bufs;

		if (!sg->length)
			continue;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr + bufs;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		n = sg_nents(sglout);
		sz_out = sizeof(struct qat_alg_buf_list) +
			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
		sg_nctr = 0;
		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
				       dev_to_node(&GET_DEV(inst->accel_dev)));
		if (unlikely(!buflout))
			goto err;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err;
		bufers = buflout->bufers;
		/* For out of place operation dma map only data and
		 * reuse assoc mapping and iv */
		for (i = 0; i < bufs; i++) {
			bufers[i].len = bufl->bufers[i].len;
			bufers[i].addr = bufl->bufers[i].addr;
		}
		for_each_sg(sglout, sg, n, i) {
			int y = sg_nctr + bufs;

			if (!sg->length)
				continue;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err;
			bufers[y].len = sg->length;
			sg_nctr++;
		}
		buflout->num_bufs = sg_nctr + bufs;
		buflout->num_mapped_bufs = sg_nctr;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
		qat_req->buf.sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
		qat_req->buf.sz_out = 0;
	}
	return 0;
err:
	dev_err(dev, "Failed to map buf for dma\n");
	sg_nctr = 0;
	for (i = 0; i < n + bufs; i++)
		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);

	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);
	if (sgl != sglout && buflout) {
		n = sg_nents(sglout);
		for (i = bufs; i < n + bufs; i++)
			if (!dma_mapping_error(dev, buflout->bufers[i].addr))
				dma_unmap_single(dev, buflout->bufers[i].addr,
						 buflout->bufers[i].len,
						 DMA_BIDIRECTIONAL);
		if (!dma_mapping_error(dev, bloutp))
			dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
		kfree(buflout);
	}
	return -ENOMEM;
}

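/* Response handler called from the transport layer for each firmware
 * response received on the symmetric crypto ring. */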
void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
				(void *)(__force long)qat_resp->opaque_data;
	struct qat_alg_session_ctx *ctx = qat_req->ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->areq;
	uint8_t stat_filed = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}

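/* AEAD decrypt entry point: map the request buffers, fill in a request
 * based on the decrypt template and post it to the firmware ring. */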
static int qat_alg_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_crt(aead_tfm)->authsize;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
				  areq->iv, AES_BLOCK_SIZE, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req_tmpl;
	qat_req->ctx = ctx;
	qat_req->areq = areq;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = areq->cryptlen - digst_size;
	cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen +
				cipher_param->cipher_length + AES_BLOCK_SIZE;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

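/* Common AEAD encrypt path.  When enc_iv is set the IV supplied by
 * qat_alg_genivenc() is encrypted along with the payload; otherwise the
 * caller-provided IV is passed in the request parameters. */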
static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
				int enc_iv)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
				  iv, AES_BLOCK_SIZE, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req_tmpl;
	qat_req->ctx = ctx;
	qat_req->areq = areq;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	if (enc_iv) {
		cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
		cipher_param->cipher_offset = areq->assoclen;
	} else {
		memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
		cipher_param->cipher_length = areq->cryptlen;
		cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
	}
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_enc(struct aead_request *areq)
{
	return qat_alg_enc_internal(areq, areq->iv, 0);
}

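/* givencrypt: derive the IV from the per-session salt and the request
 * sequence number, then encrypt it together with the payload. */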
static int qat_alg_genivenc(struct aead_givcrypt_request *req)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	__be64 seq;

	memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
	       &seq, sizeof(uint64_t));
	return qat_alg_enc_internal(&req->areq, req->giv, 1);
}

static int qat_alg_init(struct crypto_tfm *tfm,
			enum icp_qat_hw_auth_algo hash, const char *hash_name)
{
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);

	memzero_explicit(ctx, sizeof(*ctx));
	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return -EFAULT;
	spin_lock_init(&ctx->lock);
	ctx->qat_hash_alg = hash;
	tfm->crt_aead.reqsize = sizeof(struct aead_request) +
				sizeof(struct qat_crypto_request);
	ctx->tfm = tfm;
	return 0;
}

static int qat_alg_sha1_init(struct crypto_tfm *tfm)
{
	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_sha256_init(struct crypto_tfm *tfm)
{
	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_sha512_init(struct crypto_tfm *tfm)
{
	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!IS_ERR(ctx->hash_tfm))
		crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}

static struct crypto_alg qat_algs[] = { {
	.cra_name = "authenc(hmac(sha1),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha1",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_sha1_init,
	.cra_exit = qat_alg_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_setkey,
			.decrypt = qat_alg_dec,
			.encrypt = qat_alg_enc,
			.givencrypt = qat_alg_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
	},
}, {
	.cra_name = "authenc(hmac(sha256),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha256",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_sha256_init,
	.cra_exit = qat_alg_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_setkey,
			.decrypt = qat_alg_dec,
			.encrypt = qat_alg_enc,
			.givencrypt = qat_alg_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
	},
}, {
	.cra_name = "authenc(hmac(sha512),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha512",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_sha512_init,
	.cra_exit = qat_alg_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_setkey,
			.decrypt = qat_alg_dec,
			.encrypt = qat_alg_enc,
			.givencrypt = qat_alg_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
	},
} };

int qat_algs_register(void)
{
	if (atomic_add_return(1, &active_dev) == 1) {
		int i;

		for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
			qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD |
						CRYPTO_ALG_ASYNC;
		return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
	}
	return 0;
}

int qat_algs_unregister(void)
{
	if (atomic_sub_return(1, &active_dev) == 0)
		return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
	return 0;
}

int qat_algs_init(void)
{
	atomic_set(&active_dev, 0);
	crypto_get_default_rng();
	return 0;
}

void qat_algs_exit(void)
{
	crypto_put_default_rng();
}