/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
        ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
                                       ICP_QAT_HW_CIPHER_NO_CONVERT, \
                                       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
        ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
                                       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
                                       ICP_QAT_HW_CIPHER_DECRYPT)

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

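/*
 * Firmware scatter-gather descriptors: each qat_alg_buf is one {len, addr}
 * entry and qat_alg_buf_list is the flat, 64-byte-aligned table of them
 * that is DMA-mapped and handed to the device by physical address.
 * "bufers" is the historical spelling of the flexible array member.
 */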
struct qat_alg_buf {
        uint32_t len;
        uint32_t resrvd;
        uint64_t addr;
} __packed;

struct qat_alg_buf_list {
        uint64_t resrvd;
        uint32_t num_bufs;
        uint32_t num_mapped_bufs;
        struct qat_alg_buf bufers[];
} __packed __aligned(64);

/* Common content descriptor */
struct qat_alg_cd {
        union {
                struct qat_enc { /* Encrypt content desc */
                        struct icp_qat_hw_cipher_algo_blk cipher;
                        struct icp_qat_hw_auth_algo_blk hash;
                } qat_enc_cd;
                struct qat_dec { /* Decrypt content desc */
                        struct icp_qat_hw_auth_algo_blk hash;
                        struct icp_qat_hw_cipher_algo_blk cipher;
                } qat_dec_cd;
        };
} __aligned(64);

struct qat_alg_aead_ctx {
        struct qat_alg_cd *enc_cd;
        struct qat_alg_cd *dec_cd;
        dma_addr_t enc_cd_paddr;
        dma_addr_t dec_cd_paddr;
        struct icp_qat_fw_la_bulk_req enc_fw_req;
        struct icp_qat_fw_la_bulk_req dec_fw_req;
        struct crypto_shash *hash_tfm;
        enum icp_qat_hw_auth_algo qat_hash_alg;
        struct qat_crypto_instance *inst;
};

struct qat_alg_ablkcipher_ctx {
        struct icp_qat_hw_cipher_algo_blk *enc_cd;
        struct icp_qat_hw_cipher_algo_blk *dec_cd;
        dma_addr_t enc_cd_paddr;
        dma_addr_t dec_cd_paddr;
        struct icp_qat_fw_la_bulk_req enc_fw_req;
        struct icp_qat_fw_la_bulk_req dec_fw_req;
        struct qat_crypto_instance *inst;
        struct crypto_tfm *tfm;
        spinlock_t lock;        /* protects qat_alg_ablkcipher_ctx struct */
};

static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
        switch (qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                return ICP_QAT_HW_SHA1_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                return ICP_QAT_HW_SHA256_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                return ICP_QAT_HW_SHA512_STATE1_SZ;
        default:
                return -EFAULT;
        }
        return -EFAULT;
}

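/*
 * Precompute the HMAC inner and outer partial hashes: hash one block of
 * key XOR ipad (and opad), export the intermediate state and store it
 * big-endian in the content descriptor, so the device can finish the HMAC
 * without ever seeing the raw authentication key.
 */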
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
                                  struct qat_alg_aead_ctx *ctx,
                                  const uint8_t *auth_key,
                                  unsigned int auth_keylen)
{
        SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
        struct sha1_state sha1;
        struct sha256_state sha256;
        struct sha512_state sha512;
        int block_size = crypto_shash_blocksize(ctx->hash_tfm);
        int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
        char ipad[block_size];
        char opad[block_size];
        __be32 *hash_state_out;
        __be64 *hash512_state_out;
        int i, offset;

        memset(ipad, 0, block_size);
        memset(opad, 0, block_size);
        shash->tfm = ctx->hash_tfm;
        shash->flags = 0x0;

        if (auth_keylen > block_size) {
                int ret = crypto_shash_digest(shash, auth_key,
                                              auth_keylen, ipad);
                if (ret)
                        return ret;

                memcpy(opad, ipad, digest_size);
        } else {
                memcpy(ipad, auth_key, auth_keylen);
                memcpy(opad, auth_key, auth_keylen);
        }

        for (i = 0; i < block_size; i++) {
                char *ipad_ptr = ipad + i;
                char *opad_ptr = opad + i;
                *ipad_ptr ^= 0x36;
                *opad_ptr ^= 0x5C;
        }

        if (crypto_shash_init(shash))
                return -EFAULT;

        if (crypto_shash_update(shash, ipad, block_size))
                return -EFAULT;

        hash_state_out = (__be32 *)hash->sha.state1;
        hash512_state_out = (__be64 *)hash_state_out;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (crypto_shash_export(shash, &sha1))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha1.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (crypto_shash_export(shash, &sha256))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha256.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (crypto_shash_export(shash, &sha512))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
                        *hash512_state_out = cpu_to_be64(*(sha512.state + i));
                break;
        default:
                return -EFAULT;
        }

        if (crypto_shash_init(shash))
                return -EFAULT;

        if (crypto_shash_update(shash, opad, block_size))
                return -EFAULT;

        offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
        hash_state_out = (__be32 *)(hash->sha.state1 + offset);
        hash512_state_out = (__be64 *)hash_state_out;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (crypto_shash_export(shash, &sha1))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha1.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (crypto_shash_export(shash, &sha256))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
                        *hash_state_out = cpu_to_be32(*(sha256.state + i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (crypto_shash_export(shash, &sha512))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
                        *hash512_state_out = cpu_to_be64(*(sha512.state + i));
                break;
        default:
                return -EFAULT;
        }
        memzero_explicit(ipad, block_size);
        memzero_explicit(opad, block_size);
        return 0;
}

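/*
 * Fill the header fields shared by every lookaside (LA) request: 64-bit
 * content descriptor addressing, SGL source/destination pointers, no
 * partial packets and no state updates between requests.
 */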
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
        header->hdr_flags =
                ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
        header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
        header->comn_req_flags =
                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
                                            QAT_COMN_PTR_TYPE_SGL);
        ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
                                  ICP_QAT_FW_LA_PARTIAL_NONE);
        ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
        ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                ICP_QAT_FW_LA_NO_PROTO);
        ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
                                       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

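/*
 * Encrypt-direction session setup.  The content descriptor lays out the
 * cipher config (and AES key) first, then the hash setup, and the slice
 * chain runs CIPHER -> AUTH, so the digest is computed over the freshly
 * produced ciphertext and appended to it (DIGEST_IN_BUFFER).
 */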
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
                                         int alg,
                                         struct crypto_authenc_keys *keys,
                                         int mode)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
        unsigned int digestsize = crypto_aead_authsize(aead_tfm);
        struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
        struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
        struct icp_qat_hw_auth_algo_blk *hash =
                (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
                sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
        struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

        /* CD setup */
        cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
        hash->sha.inner_setup.auth_config.config =
                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                             ctx->qat_hash_alg, digestsize);
        hash->sha.inner_setup.auth_counter.counter =
                cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

        if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
                return -EFAULT;

        /* Request setup */
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
        ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
        ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_RET_AUTH_RES);
        ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
        cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
        cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

        /* Cipher CD config setup */
        cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
        cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cipher_cd_ctrl->cipher_cfg_offset = 0;
        ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                hash_cd_ctrl->inner_state1_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
                hash_cd_ctrl->inner_state2_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
                break;
        default:
                break;
        }
        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
        ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
        return 0;
}

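/*
 * Decrypt-direction session setup, the mirror image of the above: the
 * hash setup comes first, the slices run AUTH -> CIPHER, and the device
 * compares the ICV itself (CMP_AUTH_RES) rather than returning it.
 * KEY_CONVERT in QAT_AES_HW_CONFIG_DEC requests the hardware key
 * conversion needed for the AES decrypt direction.
 */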
static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
                                         int alg,
                                         struct crypto_authenc_keys *keys,
                                         int mode)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
        unsigned int digestsize = crypto_aead_authsize(aead_tfm);
        struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
        struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
        struct icp_qat_hw_cipher_algo_blk *cipher =
                (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
                sizeof(struct icp_qat_hw_auth_setup) +
                roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
        struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
        struct icp_qat_fw_la_auth_req_params *auth_param =
                (struct icp_qat_fw_la_auth_req_params *)
                ((char *)&req_tmpl->serv_specif_rqpars +
                sizeof(struct icp_qat_fw_la_cipher_req_params));

        /* CD setup */
        cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
        memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
        hash->sha.inner_setup.auth_config.config =
                ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                             ctx->qat_hash_alg,
                                             digestsize);
        hash->sha.inner_setup.auth_counter.counter =
                cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

        if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
                return -EFAULT;

        /* Request setup */
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
        ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
        ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
        ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                   ICP_QAT_FW_LA_CMP_AUTH_RES);
        cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
        cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

        /* Cipher CD config setup */
        cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
        cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cipher_cd_ctrl->cipher_cfg_offset =
                (sizeof(struct icp_qat_hw_auth_setup) +
                 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
        ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = 0;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;

        switch (ctx->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                hash_cd_ctrl->inner_state1_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
                hash_cd_ctrl->inner_state2_sz =
                        round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
                hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
                break;
        default:
                break;
        }

        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
        auth_param->auth_res_sz = digestsize;
        ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
        ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        return 0;
}

static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
                                        struct icp_qat_fw_la_bulk_req *req,
                                        struct icp_qat_hw_cipher_algo_blk *cd,
                                        const uint8_t *key, unsigned int keylen)
{
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

        memcpy(cd->aes.key, key, keylen);
        qat_alg_init_common_hdr(header);
        header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
        cd_pars->u.s.content_desc_params_sz =
                                sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
        /* Cipher CD config setup */
        cd_ctrl->cipher_key_sz = keylen >> 3;
        cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
        cd_ctrl->cipher_cfg_offset = 0;
        ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
        ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}

static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
                                        int alg, const uint8_t *key,
                                        unsigned int keylen, int mode)
{
        struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
        struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

        qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
        cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
        enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}

static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
                                        int alg, const uint8_t *key,
                                        unsigned int keylen, int mode)
{
        struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
        struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

        qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
        cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

        /* CTR decryption is the same keystream XOR as encryption */
        if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
                dec_cd->aes.cipher_config.val =
                                        QAT_AES_HW_CONFIG_DEC(alg, mode);
        else
                dec_cd->aes.cipher_config.val =
                                        QAT_AES_HW_CONFIG_ENC(alg, mode);
}

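/*
 * Map a key length onto the hardware AES algorithm id.  XTS keys carry
 * two full AES keys (data and tweak), hence the doubled lengths, and only
 * the 128- and 256-bit variants exist in XTS mode.
 */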
static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
        if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
                switch (key_len) {
                case AES_KEYSIZE_128:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                        break;
                case AES_KEYSIZE_192:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
                        break;
                case AES_KEYSIZE_256:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
                        break;
                default:
                        return -EINVAL;
                }
        } else {
                switch (key_len) {
                case AES_KEYSIZE_128 << 1:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                        break;
                case AES_KEYSIZE_256 << 1:
                        *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
                        break;
                default:
                        return -EINVAL;
                }
        }
        return 0;
}

static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
                                      unsigned int keylen, int mode)
{
        struct crypto_authenc_keys keys;
        int alg;

        if (crypto_authenc_extractkeys(&keys, key, keylen))
                goto bad_key;

        if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
                goto bad_key;

        if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
                goto error;

        if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
                goto error;

        return 0;
bad_key:
        crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
error:
        return -EFAULT;
}

static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
                                            const uint8_t *key,
                                            unsigned int keylen,
                                            int mode)
{
        int alg;

        if (qat_alg_validate_key(keylen, &alg, mode))
                goto bad_key;

        qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
        qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
        return 0;
bad_key:
        crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}

static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
                               unsigned int keylen)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev;

        if (ctx->enc_cd) {
                /* rekeying */
                dev = &GET_DEV(ctx->inst->accel_dev);
                memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
                memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
                memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
                memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
        } else {
                /* new key */
                int node = get_current_node();
                struct qat_crypto_instance *inst =
                                qat_crypto_get_instance_node(node);
                if (!inst)
                        return -EINVAL;

                dev = &GET_DEV(inst->accel_dev);
                ctx->inst = inst;
                ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
                                                  &ctx->enc_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->enc_cd)
                        return -ENOMEM;
                ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
                                                  &ctx->dec_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->dec_cd)
                        goto out_free_enc;
        }
        if (qat_alg_aead_init_sessions(tfm, key, keylen,
                                       ICP_QAT_HW_CIPHER_CBC_MODE))
                goto out_free_all;

        return 0;

out_free_all:
        memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
        dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                          ctx->dec_cd, ctx->dec_cd_paddr);
        ctx->dec_cd = NULL;
out_free_enc:
        memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
        dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                          ctx->enc_cd, ctx->enc_cd_paddr);
        ctx->enc_cd = NULL;
        return -ENOMEM;
}

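/*
 * Release everything qat_alg_sgl_to_bufl() set up: unmap each data buffer
 * and the buffer list itself, and for out-of-place requests also the
 * destination list, skipping entries that were never mapped.
 */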
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
                              struct qat_crypto_request *qat_req)
{
        struct device *dev = &GET_DEV(inst->accel_dev);
        struct qat_alg_buf_list *bl = qat_req->buf.bl;
        struct qat_alg_buf_list *blout = qat_req->buf.blout;
        dma_addr_t blp = qat_req->buf.blp;
        dma_addr_t blpout = qat_req->buf.bloutp;
        size_t sz = qat_req->buf.sz;
        size_t sz_out = qat_req->buf.sz_out;
        int i;

        for (i = 0; i < bl->num_bufs; i++)
                dma_unmap_single(dev, bl->bufers[i].addr,
                                 bl->bufers[i].len, DMA_BIDIRECTIONAL);

        dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
        kfree(bl);
        if (blp != blpout) {
                /* If out of place operation dma unmap only data */
                int bufless = blout->num_bufs - blout->num_mapped_bufs;

                for (i = bufless; i < blout->num_bufs; i++) {
                        dma_unmap_single(dev, blout->bufers[i].addr,
                                         blout->bufers[i].len,
                                         DMA_BIDIRECTIONAL);
                }
                dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
                kfree(blout);
        }
}

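/*
 * Translate the source and destination scatterlists into the flat
 * firmware buffer-list format, DMA-mapping every non-empty entry.  An
 * out-of-place request gets a second list for the destination; in-place
 * requests simply reuse the source list.
 */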
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
                               struct scatterlist *sgl,
                               struct scatterlist *sglout,
                               struct qat_crypto_request *qat_req)
{
        struct device *dev = &GET_DEV(inst->accel_dev);
        int i, sg_nctr = 0;
        int n = sg_nents(sgl);
        struct qat_alg_buf_list *bufl;
        struct qat_alg_buf_list *buflout = NULL;
        dma_addr_t blp;
        dma_addr_t bloutp = 0;
        struct scatterlist *sg;
        size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
                        ((1 + n) * sizeof(struct qat_alg_buf));

        if (unlikely(!n))
                return -EINVAL;

        bufl = kzalloc_node(sz, GFP_ATOMIC,
                            dev_to_node(&GET_DEV(inst->accel_dev)));
        if (unlikely(!bufl))
                return -ENOMEM;

        blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, blp)))
                goto err;

        for_each_sg(sgl, sg, n, i) {
                int y = sg_nctr;

                if (!sg->length)
                        continue;

                bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
                                                      sg->length,
                                                      DMA_BIDIRECTIONAL);
                bufl->bufers[y].len = sg->length;
                if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
                        goto err;
                sg_nctr++;
        }
        bufl->num_bufs = sg_nctr;
        qat_req->buf.bl = bufl;
        qat_req->buf.blp = blp;
        qat_req->buf.sz = sz;
        /* Handle out of place operation */
        if (sgl != sglout) {
                struct qat_alg_buf *bufers;

                n = sg_nents(sglout);
                sz_out = sizeof(struct qat_alg_buf_list) +
                        ((1 + n) * sizeof(struct qat_alg_buf));
                sg_nctr = 0;
                buflout = kzalloc_node(sz_out, GFP_ATOMIC,
                                       dev_to_node(&GET_DEV(inst->accel_dev)));
                if (unlikely(!buflout))
                        goto err;
                bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, bloutp)))
                        goto err;
                bufers = buflout->bufers;
                for_each_sg(sglout, sg, n, i) {
                        int y = sg_nctr;

                        if (!sg->length)
                                continue;

                        bufers[y].addr = dma_map_single(dev, sg_virt(sg),
                                                        sg->length,
                                                        DMA_BIDIRECTIONAL);
                        if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
                                goto err;
                        bufers[y].len = sg->length;
                        sg_nctr++;
                }
                buflout->num_bufs = sg_nctr;
                buflout->num_mapped_bufs = sg_nctr;
                qat_req->buf.blout = buflout;
                qat_req->buf.bloutp = bloutp;
                qat_req->buf.sz_out = sz_out;
        } else {
                /* Otherwise set the src and dst to the same address */
                qat_req->buf.bloutp = qat_req->buf.blp;
                qat_req->buf.sz_out = 0;
        }
        return 0;
err:
        dev_err(dev, "Failed to map buf for dma\n");
        sg_nctr = 0;
        for (i = 0; i < n; i++)
                if (!dma_mapping_error(dev, bufl->bufers[i].addr))
                        dma_unmap_single(dev, bufl->bufers[i].addr,
                                         bufl->bufers[i].len,
                                         DMA_BIDIRECTIONAL);

        if (!dma_mapping_error(dev, blp))
                dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
        kfree(bufl);
        if (sgl != sglout && buflout) {
                n = sg_nents(sglout);
                for (i = 0; i < n; i++)
                        if (!dma_mapping_error(dev, buflout->bufers[i].addr))
                                dma_unmap_single(dev, buflout->bufers[i].addr,
                                                 buflout->bufers[i].len,
                                                 DMA_BIDIRECTIONAL);
                if (!dma_mapping_error(dev, bloutp))
                        dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
                kfree(buflout);
        }
        return -ENOMEM;
}

static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
                                  struct qat_crypto_request *qat_req)
{
        struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
        struct qat_crypto_instance *inst = ctx->inst;
        struct aead_request *areq = qat_req->aead_req;
        uint8_t stat_field = qat_resp->comn_resp.comn_status;
        int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

        qat_alg_free_bufl(inst, qat_req);
        if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
                res = -EBADMSG;
        areq->base.complete(&areq->base, res);
}

static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
                                        struct qat_crypto_request *qat_req)
{
        struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
        struct qat_crypto_instance *inst = ctx->inst;
        struct ablkcipher_request *areq = qat_req->ablkcipher_req;
        uint8_t stat_field = qat_resp->comn_resp.comn_status;
        int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

        qat_alg_free_bufl(inst, qat_req);
        if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
                res = -EINVAL;
        areq->base.complete(&areq->base, res);
}

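/*
 * Ring callback: the request pointer made the round trip through the
 * firmware in opaque_data and is recovered here to dispatch the
 * per-request completion handler (AEAD or ablkcipher).
 */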
void qat_alg_callback(void *resp)
{
        struct icp_qat_fw_la_resp *qat_resp = resp;
        struct qat_crypto_request *qat_req =
                                (void *)(__force long)qat_resp->opaque_data;

        qat_req->cb(qat_resp, qat_req);
}

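/*
 * AEAD decrypt: the cipher region starts after the associated data and
 * excludes the trailing digest (cryptlen - authsize); the device verifies
 * the ICV itself, so a mismatch surfaces as -EBADMSG in the callback.
 */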
static int qat_alg_aead_dec(struct aead_request *areq)
{
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
        struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = aead_request_ctx(areq);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int digest_size = crypto_aead_authsize(aead_tfm);
        int ret, ctr = 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->dec_fw_req;
        qat_req->aead_ctx = ctx;
        qat_req->aead_req = areq;
        qat_req->cb = qat_aead_alg_callback;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = areq->cryptlen - digest_size;
        cipher_param->cipher_offset = areq->assoclen;
        memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
        auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
        auth_param->auth_off = 0;
        auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_aead_enc(struct aead_request *areq)
{
        struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
        struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
        struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = aead_request_ctx(areq);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
        struct icp_qat_fw_la_bulk_req *msg;
        uint8_t *iv = areq->iv;
        int ret, ctr = 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->enc_fw_req;
        qat_req->aead_ctx = ctx;
        qat_req->aead_req = areq;
        qat_req->cb = qat_aead_alg_callback;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

        memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
        cipher_param->cipher_length = areq->cryptlen;
        cipher_param->cipher_offset = areq->assoclen;

        auth_param->auth_off = 0;
        auth_param->auth_len = areq->assoclen + areq->cryptlen;

        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

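/*
 * Common setkey for the three ablkcipher modes (the wrappers below pin
 * the mode).  The first key allocates DMA-coherent content descriptors on
 * the node of the chosen instance; rekeying just clears and rebuilds them.
 * ctx->lock serializes the allocation against concurrent setkey calls.
 */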
static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
                                     const u8 *key, unsigned int keylen,
                                     int mode)
{
        struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        struct device *dev;

        spin_lock(&ctx->lock);
        if (ctx->enc_cd) {
                /* rekeying */
                dev = &GET_DEV(ctx->inst->accel_dev);
                memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
                memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
                memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
                memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
        } else {
                /* new key */
                int node = get_current_node();
                struct qat_crypto_instance *inst =
                                qat_crypto_get_instance_node(node);
                if (!inst) {
                        spin_unlock(&ctx->lock);
                        return -EINVAL;
                }

                dev = &GET_DEV(inst->accel_dev);
                ctx->inst = inst;
                ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
                                                  &ctx->enc_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->enc_cd) {
                        spin_unlock(&ctx->lock);
                        return -ENOMEM;
                }
                ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
                                                  &ctx->dec_cd_paddr,
                                                  GFP_ATOMIC);
                if (!ctx->dec_cd) {
                        spin_unlock(&ctx->lock);
                        goto out_free_enc;
                }
        }
        spin_unlock(&ctx->lock);
        if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode))
                goto out_free_all;

        return 0;

out_free_all:
        memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
        dma_free_coherent(dev, sizeof(*ctx->dec_cd),
                          ctx->dec_cd, ctx->dec_cd_paddr);
        ctx->dec_cd = NULL;
out_free_enc:
        memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
        dma_free_coherent(dev, sizeof(*ctx->enc_cd),
                          ctx->enc_cd, ctx->enc_cd_paddr);
        ctx->enc_cd = NULL;
        return -ENOMEM;
}

static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
                                         const u8 *key, unsigned int keylen)
{
        return qat_alg_ablkcipher_setkey(tfm, key, keylen,
                                         ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
                                         const u8 *key, unsigned int keylen)
{
        return qat_alg_ablkcipher_setkey(tfm, key, keylen,
                                         ICP_QAT_HW_CIPHER_CTR_MODE);
}

static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
                                         const u8 *key, unsigned int keylen)
{
        return qat_alg_ablkcipher_setkey(tfm, key, keylen,
                                         ICP_QAT_HW_CIPHER_XTS_MODE);
}

static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int ret, ctr = 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->enc_fw_req;
        qat_req->ablkcipher_ctx = ctx;
        qat_req->ablkcipher_req = req;
        qat_req->cb = qat_ablkcipher_alg_callback;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = req->nbytes;
        cipher_param->cipher_offset = 0;
        memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_bulk_req *msg;
        int ret, ctr = 0;

        ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
        if (unlikely(ret))
                return ret;

        msg = &qat_req->req;
        *msg = ctx->dec_fw_req;
        qat_req->ablkcipher_ctx = ctx;
        qat_req->ablkcipher_req = req;
        qat_req->cb = qat_ablkcipher_alg_callback;
        qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
        qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
        qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
        cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
        cipher_param->cipher_length = req->nbytes;
        cipher_param->cipher_offset = 0;
        memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
        do {
                ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN) {
                qat_alg_free_bufl(ctx->inst, qat_req);
                return -EBUSY;
        }
        return -EINPROGRESS;
}

static int qat_alg_aead_init(struct crypto_aead *tfm,
                             enum icp_qat_hw_auth_algo hash,
                             const char *hash_name)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

        ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
        if (IS_ERR(ctx->hash_tfm))
                return PTR_ERR(ctx->hash_tfm);
        ctx->qat_hash_alg = hash;
        crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
        return 0;
}

static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
        return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
        struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev;

        crypto_free_shash(ctx->hash_tfm);

        if (!inst)
                return;

        dev = &GET_DEV(inst->accel_dev);
        if (ctx->enc_cd) {
                memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
                dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                                  ctx->enc_cd, ctx->enc_cd_paddr);
        }
        if (ctx->dec_cd) {
                memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
                dma_free_coherent(dev, sizeof(struct qat_alg_cd),
                                  ctx->dec_cd, ctx->dec_cd_paddr);
        }
        qat_crypto_put_instance(inst);
}

static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
{
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        spin_lock_init(&ctx->lock);
        tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
        ctx->tfm = tfm;
        return 0;
}

static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
{
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_crypto_instance *inst = ctx->inst;
        struct device *dev;

        if (!inst)
                return;

        dev = &GET_DEV(inst->accel_dev);
        if (ctx->enc_cd) {
                memset(ctx->enc_cd, 0,
                       sizeof(struct icp_qat_hw_cipher_algo_blk));
                dma_free_coherent(dev,
                                  sizeof(struct icp_qat_hw_cipher_algo_blk),
                                  ctx->enc_cd, ctx->enc_cd_paddr);
        }
        if (ctx->dec_cd) {
                memset(ctx->dec_cd, 0,
                       sizeof(struct icp_qat_hw_cipher_algo_blk));
                dma_free_coherent(dev,
                                  sizeof(struct icp_qat_hw_cipher_algo_blk),
                                  ctx->dec_cd, ctx->dec_cd_paddr);
        }
        qat_crypto_put_instance(inst);
}

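/*
 * The AEAD templates registered with the crypto API.  A rough sketch of
 * how a kernel-side user reaches one of these offloads (hypothetical
 * caller, error handling elided):
 *
 *	struct crypto_aead *tfm =
 *		crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	crypto_aead_setkey(tfm, key, keylen);    // authenc()-packed key
 *	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *	// ...then build and submit an aead_request; completion is async
 *
 * The 4001 priority ranks these above the generic software
 * implementations whenever a QAT device is present.
 */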
static struct aead_alg qat_aeads[] = { {
        .base = {
                .cra_name = "authenc(hmac(sha1),cbc(aes))",
                .cra_driver_name = "qat_aes_cbc_hmac_sha1",
                .cra_priority = 4001,
                .cra_flags = CRYPTO_ALG_ASYNC,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
                .cra_module = THIS_MODULE,
        },
        .init = qat_alg_aead_sha1_init,
        .exit = qat_alg_aead_exit,
        .setkey = qat_alg_aead_setkey,
        .decrypt = qat_alg_aead_dec,
        .encrypt = qat_alg_aead_enc,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA1_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha256),cbc(aes))",
                .cra_driver_name = "qat_aes_cbc_hmac_sha256",
                .cra_priority = 4001,
                .cra_flags = CRYPTO_ALG_ASYNC,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
                .cra_module = THIS_MODULE,
        },
        .init = qat_alg_aead_sha256_init,
        .exit = qat_alg_aead_exit,
        .setkey = qat_alg_aead_setkey,
        .decrypt = qat_alg_aead_dec,
        .encrypt = qat_alg_aead_enc,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA256_DIGEST_SIZE,
}, {
        .base = {
                .cra_name = "authenc(hmac(sha512),cbc(aes))",
                .cra_driver_name = "qat_aes_cbc_hmac_sha512",
                .cra_priority = 4001,
                .cra_flags = CRYPTO_ALG_ASYNC,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
                .cra_module = THIS_MODULE,
        },
        .init = qat_alg_aead_sha512_init,
        .exit = qat_alg_aead_exit,
        .setkey = qat_alg_aead_setkey,
        .decrypt = qat_alg_aead_dec,
        .encrypt = qat_alg_aead_enc,
        .ivsize = AES_BLOCK_SIZE,
        .maxauthsize = SHA512_DIGEST_SIZE,
} };

static struct crypto_alg qat_algs[] = { {
        .cra_name = "cbc(aes)",
        .cra_driver_name = "qat_aes_cbc",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_ablkcipher_init,
        .cra_exit = qat_alg_ablkcipher_exit,
        .cra_u = {
                .ablkcipher = {
                        .setkey = qat_alg_ablkcipher_cbc_setkey,
                        .decrypt = qat_alg_ablkcipher_decrypt,
                        .encrypt = qat_alg_ablkcipher_encrypt,
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
        },
}, {
        .cra_name = "ctr(aes)",
        .cra_driver_name = "qat_aes_ctr",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_ablkcipher_init,
        .cra_exit = qat_alg_ablkcipher_exit,
        .cra_u = {
                .ablkcipher = {
                        .setkey = qat_alg_ablkcipher_ctr_setkey,
                        .decrypt = qat_alg_ablkcipher_decrypt,
                        .encrypt = qat_alg_ablkcipher_encrypt,
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
        },
}, {
        .cra_name = "xts(aes)",
        .cra_driver_name = "qat_aes_xts",
        .cra_priority = 4001,
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
        .cra_alignmask = 0,
        .cra_type = &crypto_ablkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = qat_alg_ablkcipher_init,
        .cra_exit = qat_alg_ablkcipher_exit,
        .cra_u = {
                .ablkcipher = {
                        .setkey = qat_alg_ablkcipher_xts_setkey,
                        .decrypt = qat_alg_ablkcipher_decrypt,
                        .encrypt = qat_alg_ablkcipher_encrypt,
                        .min_keysize = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize = 2 * AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
        },
} };

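/*
 * Registration is reference-counted across accelerator devices under
 * algs_lock: only the first device coming up registers the algorithms,
 * and only the last one going away unregisters them.
 */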
int qat_algs_register(void)
{
        int ret = 0, i;

        mutex_lock(&algs_lock);
        if (++active_devs != 1)
                goto unlock;

        for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
                qat_algs[i].cra_flags =
                        CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;

        ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
        if (ret)
                goto unlock;

        for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
                qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;

        ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
        if (ret)
                goto unreg_algs;

unlock:
        mutex_unlock(&algs_lock);
        return ret;

unreg_algs:
        crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
        goto unlock;
}

void qat_algs_unregister(void)
{
        mutex_lock(&algs_lock);
        if (--active_devs != 0)
                goto unlock;

        crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
        crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));

unlock:
        mutex_unlock(&algs_lock);
}