/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/rng.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"

#define QAT_AES_HW_CONFIG_ENC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

static atomic_t active_dev;

struct qat_alg_buf {
	uint32_t len;
	uint32_t resrvd;
	uint64_t addr;
} __packed;

struct qat_alg_buf_list {
	uint64_t resrvd;
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);

/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);

#define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)

struct qat_auth_state {
	uint8_t data[MAX_AUTH_STATE_SIZE + 64];
} __aligned(64);

struct qat_alg_session_ctx {
	struct qat_alg_cd *enc_cd;
	dma_addr_t enc_cd_paddr;
	struct qat_alg_cd *dec_cd;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
	struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	uint8_t salt[AES_BLOCK_SIZE];
	spinlock_t lock; /* protects qat_alg_session_ctx struct */
};

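/*
 * Return the physical package id of the CPU we are currently running on,
 * used to pick a QAT crypto instance close to the caller.
 */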
static int get_current_node(void)
{
	return cpu_data(current_thread_info()->cpu).phys_proc_id;
}

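/* Size in bytes of the inner hash state (state1) for the given hash algo */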
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
}

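/*
 * Precompute the HMAC inner and outer hash states for the content
 * descriptor: XOR the auth key into the ipad/opad blocks, hash one block
 * of each, and export the partial states into hash->sha.state1 in
 * big-endian form so the hardware can complete the HMAC on its own.
 */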
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_session_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	struct qat_auth_state auth_state;
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(ctx->hash_tfm)];
	} desc;
	struct sha1_state sha1;
	struct sha256_state sha256;
	struct sha512_state sha512;
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	uint8_t *ipad = auth_state.data;
	uint8_t *opad = ipad + block_size;
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(auth_state.data, '\0', MAX_AUTH_STATE_SIZE + 64);
	desc.shash.tfm = ctx->hash_tfm;
	desc.shash.flags = 0x0;

	if (auth_keylen > block_size) {
		char buff[SHA512_BLOCK_SIZE];
		int ret = crypto_shash_digest(&desc.shash, auth_key,
					      auth_keylen, buff);
		if (ret)
			return ret;

		memcpy(ipad, buff, digest_size);
		memcpy(opad, buff, digest_size);
		memset(ipad + digest_size, 0, block_size - digest_size);
		memset(opad + digest_size, 0, block_size - digest_size);
	} else {
		memcpy(ipad, auth_key, auth_keylen);
		memcpy(opad, auth_key, auth_keylen);
		memset(ipad + auth_keylen, 0, block_size - auth_keylen);
		memset(opad + auth_keylen, 0, block_size - auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ipad + i;
		char *opad_ptr = opad + i;
		*ipad_ptr ^= 0x36;
		*opad_ptr ^= 0x5C;
	}

	if (crypto_shash_init(&desc.shash))
		return -EFAULT;

	if (crypto_shash_update(&desc.shash, ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(&desc.shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(&desc.shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(&desc.shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(&desc.shash))
		return -EFAULT;

	if (crypto_shash_update(&desc.shash, opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(&desc.shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(&desc.shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(&desc.shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}
	return 0;
}

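/* Fill in the request header fields common to all LA (lookaside) requests */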
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

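/*
 * Build the encrypt content descriptor (cipher block followed by hash
 * block) and the cipher-then-hash firmware request template.
 */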
static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
				    int alg, struct crypto_authenc_keys *keys)
{
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}

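/*
 * Build the decrypt content descriptor (hash block followed by cipher
 * block) and the hash-then-cipher firmware request template; the hardware
 * compares the authentication result itself.
 */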
static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
				    int alg, struct crypto_authenc_keys *keys)
{
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}

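/*
 * Split the authenc() key blob into cipher and auth parts, pick the AES
 * key size and initialize both the encrypt and decrypt sessions.
 */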
static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
				 const uint8_t *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
		return -EFAULT;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	switch (keys.enckeylen) {
	case AES_KEYSIZE_128:
		alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
		break;
	case AES_KEYSIZE_192:
		alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
		break;
	case AES_KEYSIZE_256:
		alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
		break;
	default:
		goto bad_key;
	}

	if (qat_alg_init_enc_session(ctx, alg, &keys))
		goto error;

	if (qat_alg_init_dec_session(ctx, alg, &keys))
		goto error;

	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
error:
	return -EFAULT;
}

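/*
 * setkey: on first use allocate the DMA-coherent content descriptors on
 * an instance local to the caller; on rekey just zero the old descriptors
 * and request templates before rebuilding the sessions.
 */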
static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
			  unsigned int keylen)
{
	struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		memset(&ctx->enc_fw_req_tmpl, 0,
		       sizeof(struct icp_qat_fw_la_bulk_req));
		memset(&ctx->dec_fw_req_tmpl, 0,
		       sizeof(struct icp_qat_fw_la_bulk_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev,
						  sizeof(struct qat_alg_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev,
						  sizeof(struct qat_alg_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
	}
	spin_unlock(&ctx->lock);
	if (qat_alg_init_sessions(ctx, key, keylen))
		goto out_free_all;

	return 0;

out_free_all:
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}

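/* Unmap and free the firmware buffer lists built by qat_alg_sgl_to_bufl() */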
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	int i, bufs = bl->num_bufs;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* If out of place operation dma unmap only data */
		int bufless = bufs - blout->num_mapped_bufs;

		for (i = bufless; i < bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE);
		kfree(blout);
	}
}

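/*
 * Map the assoc data, IV and src/dst scatterlists into the flat buffer
 * list format the firmware expects. For out of place operations the assoc
 * and IV mappings are shared between the input and output lists.
 */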
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *assoc,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout, uint8_t *iv,
			       uint8_t ivlen,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz = sizeof(struct qat_alg_buf_list) +
			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;

	bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node);
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err;

	for_each_sg(assoc, sg, assoc_n, i) {
		bufl->bufers[bufs].addr = dma_map_single(dev,
							 sg_virt(sg),
							 sg->length,
							 DMA_BIDIRECTIONAL);
		bufl->bufers[bufs].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
			goto err;
		bufs++;
	}
	bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
						 DMA_BIDIRECTIONAL);
	bufl->bufers[bufs].len = ivlen;
	if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
		goto err;
	bufs++;

	for_each_sg(sgl, sg, n, i) {
		int y = i + bufs;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err;
	}
	bufl->num_bufs = n + bufs;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		buflout = kmalloc_node(sz, GFP_ATOMIC,
				       inst->accel_dev->numa_node);
		if (unlikely(!buflout))
			goto err;
		bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err;
		bufers = buflout->bufers;
		/* For out of place operation dma map only data and
		 * reuse assoc mapping and iv */
		for (i = 0; i < bufs; i++) {
			bufers[i].len = bufl->bufers[i].len;
			bufers[i].addr = bufl->bufers[i].addr;
		}
		for_each_sg(sglout, sg, n, i) {
			int y = i + bufs;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			buflout->bufers[y].len = sg->length;
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err;
		}
		buflout->num_bufs = n + bufs;
		buflout->num_mapped_bufs = n;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
	}
	return 0;
err:
	dev_err(dev, "Failed to map buf for dma\n");
	for_each_sg(sgl, sg, n + bufs, i) {
		if (!dma_mapping_error(dev, bufl->bufers[i].addr)) {
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
	}
	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);
	if (sgl != sglout && buflout) {
		for_each_sg(sglout, sg, n, i) {
			int y = i + bufs;

			if (!dma_mapping_error(dev, buflout->bufers[y].addr))
				dma_unmap_single(dev, buflout->bufers[y].addr,
						 buflout->bufers[y].len,
						 DMA_BIDIRECTIONAL);
		}
		if (!dma_mapping_error(dev, bloutp))
			dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE);
		kfree(buflout);
	}
	return -ENOMEM;
}

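/*
 * Completion handler called from the transport layer for each firmware
 * response: free the buffer lists and complete the aead request,
 * reporting -EBADMSG if the firmware flagged an error.
 */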
void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
				(void *)(__force long)qat_resp->opaque_data;
	struct qat_alg_session_ctx *ctx = qat_req->ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->areq;
	uint8_t stat_field = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}

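/*
 * Decrypt path: the cipher region excludes the appended digest, and the
 * hardware authenticates assoc data, IV and ciphertext before decrypting.
 */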
static int qat_alg_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_crt(aead_tfm)->authsize;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
				  areq->iv, AES_BLOCK_SIZE, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req_tmpl;
	qat_req->ctx = ctx;
	qat_req->areq = areq;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = areq->cryptlen - digst_size;
	cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen +
				cipher_param->cipher_length + AES_BLOCK_SIZE;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

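/*
 * Encrypt path: with enc_iv set the IV itself is encrypted along with the
 * payload (used by givencrypt); otherwise the IV is passed in the request
 * parameters and only the payload is encrypted.
 */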
static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
				int enc_iv)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
				  iv, AES_BLOCK_SIZE, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req_tmpl;
	qat_req->ctx = ctx;
	qat_req->areq = areq;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	if (enc_iv) {
		cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
		cipher_param->cipher_offset = areq->assoclen;
	} else {
		memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
		cipher_param->cipher_length = areq->cryptlen;
		cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
	}
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_enc(struct aead_request *areq)
{
	return qat_alg_enc_internal(areq, areq->iv, 0);
}

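/*
 * Generate the IV from the per-context salt and the request sequence
 * number, then encrypt it together with the payload.
 */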
static int qat_alg_genivenc(struct aead_givcrypt_request *req)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	__be64 seq;

	memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
	       &seq, sizeof(uint64_t));
	return qat_alg_enc_internal(&req->areq, req->giv, 1);
}

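/*
 * Common tfm init: allocate the software shash used for the HMAC
 * precomputes and set the request context size.
 */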
static int qat_alg_init(struct crypto_tfm *tfm,
			enum icp_qat_hw_auth_algo hash, const char *hash_name)
{
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);

	memset(ctx, '\0', sizeof(*ctx));
	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return -EFAULT;
	spin_lock_init(&ctx->lock);
	ctx->qat_hash_alg = hash;
	tfm->crt_aead.reqsize = sizeof(struct aead_request) +
				sizeof(struct qat_crypto_request);
	ctx->tfm = tfm;
	return 0;
}

static int qat_alg_sha1_init(struct crypto_tfm *tfm)
{
	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_sha256_init(struct crypto_tfm *tfm)
{
	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_sha512_init(struct crypto_tfm *tfm)
{
	return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}

static void qat_alg_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!IS_ERR(ctx->hash_tfm))
		crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd)
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	if (ctx->dec_cd)
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	qat_crypto_put_instance(inst);
}

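/* AEAD algorithm definitions registered with the crypto API */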
static struct crypto_alg qat_algs[] = { {
	.cra_name = "authenc(hmac(sha1),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha1",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_sha1_init,
	.cra_exit = qat_alg_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_setkey,
			.decrypt = qat_alg_dec,
			.encrypt = qat_alg_enc,
			.givencrypt = qat_alg_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
	},
}, {
	.cra_name = "authenc(hmac(sha256),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha256",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_sha256_init,
	.cra_exit = qat_alg_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_setkey,
			.decrypt = qat_alg_dec,
			.encrypt = qat_alg_enc,
			.givencrypt = qat_alg_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
	},
}, {
	.cra_name = "authenc(hmac(sha512),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha512",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_session_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_sha512_init,
	.cra_exit = qat_alg_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_setkey,
			.decrypt = qat_alg_dec,
			.encrypt = qat_alg_enc,
			.givencrypt = qat_alg_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
	},
} };

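/*
 * Register the algorithms when the first accelerator device comes up;
 * subsequent devices only bump the reference count.
 */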
int qat_algs_register(void)
{
	if (atomic_add_return(1, &active_dev) == 1) {
		int i;

		for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
			qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD |
						CRYPTO_ALG_ASYNC;
		return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
	}
	return 0;
}

int qat_algs_unregister(void)
{
	if (atomic_sub_return(1, &active_dev) == 0)
		return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
	return 0;
}

int qat_algs_init(void)
{
	atomic_set(&active_dev, 0);
	crypto_get_default_rng();
	return 0;
}

void qat_algs_exit(void)
{
	crypto_put_default_rng();
}