/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/internal/skcipher.h>

#include "cipher.h"

static LIST_HEAD(ablkcipher_algs);

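/*
 * DMA completion callback: tear down the scatterlist mappings, release the
 * destination sg table and report the engine status back to the core.
 */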
static void qce_ablkcipher_done(void *data)
{
        struct crypto_async_request *async_req = data;
        struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
        struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
        struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
        struct qce_device *qce = tmpl->qce;
        enum dma_data_direction dir_src, dir_dst;
        u32 status;
        int error;
        bool diff_dst;

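        /* an in-place request (src == dst) was mapped once, bidirectionally */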
        diff_dst = (req->src != req->dst);
        dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
        dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

        error = qce_dma_terminate_all(&qce->dma);
        if (error)
                dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
                        error);

        if (diff_dst)
                dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
        dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

        sg_free_table(&rctx->dst_tbl);

        error = qce_check_status(qce, &status);
        if (error < 0)
                dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);

        qce->async_req_done(tmpl->qce, error);
}

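/*
 * Prepare one cipher request for the engine: build a destination sg table
 * that appends the result buffer after req->dst, DMA-map both sides, queue
 * the transfer on the DMA engine and start the crypto block.
 */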
static int
qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
{
        struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
        struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
        struct qce_device *qce = tmpl->qce;
        enum dma_data_direction dir_src, dir_dst;
        struct scatterlist *sg;
        bool diff_dst;
        gfp_t gfp;
        int ret;

        rctx->iv = req->info;
        rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
        rctx->cryptlen = req->nbytes;

        diff_dst = (req->src != req->dst);
        dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
        dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

        rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (diff_dst)
                rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
        else
                rctx->dst_nents = rctx->src_nents;
        if (rctx->src_nents < 0) {
                dev_err(qce->dev, "Invalid number of src SG.\n");
                return rctx->src_nents;
        }
        if (rctx->dst_nents < 0) {
                dev_err(qce->dev, "Invalid number of dst SG.\n");
                return rctx->dst_nents;
        }

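        /* one extra dst entry for the result buffer appended below */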
        rctx->dst_nents += 1;

        gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                                                GFP_KERNEL : GFP_ATOMIC;

        ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
        if (ret)
                return ret;

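        /*
         * rctx->result_sg wraps the device's result buffer; appended after
         * req->dst, it is filled by the same DMA transaction as the payload.
         */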
        sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

        sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto error_free;
        }

        sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto error_free;
        }

        sg_mark_end(sg);
        rctx->dst_sg = rctx->dst_tbl.sgl;

        ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
        if (ret < 0)
                goto error_free;

        if (diff_dst) {
                ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
                if (ret < 0)
                        goto error_unmap_dst;
                rctx->src_sg = req->src;
        } else {
                rctx->src_sg = rctx->dst_sg;
        }

        ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
                               rctx->dst_sg, rctx->dst_nents,
                               qce_ablkcipher_done, async_req);
        if (ret)
                goto error_unmap_src;

        qce_dma_issue_pending(&qce->dma);

        ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
        if (ret)
                goto error_terminate;

        return 0;

error_terminate:
        qce_dma_terminate_all(&qce->dma);
error_unmap_src:
        if (diff_dst)
                dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
        dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
        sg_free_table(&rctx->dst_tbl);
        return ret;
}

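/*
 * setkey: AES keys the engine cannot handle (anything other than 128- and
 * 256-bit) are deferred to the software fallback; DES keys are rejected as
 * weak when the transform forbids weak keys.
 */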
static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
                                 unsigned int keylen)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
        struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        unsigned long flags = to_cipher_tmpl(tfm)->alg_flags;
        int ret;

        if (!key || !keylen)
                return -EINVAL;

        if (IS_AES(flags)) {
                switch (keylen) {
                case AES_KEYSIZE_128:
                case AES_KEYSIZE_256:
                        break;
                default:
                        goto fallback;
                }
        } else if (IS_DES(flags)) {
                u32 tmp[DES_EXPKEY_WORDS];

                ret = des_ekey(tmp, key);
                if (!ret && (crypto_ablkcipher_get_flags(ablk) &
                             CRYPTO_TFM_REQ_FORBID_WEAK_KEYS))
                        goto weakkey;
        }

        ctx->enc_keylen = keylen;
        memcpy(ctx->enc_key, key, keylen);
        return 0;
fallback:
        ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
        if (!ret)
                ctx->enc_keylen = keylen;
        return ret;
weakkey:
        crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY);
        return -EINVAL;
}

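/*
 * Enqueue a request on the engine, or route it through the software
 * fallback when the AES key length is one the hardware does not support.
 */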
static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
{
        struct crypto_tfm *tfm =
                crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
        struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
        struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
        int ret;

        rctx->flags = tmpl->alg_flags;
        rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;

        if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
            ctx->enc_keylen != AES_KEYSIZE_256) {
                SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

                skcipher_request_set_sync_tfm(subreq, ctx->fallback);
                skcipher_request_set_callback(subreq, req->base.flags,
                                              NULL, NULL);
                skcipher_request_set_crypt(subreq, req->src, req->dst,
                                           req->nbytes, req->info);
                ret = encrypt ? crypto_skcipher_encrypt(subreq) :
                                crypto_skcipher_decrypt(subreq);
                skcipher_request_zero(subreq);
                return ret;
        }

        return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
{
        return qce_ablkcipher_crypt(req, 1);
}

static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
{
        return qce_ablkcipher_crypt(req, 0);
}

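/* allocate the sync skcipher fallback used for unsupported AES key sizes */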
static int qce_ablkcipher_init(struct crypto_tfm *tfm)
{
        struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

        memset(ctx, 0, sizeof(*ctx));
        tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);

        ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(tfm),
                                                   0, CRYPTO_ALG_NEED_FALLBACK);
        return PTR_ERR_OR_ZERO(ctx->fallback);
}

static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
{
        struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_sync_skcipher(ctx->fallback);
}

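/* template describing one cipher algorithm/mode supported by the engine */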
struct qce_ablkcipher_def {
        unsigned long flags;
        const char *name;
        const char *drv_name;
        unsigned int blocksize;
        unsigned int ivsize;
        unsigned int min_keysize;
        unsigned int max_keysize;
};

static const struct qce_ablkcipher_def ablkcipher_def[] = {
        {
                .flags = QCE_ALG_AES | QCE_MODE_ECB,
                .name = "ecb(aes)",
                .drv_name = "ecb-aes-qce",
                .blocksize = AES_BLOCK_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
        },
        {
                .flags = QCE_ALG_AES | QCE_MODE_CBC,
                .name = "cbc(aes)",
                .drv_name = "cbc-aes-qce",
                .blocksize = AES_BLOCK_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
        },
        {
                .flags = QCE_ALG_AES | QCE_MODE_CTR,
                .name = "ctr(aes)",
                .drv_name = "ctr-aes-qce",
                .blocksize = AES_BLOCK_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
        },
        {
                .flags = QCE_ALG_AES | QCE_MODE_XTS,
                .name = "xts(aes)",
                .drv_name = "xts-aes-qce",
                .blocksize = AES_BLOCK_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
        },
        {
                .flags = QCE_ALG_DES | QCE_MODE_ECB,
                .name = "ecb(des)",
                .drv_name = "ecb-des-qce",
                .blocksize = DES_BLOCK_SIZE,
                .ivsize = 0,
                .min_keysize = DES_KEY_SIZE,
                .max_keysize = DES_KEY_SIZE,
        },
        {
                .flags = QCE_ALG_DES | QCE_MODE_CBC,
                .name = "cbc(des)",
                .drv_name = "cbc-des-qce",
                .blocksize = DES_BLOCK_SIZE,
                .ivsize = DES_BLOCK_SIZE,
                .min_keysize = DES_KEY_SIZE,
                .max_keysize = DES_KEY_SIZE,
        },
        {
                .flags = QCE_ALG_3DES | QCE_MODE_ECB,
                .name = "ecb(des3_ede)",
                .drv_name = "ecb-3des-qce",
                .blocksize = DES3_EDE_BLOCK_SIZE,
                .ivsize = 0,
                .min_keysize = DES3_EDE_KEY_SIZE,
                .max_keysize = DES3_EDE_KEY_SIZE,
        },
        {
                .flags = QCE_ALG_3DES | QCE_MODE_CBC,
                .name = "cbc(des3_ede)",
                .drv_name = "cbc-3des-qce",
                .blocksize = DES3_EDE_BLOCK_SIZE,
                .ivsize = DES3_EDE_BLOCK_SIZE,
                .min_keysize = DES3_EDE_KEY_SIZE,
                .max_keysize = DES3_EDE_KEY_SIZE,
        },
};

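/*
 * Allocate a template for one algorithm definition, fill in the crypto_alg
 * fields from it and register the algorithm with the crypto API.
 */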
static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
                                       struct qce_device *qce)
{
        struct qce_alg_template *tmpl;
        struct crypto_alg *alg;
        int ret;

        tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
        if (!tmpl)
                return -ENOMEM;

        alg = &tmpl->alg.crypto;

        snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
        snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                 def->drv_name);

        alg->cra_blocksize = def->blocksize;
        alg->cra_ablkcipher.ivsize = def->ivsize;
        alg->cra_ablkcipher.min_keysize = def->min_keysize;
        alg->cra_ablkcipher.max_keysize = def->max_keysize;
        alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
        alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
        alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;

        alg->cra_priority = 300;
        alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
                         CRYPTO_ALG_NEED_FALLBACK;
        alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
        alg->cra_alignmask = 0;
        alg->cra_type = &crypto_ablkcipher_type;
        alg->cra_module = THIS_MODULE;
        alg->cra_init = qce_ablkcipher_init;
        alg->cra_exit = qce_ablkcipher_exit;

        INIT_LIST_HEAD(&tmpl->entry);
        tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
        tmpl->alg_flags = def->flags;
        tmpl->qce = qce;

        ret = crypto_register_alg(alg);
        if (ret) {
                kfree(tmpl);
                dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
                return ret;
        }

        list_add_tail(&tmpl->entry, &ablkcipher_algs);
        dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
        return 0;
}

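/* unregister and free every algorithm this driver has registered */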
static void qce_ablkcipher_unregister(struct qce_device *qce)
{
        struct qce_alg_template *tmpl, *n;

        list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
                crypto_unregister_alg(&tmpl->alg.crypto);
                list_del(&tmpl->entry);
                kfree(tmpl);
        }
}

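/* register all supported cipher modes, unwinding on the first failure */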
static int qce_ablkcipher_register(struct qce_device *qce)
{
        int ret, i;

        for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
                ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
                if (ret)
                        goto err;
        }

        return 0;
err:
        qce_ablkcipher_unregister(qce);
        return ret;
}

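/* entry points used by the qce core for the ablkcipher algorithm type */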
const struct qce_algo_ops ablkcipher_ops = {
        .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
        .register_algs = qce_ablkcipher_register,
        .unregister_algs = qce_ablkcipher_unregister,
        .async_req_handle = qce_ablkcipher_async_req_handle,
};