/**
 * AES XCBC routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

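/*
 * AES-XCBC-MAC (RFC 3566) is a CBC-MAC variant that protects the final
 * block with subkeys derived from the base key.  The CPB only ever
 * carries the base AES-128 key, so the subkey derivation is done by the
 * coprocessor; the driver's job is to stream block-aligned data through
 * the chip and to carry state between calls:
 *
 *   @state:  16 byte chaining value written back by the coprocessor
 *   @count:  number of message bytes currently held in @buffer
 *   @buffer: held-back message tail; at least one block is always kept
 *            here so the MAC can be closed out in nx_xcbc_final()
 */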
struct xcbc_state {
	u8 state[AES_BLOCK_SIZE];
	unsigned int count;
	u8 buffer[AES_BLOCK_SIZE];
};

static int nx_xcbc_set_key(struct crypto_shash *tfm,
			   const u8            *in_key,
			   unsigned int         key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(tfm);

	switch (key_len) {
	case AES_KEYSIZE_128:
		/* RFC 3566 defines XCBC for AES-128 only */
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	memcpy(nx_ctx->priv.xcbc.key, in_key, key_len);

	return 0;
}

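/*
 * Begin a new digest operation: reset the descriptor state, program the
 * CPB for XCBC mode with the key saved by nx_xcbc_set_key(), and build
 * the output scatter/gather entry that receives the 16 byte MAC/CV.
 */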
static int nx_xcbc_init(struct shash_desc *desc)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *out_sg;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	memset(sctx, 0, sizeof *sctx);

	NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;

	memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
	/* scrub the whole saved key copy, not just its first byte; the
	 * working copy now lives in the CPB */
	memset(nx_ctx->priv.xcbc.key, 0, sizeof(nx_ctx->priv.xcbc.key));

	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
				  AES_BLOCK_SIZE, nx_ctx->ap->sglen);
	/* a negative length tells the hypervisor that the address is a
	 * scatter/gather list rather than a flat buffer (see the
	 * vio_pfo_op kernel-doc), hence head minus end */
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	return 0;
}

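/*
 * Feed message bytes to the coprocessor.  Anything up to one block is
 * simply buffered; beyond that, block-aligned chunks are pushed through
 * the chip, bounded per iteration by the hardware's databytelen limit
 * and by what fits in the scatter/gather list.  The last full block is
 * always held back, because the hardware rejects zero length operations
 * and the MAC is only correct when the final block goes through the
 * finalizing (non-intermediate) operation in nx_xcbc_final().
 */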
static int nx_xcbc_update(struct shash_desc *desc,
			  const u8          *data,
			  unsigned int       len)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg;
	u32 to_process, leftover, total;
	u32 max_sg_len;
	unsigned long irq_flags;
	int rc = 0;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	total = sctx->count + len;

	/* 2 cases for total data len:
	 * 1: <= AES_BLOCK_SIZE: copy into the buffer, return 0
	 * 2: > AES_BLOCK_SIZE: process X blocks, buffer the leftover
	 */
	if (total <= AES_BLOCK_SIZE) {
		memcpy(sctx->buffer + sctx->count, data, len);
		sctx->count += len;
		goto out;
	}

	in_sg = nx_ctx->in_sg;
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	do {
		/* to_process: the block-aligned number of bytes to hash in
		 * this iteration, bounded by what the hardware and the
		 * scatter/gather list can take in one operation */
		to_process = min_t(u64, total, nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));
		to_process = to_process & ~(AES_BLOCK_SIZE - 1);
		leftover = total - to_process;

		/* the hardware will not accept a 0 byte operation for this
		 * algorithm and the operation MUST be finalized to be correct.
		 * So if we happen to get an update that falls on a block sized
		 * boundary, we must save off the last block to finalize with
		 * later. */
		if (!leftover) {
			to_process -= AES_BLOCK_SIZE;
			leftover = AES_BLOCK_SIZE;
		}

		if (sctx->count) {
			in_sg = nx_build_sg_list(nx_ctx->in_sg,
						 (u8 *) sctx->buffer,
						 sctx->count,
						 max_sg_len);
		}
		in_sg = nx_build_sg_list(in_sg,
					 (u8 *) data,
					 to_process - sctx->count,
					 max_sg_len);
		/* negative length == scatter/gather list, see nx_xcbc_init() */
		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);

		/* we've hit the nx chip previously and we're updating again,
		 * so copy over the partial digest */
		if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
			memcpy(csbcpb->cpb.aes_xcbc.cv,
			       csbcpb->cpb.aes_xcbc.out_cv_mac,
			       AES_BLOCK_SIZE);
		}

		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->aes_ops));

		/* everything after the first update is continuation */
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		total -= to_process;
		data += to_process - sctx->count;
		sctx->count = 0;
		in_sg = nx_ctx->in_sg;
	} while (leftover > AES_BLOCK_SIZE);

	/* copy the leftover back into the state struct */
	memcpy(sctx->buffer, data, leftover);
	sctx->count = leftover;

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

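/*
 * Close out the MAC.  Three paths: (1) earlier updates already hit the
 * chip, so seed the CV from the previous partial digest and finish with
 * the held-back bytes; (2) no data was ever hashed, so return the
 * precomputed empty-message value, since the hardware cannot perform a
 * zero length operation; (3) at most one buffered block, which becomes
 * the one and only operation.
 */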
static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	unsigned long irq_flags;
	int rc = 0;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.aes_xcbc.cv,
		       csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
	} else if (sctx->count == 0) {
		/* we've never seen an update, so this is a 0 byte op. The
		 * hardware cannot handle a 0 byte op, so just copy out the
		 * known 0 byte result. This is cheaper than allocating a
		 * software context to do a 0 byte op.  Note that this
		 * constant is the RFC 3566 test case #1 value, which is
		 * only correct for that vector's key; a fully general zero
		 * length MAC would have to derive K1/K3 from the real key. */
		u8 data[] = { 0x75, 0xf0, 0x25, 0x1d, 0x52, 0x8a, 0xc0, 0x1c,
			      0x45, 0x73, 0xdf, 0xd5, 0x84, 0xd7, 0x9f, 0x29 };
		memcpy(out, data, sizeof(data));
		goto out;
	}

	/* final is represented by continuing the operation and indicating that
	 * this is not an intermediate operation */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
				 sctx->count, nx_ctx->ap->sglen);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE,
				  nx_ctx->ap->sglen);

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));

	memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

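/*
 * A minimal sketch of how a kernel caller would drive this transform
 * through the generic shash API (an untested illustration; error
 * handling omitted, and key/msg/msg_len are the caller's own):
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("xcbc(aes)", 0, 0);
 *	struct shash_desc *desc = kmalloc(sizeof(*desc) +
 *				crypto_shash_descsize(tfm), GFP_KERNEL);
 *	u8 mac[AES_BLOCK_SIZE];
 *
 *	desc->tfm = tfm;
 *	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 *	crypto_shash_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_shash_digest(desc, msg, msg_len, mac);
 *	kfree(desc);
 *	crypto_free_shash(tfm);
 *
 * The crypto core selects among "xcbc(aes)" implementations by
 * cra_priority, so this driver (priority 300) is preferred over
 * lower-priority software implementations when the nx coprocessor is
 * available.
 */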
struct shash_alg nx_shash_aes_xcbc_alg = {
	.digestsize = AES_BLOCK_SIZE,
	.init       = nx_xcbc_init,
	.update     = nx_xcbc_update,
	.final      = nx_xcbc_final,
	.setkey     = nx_xcbc_set_key,
	.descsize   = sizeof(struct xcbc_state),
	.statesize  = sizeof(struct xcbc_state),
	.base       = {
		.cra_name        = "xcbc(aes)",
		.cra_driver_name = "xcbc-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize   = AES_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_init        = nx_crypto_ctx_aes_xcbc_init,
		.cra_exit        = nx_crypto_ctx_exit,
	}
};