/**
 * AES XCBC routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"
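
/*
 * Per-request state carried across init/update/final: "state" is the
 * buffer the output scatter/gather list points at, "buffer" holds a
 * sub-block remainder that cannot be sent to the hardware yet, and
 * "count" is the number of valid bytes in "buffer".
 */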
struct xcbc_state {
	u8 state[AES_BLOCK_SIZE];
	unsigned int count;
	u8 buffer[AES_BLOCK_SIZE];
};
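
/*
 * RFC 3566 defines AES-XCBC-MAC only for 128-bit AES keys, so all other
 * key lengths are rejected. The key is stashed in the driver context
 * here and copied into the coprocessor parameter block at init time.
 */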
static int nx_xcbc_set_key(struct crypto_shash *desc,
			   const u8 *in_key,
			   unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);

	switch (key_len) {
	case AES_KEYSIZE_128:
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	memcpy(nx_ctx->priv.xcbc.key, in_key, key_len);

	return 0;
}
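
/*
 * Set up the coprocessor parameter block (CPB) for an AES-128 XCBC
 * operation: program the mode and key size, move the key saved by
 * nx_xcbc_set_key() into the CPB, and point the output scatter/gather
 * list at the state buffer for the hardware to write back into.
 */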
static int nx_xcbc_init(struct shash_desc *desc)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *out_sg;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	memset(sctx, 0, sizeof *sctx);

	NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;

	memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
	memset(nx_ctx->priv.xcbc.key, 0, sizeof(nx_ctx->priv.xcbc.key));

	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
				  AES_BLOCK_SIZE, nx_ctx->ap->sglen);
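	/* per the PFO hcall convention, a negative length tells the
	 * hypervisor that the address refers to a scatter/gather list
	 * rather than a flat buffer, hence list head minus list tail */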
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	return 0;
}
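
/*
 * Buffer incoming data until more than one block is available, then push
 * all full blocks through the coprocessor as an intermediate operation,
 * always keeping at least one byte (up to a full block) buffered so that
 * the final call has data to process.
 */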
static int nx_xcbc_update(struct shash_desc *desc,
			  const u8 *data,
			  unsigned int len)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg;
	u32 to_process, leftover;
	unsigned long irq_flags;
	int rc = 0;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
		/* we've hit the nx chip previously and we're updating again,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.aes_xcbc.cv,
		       csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
	}

	/* 2 cases for total data len:
	 * 1: <= AES_BLOCK_SIZE: copy into state, return 0
	 * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
	 */
	if (len + sctx->count <= AES_BLOCK_SIZE) {
		memcpy(sctx->buffer + sctx->count, data, len);
		sctx->count += len;
		goto out;
	}

	/* to_process: the number of bytes, rounded down to a multiple of
	 * AES_BLOCK_SIZE, that will be pushed through the coprocessor in
	 * this update */
	to_process = (sctx->count + len) & ~(AES_BLOCK_SIZE - 1);
	leftover = (sctx->count + len) & (AES_BLOCK_SIZE - 1);

	/* the hardware will not accept a 0 byte operation for this algorithm
	 * and the operation MUST be finalized to be correct. So if we happen
	 * to get an update that falls on a block sized boundary, we must
	 * save off the last block to finalize with later. */
	if (!leftover) {
		to_process -= AES_BLOCK_SIZE;
		leftover = AES_BLOCK_SIZE;
	}
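
	/* chain the buffered partial block, if any, ahead of the caller's
	 * data so the coprocessor sees one contiguous byte stream */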
	if (sctx->count) {
		in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buffer,
					 sctx->count, nx_ctx->ap->sglen);
		in_sg = nx_build_sg_list(in_sg, (u8 *)data,
					 to_process - sctx->count,
					 nx_ctx->ap->sglen);
		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);
	} else {
		in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data, to_process,
					 nx_ctx->ap->sglen);
		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);
	}
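
	/* more data may follow, so mark this operation intermediate to keep
	 * the hardware from finalizing the MAC */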
	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;

	if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}
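
	/* hand the request to the coprocessor via the synchronous hcall
	 * path; the MAY_SLEEP flag indicates whether blocking is allowed */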
	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* copy the leftover back into the state struct */
	memcpy(sctx->buffer, data + len - leftover, leftover);
	sctx->count = leftover;

	/* everything after the first update is continuation */
	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
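
/*
 * Flush whatever is left in the buffer through the coprocessor as a
 * non-intermediate (finalizing) operation and copy the resulting MAC
 * out to the caller.
 */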
static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	unsigned long irq_flags;
	int rc = 0;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.aes_xcbc.cv,
		       csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
	} else if (sctx->count == 0) {
		/* we've never seen an update, so this is a 0 byte op. The
		 * hardware cannot handle a 0 byte op, so copy out a
		 * precomputed result. Note: this constant is the RFC 3566
		 * Test Case #1 MAC, which is only correct for that RFC's
		 * test key; the empty-message MAC actually depends on the
		 * key in use. */
		u8 data[] = { 0x75, 0xf0, 0x25, 0x1d, 0x52, 0x8a, 0xc0, 0x1c,
			      0x45, 0x73, 0xdf, 0xd5, 0x84, 0xd7, 0x9f, 0x29 };
		memcpy(out, data, sizeof(data));
		goto out;
	}

	/* final is represented by continuing the operation and indicating that
	 * this is not an intermediate operation */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
				 sctx->count, nx_ctx->ap->sglen);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE,
				  nx_ctx->ap->sglen);

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));

	memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
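
/*
 * This algorithm is registered with the crypto API by the driver core
 * (nx.c), after which clients reach it through the generic shash
 * interface. A minimal usage sketch (error handling elided; "key",
 * "data", "len" and "mac" are the caller's buffers):
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("xcbc(aes)", 0, 0);
 *	struct shash_desc *desc = kmalloc(sizeof(*desc) +
 *					  crypto_shash_descsize(tfm),
 *					  GFP_KERNEL);
 *
 *	desc->tfm = tfm;
 *	desc->flags = 0;
 *	crypto_shash_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_shash_digest(desc, data, len, mac);
 */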
struct shash_alg nx_shash_aes_xcbc_alg = {
	.digestsize = AES_BLOCK_SIZE,
	.init       = nx_xcbc_init,
	.update     = nx_xcbc_update,
	.final      = nx_xcbc_final,
	.setkey     = nx_xcbc_set_key,
	.descsize   = sizeof(struct xcbc_state),
	.statesize  = sizeof(struct xcbc_state),
	.base       = {
		.cra_name        = "xcbc(aes)",
		.cra_driver_name = "xcbc-aes-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize   = AES_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_init        = nx_crypto_ctx_aes_xcbc_init,
		.cra_exit        = nx_crypto_ctx_exit,
	}
};