/* Glue code for CAMELLIA encryption optimized for sparc64 crypto opcodes.
 *
 * Copyright (C) 2012 David S. Miller <davem@davemloft.net>
 */

#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/algapi.h>

#include <asm/fpumacro.h>
#include <asm/pstate.h>
#include <asm/elf.h>

#include "opcodes.h"

#define CAMELLIA_MIN_KEY_SIZE	 16
#define CAMELLIA_MAX_KEY_SIZE	 32
#define CAMELLIA_BLOCK_SIZE	 16
#define CAMELLIA_TABLE_BYTE_LEN	272

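/* Both directions of the expanded key schedule are kept in the context,
 * so a single tfm can encrypt and decrypt without re-running key
 * expansion.
 */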
struct camellia_sparc64_ctx {
	u64 encrypt_key[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)];
	u64 decrypt_key[CAMELLIA_TABLE_BYTE_LEN / sizeof(u64)];
	int key_len;
};

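/* Implemented in assembly (camellia_asm.S); computes both the encryption
 * and the decryption schedule in one pass.
 */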
extern void camellia_sparc64_key_expand(const u32 *in_key, u64 *encrypt_key,
					unsigned int key_len, u64 *decrypt_key);

static int camellia_set_key(struct crypto_tfm *tfm, const u8 *_in_key,
			    unsigned int key_len)
{
	struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);
	const u32 *in_key = (const u32 *) _in_key;
	u32 *flags = &tfm->crt_flags;

	if (key_len != 16 && key_len != 24 && key_len != 32) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	ctx->key_len = key_len;

	camellia_sparc64_key_expand(in_key, &ctx->encrypt_key[0],
				    key_len, &ctx->decrypt_key[0]);
	return 0;
}

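/* One assembly routine handles both directions for single-block
 * operation; the direction is implied by which key schedule is passed.
 */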
extern void camellia_sparc64_crypt(const u64 *key, const u32 *input,
				   u32 *output, unsigned int key_len);

static void camellia_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);

	camellia_sparc64_crypt(&ctx->encrypt_key[0],
			       (const u32 *) src,
			       (u32 *) dst, ctx->key_len);
}

static void camellia_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm);

	camellia_sparc64_crypt(&ctx->decrypt_key[0],
			       (const u32 *) src,
			       (u32 *) dst, ctx->key_len);
}

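/* Bulk operations pre-load the expanded key schedule into the FPU
 * registers once, then stream blocks through the crypto opcodes.
 */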
extern void camellia_sparc64_load_keys(const u64 *key, unsigned int key_len);

typedef void ecb_crypt_op(const u64 *input, u64 *output, unsigned int len,
			  const u64 *key);

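/* Camellia uses 18 rounds (three 6-round "grand rounds") for 128-bit
 * keys and 24 rounds (four grand rounds) for 192- and 256-bit keys,
 * hence the two code paths below.
 */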
extern ecb_crypt_op camellia_sparc64_ecb_crypt_3_grand_rounds;
extern ecb_crypt_op camellia_sparc64_ecb_crypt_4_grand_rounds;

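/* Rounds a byte count down to a whole number of blocks; the block size
 * is a power of two, so a simple mask suffices.
 */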
#define CAMELLIA_BLOCK_MASK	(~(CAMELLIA_BLOCK_SIZE - 1))

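/* Common ECB walker for both directions.  The fprs_write(0) at the end
 * marks the FPU register state clean again after the assembly routines
 * have dirtied it with key material and block data.
 */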
static int __ecb_crypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes, bool encrypt)
{
	struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	ecb_crypt_op *op;
	const u64 *key;
	int err;

	op = camellia_sparc64_ecb_crypt_3_grand_rounds;
	if (ctx->key_len != 16)
		op = camellia_sparc64_ecb_crypt_4_grand_rounds;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	if (encrypt)
		key = &ctx->encrypt_key[0];
	else
		key = &ctx->decrypt_key[0];
	camellia_sparc64_load_keys(key, ctx->key_len);
	while ((nbytes = walk.nbytes)) {
		unsigned int block_len = nbytes & CAMELLIA_BLOCK_MASK;

		if (likely(block_len)) {
			const u64 *src64;
			u64 *dst64;

			src64 = (const u64 *)walk.src.virt.addr;
			dst64 = (u64 *) walk.dst.virt.addr;
			op(src64, dst64, block_len, key);
		}
		nbytes &= CAMELLIA_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	fprs_write(0);
	return err;
}

static int ecb_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	return __ecb_crypt(desc, dst, src, nbytes, true);
}

static int ecb_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	return __ecb_crypt(desc, dst, src, nbytes, false);
}

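/* The CBC helpers take the IV pointer so the chaining value can stay in
 * registers across a contiguous run of blocks, with the updated IV
 * written back at the end of each call.
 */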
typedef void cbc_crypt_op(const u64 *input, u64 *output, unsigned int len,
			  const u64 *key, u64 *iv);

extern cbc_crypt_op camellia_sparc64_cbc_encrypt_3_grand_rounds;
extern cbc_crypt_op camellia_sparc64_cbc_encrypt_4_grand_rounds;
extern cbc_crypt_op camellia_sparc64_cbc_decrypt_3_grand_rounds;
extern cbc_crypt_op camellia_sparc64_cbc_decrypt_4_grand_rounds;

static int cbc_encrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	cbc_crypt_op *op;
	const u64 *key;
	int err;

	op = camellia_sparc64_cbc_encrypt_3_grand_rounds;
	if (ctx->key_len != 16)
		op = camellia_sparc64_cbc_encrypt_4_grand_rounds;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	key = &ctx->encrypt_key[0];
	camellia_sparc64_load_keys(key, ctx->key_len);
	while ((nbytes = walk.nbytes)) {
		unsigned int block_len = nbytes & CAMELLIA_BLOCK_MASK;

		if (likely(block_len)) {
			const u64 *src64;
			u64 *dst64;

			src64 = (const u64 *)walk.src.virt.addr;
			dst64 = (u64 *) walk.dst.virt.addr;
			op(src64, dst64, block_len, key,
			   (u64 *) walk.iv);
		}
		nbytes &= CAMELLIA_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	fprs_write(0);
	return err;
}

static int cbc_decrypt(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes)
{
	struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	cbc_crypt_op *op;
	const u64 *key;
	int err;

	op = camellia_sparc64_cbc_decrypt_3_grand_rounds;
	if (ctx->key_len != 16)
		op = camellia_sparc64_cbc_decrypt_4_grand_rounds;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	key = &ctx->decrypt_key[0];
	camellia_sparc64_load_keys(key, ctx->key_len);
	while ((nbytes = walk.nbytes)) {
		unsigned int block_len = nbytes & CAMELLIA_BLOCK_MASK;

		if (likely(block_len)) {
			const u64 *src64;
			u64 *dst64;

			src64 = (const u64 *)walk.src.virt.addr;
			dst64 = (u64 *) walk.dst.virt.addr;
			op(src64, dst64, block_len, key,
			   (u64 *) walk.iv);
		}
		nbytes &= CAMELLIA_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	fprs_write(0);
	return err;
}

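/* Three registrations: the bare cipher plus ECB and CBC modes.  The
 * blkcipher entries use an alignmask of 7 because the assembly loads
 * and stores 64-bit words; the single-block cipher only needs 32-bit
 * alignment.
 */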
static struct crypto_alg algs[] = { {
	.cra_name		= "camellia",
	.cra_driver_name	= "camellia-sparc64",
	.cra_priority		= SPARC_CR_OPCODE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= CAMELLIA_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct camellia_sparc64_ctx),
	.cra_alignmask		= 3,
	.cra_module		= THIS_MODULE,
	.cra_u	= {
		.cipher	= {
			.cia_min_keysize	= CAMELLIA_MIN_KEY_SIZE,
			.cia_max_keysize	= CAMELLIA_MAX_KEY_SIZE,
			.cia_setkey		= camellia_set_key,
			.cia_encrypt		= camellia_encrypt,
			.cia_decrypt		= camellia_decrypt
		}
	}
}, {
	.cra_name		= "ecb(camellia)",
	.cra_driver_name	= "ecb-camellia-sparc64",
	.cra_priority		= SPARC_CR_OPCODE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= CAMELLIA_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct camellia_sparc64_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= CAMELLIA_MIN_KEY_SIZE,
			.max_keysize	= CAMELLIA_MAX_KEY_SIZE,
			.setkey		= camellia_set_key,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	.cra_name		= "cbc(camellia)",
	.cra_driver_name	= "cbc-camellia-sparc64",
	.cra_priority		= SPARC_CR_OPCODE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= CAMELLIA_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct camellia_sparc64_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= CAMELLIA_MIN_KEY_SIZE,
			.max_keysize	= CAMELLIA_MAX_KEY_SIZE,
			.setkey		= camellia_set_key,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}
};

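/* %asr26 is the Crypto Function Register; check the crypto hwcap first
 * so the rd instruction is only issued on cpus that implement it, then
 * test the CFR bit that advertises camellia support.
 */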
static bool __init sparc64_has_camellia_opcode(void)
{
	unsigned long cfr;

	if (!(sparc64_elf_hwcap & HWCAP_SPARC_CRYPTO))
		return false;

	__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
	if (!(cfr & CFR_CAMELLIA))
		return false;

	return true;
}

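/* Register only when the opcodes are actually present; returning -ENODEV
 * otherwise lets a generic software camellia implementation be used
 * instead.
 */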
static int __init camellia_sparc64_mod_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		INIT_LIST_HEAD(&algs[i].cra_list);

	if (sparc64_has_camellia_opcode()) {
		pr_info("Using sparc64 camellia opcodes optimized CAMELLIA implementation\n");
		return crypto_register_algs(algs, ARRAY_SIZE(algs));
	}
	pr_info("sparc64 camellia opcodes not available.\n");
	return -ENODEV;
}

static void __exit camellia_sparc64_mod_fini(void)
{
	crypto_unregister_algs(algs, ARRAY_SIZE(algs));
}

module_init(camellia_sparc64_mod_init);
module_exit(camellia_sparc64_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated");

320MODULE_ALIAS("aes");