/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004  Michal Ludvig <michal@logix.cz>
 *
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include "padlock.h"

/*
 * Number of data blocks actually fetched for each xcrypt insn.
 * Processors with prefetch errata will fetch extra blocks.
 */
static unsigned int ecb_fetch_blocks = 2;
#define MAX_ECB_FETCH_BLOCKS (8)
#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)

static unsigned int cbc_fetch_blocks = 1;
#define MAX_CBC_FETCH_BLOCKS (4)
#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)

/* Control word for the xcrypt instructions.  As used below: rounds is
 * the AES round count, encdec selects decryption, keygen is set when
 * the expanded key schedule is supplied from memory rather than
 * generated by the engine, and ksize encodes the key length. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));

/* Whenever making any changes to the following
 * structure *make sure* you keep E, d_data
 * and cword aligned on 16-byte boundaries, and
 * that the hardware can access 16 * 16 bytes of E and d_data
 * (only the first 15 * 16 bytes matter, but the HW reads
 * more).
 */
struct aes_ctx {
	u32 E[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
};

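/*
 * Control word most recently loaded into the PadLock unit on each CPU,
 * used by padlock_reset_key()/padlock_store_cword() below to skip the
 * key reload when the same context keeps issuing requests on one CPU.
 */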
static DEFINE_PER_CPU(struct cword *, paes_last_cword);

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	   as it's possible that the capability will be
	   added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}

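/*
 * The crypto core aligns the tfm context only to
 * crypto_tfm_ctx_alignment(); when that is weaker than
 * PADLOCK_ALIGNMENT, round the pointer up by hand so E, d_data and
 * cword land on 16-byte boundaries.
 */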
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	u32 *flags = &tfm->crt_flags;
	struct crypto_aes_ctx gen_aes;
	int cpu;

	if (key_len % 8) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	ctx->E[0] = le32_to_cpu(key[0]);
	ctx->E[1] = le32_to_cpu(key[1]);
	ctx->E[2] = le32_to_cpu(key[2]);
	ctx->E[3] = le32_to_cpu(key[3]);

	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		goto ok;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;

	if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);

ok:
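	/*
	 * Any cached pointer into this context's control words is now
	 * stale; clear it so every CPU reloads the key on its next use
	 * of this tfm.
	 */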
	for_each_online_cpu(cpu)
		if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
		    &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
			per_cpu(paes_last_cword, cpu) = NULL;

	return 0;
}

/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
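/*
 * Writing EFLAGS (the push/pop pair below) is what makes the PadLock
 * engine reload its key material on the next xcrypt instruction
 * (documented PadLock behaviour), so the write is skipped when this
 * CPU last ran with the same control word.
 */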
static inline void padlock_reset_key(struct cword *cword)
{
	int cpu = raw_smp_processor_id();

	if (cword != per_cpu(paes_last_cword, cpu))
#ifndef CONFIG_X86_64
		asm volatile ("pushfl; popfl");
#else
		asm volatile ("pushfq; popfq");
#endif
}

static inline void padlock_store_cword(struct cword *cword)
{
	per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}

/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when cr0.ts is '1'.  These instructions
 * should be used only inside the irq_ts_save/restore() context.
 */

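/*
 * Register convention for rep xcrypt, as encoded by the asm
 * constraints below: ESI = input, EDI = output, EDX = control word,
 * EBX = key, ECX = block count; the CBC variant also carries the IV
 * in EAX.
 */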
static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				  struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}

static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				 u8 *iv, struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	rep_xcrypt_ecb(tmp, out, key, cword, count);
}

static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
			  u8 *iv, struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}

static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword, int count)
{
	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data,
	 * so bounce through an aligned stack buffer whenever the prefetch
	 * would run past the end of the input's page.  We could avoid
	 * some copying here but it's probably not worth it.
	 */
	if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) {
		ecb_crypt_copy(in, out, key, cword, count);
		return;
	}

	rep_xcrypt_ecb(in, out, key, cword, count);
}

static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
			    u8 *iv, struct cword *cword, int count)
{
	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
	if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE))
		return cbc_crypt_copy(in, out, key, iv, cword, count);

	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}

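/*
 * Bulk helpers: do the (count % fetch_blocks) remainder first so the
 * main rep xcrypt always runs on a whole multiple of the fetch size;
 * that way the errata over-fetch never reads past data we own.
 */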
static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	u32 initial = count & (ecb_fetch_blocks - 1);

	if (count < ecb_fetch_blocks) {
		ecb_crypt(input, output, key, control_word, count);
		return;
	}

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
			      : "+S"(input), "+D"(output)
			      : "d"(control_word), "b"(key), "c"(initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count - initial));
}

static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	u32 initial = count & (cbc_fetch_blocks - 1);

	if (count < cbc_fetch_blocks)
		return cbc_crypt(input, output, key, iv, control_word, count);

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
			      : "+S" (input), "+D" (output), "+a" (iv)
			      : "d" (control_word), "b" (key), "c" (initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count - initial));
	return iv;
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}

static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-padlock",
	.cra_priority		= PADLOCK_CRA_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aes_ctx),
	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt,
		}
	}
};

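/*
 * blkcipher entry points: walk the scatterlists one mapped chunk at a
 * time and hand each chunk to the bulk xcrypt helpers.  The low bits
 * masked into nbytes are the unprocessed remainder returned to
 * blkcipher_walk_done().
 */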
static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.decrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-padlock",
	.cra_priority		= PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aes_ctx),
	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ecb_aes_alg.cra_list),
	.cra_u			= {
		.blkcipher = {
			.min_keysize		= AES_MIN_KEY_SIZE,
			.max_keysize		= AES_MAX_KEY_SIZE,
			.setkey			= aes_set_key,
			.encrypt		= ecb_aes_encrypt,
			.decrypt		= ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.decrypt);

	return err;
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-padlock",
	.cra_priority		= PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aes_ctx),
	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(cbc_aes_alg.cra_list),
	.cra_u			= {
		.blkcipher = {
			.min_keysize		= AES_MIN_KEY_SIZE,
			.max_keysize		= AES_MAX_KEY_SIZE,
			.ivsize			= AES_BLOCK_SIZE,
			.setkey			= aes_set_key,
			.encrypt		= cbc_aes_encrypt,
			.decrypt		= cbc_aes_decrypt,
		}
	}
};

static int __init padlock_init(void)
{
	int ret;
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!cpu_has_xcrypt) {
		printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_xcrypt_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	if ((ret = crypto_register_alg(&aes_alg)))
		goto aes_err;

	if ((ret = crypto_register_alg(&ecb_aes_alg)))
		goto ecb_aes_err;

	if ((ret = crypto_register_alg(&cbc_aes_alg)))
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

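	/*
	 * Family 6, model 15, stepping 2 is the VIA Nano stepping with
	 * the prefetch errata; raising the fetch sizes makes the
	 * remainder/copy workarounds above kick in.
	 */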
	if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
	}

out:
	return ret;

cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("aes");