/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
 *
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/padlock.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/i387.h>

/*
 * Number of data blocks actually fetched for each xcrypt insn.
 * Processors with prefetch errata will fetch extra blocks.
 */
static unsigned int ecb_fetch_blocks = 2;
#define MAX_ECB_FETCH_BLOCKS (8)
#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)

static unsigned int cbc_fetch_blocks = 1;
#define MAX_CBC_FETCH_BLOCKS (4)
#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
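
/*
 * Note (added for clarity): MAX_*_FETCH_BLOCKS is the worst case used on
 * parts with the prefetch erratum -- the VIA Nano stepping 2 workaround in
 * padlock_init() raises the runtime values to these -- and the bounce
 * buffers in ecb_crypt_copy()/cbc_crypt_copy() are sized for it.
 */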

/* Control word. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
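
/*
 * Note (added for clarity): the cword above is the per-operation descriptor
 * handed to the xcrypt instructions through EDX (see rep_xcrypt_*() below).
 * rounds, algo and ksize describe the cipher, encdec selects the direction,
 * and keygen is set by aes_set_key() whenever the key schedule has been
 * expanded in software rather than by the hardware.
 */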

/*
 * Whenever making any changes to the following structure, *make sure*
 * that E, d_data and cword stay aligned on 16-byte boundaries, and that
 * the hardware can access 16 * 16 bytes of E and d_data (only the first
 * 15 * 16 bytes matter, but the hardware reads more).
 */
struct aes_ctx {
	u32 E[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
};

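/*
 * Control word most recently loaded into the PadLock unit on each CPU.
 * padlock_reset_key() uses it to skip forcing a key reload when the same
 * control word is used back to back; aes_set_key() clears any cached
 * pointers that refer to the tfm being re-keyed.
 */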
static DEFINE_PER_CPU(struct cword *, paes_last_cword);

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	         as it's possible that the capability will be
	         added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}

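/*
 * Note (added for clarity): the crypto layer only guarantees
 * crypto_tfm_ctx_alignment() for the context memory; if that is smaller
 * than PADLOCK_ALIGNMENT, round the pointer up here so that E, d_data
 * and cword land on 16-byte boundaries as required by the hardware.
 */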
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}

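/*
 * Note (added for clarity): aes_set_key() fills in both control words from
 * key_len. rounds is 10/12/14 and ksize is 0/1/2 for 128/192/256-bit keys,
 * which is what "10 + (key_len - 16) / 4" and "(key_len - 16) / 8" below
 * evaluate to.
 */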
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	u32 *flags = &tfm->crt_flags;
	struct crypto_aes_ctx gen_aes;
	int cpu;

	if (key_len % 8) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	ctx->E[0] = le32_to_cpu(key[0]);
	ctx->E[1] = le32_to_cpu(key[1]);
	ctx->E[2] = le32_to_cpu(key[2]);
	ctx->E[3] = le32_to_cpu(key[3]);

	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		goto ok;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;

	if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);

ok:
	for_each_online_cpu(cpu)
		if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
		    &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
			per_cpu(paes_last_cword, cpu) = NULL;

	return 0;
}

/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
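
/*
 * Note (added for clarity): padlock_reset_key() forces the unit to reload
 * the key material before the next xcrypt by writing EFLAGS (pushf/popf),
 * which the PadLock engine takes as a signal that the key may have changed;
 * the per-cpu paes_last_cword cache lets us skip this when the same control
 * word is reused.
 */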
static inline void padlock_reset_key(struct cword *cword)
{
	int cpu = raw_smp_processor_id();

	if (cword != per_cpu(paes_last_cword, cpu))
#ifndef CONFIG_X86_64
		asm volatile ("pushfl; popfl");
#else
		asm volatile ("pushfq; popfq");
#endif
}

static inline void padlock_store_cword(struct cword *cword)
{
	per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}

/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when cr0.ts is '1'. These instructions
 * should be used only inside the irq_ts_save/restore() context
 */

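/*
 * Note (added for clarity): the xcrypt instructions are emitted as raw
 * opcode bytes so the file assembles even with toolchains that lack the
 * PadLock mnemonics. The register convention follows the asm constraints:
 * ESI = source, EDI = destination, EDX = control word, EBX = key,
 * ECX = block count, and EAX carries the IV for the CBC variant.
 */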
static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				  struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}

static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				 u8 *iv, struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	rep_xcrypt_ecb(tmp, out, key, cword, count);
}

static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
			  u8 *iv, struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}

static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword, int count)
{
	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
	 * We could avoid some copying here but it's probably not worth it.
	 */
	if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) {
		ecb_crypt_copy(in, out, key, cword, count);
		return;
	}

	rep_xcrypt_ecb(in, out, key, cword, count);
}

static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
			    u8 *iv, struct cword *cword, int count)
{
	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
	if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE))
		return cbc_crypt_copy(in, out, key, iv, cword, count);

	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}

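/*
 * Note (added for clarity): for requests of at least one full fetch, the
 * helpers below process the "initial" (count % fetch_blocks) blocks first,
 * while plenty of input still follows them, so that the remaining count is
 * a multiple of the fetch size and the final iteration cannot prefetch past
 * the end of the source buffer. Requests smaller than a whole fetch go
 * through ecb_crypt()/cbc_crypt(), which bounce the data if it lies too
 * close to the end of a page.
 */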
static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	u32 initial = count & (ecb_fetch_blocks - 1);

	if (count < ecb_fetch_blocks) {
		ecb_crypt(input, output, key, control_word, count);
		return;
	}

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
			      : "+S"(input), "+D"(output)
			      : "d"(control_word), "b"(key), "c"(initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count - initial));
}

static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	u32 initial = count & (cbc_fetch_blocks - 1);

	if (count < cbc_fetch_blocks)
		return cbc_crypt(input, output, key, iv, control_word, count);

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
			      : "+S" (input), "+D" (output), "+a" (iv)
			      : "d" (control_word), "b" (key), "c" (initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count-initial));
	return iv;
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}

static struct crypto_alg aes_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"aes-padlock",
	.cra_priority		=	PADLOCK_CRA_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_u			=	{
		.cipher = {
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey		=	aes_set_key,
			.cia_encrypt		=	aes_encrypt,
			.cia_decrypt		=	aes_decrypt,
		}
	}
};

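/*
 * Note (added for clarity): the blkcipher entry points below walk the
 * scatterlists with blkcipher_walk_virt(), handing each virtually mapped
 * chunk to the xcrypt helpers a whole number of AES blocks at a time; any
 * leftover bytes are handed back to the walk via blkcipher_walk_done().
 */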
static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.decrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		=	"ecb(aes)",
	.cra_driver_name	=	"ecb-aes-padlock",
	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(ecb_aes_alg.cra_list),
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.setkey			=	aes_set_key,
			.encrypt		=	ecb_aes_encrypt,
			.decrypt		=	ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.decrypt);

	return err;
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		=	"cbc(aes)",
	.cra_driver_name	=	"cbc-aes-padlock",
	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(cbc_aes_alg.cra_list),
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.ivsize			=	AES_BLOCK_SIZE,
			.setkey			=	aes_set_key,
			.encrypt		=	cbc_aes_encrypt,
			.decrypt		=	cbc_aes_decrypt,
		}
	}
};
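
/*
 * Usage sketch (illustrative only, not part of this driver; error handling
 * omitted and key/iv/sg_src/sg_dst/nbytes are placeholders): a kernel
 * consumer reaches these implementations through the generic crypto API,
 * and the higher cra_priority makes them win over aes-generic when the
 * hardware is present.  Roughly:
 *
 *	struct crypto_blkcipher *tfm;
 *	struct blkcipher_desc desc;
 *
 *	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
 *	desc.tfm = tfm;
 *	desc.flags = 0;
 *	crypto_blkcipher_setkey(tfm, key, 16);
 *	crypto_blkcipher_set_iv(tfm, iv, AES_BLOCK_SIZE);
 *	crypto_blkcipher_encrypt(&desc, &sg_dst, &sg_src, nbytes);
 *	crypto_free_blkcipher(tfm);
 */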

static struct x86_cpu_id padlock_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_XCRYPT),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id);

static int __init padlock_init(void)
{
	int ret;
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!x86_match_cpu(padlock_cpu_id))
		return -ENODEV;

	if (!cpu_has_xcrypt_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	if ((ret = crypto_register_alg(&aes_alg)))
		goto aes_err;

	if ((ret = crypto_register_alg(&ecb_aes_alg)))
		goto ecb_aes_err;

	if ((ret = crypto_register_alg(&cbc_aes_alg)))
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

	if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
	}

out:
	return ret;

cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("aes");