/*
 * Cryptographic API.
 *
 * Glue code for the SHA1 Secure Hash Algorithm assembler implementation using
 * Supplemental SSE3 instructions.
 *
 * This file is based on sha1_generic.c
 *
 * Copyright (c) Alan Smithee.
 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
 * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
 * Copyright (c) Mathias Krause <minipli@googlemail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/xsave.h>

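/*
 * Block transforms implemented in assembler; each call hashes @rounds
 * consecutive 64-byte input blocks from @data into @digest.
 */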
asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data,
				     unsigned int rounds);
#ifdef SHA1_ENABLE_AVX_SUPPORT
asmlinkage void sha1_transform_avx(u32 *digest, const char *data,
				   unsigned int rounds);
#endif

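/* Set once at module init to the fastest transform usable on this CPU. */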
static asmlinkage void (*sha1_transform_asm)(u32 *, const char *, unsigned int);

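/* Initialize the hash state with the standard SHA-1 initial values. */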
static int sha1_ssse3_init(struct shash_desc *desc)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha1_state){
		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
	};

	return 0;
}

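/*
 * Hash @len bytes of @data; the caller must already have claimed the FPU
 * via kernel_fpu_begin().  @partial is the number of bytes buffered from
 * an earlier, incomplete block.
 */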
static int __sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
			       unsigned int len, unsigned int partial)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int done = 0;

	sctx->count += len;

	if (partial) {
		done = SHA1_BLOCK_SIZE - partial;
		memcpy(sctx->buffer + partial, data, done);
		sha1_transform_asm(sctx->state, sctx->buffer, 1);
	}

	if (len - done >= SHA1_BLOCK_SIZE) {
		const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;

		sha1_transform_asm(sctx->state, data + done, rounds);
		done += rounds * SHA1_BLOCK_SIZE;
	}

	memcpy(sctx->buffer, data + done, len - done);

	return 0;
}

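/*
 * Buffer sub-block writes without touching the FPU; fall back to the
 * generic C implementation whenever the FPU cannot be used, e.g. when
 * called from interrupt context.
 */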
static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
			     unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
	int res;

	/* Handle the fast case right here */
	if (partial + len < SHA1_BLOCK_SIZE) {
		sctx->count += len;
		memcpy(sctx->buffer + partial, data, len);

		return 0;
	}

	if (!irq_fpu_usable()) {
		res = crypto_sha1_update(desc, data, len);
	} else {
		kernel_fpu_begin();
		res = __sha1_ssse3_update(desc, data, len, partial);
		kernel_fpu_end();
	}

	return res;
}

/* Add padding and return the message digest. */
static int sha1_ssse3_final(struct shash_desc *desc, u8 *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int i, index, padlen;
	__be32 *dst = (__be32 *)out;
	__be64 bits;
	static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };

	bits = cpu_to_be64(sctx->count << 3);

	/* Pad out to 56 mod 64 and append length */
	index = sctx->count % SHA1_BLOCK_SIZE;
	padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE + 56) - index);
	if (!irq_fpu_usable()) {
		crypto_sha1_update(desc, padding, padlen);
		crypto_sha1_update(desc, (const u8 *)&bits, sizeof(bits));
	} else {
		kernel_fpu_begin();
		/* We need to fill a whole block for __sha1_ssse3_update() */
		if (padlen <= 56) {
			sctx->count += padlen;
			memcpy(sctx->buffer + index, padding, padlen);
		} else {
			__sha1_ssse3_update(desc, padding, padlen, index);
		}
		__sha1_ssse3_update(desc, (const u8 *)&bits, sizeof(bits), 56);
		kernel_fpu_end();
	}

	/* Store state in digest */
	for (i = 0; i < 5; i++)
		dst[i] = cpu_to_be32(sctx->state[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof(*sctx));

	return 0;
}

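/* Export the raw hash state so a caller can save and later resume it. */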
static int sha1_ssse3_export(struct shash_desc *desc, void *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

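/* Restore a hash state previously saved by sha1_ssse3_export(). */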
static int sha1_ssse3_import(struct shash_desc *desc, const void *in)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

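/*
 * cra_priority 150 ranks this driver above the generic sha1
 * implementation, so the crypto API prefers it when both are available.
 */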
static struct shash_alg alg = {
	.digestsize	= SHA1_DIGEST_SIZE,
	.init		= sha1_ssse3_init,
	.update		= sha1_ssse3_update,
	.final		= sha1_ssse3_final,
	.export		= sha1_ssse3_export,
	.import		= sha1_ssse3_import,
	.descsize	= sizeof(struct sha1_state),
	.statesize	= sizeof(struct sha1_state),
	.base		= {
		.cra_name	 = "sha1",
		.cra_driver_name = "sha1-ssse3",
		.cra_priority	 = 150,
		.cra_flags	 = CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	 = SHA1_BLOCK_SIZE,
		.cra_module	 = THIS_MODULE,
	}
};

#ifdef SHA1_ENABLE_AVX_SUPPORT
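/*
 * AVX is only usable if the OS has enabled saving of both SSE and YMM
 * register state via XSETBV; otherwise the YMM registers would not be
 * preserved across context switches.
 */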
static bool __init avx_usable(void)
{
	u64 xcr0;

	if (!cpu_has_avx || !cpu_has_osxsave)
		return false;

	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
		pr_info("AVX detected but unusable.\n");
		return false;
	}

	return true;
}
#endif

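/*
 * Pick the best transform the CPU supports and register the algorithm;
 * refuse to load if neither SSSE3 nor AVX is usable.
 */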
static int __init sha1_ssse3_mod_init(void)
{
	/* test for SSSE3 first */
	if (cpu_has_ssse3)
		sha1_transform_asm = sha1_transform_ssse3;

#ifdef SHA1_ENABLE_AVX_SUPPORT
	/* allow AVX to override SSSE3; it's a little faster */
	if (avx_usable())
		sha1_transform_asm = sha1_transform_avx;
#endif

	if (sha1_transform_asm) {
		pr_info("Using %s optimized SHA-1 implementation\n",
			sha1_transform_asm == sha1_transform_ssse3 ? "SSSE3"
								   : "AVX");
		return crypto_register_shash(&alg);
	}
	pr_info("Neither AVX nor SSSE3 is available/usable.\n");

	return -ENODEV;
}

static void __exit sha1_ssse3_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(sha1_ssse3_mod_init);
module_exit(sha1_ssse3_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated");

MODULE_ALIAS("sha1");