external/boringssl: Sync to 2c45fa0b90f61b27973fa81893e014fc8c8e8999.

This includes the following changes:

https://boringssl.googlesource.com/boringssl/+log/faa539f877432814d0f2de19846eb99f2ea1e207..2c45fa0b90f61b27973fa81893e014fc8c8e8999

Test: BoringSSL CTS Presubmits
Change-Id: Ie6dc40e0c979168ec73fa1165cbc6e6b83793439
diff --git a/src/crypto/fipsmodule/CMakeLists.txt b/src/crypto/fipsmodule/CMakeLists.txt
new file mode 100644
index 0000000..4db44a9
--- /dev/null
+++ b/src/crypto/fipsmodule/CMakeLists.txt
@@ -0,0 +1,176 @@
+include_directories(../../include)
+
+if (${ARCH} STREQUAL "x86_64")
+  set(
+    BCM_ASM_SOURCES
+
+    md5-x86_64.${ASM_EXT}
+    sha1-x86_64.${ASM_EXT}
+    sha256-x86_64.${ASM_EXT}
+    sha512-x86_64.${ASM_EXT}
+    aes-x86_64.${ASM_EXT}
+    aesni-x86_64.${ASM_EXT}
+    bsaes-x86_64.${ASM_EXT}
+    vpaes-x86_64.${ASM_EXT}
+  )
+endif()
+
+if (${ARCH} STREQUAL "x86")
+  set(
+    BCM_ASM_SOURCES
+
+    md5-586.${ASM_EXT}
+    sha1-586.${ASM_EXT}
+    sha256-586.${ASM_EXT}
+    sha512-586.${ASM_EXT}
+    aes-586.${ASM_EXT}
+    vpaes-x86.${ASM_EXT}
+    aesni-x86.${ASM_EXT}
+  )
+endif()
+
+if (${ARCH} STREQUAL "arm")
+  set(
+    BCM_ASM_SOURCES
+
+    sha1-armv4-large.${ASM_EXT}
+    sha256-armv4.${ASM_EXT}
+    sha512-armv4.${ASM_EXT}
+    aes-armv4.${ASM_EXT}
+    bsaes-armv7.${ASM_EXT}
+    aesv8-armx.${ASM_EXT}
+  )
+endif()
+
+if (${ARCH} STREQUAL "aarch64")
+  set(
+    BCM_ASM_SOURCES
+
+    sha1-armv8.${ASM_EXT}
+    sha256-armv8.${ASM_EXT}
+    sha512-armv8.${ASM_EXT}
+    aesv8-armx.${ASM_EXT}
+  )
+endif()
+
+if (${ARCH} STREQUAL "ppc64le")
+  set(
+    AES_ARCH_SOURCES
+
+    aesp8-ppc.${ASM_EXT}
+  )
+endif()
+
+perlasm(aes-586.${ASM_EXT} aes/asm/aes-586.pl)
+perlasm(aes-armv4.${ASM_EXT} aes/asm/aes-armv4.pl)
+perlasm(aesni-x86_64.${ASM_EXT} aes/asm/aesni-x86_64.pl)
+perlasm(aesni-x86.${ASM_EXT} aes/asm/aesni-x86.pl)
+perlasm(aesp8-ppc.${ASM_EXT} aes/asm/aesp8-ppc.pl)
+perlasm(aesv8-armx.${ASM_EXT} aes/asm/aesv8-armx.pl)
+perlasm(aes-x86_64.${ASM_EXT} aes/asm/aes-x86_64.pl)
+perlasm(bsaes-armv7.${ASM_EXT} aes/asm/bsaes-armv7.pl)
+perlasm(bsaes-x86_64.${ASM_EXT} aes/asm/bsaes-x86_64.pl)
+perlasm(md5-586.${ASM_EXT} md5/asm/md5-586.pl)
+perlasm(md5-x86_64.${ASM_EXT} md5/asm/md5-x86_64.pl)
+perlasm(sha1-586.${ASM_EXT} sha/asm/sha1-586.pl)
+perlasm(sha1-armv4-large.${ASM_EXT} sha/asm/sha1-armv4-large.pl)
+perlasm(sha1-armv8.${ASM_EXT} sha/asm/sha1-armv8.pl)
+perlasm(sha1-x86_64.${ASM_EXT} sha/asm/sha1-x86_64.pl)
+perlasm(sha256-586.${ASM_EXT} sha/asm/sha256-586.pl)
+perlasm(sha256-armv4.${ASM_EXT} sha/asm/sha256-armv4.pl)
+perlasm(sha256-armv8.${ASM_EXT} sha/asm/sha512-armv8.pl)
+perlasm(sha256-x86_64.${ASM_EXT} sha/asm/sha512-x86_64.pl)
+perlasm(sha512-586.${ASM_EXT} sha/asm/sha512-586.pl)
+perlasm(sha512-armv4.${ASM_EXT} sha/asm/sha512-armv4.pl)
+perlasm(sha512-armv8.${ASM_EXT} sha/asm/sha512-armv8.pl)
+perlasm(sha512-x86_64.${ASM_EXT} sha/asm/sha512-x86_64.pl)
+perlasm(vpaes-x86_64.${ASM_EXT} aes/asm/vpaes-x86_64.pl)
+perlasm(vpaes-x86.${ASM_EXT} aes/asm/vpaes-x86.pl)
+
+if(FIPS)
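+  # FIPS build pipeline (see FIPS.md in this directory): bcm.c is compiled to
+  # textual assembly (-S), delocate.go merges in the assembly sources and
+  # removes relocations, and inject-hash.go writes the known-good HMAC value
+  # into the final bcm.o.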
+  add_library(
+    bcm_c_generated_asm
+
+    STATIC
+
+    bcm.c
+  )
+
+  SET_TARGET_PROPERTIES(bcm_c_generated_asm PROPERTIES COMPILE_OPTIONS "-S")
+  SET_TARGET_PROPERTIES(bcm_c_generated_asm PROPERTIES POSITION_INDEPENDENT_CODE ON)
+
+  function(JOIN VALUES GLUE OUTPUT)
+    string (REPLACE ";" "${GLUE}" _TMP_STR "${VALUES}")
+    set (${OUTPUT} "${_TMP_STR}" PARENT_SCOPE)
+  endfunction()
+
+  set(DELOCATE_AS_FLAG)
+  set(DELOCATE_AS_ARG)
+
+  if(NOT "${BCM_ASM_SOURCES}" STREQUAL "")
+    set(DELOCATE_AS_FLAG "-as")
+    JOIN("${BCM_ASM_SOURCES}" ",${CMAKE_CURRENT_BINARY_DIR}/" BCM_ASM_SOURCES_COMMA_SEP)
+    SET(DELOCATE_AS_ARG "${CMAKE_CURRENT_BINARY_DIR}/${BCM_ASM_SOURCES_COMMA_SEP}")
+  endif()
+
+  add_custom_command(
+    OUTPUT bcm-delocated.S
+    COMMAND ${GO_EXECUTABLE} run crypto/fipsmodule/delocate.go crypto/fipsmodule/ar.go crypto/fipsmodule/const.go -a $<TARGET_FILE:bcm_c_generated_asm> ${DELOCATE_AS_FLAG} ${DELOCATE_AS_ARG} -o ${CMAKE_CURRENT_BINARY_DIR}/bcm-delocated.S
+    DEPENDS bcm_c_generated_asm ${BCM_ASM_SOURCES} delocate.go ar.go const.go
+    WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+  )
+
+  add_library(
+    bcm_hashunset
+
+    STATIC
+
+    bcm-delocated.S
+  )
+
+  set_target_properties(bcm_hashunset PROPERTIES POSITION_INDEPENDENT_CODE ON)
+  set_target_properties(bcm_hashunset PROPERTIES LINKER_LANGUAGE C)
+
+  add_custom_command(
+    OUTPUT bcm.o
+    COMMAND ${GO_EXECUTABLE} run crypto/fipsmodule/inject-hash.go crypto/fipsmodule/ar.go crypto/fipsmodule/const.go -o ${CMAKE_CURRENT_BINARY_DIR}/bcm.o -in $<TARGET_FILE:bcm_hashunset>
+    DEPENDS bcm_hashunset inject-hash.go ar.go const.go
+    WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+  )
+
+  # The outputs of add_custom_command cannot be referenced outside of the
+  # CMakeLists.txt that defines it. Thus we have to wrap bcm.o in a custom target
+  # so that crypto can depend on it.
+  add_custom_target(bcm_o_target DEPENDS bcm.o)
+
+  add_library(
+    fipsmodule
+
+    OBJECT
+
+    is_fips.c
+  )
+
+  set_target_properties(fipsmodule PROPERTIES LINKER_LANGUAGE C)
+else()
+  add_library(
+    fipsmodule
+
+    OBJECT
+
+    bcm.c
+    is_fips.c
+
+    ${BCM_ASM_SOURCES}
+  )
+endif()
+
+add_executable(
+  aes_test
+
+  aes/aes_test.cc
+  $<TARGET_OBJECTS:test_support>
+)
+
+target_link_libraries(aes_test crypto)
+add_dependencies(all_tests aes_test)
diff --git a/src/crypto/fipsmodule/FIPS.md b/src/crypto/fipsmodule/FIPS.md
new file mode 100644
index 0000000..06ef9fb
--- /dev/null
+++ b/src/crypto/fipsmodule/FIPS.md
@@ -0,0 +1,75 @@
+## Integrity Test
+
+FIPS-140 mandates that a module calculate an HMAC of its own code in a constructor function and compare the result to a known-good value. Typical code produced by a C compiler includes large numbers of relocations: places in the machine code where the linker needs to resolve and inject the final value of a symbolic expression. These relocations mean that the bytes that make up any specific bit of code generally aren't known until the final link has completed.
+
+Additionally, because of shared libraries and ASLR, some relocations can only be resolved at run-time, and thus targets of those relocations vary even after the final link.
+
+BoringSSL is linked (often statically) into a large number of binaries. It would be a significant cost if each of these binaries had to be post-processed in order to calculate the known-good HMAC value. We would much prefer if the value could be calculated, once, when BoringSSL itself is compiled.
+
+In order for the value to be calculated before the final link, there can be no relocations in the hashed code and data. This document describes how we build C and assembly code in order to produce an object file containing all the code and data for the FIPS module without that code having any relocations.
+
+First, all the C source files for the module are compiled as a single unit by compiling a single source file that `#include`s them all (this is `bcm.c`). The `-fPIC` flag is used to cause the compiler to use IP-relative addressing in many (but not all) cases. Also the `-S` flag is used to instruct the compiler to produce a textual assembly file rather than a binary object file.
+
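+As a rough sketch (the file names below are illustrative, not the module's actual contents), `bcm.c` is little more than a list of `#include` directives, so the whole module becomes a single translation unit:
+
+```c
+/* bcm.c (sketch): compiling every C source of the module as one unit lets
+ * the compiler, and later the delocate script, see the entire module at
+ * once. File names are illustrative. */
+#include "aes/aes.c"
+#include "md5/md5.c"
+#include "sha/sha1.c"
+#include "sha/sha256.c"
+#include "sha/sha512.c"
+```
+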
+The textual assembly file is then processed by a script to merge in assembly implementations of some primitives and to eliminate the remaining sources of relocations.
+
+##### Redirector functions
+
+The most obvious cause of relocations is out-calls from the module to non-cryptographic functions outside of the module. These include `malloc`, `memcpy` and other libc functions, but also calls to support code in BoringSSL such as functions for managing the error queue.
+
+Offsets to these functions cannot be known until the final link because only the linker sees the object files containing them. Thus calls to these functions are rewritten into an IP-relative jump to a redirector function. Each redirector function contains a single jump instruction to the real function; the redirectors are placed outside of the module and are thus not hashed (see diagram).
+
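+For illustration only, the layering looks roughly like the C below; the script actually emits each redirector as a single jump instruction in assembly, and the name here is made up:
+
+```c
+#include <stddef.h>
+#include <string.h>
+
+/* Hypothetical redirector for memcpy. It lives outside the hashed region, so
+ * the relocation for the real memcpy lands in unhashed code; module code
+ * makes an IP-relative call to this symbol instead of calling memcpy
+ * directly. */
+void *bcm_redirector_memcpy(void *dst, const void *src, size_t n) {
+  return memcpy(dst, src, n);
+}
+```
+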
+![module structure](/crypto/fipsmodule/intcheck1.png)
+
+In this diagram, the integrity check hashes from `module_start` to `module_end`. Since this does not cover the jump to `memcpy`, it's fine that the linker will poke the final offset into that instruction.
+
+##### Read-only data
+
+Normally read-only data is placed in a `.rodata` segment that doesn't get mapped into memory with execute permissions. However, the offset of that data segment from the text segment is another thing that isn't determined until the final link. In order to fix data offsets before the link, read-only data is simply placed in the module's `.text` segment. This might make building ROP chains easier for an attacker, but so it goes.
+
+One special case is `rel.ro` data, which is data that contains function pointers. Since these function pointers are absolute, they are written by the dynamic linker at run-time and so we must eliminate them. The pattern that causes them is a static `EVP_MD` or `EVP_CIPHER` object. Inside the module, this pattern is changed to instead reserve space in the BSS for the object and add a `CRYPTO_once_t` to protect its initialisation. The script will generate functions outside of the module that return pointers to these areas of memory; they effectively act like special-purpose `malloc` calls that cannot fail.
+
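+A minimal sketch of the transformed pattern, using a made-up `METHOD` type and accessor in place of the real `EVP_MD`/`EVP_CIPHER` machinery:
+
+```c
+#include <stddef.h>
+#include "../internal.h"  /* illustrative path: declares CRYPTO_once_t and CRYPTO_once */
+
+/* Stand-in for EVP_MD/EVP_CIPHER; the real structs hold function pointers. */
+typedef struct {
+  int nid;
+  void (*init_func)(void);
+} METHOD;
+
+static METHOD kMethodStorage;  /* lives in the BSS: all zeroes, no relocations */
+static CRYPTO_once_t kMethodOnce = CRYPTO_ONCE_INIT;
+
+static void method_init(void) {
+  /* Fields, including function pointers, are written at run time rather than
+   * by the dynamic linker. */
+  kMethodStorage.nid = 42;
+  kMethodStorage.init_func = NULL;
+}
+
+/* Accessor generated outside the module: a special-purpose malloc that
+ * cannot fail. */
+const METHOD *METHOD_get(void) {
+  CRYPTO_once(&kMethodOnce, method_init);
+  return &kMethodStorage;
+}
+```
+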
+##### Read-write data
+
+Mutable data is a problem. It cannot be in the text segment because the text segment is mapped read-only. If it's in a different segment then the code cannot reference it with a known, IP-relative offset because the segment layout is only fixed during the final link.
+
+Thankfully, mutable data is very rare in our cryptographic code and I hope that we can get it down to just a few variables. In order to allow this we use a similar design to the redirector functions: the code references a symbol that's in the text segment, but out of the module and thus not hashed. A relocation record is emitted to instruct the linker to poke the final offset to the variable in that location. Thus the only change needed is an extra indirection when loading the value.
+
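+Conceptually, and with invented names (the real transform is applied to the assembly by the script), the extra indirection looks like this:
+
+```c
+#include <stdint.h>
+
+extern uint32_t failure_count;            /* the mutable datum itself */
+extern uint32_t *failure_count_indirect;  /* slot in the unhashed text segment;
+                                             the linker pokes &failure_count
+                                             into it at the final link */
+
+static uint32_t read_failure_count(void) {
+  /* One extra load compared with reading the variable directly. */
+  return *failure_count_indirect;
+}
+```
+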
+##### Other transforms
+
+The script performs a number of other transformations which are worth noting but do not warrant their own sections:
+
+1.  It duplicates each global symbol with a local symbol that has `_local_target` appended to the name. References to the global symbols are rewritten to use these duplicates instead. Otherwise, although the generated code uses IP-relative references, relocations are emitted for global symbols in case they are overridden by a different object file during the link.
+1.  Various sections, notably `.rodata`, are moved to the `.text` section, inside the module, so that module code may reference them without relocations.
+1.  For each BSS symbol, it generates a function named after that symbol but with `_bss_get` appended, which returns its address.
+1.  It inserts the labels that delimit the module's code and data (called `module_start` and `module_end` in the diagram above).
+1.  It adds a 32-byte, read-only array outside of the module to contain the known-good HMAC value.
+1.  It rewrites some "dummy" references to point to those labels and that array. (In order to get the C compiler to emit the correct code, it's necessary to make it think that it's referencing static functions; this rewrite compensates for that trick.)
+
+##### Integrity testing
+
+In order to actually implement the integrity test, a constructor function within the module calculates an HMAC from `module_start` to `module_end` using a fixed, all-zero key. It compares the result with the known-good value added (by the script) to the unhashed portion of the text segment. If they don't match, it calls `exit` in an infinite loop.
+
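+A heavily simplified sketch of such a constructor; the symbol names, the choice of hash for the HMAC, and the exact failure behaviour are illustrative rather than the module's actual implementation:
+
+```c
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <openssl/digest.h>
+#include <openssl/hmac.h>
+#include <openssl/sha.h>
+
+/* Labels inserted by the script around the module's code and data, plus the
+ * 32-byte known-good value stored outside the hashed region. Names are
+ * illustrative. */
+extern const uint8_t module_start[], module_end[];
+extern const uint8_t module_expected_hmac[32];
+
+static void __attribute__((constructor)) integrity_test(void) {
+  static const uint8_t kZeroKey[32] = {0};
+  uint8_t computed[SHA256_DIGEST_LENGTH];
+  unsigned computed_len;
+
+  HMAC(EVP_sha256(), kZeroKey, sizeof(kZeroKey), module_start,
+       module_end - module_start, computed, &computed_len);
+
+  if (memcmp(computed, module_expected_hmac, sizeof(module_expected_hmac)) != 0) {
+    for (;;) {
+      exit(1);
+    }
+  }
+}
+```
+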
+Initially the known-good value will be incorrect. Another script (`inject-hash.go`) calculates the correct value from the assembled object and injects it back into the object.
+
+![build process](/crypto/fipsmodule/intcheck2.png)
+
+### Comparison with OpenSSL's method
+
+(This is based on reading OpenSSL's [user guide](https://www.openssl.org/docs/fips/UserGuide-2.0.pdf) and inspecting the code of OpenSSL FIPS 2.0.12.)
+
+OpenSSL's solution to this problem is broadly similar but has a number of differences:
+
+1.  OpenSSL deals with run-time relocations by not hashing parts of the module's data. BoringSSL will eliminate run-time relocations instead and hash everything.
+1.  OpenSSL uses `ld -r` (the partial linking mode) to merge a number of object files into their `fipscanister.o`. For BoringSSL, we propose to merge all the C source files by building a single C file that `#include`s all the others, and to merge the assembly sources by concatenating them to the assembly output from the C compiler.
+1.  OpenSSL depends on the link order and inserts two object files, `fips_start.o` and `fips_end.o`, in order to establish the `module_start` and `module_end` values. BoringSSL proposes to simply add labels at the correct places in the assembly.
+1.  OpenSSL calculates the hash after the final link and either injects it into the binary or recompiles with the value of the hash passed in as a `#define`. BoringSSL calculates it prior to the final link and injects it into the object file.
+1.  OpenSSL references read-write data directly, since it can know the offsets to it. BoringSSL indirects these loads and stores.
+1.  OpenSSL doesn't run the power-on test until `FIPS_module_mode_set` is called, whereas BoringSSL plans to do it in a constructor function. Failure of the test is non-fatal in OpenSSL; BoringSSL plans to crash.
+1.  Since the contents of OpenSSL's module change between compilation and use, OpenSSL generates `fipscanister.o.sha1` to check that the compiled object doesn't change before linking. Since BoringSSL's module is fixed after compilation, the final integrity check is unbroken through the linking process.
+
+Some of the similarities are worth noting:
+
+1.  OpenSSL has all out-calls from the module indirecting via the PLT, which is equivalent to the redirector functions described above.
+
+
+![OpenSSL build process](/crypto/fipsmodule/intcheck3.png)
diff --git a/src/crypto/fipsmodule/aes/aes.c b/src/crypto/fipsmodule/aes/aes.c
new file mode 100644
index 0000000..cd53a46
--- /dev/null
+++ b/src/crypto/fipsmodule/aes/aes.c
@@ -0,0 +1,1142 @@
+/* ====================================================================
+ * Copyright (c) 2002-2006 The OpenSSL Project.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer. 
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ *    software must display the following acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ *    endorse or promote products derived from this software without
+ *    prior written permission. For written permission, please contact
+ *    openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ *    nor may "OpenSSL" appear in their names without prior written
+ *    permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ *    acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ==================================================================== */
+
+#include <openssl/aes.h>
+
+#include <assert.h>
+#include <stdlib.h>
+
+#include <openssl/cpu.h>
+
+#include "../../modes/internal.h"
+
+
+#if defined(OPENSSL_NO_ASM) || \
+    (!defined(OPENSSL_X86) && !defined(OPENSSL_X86_64) && !defined(OPENSSL_ARM))
+
+/* Te0[x] = S [x].[02, 01, 01, 03];
+ * Te1[x] = S [x].[03, 02, 01, 01];
+ * Te2[x] = S [x].[01, 03, 02, 01];
+ * Te3[x] = S [x].[01, 01, 03, 02];
+ *
+ * Td0[x] = Si[x].[0e, 09, 0d, 0b];
+ * Td1[x] = Si[x].[0b, 0e, 09, 0d];
+ * Td2[x] = Si[x].[0d, 0b, 0e, 09];
+ * Td3[x] = Si[x].[09, 0d, 0b, 0e];
+ * Td4[x] = Si[x].[01]; */
+
+static const uint32_t Te0[256] = {
+    0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU, 0xfff2f20dU,
+    0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U, 0x60303050U, 0x02010103U,
+    0xce6767a9U, 0x562b2b7dU, 0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U,
+    0xec76769aU, 0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U,
+    0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU, 0x41adadecU,
+    0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU, 0x239c9cbfU, 0x53a4a4f7U,
+    0xe4727296U, 0x9bc0c05bU, 0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU,
+    0x4c26266aU, 0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU,
+    0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U, 0xe2717193U,
+    0xabd8d873U, 0x62313153U, 0x2a15153fU, 0x0804040cU, 0x95c7c752U,
+    0x46232365U, 0x9dc3c35eU, 0x30181828U, 0x379696a1U, 0x0a05050fU,
+    0x2f9a9ab5U, 0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU,
+    0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU, 0x1209091bU,
+    0x1d83839eU, 0x582c2c74U, 0x341a1a2eU, 0x361b1b2dU, 0xdc6e6eb2U,
+    0xb45a5aeeU, 0x5ba0a0fbU, 0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U,
+    0x7db3b3ceU, 0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U,
+    0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU, 0x40202060U,
+    0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU, 0xd46a6abeU, 0x8dcbcb46U,
+    0x67bebed9U, 0x7239394bU, 0x944a4adeU, 0x984c4cd4U, 0xb05858e8U,
+    0x85cfcf4aU, 0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U,
+    0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U, 0x8a4545cfU,
+    0xe9f9f910U, 0x04020206U, 0xfe7f7f81U, 0xa05050f0U, 0x783c3c44U,
+    0x259f9fbaU, 0x4ba8a8e3U, 0xa25151f3U, 0x5da3a3feU, 0x804040c0U,
+    0x058f8f8aU, 0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U,
+    0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U, 0x20101030U,
+    0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU, 0x81cdcd4cU, 0x180c0c14U,
+    0x26131335U, 0xc3ecec2fU, 0xbe5f5fe1U, 0x359797a2U, 0x884444ccU,
+    0x2e171739U, 0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U,
+    0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U, 0xc06060a0U,
+    0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU, 0x44222266U, 0x542a2a7eU,
+    0x3b9090abU, 0x0b888883U, 0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U,
+    0x2814143cU, 0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U,
+    0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU, 0x924949dbU,
+    0x0c06060aU, 0x4824246cU, 0xb85c5ce4U, 0x9fc2c25dU, 0xbdd3d36eU,
+    0x43acacefU, 0xc46262a6U, 0x399191a8U, 0x319595a4U, 0xd3e4e437U,
+    0xf279798bU, 0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U,
+    0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U, 0xd86c6cb4U,
+    0xac5656faU, 0xf3f4f407U, 0xcfeaea25U, 0xca6565afU, 0xf47a7a8eU,
+    0x47aeaee9U, 0x10080818U, 0x6fbabad5U, 0xf0787888U, 0x4a25256fU,
+    0x5c2e2e72U, 0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U,
+    0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U, 0x964b4bddU,
+    0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U, 0xe0707090U, 0x7c3e3e42U,
+    0x71b5b5c4U, 0xcc6666aaU, 0x904848d8U, 0x06030305U, 0xf7f6f601U,
+    0x1c0e0e12U, 0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U,
+    0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U, 0xd9e1e138U,
+    0xebf8f813U, 0x2b9898b3U, 0x22111133U, 0xd26969bbU, 0xa9d9d970U,
+    0x078e8e89U, 0x339494a7U, 0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U,
+    0xc9e9e920U, 0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU,
+    0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U, 0x65bfbfdaU,
+    0xd7e6e631U, 0x844242c6U, 0xd06868b8U, 0x824141c3U, 0x299999b0U,
+    0x5a2d2d77U, 0x1e0f0f11U, 0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U,
+    0x2c16163aU, };
+
+static const uint32_t Te1[256] = {
+    0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU, 0x0dfff2f2U,
+    0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U, 0x50603030U, 0x03020101U,
+    0xa9ce6767U, 0x7d562b2bU, 0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU,
+    0x9aec7676U, 0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU,
+    0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U, 0xec41adadU,
+    0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU, 0xbf239c9cU, 0xf753a4a4U,
+    0x96e47272U, 0x5b9bc0c0U, 0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U,
+    0x6a4c2626U, 0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU,
+    0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U, 0x93e27171U,
+    0x73abd8d8U, 0x53623131U, 0x3f2a1515U, 0x0c080404U, 0x5295c7c7U,
+    0x65462323U, 0x5e9dc3c3U, 0x28301818U, 0xa1379696U, 0x0f0a0505U,
+    0xb52f9a9aU, 0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U,
+    0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U, 0x1b120909U,
+    0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU, 0x2d361b1bU, 0xb2dc6e6eU,
+    0xeeb45a5aU, 0xfb5ba0a0U, 0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U,
+    0xce7db3b3U, 0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U,
+    0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU, 0x60402020U,
+    0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU, 0xbed46a6aU, 0x468dcbcbU,
+    0xd967bebeU, 0x4b723939U, 0xde944a4aU, 0xd4984c4cU, 0xe8b05858U,
+    0x4a85cfcfU, 0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU,
+    0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U, 0xcf8a4545U,
+    0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU, 0xf0a05050U, 0x44783c3cU,
+    0xba259f9fU, 0xe34ba8a8U, 0xf3a25151U, 0xfe5da3a3U, 0xc0804040U,
+    0x8a058f8fU, 0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U,
+    0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U, 0x30201010U,
+    0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U, 0x4c81cdcdU, 0x14180c0cU,
+    0x35261313U, 0x2fc3ececU, 0xe1be5f5fU, 0xa2359797U, 0xcc884444U,
+    0x392e1717U, 0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU,
+    0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U, 0xa0c06060U,
+    0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU, 0x66442222U, 0x7e542a2aU,
+    0xab3b9090U, 0x830b8888U, 0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U,
+    0x3c281414U, 0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU,
+    0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU, 0xdb924949U,
+    0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU, 0x5d9fc2c2U, 0x6ebdd3d3U,
+    0xef43acacU, 0xa6c46262U, 0xa8399191U, 0xa4319595U, 0x37d3e4e4U,
+    0x8bf27979U, 0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU,
+    0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U, 0xb4d86c6cU,
+    0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU, 0xafca6565U, 0x8ef47a7aU,
+    0xe947aeaeU, 0x18100808U, 0xd56fbabaU, 0x88f07878U, 0x6f4a2525U,
+    0x725c2e2eU, 0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U,
+    0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU, 0xdd964b4bU,
+    0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU, 0x90e07070U, 0x427c3e3eU,
+    0xc471b5b5U, 0xaacc6666U, 0xd8904848U, 0x05060303U, 0x01f7f6f6U,
+    0x121c0e0eU, 0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U,
+    0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU, 0x38d9e1e1U,
+    0x13ebf8f8U, 0xb32b9898U, 0x33221111U, 0xbbd26969U, 0x70a9d9d9U,
+    0x89078e8eU, 0xa7339494U, 0xb62d9b9bU, 0x223c1e1eU, 0x92158787U,
+    0x20c9e9e9U, 0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU,
+    0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU, 0xda65bfbfU,
+    0x31d7e6e6U, 0xc6844242U, 0xb8d06868U, 0xc3824141U, 0xb0299999U,
+    0x775a2d2dU, 0x111e0f0fU, 0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU,
+    0x3a2c1616U, };
+
+static const uint32_t Te2[256] = {
+    0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU, 0xf20dfff2U,
+    0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U, 0x30506030U, 0x01030201U,
+    0x67a9ce67U, 0x2b7d562bU, 0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU,
+    0x769aec76U, 0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU,
+    0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U, 0xadec41adU,
+    0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU, 0x9cbf239cU, 0xa4f753a4U,
+    0x7296e472U, 0xc05b9bc0U, 0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U,
+    0x266a4c26U, 0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU,
+    0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U, 0x7193e271U,
+    0xd873abd8U, 0x31536231U, 0x153f2a15U, 0x040c0804U, 0xc75295c7U,
+    0x23654623U, 0xc35e9dc3U, 0x18283018U, 0x96a13796U, 0x050f0a05U,
+    0x9ab52f9aU, 0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U,
+    0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U, 0x091b1209U,
+    0x839e1d83U, 0x2c74582cU, 0x1a2e341aU, 0x1b2d361bU, 0x6eb2dc6eU,
+    0x5aeeb45aU, 0xa0fb5ba0U, 0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U,
+    0xb3ce7db3U, 0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U,
+    0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU, 0x20604020U,
+    0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU, 0x6abed46aU, 0xcb468dcbU,
+    0xbed967beU, 0x394b7239U, 0x4ade944aU, 0x4cd4984cU, 0x58e8b058U,
+    0xcf4a85cfU, 0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU,
+    0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U, 0x45cf8a45U,
+    0xf910e9f9U, 0x02060402U, 0x7f81fe7fU, 0x50f0a050U, 0x3c44783cU,
+    0x9fba259fU, 0xa8e34ba8U, 0x51f3a251U, 0xa3fe5da3U, 0x40c08040U,
+    0x8f8a058fU, 0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U,
+    0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U, 0x10302010U,
+    0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U, 0xcd4c81cdU, 0x0c14180cU,
+    0x13352613U, 0xec2fc3ecU, 0x5fe1be5fU, 0x97a23597U, 0x44cc8844U,
+    0x17392e17U, 0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU,
+    0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U, 0x60a0c060U,
+    0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU, 0x22664422U, 0x2a7e542aU,
+    0x90ab3b90U, 0x88830b88U, 0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U,
+    0x143c2814U, 0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU,
+    0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU, 0x49db9249U,
+    0x060a0c06U, 0x246c4824U, 0x5ce4b85cU, 0xc25d9fc2U, 0xd36ebdd3U,
+    0xacef43acU, 0x62a6c462U, 0x91a83991U, 0x95a43195U, 0xe437d3e4U,
+    0x798bf279U, 0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU,
+    0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U, 0x6cb4d86cU,
+    0x56faac56U, 0xf407f3f4U, 0xea25cfeaU, 0x65afca65U, 0x7a8ef47aU,
+    0xaee947aeU, 0x08181008U, 0xbad56fbaU, 0x7888f078U, 0x256f4a25U,
+    0x2e725c2eU, 0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U,
+    0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU, 0x4bdd964bU,
+    0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU, 0x7090e070U, 0x3e427c3eU,
+    0xb5c471b5U, 0x66aacc66U, 0x48d89048U, 0x03050603U, 0xf601f7f6U,
+    0x0e121c0eU, 0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U,
+    0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU, 0xe138d9e1U,
+    0xf813ebf8U, 0x98b32b98U, 0x11332211U, 0x69bbd269U, 0xd970a9d9U,
+    0x8e89078eU, 0x94a73394U, 0x9bb62d9bU, 0x1e223c1eU, 0x87921587U,
+    0xe920c9e9U, 0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU,
+    0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU, 0xbfda65bfU,
+    0xe631d7e6U, 0x42c68442U, 0x68b8d068U, 0x41c38241U, 0x99b02999U,
+    0x2d775a2dU, 0x0f111e0fU, 0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU,
+    0x163a2c16U, };
+
+static const uint32_t Te3[256] = {
+    0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U, 0xf2f20dffU,
+    0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U, 0x30305060U, 0x01010302U,
+    0x6767a9ceU, 0x2b2b7d56U, 0xfefe19e7U, 0xd7d762b5U, 0xababe64dU,
+    0x76769aecU, 0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU,
+    0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU, 0xadadec41U,
+    0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U, 0x9c9cbf23U, 0xa4a4f753U,
+    0x727296e4U, 0xc0c05b9bU, 0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU,
+    0x26266a4cU, 0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U,
+    0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U, 0x717193e2U,
+    0xd8d873abU, 0x31315362U, 0x15153f2aU, 0x04040c08U, 0xc7c75295U,
+    0x23236546U, 0xc3c35e9dU, 0x18182830U, 0x9696a137U, 0x05050f0aU,
+    0x9a9ab52fU, 0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU,
+    0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU, 0x09091b12U,
+    0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U, 0x1b1b2d36U, 0x6e6eb2dcU,
+    0x5a5aeeb4U, 0xa0a0fb5bU, 0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U,
+    0xb3b3ce7dU, 0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U,
+    0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U, 0x20206040U,
+    0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U, 0x6a6abed4U, 0xcbcb468dU,
+    0xbebed967U, 0x39394b72U, 0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U,
+    0xcfcf4a85U, 0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU,
+    0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U, 0x4545cf8aU,
+    0xf9f910e9U, 0x02020604U, 0x7f7f81feU, 0x5050f0a0U, 0x3c3c4478U,
+    0x9f9fba25U, 0xa8a8e34bU, 0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U,
+    0x8f8f8a05U, 0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U,
+    0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U, 0x10103020U,
+    0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU, 0xcdcd4c81U, 0x0c0c1418U,
+    0x13133526U, 0xecec2fc3U, 0x5f5fe1beU, 0x9797a235U, 0x4444cc88U,
+    0x1717392eU, 0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU,
+    0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U, 0x6060a0c0U,
+    0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U, 0x22226644U, 0x2a2a7e54U,
+    0x9090ab3bU, 0x8888830bU, 0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU,
+    0x14143c28U, 0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU,
+    0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U, 0x4949db92U,
+    0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U, 0xc2c25d9fU, 0xd3d36ebdU,
+    0xacacef43U, 0x6262a6c4U, 0x9191a839U, 0x9595a431U, 0xe4e437d3U,
+    0x79798bf2U, 0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU,
+    0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U, 0x6c6cb4d8U,
+    0x5656faacU, 0xf4f407f3U, 0xeaea25cfU, 0x6565afcaU, 0x7a7a8ef4U,
+    0xaeaee947U, 0x08081810U, 0xbabad56fU, 0x787888f0U, 0x25256f4aU,
+    0x2e2e725cU, 0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U,
+    0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU, 0x4b4bdd96U,
+    0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU, 0x707090e0U, 0x3e3e427cU,
+    0xb5b5c471U, 0x6666aaccU, 0x4848d890U, 0x03030506U, 0xf6f601f7U,
+    0x0e0e121cU, 0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U,
+    0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U, 0xe1e138d9U,
+    0xf8f813ebU, 0x9898b32bU, 0x11113322U, 0x6969bbd2U, 0xd9d970a9U,
+    0x8e8e8907U, 0x9494a733U, 0x9b9bb62dU, 0x1e1e223cU, 0x87879215U,
+    0xe9e920c9U, 0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U,
+    0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU, 0xbfbfda65U,
+    0xe6e631d7U, 0x4242c684U, 0x6868b8d0U, 0x4141c382U, 0x9999b029U,
+    0x2d2d775aU, 0x0f0f111eU, 0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU,
+    0x16163a2cU, };
+
+static const uint32_t Td0[256] = {
+    0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U, 0x3bab6bcbU,
+    0x1f9d45f1U, 0xacfa58abU, 0x4be30393U, 0x2030fa55U, 0xad766df6U,
+    0x88cc7691U, 0xf5024c25U, 0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U,
+    0xb562a38fU, 0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U,
+    0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U, 0x038f5fe7U,
+    0x15929c95U, 0xbf6d7aebU, 0x955259daU, 0xd4be832dU, 0x587421d3U,
+    0x49e06929U, 0x8ec9c844U, 0x75c2896aU, 0xf48e7978U, 0x99583e6bU,
+    0x27b971ddU, 0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U,
+    0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U, 0xb16477e0U,
+    0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U, 0x70486858U, 0x8f45fd19U,
+    0x94de6c87U, 0x527bf8b7U, 0xab73d323U, 0x724b02e2U, 0xe31f8f57U,
+    0x6655ab2aU, 0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U,
+    0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU, 0x8acf1c2bU,
+    0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U, 0x65daf4cdU, 0x0605bed5U,
+    0xd134621fU, 0xc4a6fe8aU, 0x342e539dU, 0xa2f355a0U, 0x058ae132U,
+    0xa4f6eb75U, 0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U,
+    0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U, 0x91548db5U,
+    0x71c45d05U, 0x0406d46fU, 0x605015ffU, 0x1998fb24U, 0xd6bde997U,
+    0x894043ccU, 0x67d99e77U, 0xb0e842bdU, 0x07898b88U, 0xe7195b38U,
+    0x79c8eedbU, 0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U,
+    0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU, 0xfd0efffbU,
+    0x0f853856U, 0x3daed51eU, 0x362d3927U, 0x0a0fd964U, 0x685ca621U,
+    0x9b5b54d1U, 0x24362e3aU, 0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U,
+    0x1b9b919eU, 0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U,
+    0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU, 0x0e090d0bU,
+    0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U, 0x57f11985U, 0xaf75074cU,
+    0xee99ddbbU, 0xa37f60fdU, 0xf701269fU, 0x5c72f5bcU, 0x44663bc5U,
+    0x5bfb7e34U, 0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U,
+    0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U, 0x854a247dU,
+    0xd2bb3df8U, 0xaef93211U, 0xc729a16dU, 0x1d9e2f4bU, 0xdcb230f3U,
+    0x0d8652ecU, 0x77c1e3d0U, 0x2bb3166cU, 0xa970b999U, 0x119448faU,
+    0x47e96422U, 0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU,
+    0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U, 0xa6f581cfU,
+    0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U, 0x2c3a9de4U, 0x5078920dU,
+    0x6a5fcc9bU, 0x547e4662U, 0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU,
+    0x82c3aff5U, 0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U,
+    0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU, 0xcd267809U,
+    0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U, 0xe6956e65U, 0xaaffe67eU,
+    0x21bccf08U, 0xef15e8e6U, 0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U,
+    0x29b07cd6U, 0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U,
+    0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U, 0xf104984aU,
+    0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU, 0x764dd68dU, 0x43efb04dU,
+    0xccaa4d54U, 0xe49604dfU, 0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U,
+    0x4665517fU, 0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU,
+    0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U, 0x9ad7618cU,
+    0x37a10c7aU, 0x59f8148eU, 0xeb133c89U, 0xcea927eeU, 0xb761c935U,
+    0xe11ce5edU, 0x7a47b13cU, 0x9cd2df59U, 0x55f2733fU, 0x1814ce79U,
+    0x73c737bfU, 0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U,
+    0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU, 0x161dc372U,
+    0xbce2250cU, 0x283c498bU, 0xff0d9541U, 0x39a80171U, 0x080cb3deU,
+    0xd8b4e49cU, 0x6456c190U, 0x7bcb8461U, 0xd532b670U, 0x486c5c74U,
+    0xd0b85742U, };
+
+static const uint32_t Td1[256] = {
+    0x5051f4a7U, 0x537e4165U, 0xc31a17a4U, 0x963a275eU, 0xcb3bab6bU,
+    0xf11f9d45U, 0xabacfa58U, 0x934be303U, 0x552030faU, 0xf6ad766dU,
+    0x9188cc76U, 0x25f5024cU, 0xfc4fe5d7U, 0xd7c52acbU, 0x80263544U,
+    0x8fb562a3U, 0x49deb15aU, 0x6725ba1bU, 0x9845ea0eU, 0xe15dfec0U,
+    0x02c32f75U, 0x12814cf0U, 0xa38d4697U, 0xc66bd3f9U, 0xe7038f5fU,
+    0x9515929cU, 0xebbf6d7aU, 0xda955259U, 0x2dd4be83U, 0xd3587421U,
+    0x2949e069U, 0x448ec9c8U, 0x6a75c289U, 0x78f48e79U, 0x6b99583eU,
+    0xdd27b971U, 0xb6bee14fU, 0x17f088adU, 0x66c920acU, 0xb47dce3aU,
+    0x1863df4aU, 0x82e51a31U, 0x60975133U, 0x4562537fU, 0xe0b16477U,
+    0x84bb6baeU, 0x1cfe81a0U, 0x94f9082bU, 0x58704868U, 0x198f45fdU,
+    0x8794de6cU, 0xb7527bf8U, 0x23ab73d3U, 0xe2724b02U, 0x57e31f8fU,
+    0x2a6655abU, 0x07b2eb28U, 0x032fb5c2U, 0x9a86c57bU, 0xa5d33708U,
+    0xf2302887U, 0xb223bfa5U, 0xba02036aU, 0x5ced1682U, 0x2b8acf1cU,
+    0x92a779b4U, 0xf0f307f2U, 0xa14e69e2U, 0xcd65daf4U, 0xd50605beU,
+    0x1fd13462U, 0x8ac4a6feU, 0x9d342e53U, 0xa0a2f355U, 0x32058ae1U,
+    0x75a4f6ebU, 0x390b83ecU, 0xaa4060efU, 0x065e719fU, 0x51bd6e10U,
+    0xf93e218aU, 0x3d96dd06U, 0xaedd3e05U, 0x464de6bdU, 0xb591548dU,
+    0x0571c45dU, 0x6f0406d4U, 0xff605015U, 0x241998fbU, 0x97d6bde9U,
+    0xcc894043U, 0x7767d99eU, 0xbdb0e842U, 0x8807898bU, 0x38e7195bU,
+    0xdb79c8eeU, 0x47a17c0aU, 0xe97c420fU, 0xc9f8841eU, 0x00000000U,
+    0x83098086U, 0x48322bedU, 0xac1e1170U, 0x4e6c5a72U, 0xfbfd0effU,
+    0x560f8538U, 0x1e3daed5U, 0x27362d39U, 0x640a0fd9U, 0x21685ca6U,
+    0xd19b5b54U, 0x3a24362eU, 0xb10c0a67U, 0x0f9357e7U, 0xd2b4ee96U,
+    0x9e1b9b91U, 0x4f80c0c5U, 0xa261dc20U, 0x695a774bU, 0x161c121aU,
+    0x0ae293baU, 0xe5c0a02aU, 0x433c22e0U, 0x1d121b17U, 0x0b0e090dU,
+    0xadf28bc7U, 0xb92db6a8U, 0xc8141ea9U, 0x8557f119U, 0x4caf7507U,
+    0xbbee99ddU, 0xfda37f60U, 0x9ff70126U, 0xbc5c72f5U, 0xc544663bU,
+    0x345bfb7eU, 0x768b4329U, 0xdccb23c6U, 0x68b6edfcU, 0x63b8e4f1U,
+    0xcad731dcU, 0x10426385U, 0x40139722U, 0x2084c611U, 0x7d854a24U,
+    0xf8d2bb3dU, 0x11aef932U, 0x6dc729a1U, 0x4b1d9e2fU, 0xf3dcb230U,
+    0xec0d8652U, 0xd077c1e3U, 0x6c2bb316U, 0x99a970b9U, 0xfa119448U,
+    0x2247e964U, 0xc4a8fc8cU, 0x1aa0f03fU, 0xd8567d2cU, 0xef223390U,
+    0xc787494eU, 0xc1d938d1U, 0xfe8ccaa2U, 0x3698d40bU, 0xcfa6f581U,
+    0x28a57adeU, 0x26dab78eU, 0xa43fadbfU, 0xe42c3a9dU, 0x0d507892U,
+    0x9b6a5fccU, 0x62547e46U, 0xc2f68d13U, 0xe890d8b8U, 0x5e2e39f7U,
+    0xf582c3afU, 0xbe9f5d80U, 0x7c69d093U, 0xa96fd52dU, 0xb3cf2512U,
+    0x3bc8ac99U, 0xa710187dU, 0x6ee89c63U, 0x7bdb3bbbU, 0x09cd2678U,
+    0xf46e5918U, 0x01ec9ab7U, 0xa8834f9aU, 0x65e6956eU, 0x7eaaffe6U,
+    0x0821bccfU, 0xe6ef15e8U, 0xd9bae79bU, 0xce4a6f36U, 0xd4ea9f09U,
+    0xd629b07cU, 0xaf31a4b2U, 0x312a3f23U, 0x30c6a594U, 0xc035a266U,
+    0x37744ebcU, 0xa6fc82caU, 0xb0e090d0U, 0x1533a7d8U, 0x4af10498U,
+    0xf741ecdaU, 0x0e7fcd50U, 0x2f1791f6U, 0x8d764dd6U, 0x4d43efb0U,
+    0x54ccaa4dU, 0xdfe49604U, 0xe39ed1b5U, 0x1b4c6a88U, 0xb8c12c1fU,
+    0x7f466551U, 0x049d5eeaU, 0x5d018c35U, 0x73fa8774U, 0x2efb0b41U,
+    0x5ab3671dU, 0x5292dbd2U, 0x33e91056U, 0x136dd647U, 0x8c9ad761U,
+    0x7a37a10cU, 0x8e59f814U, 0x89eb133cU, 0xeecea927U, 0x35b761c9U,
+    0xede11ce5U, 0x3c7a47b1U, 0x599cd2dfU, 0x3f55f273U, 0x791814ceU,
+    0xbf73c737U, 0xea53f7cdU, 0x5b5ffdaaU, 0x14df3d6fU, 0x867844dbU,
+    0x81caaff3U, 0x3eb968c4U, 0x2c382434U, 0x5fc2a340U, 0x72161dc3U,
+    0x0cbce225U, 0x8b283c49U, 0x41ff0d95U, 0x7139a801U, 0xde080cb3U,
+    0x9cd8b4e4U, 0x906456c1U, 0x617bcb84U, 0x70d532b6U, 0x74486c5cU,
+    0x42d0b857U, };
+
+static const uint32_t Td2[256] = {
+    0xa75051f4U, 0x65537e41U, 0xa4c31a17U, 0x5e963a27U, 0x6bcb3babU,
+    0x45f11f9dU, 0x58abacfaU, 0x03934be3U, 0xfa552030U, 0x6df6ad76U,
+    0x769188ccU, 0x4c25f502U, 0xd7fc4fe5U, 0xcbd7c52aU, 0x44802635U,
+    0xa38fb562U, 0x5a49deb1U, 0x1b6725baU, 0x0e9845eaU, 0xc0e15dfeU,
+    0x7502c32fU, 0xf012814cU, 0x97a38d46U, 0xf9c66bd3U, 0x5fe7038fU,
+    0x9c951592U, 0x7aebbf6dU, 0x59da9552U, 0x832dd4beU, 0x21d35874U,
+    0x692949e0U, 0xc8448ec9U, 0x896a75c2U, 0x7978f48eU, 0x3e6b9958U,
+    0x71dd27b9U, 0x4fb6bee1U, 0xad17f088U, 0xac66c920U, 0x3ab47dceU,
+    0x4a1863dfU, 0x3182e51aU, 0x33609751U, 0x7f456253U, 0x77e0b164U,
+    0xae84bb6bU, 0xa01cfe81U, 0x2b94f908U, 0x68587048U, 0xfd198f45U,
+    0x6c8794deU, 0xf8b7527bU, 0xd323ab73U, 0x02e2724bU, 0x8f57e31fU,
+    0xab2a6655U, 0x2807b2ebU, 0xc2032fb5U, 0x7b9a86c5U, 0x08a5d337U,
+    0x87f23028U, 0xa5b223bfU, 0x6aba0203U, 0x825ced16U, 0x1c2b8acfU,
+    0xb492a779U, 0xf2f0f307U, 0xe2a14e69U, 0xf4cd65daU, 0xbed50605U,
+    0x621fd134U, 0xfe8ac4a6U, 0x539d342eU, 0x55a0a2f3U, 0xe132058aU,
+    0xeb75a4f6U, 0xec390b83U, 0xefaa4060U, 0x9f065e71U, 0x1051bd6eU,
+    0x8af93e21U, 0x063d96ddU, 0x05aedd3eU, 0xbd464de6U, 0x8db59154U,
+    0x5d0571c4U, 0xd46f0406U, 0x15ff6050U, 0xfb241998U, 0xe997d6bdU,
+    0x43cc8940U, 0x9e7767d9U, 0x42bdb0e8U, 0x8b880789U, 0x5b38e719U,
+    0xeedb79c8U, 0x0a47a17cU, 0x0fe97c42U, 0x1ec9f884U, 0x00000000U,
+    0x86830980U, 0xed48322bU, 0x70ac1e11U, 0x724e6c5aU, 0xfffbfd0eU,
+    0x38560f85U, 0xd51e3daeU, 0x3927362dU, 0xd9640a0fU, 0xa621685cU,
+    0x54d19b5bU, 0x2e3a2436U, 0x67b10c0aU, 0xe70f9357U, 0x96d2b4eeU,
+    0x919e1b9bU, 0xc54f80c0U, 0x20a261dcU, 0x4b695a77U, 0x1a161c12U,
+    0xba0ae293U, 0x2ae5c0a0U, 0xe0433c22U, 0x171d121bU, 0x0d0b0e09U,
+    0xc7adf28bU, 0xa8b92db6U, 0xa9c8141eU, 0x198557f1U, 0x074caf75U,
+    0xddbbee99U, 0x60fda37fU, 0x269ff701U, 0xf5bc5c72U, 0x3bc54466U,
+    0x7e345bfbU, 0x29768b43U, 0xc6dccb23U, 0xfc68b6edU, 0xf163b8e4U,
+    0xdccad731U, 0x85104263U, 0x22401397U, 0x112084c6U, 0x247d854aU,
+    0x3df8d2bbU, 0x3211aef9U, 0xa16dc729U, 0x2f4b1d9eU, 0x30f3dcb2U,
+    0x52ec0d86U, 0xe3d077c1U, 0x166c2bb3U, 0xb999a970U, 0x48fa1194U,
+    0x642247e9U, 0x8cc4a8fcU, 0x3f1aa0f0U, 0x2cd8567dU, 0x90ef2233U,
+    0x4ec78749U, 0xd1c1d938U, 0xa2fe8ccaU, 0x0b3698d4U, 0x81cfa6f5U,
+    0xde28a57aU, 0x8e26dab7U, 0xbfa43fadU, 0x9de42c3aU, 0x920d5078U,
+    0xcc9b6a5fU, 0x4662547eU, 0x13c2f68dU, 0xb8e890d8U, 0xf75e2e39U,
+    0xaff582c3U, 0x80be9f5dU, 0x937c69d0U, 0x2da96fd5U, 0x12b3cf25U,
+    0x993bc8acU, 0x7da71018U, 0x636ee89cU, 0xbb7bdb3bU, 0x7809cd26U,
+    0x18f46e59U, 0xb701ec9aU, 0x9aa8834fU, 0x6e65e695U, 0xe67eaaffU,
+    0xcf0821bcU, 0xe8e6ef15U, 0x9bd9bae7U, 0x36ce4a6fU, 0x09d4ea9fU,
+    0x7cd629b0U, 0xb2af31a4U, 0x23312a3fU, 0x9430c6a5U, 0x66c035a2U,
+    0xbc37744eU, 0xcaa6fc82U, 0xd0b0e090U, 0xd81533a7U, 0x984af104U,
+    0xdaf741ecU, 0x500e7fcdU, 0xf62f1791U, 0xd68d764dU, 0xb04d43efU,
+    0x4d54ccaaU, 0x04dfe496U, 0xb5e39ed1U, 0x881b4c6aU, 0x1fb8c12cU,
+    0x517f4665U, 0xea049d5eU, 0x355d018cU, 0x7473fa87U, 0x412efb0bU,
+    0x1d5ab367U, 0xd25292dbU, 0x5633e910U, 0x47136dd6U, 0x618c9ad7U,
+    0x0c7a37a1U, 0x148e59f8U, 0x3c89eb13U, 0x27eecea9U, 0xc935b761U,
+    0xe5ede11cU, 0xb13c7a47U, 0xdf599cd2U, 0x733f55f2U, 0xce791814U,
+    0x37bf73c7U, 0xcdea53f7U, 0xaa5b5ffdU, 0x6f14df3dU, 0xdb867844U,
+    0xf381caafU, 0xc43eb968U, 0x342c3824U, 0x405fc2a3U, 0xc372161dU,
+    0x250cbce2U, 0x498b283cU, 0x9541ff0dU, 0x017139a8U, 0xb3de080cU,
+    0xe49cd8b4U, 0xc1906456U, 0x84617bcbU, 0xb670d532U, 0x5c74486cU,
+    0x5742d0b8U, };
+
+static const uint32_t Td3[256] = {
+    0xf4a75051U, 0x4165537eU, 0x17a4c31aU, 0x275e963aU, 0xab6bcb3bU,
+    0x9d45f11fU, 0xfa58abacU, 0xe303934bU, 0x30fa5520U, 0x766df6adU,
+    0xcc769188U, 0x024c25f5U, 0xe5d7fc4fU, 0x2acbd7c5U, 0x35448026U,
+    0x62a38fb5U, 0xb15a49deU, 0xba1b6725U, 0xea0e9845U, 0xfec0e15dU,
+    0x2f7502c3U, 0x4cf01281U, 0x4697a38dU, 0xd3f9c66bU, 0x8f5fe703U,
+    0x929c9515U, 0x6d7aebbfU, 0x5259da95U, 0xbe832dd4U, 0x7421d358U,
+    0xe0692949U, 0xc9c8448eU, 0xc2896a75U, 0x8e7978f4U, 0x583e6b99U,
+    0xb971dd27U, 0xe14fb6beU, 0x88ad17f0U, 0x20ac66c9U, 0xce3ab47dU,
+    0xdf4a1863U, 0x1a3182e5U, 0x51336097U, 0x537f4562U, 0x6477e0b1U,
+    0x6bae84bbU, 0x81a01cfeU, 0x082b94f9U, 0x48685870U, 0x45fd198fU,
+    0xde6c8794U, 0x7bf8b752U, 0x73d323abU, 0x4b02e272U, 0x1f8f57e3U,
+    0x55ab2a66U, 0xeb2807b2U, 0xb5c2032fU, 0xc57b9a86U, 0x3708a5d3U,
+    0x2887f230U, 0xbfa5b223U, 0x036aba02U, 0x16825cedU, 0xcf1c2b8aU,
+    0x79b492a7U, 0x07f2f0f3U, 0x69e2a14eU, 0xdaf4cd65U, 0x05bed506U,
+    0x34621fd1U, 0xa6fe8ac4U, 0x2e539d34U, 0xf355a0a2U, 0x8ae13205U,
+    0xf6eb75a4U, 0x83ec390bU, 0x60efaa40U, 0x719f065eU, 0x6e1051bdU,
+    0x218af93eU, 0xdd063d96U, 0x3e05aeddU, 0xe6bd464dU, 0x548db591U,
+    0xc45d0571U, 0x06d46f04U, 0x5015ff60U, 0x98fb2419U, 0xbde997d6U,
+    0x4043cc89U, 0xd99e7767U, 0xe842bdb0U, 0x898b8807U, 0x195b38e7U,
+    0xc8eedb79U, 0x7c0a47a1U, 0x420fe97cU, 0x841ec9f8U, 0x00000000U,
+    0x80868309U, 0x2bed4832U, 0x1170ac1eU, 0x5a724e6cU, 0x0efffbfdU,
+    0x8538560fU, 0xaed51e3dU, 0x2d392736U, 0x0fd9640aU, 0x5ca62168U,
+    0x5b54d19bU, 0x362e3a24U, 0x0a67b10cU, 0x57e70f93U, 0xee96d2b4U,
+    0x9b919e1bU, 0xc0c54f80U, 0xdc20a261U, 0x774b695aU, 0x121a161cU,
+    0x93ba0ae2U, 0xa02ae5c0U, 0x22e0433cU, 0x1b171d12U, 0x090d0b0eU,
+    0x8bc7adf2U, 0xb6a8b92dU, 0x1ea9c814U, 0xf1198557U, 0x75074cafU,
+    0x99ddbbeeU, 0x7f60fda3U, 0x01269ff7U, 0x72f5bc5cU, 0x663bc544U,
+    0xfb7e345bU, 0x4329768bU, 0x23c6dccbU, 0xedfc68b6U, 0xe4f163b8U,
+    0x31dccad7U, 0x63851042U, 0x97224013U, 0xc6112084U, 0x4a247d85U,
+    0xbb3df8d2U, 0xf93211aeU, 0x29a16dc7U, 0x9e2f4b1dU, 0xb230f3dcU,
+    0x8652ec0dU, 0xc1e3d077U, 0xb3166c2bU, 0x70b999a9U, 0x9448fa11U,
+    0xe9642247U, 0xfc8cc4a8U, 0xf03f1aa0U, 0x7d2cd856U, 0x3390ef22U,
+    0x494ec787U, 0x38d1c1d9U, 0xcaa2fe8cU, 0xd40b3698U, 0xf581cfa6U,
+    0x7ade28a5U, 0xb78e26daU, 0xadbfa43fU, 0x3a9de42cU, 0x78920d50U,
+    0x5fcc9b6aU, 0x7e466254U, 0x8d13c2f6U, 0xd8b8e890U, 0x39f75e2eU,
+    0xc3aff582U, 0x5d80be9fU, 0xd0937c69U, 0xd52da96fU, 0x2512b3cfU,
+    0xac993bc8U, 0x187da710U, 0x9c636ee8U, 0x3bbb7bdbU, 0x267809cdU,
+    0x5918f46eU, 0x9ab701ecU, 0x4f9aa883U, 0x956e65e6U, 0xffe67eaaU,
+    0xbccf0821U, 0x15e8e6efU, 0xe79bd9baU, 0x6f36ce4aU, 0x9f09d4eaU,
+    0xb07cd629U, 0xa4b2af31U, 0x3f23312aU, 0xa59430c6U, 0xa266c035U,
+    0x4ebc3774U, 0x82caa6fcU, 0x90d0b0e0U, 0xa7d81533U, 0x04984af1U,
+    0xecdaf741U, 0xcd500e7fU, 0x91f62f17U, 0x4dd68d76U, 0xefb04d43U,
+    0xaa4d54ccU, 0x9604dfe4U, 0xd1b5e39eU, 0x6a881b4cU, 0x2c1fb8c1U,
+    0x65517f46U, 0x5eea049dU, 0x8c355d01U, 0x877473faU, 0x0b412efbU,
+    0x671d5ab3U, 0xdbd25292U, 0x105633e9U, 0xd647136dU, 0xd7618c9aU,
+    0xa10c7a37U, 0xf8148e59U, 0x133c89ebU, 0xa927eeceU, 0x61c935b7U,
+    0x1ce5ede1U, 0x47b13c7aU, 0xd2df599cU, 0xf2733f55U, 0x14ce7918U,
+    0xc737bf73U, 0xf7cdea53U, 0xfdaa5b5fU, 0x3d6f14dfU, 0x44db8678U,
+    0xaff381caU, 0x68c43eb9U, 0x24342c38U, 0xa3405fc2U, 0x1dc37216U,
+    0xe2250cbcU, 0x3c498b28U, 0x0d9541ffU, 0xa8017139U, 0x0cb3de08U,
+    0xb4e49cd8U, 0x56c19064U, 0xcb84617bU, 0x32b670d5U, 0x6c5c7448U,
+    0xb85742d0U, };
+
+static const uint8_t Td4[256] = {
+    0x52U, 0x09U, 0x6aU, 0xd5U, 0x30U, 0x36U, 0xa5U, 0x38U, 0xbfU, 0x40U, 0xa3U,
+    0x9eU, 0x81U, 0xf3U, 0xd7U, 0xfbU, 0x7cU, 0xe3U, 0x39U, 0x82U, 0x9bU, 0x2fU,
+    0xffU, 0x87U, 0x34U, 0x8eU, 0x43U, 0x44U, 0xc4U, 0xdeU, 0xe9U, 0xcbU, 0x54U,
+    0x7bU, 0x94U, 0x32U, 0xa6U, 0xc2U, 0x23U, 0x3dU, 0xeeU, 0x4cU, 0x95U, 0x0bU,
+    0x42U, 0xfaU, 0xc3U, 0x4eU, 0x08U, 0x2eU, 0xa1U, 0x66U, 0x28U, 0xd9U, 0x24U,
+    0xb2U, 0x76U, 0x5bU, 0xa2U, 0x49U, 0x6dU, 0x8bU, 0xd1U, 0x25U, 0x72U, 0xf8U,
+    0xf6U, 0x64U, 0x86U, 0x68U, 0x98U, 0x16U, 0xd4U, 0xa4U, 0x5cU, 0xccU, 0x5dU,
+    0x65U, 0xb6U, 0x92U, 0x6cU, 0x70U, 0x48U, 0x50U, 0xfdU, 0xedU, 0xb9U, 0xdaU,
+    0x5eU, 0x15U, 0x46U, 0x57U, 0xa7U, 0x8dU, 0x9dU, 0x84U, 0x90U, 0xd8U, 0xabU,
+    0x00U, 0x8cU, 0xbcU, 0xd3U, 0x0aU, 0xf7U, 0xe4U, 0x58U, 0x05U, 0xb8U, 0xb3U,
+    0x45U, 0x06U, 0xd0U, 0x2cU, 0x1eU, 0x8fU, 0xcaU, 0x3fU, 0x0fU, 0x02U, 0xc1U,
+    0xafU, 0xbdU, 0x03U, 0x01U, 0x13U, 0x8aU, 0x6bU, 0x3aU, 0x91U, 0x11U, 0x41U,
+    0x4fU, 0x67U, 0xdcU, 0xeaU, 0x97U, 0xf2U, 0xcfU, 0xceU, 0xf0U, 0xb4U, 0xe6U,
+    0x73U, 0x96U, 0xacU, 0x74U, 0x22U, 0xe7U, 0xadU, 0x35U, 0x85U, 0xe2U, 0xf9U,
+    0x37U, 0xe8U, 0x1cU, 0x75U, 0xdfU, 0x6eU, 0x47U, 0xf1U, 0x1aU, 0x71U, 0x1dU,
+    0x29U, 0xc5U, 0x89U, 0x6fU, 0xb7U, 0x62U, 0x0eU, 0xaaU, 0x18U, 0xbeU, 0x1bU,
+    0xfcU, 0x56U, 0x3eU, 0x4bU, 0xc6U, 0xd2U, 0x79U, 0x20U, 0x9aU, 0xdbU, 0xc0U,
+    0xfeU, 0x78U, 0xcdU, 0x5aU, 0xf4U, 0x1fU, 0xddU, 0xa8U, 0x33U, 0x88U, 0x07U,
+    0xc7U, 0x31U, 0xb1U, 0x12U, 0x10U, 0x59U, 0x27U, 0x80U, 0xecU, 0x5fU, 0x60U,
+    0x51U, 0x7fU, 0xa9U, 0x19U, 0xb5U, 0x4aU, 0x0dU, 0x2dU, 0xe5U, 0x7aU, 0x9fU,
+    0x93U, 0xc9U, 0x9cU, 0xefU, 0xa0U, 0xe0U, 0x3bU, 0x4dU, 0xaeU, 0x2aU, 0xf5U,
+    0xb0U, 0xc8U, 0xebU, 0xbbU, 0x3cU, 0x83U, 0x53U, 0x99U, 0x61U, 0x17U, 0x2bU,
+    0x04U, 0x7eU, 0xbaU, 0x77U, 0xd6U, 0x26U, 0xe1U, 0x69U, 0x14U, 0x63U, 0x55U,
+    0x21U, 0x0cU, 0x7dU, };
+
+static const uint32_t rcon[] = {
+    0x01000000, 0x02000000, 0x04000000, 0x08000000, 0x10000000,
+    0x20000000, 0x40000000, 0x80000000, 0x1B000000, 0x36000000,
+    /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
+};
+
+int AES_set_encrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) {
+  uint32_t *rk;
+  int i = 0;
+  uint32_t temp;
+
+  if (!key || !aeskey) {
+    return -1;
+  }
+
+  switch (bits) {
+    case 128:
+      aeskey->rounds = 10;
+      break;
+    case 192:
+      aeskey->rounds = 12;
+      break;
+    case 256:
+      aeskey->rounds = 14;
+      break;
+    default:
+      return -2;
+  }
+
+  rk = aeskey->rd_key;
+
+  rk[0] = GETU32(key);
+  rk[1] = GETU32(key + 4);
+  rk[2] = GETU32(key + 8);
+  rk[3] = GETU32(key + 12);
+  if (bits == 128) {
+    while (1) {
+      temp = rk[3];
+      rk[4] = rk[0] ^ (Te2[(temp >> 16) & 0xff] & 0xff000000) ^
+              (Te3[(temp >> 8) & 0xff] & 0x00ff0000) ^
+              (Te0[(temp) & 0xff] & 0x0000ff00) ^
+              (Te1[(temp >> 24)] & 0x000000ff) ^ rcon[i];
+      rk[5] = rk[1] ^ rk[4];
+      rk[6] = rk[2] ^ rk[5];
+      rk[7] = rk[3] ^ rk[6];
+      if (++i == 10) {
+        return 0;
+      }
+      rk += 4;
+    }
+  }
+  rk[4] = GETU32(key + 16);
+  rk[5] = GETU32(key + 20);
+  if (bits == 192) {
+    while (1) {
+      temp = rk[5];
+      rk[6] = rk[0] ^ (Te2[(temp >> 16) & 0xff] & 0xff000000) ^
+              (Te3[(temp >> 8) & 0xff] & 0x00ff0000) ^
+              (Te0[(temp) & 0xff] & 0x0000ff00) ^
+              (Te1[(temp >> 24)] & 0x000000ff) ^ rcon[i];
+      rk[7] = rk[1] ^ rk[6];
+      rk[8] = rk[2] ^ rk[7];
+      rk[9] = rk[3] ^ rk[8];
+      if (++i == 8) {
+        return 0;
+      }
+      rk[10] = rk[4] ^ rk[9];
+      rk[11] = rk[5] ^ rk[10];
+      rk += 6;
+    }
+  }
+  rk[6] = GETU32(key + 24);
+  rk[7] = GETU32(key + 28);
+  if (bits == 256) {
+    while (1) {
+      temp = rk[7];
+      rk[8] = rk[0] ^ (Te2[(temp >> 16) & 0xff] & 0xff000000) ^
+              (Te3[(temp >> 8) & 0xff] & 0x00ff0000) ^
+              (Te0[(temp) & 0xff] & 0x0000ff00) ^
+              (Te1[(temp >> 24)] & 0x000000ff) ^ rcon[i];
+      rk[9] = rk[1] ^ rk[8];
+      rk[10] = rk[2] ^ rk[9];
+      rk[11] = rk[3] ^ rk[10];
+      if (++i == 7) {
+        return 0;
+      }
+      temp = rk[11];
+      rk[12] = rk[4] ^ (Te2[(temp >> 24)] & 0xff000000) ^
+               (Te3[(temp >> 16) & 0xff] & 0x00ff0000) ^
+               (Te0[(temp >> 8) & 0xff] & 0x0000ff00) ^
+               (Te1[(temp) & 0xff] & 0x000000ff);
+      rk[13] = rk[5] ^ rk[12];
+      rk[14] = rk[6] ^ rk[13];
+      rk[15] = rk[7] ^ rk[14];
+
+      rk += 8;
+    }
+  }
+  return 0;
+}
+
+int AES_set_decrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) {
+  uint32_t *rk;
+  int i, j, status;
+  uint32_t temp;
+
+  /* first, start with an encryption schedule */
+  status = AES_set_encrypt_key(key, bits, aeskey);
+  if (status < 0) {
+    return status;
+  }
+
+  rk = aeskey->rd_key;
+
+  /* invert the order of the round keys: */
+  for (i = 0, j = 4 * aeskey->rounds; i < j; i += 4, j -= 4) {
+    temp = rk[i];
+    rk[i] = rk[j];
+    rk[j] = temp;
+    temp = rk[i + 1];
+    rk[i + 1] = rk[j + 1];
+    rk[j + 1] = temp;
+    temp = rk[i + 2];
+    rk[i + 2] = rk[j + 2];
+    rk[j + 2] = temp;
+    temp = rk[i + 3];
+    rk[i + 3] = rk[j + 3];
+    rk[j + 3] = temp;
+  }
+  /* apply the inverse MixColumn transform to all round keys but the first and
+   * the last: */
+  for (i = 1; i < (int)aeskey->rounds; i++) {
+    rk += 4;
+    rk[0] =
+        Td0[Te1[(rk[0] >> 24)] & 0xff] ^ Td1[Te1[(rk[0] >> 16) & 0xff] & 0xff] ^
+        Td2[Te1[(rk[0] >> 8) & 0xff] & 0xff] ^ Td3[Te1[(rk[0]) & 0xff] & 0xff];
+    rk[1] =
+        Td0[Te1[(rk[1] >> 24)] & 0xff] ^ Td1[Te1[(rk[1] >> 16) & 0xff] & 0xff] ^
+        Td2[Te1[(rk[1] >> 8) & 0xff] & 0xff] ^ Td3[Te1[(rk[1]) & 0xff] & 0xff];
+    rk[2] =
+        Td0[Te1[(rk[2] >> 24)] & 0xff] ^ Td1[Te1[(rk[2] >> 16) & 0xff] & 0xff] ^
+        Td2[Te1[(rk[2] >> 8) & 0xff] & 0xff] ^ Td3[Te1[(rk[2]) & 0xff] & 0xff];
+    rk[3] =
+        Td0[Te1[(rk[3] >> 24)] & 0xff] ^ Td1[Te1[(rk[3] >> 16) & 0xff] & 0xff] ^
+        Td2[Te1[(rk[3] >> 8) & 0xff] & 0xff] ^ Td3[Te1[(rk[3]) & 0xff] & 0xff];
+  }
+  return 0;
+}
+
+void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
+  const uint32_t *rk;
+  uint32_t s0, s1, s2, s3, t0, t1, t2, t3;
+#ifndef FULL_UNROLL
+  int r;
+#endif /* ?FULL_UNROLL */
+
+  assert(in && out && key);
+  rk = key->rd_key;
+
+  /* map byte array block to cipher state
+   * and add initial round key: */
+  s0 = GETU32(in) ^ rk[0];
+  s1 = GETU32(in + 4) ^ rk[1];
+  s2 = GETU32(in + 8) ^ rk[2];
+  s3 = GETU32(in + 12) ^ rk[3];
+#ifdef FULL_UNROLL
+  /* round 1: */
+  t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
+       Te3[s3 & 0xff] ^ rk[4];
+  t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
+       Te3[s0 & 0xff] ^ rk[5];
+  t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^
+       Te3[s1 & 0xff] ^ rk[6];
+  t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^
+       Te3[s2 & 0xff] ^ rk[7];
+  /* round 2: */
+  s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^
+       Te3[t3 & 0xff] ^ rk[8];
+  s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^
+       Te3[t0 & 0xff] ^ rk[9];
+  s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^
+       Te3[t1 & 0xff] ^ rk[10];
+  s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^
+       Te3[t2 & 0xff] ^ rk[11];
+  /* round 3: */
+  t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
+       Te3[s3 & 0xff] ^ rk[12];
+  t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
+       Te3[s0 & 0xff] ^ rk[13];
+  t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^
+       Te3[s1 & 0xff] ^ rk[14];
+  t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^
+       Te3[s2 & 0xff] ^ rk[15];
+  /* round 4: */
+  s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^
+       Te3[t3 & 0xff] ^ rk[16];
+  s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^
+       Te3[t0 & 0xff] ^ rk[17];
+  s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^
+       Te3[t1 & 0xff] ^ rk[18];
+  s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^
+       Te3[t2 & 0xff] ^ rk[19];
+  /* round 5: */
+  t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
+       Te3[s3 & 0xff] ^ rk[20];
+  t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
+       Te3[s0 & 0xff] ^ rk[21];
+  t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^
+       Te3[s1 & 0xff] ^ rk[22];
+  t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^
+       Te3[s2 & 0xff] ^ rk[23];
+  /* round 6: */
+  s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^
+       Te3[t3 & 0xff] ^ rk[24];
+  s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^
+       Te3[t0 & 0xff] ^ rk[25];
+  s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^
+       Te3[t1 & 0xff] ^ rk[26];
+  s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^
+       Te3[t2 & 0xff] ^ rk[27];
+  /* round 7: */
+  t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
+       Te3[s3 & 0xff] ^ rk[28];
+  t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
+       Te3[s0 & 0xff] ^ rk[29];
+  t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^
+       Te3[s1 & 0xff] ^ rk[30];
+  t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^
+       Te3[s2 & 0xff] ^ rk[31];
+  /* round 8: */
+  s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^
+       Te3[t3 & 0xff] ^ rk[32];
+  s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^
+       Te3[t0 & 0xff] ^ rk[33];
+  s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^
+       Te3[t1 & 0xff] ^ rk[34];
+  s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^
+       Te3[t2 & 0xff] ^ rk[35];
+  /* round 9: */
+  t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
+       Te3[s3 & 0xff] ^ rk[36];
+  t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
+       Te3[s0 & 0xff] ^ rk[37];
+  t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^
+       Te3[s1 & 0xff] ^ rk[38];
+  t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^
+       Te3[s2 & 0xff] ^ rk[39];
+  if (key->rounds > 10) {
+    /* round 10: */
+    s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^
+         Te3[t3 & 0xff] ^ rk[40];
+    s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^
+         Te3[t0 & 0xff] ^ rk[41];
+    s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^
+         Te3[t1 & 0xff] ^ rk[42];
+    s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^
+         Te3[t2 & 0xff] ^ rk[43];
+    /* round 11: */
+    t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
+         Te3[s3 & 0xff] ^ rk[44];
+    t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
+         Te3[s0 & 0xff] ^ rk[45];
+    t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^
+         Te3[s1 & 0xff] ^ rk[46];
+    t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^
+         Te3[s2 & 0xff] ^ rk[47];
+    if (key->rounds > 12) {
+      /* round 12: */
+      s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^
+           Te3[t3 & 0xff] ^ rk[48];
+      s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^
+           Te3[t0 & 0xff] ^ rk[49];
+      s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^
+           Te3[t1 & 0xff] ^ rk[50];
+      s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^
+           Te3[t2 & 0xff] ^ rk[51];
+      /* round 13: */
+      t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
+           Te3[s3 & 0xff] ^ rk[52];
+      t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
+           Te3[s0 & 0xff] ^ rk[53];
+      t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^
+           Te3[s1 & 0xff] ^ rk[54];
+      t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^
+           Te3[s2 & 0xff] ^ rk[55];
+    }
+  }
+  rk += key->rounds << 2;
+#else  /* !FULL_UNROLL */
+  /*
+   * Nr - 1 full rounds:
+   */
+  r = key->rounds >> 1;
+  for (;;) {
+    t0 = Te0[(s0 >> 24)] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^
+         Te3[(s3) & 0xff] ^ rk[4];
+    t1 = Te0[(s1 >> 24)] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^
+         Te3[(s0) & 0xff] ^ rk[5];
+    t2 = Te0[(s2 >> 24)] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^
+         Te3[(s1) & 0xff] ^ rk[6];
+    t3 = Te0[(s3 >> 24)] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^
+         Te3[(s2) & 0xff] ^ rk[7];
+
+    rk += 8;
+    if (--r == 0) {
+      break;
+    }
+
+    s0 = Te0[(t0 >> 24)] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^
+         Te3[(t3) & 0xff] ^ rk[0];
+    s1 = Te0[(t1 >> 24)] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^
+         Te3[(t0) & 0xff] ^ rk[1];
+    s2 = Te0[(t2 >> 24)] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^
+         Te3[(t1) & 0xff] ^ rk[2];
+    s3 = Te0[(t3 >> 24)] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^
+         Te3[(t2) & 0xff] ^ rk[3];
+  }
+#endif /* ?FULL_UNROLL */
+  /*  apply last round and map cipher state to byte array block: */
+  s0 = (Te2[(t0 >> 24)] & 0xff000000) ^ (Te3[(t1 >> 16) & 0xff] & 0x00ff0000) ^
+       (Te0[(t2 >> 8) & 0xff] & 0x0000ff00) ^ (Te1[(t3) & 0xff] & 0x000000ff) ^
+       rk[0];
+  PUTU32(out, s0);
+  s1 = (Te2[(t1 >> 24)] & 0xff000000) ^ (Te3[(t2 >> 16) & 0xff] & 0x00ff0000) ^
+       (Te0[(t3 >> 8) & 0xff] & 0x0000ff00) ^ (Te1[(t0) & 0xff] & 0x000000ff) ^
+       rk[1];
+  PUTU32(out + 4, s1);
+  s2 = (Te2[(t2 >> 24)] & 0xff000000) ^ (Te3[(t3 >> 16) & 0xff] & 0x00ff0000) ^
+       (Te0[(t0 >> 8) & 0xff] & 0x0000ff00) ^ (Te1[(t1) & 0xff] & 0x000000ff) ^
+       rk[2];
+  PUTU32(out + 8, s2);
+  s3 = (Te2[(t3 >> 24)] & 0xff000000) ^ (Te3[(t0 >> 16) & 0xff] & 0x00ff0000) ^
+       (Te0[(t1 >> 8) & 0xff] & 0x0000ff00) ^ (Te1[(t2) & 0xff] & 0x000000ff) ^
+       rk[3];
+  PUTU32(out + 12, s3);
+}
+
+void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
+  const uint32_t *rk;
+  uint32_t s0, s1, s2, s3, t0, t1, t2, t3;
+#ifndef FULL_UNROLL
+  int r;
+#endif /* ?FULL_UNROLL */
+
+  assert(in && out && key);
+  rk = key->rd_key;
+
+  /* map byte array block to cipher state
+   * and add initial round key: */
+  s0 = GETU32(in) ^ rk[0];
+  s1 = GETU32(in + 4) ^ rk[1];
+  s2 = GETU32(in + 8) ^ rk[2];
+  s3 = GETU32(in + 12) ^ rk[3];
+#ifdef FULL_UNROLL
+  /* round 1: */
+  t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
+       Td3[s1 & 0xff] ^ rk[4];
+  t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
+       Td3[s2 & 0xff] ^ rk[5];
+  t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^
+       Td3[s3 & 0xff] ^ rk[6];
+  t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^
+       Td3[s0 & 0xff] ^ rk[7];
+  /* round 2: */
+  s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^
+       Td3[t1 & 0xff] ^ rk[8];
+  s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^
+       Td3[t2 & 0xff] ^ rk[9];
+  s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^
+       Td3[t3 & 0xff] ^ rk[10];
+  s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^
+       Td3[t0 & 0xff] ^ rk[11];
+  /* round 3: */
+  t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
+       Td3[s1 & 0xff] ^ rk[12];
+  t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
+       Td3[s2 & 0xff] ^ rk[13];
+  t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^
+       Td3[s3 & 0xff] ^ rk[14];
+  t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^
+       Td3[s0 & 0xff] ^ rk[15];
+  /* round 4: */
+  s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^
+       Td3[t1 & 0xff] ^ rk[16];
+  s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^
+       Td3[t2 & 0xff] ^ rk[17];
+  s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^
+       Td3[t3 & 0xff] ^ rk[18];
+  s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^
+       Td3[t0 & 0xff] ^ rk[19];
+  /* round 5: */
+  t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
+       Td3[s1 & 0xff] ^ rk[20];
+  t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
+       Td3[s2 & 0xff] ^ rk[21];
+  t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^
+       Td3[s3 & 0xff] ^ rk[22];
+  t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^
+       Td3[s0 & 0xff] ^ rk[23];
+  /* round 6: */
+  s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^
+       Td3[t1 & 0xff] ^ rk[24];
+  s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^
+       Td3[t2 & 0xff] ^ rk[25];
+  s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^
+       Td3[t3 & 0xff] ^ rk[26];
+  s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^
+       Td3[t0 & 0xff] ^ rk[27];
+  /* round 7: */
+  t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
+       Td3[s1 & 0xff] ^ rk[28];
+  t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
+       Td3[s2 & 0xff] ^ rk[29];
+  t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^
+       Td3[s3 & 0xff] ^ rk[30];
+  t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^
+       Td3[s0 & 0xff] ^ rk[31];
+  /* round 8: */
+  s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^
+       Td3[t1 & 0xff] ^ rk[32];
+  s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^
+       Td3[t2 & 0xff] ^ rk[33];
+  s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^
+       Td3[t3 & 0xff] ^ rk[34];
+  s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^
+       Td3[t0 & 0xff] ^ rk[35];
+  /* round 9: */
+  t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
+       Td3[s1 & 0xff] ^ rk[36];
+  t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
+       Td3[s2 & 0xff] ^ rk[37];
+  t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^
+       Td3[s3 & 0xff] ^ rk[38];
+  t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^
+       Td3[s0 & 0xff] ^ rk[39];
+  if (key->rounds > 10) {
+    /* round 10: */
+    s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^
+         Td3[t1 & 0xff] ^ rk[40];
+    s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^
+         Td3[t2 & 0xff] ^ rk[41];
+    s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^
+         Td3[t3 & 0xff] ^ rk[42];
+    s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^
+         Td3[t0 & 0xff] ^ rk[43];
+    /* round 11: */
+    t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
+         Td3[s1 & 0xff] ^ rk[44];
+    t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
+         Td3[s2 & 0xff] ^ rk[45];
+    t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^
+         Td3[s3 & 0xff] ^ rk[46];
+    t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^
+         Td3[s0 & 0xff] ^ rk[47];
+    if (key->rounds > 12) {
+      /* round 12: */
+      s0 = Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^
+           Td3[t1 & 0xff] ^ rk[48];
+      s1 = Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^
+           Td3[t2 & 0xff] ^ rk[49];
+      s2 = Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^
+           Td3[t3 & 0xff] ^ rk[50];
+      s3 = Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^
+           Td3[t0 & 0xff] ^ rk[51];
+      /* round 13: */
+      t0 = Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
+           Td3[s1 & 0xff] ^ rk[52];
+      t1 = Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
+           Td3[s2 & 0xff] ^ rk[53];
+      t2 = Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^
+           Td3[s3 & 0xff] ^ rk[54];
+      t3 = Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^
+           Td3[s0 & 0xff] ^ rk[55];
+    }
+  }
+  rk += key->rounds << 2;
+#else  /* !FULL_UNROLL */
+  /*
+   * Nr - 1 full rounds:
+   */
+  r = key->rounds >> 1;
+  for (;;) {
+    t0 = Td0[(s0 >> 24)] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff] ^
+         Td3[(s1) & 0xff] ^ rk[4];
+    t1 = Td0[(s1 >> 24)] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff] ^
+         Td3[(s2) & 0xff] ^ rk[5];
+    t2 = Td0[(s2 >> 24)] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff] ^
+         Td3[(s3) & 0xff] ^ rk[6];
+    t3 = Td0[(s3 >> 24)] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff] ^
+         Td3[(s0) & 0xff] ^ rk[7];
+
+    rk += 8;
+    if (--r == 0) {
+      break;
+    }
+
+    s0 = Td0[(t0 >> 24)] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff] ^
+         Td3[(t1) & 0xff] ^ rk[0];
+    s1 = Td0[(t1 >> 24)] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff] ^
+         Td3[(t2) & 0xff] ^ rk[1];
+    s2 = Td0[(t2 >> 24)] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff] ^
+         Td3[(t3) & 0xff] ^ rk[2];
+    s3 = Td0[(t3 >> 24)] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff] ^
+         Td3[(t0) & 0xff] ^ rk[3];
+  }
+#endif /* ?FULL_UNROLL */
+  /* apply last round and
+   * map cipher state to byte array block: */
+  s0 = ((uint32_t)Td4[(t0 >> 24)] << 24) ^
+       ((uint32_t)Td4[(t3 >> 16) & 0xff] << 16) ^
+       ((uint32_t)Td4[(t2 >> 8) & 0xff] << 8) ^
+       ((uint32_t)Td4[(t1) & 0xff]) ^ rk[0];
+  PUTU32(out, s0);
+  s1 = ((uint32_t)Td4[(t1 >> 24)] << 24) ^
+       ((uint32_t)Td4[(t0 >> 16) & 0xff] << 16) ^
+       ((uint32_t)Td4[(t3 >> 8) & 0xff] << 8) ^
+       ((uint32_t)Td4[(t2) & 0xff]) ^ rk[1];
+  PUTU32(out + 4, s1);
+  s2 = ((uint32_t)Td4[(t2 >> 24)] << 24) ^
+       ((uint32_t)Td4[(t1 >> 16) & 0xff] << 16) ^
+       ((uint32_t)Td4[(t0 >> 8) & 0xff] << 8) ^
+       ((uint32_t)Td4[(t3) & 0xff]) ^ rk[2];
+  PUTU32(out + 8, s2);
+  s3 = ((uint32_t)Td4[(t3 >> 24)] << 24) ^
+       ((uint32_t)Td4[(t2 >> 16) & 0xff] << 16) ^
+       ((uint32_t)Td4[(t1 >> 8) & 0xff] << 8) ^
+       ((uint32_t)Td4[(t0) & 0xff]) ^ rk[3];
+  PUTU32(out + 12, s3);
+}
+
+#else
+
+#if defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64)
+
+static int hwaes_capable(void) {
+  return CRYPTO_is_ARMv8_AES_capable();
+}
+
+int aes_hw_set_encrypt_key(const uint8_t *user_key, const int bits,
+                           AES_KEY *key);
+int aes_hw_set_decrypt_key(const uint8_t *user_key, const int bits,
+                           AES_KEY *key);
+void aes_hw_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
+void aes_hw_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
+
+#else
+
+static int hwaes_capable(void) {
+  return 0;
+}
+
+static int aes_hw_set_encrypt_key(const uint8_t *user_key, int bits, AES_KEY *key) {
+  abort();
+}
+
+static int aes_hw_set_decrypt_key(const uint8_t *user_key, int bits, AES_KEY *key) {
+  abort();
+}
+
+static void aes_hw_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
+  abort();
+}
+
+static void aes_hw_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
+  abort();
+}
+
+#endif
+
+
+/* In this case several functions are provided by asm code. However, one cannot
+ * control asm symbol visibility with command line flags and such so they are
+ * always hidden and wrapped by these C functions, whose visibility can be
+ * controlled. */
+
+void asm_AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
+void AES_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
+  if (hwaes_capable()) {
+    aes_hw_encrypt(in, out, key);
+  } else {
+    asm_AES_encrypt(in, out, key);
+  }
+}
+
+void asm_AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
+void AES_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key) {
+  if (hwaes_capable()) {
+    aes_hw_decrypt(in, out, key);
+  } else {
+    asm_AES_decrypt(in, out, key);
+  }
+}
+
+int asm_AES_set_encrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey);
+int AES_set_encrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) {
+  if (hwaes_capable()) {
+    return aes_hw_set_encrypt_key(key, bits, aeskey);
+  } else {
+    return asm_AES_set_encrypt_key(key, bits, aeskey);
+  }
+}
+
+int asm_AES_set_decrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey);
+int AES_set_decrypt_key(const uint8_t *key, unsigned bits, AES_KEY *aeskey) {
+  if (hwaes_capable()) {
+    return aes_hw_set_decrypt_key(key, bits, aeskey);
+  } else {
+    return asm_AES_set_decrypt_key(key, bits, aeskey);
+  }
+}
+
+#endif  /* OPENSSL_NO_ASM || (!OPENSSL_X86 && !OPENSSL_X86_64 && !OPENSSL_ARM) */
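For reference, a minimal caller-side sketch of the block API defined above. It assumes only the declarations from <openssl/aes.h> that the code above implements (AES_set_encrypt_key/AES_set_decrypt_key take the key length in bits and return 0 on success; encryption and decryption may be done in place); the standalone main() harness is illustrative only, and the key/plaintext/ciphertext triple is the 128-bit FIPS-197 Appendix C vector that also appears in aes_tests.txt below.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#include <openssl/aes.h>

int main(void) {
  /* AES-128 vector from FIPS-197, Appendix C (also in aes_tests.txt). */
  static const uint8_t kKey[16] = {
      0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
      0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f};
  static const uint8_t kPlaintext[16] = {
      0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
      0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff};
  static const uint8_t kCiphertext[16] = {
      0x69, 0xc4, 0xe0, 0xd8, 0x6a, 0x7b, 0x04, 0x30,
      0xd8, 0xcd, 0xb7, 0x80, 0x70, 0xb4, 0xc5, 0x5a};

  AES_KEY enc, dec;
  uint8_t block[AES_BLOCK_SIZE];

  /* Key setup takes the key length in bits and returns 0 on success. */
  assert(AES_set_encrypt_key(kKey, 128, &enc) == 0);
  assert(AES_set_decrypt_key(kKey, 128, &dec) == 0);

  AES_encrypt(kPlaintext, block, &enc);
  assert(memcmp(block, kCiphertext, sizeof(kCiphertext)) == 0);

  AES_decrypt(block, block, &dec);  /* in-place operation is supported */
  assert(memcmp(block, kPlaintext, sizeof(kPlaintext)) == 0);
  return 0;
}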
diff --git a/src/crypto/fipsmodule/aes/aes_test.cc b/src/crypto/fipsmodule/aes/aes_test.cc
new file mode 100644
index 0000000..e1e9ca6
--- /dev/null
+++ b/src/crypto/fipsmodule/aes/aes_test.cc
@@ -0,0 +1,190 @@
+/* Copyright (c) 2015, Google Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
+
+#include <stdio.h>
+#include <string.h>
+
+#include <memory>
+#include <vector>
+
+#include <openssl/aes.h>
+#include <openssl/crypto.h>
+
+#include "../../internal.h"
+#include "../../test/file_test.h"
+
+
+static bool TestRaw(FileTest *t) {
+  std::vector<uint8_t> key, plaintext, ciphertext;
+  if (!t->GetBytes(&key, "Key") ||
+      !t->GetBytes(&plaintext, "Plaintext") ||
+      !t->GetBytes(&ciphertext, "Ciphertext")) {
+    return false;
+  }
+
+  if (plaintext.size() != AES_BLOCK_SIZE ||
+      ciphertext.size() != AES_BLOCK_SIZE) {
+    t->PrintLine("Plaintext or Ciphertext not a block size.");
+    return false;
+  }
+
+  AES_KEY aes_key;
+  if (AES_set_encrypt_key(key.data(), 8 * key.size(), &aes_key) != 0) {
+    t->PrintLine("AES_set_encrypt_key failed.");
+    return false;
+  }
+
+  // Test encryption.
+  uint8_t block[AES_BLOCK_SIZE];
+  AES_encrypt(plaintext.data(), block, &aes_key);
+  if (!t->ExpectBytesEqual(block, AES_BLOCK_SIZE, ciphertext.data(),
+                           ciphertext.size())) {
+    t->PrintLine("AES_encrypt gave the wrong output.");
+    return false;
+  }
+
+  // Test in-place encryption.
+  OPENSSL_memcpy(block, plaintext.data(), AES_BLOCK_SIZE);
+  AES_encrypt(block, block, &aes_key);
+  if (!t->ExpectBytesEqual(block, AES_BLOCK_SIZE, ciphertext.data(),
+                           ciphertext.size())) {
+    t->PrintLine("In-place AES_encrypt gave the wrong output.");
+    return false;
+  }
+
+  if (AES_set_decrypt_key(key.data(), 8 * key.size(), &aes_key) != 0) {
+    t->PrintLine("AES_set_decrypt_key failed.");
+    return false;
+  }
+
+  // Test decryption.
+  AES_decrypt(ciphertext.data(), block, &aes_key);
+  if (!t->ExpectBytesEqual(block, AES_BLOCK_SIZE, plaintext.data(),
+                           plaintext.size())) {
+    t->PrintLine("AES_decrypt gave the wrong output.");
+    return false;
+  }
+
+  // Test in-place decryption.
+  OPENSSL_memcpy(block, ciphertext.data(), AES_BLOCK_SIZE);
+  AES_decrypt(block, block, &aes_key);
+  if (!t->ExpectBytesEqual(block, AES_BLOCK_SIZE, plaintext.data(),
+                           plaintext.size())) {
+    t->PrintLine("In-place AES_decrypt gave the wrong output.");
+    return false;
+  }
+
+  return true;
+}
+
+static bool TestKeyWrap(FileTest *t) {
+  // All test vectors use the default IV, so test both with implicit and
+  // explicit IV.
+  //
+  // TODO(davidben): Find test vectors that use a different IV.
+  static const uint8_t kDefaultIV[] = {
+      0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6,
+  };
+
+  std::vector<uint8_t> key, plaintext, ciphertext;
+  if (!t->GetBytes(&key, "Key") ||
+      !t->GetBytes(&plaintext, "Plaintext") ||
+      !t->GetBytes(&ciphertext, "Ciphertext")) {
+    return false;
+  }
+
+  if (plaintext.size() + 8 != ciphertext.size()) {
+    t->PrintLine("Invalid Plaintext and Ciphertext lengths.");
+    return false;
+  }
+
+  AES_KEY aes_key;
+  if (AES_set_encrypt_key(key.data(), 8 * key.size(), &aes_key) != 0) {
+    t->PrintLine("AES_set_encrypt_key failed.");
+    return false;
+  }
+
+  std::unique_ptr<uint8_t[]> buf(new uint8_t[ciphertext.size()]);
+  if (AES_wrap_key(&aes_key, nullptr /* iv */, buf.get(), plaintext.data(),
+                   plaintext.size()) != static_cast<int>(ciphertext.size()) ||
+      !t->ExpectBytesEqual(buf.get(), ciphertext.size(), ciphertext.data(),
+                           ciphertext.size())) {
+    t->PrintLine("AES_wrap_key with implicit IV failed.");
+    return false;
+  }
+
+  OPENSSL_memset(buf.get(), 0, ciphertext.size());
+  if (AES_wrap_key(&aes_key, kDefaultIV, buf.get(), plaintext.data(),
+                   plaintext.size()) != static_cast<int>(ciphertext.size()) ||
+      !t->ExpectBytesEqual(buf.get(), ciphertext.size(), ciphertext.data(),
+                           ciphertext.size())) {
+    t->PrintLine("AES_wrap_key with explicit IV failed.");
+    return false;
+  }
+
+  if (AES_set_decrypt_key(key.data(), 8 * key.size(), &aes_key) != 0) {
+    t->PrintLine("AES_set_decrypt_key failed.");
+    return false;
+  }
+
+  buf.reset(new uint8_t[plaintext.size()]);
+  if (AES_unwrap_key(&aes_key, nullptr /* iv */, buf.get(), ciphertext.data(),
+                     ciphertext.size()) != static_cast<int>(plaintext.size()) ||
+      !t->ExpectBytesEqual(buf.get(), plaintext.size(), plaintext.data(),
+                           plaintext.size())) {
+    t->PrintLine("AES_unwrap_key with implicit IV failed.");
+    return false;
+  }
+
+  OPENSSL_memset(buf.get(), 0, plaintext.size());
+  if (AES_unwrap_key(&aes_key, kDefaultIV, buf.get(), ciphertext.data(),
+                     ciphertext.size()) != static_cast<int>(plaintext.size()) ||
+      !t->ExpectBytesEqual(buf.get(), plaintext.size(), plaintext.data(),
+                           plaintext.size())) {
+    t->PrintLine("AES_unwrap_key with explicit IV failed.");
+    return false;
+  }
+
+  ciphertext[0] ^= 1;
+  if (AES_unwrap_key(&aes_key, nullptr /* iv */, buf.get(), ciphertext.data(),
+                     ciphertext.size()) != -1) {
+    t->PrintLine("AES_unwrap_key with bad input unexpectedly succeeded.");
+    return false;
+  }
+
+  return true;
+}
+
+static bool TestAES(FileTest *t, void *arg) {
+  if (t->GetParameter() == "Raw") {
+    return TestRaw(t);
+  }
+  if (t->GetParameter() == "KeyWrap") {
+    return TestKeyWrap(t);
+  }
+
+  t->PrintLine("Unknown mode '%s'.", t->GetParameter().c_str());
+  return false;
+}
+
+int main(int argc, char **argv) {
+  CRYPTO_library_init();
+
+  if (argc != 2) {
+    fprintf(stderr, "%s <test file.txt>\n", argv[0]);
+    return 1;
+  }
+
+  return FileTestMain(TestAES, nullptr, argv[1]);
+}
diff --git a/src/crypto/fipsmodule/aes/aes_tests.txt b/src/crypto/fipsmodule/aes/aes_tests.txt
new file mode 100644
index 0000000..d4e4c61
--- /dev/null
+++ b/src/crypto/fipsmodule/aes/aes_tests.txt
@@ -0,0 +1,50 @@
+# Test vectors from FIPS-197, Appendix C.
+
+Mode = Raw
+Key = 000102030405060708090a0b0c0d0e0f
+Plaintext = 00112233445566778899aabbccddeeff
+Ciphertext = 69c4e0d86a7b0430d8cdb78070b4c55a
+
+Mode = Raw
+Key = 000102030405060708090a0b0c0d0e0f1011121314151617
+Plaintext = 00112233445566778899aabbccddeeff
+Ciphertext = dda97ca4864cdfe06eaf70a0ec0d7191
+
+Mode = Raw
+Key = 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f
+Plaintext = 00112233445566778899aabbccddeeff
+Ciphertext = 8ea2b7ca516745bfeafc49904b496089
+
+
+# Test vectors from
+# http://csrc.nist.gov/groups/ST/toolkit/documents/kms/key-wrap.pdf
+
+Mode = KeyWrap
+Key = 000102030405060708090a0b0c0d0e0f
+Plaintext = 00112233445566778899aabbccddeeff
+Ciphertext = 1fa68b0a8112b447aef34bd8fb5a7b829d3e862371d2cfe5
+
+Mode = KeyWrap
+Key = 000102030405060708090a0b0c0d0e0f1011121314151617
+Plaintext = 00112233445566778899aabbccddeeff
+Ciphertext = 96778b25ae6ca435f92b5b97c050aed2468ab8a17ad84e5d
+
+Mode = KeyWrap
+Key = 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f
+Plaintext = 00112233445566778899aabbccddeeff
+Ciphertext = 64e8c3f9ce0f5ba263e9777905818a2a93c8191e7d6e8ae7
+
+Mode = KeyWrap
+Key = 000102030405060708090a0b0c0d0e0f1011121314151617
+Plaintext = 00112233445566778899aabbccddeeff0001020304050607
+Ciphertext = 031d33264e15d33268f24ec260743edce1c6c7ddee725a936ba814915c6762d2
+
+Mode = KeyWrap
+Key = 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f
+Plaintext = 00112233445566778899aabbccddeeff0001020304050607
+Ciphertext = a8f9bc1612c68b3ff6e6f4fbe30e71e4769c8b80a32cb8958cd5d17d6b254da1
+
+Mode = KeyWrap
+Key = 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f
+Plaintext = 00112233445566778899aabbccddeeff000102030405060708090a0b0c0d0e0f
+Ciphertext = 28c9f404c4b810f4cbccb35cfb87f8263f5786e2d80ed326cbc7f0e71a99f43bfb988b9b7a02dd21
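The KeyWrap vectors above exercise the AES key-wrap construction (RFC 3394) through AES_wrap_key and AES_unwrap_key. A minimal sketch of that flow, assuming only the behaviour demonstrated by aes_test.cc above (wrapping returns the wrapped length, unwrapping returns -1 on an integrity failure, and a NULL IV selects the default 0xa6 bytes); the hard-coded values are the first 128-bit KeyWrap vector above, and the main() harness is illustrative only.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#include <openssl/aes.h>

int main(void) {
  /* First 128-bit KeyWrap vector from the table above. */
  static const uint8_t kKEK[16] = {
      0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
      0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f};
  static const uint8_t kPlaintext[16] = {
      0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
      0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff};
  static const uint8_t kWrapped[24] = {
      0x1f, 0xa6, 0x8b, 0x0a, 0x81, 0x12, 0xb4, 0x47,
      0xae, 0xf3, 0x4b, 0xd8, 0xfb, 0x5a, 0x7b, 0x82,
      0x9d, 0x3e, 0x86, 0x23, 0x71, 0xd2, 0xcf, 0xe5};

  AES_KEY enc, dec;
  uint8_t wrapped[24], unwrapped[16];

  /* Wrapping uses the encryption schedule; a NULL IV selects the default
   * 0xa6a6a6a6a6a6a6a6 integrity value. On success the return value is the
   * wrapped length (plaintext length + 8). */
  assert(AES_set_encrypt_key(kKEK, 128, &enc) == 0);
  assert(AES_wrap_key(&enc, NULL, wrapped, kPlaintext, sizeof(kPlaintext)) == 24);
  assert(memcmp(wrapped, kWrapped, sizeof(kWrapped)) == 0);

  /* Unwrapping uses the decryption schedule and returns -1 if the
   * integrity check fails. */
  assert(AES_set_decrypt_key(kKEK, 128, &dec) == 0);
  assert(AES_unwrap_key(&dec, NULL, unwrapped, wrapped, sizeof(wrapped)) == 16);
  assert(memcmp(unwrapped, kPlaintext, sizeof(kPlaintext)) == 0);
  return 0;
}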
diff --git a/src/crypto/fipsmodule/aes/asm/aes-586.pl b/src/crypto/fipsmodule/aes/asm/aes-586.pl
new file mode 100755
index 0000000..a2c6eb5
--- /dev/null
+++ b/src/crypto/fipsmodule/aes/asm/aes-586.pl
@@ -0,0 +1,2993 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# Version 4.3.
+#
+# You might fail to appreciate this module performance from the first
+# try. If compared to "vanilla" linux-ia32-icc target, i.e. considered
+# to be *the* best Intel C compiler without -KPIC, performance appears
+# to be virtually identical... But try to re-configure with shared
+# library support... Aha! Intel compiler "suddenly" lags behind by 30%
+# [on P4, more on others]:-) And if compared to position-independent
+# code generated by GNU C, this code performs *more* than *twice* as
+# fast! Yes, all this buzz about PIC means that unlike other hand-
+# coded implementations, this one was explicitly designed to be safe
+# to use even in shared library context... This also means that this
+# code isn't necessarily absolutely fastest "ever," because in order
+# to achieve position independence an extra register has to be
+# off-loaded to stack, which affects the benchmark result.
+#
+# Special note about instruction choice. Do you recall RC4_INT code
+# performing poorly on P4? It might be the time to figure out why.
+# RC4_INT code implies effective address calculations in base+offset*4
+# form. Trouble is that it seems that offset scaling turned out to be the
+# critical path... At least eliminating scaling resulted in 2.8x RC4
+# performance improvement [as you might recall]. As AES code is hungry
+# for scaling too, I [try to] avoid the latter by favoring off-by-2
+# shifts and masking the result with 0xFF<<2 instead of "boring" 0xFF.
+#
+# As was shown by Dean Gaudet <dean@arctic.org>, the above note turned
+# void. Performance improvement with off-by-2 shifts was observed on
+# intermediate implementation, which was spilling yet another register
+# to stack... Final offset*4 code below runs just a tad faster on P4,
+# but exhibits up to 10% improvement on other cores.
+#
+# Second version is "monolithic" replacement for aes_core.c, which in
+# addition to AES_[de|en]crypt implements AES_set_[de|en]cryption_key.
+# This made it possible to implement little-endian variant of the
+# algorithm without modifying the base C code. Motivating factor for
+# the undertaken effort was that it appeared that in tight IA-32
+# register window little-endian flavor could achieve slightly higher
+# Instruction Level Parallelism, and it indeed resulted in up to 15%
+# better performance on most recent µ-archs...
+#
+# Third version adds AES_cbc_encrypt implementation, which resulted in
+# up to 40% performance improvement of CBC benchmark results. 40% was
+# observed on P4 core, where "overall" improvement coefficient, i.e. if
+# compared to PIC generated by GCC and in CBC mode, was observed to be
+# as large as 4x:-) CBC performance is virtually identical to ECB now
+# and on some platforms even better, e.g. 17.6 "small" cycles/byte on
+# Opteron, because certain function prologues and epilogues are
+# effectively taken out of the loop...
+#
+# Version 3.2 implements compressed tables and prefetch of these tables
+# in CBC[!] mode. Former means that 3/4 of table references are now
+# misaligned, which unfortunately has negative impact on elder IA-32
+# implementations, Pentium suffered 30% penalty, PIII - 10%.
+#
+# Version 3.3 avoids L1 cache aliasing between stack frame and
+# S-boxes, and 3.4 - L1 cache aliasing even between key schedule. The
+# latter is achieved by copying the key schedule to controlled place in
+# stack. This unfortunately has rather strong impact on small block CBC
+# performance, ~2x deterioration on 16-byte block if compared to 3.3.
+#
+# Version 3.5 checks if there is L1 cache aliasing between user-supplied
+# key schedule and S-boxes and abstains from copying the former if
+# there is no. This allows end-user to consciously retain small block
+# performance by aligning key schedule in specific manner.
+#
+# Version 3.6 compresses Td4 to 256 bytes and prefetches it in ECB.
+#
+# Current ECB performance numbers for 128-bit key in CPU cycles per
+# processed byte [measure commonly used by AES benchmarkers] are:
+#
+#		small footprint		fully unrolled
+# P4		24			22
+# AMD K8	20			19
+# PIII		25			23
+# Pentium	81			78
+#
+# Version 3.7 reimplements outer rounds as "compact." Meaning that
+# first and last rounds reference compact 256 bytes S-box. This means
+# that first round consumes a lot more CPU cycles and that encrypt
+# and decrypt performance becomes asymmetric. Encrypt performance
+# drops by 10-12%, while decrypt - by 20-25%:-( 256 bytes S-box is
+# aggressively pre-fetched.
+#
+# Version 4.0 effectively rolls back to 3.6 and instead implements
+# additional set of functions, _[x86|sse]_AES_[en|de]crypt_compact,
+# which use exclusively 256 byte S-box. These functions are to be
+# called in modes not concealing plain text, such as ECB, or when
+# we're asked to process smaller amount of data [or unconditionally
+# on hyper-threading CPU]. Currently it's called unconditionally from
+# AES_[en|de]crypt, which affects all modes, but CBC. CBC routine
+# still needs to be modified to switch between slower and faster
+# mode when appropriate... But in either case benchmark landscape
+# changes dramatically and below numbers are CPU cycles per processed
+# byte for 128-bit key.
+#
+#		ECB encrypt	ECB decrypt	CBC large chunk
+# P4		52[54]		83[95]		23
+# AMD K8	46[41]		66[70]		18
+# PIII		41[50]		60[77]		24
+# Core 2	31[36]		45[64]		18.5
+# Atom		76[100]		96[138]		60
+# Pentium	115		150		77
+#
+# Version 4.1 switches to compact S-box even in key schedule setup.
+#
+# Version 4.2 prefetches compact S-box in every SSE round or in other
+# words every cache-line is *guaranteed* to be accessed within ~50
+# cycles window. Why just SSE? Because it's needed on hyper-threading
+# CPU! Which is also why it's prefetched with 64 byte stride. Best
+# part is that it has no negative effect on performance:-)
+#
+# Version 4.3 implements switch between compact and non-compact block
+# functions in AES_cbc_encrypt depending on how much data was asked
+# to be processed in one stroke.
+#
+######################################################################
+# Timing attacks are classified in two classes: synchronous when
+# attacker consciously initiates cryptographic operation and collects
+# timing data of various character afterwards, and asynchronous when
+# malicious code is executed on same CPU simultaneously with AES,
+# instruments itself and performs statistical analysis of this data.
+#
+# As far as synchronous attacks go, the root of the AES timing
+# vulnerability is twofold. Firstly, of 256 S-box elements at most 160
+# are referred to in single 128-bit block operation. Well, in C
+# implementation with 4 distinct tables it's actually as little as 40
+# references per 256 elements table, but anyway... Secondly, even
+# though S-box elements are clustered into smaller amount of cache-
+# lines, smaller than 160 and even 40, it turned out that for certain
+# plain-text pattern[s] or simply put chosen plain-text and given key
+# few cache-lines remain unaccessed during block operation. Now, if
+# attacker can figure out this access pattern, he can deduce the key
+# [or at least part of it]. The natural way to mitigate this kind of
+# attack is to minimize the number of cache-lines in the S-box and/or
+# prefetch them to ensure that every one is accessed for more uniform
+# timing. But note that *if* plain-text was concealed in such way that
+# input to block function is distributed *uniformly*, then attack
+# wouldn't apply. Now note that some encryption modes, most notably
+# CBC, do mask the plain-text in this exact way [secure cipher output
+# is distributed uniformly]. Yes, one still might find input that
+# would reveal the information about given key, but if amount of
+# candidate inputs to be tried is larger than amount of possible key
+# combinations then attack becomes infeasible. This is why revised
+# AES_cbc_encrypt "dares" to switch to larger S-box when larger chunk
+# of data is to be processed in one stroke. The current size limit of
+# 512 bytes is chosen to provide the same [diminishingly low] probability
+# for cache-line to remain untouched in large chunk operation with
+# large S-box as for single block operation with compact S-box and
+# surely needs more careful consideration...
+#
+# As for asynchronous attacks. There are two flavours: attacker code
+# being interleaved with AES on hyper-threading CPU at *instruction*
+# level, and two processes time sharing single core. As for latter.
+# Two vectors. 1. Given that attacker process has higher priority,
+# yield execution to process performing AES just before timer fires
+# off the scheduler, immediately regain control of CPU and analyze the
+# cache state. For this attack to be efficient attacker would have to
+# effectively slow down the operation by several *orders* of magnitude,
+# by the ratio of time slice to duration of a handful of AES rounds, which
+# is unlikely to remain unnoticed. Not to mention that this also means
+# that he would spend correspondingly more time to collect enough
+# statistical data to mount the attack. It's probably appropriate to
+# say that if the adversary reckons that this attack is beneficial and
+# risks being noticed, you probably have larger problems than giving him
+# the mere opportunity. In other words the suggested code design expects you
+# to preclude/mitigate this attack by overall system security design.
+# 2. Attacker manages to make his code interrupt driven. In order for
+# this kind of attack to be feasible, interrupt rate has to be high
+# enough, again comparable to duration of handful of AES rounds. But
+# is there interrupt source of such rate? Hardly, not even 1Gbps NIC
+# generates interrupts at such raging rate...
+#
+# And now back to the former, hyper-threading CPU or more specifically
+# Intel P4. Recall that asynchronous attack implies that malicious
+# code instruments itself. And naturally instrumentation granularity
+# has to be noticeably lower than duration of codepath accessing S-box.
+# Given that all cache-lines are accessed during that time that is.
+# Current implementation accesses *all* cache-lines within ~50 cycles
+# window, which is actually *less* than RDTSC latency on Intel P4!
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../../perlasm");
+require "x86asm.pl";
+
+$output = pop;
+open OUT,">$output";
+*STDOUT=*OUT;
+
+&asm_init($ARGV[0],"aes-586.pl",$x86only = $ARGV[$#ARGV] eq "386");
+&static_label("AES_Te");
+&static_label("AES_Td");
+
+$s0="eax";
+$s1="ebx";
+$s2="ecx";
+$s3="edx";
+$key="edi";
+$acc="esi";
+$tbl="ebp";
+
+# stack frame layout in _[x86|sse]_AES_* routines, frame is allocated
+# by caller
+$__ra=&DWP(0,"esp");	# return address
+$__s0=&DWP(4,"esp");	# s0 backing store
+$__s1=&DWP(8,"esp");	# s1 backing store
+$__s2=&DWP(12,"esp");	# s2 backing store
+$__s3=&DWP(16,"esp");	# s3 backing store
+$__key=&DWP(20,"esp");	# pointer to key schedule
+$__end=&DWP(24,"esp");	# pointer to end of key schedule
+$__tbl=&DWP(28,"esp");	# %ebp backing store
+
+# stack frame layout in AES_[en|de]crypt routines, which differs from
+# above by 4 and overlaps by %ebp backing store
+$_tbl=&DWP(24,"esp");
+$_esp=&DWP(28,"esp");
+
+sub _data_word() { my $i; while(defined($i=shift)) { &data_word($i,$i); } }
+
+$speed_limit=512;	# chunks smaller than $speed_limit are
+			# processed with compact routine in CBC mode
+$small_footprint=1;	# $small_footprint=1 code is ~5% slower [on
+			# recent µ-archs], but ~5 times smaller!
+			# I favor compact code to minimize cache
+			# contention and in hope to "collect" 5% back
+			# in real-life applications...
+
+$vertical_spin=0;	# shift "vertically" defaults to 0, because of
+			# its proof-of-concept status...
+# Note that there is no decvert(), and the last encryption round is
+# performed with "horizontal" shifts. This is because this "vertical"
+# implementation [one which groups shifts on a given $s[i] to form a
+# "column," unlike "horizontal" one, which groups shifts on different
+# $s[i] to form a "row"] is work in progress. It was observed to run
+# few percents faster on Intel cores, but not AMD. On AMD K8 core it's
+# whole 12% slower:-( So we face a trade-off... Shall it be resolved
+# some day? Till then the code is considered experimental and by
+# default remains dormant...
+
+sub encvert()
+{ my ($te,@s) = @_;
+  my ($v0,$v1) = ($acc,$key);
+
+	&mov	($v0,$s[3]);				# copy s3
+	&mov	(&DWP(4,"esp"),$s[2]);			# save s2
+	&mov	($v1,$s[0]);				# copy s0
+	&mov	(&DWP(8,"esp"),$s[1]);			# save s1
+
+	&movz	($s[2],&HB($s[0]));
+	&and	($s[0],0xFF);
+	&mov	($s[0],&DWP(0,$te,$s[0],8));		# s0>>0
+	&shr	($v1,16);
+	&mov	($s[3],&DWP(3,$te,$s[2],8));		# s0>>8
+	&movz	($s[1],&HB($v1));
+	&and	($v1,0xFF);
+	&mov	($s[2],&DWP(2,$te,$v1,8));		# s0>>16
+	 &mov	($v1,$v0);
+	&mov	($s[1],&DWP(1,$te,$s[1],8));		# s0>>24
+
+	&and	($v0,0xFF);
+	&xor	($s[3],&DWP(0,$te,$v0,8));		# s3>>0
+	&movz	($v0,&HB($v1));
+	&shr	($v1,16);
+	&xor	($s[2],&DWP(3,$te,$v0,8));		# s3>>8
+	&movz	($v0,&HB($v1));
+	&and	($v1,0xFF);
+	&xor	($s[1],&DWP(2,$te,$v1,8));		# s3>>16
+	 &mov	($v1,&DWP(4,"esp"));			# restore s2
+	&xor	($s[0],&DWP(1,$te,$v0,8));		# s3>>24
+
+	&mov	($v0,$v1);
+	&and	($v1,0xFF);
+	&xor	($s[2],&DWP(0,$te,$v1,8));		# s2>>0
+	&movz	($v1,&HB($v0));
+	&shr	($v0,16);
+	&xor	($s[1],&DWP(3,$te,$v1,8));		# s2>>8
+	&movz	($v1,&HB($v0));
+	&and	($v0,0xFF);
+	&xor	($s[0],&DWP(2,$te,$v0,8));		# s2>>16
+	 &mov	($v0,&DWP(8,"esp"));			# restore s1
+	&xor	($s[3],&DWP(1,$te,$v1,8));		# s2>>24
+
+	&mov	($v1,$v0);
+	&and	($v0,0xFF);
+	&xor	($s[1],&DWP(0,$te,$v0,8));		# s1>>0
+	&movz	($v0,&HB($v1));
+	&shr	($v1,16);
+	&xor	($s[0],&DWP(3,$te,$v0,8));		# s1>>8
+	&movz	($v0,&HB($v1));
+	&and	($v1,0xFF);
+	&xor	($s[3],&DWP(2,$te,$v1,8));		# s1>>16
+	 &mov	($key,$__key);				# reincarnate v1 as key
+	&xor	($s[2],&DWP(1,$te,$v0,8));		# s1>>24
+}
+
+# Another experimental routine, which features "horizontal spin," but
+# eliminates one reference to stack. Strangely enough runs slower...
+sub enchoriz()
+{ my ($v0,$v1) = ($key,$acc);
+
+	&movz	($v0,&LB($s0));			#  3, 2, 1, 0*
+	&rotr	($s2,8);			#  8,11,10, 9
+	&mov	($v1,&DWP(0,$te,$v0,8));	#  0
+	&movz	($v0,&HB($s1));			#  7, 6, 5*, 4
+	&rotr	($s3,16);			# 13,12,15,14
+	&xor	($v1,&DWP(3,$te,$v0,8));	#  5
+	&movz	($v0,&HB($s2));			#  8,11,10*, 9
+	&rotr	($s0,16);			#  1, 0, 3, 2
+	&xor	($v1,&DWP(2,$te,$v0,8));	# 10
+	&movz	($v0,&HB($s3));			# 13,12,15*,14
+	&xor	($v1,&DWP(1,$te,$v0,8));	# 15, t[0] collected
+	&mov	($__s0,$v1);			# t[0] saved
+
+	&movz	($v0,&LB($s1));			#  7, 6, 5, 4*
+	&shr	($s1,16);			#  -, -, 7, 6
+	&mov	($v1,&DWP(0,$te,$v0,8));	#  4
+	&movz	($v0,&LB($s3));			# 13,12,15,14*
+	&xor	($v1,&DWP(2,$te,$v0,8));	# 14
+	&movz	($v0,&HB($s0));			#  1, 0, 3*, 2
+	&and	($s3,0xffff0000);		# 13,12, -, -
+	&xor	($v1,&DWP(1,$te,$v0,8));	#  3
+	&movz	($v0,&LB($s2));			#  8,11,10, 9*
+	&or	($s3,$s1);			# 13,12, 7, 6
+	&xor	($v1,&DWP(3,$te,$v0,8));	#  9, t[1] collected
+	&mov	($s1,$v1);			#  s[1]=t[1]
+
+	&movz	($v0,&LB($s0));			#  1, 0, 3, 2*
+	&shr	($s2,16);			#  -, -, 8,11
+	&mov	($v1,&DWP(2,$te,$v0,8));	#  2
+	&movz	($v0,&HB($s3));			# 13,12, 7*, 6
+	&xor	($v1,&DWP(1,$te,$v0,8));	#  7
+	&movz	($v0,&HB($s2));			#  -, -, 8*,11
+	&xor	($v1,&DWP(0,$te,$v0,8));	#  8
+	&mov	($v0,$s3);
+	&shr	($v0,24);			# 13
+	&xor	($v1,&DWP(3,$te,$v0,8));	# 13, t[2] collected
+
+	&movz	($v0,&LB($s2));			#  -, -, 8,11*
+	&shr	($s0,24);			#  1*
+	&mov	($s2,&DWP(1,$te,$v0,8));	# 11
+	&xor	($s2,&DWP(3,$te,$s0,8));	#  1
+	&mov	($s0,$__s0);			# s[0]=t[0]
+	&movz	($v0,&LB($s3));			# 13,12, 7, 6*
+	&shr	($s3,16);			#   ,  ,13,12
+	&xor	($s2,&DWP(2,$te,$v0,8));	#  6
+	&mov	($key,$__key);			# reincarnate v0 as key
+	&and	($s3,0xff);			#   ,  ,13,12*
+	&mov	($s3,&DWP(0,$te,$s3,8));	# 12
+	&xor	($s3,$s2);			# s[2]=t[3] collected
+	&mov	($s2,$v1);			# s[2]=t[2]
+}
+
+# More experimental code... SSE one... Even though this one eliminates
+# *all* references to stack, it's not faster...
+sub sse_encbody()
+{
+	&movz	($acc,&LB("eax"));		#  0
+	&mov	("ecx",&DWP(0,$tbl,$acc,8));	#  0
+	&pshufw	("mm2","mm0",0x0d);		#  7, 6, 3, 2
+	&movz	("edx",&HB("eax"));		#  1
+	&mov	("edx",&DWP(3,$tbl,"edx",8));	#  1
+	&shr	("eax",16);			#  5, 4
+
+	&movz	($acc,&LB("ebx"));		# 10
+	&xor	("ecx",&DWP(2,$tbl,$acc,8));	# 10
+	&pshufw	("mm6","mm4",0x08);		# 13,12, 9, 8
+	&movz	($acc,&HB("ebx"));		# 11
+	&xor	("edx",&DWP(1,$tbl,$acc,8));	# 11
+	&shr	("ebx",16);			# 15,14
+
+	&movz	($acc,&HB("eax"));		#  5
+	&xor	("ecx",&DWP(3,$tbl,$acc,8));	#  5
+	&movq	("mm3",QWP(16,$key));
+	&movz	($acc,&HB("ebx"));		# 15
+	&xor	("ecx",&DWP(1,$tbl,$acc,8));	# 15
+	&movd	("mm0","ecx");			# t[0] collected
+
+	&movz	($acc,&LB("eax"));		#  4
+	&mov	("ecx",&DWP(0,$tbl,$acc,8));	#  4
+	&movd	("eax","mm2");			#  7, 6, 3, 2
+	&movz	($acc,&LB("ebx"));		# 14
+	&xor	("ecx",&DWP(2,$tbl,$acc,8));	# 14
+	&movd	("ebx","mm6");			# 13,12, 9, 8
+
+	&movz	($acc,&HB("eax"));		#  3
+	&xor	("ecx",&DWP(1,$tbl,$acc,8));	#  3
+	&movz	($acc,&HB("ebx"));		#  9
+	&xor	("ecx",&DWP(3,$tbl,$acc,8));	#  9
+	&movd	("mm1","ecx");			# t[1] collected
+
+	&movz	($acc,&LB("eax"));		#  2
+	&mov	("ecx",&DWP(2,$tbl,$acc,8));	#  2
+	&shr	("eax",16);			#  7, 6
+	&punpckldq	("mm0","mm1");		# t[0,1] collected
+	&movz	($acc,&LB("ebx"));		#  8
+	&xor	("ecx",&DWP(0,$tbl,$acc,8));	#  8
+	&shr	("ebx",16);			# 13,12
+
+	&movz	($acc,&HB("eax"));		#  7
+	&xor	("ecx",&DWP(1,$tbl,$acc,8));	#  7
+	&pxor	("mm0","mm3");
+	&movz	("eax",&LB("eax"));		#  6
+	&xor	("edx",&DWP(2,$tbl,"eax",8));	#  6
+	&pshufw	("mm1","mm0",0x08);		#  5, 4, 1, 0
+	&movz	($acc,&HB("ebx"));		# 13
+	&xor	("ecx",&DWP(3,$tbl,$acc,8));	# 13
+	&xor	("ecx",&DWP(24,$key));		# t[2]
+	&movd	("mm4","ecx");			# t[2] collected
+	&movz	("ebx",&LB("ebx"));		# 12
+	&xor	("edx",&DWP(0,$tbl,"ebx",8));	# 12
+	&shr	("ecx",16);
+	&movd	("eax","mm1");			#  5, 4, 1, 0
+	&mov	("ebx",&DWP(28,$key));		# t[3]
+	&xor	("ebx","edx");
+	&movd	("mm5","ebx");			# t[3] collected
+	&and	("ebx",0xffff0000);
+	&or	("ebx","ecx");
+
+	&punpckldq	("mm4","mm5");		# t[2,3] collected
+}
+
+######################################################################
+# "Compact" block function
+######################################################################
+
+sub enccompact()
+{ my $Fn = \&mov;
+  while ($#_>5) { pop(@_); $Fn=sub{}; }
+  my ($i,$te,@s)=@_;
+  my $tmp = $key;
+  my $out = $i==3?$s[0]:$acc;
+
+	# $Fn is used in first compact round and its purpose is to
+	# void restoration of some values from stack, so that after
+	# 4xenccompact with extra argument $key value is left there...
+	if ($i==3)  {	&$Fn	($key,$__key);			}##%edx
+	else        {	&mov	($out,$s[0]);			}
+			&and	($out,0xFF);
+	if ($i==1)  {	&shr	($s[0],16);			}#%ebx[1]
+	if ($i==2)  {	&shr	($s[0],24);			}#%ecx[2]
+			&movz	($out,&BP(-128,$te,$out,1));
+
+	if ($i==3)  {	$tmp=$s[1];				}##%eax
+			&movz	($tmp,&HB($s[1]));
+			&movz	($tmp,&BP(-128,$te,$tmp,1));
+			&shl	($tmp,8);
+			&xor	($out,$tmp);
+
+	if ($i==3)  {	$tmp=$s[2]; &mov ($s[1],$__s0);		}##%ebx
+	else        {	&mov	($tmp,$s[2]);
+			&shr	($tmp,16);			}
+	if ($i==2)  {	&and	($s[1],0xFF);			}#%edx[2]
+			&and	($tmp,0xFF);
+			&movz	($tmp,&BP(-128,$te,$tmp,1));
+			&shl	($tmp,16);
+			&xor	($out,$tmp);
+
+	if ($i==3)  {	$tmp=$s[3]; &mov ($s[2],$__s1);		}##%ecx
+	elsif($i==2){	&movz	($tmp,&HB($s[3]));		}#%ebx[2]
+	else        {	&mov	($tmp,$s[3]);
+			&shr	($tmp,24);			}
+			&movz	($tmp,&BP(-128,$te,$tmp,1));
+			&shl	($tmp,24);
+			&xor	($out,$tmp);
+	if ($i<2)   {	&mov	(&DWP(4+4*$i,"esp"),$out);	}
+	if ($i==3)  {	&mov	($s[3],$acc);			}
+	&comment();
+}
+
+sub enctransform()
+{ my @s = ($s0,$s1,$s2,$s3);
+  my $i = shift;
+  my $tmp = $tbl;
+  my $r2  = $key ;
+
+	&and	($tmp,$s[$i]);
+	&lea	($r2,&DWP(0,$s[$i],$s[$i]));
+	&mov	($acc,$tmp);
+	&shr	($tmp,7);
+	&and	($r2,0xfefefefe);
+	&sub	($acc,$tmp);
+	&mov	($tmp,$s[$i]);
+	&and	($acc,0x1b1b1b1b);
+	&rotr	($tmp,16);
+	&xor	($acc,$r2);	# r2
+	&mov	($r2,$s[$i]);
+
+	&xor	($s[$i],$acc);	# r0 ^ r2
+	&rotr	($r2,16+8);
+	&xor	($acc,$tmp);
+	&rotl	($s[$i],24);
+	&xor	($acc,$r2);
+	&mov	($tmp,0x80808080)	if ($i!=1);
+	&xor	($s[$i],$acc);	# ROTATE(r2^r0,24) ^ r2
+}
+
+&function_begin_B("_x86_AES_encrypt_compact");
+	# note that caller is expected to allocate stack frame for me!
+	&mov	($__key,$key);			# save key
+
+	&xor	($s0,&DWP(0,$key));		# xor with key
+	&xor	($s1,&DWP(4,$key));
+	&xor	($s2,&DWP(8,$key));
+	&xor	($s3,&DWP(12,$key));
+
+	&mov	($acc,&DWP(240,$key));		# load key->rounds
+	&lea	($acc,&DWP(-2,$acc,$acc));
+	&lea	($acc,&DWP(0,$key,$acc,8));
+	&mov	($__end,$acc);			# end of key schedule
+
+	# prefetch Te4
+	&mov	($key,&DWP(0-128,$tbl));
+	&mov	($acc,&DWP(32-128,$tbl));
+	&mov	($key,&DWP(64-128,$tbl));
+	&mov	($acc,&DWP(96-128,$tbl));
+	&mov	($key,&DWP(128-128,$tbl));
+	&mov	($acc,&DWP(160-128,$tbl));
+	&mov	($key,&DWP(192-128,$tbl));
+	&mov	($acc,&DWP(224-128,$tbl));
+
+	&set_label("loop",16);
+
+		&enccompact(0,$tbl,$s0,$s1,$s2,$s3,1);
+		&enccompact(1,$tbl,$s1,$s2,$s3,$s0,1);
+		&enccompact(2,$tbl,$s2,$s3,$s0,$s1,1);
+		&enccompact(3,$tbl,$s3,$s0,$s1,$s2,1);
+		&mov	($tbl,0x80808080);
+		&enctransform(2);
+		&enctransform(3);
+		&enctransform(0);
+		&enctransform(1);
+		&mov 	($key,$__key);
+		&mov	($tbl,$__tbl);
+		&add	($key,16);		# advance rd_key
+		&xor	($s0,&DWP(0,$key));
+		&xor	($s1,&DWP(4,$key));
+		&xor	($s2,&DWP(8,$key));
+		&xor	($s3,&DWP(12,$key));
+
+	&cmp	($key,$__end);
+	&mov	($__key,$key);
+	&jb	(&label("loop"));
+
+	&enccompact(0,$tbl,$s0,$s1,$s2,$s3);
+	&enccompact(1,$tbl,$s1,$s2,$s3,$s0);
+	&enccompact(2,$tbl,$s2,$s3,$s0,$s1);
+	&enccompact(3,$tbl,$s3,$s0,$s1,$s2);
+
+	&xor	($s0,&DWP(16,$key));
+	&xor	($s1,&DWP(20,$key));
+	&xor	($s2,&DWP(24,$key));
+	&xor	($s3,&DWP(28,$key));
+
+	&ret	();
+&function_end_B("_x86_AES_encrypt_compact");
+
+######################################################################
+# "Compact" SSE block function.
+######################################################################
+#
+# Performance is not actually extraordinary in comparison to pure
+# x86 code. In particular encrypt performance is virtually the same.
+# Decrypt performance on the other hand is 15-20% better on newer
+# µ-archs [but we're thankful for *any* improvement here], and ~50%
+# better on PIII:-) And additionally on the pros side this code
+# eliminates redundant references to stack and thus relieves/
+# minimizes the pressure on the memory bus.
+#
+# MMX register layout                           lsb
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# |          mm4          |          mm0          |
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# |     s3    |     s2    |     s1    |     s0    |
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# |15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+#
+# Indexes translate as s[N/4]>>(8*(N%4)), e.g. 5 means s1>>8.
+# In this terms encryption and decryption "compact" permutation
+# matrices can be depicted as following:
+#
+# encryption              lsb	# decryption              lsb
+# +----++----+----+----+----+	# +----++----+----+----+----+
+# | t0 || 15 | 10 |  5 |  0 |	# | t0 ||  7 | 10 | 13 |  0 |
+# +----++----+----+----+----+	# +----++----+----+----+----+
+# | t1 ||  3 | 14 |  9 |  4 |	# | t1 || 11 | 14 |  1 |  4 |
+# +----++----+----+----+----+	# +----++----+----+----+----+
+# | t2 ||  7 |  2 | 13 |  8 |	# | t2 || 15 |  2 |  5 |  8 |
+# +----++----+----+----+----+	# +----++----+----+----+----+
+# | t3 || 11 |  6 |  1 | 12 |	# | t3 ||  3 |  6 |  9 | 12 |
+# +----++----+----+----+----+	# +----++----+----+----+----+
+#
+######################################################################
+# Why not xmm registers? Short answer. It was actually tested and
+# was not any faster, but *contrary*, most notably on Intel CPUs.
+# Longer answer. Main advantage of using mm registers is that movd
+# latency is lower, especially on Intel P4. While arithmetic
+# instructions are twice as many, they can be scheduled every cycle
+# and not every second one when they are operating on xmm register,
+# so that "arithmetic throughput" remains virtually the same. And
+# finally the code can be executed even on elder SSE-only CPUs:-)
+
+sub sse_enccompact()
+{
+	&pshufw	("mm1","mm0",0x08);		#  5, 4, 1, 0
+	&pshufw	("mm5","mm4",0x0d);		# 15,14,11,10
+	&movd	("eax","mm1");			#  5, 4, 1, 0
+	&movd	("ebx","mm5");			# 15,14,11,10
+	&mov	($__key,$key);
+
+	&movz	($acc,&LB("eax"));		#  0
+	&movz	("edx",&HB("eax"));		#  1
+	&pshufw	("mm2","mm0",0x0d);		#  7, 6, 3, 2
+	&movz	("ecx",&BP(-128,$tbl,$acc,1));	#  0
+	&movz	($key,&LB("ebx"));		# 10
+	&movz	("edx",&BP(-128,$tbl,"edx",1));	#  1
+	&shr	("eax",16);			#  5, 4
+	&shl	("edx",8);			#  1
+
+	&movz	($acc,&BP(-128,$tbl,$key,1));	# 10
+	&movz	($key,&HB("ebx"));		# 11
+	&shl	($acc,16);			# 10
+	&pshufw	("mm6","mm4",0x08);		# 13,12, 9, 8
+	&or	("ecx",$acc);			# 10
+	&movz	($acc,&BP(-128,$tbl,$key,1));	# 11
+	&movz	($key,&HB("eax"));		#  5
+	&shl	($acc,24);			# 11
+	&shr	("ebx",16);			# 15,14
+	&or	("edx",$acc);			# 11
+
+	&movz	($acc,&BP(-128,$tbl,$key,1));	#  5
+	&movz	($key,&HB("ebx"));		# 15
+	&shl	($acc,8);			#  5
+	&or	("ecx",$acc);			#  5
+	&movz	($acc,&BP(-128,$tbl,$key,1));	# 15
+	&movz	($key,&LB("eax"));		#  4
+	&shl	($acc,24);			# 15
+	&or	("ecx",$acc);			# 15
+
+	&movz	($acc,&BP(-128,$tbl,$key,1));	#  4
+	&movz	($key,&LB("ebx"));		# 14
+	&movd	("eax","mm2");			#  7, 6, 3, 2
+	&movd	("mm0","ecx");			# t[0] collected
+	&movz	("ecx",&BP(-128,$tbl,$key,1));	# 14
+	&movz	($key,&HB("eax"));		#  3
+	&shl	("ecx",16);			# 14
+	&movd	("ebx","mm6");			# 13,12, 9, 8
+	&or	("ecx",$acc);			# 14
+
+	&movz	($acc,&BP(-128,$tbl,$key,1));	#  3
+	&movz	($key,&HB("ebx"));		#  9
+	&shl	($acc,24);			#  3
+	&or	("ecx",$acc);			#  3
+	&movz	($acc,&BP(-128,$tbl,$key,1));	#  9
+	&movz	($key,&LB("ebx"));		#  8
+	&shl	($acc,8);			#  9
+	&shr	("ebx",16);			# 13,12
+	&or	("ecx",$acc);			#  9
+
+	&movz	($acc,&BP(-128,$tbl,$key,1));	#  8
+	&movz	($key,&LB("eax"));		#  2
+	&shr	("eax",16);			#  7, 6
+	&movd	("mm1","ecx");			# t[1] collected
+	&movz	("ecx",&BP(-128,$tbl,$key,1));	#  2
+	&movz	($key,&HB("eax"));		#  7
+	&shl	("ecx",16);			#  2
+	&and	("eax",0xff);			#  6
+	&or	("ecx",$acc);			#  2
+
+	&punpckldq	("mm0","mm1");		# t[0,1] collected
+
+	&movz	($acc,&BP(-128,$tbl,$key,1));	#  7
+	&movz	($key,&HB("ebx"));		# 13
+	&shl	($acc,24);			#  7
+	&and	("ebx",0xff);			# 12
+	&movz	("eax",&BP(-128,$tbl,"eax",1));	#  6
+	&or	("ecx",$acc);			#  7
+	&shl	("eax",16);			#  6
+	&movz	($acc,&BP(-128,$tbl,$key,1));	# 13
+	&or	("edx","eax");			#  6
+	&shl	($acc,8);			# 13
+	&movz	("ebx",&BP(-128,$tbl,"ebx",1));	# 12
+	&or	("ecx",$acc);			# 13
+	&or	("edx","ebx");			# 12
+	&mov	($key,$__key);
+	&movd	("mm4","ecx");			# t[2] collected
+	&movd	("mm5","edx");			# t[3] collected
+
+	&punpckldq	("mm4","mm5");		# t[2,3] collected
+}
+
+					if (!$x86only) {
+&function_begin_B("_sse_AES_encrypt_compact");
+	&pxor	("mm0",&QWP(0,$key));	#  7, 6, 5, 4, 3, 2, 1, 0
+	&pxor	("mm4",&QWP(8,$key));	# 15,14,13,12,11,10, 9, 8
+
+	# note that caller is expected to allocate stack frame for me!
+	&mov	($acc,&DWP(240,$key));		# load key->rounds
+	&lea	($acc,&DWP(-2,$acc,$acc));
+	&lea	($acc,&DWP(0,$key,$acc,8));
+	&mov	($__end,$acc);			# end of key schedule
+
+	&mov	($s0,0x1b1b1b1b);		# magic constant
+	&mov	(&DWP(8,"esp"),$s0);
+	&mov	(&DWP(12,"esp"),$s0);
+
+	# prefetch Te4
+	&mov	($s0,&DWP(0-128,$tbl));
+	&mov	($s1,&DWP(32-128,$tbl));
+	&mov	($s2,&DWP(64-128,$tbl));
+	&mov	($s3,&DWP(96-128,$tbl));
+	&mov	($s0,&DWP(128-128,$tbl));
+	&mov	($s1,&DWP(160-128,$tbl));
+	&mov	($s2,&DWP(192-128,$tbl));
+	&mov	($s3,&DWP(224-128,$tbl));
+
+	&set_label("loop",16);
+		&sse_enccompact();
+		&add	($key,16);
+		&cmp	($key,$__end);
+		&ja	(&label("out"));
+
+		&movq	("mm2",&QWP(8,"esp"));
+		&pxor	("mm3","mm3");		&pxor	("mm7","mm7");
+		&movq	("mm1","mm0");		&movq	("mm5","mm4");	# r0
+		&pcmpgtb("mm3","mm0");		&pcmpgtb("mm7","mm4");
+		&pand	("mm3","mm2");		&pand	("mm7","mm2");
+		&pshufw	("mm2","mm0",0xb1);	&pshufw	("mm6","mm4",0xb1);# ROTATE(r0,16)
+		&paddb	("mm0","mm0");		&paddb	("mm4","mm4");
+		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# = r2
+		&pshufw	("mm3","mm2",0xb1);	&pshufw	("mm7","mm6",0xb1);# r0
+		&pxor	("mm1","mm0");		&pxor	("mm5","mm4");	# r0^r2
+		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");	# ^= ROTATE(r0,16)
+
+		&movq	("mm2","mm3");		&movq	("mm6","mm7");
+		&pslld	("mm3",8);		&pslld	("mm7",8);
+		&psrld	("mm2",24);		&psrld	("mm6",24);
+		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= r0<<8
+		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");	# ^= r0>>24
+
+		&movq	("mm3","mm1");		&movq	("mm7","mm5");
+		&movq	("mm2",&QWP(0,$key));	&movq	("mm6",&QWP(8,$key));
+		&psrld	("mm1",8);		&psrld	("mm5",8);
+		&mov	($s0,&DWP(0-128,$tbl));
+		&pslld	("mm3",24);		&pslld	("mm7",24);
+		&mov	($s1,&DWP(64-128,$tbl));
+		&pxor	("mm0","mm1");		&pxor	("mm4","mm5");	# ^= (r2^r0)<<8
+		&mov	($s2,&DWP(128-128,$tbl));
+		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= (r2^r0)>>24
+		&mov	($s3,&DWP(192-128,$tbl));
+
+		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");
+	&jmp	(&label("loop"));
+
+	&set_label("out",16);
+	&pxor	("mm0",&QWP(0,$key));
+	&pxor	("mm4",&QWP(8,$key));
+
+	&ret	();
+&function_end_B("_sse_AES_encrypt_compact");
+					}
+
+######################################################################
+# Vanilla block function.
+######################################################################
+
+sub encstep()
+{ my ($i,$te,@s) = @_;
+  my $tmp = $key;
+  my $out = $i==3?$s[0]:$acc;
+
+	# lines marked with #%e?x[i] denote "reordered" instructions...
+	if ($i==3)  {	&mov	($key,$__key);			}##%edx
+	else        {	&mov	($out,$s[0]);
+			&and	($out,0xFF);			}
+	if ($i==1)  {	&shr	($s[0],16);			}#%ebx[1]
+	if ($i==2)  {	&shr	($s[0],24);			}#%ecx[2]
+			&mov	($out,&DWP(0,$te,$out,8));
+
+	if ($i==3)  {	$tmp=$s[1];				}##%eax
+			&movz	($tmp,&HB($s[1]));
+			&xor	($out,&DWP(3,$te,$tmp,8));
+
+	if ($i==3)  {	$tmp=$s[2]; &mov ($s[1],$__s0);		}##%ebx
+	else        {	&mov	($tmp,$s[2]);
+			&shr	($tmp,16);			}
+	if ($i==2)  {	&and	($s[1],0xFF);			}#%edx[2]
+			&and	($tmp,0xFF);
+			&xor	($out,&DWP(2,$te,$tmp,8));
+
+	if ($i==3)  {	$tmp=$s[3]; &mov ($s[2],$__s1);		}##%ecx
+	elsif($i==2){	&movz	($tmp,&HB($s[3]));		}#%ebx[2]
+	else        {	&mov	($tmp,$s[3]);
+			&shr	($tmp,24)			}
+			&xor	($out,&DWP(1,$te,$tmp,8));
+	if ($i<2)   {	&mov	(&DWP(4+4*$i,"esp"),$out);	}
+	if ($i==3)  {	&mov	($s[3],$acc);			}
+			&comment();
+}
+
+sub enclast()
+{ my ($i,$te,@s)=@_;
+  my $tmp = $key;
+  my $out = $i==3?$s[0]:$acc;
+
+	if ($i==3)  {	&mov	($key,$__key);			}##%edx
+	else        {	&mov	($out,$s[0]);			}
+			&and	($out,0xFF);
+	if ($i==1)  {	&shr	($s[0],16);			}#%ebx[1]
+	if ($i==2)  {	&shr	($s[0],24);			}#%ecx[2]
+			&mov	($out,&DWP(2,$te,$out,8));
+			&and	($out,0x000000ff);
+
+	if ($i==3)  {	$tmp=$s[1];				}##%eax
+			&movz	($tmp,&HB($s[1]));
+			&mov	($tmp,&DWP(0,$te,$tmp,8));
+			&and	($tmp,0x0000ff00);
+			&xor	($out,$tmp);
+
+	if ($i==3)  {	$tmp=$s[2]; &mov ($s[1],$__s0);		}##%ebx
+	else        {	&mov	($tmp,$s[2]);
+			&shr	($tmp,16);			}
+	if ($i==2)  {	&and	($s[1],0xFF);			}#%edx[2]
+			&and	($tmp,0xFF);
+			&mov	($tmp,&DWP(0,$te,$tmp,8));
+			&and	($tmp,0x00ff0000);
+			&xor	($out,$tmp);
+
+	if ($i==3)  {	$tmp=$s[3]; &mov ($s[2],$__s1);		}##%ecx
+	elsif($i==2){	&movz	($tmp,&HB($s[3]));		}#%ebx[2]
+	else        {	&mov	($tmp,$s[3]);
+			&shr	($tmp,24);			}
+			&mov	($tmp,&DWP(2,$te,$tmp,8));
+			&and	($tmp,0xff000000);
+			&xor	($out,$tmp);
+	if ($i<2)   {	&mov	(&DWP(4+4*$i,"esp"),$out);	}
+	if ($i==3)  {	&mov	($s[3],$acc);			}
+}
+
+&function_begin_B("_x86_AES_encrypt");
+	if ($vertical_spin) {
+		# I need high parts of volatile registers to be accessible...
+		&exch	($s1="edi",$key="ebx");
+		&mov	($s2="esi",$acc="ecx");
+	}
+
+	# note that caller is expected to allocate stack frame for me!
+	&mov	($__key,$key);			# save key
+
+	&xor	($s0,&DWP(0,$key));		# xor with key
+	&xor	($s1,&DWP(4,$key));
+	&xor	($s2,&DWP(8,$key));
+	&xor	($s3,&DWP(12,$key));
+
+	&mov	($acc,&DWP(240,$key));		# load key->rounds
+
+	if ($small_footprint) {
+	    &lea	($acc,&DWP(-2,$acc,$acc));
+	    &lea	($acc,&DWP(0,$key,$acc,8));
+	    &mov	($__end,$acc);		# end of key schedule
+
+	    &set_label("loop",16);
+		if ($vertical_spin) {
+		    &encvert($tbl,$s0,$s1,$s2,$s3);
+		} else {
+		    &encstep(0,$tbl,$s0,$s1,$s2,$s3);
+		    &encstep(1,$tbl,$s1,$s2,$s3,$s0);
+		    &encstep(2,$tbl,$s2,$s3,$s0,$s1);
+		    &encstep(3,$tbl,$s3,$s0,$s1,$s2);
+		}
+		&add	($key,16);		# advance rd_key
+		&xor	($s0,&DWP(0,$key));
+		&xor	($s1,&DWP(4,$key));
+		&xor	($s2,&DWP(8,$key));
+		&xor	($s3,&DWP(12,$key));
+	    &cmp	($key,$__end);
+	    &mov	($__key,$key);
+	    &jb		(&label("loop"));
+	}
+	else {
+	    &cmp	($acc,10);
+	    &jle	(&label("10rounds"));
+	    &cmp	($acc,12);
+	    &jle	(&label("12rounds"));
+
+	&set_label("14rounds",4);
+	    for ($i=1;$i<3;$i++) {
+		if ($vertical_spin) {
+		    &encvert($tbl,$s0,$s1,$s2,$s3);
+		} else {
+		    &encstep(0,$tbl,$s0,$s1,$s2,$s3);
+		    &encstep(1,$tbl,$s1,$s2,$s3,$s0);
+		    &encstep(2,$tbl,$s2,$s3,$s0,$s1);
+		    &encstep(3,$tbl,$s3,$s0,$s1,$s2);
+		}
+		&xor	($s0,&DWP(16*$i+0,$key));
+		&xor	($s1,&DWP(16*$i+4,$key));
+		&xor	($s2,&DWP(16*$i+8,$key));
+		&xor	($s3,&DWP(16*$i+12,$key));
+	    }
+	    &add	($key,32);
+	    &mov	($__key,$key);		# advance rd_key
+	&set_label("12rounds",4);
+	    for ($i=1;$i<3;$i++) {
+		if ($vertical_spin) {
+		    &encvert($tbl,$s0,$s1,$s2,$s3);
+		} else {
+		    &encstep(0,$tbl,$s0,$s1,$s2,$s3);
+		    &encstep(1,$tbl,$s1,$s2,$s3,$s0);
+		    &encstep(2,$tbl,$s2,$s3,$s0,$s1);
+		    &encstep(3,$tbl,$s3,$s0,$s1,$s2);
+		}
+		&xor	($s0,&DWP(16*$i+0,$key));
+		&xor	($s1,&DWP(16*$i+4,$key));
+		&xor	($s2,&DWP(16*$i+8,$key));
+		&xor	($s3,&DWP(16*$i+12,$key));
+	    }
+	    &add	($key,32);
+	    &mov	($__key,$key);		# advance rd_key
+	&set_label("10rounds",4);
+	    for ($i=1;$i<10;$i++) {
+		if ($vertical_spin) {
+		    &encvert($tbl,$s0,$s1,$s2,$s3);
+		} else {
+		    &encstep(0,$tbl,$s0,$s1,$s2,$s3);
+		    &encstep(1,$tbl,$s1,$s2,$s3,$s0);
+		    &encstep(2,$tbl,$s2,$s3,$s0,$s1);
+		    &encstep(3,$tbl,$s3,$s0,$s1,$s2);
+		}
+		&xor	($s0,&DWP(16*$i+0,$key));
+		&xor	($s1,&DWP(16*$i+4,$key));
+		&xor	($s2,&DWP(16*$i+8,$key));
+		&xor	($s3,&DWP(16*$i+12,$key));
+	    }
+	}
+
+	if ($vertical_spin) {
+	    # "reincarnate" some registers for "horizontal" spin...
+	    &mov	($s1="ebx",$key="edi");
+	    &mov	($s2="ecx",$acc="esi");
+	}
+	&enclast(0,$tbl,$s0,$s1,$s2,$s3);
+	&enclast(1,$tbl,$s1,$s2,$s3,$s0);
+	&enclast(2,$tbl,$s2,$s3,$s0,$s1);
+	&enclast(3,$tbl,$s3,$s0,$s1,$s2);
+
+	&add	($key,$small_footprint?16:160);
+	&xor	($s0,&DWP(0,$key));
+	&xor	($s1,&DWP(4,$key));
+	&xor	($s2,&DWP(8,$key));
+	&xor	($s3,&DWP(12,$key));
+
+	&ret	();
+
+&set_label("AES_Te",64);	# Yes! I keep it in the code segment!
+	&_data_word(0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6);
+	&_data_word(0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591);
+	&_data_word(0x50303060, 0x03010102, 0xa96767ce, 0x7d2b2b56);
+	&_data_word(0x19fefee7, 0x62d7d7b5, 0xe6abab4d, 0x9a7676ec);
+	&_data_word(0x45caca8f, 0x9d82821f, 0x40c9c989, 0x877d7dfa);
+	&_data_word(0x15fafaef, 0xeb5959b2, 0xc947478e, 0x0bf0f0fb);
+	&_data_word(0xecadad41, 0x67d4d4b3, 0xfda2a25f, 0xeaafaf45);
+	&_data_word(0xbf9c9c23, 0xf7a4a453, 0x967272e4, 0x5bc0c09b);
+	&_data_word(0xc2b7b775, 0x1cfdfde1, 0xae93933d, 0x6a26264c);
+	&_data_word(0x5a36366c, 0x413f3f7e, 0x02f7f7f5, 0x4fcccc83);
+	&_data_word(0x5c343468, 0xf4a5a551, 0x34e5e5d1, 0x08f1f1f9);
+	&_data_word(0x937171e2, 0x73d8d8ab, 0x53313162, 0x3f15152a);
+	&_data_word(0x0c040408, 0x52c7c795, 0x65232346, 0x5ec3c39d);
+	&_data_word(0x28181830, 0xa1969637, 0x0f05050a, 0xb59a9a2f);
+	&_data_word(0x0907070e, 0x36121224, 0x9b80801b, 0x3de2e2df);
+	&_data_word(0x26ebebcd, 0x6927274e, 0xcdb2b27f, 0x9f7575ea);
+	&_data_word(0x1b090912, 0x9e83831d, 0x742c2c58, 0x2e1a1a34);
+	&_data_word(0x2d1b1b36, 0xb26e6edc, 0xee5a5ab4, 0xfba0a05b);
+	&_data_word(0xf65252a4, 0x4d3b3b76, 0x61d6d6b7, 0xceb3b37d);
+	&_data_word(0x7b292952, 0x3ee3e3dd, 0x712f2f5e, 0x97848413);
+	&_data_word(0xf55353a6, 0x68d1d1b9, 0x00000000, 0x2cededc1);
+	&_data_word(0x60202040, 0x1ffcfce3, 0xc8b1b179, 0xed5b5bb6);
+	&_data_word(0xbe6a6ad4, 0x46cbcb8d, 0xd9bebe67, 0x4b393972);
+	&_data_word(0xde4a4a94, 0xd44c4c98, 0xe85858b0, 0x4acfcf85);
+	&_data_word(0x6bd0d0bb, 0x2aefefc5, 0xe5aaaa4f, 0x16fbfbed);
+	&_data_word(0xc5434386, 0xd74d4d9a, 0x55333366, 0x94858511);
+	&_data_word(0xcf45458a, 0x10f9f9e9, 0x06020204, 0x817f7ffe);
+	&_data_word(0xf05050a0, 0x443c3c78, 0xba9f9f25, 0xe3a8a84b);
+	&_data_word(0xf35151a2, 0xfea3a35d, 0xc0404080, 0x8a8f8f05);
+	&_data_word(0xad92923f, 0xbc9d9d21, 0x48383870, 0x04f5f5f1);
+	&_data_word(0xdfbcbc63, 0xc1b6b677, 0x75dadaaf, 0x63212142);
+	&_data_word(0x30101020, 0x1affffe5, 0x0ef3f3fd, 0x6dd2d2bf);
+	&_data_word(0x4ccdcd81, 0x140c0c18, 0x35131326, 0x2fececc3);
+	&_data_word(0xe15f5fbe, 0xa2979735, 0xcc444488, 0x3917172e);
+	&_data_word(0x57c4c493, 0xf2a7a755, 0x827e7efc, 0x473d3d7a);
+	&_data_word(0xac6464c8, 0xe75d5dba, 0x2b191932, 0x957373e6);
+	&_data_word(0xa06060c0, 0x98818119, 0xd14f4f9e, 0x7fdcdca3);
+	&_data_word(0x66222244, 0x7e2a2a54, 0xab90903b, 0x8388880b);
+	&_data_word(0xca46468c, 0x29eeeec7, 0xd3b8b86b, 0x3c141428);
+	&_data_word(0x79dedea7, 0xe25e5ebc, 0x1d0b0b16, 0x76dbdbad);
+	&_data_word(0x3be0e0db, 0x56323264, 0x4e3a3a74, 0x1e0a0a14);
+	&_data_word(0xdb494992, 0x0a06060c, 0x6c242448, 0xe45c5cb8);
+	&_data_word(0x5dc2c29f, 0x6ed3d3bd, 0xefacac43, 0xa66262c4);
+	&_data_word(0xa8919139, 0xa4959531, 0x37e4e4d3, 0x8b7979f2);
+	&_data_word(0x32e7e7d5, 0x43c8c88b, 0x5937376e, 0xb76d6dda);
+	&_data_word(0x8c8d8d01, 0x64d5d5b1, 0xd24e4e9c, 0xe0a9a949);
+	&_data_word(0xb46c6cd8, 0xfa5656ac, 0x07f4f4f3, 0x25eaeacf);
+	&_data_word(0xaf6565ca, 0x8e7a7af4, 0xe9aeae47, 0x18080810);
+	&_data_word(0xd5baba6f, 0x887878f0, 0x6f25254a, 0x722e2e5c);
+	&_data_word(0x241c1c38, 0xf1a6a657, 0xc7b4b473, 0x51c6c697);
+	&_data_word(0x23e8e8cb, 0x7cdddda1, 0x9c7474e8, 0x211f1f3e);
+	&_data_word(0xdd4b4b96, 0xdcbdbd61, 0x868b8b0d, 0x858a8a0f);
+	&_data_word(0x907070e0, 0x423e3e7c, 0xc4b5b571, 0xaa6666cc);
+	&_data_word(0xd8484890, 0x05030306, 0x01f6f6f7, 0x120e0e1c);
+	&_data_word(0xa36161c2, 0x5f35356a, 0xf95757ae, 0xd0b9b969);
+	&_data_word(0x91868617, 0x58c1c199, 0x271d1d3a, 0xb99e9e27);
+	&_data_word(0x38e1e1d9, 0x13f8f8eb, 0xb398982b, 0x33111122);
+	&_data_word(0xbb6969d2, 0x70d9d9a9, 0x898e8e07, 0xa7949433);
+	&_data_word(0xb69b9b2d, 0x221e1e3c, 0x92878715, 0x20e9e9c9);
+	&_data_word(0x49cece87, 0xff5555aa, 0x78282850, 0x7adfdfa5);
+	&_data_word(0x8f8c8c03, 0xf8a1a159, 0x80898909, 0x170d0d1a);
+	&_data_word(0xdabfbf65, 0x31e6e6d7, 0xc6424284, 0xb86868d0);
+	&_data_word(0xc3414182, 0xb0999929, 0x772d2d5a, 0x110f0f1e);
+	&_data_word(0xcbb0b07b, 0xfc5454a8, 0xd6bbbb6d, 0x3a16162c);
+
+#Te4	# four copies of Te4 to choose from to avoid L1 aliasing
+	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+
+	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+
+	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+
+	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+#rcon:
+	&data_word(0x00000001, 0x00000002, 0x00000004, 0x00000008);
+	&data_word(0x00000010, 0x00000020, 0x00000040, 0x00000080);
+	&data_word(0x0000001b, 0x00000036, 0x00000000, 0x00000000);
+	&data_word(0x00000000, 0x00000000, 0x00000000, 0x00000000);
+&function_end_B("_x86_AES_encrypt");
+
+# void asm_AES_encrypt (const void *inp,void *out,const AES_KEY *key);
+&function_begin("asm_AES_encrypt");
+	&mov	($acc,&wparam(0));		# load inp
+	&mov	($key,&wparam(2));		# load key
+
+	&mov	($s0,"esp");
+	&sub	("esp",36);
+	&and	("esp",-64);			# align to cache-line
+
+	# place stack frame just "above" the key schedule
+	&lea	($s1,&DWP(-64-63,$key));
+	&sub	($s1,"esp");
+	&neg	($s1);
+	&and	($s1,0x3C0);	# modulo 1024, but aligned to cache-line
+	&sub	("esp",$s1);
+	&add	("esp",4);	# 4 is reserved for caller's return address
+	&mov	($_esp,$s0);			# save stack pointer
+
+	&call   (&label("pic_point"));          # make it PIC!
+	&set_label("pic_point");
+	&blindpop($tbl);
+	&picmeup($s0,"OPENSSL_ia32cap_P",$tbl,&label("pic_point")) if (!$x86only);
+	&lea    ($tbl,&DWP(&label("AES_Te")."-".&label("pic_point"),$tbl));
+
+	# pick Te4 copy which can't "overlap" with stack frame or key schedule
+	&lea	($s1,&DWP(768-4,"esp"));
+	&sub	($s1,$tbl);
+	&and	($s1,0x300);
+	&lea	($tbl,&DWP(2048+128,$tbl,$s1));
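+
+	# Rough note on the selection above: AES_Te below is followed by four
+	# 256-byte copies of Te4 at offsets 2048, 2304, 2560 and 2816; the
+	# (...)&0x300 term picks the copy whose position modulo 1KB roughly
+	# follows an address ~768 bytes past %esp, keeping the chosen copy
+	# clear of the stack frame and of the key schedule placed just below
+	# it, and the extra 128 biases $tbl for the -128..127 addressing used
+	# by the compact code paths.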
+
+					if (!$x86only) {
+	&bt	(&DWP(0,$s0),25);	# check for SSE bit
+	&jnc	(&label("x86"));
+
+	&movq	("mm0",&QWP(0,$acc));
+	&movq	("mm4",&QWP(8,$acc));
+	&call	("_sse_AES_encrypt_compact");
+	&mov	("esp",$_esp);			# restore stack pointer
+	&mov	($acc,&wparam(1));		# load out
+	&movq	(&QWP(0,$acc),"mm0");		# write output data
+	&movq	(&QWP(8,$acc),"mm4");
+	&emms	();
+	&function_end_A();
+					}
+	&set_label("x86",16);
+	&mov	($_tbl,$tbl);
+	&mov	($s0,&DWP(0,$acc));		# load input data
+	&mov	($s1,&DWP(4,$acc));
+	&mov	($s2,&DWP(8,$acc));
+	&mov	($s3,&DWP(12,$acc));
+	&call	("_x86_AES_encrypt_compact");
+	&mov	("esp",$_esp);			# restore stack pointer
+	&mov	($acc,&wparam(1));		# load out
+	&mov	(&DWP(0,$acc),$s0);		# write output data
+	&mov	(&DWP(4,$acc),$s1);
+	&mov	(&DWP(8,$acc),$s2);
+	&mov	(&DWP(12,$acc),$s3);
+&function_end("asm_AES_encrypt");
+
+#--------------------------------------------------------------------#
+
+######################################################################
+# "Compact" block function
+######################################################################
+
+sub deccompact()
+{ my $Fn = \&mov;
+  while ($#_>5) { pop(@_); $Fn=sub{}; }
+  my ($i,$td,@s)=@_;
+  my $tmp = $key;
+  my $out = $i==3?$s[0]:$acc;
+
+	# $Fn is used in the first compact rounds and its purpose is to
+	# skip restoring some values from the stack, so that after the
+	# 4x deccompact calls with the extra argument the $key, $s0 and
+	# $s1 values are still left there...
+	if($i==3)   {	&$Fn	($key,$__key);			}
+	else        {	&mov	($out,$s[0]);			}
+			&and	($out,0xFF);
+			&movz	($out,&BP(-128,$td,$out,1));
+
+	if ($i==3)  {	$tmp=$s[1];				}
+			&movz	($tmp,&HB($s[1]));
+			&movz	($tmp,&BP(-128,$td,$tmp,1));
+			&shl	($tmp,8);
+			&xor	($out,$tmp);
+
+	if ($i==3)  {	$tmp=$s[2]; &mov ($s[1],$acc);		}
+	else        {	&mov	($tmp,$s[2]);			}
+			&shr	($tmp,16);
+			&and	($tmp,0xFF);
+			&movz	($tmp,&BP(-128,$td,$tmp,1));
+			&shl	($tmp,16);
+			&xor	($out,$tmp);
+
+	if ($i==3)  {	$tmp=$s[3]; &$Fn ($s[2],$__s1);		}
+	else        {	&mov	($tmp,$s[3]);			}
+			&shr	($tmp,24);
+			&movz	($tmp,&BP(-128,$td,$tmp,1));
+			&shl	($tmp,24);
+			&xor	($out,$tmp);
+	if ($i<2)   {	&mov	(&DWP(4+4*$i,"esp"),$out);	}
+	if ($i==3)  {	&$Fn	($s[3],$__s0);			}
+}
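+
+# Minimal model of one deccompact() column (illustrative only, never called):
+# the "compact" path uses just the 256-byte inverse S-box (hence the -128 bias
+# in the &BP() loads above), so each output byte is a single byte lookup; the
+# InvMixColumns part of the round is applied afterwards by dectransform().
+sub _dec_compact_column_model {
+  my ($td4, $s0, $s1, $s2, $s3) = @_;	# $td4: reference to the 256-entry inverse S-box
+  return  $td4->[ $s0        & 0xff]
+	| ($td4->[($s1 >>  8) & 0xff] <<  8)
+	| ($td4->[($s2 >> 16) & 0xff] << 16)
+	| ($td4->[ $s3 >> 24        ] << 24);
+}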
+
+# must be called with 2,3,0,1 as argument sequence!!!
+sub dectransform()
+{ my @s = ($s0,$s1,$s2,$s3);
+  my $i = shift;
+  my $tmp = $key;
+  my $tp2 = @s[($i+2)%4]; $tp2 = @s[2] if ($i==1);
+  my $tp4 = @s[($i+3)%4]; $tp4 = @s[3] if ($i==1);
+  my $tp8 = $tbl;
+
+	&mov	($tmp,0x80808080);
+	&and	($tmp,$s[$i]);
+	&mov	($acc,$tmp);
+	&shr	($tmp,7);
+	&lea	($tp2,&DWP(0,$s[$i],$s[$i]));
+	&sub	($acc,$tmp);
+	&and	($tp2,0xfefefefe);
+	&and	($acc,0x1b1b1b1b);
+	&xor	($tp2,$acc);
+	&mov	($tmp,0x80808080);
+
+	&and	($tmp,$tp2);
+	&mov	($acc,$tmp);
+	&shr	($tmp,7);
+	&lea	($tp4,&DWP(0,$tp2,$tp2));
+	&sub	($acc,$tmp);
+	&and	($tp4,0xfefefefe);
+	&and	($acc,0x1b1b1b1b);
+	 &xor	($tp2,$s[$i]);	# tp2^tp1
+	&xor	($tp4,$acc);
+	&mov	($tmp,0x80808080);
+
+	&and	($tmp,$tp4);
+	&mov	($acc,$tmp);
+	&shr	($tmp,7);
+	&lea	($tp8,&DWP(0,$tp4,$tp4));
+	&sub	($acc,$tmp);
+	&and	($tp8,0xfefefefe);
+	&and	($acc,0x1b1b1b1b);
+	 &xor	($tp4,$s[$i]);	# tp4^tp1
+	 &rotl	($s[$i],8);	# = ROTATE(tp1,8)
+	&xor	($tp8,$acc);
+
+	&xor	($s[$i],$tp2);
+	&xor	($tp2,$tp8);
+	&xor	($s[$i],$tp4);
+	&xor	($tp4,$tp8);
+	&rotl	($tp2,24);
+	&xor	($s[$i],$tp8);	# ^= tp8^(tp4^tp1)^(tp2^tp1)
+	&rotl	($tp4,16);
+	&xor	($s[$i],$tp2);	# ^= ROTATE(tp8^tp2^tp1,24)
+	&rotl	($tp8,8);
+	&xor	($s[$i],$tp4);	# ^= ROTATE(tp8^tp4^tp1,16)
+	 &mov	($s[0],$__s0)			if($i==2); #prefetch $s0
+	 &mov	($s[1],$__s1)			if($i==3); #prefetch $s1
+	 &mov	($s[2],$__s2)			if($i==1);
+	&xor	($s[$i],$tp8);	# ^= ROTATE(tp8,8)
+
+	&mov	($s[3],$__s3)			if($i==1);
+	&mov	(&DWP(4+4*$i,"esp"),$s[$i])	if($i>=2);
+}
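+
+# The dectransform() code above repeatedly doubles all four bytes of a state
+# word in GF(2^8) to build tp2, tp4 and tp8 for InvMixColumns. A plain-Perl
+# sketch of that packed doubling step (illustrative only, not used here):
+sub _xtime_packed_model {
+  my ($w) = @_;					# four state bytes packed in one 32-bit word
+  my $hi   = $w & 0x80808080;			# high bit of every byte
+  my $mask = ($hi - ($hi >> 7)) & 0x1b1b1b1b;	# 0x1b where a byte overflows
+  return (($w << 1) & 0xfefefefe) ^ $mask;	# per-byte xtime, reduced mod the AES polynomial
+}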
+
+&function_begin_B("_x86_AES_decrypt_compact");
+	# note that caller is expected to allocate stack frame for me!
+	&mov	($__key,$key);			# save key
+
+	&xor	($s0,&DWP(0,$key));		# xor with key
+	&xor	($s1,&DWP(4,$key));
+	&xor	($s2,&DWP(8,$key));
+	&xor	($s3,&DWP(12,$key));
+
+	&mov	($acc,&DWP(240,$key));		# load key->rounds
+
+	&lea	($acc,&DWP(-2,$acc,$acc));
+	&lea	($acc,&DWP(0,$key,$acc,8));
+	&mov	($__end,$acc);			# end of key schedule
+
+	# prefetch Td4
+	&mov	($key,&DWP(0-128,$tbl));
+	&mov	($acc,&DWP(32-128,$tbl));
+	&mov	($key,&DWP(64-128,$tbl));
+	&mov	($acc,&DWP(96-128,$tbl));
+	&mov	($key,&DWP(128-128,$tbl));
+	&mov	($acc,&DWP(160-128,$tbl));
+	&mov	($key,&DWP(192-128,$tbl));
+	&mov	($acc,&DWP(224-128,$tbl));
+
+	&set_label("loop",16);
+
+		&deccompact(0,$tbl,$s0,$s3,$s2,$s1,1);
+		&deccompact(1,$tbl,$s1,$s0,$s3,$s2,1);
+		&deccompact(2,$tbl,$s2,$s1,$s0,$s3,1);
+		&deccompact(3,$tbl,$s3,$s2,$s1,$s0,1);
+		&dectransform(2);
+		&dectransform(3);
+		&dectransform(0);
+		&dectransform(1);
+		&mov 	($key,$__key);
+		&mov	($tbl,$__tbl);
+		&add	($key,16);		# advance rd_key
+		&xor	($s0,&DWP(0,$key));
+		&xor	($s1,&DWP(4,$key));
+		&xor	($s2,&DWP(8,$key));
+		&xor	($s3,&DWP(12,$key));
+
+	&cmp	($key,$__end);
+	&mov	($__key,$key);
+	&jb	(&label("loop"));
+
+	&deccompact(0,$tbl,$s0,$s3,$s2,$s1);
+	&deccompact(1,$tbl,$s1,$s0,$s3,$s2);
+	&deccompact(2,$tbl,$s2,$s1,$s0,$s3);
+	&deccompact(3,$tbl,$s3,$s2,$s1,$s0);
+
+	&xor	($s0,&DWP(16,$key));
+	&xor	($s1,&DWP(20,$key));
+	&xor	($s2,&DWP(24,$key));
+	&xor	($s3,&DWP(28,$key));
+
+	&ret	();
+&function_end_B("_x86_AES_decrypt_compact");
+
+######################################################################
+# "Compact" SSE block function.
+######################################################################
+
+sub sse_deccompact()
+{
+	&pshufw	("mm1","mm0",0x0c);		#  7, 6, 1, 0
+	&pshufw	("mm5","mm4",0x09);		# 13,12,11,10
+	&movd	("eax","mm1");			#  7, 6, 1, 0
+	&movd	("ebx","mm5");			# 13,12,11,10
+	&mov	($__key,$key);
+
+	&movz	($acc,&LB("eax"));		#  0
+	&movz	("edx",&HB("eax"));		#  1
+	&pshufw	("mm2","mm0",0x06);		#  3, 2, 5, 4
+	&movz	("ecx",&BP(-128,$tbl,$acc,1));	#  0
+	&movz	($key,&LB("ebx"));		# 10
+	&movz	("edx",&BP(-128,$tbl,"edx",1));	#  1
+	&shr	("eax",16);			#  7, 6
+	&shl	("edx",8);			#  1
+
+	&movz	($acc,&BP(-128,$tbl,$key,1));	# 10
+	&movz	($key,&HB("ebx"));		# 11
+	&shl	($acc,16);			# 10
+	&pshufw	("mm6","mm4",0x03);		# 9, 8,15,14
+	&or	("ecx",$acc);			# 10
+	&movz	($acc,&BP(-128,$tbl,$key,1));	# 11
+	&movz	($key,&HB("eax"));		#  7
+	&shl	($acc,24);			# 11
+	&shr	("ebx",16);			# 13,12
+	&or	("edx",$acc);			# 11
+
+	&movz	($acc,&BP(-128,$tbl,$key,1));	#  7
+	&movz	($key,&HB("ebx"));		# 13
+	&shl	($acc,24);			#  7
+	&or	("ecx",$acc);			#  7
+	&movz	($acc,&BP(-128,$tbl,$key,1));	# 13
+	&movz	($key,&LB("eax"));		#  6
+	&shl	($acc,8);			# 13
+	&movd	("eax","mm2");			#  3, 2, 5, 4
+	&or	("ecx",$acc);			# 13
+
+	&movz	($acc,&BP(-128,$tbl,$key,1));	#  6
+	&movz	($key,&LB("ebx"));		# 12
+	&shl	($acc,16);			#  6
+	&movd	("ebx","mm6");			#  9, 8,15,14
+	&movd	("mm0","ecx");			# t[0] collected
+	&movz	("ecx",&BP(-128,$tbl,$key,1));	# 12
+	&movz	($key,&LB("eax"));		#  4
+	&or	("ecx",$acc);			# 12
+
+	&movz	($acc,&BP(-128,$tbl,$key,1));	#  4
+	&movz	($key,&LB("ebx"));		# 14
+	&or	("edx",$acc);			#  4
+	&movz	($acc,&BP(-128,$tbl,$key,1));	# 14
+	&movz	($key,&HB("eax"));		#  5
+	&shl	($acc,16);			# 14
+	&shr	("eax",16);			#  3, 2
+	&or	("edx",$acc);			# 14
+
+	&movz	($acc,&BP(-128,$tbl,$key,1));	#  5
+	&movz	($key,&HB("ebx"));		# 15
+	&shr	("ebx",16);			#  9, 8
+	&shl	($acc,8);			#  5
+	&movd	("mm1","edx");			# t[1] collected
+	&movz	("edx",&BP(-128,$tbl,$key,1));	# 15
+	&movz	($key,&HB("ebx"));		#  9
+	&shl	("edx",24);			# 15
+	&and	("ebx",0xff);			#  8
+	&or	("edx",$acc);			# 15
+
+	&punpckldq	("mm0","mm1");		# t[0,1] collected
+
+	&movz	($acc,&BP(-128,$tbl,$key,1));	#  9
+	&movz	($key,&LB("eax"));		#  2
+	&shl	($acc,8);			#  9
+	&movz	("eax",&HB("eax"));		#  3
+	&movz	("ebx",&BP(-128,$tbl,"ebx",1));	#  8
+	&or	("ecx",$acc);			#  9
+	&movz	($acc,&BP(-128,$tbl,$key,1));	#  2
+	&or	("edx","ebx");			#  8
+	&shl	($acc,16);			#  2
+	&movz	("eax",&BP(-128,$tbl,"eax",1));	#  3
+	&or	("edx",$acc);			#  2
+	&shl	("eax",24);			#  3
+	&or	("ecx","eax");			#  3
+	&mov	($key,$__key);
+	&movd	("mm4","edx");			# t[2] collected
+	&movd	("mm5","ecx");			# t[3] collected
+
+	&punpckldq	("mm4","mm5");		# t[2,3] collected
+}
+
+					if (!$x86only) {
+&function_begin_B("_sse_AES_decrypt_compact");
+	&pxor	("mm0",&QWP(0,$key));	#  7, 6, 5, 4, 3, 2, 1, 0
+	&pxor	("mm4",&QWP(8,$key));	# 15,14,13,12,11,10, 9, 8
+
+	# note that caller is expected to allocate stack frame for me!
+	&mov	($acc,&DWP(240,$key));		# load key->rounds
+	&lea	($acc,&DWP(-2,$acc,$acc));
+	&lea	($acc,&DWP(0,$key,$acc,8));
+	&mov	($__end,$acc);			# end of key schedule
+
+	&mov	($s0,0x1b1b1b1b);		# magic constant
+	&mov	(&DWP(8,"esp"),$s0);
+	&mov	(&DWP(12,"esp"),$s0);
+
+	# prefetch Td4
+	&mov	($s0,&DWP(0-128,$tbl));
+	&mov	($s1,&DWP(32-128,$tbl));
+	&mov	($s2,&DWP(64-128,$tbl));
+	&mov	($s3,&DWP(96-128,$tbl));
+	&mov	($s0,&DWP(128-128,$tbl));
+	&mov	($s1,&DWP(160-128,$tbl));
+	&mov	($s2,&DWP(192-128,$tbl));
+	&mov	($s3,&DWP(224-128,$tbl));
+
+	&set_label("loop",16);
+		&sse_deccompact();
+		&add	($key,16);
+		&cmp	($key,$__end);
+		&ja	(&label("out"));
+
+		# ROTATE(x^y,N) == ROTATE(x,N)^ROTATE(y,N)
+		&movq	("mm3","mm0");		&movq	("mm7","mm4");
+		&movq	("mm2","mm0",1);	&movq	("mm6","mm4",1);
+		&movq	("mm1","mm0");		&movq	("mm5","mm4");
+		&pshufw	("mm0","mm0",0xb1);	&pshufw	("mm4","mm4",0xb1);# = ROTATE(tp0,16)
+		&pslld	("mm2",8);		&pslld	("mm6",8);
+		&psrld	("mm3",8);		&psrld	("mm7",8);
+		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");	# ^= tp0<<8
+		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= tp0>>8
+		&pslld	("mm2",16);		&pslld	("mm6",16);
+		&psrld	("mm3",16);		&psrld	("mm7",16);
+		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");	# ^= tp0<<24
+		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= tp0>>24
+
+		&movq	("mm3",&QWP(8,"esp"));
+		&pxor	("mm2","mm2");		&pxor	("mm6","mm6");
+		&pcmpgtb("mm2","mm1");		&pcmpgtb("mm6","mm5");
+		&pand	("mm2","mm3");		&pand	("mm6","mm3");
+		&paddb	("mm1","mm1");		&paddb	("mm5","mm5");
+		&pxor	("mm1","mm2");		&pxor	("mm5","mm6");	# tp2
+		&movq	("mm3","mm1");		&movq	("mm7","mm5");
+		&movq	("mm2","mm1");		&movq	("mm6","mm5");
+		&pxor	("mm0","mm1");		&pxor	("mm4","mm5");	# ^= tp2
+		&pslld	("mm3",24);		&pslld	("mm7",24);
+		&psrld	("mm2",8);		&psrld	("mm6",8);
+		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= tp2<<24
+		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");	# ^= tp2>>8
+
+		&movq	("mm2",&QWP(8,"esp"));
+		&pxor	("mm3","mm3");		&pxor	("mm7","mm7");
+		&pcmpgtb("mm3","mm1");		&pcmpgtb("mm7","mm5");
+		&pand	("mm3","mm2");		&pand	("mm7","mm2");
+		&paddb	("mm1","mm1");		&paddb	("mm5","mm5");
+		&pxor	("mm1","mm3");		&pxor	("mm5","mm7");	# tp4
+		&pshufw	("mm3","mm1",0xb1);	&pshufw	("mm7","mm5",0xb1);
+		&pxor	("mm0","mm1");		&pxor	("mm4","mm5");	# ^= tp4
+		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= ROTATE(tp4,16)
+
+		&pxor	("mm3","mm3");		&pxor	("mm7","mm7");
+		&pcmpgtb("mm3","mm1");		&pcmpgtb("mm7","mm5");
+		&pand	("mm3","mm2");		&pand	("mm7","mm2");
+		&paddb	("mm1","mm1");		&paddb	("mm5","mm5");
+		&pxor	("mm1","mm3");		&pxor	("mm5","mm7");	# tp8
+		&pxor	("mm0","mm1");		&pxor	("mm4","mm5");	# ^= tp8
+		&movq	("mm3","mm1");		&movq	("mm7","mm5");
+		&pshufw	("mm2","mm1",0xb1);	&pshufw	("mm6","mm5",0xb1);
+		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");	# ^= ROTATE(tp8,16)
+		&pslld	("mm1",8);		&pslld	("mm5",8);
+		&psrld	("mm3",8);		&psrld	("mm7",8);
+		&movq	("mm2",&QWP(0,$key));	&movq	("mm6",&QWP(8,$key));
+		&pxor	("mm0","mm1");		&pxor	("mm4","mm5");	# ^= tp8<<8
+		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= tp8>>8
+		&mov	($s0,&DWP(0-128,$tbl));
+		&pslld	("mm1",16);		&pslld	("mm5",16);
+		&mov	($s1,&DWP(64-128,$tbl));
+		&psrld	("mm3",16);		&psrld	("mm7",16);
+		&mov	($s2,&DWP(128-128,$tbl));
+		&pxor	("mm0","mm1");		&pxor	("mm4","mm5");	# ^= tp8<<24
+		&mov	($s3,&DWP(192-128,$tbl));
+		&pxor	("mm0","mm3");		&pxor	("mm4","mm7");	# ^= tp8>>24
+
+		&pxor	("mm0","mm2");		&pxor	("mm4","mm6");
+	&jmp	(&label("loop"));
+
+	&set_label("out",16);
+	&pxor	("mm0",&QWP(0,$key));
+	&pxor	("mm4",&QWP(8,$key));
+
+	&ret	();
+&function_end_B("_sse_AES_decrypt_compact");
+					}
+
+######################################################################
+# Vanilla block function.
+######################################################################
+
+sub decstep()
+{ my ($i,$td,@s) = @_;
+  my $tmp = $key;
+  my $out = $i==3?$s[0]:$acc;
+
+	# no instructions are reordered, as performance appears
+	# optimal... or rather, all attempts to reorder did not
+	# result in better performance [which, by the way, is not a
+	# bit lower than encryption].
+	if($i==3)   {	&mov	($key,$__key);			}
+	else        {	&mov	($out,$s[0]);			}
+			&and	($out,0xFF);
+			&mov	($out,&DWP(0,$td,$out,8));
+
+	if ($i==3)  {	$tmp=$s[1];				}
+			&movz	($tmp,&HB($s[1]));
+			&xor	($out,&DWP(3,$td,$tmp,8));
+
+	if ($i==3)  {	$tmp=$s[2]; &mov ($s[1],$acc);		}
+	else        {	&mov	($tmp,$s[2]);			}
+			&shr	($tmp,16);
+			&and	($tmp,0xFF);
+			&xor	($out,&DWP(2,$td,$tmp,8));
+
+	if ($i==3)  {	$tmp=$s[3]; &mov ($s[2],$__s1);		}
+	else        {	&mov	($tmp,$s[3]);			}
+			&shr	($tmp,24);
+			&xor	($out,&DWP(1,$td,$tmp,8));
+	if ($i<2)   {	&mov	(&DWP(4+4*$i,"esp"),$out);	}
+	if ($i==3)  {	&mov	($s[3],$__s0);			}
+			&comment();
+}
+
+sub declast()
+{ my ($i,$td,@s)=@_;
+  my $tmp = $key;
+  my $out = $i==3?$s[0]:$acc;
+
+	if($i==0)   {	&lea	($td,&DWP(2048+128,$td));
+			&mov	($tmp,&DWP(0-128,$td));
+			&mov	($acc,&DWP(32-128,$td));
+			&mov	($tmp,&DWP(64-128,$td));
+			&mov	($acc,&DWP(96-128,$td));
+			&mov	($tmp,&DWP(128-128,$td));
+			&mov	($acc,&DWP(160-128,$td));
+			&mov	($tmp,&DWP(192-128,$td));
+			&mov	($acc,&DWP(224-128,$td));
+			&lea	($td,&DWP(-128,$td));		}
+	if($i==3)   {	&mov	($key,$__key);			}
+	else        {	&mov	($out,$s[0]);			}
+			&and	($out,0xFF);
+			&movz	($out,&BP(0,$td,$out,1));
+
+	if ($i==3)  {	$tmp=$s[1];				}
+			&movz	($tmp,&HB($s[1]));
+			&movz	($tmp,&BP(0,$td,$tmp,1));
+			&shl	($tmp,8);
+			&xor	($out,$tmp);
+
+	if ($i==3)  {	$tmp=$s[2]; &mov ($s[1],$acc);		}
+	else        {	&mov	($tmp,$s[2]);			}
+			&shr	($tmp,16);
+			&and	($tmp,0xFF);
+			&movz	($tmp,&BP(0,$td,$tmp,1));
+			&shl	($tmp,16);
+			&xor	($out,$tmp);
+
+	if ($i==3)  {	$tmp=$s[3]; &mov ($s[2],$__s1);		}
+	else        {	&mov	($tmp,$s[3]);			}
+			&shr	($tmp,24);
+			&movz	($tmp,&BP(0,$td,$tmp,1));
+			&shl	($tmp,24);
+			&xor	($out,$tmp);
+	if ($i<2)   {	&mov	(&DWP(4+4*$i,"esp"),$out);	}
+	if ($i==3)  {	&mov	($s[3],$__s0);
+			&lea	($td,&DWP(-2048,$td));		}
+}
+
+&function_begin_B("_x86_AES_decrypt");
+	# note that caller is expected to allocate stack frame for me!
+	&mov	($__key,$key);			# save key
+
+	&xor	($s0,&DWP(0,$key));		# xor with key
+	&xor	($s1,&DWP(4,$key));
+	&xor	($s2,&DWP(8,$key));
+	&xor	($s3,&DWP(12,$key));
+
+	&mov	($acc,&DWP(240,$key));		# load key->rounds
+
+	if ($small_footprint) {
+	    &lea	($acc,&DWP(-2,$acc,$acc));
+	    &lea	($acc,&DWP(0,$key,$acc,8));
+	    &mov	($__end,$acc);		# end of key schedule
+	    &set_label("loop",16);
+		&decstep(0,$tbl,$s0,$s3,$s2,$s1);
+		&decstep(1,$tbl,$s1,$s0,$s3,$s2);
+		&decstep(2,$tbl,$s2,$s1,$s0,$s3);
+		&decstep(3,$tbl,$s3,$s2,$s1,$s0);
+		&add	($key,16);		# advance rd_key
+		&xor	($s0,&DWP(0,$key));
+		&xor	($s1,&DWP(4,$key));
+		&xor	($s2,&DWP(8,$key));
+		&xor	($s3,&DWP(12,$key));
+	    &cmp	($key,$__end);
+	    &mov	($__key,$key);
+	    &jb		(&label("loop"));
+	}
+	else {
+	    &cmp	($acc,10);
+	    &jle	(&label("10rounds"));
+	    &cmp	($acc,12);
+	    &jle	(&label("12rounds"));
+
+	&set_label("14rounds",4);
+	    for ($i=1;$i<3;$i++) {
+		&decstep(0,$tbl,$s0,$s3,$s2,$s1);
+		&decstep(1,$tbl,$s1,$s0,$s3,$s2);
+		&decstep(2,$tbl,$s2,$s1,$s0,$s3);
+		&decstep(3,$tbl,$s3,$s2,$s1,$s0);
+		&xor	($s0,&DWP(16*$i+0,$key));
+		&xor	($s1,&DWP(16*$i+4,$key));
+		&xor	($s2,&DWP(16*$i+8,$key));
+		&xor	($s3,&DWP(16*$i+12,$key));
+	    }
+	    &add	($key,32);
+	    &mov	($__key,$key);		# advance rd_key
+	&set_label("12rounds",4);
+	    for ($i=1;$i<3;$i++) {
+		&decstep(0,$tbl,$s0,$s3,$s2,$s1);
+		&decstep(1,$tbl,$s1,$s0,$s3,$s2);
+		&decstep(2,$tbl,$s2,$s1,$s0,$s3);
+		&decstep(3,$tbl,$s3,$s2,$s1,$s0);
+		&xor	($s0,&DWP(16*$i+0,$key));
+		&xor	($s1,&DWP(16*$i+4,$key));
+		&xor	($s2,&DWP(16*$i+8,$key));
+		&xor	($s3,&DWP(16*$i+12,$key));
+	    }
+	    &add	($key,32);
+	    &mov	($__key,$key);		# advance rd_key
+	&set_label("10rounds",4);
+	    for ($i=1;$i<10;$i++) {
+		&decstep(0,$tbl,$s0,$s3,$s2,$s1);
+		&decstep(1,$tbl,$s1,$s0,$s3,$s2);
+		&decstep(2,$tbl,$s2,$s1,$s0,$s3);
+		&decstep(3,$tbl,$s3,$s2,$s1,$s0);
+		&xor	($s0,&DWP(16*$i+0,$key));
+		&xor	($s1,&DWP(16*$i+4,$key));
+		&xor	($s2,&DWP(16*$i+8,$key));
+		&xor	($s3,&DWP(16*$i+12,$key));
+	    }
+	}
+
+	&declast(0,$tbl,$s0,$s3,$s2,$s1);
+	&declast(1,$tbl,$s1,$s0,$s3,$s2);
+	&declast(2,$tbl,$s2,$s1,$s0,$s3);
+	&declast(3,$tbl,$s3,$s2,$s1,$s0);
+
+	&add	($key,$small_footprint?16:160);
+	&xor	($s0,&DWP(0,$key));
+	&xor	($s1,&DWP(4,$key));
+	&xor	($s2,&DWP(8,$key));
+	&xor	($s3,&DWP(12,$key));
+
+	&ret	();
+
+&set_label("AES_Td",64);	# Yes! I keep it in the code segment!
+	&_data_word(0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a);
+	&_data_word(0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b);
+	&_data_word(0x55fa3020, 0xf66d76ad, 0x9176cc88, 0x254c02f5);
+	&_data_word(0xfcd7e54f, 0xd7cb2ac5, 0x80443526, 0x8fa362b5);
+	&_data_word(0x495ab1de, 0x671bba25, 0x980eea45, 0xe1c0fe5d);
+	&_data_word(0x02752fc3, 0x12f04c81, 0xa397468d, 0xc6f9d36b);
+	&_data_word(0xe75f8f03, 0x959c9215, 0xeb7a6dbf, 0xda595295);
+	&_data_word(0x2d83bed4, 0xd3217458, 0x2969e049, 0x44c8c98e);
+	&_data_word(0x6a89c275, 0x78798ef4, 0x6b3e5899, 0xdd71b927);
+	&_data_word(0xb64fe1be, 0x17ad88f0, 0x66ac20c9, 0xb43ace7d);
+	&_data_word(0x184adf63, 0x82311ae5, 0x60335197, 0x457f5362);
+	&_data_word(0xe07764b1, 0x84ae6bbb, 0x1ca081fe, 0x942b08f9);
+	&_data_word(0x58684870, 0x19fd458f, 0x876cde94, 0xb7f87b52);
+	&_data_word(0x23d373ab, 0xe2024b72, 0x578f1fe3, 0x2aab5566);
+	&_data_word(0x0728ebb2, 0x03c2b52f, 0x9a7bc586, 0xa50837d3);
+	&_data_word(0xf2872830, 0xb2a5bf23, 0xba6a0302, 0x5c8216ed);
+	&_data_word(0x2b1ccf8a, 0x92b479a7, 0xf0f207f3, 0xa1e2694e);
+	&_data_word(0xcdf4da65, 0xd5be0506, 0x1f6234d1, 0x8afea6c4);
+	&_data_word(0x9d532e34, 0xa055f3a2, 0x32e18a05, 0x75ebf6a4);
+	&_data_word(0x39ec830b, 0xaaef6040, 0x069f715e, 0x51106ebd);
+	&_data_word(0xf98a213e, 0x3d06dd96, 0xae053edd, 0x46bde64d);
+	&_data_word(0xb58d5491, 0x055dc471, 0x6fd40604, 0xff155060);
+	&_data_word(0x24fb9819, 0x97e9bdd6, 0xcc434089, 0x779ed967);
+	&_data_word(0xbd42e8b0, 0x888b8907, 0x385b19e7, 0xdbeec879);
+	&_data_word(0x470a7ca1, 0xe90f427c, 0xc91e84f8, 0x00000000);
+	&_data_word(0x83868009, 0x48ed2b32, 0xac70111e, 0x4e725a6c);
+	&_data_word(0xfbff0efd, 0x5638850f, 0x1ed5ae3d, 0x27392d36);
+	&_data_word(0x64d90f0a, 0x21a65c68, 0xd1545b9b, 0x3a2e3624);
+	&_data_word(0xb1670a0c, 0x0fe75793, 0xd296eeb4, 0x9e919b1b);
+	&_data_word(0x4fc5c080, 0xa220dc61, 0x694b775a, 0x161a121c);
+	&_data_word(0x0aba93e2, 0xe52aa0c0, 0x43e0223c, 0x1d171b12);
+	&_data_word(0x0b0d090e, 0xadc78bf2, 0xb9a8b62d, 0xc8a91e14);
+	&_data_word(0x8519f157, 0x4c0775af, 0xbbdd99ee, 0xfd607fa3);
+	&_data_word(0x9f2601f7, 0xbcf5725c, 0xc53b6644, 0x347efb5b);
+	&_data_word(0x7629438b, 0xdcc623cb, 0x68fcedb6, 0x63f1e4b8);
+	&_data_word(0xcadc31d7, 0x10856342, 0x40229713, 0x2011c684);
+	&_data_word(0x7d244a85, 0xf83dbbd2, 0x1132f9ae, 0x6da129c7);
+	&_data_word(0x4b2f9e1d, 0xf330b2dc, 0xec52860d, 0xd0e3c177);
+	&_data_word(0x6c16b32b, 0x99b970a9, 0xfa489411, 0x2264e947);
+	&_data_word(0xc48cfca8, 0x1a3ff0a0, 0xd82c7d56, 0xef903322);
+	&_data_word(0xc74e4987, 0xc1d138d9, 0xfea2ca8c, 0x360bd498);
+	&_data_word(0xcf81f5a6, 0x28de7aa5, 0x268eb7da, 0xa4bfad3f);
+	&_data_word(0xe49d3a2c, 0x0d927850, 0x9bcc5f6a, 0x62467e54);
+	&_data_word(0xc2138df6, 0xe8b8d890, 0x5ef7392e, 0xf5afc382);
+	&_data_word(0xbe805d9f, 0x7c93d069, 0xa92dd56f, 0xb31225cf);
+	&_data_word(0x3b99acc8, 0xa77d1810, 0x6e639ce8, 0x7bbb3bdb);
+	&_data_word(0x097826cd, 0xf418596e, 0x01b79aec, 0xa89a4f83);
+	&_data_word(0x656e95e6, 0x7ee6ffaa, 0x08cfbc21, 0xe6e815ef);
+	&_data_word(0xd99be7ba, 0xce366f4a, 0xd4099fea, 0xd67cb029);
+	&_data_word(0xafb2a431, 0x31233f2a, 0x3094a5c6, 0xc066a235);
+	&_data_word(0x37bc4e74, 0xa6ca82fc, 0xb0d090e0, 0x15d8a733);
+	&_data_word(0x4a9804f1, 0xf7daec41, 0x0e50cd7f, 0x2ff69117);
+	&_data_word(0x8dd64d76, 0x4db0ef43, 0x544daacc, 0xdf0496e4);
+	&_data_word(0xe3b5d19e, 0x1b886a4c, 0xb81f2cc1, 0x7f516546);
+	&_data_word(0x04ea5e9d, 0x5d358c01, 0x737487fa, 0x2e410bfb);
+	&_data_word(0x5a1d67b3, 0x52d2db92, 0x335610e9, 0x1347d66d);
+	&_data_word(0x8c61d79a, 0x7a0ca137, 0x8e14f859, 0x893c13eb);
+	&_data_word(0xee27a9ce, 0x35c961b7, 0xede51ce1, 0x3cb1477a);
+	&_data_word(0x59dfd29c, 0x3f73f255, 0x79ce1418, 0xbf37c773);
+	&_data_word(0xeacdf753, 0x5baafd5f, 0x146f3ddf, 0x86db4478);
+	&_data_word(0x81f3afca, 0x3ec468b9, 0x2c342438, 0x5f40a3c2);
+	&_data_word(0x72c31d16, 0x0c25e2bc, 0x8b493c28, 0x41950dff);
+	&_data_word(0x7101a839, 0xdeb30c08, 0x9ce4b4d8, 0x90c15664);
+	&_data_word(0x6184cb7b, 0x70b632d5, 0x745c6c48, 0x4257b8d0);
+
+#Td4:	# four copies of Td4 to choose from to avoid L1 aliasing
+	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+
+	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+
+	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+
+	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+&function_end_B("_x86_AES_decrypt");
+
+# void asm_AES_decrypt (const void *inp,void *out,const AES_KEY *key);
+&function_begin("asm_AES_decrypt");
+	&mov	($acc,&wparam(0));		# load inp
+	&mov	($key,&wparam(2));		# load key
+
+	&mov	($s0,"esp");
+	&sub	("esp",36);
+	&and	("esp",-64);			# align to cache-line
+
+	# place stack frame just "above" the key schedule
+	&lea	($s1,&DWP(-64-63,$key));
+	&sub	($s1,"esp");
+	&neg	($s1);
+	&and	($s1,0x3C0);	# modulo 1024, but aligned to cache-line
+	&sub	("esp",$s1);
+	&add	("esp",4);	# 4 is reserved for caller's return address
+	&mov	($_esp,$s0);	# save stack pointer
+
+	&call   (&label("pic_point"));          # make it PIC!
+	&set_label("pic_point");
+	&blindpop($tbl);
+	&picmeup($s0,"OPENSSL_ia32cap_P",$tbl,&label("pic_point")) if(!$x86only);
+	&lea    ($tbl,&DWP(&label("AES_Td")."-".&label("pic_point"),$tbl));
+
+	# pick Td4 copy which can't "overlap" with stack frame or key schedule
+	&lea	($s1,&DWP(768-4,"esp"));
+	&sub	($s1,$tbl);
+	&and	($s1,0x300);
+	&lea	($tbl,&DWP(2048+128,$tbl,$s1));
+
+					if (!$x86only) {
+	&bt	(&DWP(0,$s0),25);	# check for SSE bit
+	&jnc	(&label("x86"));
+
+	&movq	("mm0",&QWP(0,$acc));
+	&movq	("mm4",&QWP(8,$acc));
+	&call	("_sse_AES_decrypt_compact");
+	&mov	("esp",$_esp);			# restore stack pointer
+	&mov	($acc,&wparam(1));		# load out
+	&movq	(&QWP(0,$acc),"mm0");		# write output data
+	&movq	(&QWP(8,$acc),"mm4");
+	&emms	();
+	&function_end_A();
+					}
+	&set_label("x86",16);
+	&mov	($_tbl,$tbl);
+	&mov	($s0,&DWP(0,$acc));		# load input data
+	&mov	($s1,&DWP(4,$acc));
+	&mov	($s2,&DWP(8,$acc));
+	&mov	($s3,&DWP(12,$acc));
+	&call	("_x86_AES_decrypt_compact");
+	&mov	("esp",$_esp);			# restore stack pointer
+	&mov	($acc,&wparam(1));		# load out
+	&mov	(&DWP(0,$acc),$s0);		# write output data
+	&mov	(&DWP(4,$acc),$s1);
+	&mov	(&DWP(8,$acc),$s2);
+	&mov	(&DWP(12,$acc),$s3);
+&function_end("asm_AES_decrypt");
+
+# void asm_AES_cbc_encrypt (const unsigned char *inp, unsigned char *out,
+#			    size_t length, const AES_KEY *key,
+#			    unsigned char *ivp,const int enc);
+{
+# stack frame layout
+#             -4(%esp)		# return address	 0(%esp)
+#              0(%esp)		# s0 backing store	 4(%esp)
+#              4(%esp)		# s1 backing store	 8(%esp)
+#              8(%esp)		# s2 backing store	12(%esp)
+#             12(%esp)		# s3 backing store	16(%esp)
+#             16(%esp)		# key backup		20(%esp)
+#             20(%esp)		# end of key schedule	24(%esp)
+#             24(%esp)		# %ebp backup		28(%esp)
+#             28(%esp)		# %esp backup
+my $_inp=&DWP(32,"esp");	# copy of wparam(0)
+my $_out=&DWP(36,"esp");	# copy of wparam(1)
+my $_len=&DWP(40,"esp");	# copy of wparam(2)
+my $_key=&DWP(44,"esp");	# copy of wparam(3)
+my $_ivp=&DWP(48,"esp");	# copy of wparam(4)
+my $_tmp=&DWP(52,"esp");	# volatile variable
+#
+my $ivec=&DWP(60,"esp");	# ivec[16]
+my $aes_key=&DWP(76,"esp");	# copy of aes_key
+my $mark=&DWP(76+240,"esp");	# copy of aes_key->rounds
+
+&function_begin("asm_AES_cbc_encrypt");
+	&mov	($s2 eq "ecx"? $s2 : "",&wparam(2));	# load len
+	&cmp	($s2,0);
+	&je	(&label("drop_out"));
+
+	&call   (&label("pic_point"));		# make it PIC!
+	&set_label("pic_point");
+	&blindpop($tbl);
+	&picmeup($s0,"OPENSSL_ia32cap_P",$tbl,&label("pic_point")) if(!$x86only);
+
+	&cmp	(&wparam(5),0);
+	&lea    ($tbl,&DWP(&label("AES_Te")."-".&label("pic_point"),$tbl));
+	&jne	(&label("picked_te"));
+	&lea	($tbl,&DWP(&label("AES_Td")."-".&label("AES_Te"),$tbl));
+	&set_label("picked_te");
+
+	# one can argue whether this is required
+	&pushf	();
+	&cld	();
+
+	&cmp	($s2,$speed_limit);
+	&jb	(&label("slow_way"));
+	&test	($s2,15);
+	&jnz	(&label("slow_way"));
+					if (!$x86only) {
+	&bt	(&DWP(0,$s0),28);	# check for hyper-threading bit
+	&jc	(&label("slow_way"));
+					}
+	# pre-allocate aligned stack frame...
+	&lea	($acc,&DWP(-80-244,"esp"));
+	&and	($acc,-64);
+
+	# ... and make sure it doesn't alias with $tbl modulo 4096
+	&mov	($s0,$tbl);
+	&lea	($s1,&DWP(2048+256,$tbl));
+	&mov	($s3,$acc);
+	&and	($s0,0xfff);		# s = %ebp&0xfff
+	&and	($s1,0xfff);		# e = (%ebp+2048+256)&0xfff
+	&and	($s3,0xfff);		# p = %esp&0xfff
+
+	&cmp	($s3,$s1);		# if (p>=e) %esp -= (p-e);
+	&jb	(&label("tbl_break_out"));
+	&sub	($s3,$s1);
+	&sub	($acc,$s3);
+	&jmp	(&label("tbl_ok"));
+	&set_label("tbl_break_out",4);	# else %esp -= (p-s)&0xfff + framesz;
+	&sub	($s3,$s0);
+	&and	($s3,0xfff);
+	&add	($s3,384);
+	&sub	($acc,$s3);
+	&set_label("tbl_ok",4);
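+
+	# A plain-Perl restatement of the adjustment above (illustrative only):
+	# s, e and p are the page offsets of the start of the tables, of their
+	# end (+2048+256) and of the candidate frame; the frame is lowered
+	# until its page offset no longer falls inside the tables' range.
+	sub _tbl_alias_model {
+	  my ($frame, $tbl) = @_;
+	  my ($s, $e, $p) = ($tbl & 0xfff, ($tbl+2048+256) & 0xfff, $frame & 0xfff);
+	  if ($p >= $e)	{ $frame -= $p - $e; }
+	  else		{ $frame -= (($p - $s) & 0xfff) + 384; }
+	  return $frame;
+	}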
+
+	&lea	($s3,&wparam(0));	# obtain pointer to parameter block
+	&exch	("esp",$acc);		# allocate stack frame
+	&add	("esp",4);		# reserve for return address!
+	&mov	($_tbl,$tbl);		# save %ebp
+	&mov	($_esp,$acc);		# save %esp
+
+	&mov	($s0,&DWP(0,$s3));	# load inp
+	&mov	($s1,&DWP(4,$s3));	# load out
+	#&mov	($s2,&DWP(8,$s3));	# load len
+	&mov	($key,&DWP(12,$s3));	# load key
+	&mov	($acc,&DWP(16,$s3));	# load ivp
+	&mov	($s3,&DWP(20,$s3));	# load enc flag
+
+	&mov	($_inp,$s0);		# save copy of inp
+	&mov	($_out,$s1);		# save copy of out
+	&mov	($_len,$s2);		# save copy of len
+	&mov	($_key,$key);		# save copy of key
+	&mov	($_ivp,$acc);		# save copy of ivp
+
+	&mov	($mark,0);		# copy of aes_key->rounds = 0;
+	# do we copy key schedule to stack?
+	&mov	($s1 eq "ebx" ? $s1 : "",$key);
+	&mov	($s2 eq "ecx" ? $s2 : "",244/4);
+	&sub	($s1,$tbl);
+	&mov	("esi",$key);
+	&and	($s1,0xfff);
+	&lea	("edi",$aes_key);
+	&cmp	($s1,2048+256);
+	&jb	(&label("do_copy"));
+	&cmp	($s1,4096-244);
+	&jb	(&label("skip_copy"));
+	&set_label("do_copy",4);
+		&mov	($_key,"edi");
+		&data_word(0xA5F3F689);	# rep movsd
+	&set_label("skip_copy");
+
+	&mov	($key,16);
+	&set_label("prefetch_tbl",4);
+		&mov	($s0,&DWP(0,$tbl));
+		&mov	($s1,&DWP(32,$tbl));
+		&mov	($s2,&DWP(64,$tbl));
+		&mov	($acc,&DWP(96,$tbl));
+		&lea	($tbl,&DWP(128,$tbl));
+		&sub	($key,1);
+	&jnz	(&label("prefetch_tbl"));
+	&sub	($tbl,2048);
+
+	&mov	($acc,$_inp);
+	&mov	($key,$_ivp);
+
+	&cmp	($s3,0);
+	&je	(&label("fast_decrypt"));
+
+#----------------------------- ENCRYPT -----------------------------#
+	&mov	($s0,&DWP(0,$key));		# load iv
+	&mov	($s1,&DWP(4,$key));
+
+	&set_label("fast_enc_loop",16);
+		&mov	($s2,&DWP(8,$key));
+		&mov	($s3,&DWP(12,$key));
+
+		&xor	($s0,&DWP(0,$acc));	# xor input data
+		&xor	($s1,&DWP(4,$acc));
+		&xor	($s2,&DWP(8,$acc));
+		&xor	($s3,&DWP(12,$acc));
+
+		&mov	($key,$_key);		# load key
+		&call	("_x86_AES_encrypt");
+
+		&mov	($acc,$_inp);		# load inp
+		&mov	($key,$_out);		# load out
+
+		&mov	(&DWP(0,$key),$s0);	# save output data
+		&mov	(&DWP(4,$key),$s1);
+		&mov	(&DWP(8,$key),$s2);
+		&mov	(&DWP(12,$key),$s3);
+
+		&lea	($acc,&DWP(16,$acc));	# advance inp
+		&mov	($s2,$_len);		# load len
+		&mov	($_inp,$acc);		# save inp
+		&lea	($s3,&DWP(16,$key));	# advance out
+		&mov	($_out,$s3);		# save out
+		&sub	($s2,16);		# decrease len
+		&mov	($_len,$s2);		# save len
+	&jnz	(&label("fast_enc_loop"));
+	&mov	($acc,$_ivp);		# load ivp
+	&mov	($s2,&DWP(8,$key));	# restore last 2 dwords
+	&mov	($s3,&DWP(12,$key));
+	&mov	(&DWP(0,$acc),$s0);	# save ivec
+	&mov	(&DWP(4,$acc),$s1);
+	&mov	(&DWP(8,$acc),$s2);
+	&mov	(&DWP(12,$acc),$s3);
+
+	&cmp	($mark,0);		# was the key schedule copied?
+	&mov	("edi",$_key);
+	&je	(&label("skip_ezero"));
+	# zero copy of key schedule
+	&mov	("ecx",240/4);
+	&xor	("eax","eax");
+	&align	(4);
+	&data_word(0xABF3F689);		# rep stosd
+	&set_label("skip_ezero");
+	&mov	("esp",$_esp);
+	&popf	();
+    &set_label("drop_out");
+	&function_end_A();
+	&pushf	();			# kludge, never executed
+
+#----------------------------- DECRYPT -----------------------------#
+&set_label("fast_decrypt",16);
+
+	&cmp	($acc,$_out);
+	&je	(&label("fast_dec_in_place"));	# in-place processing...
+
+	&mov	($_tmp,$key);
+
+	&align	(4);
+	&set_label("fast_dec_loop",16);
+		&mov	($s0,&DWP(0,$acc));	# read input
+		&mov	($s1,&DWP(4,$acc));
+		&mov	($s2,&DWP(8,$acc));
+		&mov	($s3,&DWP(12,$acc));
+
+		&mov	($key,$_key);		# load key
+		&call	("_x86_AES_decrypt");
+
+		&mov	($key,$_tmp);		# load ivp
+		&mov	($acc,$_len);		# load len
+		&xor	($s0,&DWP(0,$key));	# xor iv
+		&xor	($s1,&DWP(4,$key));
+		&xor	($s2,&DWP(8,$key));
+		&xor	($s3,&DWP(12,$key));
+
+		&mov	($key,$_out);		# load out
+		&mov	($acc,$_inp);		# load inp
+
+		&mov	(&DWP(0,$key),$s0);	# write output
+		&mov	(&DWP(4,$key),$s1);
+		&mov	(&DWP(8,$key),$s2);
+		&mov	(&DWP(12,$key),$s3);
+
+		&mov	($s2,$_len);		# load len
+		&mov	($_tmp,$acc);		# save ivp
+		&lea	($acc,&DWP(16,$acc));	# advance inp
+		&mov	($_inp,$acc);		# save inp
+		&lea	($key,&DWP(16,$key));	# advance out
+		&mov	($_out,$key);		# save out
+		&sub	($s2,16);		# decrease len
+		&mov	($_len,$s2);		# save len
+	&jnz	(&label("fast_dec_loop"));
+	&mov	($key,$_tmp);		# load temp ivp
+	&mov	($acc,$_ivp);		# load user ivp
+	&mov	($s0,&DWP(0,$key));	# load iv
+	&mov	($s1,&DWP(4,$key));
+	&mov	($s2,&DWP(8,$key));
+	&mov	($s3,&DWP(12,$key));
+	&mov	(&DWP(0,$acc),$s0);	# copy back to user
+	&mov	(&DWP(4,$acc),$s1);
+	&mov	(&DWP(8,$acc),$s2);
+	&mov	(&DWP(12,$acc),$s3);
+	&jmp	(&label("fast_dec_out"));
+
+    &set_label("fast_dec_in_place",16);
+	&set_label("fast_dec_in_place_loop");
+		&mov	($s0,&DWP(0,$acc));	# read input
+		&mov	($s1,&DWP(4,$acc));
+		&mov	($s2,&DWP(8,$acc));
+		&mov	($s3,&DWP(12,$acc));
+
+		&lea	($key,$ivec);
+		&mov	(&DWP(0,$key),$s0);	# copy to temp
+		&mov	(&DWP(4,$key),$s1);
+		&mov	(&DWP(8,$key),$s2);
+		&mov	(&DWP(12,$key),$s3);
+
+		&mov	($key,$_key);		# load key
+		&call	("_x86_AES_decrypt");
+
+		&mov	($key,$_ivp);		# load ivp
+		&mov	($acc,$_out);		# load out
+		&xor	($s0,&DWP(0,$key));	# xor iv
+		&xor	($s1,&DWP(4,$key));
+		&xor	($s2,&DWP(8,$key));
+		&xor	($s3,&DWP(12,$key));
+
+		&mov	(&DWP(0,$acc),$s0);	# write output
+		&mov	(&DWP(4,$acc),$s1);
+		&mov	(&DWP(8,$acc),$s2);
+		&mov	(&DWP(12,$acc),$s3);
+
+		&lea	($acc,&DWP(16,$acc));	# advance out
+		&mov	($_out,$acc);		# save out
+
+		&lea	($acc,$ivec);
+		&mov	($s0,&DWP(0,$acc));	# read temp
+		&mov	($s1,&DWP(4,$acc));
+		&mov	($s2,&DWP(8,$acc));
+		&mov	($s3,&DWP(12,$acc));
+
+		&mov	(&DWP(0,$key),$s0);	# copy iv
+		&mov	(&DWP(4,$key),$s1);
+		&mov	(&DWP(8,$key),$s2);
+		&mov	(&DWP(12,$key),$s3);
+
+		&mov	($acc,$_inp);		# load inp
+		&mov	($s2,$_len);		# load len
+		&lea	($acc,&DWP(16,$acc));	# advance inp
+		&mov	($_inp,$acc);		# save inp
+		&sub	($s2,16);		# decrease len
+		&mov	($_len,$s2);		# save len
+	&jnz	(&label("fast_dec_in_place_loop"));
+
+    &set_label("fast_dec_out",4);
+	&cmp	($mark,0);		# was the key schedule copied?
+	&mov	("edi",$_key);
+	&je	(&label("skip_dzero"));
+	# zero copy of key schedule
+	&mov	("ecx",240/4);
+	&xor	("eax","eax");
+	&align	(4);
+	&data_word(0xABF3F689);		# rep stosd
+	&set_label("skip_dzero");
+	&mov	("esp",$_esp);
+	&popf	();
+	&function_end_A();
+	&pushf	();			# kludge, never executed
+
+#--------------------------- SLOW ROUTINE ---------------------------#
+&set_label("slow_way",16);
+
+	&mov	($s0,&DWP(0,$s0)) if (!$x86only);# load OPENSSL_ia32cap
+	&mov	($key,&wparam(3));	# load key
+
+	# pre-allocate aligned stack frame...
+	&lea	($acc,&DWP(-80,"esp"));
+	&and	($acc,-64);
+
+	# ... and make sure it doesn't alias with $key modulo 1024
+	&lea	($s1,&DWP(-80-63,$key));
+	&sub	($s1,$acc);
+	&neg	($s1);
+	&and	($s1,0x3C0);	# modulo 1024, but aligned to cache-line
+	&sub	($acc,$s1);
+
+	# pick S-box copy which can't overlap with stack frame or $key
+	&lea	($s1,&DWP(768,$acc));
+	&sub	($s1,$tbl);
+	&and	($s1,0x300);
+	&lea	($tbl,&DWP(2048+128,$tbl,$s1));
+
+	&lea	($s3,&wparam(0));	# pointer to parameter block
+
+	&exch	("esp",$acc);
+	&add	("esp",4);		# reserve for return address!
+	&mov	($_tbl,$tbl);		# save %ebp
+	&mov	($_esp,$acc);		# save %esp
+	&mov	($_tmp,$s0);		# save OPENSSL_ia32cap
+
+	&mov	($s0,&DWP(0,$s3));	# load inp
+	&mov	($s1,&DWP(4,$s3));	# load out
+	#&mov	($s2,&DWP(8,$s3));	# load len
+	#&mov	($key,&DWP(12,$s3));	# load key
+	&mov	($acc,&DWP(16,$s3));	# load ivp
+	&mov	($s3,&DWP(20,$s3));	# load enc flag
+
+	&mov	($_inp,$s0);		# save copy of inp
+	&mov	($_out,$s1);		# save copy of out
+	&mov	($_len,$s2);		# save copy of len
+	&mov	($_key,$key);		# save copy of key
+	&mov	($_ivp,$acc);		# save copy of ivp
+
+	&mov	($key,$acc);
+	&mov	($acc,$s0);
+
+	&cmp	($s3,0);
+	&je	(&label("slow_decrypt"));
+
+#--------------------------- SLOW ENCRYPT ---------------------------#
+	&cmp	($s2,16);
+	&mov	($s3,$s1);
+	&jb	(&label("slow_enc_tail"));
+
+					if (!$x86only) {
+	&bt	($_tmp,25);		# check for SSE bit
+	&jnc	(&label("slow_enc_x86"));
+
+	&movq	("mm0",&QWP(0,$key));	# load iv
+	&movq	("mm4",&QWP(8,$key));
+
+	&set_label("slow_enc_loop_sse",16);
+		&pxor	("mm0",&QWP(0,$acc));	# xor input data
+		&pxor	("mm4",&QWP(8,$acc));
+
+		&mov	($key,$_key);
+		&call	("_sse_AES_encrypt_compact");
+
+		&mov	($acc,$_inp);		# load inp
+		&mov	($key,$_out);		# load out
+		&mov	($s2,$_len);		# load len
+
+		&movq	(&QWP(0,$key),"mm0");	# save output data
+		&movq	(&QWP(8,$key),"mm4");
+
+		&lea	($acc,&DWP(16,$acc));	# advance inp
+		&mov	($_inp,$acc);		# save inp
+		&lea	($s3,&DWP(16,$key));	# advance out
+		&mov	($_out,$s3);		# save out
+		&sub	($s2,16);		# decrease len
+		&cmp	($s2,16);
+		&mov	($_len,$s2);		# save len
+	&jae	(&label("slow_enc_loop_sse"));
+	&test	($s2,15);
+	&jnz	(&label("slow_enc_tail"));
+	&mov	($acc,$_ivp);		# load ivp
+	&movq	(&QWP(0,$acc),"mm0");	# save ivec
+	&movq	(&QWP(8,$acc),"mm4");
+	&emms	();
+	&mov	("esp",$_esp);
+	&popf	();
+	&function_end_A();
+	&pushf	();			# kludge, never executed
+					}
+    &set_label("slow_enc_x86",16);
+	&mov	($s0,&DWP(0,$key));	# load iv
+	&mov	($s1,&DWP(4,$key));
+
+	&set_label("slow_enc_loop_x86",4);
+		&mov	($s2,&DWP(8,$key));
+		&mov	($s3,&DWP(12,$key));
+
+		&xor	($s0,&DWP(0,$acc));	# xor input data
+		&xor	($s1,&DWP(4,$acc));
+		&xor	($s2,&DWP(8,$acc));
+		&xor	($s3,&DWP(12,$acc));
+
+		&mov	($key,$_key);		# load key
+		&call	("_x86_AES_encrypt_compact");
+
+		&mov	($acc,$_inp);		# load inp
+		&mov	($key,$_out);		# load out
+
+		&mov	(&DWP(0,$key),$s0);	# save output data
+		&mov	(&DWP(4,$key),$s1);
+		&mov	(&DWP(8,$key),$s2);
+		&mov	(&DWP(12,$key),$s3);
+
+		&mov	($s2,$_len);		# load len
+		&lea	($acc,&DWP(16,$acc));	# advance inp
+		&mov	($_inp,$acc);		# save inp
+		&lea	($s3,&DWP(16,$key));	# advance out
+		&mov	($_out,$s3);		# save out
+		&sub	($s2,16);		# decrease len
+		&cmp	($s2,16);
+		&mov	($_len,$s2);		# save len
+	&jae	(&label("slow_enc_loop_x86"));
+	&test	($s2,15);
+	&jnz	(&label("slow_enc_tail"));
+	&mov	($acc,$_ivp);		# load ivp
+	&mov	($s2,&DWP(8,$key));	# restore last dwords
+	&mov	($s3,&DWP(12,$key));
+	&mov	(&DWP(0,$acc),$s0);	# save ivec
+	&mov	(&DWP(4,$acc),$s1);
+	&mov	(&DWP(8,$acc),$s2);
+	&mov	(&DWP(12,$acc),$s3);
+
+	&mov	("esp",$_esp);
+	&popf	();
+	&function_end_A();
+	&pushf	();			# kludge, never executed
+
+    &set_label("slow_enc_tail",16);
+	&emms	()	if (!$x86only);
+	&mov	($key eq "edi"? $key:"",$s3);	# load out to edi
+	&mov	($s1,16);
+	&sub	($s1,$s2);
+	&cmp	($key,$acc eq "esi"? $acc:"");	# compare with inp
+	&je	(&label("enc_in_place"));
+	&align	(4);
+	&data_word(0xA4F3F689);	# rep movsb	# copy input
+	&jmp	(&label("enc_skip_in_place"));
+    &set_label("enc_in_place");
+	&lea	($key,&DWP(0,$key,$s2));
+    &set_label("enc_skip_in_place");
+	&mov	($s2,$s1);
+	&xor	($s0,$s0);
+	&align	(4);
+	&data_word(0xAAF3F689);	# rep stosb	# zero tail
+
+	&mov	($key,$_ivp);			# restore ivp
+	&mov	($acc,$s3);			# output as input
+	&mov	($s0,&DWP(0,$key));
+	&mov	($s1,&DWP(4,$key));
+	&mov	($_len,16);			# len=16
+	&jmp	(&label("slow_enc_loop_x86"));	# one more spin...
+
+#--------------------------- SLOW DECRYPT ---------------------------#
+&set_label("slow_decrypt",16);
+					if (!$x86only) {
+	&bt	($_tmp,25);		# check for SSE bit
+	&jnc	(&label("slow_dec_loop_x86"));
+
+	&set_label("slow_dec_loop_sse",4);
+		&movq	("mm0",&QWP(0,$acc));	# read input
+		&movq	("mm4",&QWP(8,$acc));
+
+		&mov	($key,$_key);
+		&call	("_sse_AES_decrypt_compact");
+
+		&mov	($acc,$_inp);		# load inp
+		&lea	($s0,$ivec);
+		&mov	($s1,$_out);		# load out
+		&mov	($s2,$_len);		# load len
+		&mov	($key,$_ivp);		# load ivp
+
+		&movq	("mm1",&QWP(0,$acc));	# re-read input
+		&movq	("mm5",&QWP(8,$acc));
+
+		&pxor	("mm0",&QWP(0,$key));	# xor iv
+		&pxor	("mm4",&QWP(8,$key));
+
+		&movq	(&QWP(0,$key),"mm1");	# copy input to iv
+		&movq	(&QWP(8,$key),"mm5");
+
+		&sub	($s2,16);		# decrease len
+		&jc	(&label("slow_dec_partial_sse"));
+
+		&movq	(&QWP(0,$s1),"mm0");	# write output
+		&movq	(&QWP(8,$s1),"mm4");
+
+		&lea	($s1,&DWP(16,$s1));	# advance out
+		&mov	($_out,$s1);		# save out
+		&lea	($acc,&DWP(16,$acc));	# advance inp
+		&mov	($_inp,$acc);		# save inp
+		&mov	($_len,$s2);		# save len
+	&jnz	(&label("slow_dec_loop_sse"));
+	&emms	();
+	&mov	("esp",$_esp);
+	&popf	();
+	&function_end_A();
+	&pushf	();			# kludge, never executed
+
+    &set_label("slow_dec_partial_sse",16);
+	&movq	(&QWP(0,$s0),"mm0");	# save output to temp
+	&movq	(&QWP(8,$s0),"mm4");
+	&emms	();
+
+	&add	($s2 eq "ecx" ? "ecx":"",16);
+	&mov	("edi",$s1);		# out
+	&mov	("esi",$s0);		# temp
+	&align	(4);
+	&data_word(0xA4F3F689);		# rep movsb # copy partial output
+
+	&mov	("esp",$_esp);
+	&popf	();
+	&function_end_A();
+	&pushf	();			# kludge, never executed
+					}
+	&set_label("slow_dec_loop_x86",16);
+		&mov	($s0,&DWP(0,$acc));	# read input
+		&mov	($s1,&DWP(4,$acc));
+		&mov	($s2,&DWP(8,$acc));
+		&mov	($s3,&DWP(12,$acc));
+
+		&lea	($key,$ivec);
+		&mov	(&DWP(0,$key),$s0);	# copy to temp
+		&mov	(&DWP(4,$key),$s1);
+		&mov	(&DWP(8,$key),$s2);
+		&mov	(&DWP(12,$key),$s3);
+
+		&mov	($key,$_key);		# load key
+		&call	("_x86_AES_decrypt_compact");
+
+		&mov	($key,$_ivp);		# load ivp
+		&mov	($acc,$_len);		# load len
+		&xor	($s0,&DWP(0,$key));	# xor iv
+		&xor	($s1,&DWP(4,$key));
+		&xor	($s2,&DWP(8,$key));
+		&xor	($s3,&DWP(12,$key));
+
+		&sub	($acc,16);
+		&jc	(&label("slow_dec_partial_x86"));
+
+		&mov	($_len,$acc);		# save len
+		&mov	($acc,$_out);		# load out
+
+		&mov	(&DWP(0,$acc),$s0);	# write output
+		&mov	(&DWP(4,$acc),$s1);
+		&mov	(&DWP(8,$acc),$s2);
+		&mov	(&DWP(12,$acc),$s3);
+
+		&lea	($acc,&DWP(16,$acc));	# advance out
+		&mov	($_out,$acc);		# save out
+
+		&lea	($acc,$ivec);
+		&mov	($s0,&DWP(0,$acc));	# read temp
+		&mov	($s1,&DWP(4,$acc));
+		&mov	($s2,&DWP(8,$acc));
+		&mov	($s3,&DWP(12,$acc));
+
+		&mov	(&DWP(0,$key),$s0);	# copy it to iv
+		&mov	(&DWP(4,$key),$s1);
+		&mov	(&DWP(8,$key),$s2);
+		&mov	(&DWP(12,$key),$s3);
+
+		&mov	($acc,$_inp);		# load inp
+		&lea	($acc,&DWP(16,$acc));	# advance inp
+		&mov	($_inp,$acc);		# save inp
+	&jnz	(&label("slow_dec_loop_x86"));
+	&mov	("esp",$_esp);
+	&popf	();
+	&function_end_A();
+	&pushf	();			# kludge, never executed
+
+    &set_label("slow_dec_partial_x86",16);
+	&lea	($acc,$ivec);
+	&mov	(&DWP(0,$acc),$s0);	# save output to temp
+	&mov	(&DWP(4,$acc),$s1);
+	&mov	(&DWP(8,$acc),$s2);
+	&mov	(&DWP(12,$acc),$s3);
+
+	&mov	($acc,$_inp);
+	&mov	($s0,&DWP(0,$acc));	# re-read input
+	&mov	($s1,&DWP(4,$acc));
+	&mov	($s2,&DWP(8,$acc));
+	&mov	($s3,&DWP(12,$acc));
+
+	&mov	(&DWP(0,$key),$s0);	# copy it to iv
+	&mov	(&DWP(4,$key),$s1);
+	&mov	(&DWP(8,$key),$s2);
+	&mov	(&DWP(12,$key),$s3);
+
+	&mov	("ecx",$_len);
+	&mov	("edi",$_out);
+	&lea	("esi",$ivec);
+	&align	(4);
+	&data_word(0xA4F3F689);		# rep movsb # copy partial output
+
+	&mov	("esp",$_esp);
+	&popf	();
+&function_end("asm_AES_cbc_encrypt");
+}
+
+#------------------------------------------------------------------#
+
+sub enckey()
+{
+	&movz	("esi",&LB("edx"));		# rk[i]>>0
+	&movz	("ebx",&BP(-128,$tbl,"esi",1));
+	&movz	("esi",&HB("edx"));		# rk[i]>>8
+	&shl	("ebx",24);
+	&xor	("eax","ebx");
+
+	&movz	("ebx",&BP(-128,$tbl,"esi",1));
+	&shr	("edx",16);
+	&movz	("esi",&LB("edx"));		# rk[i]>>16
+	&xor	("eax","ebx");
+
+	&movz	("ebx",&BP(-128,$tbl,"esi",1));
+	&movz	("esi",&HB("edx"));		# rk[i]>>24
+	&shl	("ebx",8);
+	&xor	("eax","ebx");
+
+	&movz	("ebx",&BP(-128,$tbl,"esi",1));
+	&shl	("ebx",16);
+	&xor	("eax","ebx");
+
+	&xor	("eax",&DWP(1024-128,$tbl,"ecx",4));	# rcon
+}
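+
+# What enckey() computes, as a rough C sketch for the little-endian
+# word layout used here (illustrative only; "Te4" stands for the byte
+# table the code indexes at $tbl-128, "temp" enters in edx and the
+# result accumulates into eax):
+#
+#	temp = ((uint32_t)Te4[(temp >>  0) & 0xff] << 24) |
+#	       ((uint32_t)Te4[(temp >>  8) & 0xff] <<  0) |
+#	       ((uint32_t)Te4[(temp >> 16) & 0xff] <<  8) |
+#	       ((uint32_t)Te4[(temp >> 24) & 0xff] << 16); /* SubWord+RotWord */
+#	rk[i] = rk[i-Nk] ^ temp ^ rcon;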
+
+&function_begin("_x86_AES_set_encrypt_key");
+	&mov	("esi",&wparam(1));		# user supplied key
+	&mov	("edi",&wparam(3));		# private key schedule
+
+	&test	("esi",-1);
+	&jz	(&label("badpointer"));
+	&test	("edi",-1);
+	&jz	(&label("badpointer"));
+
+	&call	(&label("pic_point"));
+	&set_label("pic_point");
+	&blindpop($tbl);
+	&lea	($tbl,&DWP(&label("AES_Te")."-".&label("pic_point"),$tbl));
+	&lea	($tbl,&DWP(2048+128,$tbl));
+
+	# prefetch Te4
+	&mov	("eax",&DWP(0-128,$tbl));
+	&mov	("ebx",&DWP(32-128,$tbl));
+	&mov	("ecx",&DWP(64-128,$tbl));
+	&mov	("edx",&DWP(96-128,$tbl));
+	&mov	("eax",&DWP(128-128,$tbl));
+	&mov	("ebx",&DWP(160-128,$tbl));
+	&mov	("ecx",&DWP(192-128,$tbl));
+	&mov	("edx",&DWP(224-128,$tbl));
+
+	&mov	("ecx",&wparam(2));		# number of bits in key
+	&cmp	("ecx",128);
+	&je	(&label("10rounds"));
+	&cmp	("ecx",192);
+	&je	(&label("12rounds"));
+	&cmp	("ecx",256);
+	&je	(&label("14rounds"));
+	&mov	("eax",-2);			# invalid number of bits
+	&jmp	(&label("exit"));
+
+    &set_label("10rounds");
+	&mov	("eax",&DWP(0,"esi"));		# copy first 4 dwords
+	&mov	("ebx",&DWP(4,"esi"));
+	&mov	("ecx",&DWP(8,"esi"));
+	&mov	("edx",&DWP(12,"esi"));
+	&mov	(&DWP(0,"edi"),"eax");
+	&mov	(&DWP(4,"edi"),"ebx");
+	&mov	(&DWP(8,"edi"),"ecx");
+	&mov	(&DWP(12,"edi"),"edx");
+
+	&xor	("ecx","ecx");
+	&jmp	(&label("10shortcut"));
+
+	&align	(4);
+	&set_label("10loop");
+		&mov	("eax",&DWP(0,"edi"));		# rk[0]
+		&mov	("edx",&DWP(12,"edi"));		# rk[3]
+	&set_label("10shortcut");
+		&enckey	();
+
+		&mov	(&DWP(16,"edi"),"eax");		# rk[4]
+		&xor	("eax",&DWP(4,"edi"));
+		&mov	(&DWP(20,"edi"),"eax");		# rk[5]
+		&xor	("eax",&DWP(8,"edi"));
+		&mov	(&DWP(24,"edi"),"eax");		# rk[6]
+		&xor	("eax",&DWP(12,"edi"));
+		&mov	(&DWP(28,"edi"),"eax");		# rk[7]
+		&inc	("ecx");
+		&add	("edi",16);
+		&cmp	("ecx",10);
+	&jl	(&label("10loop"));
+
+	&mov	(&DWP(80,"edi"),10);		# setup number of rounds
+	&xor	("eax","eax");
+	&jmp	(&label("exit"));
+
+    &set_label("12rounds");
+	&mov	("eax",&DWP(0,"esi"));		# copy first 6 dwords
+	&mov	("ebx",&DWP(4,"esi"));
+	&mov	("ecx",&DWP(8,"esi"));
+	&mov	("edx",&DWP(12,"esi"));
+	&mov	(&DWP(0,"edi"),"eax");
+	&mov	(&DWP(4,"edi"),"ebx");
+	&mov	(&DWP(8,"edi"),"ecx");
+	&mov	(&DWP(12,"edi"),"edx");
+	&mov	("ecx",&DWP(16,"esi"));
+	&mov	("edx",&DWP(20,"esi"));
+	&mov	(&DWP(16,"edi"),"ecx");
+	&mov	(&DWP(20,"edi"),"edx");
+
+	&xor	("ecx","ecx");
+	&jmp	(&label("12shortcut"));
+
+	&align	(4);
+	&set_label("12loop");
+		&mov	("eax",&DWP(0,"edi"));		# rk[0]
+		&mov	("edx",&DWP(20,"edi"));		# rk[5]
+	&set_label("12shortcut");
+		&enckey	();
+
+		&mov	(&DWP(24,"edi"),"eax");		# rk[6]
+		&xor	("eax",&DWP(4,"edi"));
+		&mov	(&DWP(28,"edi"),"eax");		# rk[7]
+		&xor	("eax",&DWP(8,"edi"));
+		&mov	(&DWP(32,"edi"),"eax");		# rk[8]
+		&xor	("eax",&DWP(12,"edi"));
+		&mov	(&DWP(36,"edi"),"eax");		# rk[9]
+
+		&cmp	("ecx",7);
+		&je	(&label("12break"));
+		&inc	("ecx");
+
+		&xor	("eax",&DWP(16,"edi"));
+		&mov	(&DWP(40,"edi"),"eax");		# rk[10]
+		&xor	("eax",&DWP(20,"edi"));
+		&mov	(&DWP(44,"edi"),"eax");		# rk[11]
+
+		&add	("edi",24);
+	&jmp	(&label("12loop"));
+
+	&set_label("12break");
+	&mov	(&DWP(72,"edi"),12);		# setup number of rounds
+	&xor	("eax","eax");
+	&jmp	(&label("exit"));
+
+    &set_label("14rounds");
+	&mov	("eax",&DWP(0,"esi"));		# copy first 8 dwords
+	&mov	("ebx",&DWP(4,"esi"));
+	&mov	("ecx",&DWP(8,"esi"));
+	&mov	("edx",&DWP(12,"esi"));
+	&mov	(&DWP(0,"edi"),"eax");
+	&mov	(&DWP(4,"edi"),"ebx");
+	&mov	(&DWP(8,"edi"),"ecx");
+	&mov	(&DWP(12,"edi"),"edx");
+	&mov	("eax",&DWP(16,"esi"));
+	&mov	("ebx",&DWP(20,"esi"));
+	&mov	("ecx",&DWP(24,"esi"));
+	&mov	("edx",&DWP(28,"esi"));
+	&mov	(&DWP(16,"edi"),"eax");
+	&mov	(&DWP(20,"edi"),"ebx");
+	&mov	(&DWP(24,"edi"),"ecx");
+	&mov	(&DWP(28,"edi"),"edx");
+
+	&xor	("ecx","ecx");
+	&jmp	(&label("14shortcut"));
+
+	&align	(4);
+	&set_label("14loop");
+		&mov	("edx",&DWP(28,"edi"));		# rk[7]
+	&set_label("14shortcut");
+		&mov	("eax",&DWP(0,"edi"));		# rk[0]
+
+		&enckey	();
+
+		&mov	(&DWP(32,"edi"),"eax");		# rk[8]
+		&xor	("eax",&DWP(4,"edi"));
+		&mov	(&DWP(36,"edi"),"eax");		# rk[9]
+		&xor	("eax",&DWP(8,"edi"));
+		&mov	(&DWP(40,"edi"),"eax");		# rk[10]
+		&xor	("eax",&DWP(12,"edi"));
+		&mov	(&DWP(44,"edi"),"eax");		# rk[11]
+
+		&cmp	("ecx",6);
+		&je	(&label("14break"));
+		&inc	("ecx");
+
+		&mov	("edx","eax");
+		&mov	("eax",&DWP(16,"edi"));		# rk[4]
+		&movz	("esi",&LB("edx"));		# rk[11]>>0
+		&movz	("ebx",&BP(-128,$tbl,"esi",1));
+		&movz	("esi",&HB("edx"));		# rk[11]>>8
+		&xor	("eax","ebx");
+
+		&movz	("ebx",&BP(-128,$tbl,"esi",1));
+		&shr	("edx",16);
+		&shl	("ebx",8);
+		&movz	("esi",&LB("edx"));		# rk[11]>>16
+		&xor	("eax","ebx");
+
+		&movz	("ebx",&BP(-128,$tbl,"esi",1));
+		&movz	("esi",&HB("edx"));		# rk[11]>>24
+		&shl	("ebx",16);
+		&xor	("eax","ebx");
+
+		&movz	("ebx",&BP(-128,$tbl,"esi",1));
+		&shl	("ebx",24);
+		&xor	("eax","ebx");
+
+		&mov	(&DWP(48,"edi"),"eax");		# rk[12]
+		&xor	("eax",&DWP(20,"edi"));
+		&mov	(&DWP(52,"edi"),"eax");		# rk[13]
+		&xor	("eax",&DWP(24,"edi"));
+		&mov	(&DWP(56,"edi"),"eax");		# rk[14]
+		&xor	("eax",&DWP(28,"edi"));
+		&mov	(&DWP(60,"edi"),"eax");		# rk[15]
+
+		&add	("edi",32);
+	&jmp	(&label("14loop"));
+
+	&set_label("14break");
+	&mov	(&DWP(48,"edi"),14);		# setup number of rounds
+	&xor	("eax","eax");
+	&jmp	(&label("exit"));
+
+    &set_label("badpointer");
+	&mov	("eax",-1);
+    &set_label("exit");
+&function_end("_x86_AES_set_encrypt_key");
+
+# int asm_AES_set_encrypt_key(const unsigned char *userKey, const int bits,
+#                             AES_KEY *key)
+&function_begin_B("asm_AES_set_encrypt_key");
+	&call	("_x86_AES_set_encrypt_key");
+	&ret	();
+&function_end_B("asm_AES_set_encrypt_key");
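+
+# A hypothetical usage sketch (the return convention follows from the
+# code above: 0 on success, -1 for a NULL pointer, -2 for an
+# unsupported bit count):
+#
+#	AES_KEY ks;
+#	if (asm_AES_set_encrypt_key(user_key, 128, &ks) != 0)
+#		/* reject the key */;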
+
+sub deckey()
+{ my ($i,$key,$tp1,$tp2,$tp4,$tp8) = @_;
+  my $tmp = $tbl;
+
+	&mov	($tmp,0x80808080);
+	&and	($tmp,$tp1);
+	&lea	($tp2,&DWP(0,$tp1,$tp1));
+	&mov	($acc,$tmp);
+	&shr	($tmp,7);
+	&sub	($acc,$tmp);
+	&and	($tp2,0xfefefefe);
+	&and	($acc,0x1b1b1b1b);
+	&xor	($tp2,$acc);
+	&mov	($tmp,0x80808080);
+
+	&and	($tmp,$tp2);
+	&lea	($tp4,&DWP(0,$tp2,$tp2));
+	&mov	($acc,$tmp);
+	&shr	($tmp,7);
+	&sub	($acc,$tmp);
+	&and	($tp4,0xfefefefe);
+	&and	($acc,0x1b1b1b1b);
+	 &xor	($tp2,$tp1);	# tp2^tp1
+	&xor	($tp4,$acc);
+	&mov	($tmp,0x80808080);
+
+	&and	($tmp,$tp4);
+	&lea	($tp8,&DWP(0,$tp4,$tp4));
+	&mov	($acc,$tmp);
+	&shr	($tmp,7);
+	 &xor	($tp4,$tp1);	# tp4^tp1
+	&sub	($acc,$tmp);
+	&and	($tp8,0xfefefefe);
+	&and	($acc,0x1b1b1b1b);
+	 &rotl	($tp1,8);	# = ROTATE(tp1,8)
+	&xor	($tp8,$acc);
+
+	&mov	($tmp,&DWP(4*($i+1),$key));	# modulo-scheduled load
+
+	&xor	($tp1,$tp2);
+	&xor	($tp2,$tp8);
+	&xor	($tp1,$tp4);
+	&rotl	($tp2,24);
+	&xor	($tp4,$tp8);
+	&xor	($tp1,$tp8);	# ^= tp8^(tp4^tp1)^(tp2^tp1)
+	&rotl	($tp4,16);
+	&xor	($tp1,$tp2);	# ^= ROTATE(tp8^tp2^tp1,24)
+	&rotl	($tp8,8);
+	&xor	($tp1,$tp4);	# ^= ROTATE(tp8^tp4^tp1,16)
+	&mov	($tp2,$tmp);
+	&xor	($tp1,$tp8);	# ^= ROTATE(tp8,8)
+
+	&mov	(&DWP(4*$i,$key),$tp1);
+}
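+
+# deckey() applies InvMixColumns to one round-key word, with the
+# GF(2^8) doublings tp2/tp4/tp8 computed on all four bytes of a word
+# at once.  A rough C sketch of one doubling step (illustrative only):
+#
+#	hi  = tp1 & 0x80808080;			/* bytes about to overflow */
+#	tp2 = ((tp1 << 1) & 0xfefefefe) ^
+#	      ((hi - (hi >> 7)) & 0x1b1b1b1b);	/* reduce mod 0x11b */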
+
+# int asm_AES_set_decrypt_key(const unsigned char *userKey, const int bits,
+#                             AES_KEY *key)
+&function_begin_B("asm_AES_set_decrypt_key");
+	&call	("_x86_AES_set_encrypt_key");
+	&cmp	("eax",0);
+	&je	(&label("proceed"));
+	&ret	();
+
+    &set_label("proceed");
+	&push	("ebp");
+	&push	("ebx");
+	&push	("esi");
+	&push	("edi");
+
+	&mov	("esi",&wparam(2));
+	&mov	("ecx",&DWP(240,"esi"));	# pull number of rounds
+	&lea	("ecx",&DWP(0,"","ecx",4));
+	&lea	("edi",&DWP(0,"esi","ecx",4));	# pointer to last chunk
+
+	&set_label("invert",4);			# invert order of chunks
+		&mov	("eax",&DWP(0,"esi"));
+		&mov	("ebx",&DWP(4,"esi"));
+		&mov	("ecx",&DWP(0,"edi"));
+		&mov	("edx",&DWP(4,"edi"));
+		&mov	(&DWP(0,"edi"),"eax");
+		&mov	(&DWP(4,"edi"),"ebx");
+		&mov	(&DWP(0,"esi"),"ecx");
+		&mov	(&DWP(4,"esi"),"edx");
+		&mov	("eax",&DWP(8,"esi"));
+		&mov	("ebx",&DWP(12,"esi"));
+		&mov	("ecx",&DWP(8,"edi"));
+		&mov	("edx",&DWP(12,"edi"));
+		&mov	(&DWP(8,"edi"),"eax");
+		&mov	(&DWP(12,"edi"),"ebx");
+		&mov	(&DWP(8,"esi"),"ecx");
+		&mov	(&DWP(12,"esi"),"edx");
+		&add	("esi",16);
+		&sub	("edi",16);
+		&cmp	("esi","edi");
+	&jne	(&label("invert"));
+
+	&mov	($key,&wparam(2));
+	&mov	($acc,&DWP(240,$key));		# pull number of rounds
+	&lea	($acc,&DWP(-2,$acc,$acc));
+	&lea	($acc,&DWP(0,$key,$acc,8));
+	&mov	(&wparam(2),$acc);
+
+	&mov	($s0,&DWP(16,$key));		# modulo-scheduled load
+	&set_label("permute",4);		# permute the key schedule
+		&add	($key,16);
+		&deckey	(0,$key,$s0,$s1,$s2,$s3);
+		&deckey	(1,$key,$s1,$s2,$s3,$s0);
+		&deckey	(2,$key,$s2,$s3,$s0,$s1);
+		&deckey	(3,$key,$s3,$s0,$s1,$s2);
+		&cmp	($key,&wparam(2));
+	&jb	(&label("permute"));
+
+	&xor	("eax","eax");			# return success
+&function_end("asm_AES_set_decrypt_key");
+&asciz("AES for x86, CRYPTOGAMS by <appro\@openssl.org>");
+
+&asm_finish();
+
+close STDOUT;
diff --git a/src/crypto/fipsmodule/aes/asm/aes-armv4.pl b/src/crypto/fipsmodule/aes/asm/aes-armv4.pl
new file mode 100644
index 0000000..562faab
--- /dev/null
+++ b/src/crypto/fipsmodule/aes/asm/aes-armv4.pl
@@ -0,0 +1,1244 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# AES for ARMv4
+
+# January 2007.
+#
+# The code uses a single 1KB S-box and is >2 times faster than code
+# generated by gcc-3.4.1. This is thanks to a unique feature of the
+# ARMv4 ISA, which allows a logical or arithmetic operation to be
+# merged with a shift or rotate in a single instruction, emitting the
+# combined result every cycle. The module is endian-neutral. The
+# performance is ~42 cycles/byte for a 128-bit key [on a single-issue
+# Xscale PXA250 core].
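+#
+# For instance (an illustrative fragment, not code emitted verbatim by
+# this module), a rotated table word can be folded into the running
+# state in one instruction instead of a rotate followed by an eor:
+#
+#	eor	r0,r0,r4,ror#8		@ r0 ^= ROTATE(r4,8), one insn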
+
+# May 2007.
+#
+# AES_set_[en|de]crypt_key is added.
+
+# July 2010.
+#
+# Rescheduling for the dual-issue pipeline resulted in a 12%
+# improvement on the Cortex A8 core and ~25 cycles per byte processed
+# with a 128-bit key.
+
+# February 2011.
+#
+# Profiler-assisted and platform-specific optimization resulted in a
+# 16% improvement on the Cortex A8 core and ~21.5 cycles per byte.
+
+$flavour = shift;
+if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
+else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }
+
+if ($flavour && $flavour ne "void") {
+    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+    ( $xlate="${dir}../../../perlasm/arm-xlate.pl" and -f $xlate) or
+    die "can't locate arm-xlate.pl";
+
+    open STDOUT,"| \"$^X\" $xlate $flavour $output";
+} else {
+    open STDOUT,">$output";
+}
+
+$s0="r0";
+$s1="r1";
+$s2="r2";
+$s3="r3";
+$t1="r4";
+$t2="r5";
+$t3="r6";
+$i1="r7";
+$i2="r8";
+$i3="r9";
+
+$tbl="r10";
+$key="r11";
+$rounds="r12";
+
+$code=<<___;
+#if defined(__arm__)
+#ifndef __KERNEL__
+# include <openssl/arm_arch.h>
+#else
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+#endif
+
+.text
+#if __ARM_ARCH__<7
+.code	32
+#else
+.syntax	unified
+# if defined(__thumb2__) && !defined(__APPLE__)
+.thumb
+# else
+.code	32
+# endif
+#endif
+
+.type	AES_Te,%object
+.align	5
+AES_Te:
+.word	0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d
+.word	0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554
+.word	0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d
+.word	0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a
+.word	0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87
+.word	0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b
+.word	0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea
+.word	0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b
+.word	0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a
+.word	0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f
+.word	0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108
+.word	0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f
+.word	0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e
+.word	0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5
+.word	0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d
+.word	0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f
+.word	0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e
+.word	0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb
+.word	0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce
+.word	0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497
+.word	0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c
+.word	0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed
+.word	0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b
+.word	0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a
+.word	0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16
+.word	0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594
+.word	0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81
+.word	0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3
+.word	0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a
+.word	0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504
+.word	0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163
+.word	0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d
+.word	0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f
+.word	0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739
+.word	0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47
+.word	0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395
+.word	0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f
+.word	0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883
+.word	0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c
+.word	0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76
+.word	0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e
+.word	0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4
+.word	0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6
+.word	0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b
+.word	0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7
+.word	0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0
+.word	0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25
+.word	0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818
+.word	0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72
+.word	0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651
+.word	0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21
+.word	0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85
+.word	0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa
+.word	0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12
+.word	0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0
+.word	0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9
+.word	0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133
+.word	0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7
+.word	0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920
+.word	0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a
+.word	0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17
+.word	0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8
+.word	0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11
+.word	0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a
+@ Te4[256]
+.byte	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+.byte	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+.byte	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+.byte	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+.byte	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+.byte	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+.byte	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+.byte	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+.byte	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+.byte	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+.byte	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+.byte	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+.byte	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+.byte	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+.byte	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+.byte	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+.byte	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+.byte	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+.byte	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+.byte	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+.byte	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+.byte	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+.byte	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+.byte	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+.byte	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+.byte	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+.byte	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+.byte	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+.byte	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+.byte	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+.byte	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+.byte	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+@ rcon[]
+.word	0x01000000, 0x02000000, 0x04000000, 0x08000000
+.word	0x10000000, 0x20000000, 0x40000000, 0x80000000
+.word	0x1B000000, 0x36000000, 0, 0, 0, 0, 0, 0
+.size	AES_Te,.-AES_Te
+
+@ void asm_AES_encrypt(const unsigned char *in, unsigned char *out,
+@ 		       const AES_KEY *key) {
+.global asm_AES_encrypt
+.type   asm_AES_encrypt,%function
+.align	5
+asm_AES_encrypt:
+#if __ARM_ARCH__<7
+	sub	r3,pc,#8		@ asm_AES_encrypt
+#else
+	adr	r3,asm_AES_encrypt
+#endif
+	stmdb   sp!,{r1,r4-r12,lr}
+#ifdef	__APPLE__
+	adr	$tbl,AES_Te
+#else
+	sub	$tbl,r3,#asm_AES_encrypt-AES_Te	@ Te
+#endif
+	mov	$rounds,r0		@ inp
+	mov	$key,r2
+#if __ARM_ARCH__<7
+	ldrb	$s0,[$rounds,#3]	@ load input data in endian-neutral
+	ldrb	$t1,[$rounds,#2]	@ manner...
+	ldrb	$t2,[$rounds,#1]
+	ldrb	$t3,[$rounds,#0]
+	orr	$s0,$s0,$t1,lsl#8
+	ldrb	$s1,[$rounds,#7]
+	orr	$s0,$s0,$t2,lsl#16
+	ldrb	$t1,[$rounds,#6]
+	orr	$s0,$s0,$t3,lsl#24
+	ldrb	$t2,[$rounds,#5]
+	ldrb	$t3,[$rounds,#4]
+	orr	$s1,$s1,$t1,lsl#8
+	ldrb	$s2,[$rounds,#11]
+	orr	$s1,$s1,$t2,lsl#16
+	ldrb	$t1,[$rounds,#10]
+	orr	$s1,$s1,$t3,lsl#24
+	ldrb	$t2,[$rounds,#9]
+	ldrb	$t3,[$rounds,#8]
+	orr	$s2,$s2,$t1,lsl#8
+	ldrb	$s3,[$rounds,#15]
+	orr	$s2,$s2,$t2,lsl#16
+	ldrb	$t1,[$rounds,#14]
+	orr	$s2,$s2,$t3,lsl#24
+	ldrb	$t2,[$rounds,#13]
+	ldrb	$t3,[$rounds,#12]
+	orr	$s3,$s3,$t1,lsl#8
+	orr	$s3,$s3,$t2,lsl#16
+	orr	$s3,$s3,$t3,lsl#24
+#else
+	ldr	$s0,[$rounds,#0]
+	ldr	$s1,[$rounds,#4]
+	ldr	$s2,[$rounds,#8]
+	ldr	$s3,[$rounds,#12]
+#ifdef __ARMEL__
+	rev	$s0,$s0
+	rev	$s1,$s1
+	rev	$s2,$s2
+	rev	$s3,$s3
+#endif
+#endif
+	bl	_armv4_AES_encrypt
+
+	ldr	$rounds,[sp],#4		@ pop out
+#if __ARM_ARCH__>=7
+#ifdef __ARMEL__
+	rev	$s0,$s0
+	rev	$s1,$s1
+	rev	$s2,$s2
+	rev	$s3,$s3
+#endif
+	str	$s0,[$rounds,#0]
+	str	$s1,[$rounds,#4]
+	str	$s2,[$rounds,#8]
+	str	$s3,[$rounds,#12]
+#else
+	mov	$t1,$s0,lsr#24		@ write output in endian-neutral
+	mov	$t2,$s0,lsr#16		@ manner...
+	mov	$t3,$s0,lsr#8
+	strb	$t1,[$rounds,#0]
+	strb	$t2,[$rounds,#1]
+	mov	$t1,$s1,lsr#24
+	strb	$t3,[$rounds,#2]
+	mov	$t2,$s1,lsr#16
+	strb	$s0,[$rounds,#3]
+	mov	$t3,$s1,lsr#8
+	strb	$t1,[$rounds,#4]
+	strb	$t2,[$rounds,#5]
+	mov	$t1,$s2,lsr#24
+	strb	$t3,[$rounds,#6]
+	mov	$t2,$s2,lsr#16
+	strb	$s1,[$rounds,#7]
+	mov	$t3,$s2,lsr#8
+	strb	$t1,[$rounds,#8]
+	strb	$t2,[$rounds,#9]
+	mov	$t1,$s3,lsr#24
+	strb	$t3,[$rounds,#10]
+	mov	$t2,$s3,lsr#16
+	strb	$s2,[$rounds,#11]
+	mov	$t3,$s3,lsr#8
+	strb	$t1,[$rounds,#12]
+	strb	$t2,[$rounds,#13]
+	strb	$t3,[$rounds,#14]
+	strb	$s3,[$rounds,#15]
+#endif
+#if __ARM_ARCH__>=5
+	ldmia	sp!,{r4-r12,pc}
+#else
+	ldmia   sp!,{r4-r12,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	bx	lr			@ interoperable with Thumb ISA:-)
+#endif
+.size	asm_AES_encrypt,.-asm_AES_encrypt
+
+.type   _armv4_AES_encrypt,%function
+.align	2
+_armv4_AES_encrypt:
+	str	lr,[sp,#-4]!		@ push lr
+	ldmia	$key!,{$t1-$i1}
+	eor	$s0,$s0,$t1
+	ldr	$rounds,[$key,#240-16]
+	eor	$s1,$s1,$t2
+	eor	$s2,$s2,$t3
+	eor	$s3,$s3,$i1
+	sub	$rounds,$rounds,#1
+	mov	lr,#255
+
+	and	$i1,lr,$s0
+	and	$i2,lr,$s0,lsr#8
+	and	$i3,lr,$s0,lsr#16
+	mov	$s0,$s0,lsr#24
+.Lenc_loop:
+	ldr	$t1,[$tbl,$i1,lsl#2]	@ Te3[s0>>0]
+	and	$i1,lr,$s1,lsr#16	@ i0
+	ldr	$t2,[$tbl,$i2,lsl#2]	@ Te2[s0>>8]
+	and	$i2,lr,$s1
+	ldr	$t3,[$tbl,$i3,lsl#2]	@ Te1[s0>>16]
+	and	$i3,lr,$s1,lsr#8
+	ldr	$s0,[$tbl,$s0,lsl#2]	@ Te0[s0>>24]
+	mov	$s1,$s1,lsr#24
+
+	ldr	$i1,[$tbl,$i1,lsl#2]	@ Te1[s1>>16]
+	ldr	$i2,[$tbl,$i2,lsl#2]	@ Te3[s1>>0]
+	ldr	$i3,[$tbl,$i3,lsl#2]	@ Te2[s1>>8]
+	eor	$s0,$s0,$i1,ror#8
+	ldr	$s1,[$tbl,$s1,lsl#2]	@ Te0[s1>>24]
+	and	$i1,lr,$s2,lsr#8	@ i0
+	eor	$t2,$t2,$i2,ror#8
+	and	$i2,lr,$s2,lsr#16	@ i1
+	eor	$t3,$t3,$i3,ror#8
+	and	$i3,lr,$s2
+	ldr	$i1,[$tbl,$i1,lsl#2]	@ Te2[s2>>8]
+	eor	$s1,$s1,$t1,ror#24
+	ldr	$i2,[$tbl,$i2,lsl#2]	@ Te1[s2>>16]
+	mov	$s2,$s2,lsr#24
+
+	ldr	$i3,[$tbl,$i3,lsl#2]	@ Te3[s2>>0]
+	eor	$s0,$s0,$i1,ror#16
+	ldr	$s2,[$tbl,$s2,lsl#2]	@ Te0[s2>>24]
+	and	$i1,lr,$s3		@ i0
+	eor	$s1,$s1,$i2,ror#8
+	and	$i2,lr,$s3,lsr#8	@ i1
+	eor	$t3,$t3,$i3,ror#16
+	and	$i3,lr,$s3,lsr#16	@ i2
+	ldr	$i1,[$tbl,$i1,lsl#2]	@ Te3[s3>>0]
+	eor	$s2,$s2,$t2,ror#16
+	ldr	$i2,[$tbl,$i2,lsl#2]	@ Te2[s3>>8]
+	mov	$s3,$s3,lsr#24
+
+	ldr	$i3,[$tbl,$i3,lsl#2]	@ Te1[s3>>16]
+	eor	$s0,$s0,$i1,ror#24
+	ldr	$i1,[$key],#16
+	eor	$s1,$s1,$i2,ror#16
+	ldr	$s3,[$tbl,$s3,lsl#2]	@ Te0[s3>>24]
+	eor	$s2,$s2,$i3,ror#8
+	ldr	$t1,[$key,#-12]
+	eor	$s3,$s3,$t3,ror#8
+
+	ldr	$t2,[$key,#-8]
+	eor	$s0,$s0,$i1
+	ldr	$t3,[$key,#-4]
+	and	$i1,lr,$s0
+	eor	$s1,$s1,$t1
+	and	$i2,lr,$s0,lsr#8
+	eor	$s2,$s2,$t2
+	and	$i3,lr,$s0,lsr#16
+	eor	$s3,$s3,$t3
+	mov	$s0,$s0,lsr#24
+
+	subs	$rounds,$rounds,#1
+	bne	.Lenc_loop
+
+	add	$tbl,$tbl,#2
+
+	ldrb	$t1,[$tbl,$i1,lsl#2]	@ Te4[s0>>0]
+	and	$i1,lr,$s1,lsr#16	@ i0
+	ldrb	$t2,[$tbl,$i2,lsl#2]	@ Te4[s0>>8]
+	and	$i2,lr,$s1
+	ldrb	$t3,[$tbl,$i3,lsl#2]	@ Te4[s0>>16]
+	and	$i3,lr,$s1,lsr#8
+	ldrb	$s0,[$tbl,$s0,lsl#2]	@ Te4[s0>>24]
+	mov	$s1,$s1,lsr#24
+
+	ldrb	$i1,[$tbl,$i1,lsl#2]	@ Te4[s1>>16]
+	ldrb	$i2,[$tbl,$i2,lsl#2]	@ Te4[s1>>0]
+	ldrb	$i3,[$tbl,$i3,lsl#2]	@ Te4[s1>>8]
+	eor	$s0,$i1,$s0,lsl#8
+	ldrb	$s1,[$tbl,$s1,lsl#2]	@ Te4[s1>>24]
+	and	$i1,lr,$s2,lsr#8	@ i0
+	eor	$t2,$i2,$t2,lsl#8
+	and	$i2,lr,$s2,lsr#16	@ i1
+	eor	$t3,$i3,$t3,lsl#8
+	and	$i3,lr,$s2
+	ldrb	$i1,[$tbl,$i1,lsl#2]	@ Te4[s2>>8]
+	eor	$s1,$t1,$s1,lsl#24
+	ldrb	$i2,[$tbl,$i2,lsl#2]	@ Te4[s2>>16]
+	mov	$s2,$s2,lsr#24
+
+	ldrb	$i3,[$tbl,$i3,lsl#2]	@ Te4[s2>>0]
+	eor	$s0,$i1,$s0,lsl#8
+	ldrb	$s2,[$tbl,$s2,lsl#2]	@ Te4[s2>>24]
+	and	$i1,lr,$s3		@ i0
+	eor	$s1,$s1,$i2,lsl#16
+	and	$i2,lr,$s3,lsr#8	@ i1
+	eor	$t3,$i3,$t3,lsl#8
+	and	$i3,lr,$s3,lsr#16	@ i2
+	ldrb	$i1,[$tbl,$i1,lsl#2]	@ Te4[s3>>0]
+	eor	$s2,$t2,$s2,lsl#24
+	ldrb	$i2,[$tbl,$i2,lsl#2]	@ Te4[s3>>8]
+	mov	$s3,$s3,lsr#24
+
+	ldrb	$i3,[$tbl,$i3,lsl#2]	@ Te4[s3>>16]
+	eor	$s0,$i1,$s0,lsl#8
+	ldr	$i1,[$key,#0]
+	ldrb	$s3,[$tbl,$s3,lsl#2]	@ Te4[s3>>24]
+	eor	$s1,$s1,$i2,lsl#8
+	ldr	$t1,[$key,#4]
+	eor	$s2,$s2,$i3,lsl#16
+	ldr	$t2,[$key,#8]
+	eor	$s3,$t3,$s3,lsl#24
+	ldr	$t3,[$key,#12]
+
+	eor	$s0,$s0,$i1
+	eor	$s1,$s1,$t1
+	eor	$s2,$s2,$t2
+	eor	$s3,$s3,$t3
+
+	sub	$tbl,$tbl,#2
+	ldr	pc,[sp],#4		@ pop and return
+.size	_armv4_AES_encrypt,.-_armv4_AES_encrypt
+
+.global asm_AES_set_encrypt_key
+.type   asm_AES_set_encrypt_key,%function
+.align	5
+asm_AES_set_encrypt_key:
+_armv4_AES_set_encrypt_key:
+#if __ARM_ARCH__<7
+	sub	r3,pc,#8		@ asm_AES_set_encrypt_key
+#else
+	adr	r3,asm_AES_set_encrypt_key
+#endif
+	teq	r0,#0
+#if __ARM_ARCH__>=7
+	itt	eq			@ Thumb2 thing, sanity check in ARM
+#endif
+	moveq	r0,#-1
+	beq	.Labrt
+	teq	r2,#0
+#if __ARM_ARCH__>=7
+	itt	eq			@ Thumb2 thing, sanity check in ARM
+#endif
+	moveq	r0,#-1
+	beq	.Labrt
+
+	teq	r1,#128
+	beq	.Lok
+	teq	r1,#192
+	beq	.Lok
+	teq	r1,#256
+#if __ARM_ARCH__>=7
+	itt	ne			@ Thumb2 thing, sanity check in ARM
+#endif
+	movne	r0,#-1
+	bne	.Labrt
+
+.Lok:	stmdb   sp!,{r4-r12,lr}
+	mov	$rounds,r0		@ inp
+	mov	lr,r1			@ bits
+	mov	$key,r2			@ key
+
+#ifdef	__APPLE__
+	adr	$tbl,AES_Te+1024				@ Te4
+#else
+	sub	$tbl,r3,#_armv4_AES_set_encrypt_key-AES_Te-1024	@ Te4
+#endif
+
+#if __ARM_ARCH__<7
+	ldrb	$s0,[$rounds,#3]	@ load input data in endian-neutral
+	ldrb	$t1,[$rounds,#2]	@ manner...
+	ldrb	$t2,[$rounds,#1]
+	ldrb	$t3,[$rounds,#0]
+	orr	$s0,$s0,$t1,lsl#8
+	ldrb	$s1,[$rounds,#7]
+	orr	$s0,$s0,$t2,lsl#16
+	ldrb	$t1,[$rounds,#6]
+	orr	$s0,$s0,$t3,lsl#24
+	ldrb	$t2,[$rounds,#5]
+	ldrb	$t3,[$rounds,#4]
+	orr	$s1,$s1,$t1,lsl#8
+	ldrb	$s2,[$rounds,#11]
+	orr	$s1,$s1,$t2,lsl#16
+	ldrb	$t1,[$rounds,#10]
+	orr	$s1,$s1,$t3,lsl#24
+	ldrb	$t2,[$rounds,#9]
+	ldrb	$t3,[$rounds,#8]
+	orr	$s2,$s2,$t1,lsl#8
+	ldrb	$s3,[$rounds,#15]
+	orr	$s2,$s2,$t2,lsl#16
+	ldrb	$t1,[$rounds,#14]
+	orr	$s2,$s2,$t3,lsl#24
+	ldrb	$t2,[$rounds,#13]
+	ldrb	$t3,[$rounds,#12]
+	orr	$s3,$s3,$t1,lsl#8
+	str	$s0,[$key],#16
+	orr	$s3,$s3,$t2,lsl#16
+	str	$s1,[$key,#-12]
+	orr	$s3,$s3,$t3,lsl#24
+	str	$s2,[$key,#-8]
+	str	$s3,[$key,#-4]
+#else
+	ldr	$s0,[$rounds,#0]
+	ldr	$s1,[$rounds,#4]
+	ldr	$s2,[$rounds,#8]
+	ldr	$s3,[$rounds,#12]
+#ifdef __ARMEL__
+	rev	$s0,$s0
+	rev	$s1,$s1
+	rev	$s2,$s2
+	rev	$s3,$s3
+#endif
+	str	$s0,[$key],#16
+	str	$s1,[$key,#-12]
+	str	$s2,[$key,#-8]
+	str	$s3,[$key,#-4]
+#endif
+
+	teq	lr,#128
+	bne	.Lnot128
+	mov	$rounds,#10
+	str	$rounds,[$key,#240-16]
+	add	$t3,$tbl,#256			@ rcon
+	mov	lr,#255
+
+.L128_loop:
+	and	$t2,lr,$s3,lsr#24
+	and	$i1,lr,$s3,lsr#16
+	ldrb	$t2,[$tbl,$t2]
+	and	$i2,lr,$s3,lsr#8
+	ldrb	$i1,[$tbl,$i1]
+	and	$i3,lr,$s3
+	ldrb	$i2,[$tbl,$i2]
+	orr	$t2,$t2,$i1,lsl#24
+	ldrb	$i3,[$tbl,$i3]
+	orr	$t2,$t2,$i2,lsl#16
+	ldr	$t1,[$t3],#4			@ rcon[i++]
+	orr	$t2,$t2,$i3,lsl#8
+	eor	$t2,$t2,$t1
+	eor	$s0,$s0,$t2			@ rk[4]=rk[0]^...
+	eor	$s1,$s1,$s0			@ rk[5]=rk[1]^rk[4]
+	str	$s0,[$key],#16
+	eor	$s2,$s2,$s1			@ rk[6]=rk[2]^rk[5]
+	str	$s1,[$key,#-12]
+	eor	$s3,$s3,$s2			@ rk[7]=rk[3]^rk[6]
+	str	$s2,[$key,#-8]
+	subs	$rounds,$rounds,#1
+	str	$s3,[$key,#-4]
+	bne	.L128_loop
+	sub	r2,$key,#176
+	b	.Ldone
+
+.Lnot128:
+#if __ARM_ARCH__<7
+	ldrb	$i2,[$rounds,#19]
+	ldrb	$t1,[$rounds,#18]
+	ldrb	$t2,[$rounds,#17]
+	ldrb	$t3,[$rounds,#16]
+	orr	$i2,$i2,$t1,lsl#8
+	ldrb	$i3,[$rounds,#23]
+	orr	$i2,$i2,$t2,lsl#16
+	ldrb	$t1,[$rounds,#22]
+	orr	$i2,$i2,$t3,lsl#24
+	ldrb	$t2,[$rounds,#21]
+	ldrb	$t3,[$rounds,#20]
+	orr	$i3,$i3,$t1,lsl#8
+	orr	$i3,$i3,$t2,lsl#16
+	str	$i2,[$key],#8
+	orr	$i3,$i3,$t3,lsl#24
+	str	$i3,[$key,#-4]
+#else
+	ldr	$i2,[$rounds,#16]
+	ldr	$i3,[$rounds,#20]
+#ifdef __ARMEL__
+	rev	$i2,$i2
+	rev	$i3,$i3
+#endif
+	str	$i2,[$key],#8
+	str	$i3,[$key,#-4]
+#endif
+
+	teq	lr,#192
+	bne	.Lnot192
+	mov	$rounds,#12
+	str	$rounds,[$key,#240-24]
+	add	$t3,$tbl,#256			@ rcon
+	mov	lr,#255
+	mov	$rounds,#8
+
+.L192_loop:
+	and	$t2,lr,$i3,lsr#24
+	and	$i1,lr,$i3,lsr#16
+	ldrb	$t2,[$tbl,$t2]
+	and	$i2,lr,$i3,lsr#8
+	ldrb	$i1,[$tbl,$i1]
+	and	$i3,lr,$i3
+	ldrb	$i2,[$tbl,$i2]
+	orr	$t2,$t2,$i1,lsl#24
+	ldrb	$i3,[$tbl,$i3]
+	orr	$t2,$t2,$i2,lsl#16
+	ldr	$t1,[$t3],#4			@ rcon[i++]
+	orr	$t2,$t2,$i3,lsl#8
+	eor	$i3,$t2,$t1
+	eor	$s0,$s0,$i3			@ rk[6]=rk[0]^...
+	eor	$s1,$s1,$s0			@ rk[7]=rk[1]^rk[6]
+	str	$s0,[$key],#24
+	eor	$s2,$s2,$s1			@ rk[8]=rk[2]^rk[7]
+	str	$s1,[$key,#-20]
+	eor	$s3,$s3,$s2			@ rk[9]=rk[3]^rk[8]
+	str	$s2,[$key,#-16]
+	subs	$rounds,$rounds,#1
+	str	$s3,[$key,#-12]
+#if __ARM_ARCH__>=7
+	itt	eq				@ Thumb2 thing, sanity check in ARM
+#endif
+	subeq	r2,$key,#216
+	beq	.Ldone
+
+	ldr	$i1,[$key,#-32]
+	ldr	$i2,[$key,#-28]
+	eor	$i1,$i1,$s3			@ rk[10]=rk[4]^rk[9]
+	eor	$i3,$i2,$i1			@ rk[11]=rk[5]^rk[10]
+	str	$i1,[$key,#-8]
+	str	$i3,[$key,#-4]
+	b	.L192_loop
+
+.Lnot192:
+#if __ARM_ARCH__<7
+	ldrb	$i2,[$rounds,#27]
+	ldrb	$t1,[$rounds,#26]
+	ldrb	$t2,[$rounds,#25]
+	ldrb	$t3,[$rounds,#24]
+	orr	$i2,$i2,$t1,lsl#8
+	ldrb	$i3,[$rounds,#31]
+	orr	$i2,$i2,$t2,lsl#16
+	ldrb	$t1,[$rounds,#30]
+	orr	$i2,$i2,$t3,lsl#24
+	ldrb	$t2,[$rounds,#29]
+	ldrb	$t3,[$rounds,#28]
+	orr	$i3,$i3,$t1,lsl#8
+	orr	$i3,$i3,$t2,lsl#16
+	str	$i2,[$key],#8
+	orr	$i3,$i3,$t3,lsl#24
+	str	$i3,[$key,#-4]
+#else
+	ldr	$i2,[$rounds,#24]
+	ldr	$i3,[$rounds,#28]
+#ifdef __ARMEL__
+	rev	$i2,$i2
+	rev	$i3,$i3
+#endif
+	str	$i2,[$key],#8
+	str	$i3,[$key,#-4]
+#endif
+
+	mov	$rounds,#14
+	str	$rounds,[$key,#240-32]
+	add	$t3,$tbl,#256			@ rcon
+	mov	lr,#255
+	mov	$rounds,#7
+
+.L256_loop:
+	and	$t2,lr,$i3,lsr#24
+	and	$i1,lr,$i3,lsr#16
+	ldrb	$t2,[$tbl,$t2]
+	and	$i2,lr,$i3,lsr#8
+	ldrb	$i1,[$tbl,$i1]
+	and	$i3,lr,$i3
+	ldrb	$i2,[$tbl,$i2]
+	orr	$t2,$t2,$i1,lsl#24
+	ldrb	$i3,[$tbl,$i3]
+	orr	$t2,$t2,$i2,lsl#16
+	ldr	$t1,[$t3],#4			@ rcon[i++]
+	orr	$t2,$t2,$i3,lsl#8
+	eor	$i3,$t2,$t1
+	eor	$s0,$s0,$i3			@ rk[8]=rk[0]^...
+	eor	$s1,$s1,$s0			@ rk[9]=rk[1]^rk[8]
+	str	$s0,[$key],#32
+	eor	$s2,$s2,$s1			@ rk[10]=rk[2]^rk[9]
+	str	$s1,[$key,#-28]
+	eor	$s3,$s3,$s2			@ rk[11]=rk[3]^rk[10]
+	str	$s2,[$key,#-24]
+	subs	$rounds,$rounds,#1
+	str	$s3,[$key,#-20]
+#if __ARM_ARCH__>=7
+	itt	eq				@ Thumb2 thing, sanity check in ARM
+#endif
+	subeq	r2,$key,#256
+	beq	.Ldone
+
+	and	$t2,lr,$s3
+	and	$i1,lr,$s3,lsr#8
+	ldrb	$t2,[$tbl,$t2]
+	and	$i2,lr,$s3,lsr#16
+	ldrb	$i1,[$tbl,$i1]
+	and	$i3,lr,$s3,lsr#24
+	ldrb	$i2,[$tbl,$i2]
+	orr	$t2,$t2,$i1,lsl#8
+	ldrb	$i3,[$tbl,$i3]
+	orr	$t2,$t2,$i2,lsl#16
+	ldr	$t1,[$key,#-48]
+	orr	$t2,$t2,$i3,lsl#24
+
+	ldr	$i1,[$key,#-44]
+	ldr	$i2,[$key,#-40]
+	eor	$t1,$t1,$t2			@ rk[12]=rk[4]^...
+	ldr	$i3,[$key,#-36]
+	eor	$i1,$i1,$t1			@ rk[13]=rk[5]^rk[12]
+	str	$t1,[$key,#-16]
+	eor	$i2,$i2,$i1			@ rk[14]=rk[6]^rk[13]
+	str	$i1,[$key,#-12]
+	eor	$i3,$i3,$i2			@ rk[15]=rk[7]^rk[14]
+	str	$i2,[$key,#-8]
+	str	$i3,[$key,#-4]
+	b	.L256_loop
+
+.align	2
+.Ldone:	mov	r0,#0
+	ldmia   sp!,{r4-r12,lr}
+.Labrt:
+#if __ARM_ARCH__>=5
+	ret				@ bx lr
+#else
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	bx	lr			@ interoperable with Thumb ISA:-)
+#endif
+.size	asm_AES_set_encrypt_key,.-asm_AES_set_encrypt_key
+
+.global asm_AES_set_decrypt_key
+.type   asm_AES_set_decrypt_key,%function
+.align	5
+asm_AES_set_decrypt_key:
+	str	lr,[sp,#-4]!            @ push lr
+	bl	_armv4_AES_set_encrypt_key
+	teq	r0,#0
+	ldr	lr,[sp],#4              @ pop lr
+	bne	.Labrt
+
+	mov	r0,r2			@ asm_AES_set_encrypt_key preserves r2,
+	mov	r1,r2			@ which is AES_KEY *key
+	b	_armv4_AES_set_enc2dec_key
+.size	asm_AES_set_decrypt_key,.-asm_AES_set_decrypt_key
+
+@ void AES_set_enc2dec_key(const AES_KEY *inp,AES_KEY *out)
+.global	AES_set_enc2dec_key
+.type	AES_set_enc2dec_key,%function
+.align	5
+AES_set_enc2dec_key:
+_armv4_AES_set_enc2dec_key:
+	stmdb   sp!,{r4-r12,lr}
+
+	ldr	$rounds,[r0,#240]
+	mov	$i1,r0			@ input
+	add	$i2,r0,$rounds,lsl#4
+	mov	$key,r1			@ output
+	add	$tbl,r1,$rounds,lsl#4
+	str	$rounds,[r1,#240]
+
+.Linv:	ldr	$s0,[$i1],#16
+	ldr	$s1,[$i1,#-12]
+	ldr	$s2,[$i1,#-8]
+	ldr	$s3,[$i1,#-4]
+	ldr	$t1,[$i2],#-16
+	ldr	$t2,[$i2,#16+4]
+	ldr	$t3,[$i2,#16+8]
+	ldr	$i3,[$i2,#16+12]
+	str	$s0,[$tbl],#-16
+	str	$s1,[$tbl,#16+4]
+	str	$s2,[$tbl,#16+8]
+	str	$s3,[$tbl,#16+12]
+	str	$t1,[$key],#16
+	str	$t2,[$key,#-12]
+	str	$t3,[$key,#-8]
+	str	$i3,[$key,#-4]
+	teq	$i1,$i2
+	bne	.Linv
+
+	ldr	$s0,[$i1]
+	ldr	$s1,[$i1,#4]
+	ldr	$s2,[$i1,#8]
+	ldr	$s3,[$i1,#12]
+	str	$s0,[$key]
+	str	$s1,[$key,#4]
+	str	$s2,[$key,#8]
+	str	$s3,[$key,#12]
+	sub	$key,$key,$rounds,lsl#3
+___
+$mask80=$i1;
+$mask1b=$i2;
+$mask7f=$i3;
+$code.=<<___;
+	ldr	$s0,[$key,#16]!		@ prefetch tp1
+	mov	$mask80,#0x80
+	mov	$mask1b,#0x1b
+	orr	$mask80,$mask80,#0x8000
+	orr	$mask1b,$mask1b,#0x1b00
+	orr	$mask80,$mask80,$mask80,lsl#16
+	orr	$mask1b,$mask1b,$mask1b,lsl#16
+	sub	$rounds,$rounds,#1
+	mvn	$mask7f,$mask80
+	mov	$rounds,$rounds,lsl#2	@ (rounds-1)*4
+
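+@ The loop below applies InvMixColumns to each round-key word in place.
+@ tp2/tp4/tp8 are GF(2^8) doublings computed on all four bytes of a
+@ word at once: bytes flagged by mask80 contribute a 0x1b reduction
+@ term, while the remaining bits are simply shifted left by one.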
+.Lmix:	and	$t1,$s0,$mask80
+	and	$s1,$s0,$mask7f
+	sub	$t1,$t1,$t1,lsr#7
+	and	$t1,$t1,$mask1b
+	eor	$s1,$t1,$s1,lsl#1	@ tp2
+
+	and	$t1,$s1,$mask80
+	and	$s2,$s1,$mask7f
+	sub	$t1,$t1,$t1,lsr#7
+	and	$t1,$t1,$mask1b
+	eor	$s2,$t1,$s2,lsl#1	@ tp4
+
+	and	$t1,$s2,$mask80
+	and	$s3,$s2,$mask7f
+	sub	$t1,$t1,$t1,lsr#7
+	and	$t1,$t1,$mask1b
+	eor	$s3,$t1,$s3,lsl#1	@ tp8
+
+	eor	$t1,$s1,$s2
+	eor	$t2,$s0,$s3		@ tp9
+	eor	$t1,$t1,$s3		@ tpe
+	eor	$t1,$t1,$s1,ror#24
+	eor	$t1,$t1,$t2,ror#24	@ ^= ROTATE(tpb=tp9^tp2,8)
+	eor	$t1,$t1,$s2,ror#16
+	eor	$t1,$t1,$t2,ror#16	@ ^= ROTATE(tpd=tp9^tp4,16)
+	eor	$t1,$t1,$t2,ror#8	@ ^= ROTATE(tp9,24)
+
+	ldr	$s0,[$key,#4]		@ prefetch tp1
+	str	$t1,[$key],#4
+	subs	$rounds,$rounds,#1
+	bne	.Lmix
+
+	mov	r0,#0
+#if __ARM_ARCH__>=5
+	ldmia	sp!,{r4-r12,pc}
+#else
+	ldmia   sp!,{r4-r12,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	bx	lr			@ interoperable with Thumb ISA:-)
+#endif
+.size	AES_set_enc2dec_key,.-AES_set_enc2dec_key
+
+.type	AES_Td,%object
+.align	5
+AES_Td:
+.word	0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96
+.word	0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393
+.word	0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25
+.word	0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f
+.word	0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1
+.word	0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6
+.word	0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da
+.word	0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844
+.word	0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd
+.word	0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4
+.word	0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45
+.word	0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94
+.word	0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7
+.word	0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a
+.word	0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5
+.word	0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c
+.word	0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1
+.word	0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a
+.word	0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75
+.word	0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051
+.word	0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46
+.word	0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff
+.word	0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77
+.word	0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb
+.word	0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000
+.word	0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e
+.word	0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927
+.word	0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a
+.word	0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e
+.word	0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16
+.word	0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d
+.word	0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8
+.word	0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd
+.word	0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34
+.word	0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163
+.word	0xd731dcca, 0x42638510, 0x13972240, 0x84c61120
+.word	0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d
+.word	0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0
+.word	0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422
+.word	0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef
+.word	0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36
+.word	0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4
+.word	0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662
+.word	0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5
+.word	0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3
+.word	0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b
+.word	0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8
+.word	0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6
+.word	0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6
+.word	0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0
+.word	0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815
+.word	0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f
+.word	0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df
+.word	0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f
+.word	0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e
+.word	0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713
+.word	0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89
+.word	0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c
+.word	0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf
+.word	0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86
+.word	0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f
+.word	0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541
+.word	0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190
+.word	0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742
+@ Td4[256]
+.byte	0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+.byte	0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+.byte	0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+.byte	0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+.byte	0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+.byte	0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+.byte	0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+.byte	0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+.byte	0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+.byte	0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+.byte	0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+.byte	0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+.byte	0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+.byte	0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+.byte	0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+.byte	0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+.byte	0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+.byte	0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+.byte	0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+.byte	0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+.byte	0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+.byte	0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+.byte	0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+.byte	0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+.byte	0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+.byte	0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+.byte	0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+.byte	0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+.byte	0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+.byte	0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+.byte	0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+.byte	0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+.size	AES_Td,.-AES_Td
+
+@ void asm_AES_decrypt(const unsigned char *in, unsigned char *out,
+@ 		       const AES_KEY *key) {
+.global asm_AES_decrypt
+.type   asm_AES_decrypt,%function
+.align	5
+asm_AES_decrypt:
+#if __ARM_ARCH__<7
+	sub	r3,pc,#8		@ asm_AES_decrypt
+#else
+	adr	r3,asm_AES_decrypt
+#endif
+	stmdb   sp!,{r1,r4-r12,lr}
+#ifdef	__APPLE__
+	adr	$tbl,AES_Td
+#else
+	sub	$tbl,r3,#asm_AES_decrypt-AES_Td	@ Td
+#endif
+	mov	$rounds,r0		@ inp
+	mov	$key,r2
+#if __ARM_ARCH__<7
+	ldrb	$s0,[$rounds,#3]	@ load input data in endian-neutral
+	ldrb	$t1,[$rounds,#2]	@ manner...
+	ldrb	$t2,[$rounds,#1]
+	ldrb	$t3,[$rounds,#0]
+	orr	$s0,$s0,$t1,lsl#8
+	ldrb	$s1,[$rounds,#7]
+	orr	$s0,$s0,$t2,lsl#16
+	ldrb	$t1,[$rounds,#6]
+	orr	$s0,$s0,$t3,lsl#24
+	ldrb	$t2,[$rounds,#5]
+	ldrb	$t3,[$rounds,#4]
+	orr	$s1,$s1,$t1,lsl#8
+	ldrb	$s2,[$rounds,#11]
+	orr	$s1,$s1,$t2,lsl#16
+	ldrb	$t1,[$rounds,#10]
+	orr	$s1,$s1,$t3,lsl#24
+	ldrb	$t2,[$rounds,#9]
+	ldrb	$t3,[$rounds,#8]
+	orr	$s2,$s2,$t1,lsl#8
+	ldrb	$s3,[$rounds,#15]
+	orr	$s2,$s2,$t2,lsl#16
+	ldrb	$t1,[$rounds,#14]
+	orr	$s2,$s2,$t3,lsl#24
+	ldrb	$t2,[$rounds,#13]
+	ldrb	$t3,[$rounds,#12]
+	orr	$s3,$s3,$t1,lsl#8
+	orr	$s3,$s3,$t2,lsl#16
+	orr	$s3,$s3,$t3,lsl#24
+#else
+	ldr	$s0,[$rounds,#0]
+	ldr	$s1,[$rounds,#4]
+	ldr	$s2,[$rounds,#8]
+	ldr	$s3,[$rounds,#12]
+#ifdef __ARMEL__
+	rev	$s0,$s0
+	rev	$s1,$s1
+	rev	$s2,$s2
+	rev	$s3,$s3
+#endif
+#endif
+	bl	_armv4_AES_decrypt
+
+	ldr	$rounds,[sp],#4		@ pop out
+#if __ARM_ARCH__>=7
+#ifdef __ARMEL__
+	rev	$s0,$s0
+	rev	$s1,$s1
+	rev	$s2,$s2
+	rev	$s3,$s3
+#endif
+	str	$s0,[$rounds,#0]
+	str	$s1,[$rounds,#4]
+	str	$s2,[$rounds,#8]
+	str	$s3,[$rounds,#12]
+#else
+	mov	$t1,$s0,lsr#24		@ write output in endian-neutral
+	mov	$t2,$s0,lsr#16		@ manner...
+	mov	$t3,$s0,lsr#8
+	strb	$t1,[$rounds,#0]
+	strb	$t2,[$rounds,#1]
+	mov	$t1,$s1,lsr#24
+	strb	$t3,[$rounds,#2]
+	mov	$t2,$s1,lsr#16
+	strb	$s0,[$rounds,#3]
+	mov	$t3,$s1,lsr#8
+	strb	$t1,[$rounds,#4]
+	strb	$t2,[$rounds,#5]
+	mov	$t1,$s2,lsr#24
+	strb	$t3,[$rounds,#6]
+	mov	$t2,$s2,lsr#16
+	strb	$s1,[$rounds,#7]
+	mov	$t3,$s2,lsr#8
+	strb	$t1,[$rounds,#8]
+	strb	$t2,[$rounds,#9]
+	mov	$t1,$s3,lsr#24
+	strb	$t3,[$rounds,#10]
+	mov	$t2,$s3,lsr#16
+	strb	$s2,[$rounds,#11]
+	mov	$t3,$s3,lsr#8
+	strb	$t1,[$rounds,#12]
+	strb	$t2,[$rounds,#13]
+	strb	$t3,[$rounds,#14]
+	strb	$s3,[$rounds,#15]
+#endif
+#if __ARM_ARCH__>=5
+	ldmia	sp!,{r4-r12,pc}
+#else
+	ldmia   sp!,{r4-r12,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	bx	lr			@ interoperable with Thumb ISA:-)
+#endif
+.size	asm_AES_decrypt,.-asm_AES_decrypt
+
+.type   _armv4_AES_decrypt,%function
+.align	2
+_armv4_AES_decrypt:
+	str	lr,[sp,#-4]!		@ push lr
+	ldmia	$key!,{$t1-$i1}
+	eor	$s0,$s0,$t1
+	ldr	$rounds,[$key,#240-16]
+	eor	$s1,$s1,$t2
+	eor	$s2,$s2,$t3
+	eor	$s3,$s3,$i1
+	sub	$rounds,$rounds,#1
+	mov	lr,#255
+
+	and	$i1,lr,$s0,lsr#16
+	and	$i2,lr,$s0,lsr#8
+	and	$i3,lr,$s0
+	mov	$s0,$s0,lsr#24
+.Ldec_loop:
+	ldr	$t1,[$tbl,$i1,lsl#2]	@ Td1[s0>>16]
+	and	$i1,lr,$s1		@ i0
+	ldr	$t2,[$tbl,$i2,lsl#2]	@ Td2[s0>>8]
+	and	$i2,lr,$s1,lsr#16
+	ldr	$t3,[$tbl,$i3,lsl#2]	@ Td3[s0>>0]
+	and	$i3,lr,$s1,lsr#8
+	ldr	$s0,[$tbl,$s0,lsl#2]	@ Td0[s0>>24]
+	mov	$s1,$s1,lsr#24
+
+	ldr	$i1,[$tbl,$i1,lsl#2]	@ Td3[s1>>0]
+	ldr	$i2,[$tbl,$i2,lsl#2]	@ Td1[s1>>16]
+	ldr	$i3,[$tbl,$i3,lsl#2]	@ Td2[s1>>8]
+	eor	$s0,$s0,$i1,ror#24
+	ldr	$s1,[$tbl,$s1,lsl#2]	@ Td0[s1>>24]
+	and	$i1,lr,$s2,lsr#8	@ i0
+	eor	$t2,$i2,$t2,ror#8
+	and	$i2,lr,$s2		@ i1
+	eor	$t3,$i3,$t3,ror#8
+	and	$i3,lr,$s2,lsr#16
+	ldr	$i1,[$tbl,$i1,lsl#2]	@ Td2[s2>>8]
+	eor	$s1,$s1,$t1,ror#8
+	ldr	$i2,[$tbl,$i2,lsl#2]	@ Td3[s2>>0]
+	mov	$s2,$s2,lsr#24
+
+	ldr	$i3,[$tbl,$i3,lsl#2]	@ Td1[s2>>16]
+	eor	$s0,$s0,$i1,ror#16
+	ldr	$s2,[$tbl,$s2,lsl#2]	@ Td0[s2>>24]
+	and	$i1,lr,$s3,lsr#16	@ i0
+	eor	$s1,$s1,$i2,ror#24
+	and	$i2,lr,$s3,lsr#8	@ i1
+	eor	$t3,$i3,$t3,ror#8
+	and	$i3,lr,$s3		@ i2
+	ldr	$i1,[$tbl,$i1,lsl#2]	@ Td1[s3>>16]
+	eor	$s2,$s2,$t2,ror#8
+	ldr	$i2,[$tbl,$i2,lsl#2]	@ Td2[s3>>8]
+	mov	$s3,$s3,lsr#24
+
+	ldr	$i3,[$tbl,$i3,lsl#2]	@ Td3[s3>>0]
+	eor	$s0,$s0,$i1,ror#8
+	ldr	$i1,[$key],#16
+	eor	$s1,$s1,$i2,ror#16
+	ldr	$s3,[$tbl,$s3,lsl#2]	@ Td0[s3>>24]
+	eor	$s2,$s2,$i3,ror#24
+
+	ldr	$t1,[$key,#-12]
+	eor	$s0,$s0,$i1
+	ldr	$t2,[$key,#-8]
+	eor	$s3,$s3,$t3,ror#8
+	ldr	$t3,[$key,#-4]
+	and	$i1,lr,$s0,lsr#16
+	eor	$s1,$s1,$t1
+	and	$i2,lr,$s0,lsr#8
+	eor	$s2,$s2,$t2
+	and	$i3,lr,$s0
+	eor	$s3,$s3,$t3
+	mov	$s0,$s0,lsr#24
+
+	subs	$rounds,$rounds,#1
+	bne	.Ldec_loop
+
+	add	$tbl,$tbl,#1024
+
+	ldr	$t2,[$tbl,#0]		@ prefetch Td4
+	ldr	$t3,[$tbl,#32]
+	ldr	$t1,[$tbl,#64]
+	ldr	$t2,[$tbl,#96]
+	ldr	$t3,[$tbl,#128]
+	ldr	$t1,[$tbl,#160]
+	ldr	$t2,[$tbl,#192]
+	ldr	$t3,[$tbl,#224]
+
+	ldrb	$s0,[$tbl,$s0]		@ Td4[s0>>24]
+	ldrb	$t1,[$tbl,$i1]		@ Td4[s0>>16]
+	and	$i1,lr,$s1		@ i0
+	ldrb	$t2,[$tbl,$i2]		@ Td4[s0>>8]
+	and	$i2,lr,$s1,lsr#16
+	ldrb	$t3,[$tbl,$i3]		@ Td4[s0>>0]
+	and	$i3,lr,$s1,lsr#8
+
+	add	$s1,$tbl,$s1,lsr#24
+	ldrb	$i1,[$tbl,$i1]		@ Td4[s1>>0]
+	ldrb	$s1,[$s1]		@ Td4[s1>>24]
+	ldrb	$i2,[$tbl,$i2]		@ Td4[s1>>16]
+	eor	$s0,$i1,$s0,lsl#24
+	ldrb	$i3,[$tbl,$i3]		@ Td4[s1>>8]
+	eor	$s1,$t1,$s1,lsl#8
+	and	$i1,lr,$s2,lsr#8	@ i0
+	eor	$t2,$t2,$i2,lsl#8
+	and	$i2,lr,$s2		@ i1
+	ldrb	$i1,[$tbl,$i1]		@ Td4[s2>>8]
+	eor	$t3,$t3,$i3,lsl#8
+	ldrb	$i2,[$tbl,$i2]		@ Td4[s2>>0]
+	and	$i3,lr,$s2,lsr#16
+
+	add	$s2,$tbl,$s2,lsr#24
+	ldrb	$s2,[$s2]		@ Td4[s2>>24]
+	eor	$s0,$s0,$i1,lsl#8
+	ldrb	$i3,[$tbl,$i3]		@ Td4[s2>>16]
+	eor	$s1,$i2,$s1,lsl#16
+	and	$i1,lr,$s3,lsr#16	@ i0
+	eor	$s2,$t2,$s2,lsl#16
+	and	$i2,lr,$s3,lsr#8	@ i1
+	ldrb	$i1,[$tbl,$i1]		@ Td4[s3>>16]
+	eor	$t3,$t3,$i3,lsl#16
+	ldrb	$i2,[$tbl,$i2]		@ Td4[s3>>8]
+	and	$i3,lr,$s3		@ i2
+
+	add	$s3,$tbl,$s3,lsr#24
+	ldrb	$i3,[$tbl,$i3]		@ Td4[s3>>0]
+	ldrb	$s3,[$s3]		@ Td4[s3>>24]
+	eor	$s0,$s0,$i1,lsl#16
+	ldr	$i1,[$key,#0]
+	eor	$s1,$s1,$i2,lsl#8
+	ldr	$t1,[$key,#4]
+	eor	$s2,$i3,$s2,lsl#8
+	ldr	$t2,[$key,#8]
+	eor	$s3,$t3,$s3,lsl#24
+	ldr	$t3,[$key,#12]
+
+	eor	$s0,$s0,$i1
+	eor	$s1,$s1,$t1
+	eor	$s2,$s2,$t2
+	eor	$s3,$s3,$t3
+
+	sub	$tbl,$tbl,#1024
+	ldr	pc,[sp],#4		@ pop and return
+.size	_armv4_AES_decrypt,.-_armv4_AES_decrypt
+.asciz	"AES for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
+.align	2
+
+#endif
+___
+
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
+$code =~ s/\bret\b/bx\tlr/gm;
+
+open SELF,$0;
+while(<SELF>) {
+	next if (/^#!/);
+	last if (!s/^#/@/ and !/^$/);
+	print;
+}
+close SELF;
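+
+# (The loop above copies this script's leading '#' comment banner into
+# the generated file as '@' comments, stopping at the first line that
+# is neither a comment nor blank.)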
+
+print $code;
+close STDOUT;	# enforce flush
diff --git a/src/crypto/fipsmodule/aes/asm/aes-x86_64.pl b/src/crypto/fipsmodule/aes/asm/aes-x86_64.pl
new file mode 100755
index 0000000..362c1c9
--- /dev/null
+++ b/src/crypto/fipsmodule/aes/asm/aes-x86_64.pl
@@ -0,0 +1,2804 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# Version 2.1.
+#
+# aes-*-cbc benchmarks are improved by >70% [compared to gcc 3.3.2 on
+# an Opteron 240 CPU], and the module carries all the bells and
+# whistles from the 32-bit version [you'll notice a lot of
+# resemblance]: compressed S-boxes in little-endian byte order,
+# prefetch of these tables in CBC mode, avoidance of L1 cache aliasing
+# between the stack frame, the key schedule and the already mentioned
+# tables, compressed Td4...
+#
+# Performance in number of cycles per processed byte for 128-bit key:
+#
+#		ECB encrypt	ECB decrypt	CBC large chunk
+# AMD64		33		43		13.0
+# EM64T		38		56		18.6(*)
+# Core 2	30		42		14.5(*)
+# Atom		65		86		32.1(*)
+#
+# (*) with hyper-threading off
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
+$verticalspin=1;	# unlike in the 32-bit version, $verticalspin
+			# performs ~15% better on both AMD and Intel cores
+$speed_limit=512;	# see aes-586.pl for details
+
+$code=".text\n";
+
+$s0="%eax";
+$s1="%ebx";
+$s2="%ecx";
+$s3="%edx";
+$acc0="%esi";	$mask80="%rsi";
+$acc1="%edi";	$maskfe="%rdi";
+$acc2="%ebp";	$mask1b="%rbp";
+$inp="%r8";
+$out="%r9";
+$t0="%r10d";
+$t1="%r11d";
+$t2="%r12d";
+$rnds="%r13d";
+$sbox="%r14";
+$key="%r15";
+
+sub hi() { my $r=shift;	$r =~ s/%[er]([a-d])x/%\1h/;	$r; }
+sub lo() { my $r=shift;	$r =~ s/%[er]([a-d])x/%\1l/;
+			$r =~ s/%[er]([sd]i)/%\1l/;
+			$r =~ s/%(r[0-9]+)[d]?/%\1b/;	$r; }
+sub LO() { my $r=shift; $r =~ s/%r([a-z]+)/%e\1/;
+			$r =~ s/%r([0-9]+)/%r\1d/;	$r; }
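+# For example, &lo("%eax") yields "%al", &hi("%eax") yields "%ah", and
+# &LO("%r10") yields "%r10d", per the substitutions above.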
+sub _data_word()
+{ my $i;
+    while(defined($i=shift)) { $code.=sprintf".long\t0x%08x,0x%08x\n",$i,$i; }
+}
+sub data_word()
+{ my $i;
+  my $last=pop(@_);
+    $code.=".long\t";
+    while(defined($i=shift)) { $code.=sprintf"0x%08x,",$i; }
+    $code.=sprintf"0x%08x\n",$last;
+}
+
+sub data_byte()
+{ my $i;
+  my $last=pop(@_);
+    $code.=".byte\t";
+    while(defined($i=shift)) { $code.=sprintf"0x%02x,",$i&0xff; }
+    $code.=sprintf"0x%02x\n",$last&0xff;
+}
+
+sub encvert()
+{ my $t3="%r8d";	# zaps $inp!
+
+$code.=<<___;
+	# favor 3-way issue Opteron pipeline...
+	movzb	`&lo("$s0")`,$acc0
+	movzb	`&lo("$s1")`,$acc1
+	movzb	`&lo("$s2")`,$acc2
+	mov	0($sbox,$acc0,8),$t0
+	mov	0($sbox,$acc1,8),$t1
+	mov	0($sbox,$acc2,8),$t2
+
+	movzb	`&hi("$s1")`,$acc0
+	movzb	`&hi("$s2")`,$acc1
+	movzb	`&lo("$s3")`,$acc2
+	xor	3($sbox,$acc0,8),$t0
+	xor	3($sbox,$acc1,8),$t1
+	mov	0($sbox,$acc2,8),$t3
+
+	movzb	`&hi("$s3")`,$acc0
+	shr	\$16,$s2
+	movzb	`&hi("$s0")`,$acc2
+	xor	3($sbox,$acc0,8),$t2
+	shr	\$16,$s3
+	xor	3($sbox,$acc2,8),$t3
+
+	shr	\$16,$s1
+	lea	16($key),$key
+	shr	\$16,$s0
+
+	movzb	`&lo("$s2")`,$acc0
+	movzb	`&lo("$s3")`,$acc1
+	movzb	`&lo("$s0")`,$acc2
+	xor	2($sbox,$acc0,8),$t0
+	xor	2($sbox,$acc1,8),$t1
+	xor	2($sbox,$acc2,8),$t2
+
+	movzb	`&hi("$s3")`,$acc0
+	movzb	`&hi("$s0")`,$acc1
+	movzb	`&lo("$s1")`,$acc2
+	xor	1($sbox,$acc0,8),$t0
+	xor	1($sbox,$acc1,8),$t1
+	xor	2($sbox,$acc2,8),$t3
+
+	mov	12($key),$s3
+	movzb	`&hi("$s1")`,$acc1
+	movzb	`&hi("$s2")`,$acc2
+	mov	0($key),$s0
+	xor	1($sbox,$acc1,8),$t2
+	xor	1($sbox,$acc2,8),$t3
+
+	mov	4($key),$s1
+	mov	8($key),$s2
+	xor	$t0,$s0
+	xor	$t1,$s1
+	xor	$t2,$s2
+	xor	$t3,$s3
+___
+}
+
+sub enclastvert()
+{ my $t3="%r8d";	# zaps $inp!
+
+$code.=<<___;
+	movzb	`&lo("$s0")`,$acc0
+	movzb	`&lo("$s1")`,$acc1
+	movzb	`&lo("$s2")`,$acc2
+	movzb	2($sbox,$acc0,8),$t0
+	movzb	2($sbox,$acc1,8),$t1
+	movzb	2($sbox,$acc2,8),$t2
+
+	movzb	`&lo("$s3")`,$acc0
+	movzb	`&hi("$s1")`,$acc1
+	movzb	`&hi("$s2")`,$acc2
+	movzb	2($sbox,$acc0,8),$t3
+	mov	0($sbox,$acc1,8),$acc1	#$t0
+	mov	0($sbox,$acc2,8),$acc2	#$t1
+
+	and	\$0x0000ff00,$acc1
+	and	\$0x0000ff00,$acc2
+
+	xor	$acc1,$t0
+	xor	$acc2,$t1
+	shr	\$16,$s2
+
+	movzb	`&hi("$s3")`,$acc0
+	movzb	`&hi("$s0")`,$acc1
+	shr	\$16,$s3
+	mov	0($sbox,$acc0,8),$acc0	#$t2
+	mov	0($sbox,$acc1,8),$acc1	#$t3
+
+	and	\$0x0000ff00,$acc0
+	and	\$0x0000ff00,$acc1
+	shr	\$16,$s1
+	xor	$acc0,$t2
+	xor	$acc1,$t3
+	shr	\$16,$s0
+
+	movzb	`&lo("$s2")`,$acc0
+	movzb	`&lo("$s3")`,$acc1
+	movzb	`&lo("$s0")`,$acc2
+	mov	0($sbox,$acc0,8),$acc0	#$t0
+	mov	0($sbox,$acc1,8),$acc1	#$t1
+	mov	0($sbox,$acc2,8),$acc2	#$t2
+
+	and	\$0x00ff0000,$acc0
+	and	\$0x00ff0000,$acc1
+	and	\$0x00ff0000,$acc2
+
+	xor	$acc0,$t0
+	xor	$acc1,$t1
+	xor	$acc2,$t2
+
+	movzb	`&lo("$s1")`,$acc0
+	movzb	`&hi("$s3")`,$acc1
+	movzb	`&hi("$s0")`,$acc2
+	mov	0($sbox,$acc0,8),$acc0	#$t3
+	mov	2($sbox,$acc1,8),$acc1	#$t0
+	mov	2($sbox,$acc2,8),$acc2	#$t1
+
+	and	\$0x00ff0000,$acc0
+	and	\$0xff000000,$acc1
+	and	\$0xff000000,$acc2
+
+	xor	$acc0,$t3
+	xor	$acc1,$t0
+	xor	$acc2,$t1
+
+	movzb	`&hi("$s1")`,$acc0
+	movzb	`&hi("$s2")`,$acc1
+	mov	16+12($key),$s3
+	mov	2($sbox,$acc0,8),$acc0	#$t2
+	mov	2($sbox,$acc1,8),$acc1	#$t3
+	mov	16+0($key),$s0
+
+	and	\$0xff000000,$acc0
+	and	\$0xff000000,$acc1
+
+	xor	$acc0,$t2
+	xor	$acc1,$t3
+
+	mov	16+4($key),$s1
+	mov	16+8($key),$s2
+	xor	$t0,$s0
+	xor	$t1,$s1
+	xor	$t2,$s2
+	xor	$t3,$s3
+___
+}
+
+sub encstep()
+{ my ($i,@s) = @_;
+  my $tmp0=$acc0;
+  my $tmp1=$acc1;
+  my $tmp2=$acc2;
+  my $out=($t0,$t1,$t2,$s[0])[$i];
+
+	if ($i==3) {
+		$tmp0=$s[1];
+		$tmp1=$s[2];
+		$tmp2=$s[3];
+	}
+	$code.="	movzb	".&lo($s[0]).",$out\n";
+	$code.="	mov	$s[2],$tmp1\n"		if ($i!=3);
+	$code.="	lea	16($key),$key\n"	if ($i==0);
+
+	$code.="	movzb	".&hi($s[1]).",$tmp0\n";
+	$code.="	mov	0($sbox,$out,8),$out\n";
+
+	$code.="	shr	\$16,$tmp1\n";
+	$code.="	mov	$s[3],$tmp2\n"		if ($i!=3);
+	$code.="	xor	3($sbox,$tmp0,8),$out\n";
+
+	$code.="	movzb	".&lo($tmp1).",$tmp1\n";
+	$code.="	shr	\$24,$tmp2\n";
+	$code.="	xor	4*$i($key),$out\n";
+
+	$code.="	xor	2($sbox,$tmp1,8),$out\n";
+	$code.="	xor	1($sbox,$tmp2,8),$out\n";
+
+	$code.="	mov	$t0,$s[1]\n"		if ($i==3);
+	$code.="	mov	$t1,$s[2]\n"		if ($i==3);
+	$code.="	mov	$t2,$s[3]\n"		if ($i==3);
+	$code.="\n";
+}
+
+sub enclast()
+{ my ($i,@s)=@_;
+  my $tmp0=$acc0;
+  my $tmp1=$acc1;
+  my $tmp2=$acc2;
+  my $out=($t0,$t1,$t2,$s[0])[$i];
+
+	if ($i==3) {
+		$tmp0=$s[1];
+		$tmp1=$s[2];
+		$tmp2=$s[3];
+	}
+	$code.="	movzb	".&lo($s[0]).",$out\n";
+	$code.="	mov	$s[2],$tmp1\n"		if ($i!=3);
+
+	$code.="	mov	2($sbox,$out,8),$out\n";
+	$code.="	shr	\$16,$tmp1\n";
+	$code.="	mov	$s[3],$tmp2\n"		if ($i!=3);
+
+	$code.="	and	\$0x000000ff,$out\n";
+	$code.="	movzb	".&hi($s[1]).",$tmp0\n";
+	$code.="	movzb	".&lo($tmp1).",$tmp1\n";
+	$code.="	shr	\$24,$tmp2\n";
+
+	$code.="	mov	0($sbox,$tmp0,8),$tmp0\n";
+	$code.="	mov	0($sbox,$tmp1,8),$tmp1\n";
+	$code.="	mov	2($sbox,$tmp2,8),$tmp2\n";
+
+	$code.="	and	\$0x0000ff00,$tmp0\n";
+	$code.="	and	\$0x00ff0000,$tmp1\n";
+	$code.="	and	\$0xff000000,$tmp2\n";
+
+	$code.="	xor	$tmp0,$out\n";
+	$code.="	mov	$t0,$s[1]\n"		if ($i==3);
+	$code.="	xor	$tmp1,$out\n";
+	$code.="	mov	$t1,$s[2]\n"		if ($i==3);
+	$code.="	xor	$tmp2,$out\n";
+	$code.="	mov	$t2,$s[3]\n"		if ($i==3);
+	$code.="\n";
+}
+
+$code.=<<___;
+.type	_x86_64_AES_encrypt,\@abi-omnipotent
+.align	16
+_x86_64_AES_encrypt:
+	xor	0($key),$s0			# xor with key
+	xor	4($key),$s1
+	xor	8($key),$s2
+	xor	12($key),$s3
+
+	mov	240($key),$rnds			# load key->rounds
+	sub	\$1,$rnds
+	jmp	.Lenc_loop
+.align	16
+.Lenc_loop:
+___
+	if ($verticalspin) { &encvert(); }
+	else {	&encstep(0,$s0,$s1,$s2,$s3);
+		&encstep(1,$s1,$s2,$s3,$s0);
+		&encstep(2,$s2,$s3,$s0,$s1);
+		&encstep(3,$s3,$s0,$s1,$s2);
+	}
+$code.=<<___;
+	sub	\$1,$rnds
+	jnz	.Lenc_loop
+___
+	if ($verticalspin) { &enclastvert(); }
+	else {	&enclast(0,$s0,$s1,$s2,$s3);
+		&enclast(1,$s1,$s2,$s3,$s0);
+		&enclast(2,$s2,$s3,$s0,$s1);
+		&enclast(3,$s3,$s0,$s1,$s2);
+		$code.=<<___;
+		xor	16+0($key),$s0		# xor with key
+		xor	16+4($key),$s1
+		xor	16+8($key),$s2
+		xor	16+12($key),$s3
+___
+	}
+$code.=<<___;
+	.byte	0xf3,0xc3			# rep ret
+.size	_x86_64_AES_encrypt,.-_x86_64_AES_encrypt
+___
+
+# it's possible to implement this by shifting tN by 8, filling the least
+# significant byte with a byte load and finally bswap-ing at the end,
+# but such a partial register load kills Core 2...
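+# (for reference, that rejected variant would have looked roughly like
+# the hypothetical C sketch
+#	for (i = 0; i < 4; i++) { t = (t << 8) | sbox[(s >> 8*i) & 0xff]; }
+#	t = bswap32(t);
+# which is not what the code below generates)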
+sub enccompactvert()
+{ my ($t3,$t4,$t5)=("%r8d","%r9d","%r13d");
+
+$code.=<<___;
+	movzb	`&lo("$s0")`,$t0
+	movzb	`&lo("$s1")`,$t1
+	movzb	`&lo("$s2")`,$t2
+	movzb	`&lo("$s3")`,$t3
+	movzb	`&hi("$s1")`,$acc0
+	movzb	`&hi("$s2")`,$acc1
+	shr	\$16,$s2
+	movzb	`&hi("$s3")`,$acc2
+	movzb	($sbox,$t0,1),$t0
+	movzb	($sbox,$t1,1),$t1
+	movzb	($sbox,$t2,1),$t2
+	movzb	($sbox,$t3,1),$t3
+
+	movzb	($sbox,$acc0,1),$t4	#$t0
+	movzb	`&hi("$s0")`,$acc0
+	movzb	($sbox,$acc1,1),$t5	#$t1
+	movzb	`&lo("$s2")`,$acc1
+	movzb	($sbox,$acc2,1),$acc2	#$t2
+	movzb	($sbox,$acc0,1),$acc0	#$t3
+
+	shl	\$8,$t4
+	shr	\$16,$s3
+	shl	\$8,$t5
+	xor	$t4,$t0
+	shr	\$16,$s0
+	movzb	`&lo("$s3")`,$t4
+	shr	\$16,$s1
+	xor	$t5,$t1
+	shl	\$8,$acc2
+	movzb	`&lo("$s0")`,$t5
+	movzb	($sbox,$acc1,1),$acc1	#$t0
+	xor	$acc2,$t2
+
+	shl	\$8,$acc0
+	movzb	`&lo("$s1")`,$acc2
+	shl	\$16,$acc1
+	xor	$acc0,$t3
+	movzb	($sbox,$t4,1),$t4	#$t1
+	movzb	`&hi("$s3")`,$acc0
+	movzb	($sbox,$t5,1),$t5	#$t2
+	xor	$acc1,$t0
+
+	shr	\$8,$s2
+	movzb	`&hi("$s0")`,$acc1
+	shl	\$16,$t4
+	shr	\$8,$s1
+	shl	\$16,$t5
+	xor	$t4,$t1
+	movzb	($sbox,$acc2,1),$acc2	#$t3
+	movzb	($sbox,$acc0,1),$acc0	#$t0
+	movzb	($sbox,$acc1,1),$acc1	#$t1
+	movzb	($sbox,$s2,1),$s3	#$t3
+	movzb	($sbox,$s1,1),$s2	#$t2
+
+	shl	\$16,$acc2
+	xor	$t5,$t2
+	shl	\$24,$acc0
+	xor	$acc2,$t3
+	shl	\$24,$acc1
+	xor	$acc0,$t0
+	shl	\$24,$s3
+	xor	$acc1,$t1
+	shl	\$24,$s2
+	mov	$t0,$s0
+	mov	$t1,$s1
+	xor	$t2,$s2
+	xor	$t3,$s3
+___
+}
+
+sub enctransform_ref()
+{ my $sn = shift;
+  my ($acc,$r2,$tmp)=("%r8d","%r9d","%r13d");
+
+$code.=<<___;
+	mov	$sn,$acc
+	and	\$0x80808080,$acc
+	mov	$acc,$tmp
+	shr	\$7,$tmp
+	lea	($sn,$sn),$r2
+	sub	$tmp,$acc
+	and	\$0xfefefefe,$r2
+	and	\$0x1b1b1b1b,$acc
+	mov	$sn,$tmp
+	xor	$acc,$r2
+
+	xor	$r2,$sn
+	rol	\$24,$sn
+	xor	$r2,$sn
+	ror	\$16,$tmp
+	xor	$tmp,$sn
+	ror	\$8,$tmp
+	xor	$tmp,$sn
+___
+}
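+# The mask trio above (0x80808080, 0xfefefefe, 0x1b1b1b1b) performs four
+# GF(2^8) doublings (xtime) at once inside one 32-bit register: extract
+# each byte's MSB, shift left with cross-byte carries masked off, then
+# fold the AES polynomial 0x1b back in wherever an MSB was set.  Below,
+# a minimal Perl sketch of the same packed doubling; xtime4 is purely
+# illustrative and is never called by this generator:
+sub xtime4 {
+	my $x = shift;					# four packed bytes
+	my $msb = $x & 0x80808080;			# MSB of every byte
+	my $poly = ($msb - ($msb >> 7)) & 0x1b1b1b1b;	# 0x1b where MSB was set
+	return ((($x << 1) & 0xfefefefe) ^ $poly);	# bytewise xtime()
+}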
+
+# unlike in the decrypt case, it does not pay off to parallelize enctransform
+sub enctransform()
+{ my ($t3,$r20,$r21)=($acc2,"%r8d","%r9d");
+
+$code.=<<___;
+	mov	\$0x80808080,$t0
+	mov	\$0x80808080,$t1
+	and	$s0,$t0
+	and	$s1,$t1
+	mov	$t0,$acc0
+	mov	$t1,$acc1
+	shr	\$7,$t0
+	lea	($s0,$s0),$r20
+	shr	\$7,$t1
+	lea	($s1,$s1),$r21
+	sub	$t0,$acc0
+	sub	$t1,$acc1
+	and	\$0xfefefefe,$r20
+	and	\$0xfefefefe,$r21
+	and	\$0x1b1b1b1b,$acc0
+	and	\$0x1b1b1b1b,$acc1
+	mov	$s0,$t0
+	mov	$s1,$t1
+	xor	$acc0,$r20
+	xor	$acc1,$r21
+
+	xor	$r20,$s0
+	xor	$r21,$s1
+	 mov	\$0x80808080,$t2
+	rol	\$24,$s0
+	 mov	\$0x80808080,$t3
+	rol	\$24,$s1
+	 and	$s2,$t2
+	 and	$s3,$t3
+	xor	$r20,$s0
+	xor	$r21,$s1
+	 mov	$t2,$acc0
+	ror	\$16,$t0
+	 mov	$t3,$acc1
+	ror	\$16,$t1
+	 lea	($s2,$s2),$r20
+	 shr	\$7,$t2
+	xor	$t0,$s0
+	 shr	\$7,$t3
+	xor	$t1,$s1
+	ror	\$8,$t0
+	 lea	($s3,$s3),$r21
+	ror	\$8,$t1
+	 sub	$t2,$acc0
+	 sub	$t3,$acc1
+	xor	$t0,$s0
+	xor	$t1,$s1
+
+	and	\$0xfefefefe,$r20
+	and	\$0xfefefefe,$r21
+	and	\$0x1b1b1b1b,$acc0
+	and	\$0x1b1b1b1b,$acc1
+	mov	$s2,$t2
+	mov	$s3,$t3
+	xor	$acc0,$r20
+	xor	$acc1,$r21
+
+	ror	\$16,$t2
+	xor	$r20,$s2
+	ror	\$16,$t3
+	xor	$r21,$s3
+	rol	\$24,$s2
+	mov	0($sbox),$acc0			# prefetch Te4
+	rol	\$24,$s3
+	xor	$r20,$s2
+	mov	64($sbox),$acc1
+	xor	$r21,$s3
+	mov	128($sbox),$r20
+	xor	$t2,$s2
+	ror	\$8,$t2
+	xor	$t3,$s3
+	ror	\$8,$t3
+	xor	$t2,$s2
+	mov	192($sbox),$r21
+	xor	$t3,$s3
+___
+}
+
+$code.=<<___;
+.type	_x86_64_AES_encrypt_compact,\@abi-omnipotent
+.align	16
+_x86_64_AES_encrypt_compact:
+	lea	128($sbox),$inp			# size optimization
+	mov	0-128($inp),$acc1		# prefetch Te4
+	mov	32-128($inp),$acc2
+	mov	64-128($inp),$t0
+	mov	96-128($inp),$t1
+	mov	128-128($inp),$acc1
+	mov	160-128($inp),$acc2
+	mov	192-128($inp),$t0
+	mov	224-128($inp),$t1
+	jmp	.Lenc_loop_compact
+.align	16
+.Lenc_loop_compact:
+		xor	0($key),$s0		# xor with key
+		xor	4($key),$s1
+		xor	8($key),$s2
+		xor	12($key),$s3
+		lea	16($key),$key
+___
+		&enccompactvert();
+$code.=<<___;
+		cmp	16(%rsp),$key
+		je	.Lenc_compact_done
+___
+		&enctransform();
+$code.=<<___;
+	jmp	.Lenc_loop_compact
+.align	16
+.Lenc_compact_done:
+	xor	0($key),$s0
+	xor	4($key),$s1
+	xor	8($key),$s2
+	xor	12($key),$s3
+	.byte	0xf3,0xc3			# rep ret
+.size	_x86_64_AES_encrypt_compact,.-_x86_64_AES_encrypt_compact
+___
+
+# void asm_AES_encrypt (const void *inp,void *out,const AES_KEY *key);
+$code.=<<___;
+.align	16
+.globl	asm_AES_encrypt
+.type	asm_AES_encrypt,\@function,3
+.hidden	asm_AES_encrypt
+asm_AES_encrypt:
+	mov	%rsp,%rax
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+
+	# allocate frame "above" key schedule
+	lea	-63(%rdx),%rcx	# %rdx is key argument
+	and	\$-64,%rsp
+	sub	%rsp,%rcx
+	neg	%rcx
+	and	\$0x3c0,%rcx
+	sub	%rcx,%rsp
+	sub	\$32,%rsp
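+	# (net effect, informally: %rsp ends up 64-byte aligned and at
+	# roughly the key schedule's position modulo 1KB, which the Te4
+	# copy selection below builds on)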
+
+	mov	%rsi,16(%rsp)	# save out
+	mov	%rax,24(%rsp)	# save original stack pointer
+.Lenc_prologue:
+
+	mov	%rdx,$key
+	mov	240($key),$rnds	# load rounds
+
+	mov	0(%rdi),$s0	# load input vector
+	mov	4(%rdi),$s1
+	mov	8(%rdi),$s2
+	mov	12(%rdi),$s3
+
+	shl	\$4,$rnds
+	lea	($key,$rnds),%rbp
+	mov	$key,(%rsp)	# key schedule
+	mov	%rbp,8(%rsp)	# end of key schedule
+
+	# pick Te4 copy which can't "overlap" with stack frame or key schedule
+	lea	.LAES_Te+2048(%rip),$sbox
+	lea	768(%rsp),%rbp
+	sub	$sbox,%rbp
+	and	\$0x300,%rbp
+	lea	($sbox,%rbp),$sbox
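+	# (the four Te4 copies start at .LAES_Te+2048, 256 bytes apart;
+	# the "and \$0x300" picks the copy whose 1KB cache phase begins
+	# roughly 768 bytes past %rsp, past the frame and key schedule)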
+
+	call	_x86_64_AES_encrypt_compact
+
+	mov	16(%rsp),$out	# restore out
+	mov	24(%rsp),%rsi	# restore saved stack pointer
+	mov	$s0,0($out)	# write output vector
+	mov	$s1,4($out)
+	mov	$s2,8($out)
+	mov	$s3,12($out)
+
+	mov	-48(%rsi),%r15
+	mov	-40(%rsi),%r14
+	mov	-32(%rsi),%r13
+	mov	-24(%rsi),%r12
+	mov	-16(%rsi),%rbp
+	mov	-8(%rsi),%rbx
+	lea	(%rsi),%rsp
+.Lenc_epilogue:
+	ret
+.size	asm_AES_encrypt,.-asm_AES_encrypt
+___
+
+#------------------------------------------------------------------#
+
+sub decvert()
+{ my $t3="%r8d";	# zaps $inp!
+
+$code.=<<___;
+	# favor 3-way issue Opteron pipeline...
+	movzb	`&lo("$s0")`,$acc0
+	movzb	`&lo("$s1")`,$acc1
+	movzb	`&lo("$s2")`,$acc2
+	mov	0($sbox,$acc0,8),$t0
+	mov	0($sbox,$acc1,8),$t1
+	mov	0($sbox,$acc2,8),$t2
+
+	movzb	`&hi("$s3")`,$acc0
+	movzb	`&hi("$s0")`,$acc1
+	movzb	`&lo("$s3")`,$acc2
+	xor	3($sbox,$acc0,8),$t0
+	xor	3($sbox,$acc1,8),$t1
+	mov	0($sbox,$acc2,8),$t3
+
+	movzb	`&hi("$s1")`,$acc0
+	shr	\$16,$s0
+	movzb	`&hi("$s2")`,$acc2
+	xor	3($sbox,$acc0,8),$t2
+	shr	\$16,$s3
+	xor	3($sbox,$acc2,8),$t3
+
+	shr	\$16,$s1
+	lea	16($key),$key
+	shr	\$16,$s2
+
+	movzb	`&lo("$s2")`,$acc0
+	movzb	`&lo("$s3")`,$acc1
+	movzb	`&lo("$s0")`,$acc2
+	xor	2($sbox,$acc0,8),$t0
+	xor	2($sbox,$acc1,8),$t1
+	xor	2($sbox,$acc2,8),$t2
+
+	movzb	`&hi("$s1")`,$acc0
+	movzb	`&hi("$s2")`,$acc1
+	movzb	`&lo("$s1")`,$acc2
+	xor	1($sbox,$acc0,8),$t0
+	xor	1($sbox,$acc1,8),$t1
+	xor	2($sbox,$acc2,8),$t3
+
+	movzb	`&hi("$s3")`,$acc0
+	mov	12($key),$s3
+	movzb	`&hi("$s0")`,$acc2
+	xor	1($sbox,$acc0,8),$t2
+	mov	0($key),$s0
+	xor	1($sbox,$acc2,8),$t3
+
+	xor	$t0,$s0
+	mov	4($key),$s1
+	mov	8($key),$s2
+	xor	$t2,$s2
+	xor	$t1,$s1
+	xor	$t3,$s3
+___
+}
+
+sub declastvert()
+{ my $t3="%r8d";	# zaps $inp!
+
+$code.=<<___;
+	lea	2048($sbox),$sbox	# size optimization
+	movzb	`&lo("$s0")`,$acc0
+	movzb	`&lo("$s1")`,$acc1
+	movzb	`&lo("$s2")`,$acc2
+	movzb	($sbox,$acc0,1),$t0
+	movzb	($sbox,$acc1,1),$t1
+	movzb	($sbox,$acc2,1),$t2
+
+	movzb	`&lo("$s3")`,$acc0
+	movzb	`&hi("$s3")`,$acc1
+	movzb	`&hi("$s0")`,$acc2
+	movzb	($sbox,$acc0,1),$t3
+	movzb	($sbox,$acc1,1),$acc1	#$t0
+	movzb	($sbox,$acc2,1),$acc2	#$t1
+
+	shl	\$8,$acc1
+	shl	\$8,$acc2
+
+	xor	$acc1,$t0
+	xor	$acc2,$t1
+	shr	\$16,$s3
+
+	movzb	`&hi("$s1")`,$acc0
+	movzb	`&hi("$s2")`,$acc1
+	shr	\$16,$s0
+	movzb	($sbox,$acc0,1),$acc0	#$t2
+	movzb	($sbox,$acc1,1),$acc1	#$t3
+
+	shl	\$8,$acc0
+	shl	\$8,$acc1
+	shr	\$16,$s1
+	xor	$acc0,$t2
+	xor	$acc1,$t3
+	shr	\$16,$s2
+
+	movzb	`&lo("$s2")`,$acc0
+	movzb	`&lo("$s3")`,$acc1
+	movzb	`&lo("$s0")`,$acc2
+	movzb	($sbox,$acc0,1),$acc0	#$t0
+	movzb	($sbox,$acc1,1),$acc1	#$t1
+	movzb	($sbox,$acc2,1),$acc2	#$t2
+
+	shl	\$16,$acc0
+	shl	\$16,$acc1
+	shl	\$16,$acc2
+
+	xor	$acc0,$t0
+	xor	$acc1,$t1
+	xor	$acc2,$t2
+
+	movzb	`&lo("$s1")`,$acc0
+	movzb	`&hi("$s1")`,$acc1
+	movzb	`&hi("$s2")`,$acc2
+	movzb	($sbox,$acc0,1),$acc0	#$t3
+	movzb	($sbox,$acc1,1),$acc1	#$t0
+	movzb	($sbox,$acc2,1),$acc2	#$t1
+
+	shl	\$16,$acc0
+	shl	\$24,$acc1
+	shl	\$24,$acc2
+
+	xor	$acc0,$t3
+	xor	$acc1,$t0
+	xor	$acc2,$t1
+
+	movzb	`&hi("$s3")`,$acc0
+	movzb	`&hi("$s0")`,$acc1
+	mov	16+12($key),$s3
+	movzb	($sbox,$acc0,1),$acc0	#$t2
+	movzb	($sbox,$acc1,1),$acc1	#$t3
+	mov	16+0($key),$s0
+
+	shl	\$24,$acc0
+	shl	\$24,$acc1
+
+	xor	$acc0,$t2
+	xor	$acc1,$t3
+
+	mov	16+4($key),$s1
+	mov	16+8($key),$s2
+	lea	-2048($sbox),$sbox
+	xor	$t0,$s0
+	xor	$t1,$s1
+	xor	$t2,$s2
+	xor	$t3,$s3
+___
+}
+
+sub decstep()
+{ my ($i,@s) = @_;
+  my $tmp0=$acc0;
+  my $tmp1=$acc1;
+  my $tmp2=$acc2;
+  my $out=($t0,$t1,$t2,$s[0])[$i];
+
+	$code.="	mov	$s[0],$out\n"		if ($i!=3);
+			$tmp1=$s[2]			if ($i==3);
+	$code.="	mov	$s[2],$tmp1\n"		if ($i!=3);
+	$code.="	and	\$0xFF,$out\n";
+
+	$code.="	mov	0($sbox,$out,8),$out\n";
+	$code.="	shr	\$16,$tmp1\n";
+			$tmp2=$s[3]			if ($i==3);
+	$code.="	mov	$s[3],$tmp2\n"		if ($i!=3);
+
+			$tmp0=$s[1]			if ($i==3);
+	$code.="	movzb	".&hi($s[1]).",$tmp0\n";
+	$code.="	and	\$0xFF,$tmp1\n";
+	$code.="	shr	\$24,$tmp2\n";
+
+	$code.="	xor	3($sbox,$tmp0,8),$out\n";
+	$code.="	xor	2($sbox,$tmp1,8),$out\n";
+	$code.="	xor	1($sbox,$tmp2,8),$out\n";
+
+	$code.="	mov	$t2,$s[1]\n"		if ($i==3);
+	$code.="	mov	$t1,$s[2]\n"		if ($i==3);
+	$code.="	mov	$t0,$s[3]\n"		if ($i==3);
+	$code.="\n";
+}
+
+sub declast()
+{ my ($i,@s)=@_;
+  my $tmp0=$acc0;
+  my $tmp1=$acc1;
+  my $tmp2=$acc2;
+  my $out=($t0,$t1,$t2,$s[0])[$i];
+
+	$code.="	mov	$s[0],$out\n"		if ($i!=3);
+			$tmp1=$s[2]			if ($i==3);
+	$code.="	mov	$s[2],$tmp1\n"		if ($i!=3);
+	$code.="	and	\$0xFF,$out\n";
+
+	$code.="	movzb	2048($sbox,$out,1),$out\n";
+	$code.="	shr	\$16,$tmp1\n";
+			$tmp2=$s[3]			if ($i==3);
+	$code.="	mov	$s[3],$tmp2\n"		if ($i!=3);
+
+			$tmp0=$s[1]			if ($i==3);
+	$code.="	movzb	".&hi($s[1]).",$tmp0\n";
+	$code.="	and	\$0xFF,$tmp1\n";
+	$code.="	shr	\$24,$tmp2\n";
+
+	$code.="	movzb	2048($sbox,$tmp0,1),$tmp0\n";
+	$code.="	movzb	2048($sbox,$tmp1,1),$tmp1\n";
+	$code.="	movzb	2048($sbox,$tmp2,1),$tmp2\n";
+
+	$code.="	shl	\$8,$tmp0\n";
+	$code.="	shl	\$16,$tmp1\n";
+	$code.="	shl	\$24,$tmp2\n";
+
+	$code.="	xor	$tmp0,$out\n";
+	$code.="	mov	$t2,$s[1]\n"		if ($i==3);
+	$code.="	xor	$tmp1,$out\n";
+	$code.="	mov	$t1,$s[2]\n"		if ($i==3);
+	$code.="	xor	$tmp2,$out\n";
+	$code.="	mov	$t0,$s[3]\n"		if ($i==3);
+	$code.="\n";
+}
+
+$code.=<<___;
+.type	_x86_64_AES_decrypt,\@abi-omnipotent
+.align	16
+_x86_64_AES_decrypt:
+	xor	0($key),$s0			# xor with key
+	xor	4($key),$s1
+	xor	8($key),$s2
+	xor	12($key),$s3
+
+	mov	240($key),$rnds			# load key->rounds
+	sub	\$1,$rnds
+	jmp	.Ldec_loop
+.align	16
+.Ldec_loop:
+___
+	if ($verticalspin) { &decvert(); }
+	else {	&decstep(0,$s0,$s3,$s2,$s1);
+		&decstep(1,$s1,$s0,$s3,$s2);
+		&decstep(2,$s2,$s1,$s0,$s3);
+		&decstep(3,$s3,$s2,$s1,$s0);
+		$code.=<<___;
+		lea	16($key),$key
+		xor	0($key),$s0			# xor with key
+		xor	4($key),$s1
+		xor	8($key),$s2
+		xor	12($key),$s3
+___
+	}
+$code.=<<___;
+	sub	\$1,$rnds
+	jnz	.Ldec_loop
+___
+	if ($verticalspin) { &declastvert(); }
+	else {	&declast(0,$s0,$s3,$s2,$s1);
+		&declast(1,$s1,$s0,$s3,$s2);
+		&declast(2,$s2,$s1,$s0,$s3);
+		&declast(3,$s3,$s2,$s1,$s0);
+		$code.=<<___;
+		xor	16+0($key),$s0			# xor with key
+		xor	16+4($key),$s1
+		xor	16+8($key),$s2
+		xor	16+12($key),$s3
+___
+	}
+$code.=<<___;
+	.byte	0xf3,0xc3			# rep ret
+.size	_x86_64_AES_decrypt,.-_x86_64_AES_decrypt
+___
+
+sub deccompactvert()
+{ my ($t3,$t4,$t5)=("%r8d","%r9d","%r13d");
+
+$code.=<<___;
+	movzb	`&lo("$s0")`,$t0
+	movzb	`&lo("$s1")`,$t1
+	movzb	`&lo("$s2")`,$t2
+	movzb	`&lo("$s3")`,$t3
+	movzb	`&hi("$s3")`,$acc0
+	movzb	`&hi("$s0")`,$acc1
+	shr	\$16,$s3
+	movzb	`&hi("$s1")`,$acc2
+	movzb	($sbox,$t0,1),$t0
+	movzb	($sbox,$t1,1),$t1
+	movzb	($sbox,$t2,1),$t2
+	movzb	($sbox,$t3,1),$t3
+
+	movzb	($sbox,$acc0,1),$t4	#$t0
+	movzb	`&hi("$s2")`,$acc0
+	movzb	($sbox,$acc1,1),$t5	#$t1
+	movzb	($sbox,$acc2,1),$acc2	#$t2
+	movzb	($sbox,$acc0,1),$acc0	#$t3
+
+	shr	\$16,$s2
+	shl	\$8,$t5
+	shl	\$8,$t4
+	movzb	`&lo("$s2")`,$acc1
+	shr	\$16,$s0
+	xor	$t4,$t0
+	shr	\$16,$s1
+	movzb	`&lo("$s3")`,$t4
+
+	shl	\$8,$acc2
+	xor	$t5,$t1
+	shl	\$8,$acc0
+	movzb	`&lo("$s0")`,$t5
+	movzb	($sbox,$acc1,1),$acc1	#$t0
+	xor	$acc2,$t2
+	movzb	`&lo("$s1")`,$acc2
+
+	shl	\$16,$acc1
+	xor	$acc0,$t3
+	movzb	($sbox,$t4,1),$t4	#$t1
+	movzb	`&hi("$s1")`,$acc0
+	movzb	($sbox,$acc2,1),$acc2	#$t3
+	xor	$acc1,$t0
+	movzb	($sbox,$t5,1),$t5	#$t2
+	movzb	`&hi("$s2")`,$acc1
+
+	shl	\$16,$acc2
+	shl	\$16,$t4
+	shl	\$16,$t5
+	xor	$acc2,$t3
+	movzb	`&hi("$s3")`,$acc2
+	xor	$t4,$t1
+	shr	\$8,$s0
+	xor	$t5,$t2
+
+	movzb	($sbox,$acc0,1),$acc0	#$t0
+	movzb	($sbox,$acc1,1),$s1	#$t1
+	movzb	($sbox,$acc2,1),$s2	#$t2
+	movzb	($sbox,$s0,1),$s3	#$t3
+
+	mov	$t0,$s0
+	shl	\$24,$acc0
+	shl	\$24,$s1
+	shl	\$24,$s2
+	xor	$acc0,$s0
+	shl	\$24,$s3
+	xor	$t1,$s1
+	xor	$t2,$s2
+	xor	$t3,$s3
+___
+}
+
+# parallelized version! input is a pair of 64-bit values: %rax=s1.s0
+# and %rcx=s3.s2, output is four 32-bit values in %eax=s0, %ebx=s1,
+# %ecx=s2 and %edx=s3.
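+# The doubling chain below is InvMixColumns: with tp2=xtime(tp1),
+# tp4=xtime(tp2) and tp8=xtime(tp4) computed on both column pairs at
+# once, each output word is assembled, as the per-line comments track, as
+#	out = (tp8^tp4^tp2) ^ ROL8(tp1^tp8)
+#	    ^ ROL24(tp2^tp1^tp8) ^ ROL16(tp4^tp1^tp8)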
+sub dectransform()
+{ my ($tp10,$tp20,$tp40,$tp80,$acc0)=("%rax","%r8", "%r9", "%r10","%rbx");
+  my ($tp18,$tp28,$tp48,$tp88,$acc8)=("%rcx","%r11","%r12","%r13","%rdx");
+  my $prefetch = shift;
+
+$code.=<<___;
+	mov	$mask80,$tp40
+	mov	$mask80,$tp48
+	and	$tp10,$tp40
+	and	$tp18,$tp48
+	mov	$tp40,$acc0
+	mov	$tp48,$acc8
+	shr	\$7,$tp40
+	lea	($tp10,$tp10),$tp20
+	shr	\$7,$tp48
+	lea	($tp18,$tp18),$tp28
+	sub	$tp40,$acc0
+	sub	$tp48,$acc8
+	and	$maskfe,$tp20
+	and	$maskfe,$tp28
+	and	$mask1b,$acc0
+	and	$mask1b,$acc8
+	xor	$acc0,$tp20
+	xor	$acc8,$tp28
+	mov	$mask80,$tp80
+	mov	$mask80,$tp88
+
+	and	$tp20,$tp80
+	and	$tp28,$tp88
+	mov	$tp80,$acc0
+	mov	$tp88,$acc8
+	shr	\$7,$tp80
+	lea	($tp20,$tp20),$tp40
+	shr	\$7,$tp88
+	lea	($tp28,$tp28),$tp48
+	sub	$tp80,$acc0
+	sub	$tp88,$acc8
+	and	$maskfe,$tp40
+	and	$maskfe,$tp48
+	and	$mask1b,$acc0
+	and	$mask1b,$acc8
+	xor	$acc0,$tp40
+	xor	$acc8,$tp48
+	mov	$mask80,$tp80
+	mov	$mask80,$tp88
+
+	and	$tp40,$tp80
+	and	$tp48,$tp88
+	mov	$tp80,$acc0
+	mov	$tp88,$acc8
+	shr	\$7,$tp80
+	 xor	$tp10,$tp20		# tp2^=tp1
+	shr	\$7,$tp88
+	 xor	$tp18,$tp28		# tp2^=tp1
+	sub	$tp80,$acc0
+	sub	$tp88,$acc8
+	lea	($tp40,$tp40),$tp80
+	lea	($tp48,$tp48),$tp88
+	 xor	$tp10,$tp40		# tp4^=tp1
+	 xor	$tp18,$tp48		# tp4^=tp1
+	and	$maskfe,$tp80
+	and	$maskfe,$tp88
+	and	$mask1b,$acc0
+	and	$mask1b,$acc8
+	xor	$acc0,$tp80
+	xor	$acc8,$tp88
+
+	xor	$tp80,$tp10		# tp1^=tp8
+	xor	$tp88,$tp18		# tp1^=tp8
+	xor	$tp80,$tp20		# tp2^tp1^=tp8
+	xor	$tp88,$tp28		# tp2^tp1^=tp8
+	mov	$tp10,$acc0
+	mov	$tp18,$acc8
+	xor	$tp80,$tp40		# tp4^tp1^=tp8
+	shr	\$32,$acc0
+	xor	$tp88,$tp48		# tp4^tp1^=tp8
+	shr	\$32,$acc8
+	xor	$tp20,$tp80		# tp8^=tp8^tp2^tp1=tp2^tp1
+	rol	\$8,`&LO("$tp10")`	# ROTATE(tp1^tp8,8)
+	xor	$tp28,$tp88		# tp8^=tp8^tp2^tp1=tp2^tp1
+	rol	\$8,`&LO("$tp18")`	# ROTATE(tp1^tp8,8)
+	xor	$tp40,$tp80		# tp2^tp1^=tp8^tp4^tp1=tp8^tp4^tp2
+	rol	\$8,`&LO("$acc0")`	# ROTATE(tp1^tp8,8)
+	xor	$tp48,$tp88		# tp2^tp1^=tp8^tp4^tp1=tp8^tp4^tp2
+
+	rol	\$8,`&LO("$acc8")`	# ROTATE(tp1^tp8,8)
+	xor	`&LO("$tp80")`,`&LO("$tp10")`
+	shr	\$32,$tp80
+	xor	`&LO("$tp88")`,`&LO("$tp18")`
+	shr	\$32,$tp88
+	xor	`&LO("$tp80")`,`&LO("$acc0")`
+	xor	`&LO("$tp88")`,`&LO("$acc8")`
+
+	mov	$tp20,$tp80
+	rol	\$24,`&LO("$tp20")`	# ROTATE(tp2^tp1^tp8,24)
+	mov	$tp28,$tp88
+	rol	\$24,`&LO("$tp28")`	# ROTATE(tp2^tp1^tp8,24)
+	shr	\$32,$tp80
+	xor	`&LO("$tp20")`,`&LO("$tp10")`
+	shr	\$32,$tp88
+	xor	`&LO("$tp28")`,`&LO("$tp18")`
+	rol	\$24,`&LO("$tp80")`	# ROTATE(tp2^tp1^tp8,24)
+	mov	$tp40,$tp20
+	rol	\$24,`&LO("$tp88")`	# ROTATE(tp2^tp1^tp8,24)
+	mov	$tp48,$tp28
+	shr	\$32,$tp20
+	xor	`&LO("$tp80")`,`&LO("$acc0")`
+	shr	\$32,$tp28
+	xor	`&LO("$tp88")`,`&LO("$acc8")`
+
+	`"mov	0($sbox),$mask80"	if ($prefetch)`
+	rol	\$16,`&LO("$tp40")`	# ROTATE(tp4^tp1^tp8,16)
+	`"mov	64($sbox),$maskfe"	if ($prefetch)`
+	rol	\$16,`&LO("$tp48")`	# ROTATE(tp4^tp1^tp8,16)
+	`"mov	128($sbox),$mask1b"	if ($prefetch)`
+	rol	\$16,`&LO("$tp20")`	# ROTATE(tp4^tp1^tp8,16)
+	`"mov	192($sbox),$tp80"	if ($prefetch)`
+	xor	`&LO("$tp40")`,`&LO("$tp10")`
+	rol	\$16,`&LO("$tp28")`	# ROTATE(tp4^tp1^tp8,16)
+	xor	`&LO("$tp48")`,`&LO("$tp18")`
+	`"mov	256($sbox),$tp88"	if ($prefetch)`
+	xor	`&LO("$tp20")`,`&LO("$acc0")`
+	xor	`&LO("$tp28")`,`&LO("$acc8")`
+___
+}
+
+$code.=<<___;
+.type	_x86_64_AES_decrypt_compact,\@abi-omnipotent
+.align	16
+_x86_64_AES_decrypt_compact:
+	lea	128($sbox),$inp			# size optimization
+	mov	0-128($inp),$acc1		# prefetch Td4
+	mov	32-128($inp),$acc2
+	mov	64-128($inp),$t0
+	mov	96-128($inp),$t1
+	mov	128-128($inp),$acc1
+	mov	160-128($inp),$acc2
+	mov	192-128($inp),$t0
+	mov	224-128($inp),$t1
+	jmp	.Ldec_loop_compact
+
+.align	16
+.Ldec_loop_compact:
+		xor	0($key),$s0		# xor with key
+		xor	4($key),$s1
+		xor	8($key),$s2
+		xor	12($key),$s3
+		lea	16($key),$key
+___
+		&deccompactvert();
+$code.=<<___;
+		cmp	16(%rsp),$key
+		je	.Ldec_compact_done
+
+		mov	256+0($sbox),$mask80
+		shl	\$32,%rbx
+		shl	\$32,%rdx
+		mov	256+8($sbox),$maskfe
+		or	%rbx,%rax
+		or	%rdx,%rcx
+		mov	256+16($sbox),$mask1b
+___
+		&dectransform(1);
+$code.=<<___;
+	jmp	.Ldec_loop_compact
+.align	16
+.Ldec_compact_done:
+	xor	0($key),$s0
+	xor	4($key),$s1
+	xor	8($key),$s2
+	xor	12($key),$s3
+	.byte	0xf3,0xc3			# rep ret
+.size	_x86_64_AES_decrypt_compact,.-_x86_64_AES_decrypt_compact
+___
+
+# void asm_AES_decrypt (const void *inp,void *out,const AES_KEY *key);
+$code.=<<___;
+.align	16
+.globl	asm_AES_decrypt
+.type	asm_AES_decrypt,\@function,3
+.hidden	asm_AES_decrypt
+asm_AES_decrypt:
+	mov	%rsp,%rax
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+
+	# allocate frame "above" key schedule
+	lea	-63(%rdx),%rcx	# %rdx is key argument
+	and	\$-64,%rsp
+	sub	%rsp,%rcx
+	neg	%rcx
+	and	\$0x3c0,%rcx
+	sub	%rcx,%rsp
+	sub	\$32,%rsp
+
+	mov	%rsi,16(%rsp)	# save out
+	mov	%rax,24(%rsp)	# save original stack pointer
+.Ldec_prologue:
+
+	mov	%rdx,$key
+	mov	240($key),$rnds	# load rounds
+
+	mov	0(%rdi),$s0	# load input vector
+	mov	4(%rdi),$s1
+	mov	8(%rdi),$s2
+	mov	12(%rdi),$s3
+
+	shl	\$4,$rnds
+	lea	($key,$rnds),%rbp
+	mov	$key,(%rsp)	# key schedule
+	mov	%rbp,8(%rsp)	# end of key schedule
+
+	# pick Td4 copy which can't "overlap" with stack frame or key schedule
+	lea	.LAES_Td+2048(%rip),$sbox
+	lea	768(%rsp),%rbp
+	sub	$sbox,%rbp
+	and	\$0x300,%rbp
+	lea	($sbox,%rbp),$sbox
+	shr	\$3,%rbp	# recall "magic" constants!
+	add	%rbp,$sbox
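+	# (the four Td4 copies are spaced 288 = 256+32 bytes apart, the
+	# extra 32 bytes being the mask constants stored between them, so
+	# the shr/add pair above stretches the 0x100-spaced pick by 9/8)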
+
+	call	_x86_64_AES_decrypt_compact
+
+	mov	16(%rsp),$out	# restore out
+	mov	24(%rsp),%rsi	# restore saved stack pointer
+	mov	$s0,0($out)	# write output vector
+	mov	$s1,4($out)
+	mov	$s2,8($out)
+	mov	$s3,12($out)
+
+	mov	-48(%rsi),%r15
+	mov	-40(%rsi),%r14
+	mov	-32(%rsi),%r13
+	mov	-24(%rsi),%r12
+	mov	-16(%rsi),%rbp
+	mov	-8(%rsi),%rbx
+	lea	(%rsi),%rsp
+.Ldec_epilogue:
+	ret
+.size	asm_AES_decrypt,.-asm_AES_decrypt
+___
+#------------------------------------------------------------------#
+
+sub enckey()
+{
+$code.=<<___;
+	movz	%dl,%esi		# rk[i]>>0
+	movzb	-128(%rbp,%rsi),%ebx
+	movz	%dh,%esi		# rk[i]>>8
+	shl	\$24,%ebx
+	xor	%ebx,%eax
+
+	movzb	-128(%rbp,%rsi),%ebx
+	shr	\$16,%edx
+	movz	%dl,%esi		# rk[i]>>16
+	xor	%ebx,%eax
+
+	movzb	-128(%rbp,%rsi),%ebx
+	movz	%dh,%esi		# rk[i]>>24
+	shl	\$8,%ebx
+	xor	%ebx,%eax
+
+	movzb	-128(%rbp,%rsi),%ebx
+	shl	\$16,%ebx
+	xor	%ebx,%eax
+
+	xor	1024-128(%rbp,%rcx,4),%eax		# rcon
+___
+}
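+# i.e. the classic expansion core: on entry %eax holds rk[i-Nk] and %edx
+# holds rk[i-1], and the four Te4 byte lookups above assemble
+#	%eax ^= SubWord(RotWord(rk[i-1])) ^ rcon
+# with the rcon table sitting right after the four Te4 copies, at
+# .LAES_Te+2048+1024.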
+
+# int asm_AES_set_encrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key)
+$code.=<<___;
+.align	16
+.globl asm_AES_set_encrypt_key
+.type  asm_AES_set_encrypt_key,\@function,3
+asm_AES_set_encrypt_key:
+	push	%rbx
+	push	%rbp
+	push	%r12			# redundant, but allows us to share
+	push	%r13			# the exception handler...
+	push	%r14
+	push	%r15
+	sub	\$8,%rsp
+.Lenc_key_prologue:
+
+	call	_x86_64_AES_set_encrypt_key
+
+	mov	40(%rsp),%rbp
+	mov	48(%rsp),%rbx
+	add	\$56,%rsp
+.Lenc_key_epilogue:
+	ret
+.size asm_AES_set_encrypt_key,.-asm_AES_set_encrypt_key
+
+.type	_x86_64_AES_set_encrypt_key,\@abi-omnipotent
+.align	16
+_x86_64_AES_set_encrypt_key:
+	mov	%esi,%ecx			# %ecx=bits
+	mov	%rdi,%rsi			# %rsi=userKey
+	mov	%rdx,%rdi			# %rdi=key
+
+	test	\$-1,%rsi
+	jz	.Lbadpointer
+	test	\$-1,%rdi
+	jz	.Lbadpointer
+
+	lea	.LAES_Te(%rip),%rbp
+	lea	2048+128(%rbp),%rbp
+
+	# prefetch Te4
+	mov	0-128(%rbp),%eax
+	mov	32-128(%rbp),%ebx
+	mov	64-128(%rbp),%r8d
+	mov	96-128(%rbp),%edx
+	mov	128-128(%rbp),%eax
+	mov	160-128(%rbp),%ebx
+	mov	192-128(%rbp),%r8d
+	mov	224-128(%rbp),%edx
+
+	cmp	\$128,%ecx
+	je	.L10rounds
+	cmp	\$192,%ecx
+	je	.L12rounds
+	cmp	\$256,%ecx
+	je	.L14rounds
+	mov	\$-2,%rax			# invalid number of bits
+	jmp	.Lexit
+
+.L10rounds:
+	mov	0(%rsi),%rax			# copy first 4 dwords
+	mov	8(%rsi),%rdx
+	mov	%rax,0(%rdi)
+	mov	%rdx,8(%rdi)
+
+	shr	\$32,%rdx
+	xor	%ecx,%ecx
+	jmp	.L10shortcut
+.align	4
+.L10loop:
+		mov	0(%rdi),%eax			# rk[0]
+		mov	12(%rdi),%edx			# rk[3]
+.L10shortcut:
+___
+		&enckey	();
+$code.=<<___;
+		mov	%eax,16(%rdi)			# rk[4]
+		xor	4(%rdi),%eax
+		mov	%eax,20(%rdi)			# rk[5]
+		xor	8(%rdi),%eax
+		mov	%eax,24(%rdi)			# rk[6]
+		xor	12(%rdi),%eax
+		mov	%eax,28(%rdi)			# rk[7]
+		add	\$1,%ecx
+		lea	16(%rdi),%rdi
+		cmp	\$10,%ecx
+	jl	.L10loop
+
+	movl	\$10,80(%rdi)			# set the number of rounds
+	xor	%rax,%rax
+	jmp	.Lexit
+
+.L12rounds:
+	mov	0(%rsi),%rax			# copy first 6 dwords
+	mov	8(%rsi),%rbx
+	mov	16(%rsi),%rdx
+	mov	%rax,0(%rdi)
+	mov	%rbx,8(%rdi)
+	mov	%rdx,16(%rdi)
+
+	shr	\$32,%rdx
+	xor	%ecx,%ecx
+	jmp	.L12shortcut
+.align	4
+.L12loop:
+		mov	0(%rdi),%eax			# rk[0]
+		mov	20(%rdi),%edx			# rk[5]
+.L12shortcut:
+___
+		&enckey	();
+$code.=<<___;
+		mov	%eax,24(%rdi)			# rk[6]
+		xor	4(%rdi),%eax
+		mov	%eax,28(%rdi)			# rk[7]
+		xor	8(%rdi),%eax
+		mov	%eax,32(%rdi)			# rk[8]
+		xor	12(%rdi),%eax
+		mov	%eax,36(%rdi)			# rk[9]
+
+		cmp	\$7,%ecx
+		je	.L12break
+		add	\$1,%ecx
+
+		xor	16(%rdi),%eax
+		mov	%eax,40(%rdi)			# rk[10]
+		xor	20(%rdi),%eax
+		mov	%eax,44(%rdi)			# rk[11]
+
+		lea	24(%rdi),%rdi
+	jmp	.L12loop
+.L12break:
+	movl	\$12,72(%rdi)		# set the number of rounds
+	xor	%rax,%rax
+	jmp	.Lexit
+
+.L14rounds:
+	mov	0(%rsi),%rax			# copy first 8 dwords
+	mov	8(%rsi),%rbx
+	mov	16(%rsi),%rcx
+	mov	24(%rsi),%rdx
+	mov	%rax,0(%rdi)
+	mov	%rbx,8(%rdi)
+	mov	%rcx,16(%rdi)
+	mov	%rdx,24(%rdi)
+
+	shr	\$32,%rdx
+	xor	%ecx,%ecx
+	jmp	.L14shortcut
+.align	4
+.L14loop:
+		mov	0(%rdi),%eax			# rk[0]
+		mov	28(%rdi),%edx			# rk[7]
+.L14shortcut:
+___
+		&enckey	();
+$code.=<<___;
+		mov	%eax,32(%rdi)			# rk[8]
+		xor	4(%rdi),%eax
+		mov	%eax,36(%rdi)			# rk[9]
+		xor	8(%rdi),%eax
+		mov	%eax,40(%rdi)			# rk[10]
+		xor	12(%rdi),%eax
+		mov	%eax,44(%rdi)			# rk[11]
+
+		cmp	\$6,%ecx
+		je	.L14break
+		add	\$1,%ecx
+
+		mov	%eax,%edx
+		mov	16(%rdi),%eax			# rk[4]
+		movz	%dl,%esi			# rk[11]>>0
+		movzb	-128(%rbp,%rsi),%ebx
+		movz	%dh,%esi			# rk[11]>>8
+		xor	%ebx,%eax
+
+		movzb	-128(%rbp,%rsi),%ebx
+		shr	\$16,%edx
+		shl	\$8,%ebx
+		movz	%dl,%esi			# rk[11]>>16
+		xor	%ebx,%eax
+
+		movzb	-128(%rbp,%rsi),%ebx
+		movz	%dh,%esi			# rk[11]>>24
+		shl	\$16,%ebx
+		xor	%ebx,%eax
+
+		movzb	-128(%rbp,%rsi),%ebx
+		shl	\$24,%ebx
+		xor	%ebx,%eax
+
+		mov	%eax,48(%rdi)			# rk[12]
+		xor	20(%rdi),%eax
+		mov	%eax,52(%rdi)			# rk[13]
+		xor	24(%rdi),%eax
+		mov	%eax,56(%rdi)			# rk[14]
+		xor	28(%rdi),%eax
+		mov	%eax,60(%rdi)			# rk[15]
+
+		lea	32(%rdi),%rdi
+	jmp	.L14loop
+.L14break:
+	movl	\$14,48(%rdi)		# set the number of rounds
+	xor	%rax,%rax
+	jmp	.Lexit
+
+.Lbadpointer:
+	mov	\$-1,%rax
+.Lexit:
+	.byte	0xf3,0xc3			# rep ret
+.size	_x86_64_AES_set_encrypt_key,.-_x86_64_AES_set_encrypt_key
+___
+
+sub deckey_ref()
+{ my ($i,$ptr,$te,$td) = @_;
+  my ($tp1,$tp2,$tp4,$tp8,$acc)=("%eax","%ebx","%edi","%edx","%r8d");
+$code.=<<___;
+	mov	$i($ptr),$tp1
+	mov	$tp1,$acc
+	and	\$0x80808080,$acc
+	mov	$acc,$tp4
+	shr	\$7,$tp4
+	lea	0($tp1,$tp1),$tp2
+	sub	$tp4,$acc
+	and	\$0xfefefefe,$tp2
+	and	\$0x1b1b1b1b,$acc
+	xor	$tp2,$acc
+	mov	$acc,$tp2
+
+	and	\$0x80808080,$acc
+	mov	$acc,$tp8
+	shr	\$7,$tp8
+	lea	0($tp2,$tp2),$tp4
+	sub	$tp8,$acc
+	and	\$0xfefefefe,$tp4
+	and	\$0x1b1b1b1b,$acc
+	 xor	$tp1,$tp2		# tp2^tp1
+	xor	$tp4,$acc
+	mov	$acc,$tp4
+
+	and	\$0x80808080,$acc
+	mov	$acc,$tp8
+	shr	\$7,$tp8
+	sub	$tp8,$acc
+	lea	0($tp4,$tp4),$tp8
+	 xor	$tp1,$tp4		# tp4^tp1
+	and	\$0xfefefefe,$tp8
+	and	\$0x1b1b1b1b,$acc
+	xor	$acc,$tp8
+
+	xor	$tp8,$tp1		# tp1^tp8
+	rol	\$8,$tp1		# ROTATE(tp1^tp8,8)
+	xor	$tp8,$tp2		# tp2^tp1^tp8
+	xor	$tp8,$tp4		# tp4^tp1^tp8
+	xor	$tp2,$tp8
+	xor	$tp4,$tp8		# tp8^(tp8^tp4^tp1)^(tp8^tp2^tp1)=tp8^tp4^tp2
+
+	xor	$tp8,$tp1
+	rol	\$24,$tp2		# ROTATE(tp2^tp1^tp8,24)
+	xor	$tp2,$tp1
+	rol	\$16,$tp4		# ROTATE(tp4^tp1^tp8,16)
+	xor	$tp4,$tp1
+
+	mov	$tp1,$i($ptr)
+___
+}
+
+# int asm_AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key)
+$code.=<<___;
+.align	16
+.globl asm_AES_set_decrypt_key
+.type  asm_AES_set_decrypt_key,\@function,3
+asm_AES_set_decrypt_key:
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	push	%rdx			# save key schedule
+.Ldec_key_prologue:
+
+	call	_x86_64_AES_set_encrypt_key
+	mov	(%rsp),%r8		# restore key schedule
+	cmp	\$0,%eax
+	jne	.Labort
+
+	mov	240(%r8),%r14d		# pull number of rounds
+	xor	%rdi,%rdi
+	lea	(%rdi,%r14d,4),%rcx
+	mov	%r8,%rsi
+	lea	(%r8,%rcx,4),%rdi	# pointer to last chunk
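+	# the decryption schedule is the encryption one in reverse order,
+	# with InvMixColumns applied to all but the outermost two round
+	# keys: .Linvert below swaps 16-byte chunks end-for-end, then
+	# .Lpermute runs the rounds-1 inner keys through dectransform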
+.align	4
+.Linvert:
+		mov	0(%rsi),%rax
+		mov	8(%rsi),%rbx
+		mov	0(%rdi),%rcx
+		mov	8(%rdi),%rdx
+		mov	%rax,0(%rdi)
+		mov	%rbx,8(%rdi)
+		mov	%rcx,0(%rsi)
+		mov	%rdx,8(%rsi)
+		lea	16(%rsi),%rsi
+		lea	-16(%rdi),%rdi
+		cmp	%rsi,%rdi
+	jne	.Linvert
+
+	lea	.LAES_Te+2048+1024(%rip),%rax	# rcon
+
+	mov	40(%rax),$mask80
+	mov	48(%rax),$maskfe
+	mov	56(%rax),$mask1b
+
+	mov	%r8,$key
+	sub	\$1,%r14d
+.align	4
+.Lpermute:
+		lea	16($key),$key
+		mov	0($key),%rax
+		mov	8($key),%rcx
+___
+		&dectransform ();
+$code.=<<___;
+		mov	%eax,0($key)
+		mov	%ebx,4($key)
+		mov	%ecx,8($key)
+		mov	%edx,12($key)
+		sub	\$1,%r14d
+	jnz	.Lpermute
+
+	xor	%rax,%rax
+.Labort:
+	mov	8(%rsp),%r15
+	mov	16(%rsp),%r14
+	mov	24(%rsp),%r13
+	mov	32(%rsp),%r12
+	mov	40(%rsp),%rbp
+	mov	48(%rsp),%rbx
+	add	\$56,%rsp
+.Ldec_key_epilogue:
+	ret
+.size	asm_AES_set_decrypt_key,.-asm_AES_set_decrypt_key
+___
+
+# void asm_AES_cbc_encrypt (const unsigned char *inp, unsigned char *out,
+#			    size_t length, const AES_KEY *key,
+#			    unsigned char *ivp,const int enc);
+{
+# stack frame layout
+# -8(%rsp)		return address
+my $keyp="0(%rsp)";		# one to pass as $key
+my $keyend="8(%rsp)";		# &(keyp->rd_key[4*keyp->rounds])
+my $_rsp="16(%rsp)";		# saved %rsp
+my $_inp="24(%rsp)";		# copy of 1st parameter, inp
+my $_out="32(%rsp)";		# copy of 2nd parameter, out
+my $_len="40(%rsp)";		# copy of 3rd parameter, length
+my $_key="48(%rsp)";		# copy of 4th parameter, key
+my $_ivp="56(%rsp)";		# copy of 5th parameter, ivp
+my $ivec="64(%rsp)";		# ivec[16]
+my $aes_key="80(%rsp)";		# copy of aes_key
+my $mark="80+240(%rsp)";	# copy of aes_key->rounds
+
+$code.=<<___;
+.align	16
+.globl	asm_AES_cbc_encrypt
+.type	asm_AES_cbc_encrypt,\@function,6
+.extern	OPENSSL_ia32cap_addr
+.hidden	asm_AES_cbc_encrypt
+asm_AES_cbc_encrypt:
+	cmp	\$0,%rdx	# check length
+	je	.Lcbc_epilogue
+	pushfq
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+.Lcbc_prologue:
+
+	cld
+	mov	%r9d,%r9d	# clear upper half of enc
+
+	lea	.LAES_Te(%rip),$sbox
+	lea	.LAES_Td(%rip),%r10
+	cmp	\$0,%r9
+	cmoveq	%r10,$sbox
+
+	mov	OPENSSL_ia32cap_addr(%rip),%r10
+	mov	(%r10), %r10d
+	cmp	\$$speed_limit,%rdx
+	jb	.Lcbc_slow_prologue
+	test	\$15,%rdx
+	jnz	.Lcbc_slow_prologue
+	bt	\$28,%r10d
+	jc	.Lcbc_slow_prologue
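+	# (bit 28 of the ia32cap word is the CPUID HTT flag: with
+	# hyper-threading present the compact, table-light slow path is
+	# preferred, presumably to limit L1 pressure from a sibling thread)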
+
+	# allocate aligned stack frame...
+	lea	-88-248(%rsp),$key
+	and	\$-64,$key
+
+	# ... and make sure it doesn't alias with AES_T[ed] modulo 4096
+	mov	$sbox,%r10
+	lea	2304($sbox),%r11
+	mov	$key,%r12
+	and	\$0xFFF,%r10	# s = $sbox&0xfff
+	and	\$0xFFF,%r11	# e = ($sbox+2048)&0xfff
+	and	\$0xFFF,%r12	# p = %rsp&0xfff
+
+	cmp	%r11,%r12	# if (p>=e) %rsp -= (p-e);
+	jb	.Lcbc_te_break_out
+	sub	%r11,%r12
+	sub	%r12,$key
+	jmp	.Lcbc_te_ok
+.Lcbc_te_break_out:		# else %rsp -= (p-s)&0xfff + framesz
+	sub	%r10,%r12
+	and	\$0xFFF,%r12
+	add	\$320,%r12
+	sub	%r12,$key
+.align	4
+.Lcbc_te_ok:
+
+	xchg	%rsp,$key
+	#add	\$8,%rsp	# reserve for return address!
+	mov	$key,$_rsp	# save %rsp
+.Lcbc_fast_body:
+	mov	%rdi,$_inp	# save copy of inp
+	mov	%rsi,$_out	# save copy of out
+	mov	%rdx,$_len	# save copy of len
+	mov	%rcx,$_key	# save copy of key
+	mov	%r8,$_ivp	# save copy of ivp
+	movl	\$0,$mark	# zero the aes_key->rounds copy: schedule not copied yet
+	mov	%r8,%rbp	# rearrange input arguments
+	mov	%r9,%rbx
+	mov	%rsi,$out
+	mov	%rdi,$inp
+	mov	%rcx,$key
+
+	mov	240($key),%eax		# key->rounds
+	# do we copy key schedule to stack?
+	mov	$key,%r10
+	sub	$sbox,%r10
+	and	\$0xfff,%r10
+	cmp	\$2304,%r10
+	jb	.Lcbc_do_ecopy
+	cmp	\$4096-248,%r10
+	jb	.Lcbc_skip_ecopy
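+	# (i.e. copy the ~248-byte schedule onto the stack whenever its
+	# 4KB phase would put it inside the 2304 bytes of tables, or near
+	# enough to the 4KB boundary to wrap back into them)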
+.align	4
+.Lcbc_do_ecopy:
+		mov	$key,%rsi
+		lea	$aes_key,%rdi
+		lea	$aes_key,$key
+		mov	\$240/8,%ecx
+		.long	0x90A548F3	# rep movsq
+		mov	%eax,(%rdi)	# copy aes_key->rounds
+.Lcbc_skip_ecopy:
+	mov	$key,$keyp	# save key pointer
+
+	mov	\$18,%ecx
+.align	4
+.Lcbc_prefetch_te:
+		mov	0($sbox),%r10
+		mov	32($sbox),%r11
+		mov	64($sbox),%r12
+		mov	96($sbox),%r13
+		lea	128($sbox),$sbox
+		sub	\$1,%ecx
+	jnz	.Lcbc_prefetch_te
+	lea	-2304($sbox),$sbox
+
+	cmp	\$0,%rbx
+	je	.LFAST_DECRYPT
+
+#----------------------------- ENCRYPT -----------------------------#
+	mov	0(%rbp),$s0		# load iv
+	mov	4(%rbp),$s1
+	mov	8(%rbp),$s2
+	mov	12(%rbp),$s3
+
+.align	4
+.Lcbc_fast_enc_loop:
+		xor	0($inp),$s0
+		xor	4($inp),$s1
+		xor	8($inp),$s2
+		xor	12($inp),$s3
+		mov	$keyp,$key	# restore key
+		mov	$inp,$_inp	# if ($verticalspin) save inp
+
+		call	_x86_64_AES_encrypt
+
+		mov	$_inp,$inp	# if ($verticalspin) restore inp
+		mov	$_len,%r10
+		mov	$s0,0($out)
+		mov	$s1,4($out)
+		mov	$s2,8($out)
+		mov	$s3,12($out)
+
+		lea	16($inp),$inp
+		lea	16($out),$out
+		sub	\$16,%r10
+		test	\$-16,%r10
+		mov	%r10,$_len
+	jnz	.Lcbc_fast_enc_loop
+	mov	$_ivp,%rbp	# restore ivp
+	mov	$s0,0(%rbp)	# save ivec
+	mov	$s1,4(%rbp)
+	mov	$s2,8(%rbp)
+	mov	$s3,12(%rbp)
+
+	jmp	.Lcbc_fast_cleanup
+
+#----------------------------- DECRYPT -----------------------------#
+.align	16
+.LFAST_DECRYPT:
+	cmp	$inp,$out
+	je	.Lcbc_fast_dec_in_place
+
+	mov	%rbp,$ivec
+.align	4
+.Lcbc_fast_dec_loop:
+		mov	0($inp),$s0	# read input
+		mov	4($inp),$s1
+		mov	8($inp),$s2
+		mov	12($inp),$s3
+		mov	$keyp,$key	# restore key
+		mov	$inp,$_inp	# if ($verticalspin) save inp
+
+		call	_x86_64_AES_decrypt
+
+		mov	$ivec,%rbp	# load ivp
+		mov	$_inp,$inp	# if ($verticalspin) restore inp
+		mov	$_len,%r10	# load len
+		xor	0(%rbp),$s0	# xor iv
+		xor	4(%rbp),$s1
+		xor	8(%rbp),$s2
+		xor	12(%rbp),$s3
+		mov	$inp,%rbp	# current input, next iv
+
+		sub	\$16,%r10
+		mov	%r10,$_len	# update len
+		mov	%rbp,$ivec	# update ivp
+
+		mov	$s0,0($out)	# write output
+		mov	$s1,4($out)
+		mov	$s2,8($out)
+		mov	$s3,12($out)
+
+		lea	16($inp),$inp
+		lea	16($out),$out
+	jnz	.Lcbc_fast_dec_loop
+	mov	$_ivp,%r12		# load user ivp
+	mov	0(%rbp),%r10		# load iv
+	mov	8(%rbp),%r11
+	mov	%r10,0(%r12)		# copy back to user
+	mov	%r11,8(%r12)
+	jmp	.Lcbc_fast_cleanup
+
+.align	16
+.Lcbc_fast_dec_in_place:
+	mov	0(%rbp),%r10		# copy iv to stack
+	mov	8(%rbp),%r11
+	mov	%r10,0+$ivec
+	mov	%r11,8+$ivec
+.align	4
+.Lcbc_fast_dec_in_place_loop:
+		mov	0($inp),$s0	# load input
+		mov	4($inp),$s1
+		mov	8($inp),$s2
+		mov	12($inp),$s3
+		mov	$keyp,$key	# restore key
+		mov	$inp,$_inp	# if ($verticalspin) save inp
+
+		call	_x86_64_AES_decrypt
+
+		mov	$_inp,$inp	# if ($verticalspin) restore inp
+		mov	$_len,%r10
+		xor	0+$ivec,$s0
+		xor	4+$ivec,$s1
+		xor	8+$ivec,$s2
+		xor	12+$ivec,$s3
+
+		mov	0($inp),%r11	# load input
+		mov	8($inp),%r12
+		sub	\$16,%r10
+		jz	.Lcbc_fast_dec_in_place_done
+
+		mov	%r11,0+$ivec	# copy input to iv
+		mov	%r12,8+$ivec
+
+		mov	$s0,0($out)	# save output [zaps input]
+		mov	$s1,4($out)
+		mov	$s2,8($out)
+		mov	$s3,12($out)
+
+		lea	16($inp),$inp
+		lea	16($out),$out
+		mov	%r10,$_len
+	jmp	.Lcbc_fast_dec_in_place_loop
+.Lcbc_fast_dec_in_place_done:
+	mov	$_ivp,%rdi
+	mov	%r11,0(%rdi)	# copy iv back to user
+	mov	%r12,8(%rdi)
+
+	mov	$s0,0($out)	# save output [zaps input]
+	mov	$s1,4($out)
+	mov	$s2,8($out)
+	mov	$s3,12($out)
+
+.align	4
+.Lcbc_fast_cleanup:
+	cmpl	\$0,$mark	# was the key schedule copied?
+	lea	$aes_key,%rdi
+	je	.Lcbc_exit
+		mov	\$240/8,%ecx
+		xor	%rax,%rax
+		.long	0x90AB48F3	# rep stosq
+
+	jmp	.Lcbc_exit
+
+#--------------------------- SLOW ROUTINE ---------------------------#
+.align	16
+.Lcbc_slow_prologue:
+	# allocate aligned stack frame...
+	lea	-88(%rsp),%rbp
+	and	\$-64,%rbp
+	# ... just "above" key schedule
+	lea	-88-63(%rcx),%r10
+	sub	%rbp,%r10
+	neg	%r10
+	and	\$0x3c0,%r10
+	sub	%r10,%rbp
+
+	xchg	%rsp,%rbp
+	#add	\$8,%rsp	# reserve for return address!
+	mov	%rbp,$_rsp	# save %rsp
+.Lcbc_slow_body:
+	#mov	%rdi,$_inp	# save copy of inp
+	#mov	%rsi,$_out	# save copy of out
+	#mov	%rdx,$_len	# save copy of len
+	#mov	%rcx,$_key	# save copy of key
+	mov	%r8,$_ivp	# save copy of ivp
+	mov	%r8,%rbp	# rearrange input arguments
+	mov	%r9,%rbx
+	mov	%rsi,$out
+	mov	%rdi,$inp
+	mov	%rcx,$key
+	mov	%rdx,%r10
+
+	mov	240($key),%eax
+	mov	$key,$keyp	# save key pointer
+	shl	\$4,%eax
+	lea	($key,%rax),%rax
+	mov	%rax,$keyend
+
+	# pick Te4 copy which can't "overlap" with stack frame or key schedule
+	lea	2048($sbox),$sbox
+	lea	768-8(%rsp),%rax
+	sub	$sbox,%rax
+	and	\$0x300,%rax
+	lea	($sbox,%rax),$sbox
+
+	cmp	\$0,%rbx
+	je	.LSLOW_DECRYPT
+
+#--------------------------- SLOW ENCRYPT ---------------------------#
+	test	\$-16,%r10		# is length at least 16?
+	mov	0(%rbp),$s0		# load iv
+	mov	4(%rbp),$s1
+	mov	8(%rbp),$s2
+	mov	12(%rbp),$s3
+	jz	.Lcbc_slow_enc_tail	# short input...
+
+.align	4
+.Lcbc_slow_enc_loop:
+		xor	0($inp),$s0
+		xor	4($inp),$s1
+		xor	8($inp),$s2
+		xor	12($inp),$s3
+		mov	$keyp,$key	# restore key
+		mov	$inp,$_inp	# save inp
+		mov	$out,$_out	# save out
+		mov	%r10,$_len	# save len
+
+		call	_x86_64_AES_encrypt_compact
+
+		mov	$_inp,$inp	# restore inp
+		mov	$_out,$out	# restore out
+		mov	$_len,%r10	# restore len
+		mov	$s0,0($out)
+		mov	$s1,4($out)
+		mov	$s2,8($out)
+		mov	$s3,12($out)
+
+		lea	16($inp),$inp
+		lea	16($out),$out
+		sub	\$16,%r10
+		test	\$-16,%r10
+	jnz	.Lcbc_slow_enc_loop
+	test	\$15,%r10
+	jnz	.Lcbc_slow_enc_tail
+	mov	$_ivp,%rbp	# restore ivp
+	mov	$s0,0(%rbp)	# save ivec
+	mov	$s1,4(%rbp)
+	mov	$s2,8(%rbp)
+	mov	$s3,12(%rbp)
+
+	jmp	.Lcbc_exit
+
+.align	4
+.Lcbc_slow_enc_tail:
+	mov	%rax,%r11
+	mov	%rcx,%r12
+	mov	%r10,%rcx
+	mov	$inp,%rsi
+	mov	$out,%rdi
+	.long	0x9066A4F3		# rep movsb
+	mov	\$16,%rcx		# zero tail
+	sub	%r10,%rcx
+	xor	%rax,%rax
+	.long	0x9066AAF3		# rep stosb
+	mov	$out,$inp		# this is not a mistake!
+	mov	\$16,%r10		# len=16
+	mov	%r11,%rax
+	mov	%r12,%rcx
+	jmp	.Lcbc_slow_enc_loop	# one more spin...
+#--------------------------- SLOW DECRYPT ---------------------------#
+.align	16
+.LSLOW_DECRYPT:
+	shr	\$3,%rax
+	add	%rax,$sbox		# recall "magic" constants!
+
+	mov	0(%rbp),%r11		# copy iv to stack
+	mov	8(%rbp),%r12
+	mov	%r11,0+$ivec
+	mov	%r12,8+$ivec
+
+.align	4
+.Lcbc_slow_dec_loop:
+		mov	0($inp),$s0	# load input
+		mov	4($inp),$s1
+		mov	8($inp),$s2
+		mov	12($inp),$s3
+		mov	$keyp,$key	# restore key
+		mov	$inp,$_inp	# save inp
+		mov	$out,$_out	# save out
+		mov	%r10,$_len	# save len
+
+		call	_x86_64_AES_decrypt_compact
+
+		mov	$_inp,$inp	# restore inp
+		mov	$_out,$out	# restore out
+		mov	$_len,%r10
+		xor	0+$ivec,$s0
+		xor	4+$ivec,$s1
+		xor	8+$ivec,$s2
+		xor	12+$ivec,$s3
+
+		mov	0($inp),%r11	# load input
+		mov	8($inp),%r12
+		sub	\$16,%r10
+		jc	.Lcbc_slow_dec_partial
+		jz	.Lcbc_slow_dec_done
+
+		mov	%r11,0+$ivec	# copy input to iv
+		mov	%r12,8+$ivec
+
+		mov	$s0,0($out)	# save output [can zap input]
+		mov	$s1,4($out)
+		mov	$s2,8($out)
+		mov	$s3,12($out)
+
+		lea	16($inp),$inp
+		lea	16($out),$out
+	jmp	.Lcbc_slow_dec_loop
+.Lcbc_slow_dec_done:
+	mov	$_ivp,%rdi
+	mov	%r11,0(%rdi)		# copy iv back to user
+	mov	%r12,8(%rdi)
+
+	mov	$s0,0($out)		# save output [can zap input]
+	mov	$s1,4($out)
+	mov	$s2,8($out)
+	mov	$s3,12($out)
+
+	jmp	.Lcbc_exit
+
+.align	4
+.Lcbc_slow_dec_partial:
+	mov	$_ivp,%rdi
+	mov	%r11,0(%rdi)		# copy iv back to user
+	mov	%r12,8(%rdi)
+
+	mov	$s0,0+$ivec		# save output to stack
+	mov	$s1,4+$ivec
+	mov	$s2,8+$ivec
+	mov	$s3,12+$ivec
+
+	mov	$out,%rdi
+	lea	$ivec,%rsi
+	lea	16(%r10),%rcx
+	.long	0x9066A4F3	# rep movsb
+	jmp	.Lcbc_exit
+
+.align	16
+.Lcbc_exit:
+	mov	$_rsp,%rsi
+	mov	(%rsi),%r15
+	mov	8(%rsi),%r14
+	mov	16(%rsi),%r13
+	mov	24(%rsi),%r12
+	mov	32(%rsi),%rbp
+	mov	40(%rsi),%rbx
+	lea	48(%rsi),%rsp
+.Lcbc_popfq:
+	popfq
+.Lcbc_epilogue:
+	ret
+.size	asm_AES_cbc_encrypt,.-asm_AES_cbc_encrypt
+___
+}
+
+$code.=<<___;
+.align	64
+.LAES_Te:
+___
+	&_data_word(0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6);
+	&_data_word(0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591);
+	&_data_word(0x50303060, 0x03010102, 0xa96767ce, 0x7d2b2b56);
+	&_data_word(0x19fefee7, 0x62d7d7b5, 0xe6abab4d, 0x9a7676ec);
+	&_data_word(0x45caca8f, 0x9d82821f, 0x40c9c989, 0x877d7dfa);
+	&_data_word(0x15fafaef, 0xeb5959b2, 0xc947478e, 0x0bf0f0fb);
+	&_data_word(0xecadad41, 0x67d4d4b3, 0xfda2a25f, 0xeaafaf45);
+	&_data_word(0xbf9c9c23, 0xf7a4a453, 0x967272e4, 0x5bc0c09b);
+	&_data_word(0xc2b7b775, 0x1cfdfde1, 0xae93933d, 0x6a26264c);
+	&_data_word(0x5a36366c, 0x413f3f7e, 0x02f7f7f5, 0x4fcccc83);
+	&_data_word(0x5c343468, 0xf4a5a551, 0x34e5e5d1, 0x08f1f1f9);
+	&_data_word(0x937171e2, 0x73d8d8ab, 0x53313162, 0x3f15152a);
+	&_data_word(0x0c040408, 0x52c7c795, 0x65232346, 0x5ec3c39d);
+	&_data_word(0x28181830, 0xa1969637, 0x0f05050a, 0xb59a9a2f);
+	&_data_word(0x0907070e, 0x36121224, 0x9b80801b, 0x3de2e2df);
+	&_data_word(0x26ebebcd, 0x6927274e, 0xcdb2b27f, 0x9f7575ea);
+	&_data_word(0x1b090912, 0x9e83831d, 0x742c2c58, 0x2e1a1a34);
+	&_data_word(0x2d1b1b36, 0xb26e6edc, 0xee5a5ab4, 0xfba0a05b);
+	&_data_word(0xf65252a4, 0x4d3b3b76, 0x61d6d6b7, 0xceb3b37d);
+	&_data_word(0x7b292952, 0x3ee3e3dd, 0x712f2f5e, 0x97848413);
+	&_data_word(0xf55353a6, 0x68d1d1b9, 0x00000000, 0x2cededc1);
+	&_data_word(0x60202040, 0x1ffcfce3, 0xc8b1b179, 0xed5b5bb6);
+	&_data_word(0xbe6a6ad4, 0x46cbcb8d, 0xd9bebe67, 0x4b393972);
+	&_data_word(0xde4a4a94, 0xd44c4c98, 0xe85858b0, 0x4acfcf85);
+	&_data_word(0x6bd0d0bb, 0x2aefefc5, 0xe5aaaa4f, 0x16fbfbed);
+	&_data_word(0xc5434386, 0xd74d4d9a, 0x55333366, 0x94858511);
+	&_data_word(0xcf45458a, 0x10f9f9e9, 0x06020204, 0x817f7ffe);
+	&_data_word(0xf05050a0, 0x443c3c78, 0xba9f9f25, 0xe3a8a84b);
+	&_data_word(0xf35151a2, 0xfea3a35d, 0xc0404080, 0x8a8f8f05);
+	&_data_word(0xad92923f, 0xbc9d9d21, 0x48383870, 0x04f5f5f1);
+	&_data_word(0xdfbcbc63, 0xc1b6b677, 0x75dadaaf, 0x63212142);
+	&_data_word(0x30101020, 0x1affffe5, 0x0ef3f3fd, 0x6dd2d2bf);
+	&_data_word(0x4ccdcd81, 0x140c0c18, 0x35131326, 0x2fececc3);
+	&_data_word(0xe15f5fbe, 0xa2979735, 0xcc444488, 0x3917172e);
+	&_data_word(0x57c4c493, 0xf2a7a755, 0x827e7efc, 0x473d3d7a);
+	&_data_word(0xac6464c8, 0xe75d5dba, 0x2b191932, 0x957373e6);
+	&_data_word(0xa06060c0, 0x98818119, 0xd14f4f9e, 0x7fdcdca3);
+	&_data_word(0x66222244, 0x7e2a2a54, 0xab90903b, 0x8388880b);
+	&_data_word(0xca46468c, 0x29eeeec7, 0xd3b8b86b, 0x3c141428);
+	&_data_word(0x79dedea7, 0xe25e5ebc, 0x1d0b0b16, 0x76dbdbad);
+	&_data_word(0x3be0e0db, 0x56323264, 0x4e3a3a74, 0x1e0a0a14);
+	&_data_word(0xdb494992, 0x0a06060c, 0x6c242448, 0xe45c5cb8);
+	&_data_word(0x5dc2c29f, 0x6ed3d3bd, 0xefacac43, 0xa66262c4);
+	&_data_word(0xa8919139, 0xa4959531, 0x37e4e4d3, 0x8b7979f2);
+	&_data_word(0x32e7e7d5, 0x43c8c88b, 0x5937376e, 0xb76d6dda);
+	&_data_word(0x8c8d8d01, 0x64d5d5b1, 0xd24e4e9c, 0xe0a9a949);
+	&_data_word(0xb46c6cd8, 0xfa5656ac, 0x07f4f4f3, 0x25eaeacf);
+	&_data_word(0xaf6565ca, 0x8e7a7af4, 0xe9aeae47, 0x18080810);
+	&_data_word(0xd5baba6f, 0x887878f0, 0x6f25254a, 0x722e2e5c);
+	&_data_word(0x241c1c38, 0xf1a6a657, 0xc7b4b473, 0x51c6c697);
+	&_data_word(0x23e8e8cb, 0x7cdddda1, 0x9c7474e8, 0x211f1f3e);
+	&_data_word(0xdd4b4b96, 0xdcbdbd61, 0x868b8b0d, 0x858a8a0f);
+	&_data_word(0x907070e0, 0x423e3e7c, 0xc4b5b571, 0xaa6666cc);
+	&_data_word(0xd8484890, 0x05030306, 0x01f6f6f7, 0x120e0e1c);
+	&_data_word(0xa36161c2, 0x5f35356a, 0xf95757ae, 0xd0b9b969);
+	&_data_word(0x91868617, 0x58c1c199, 0x271d1d3a, 0xb99e9e27);
+	&_data_word(0x38e1e1d9, 0x13f8f8eb, 0xb398982b, 0x33111122);
+	&_data_word(0xbb6969d2, 0x70d9d9a9, 0x898e8e07, 0xa7949433);
+	&_data_word(0xb69b9b2d, 0x221e1e3c, 0x92878715, 0x20e9e9c9);
+	&_data_word(0x49cece87, 0xff5555aa, 0x78282850, 0x7adfdfa5);
+	&_data_word(0x8f8c8c03, 0xf8a1a159, 0x80898909, 0x170d0d1a);
+	&_data_word(0xdabfbf65, 0x31e6e6d7, 0xc6424284, 0xb86868d0);
+	&_data_word(0xc3414182, 0xb0999929, 0x772d2d5a, 0x110f0f1e);
+	&_data_word(0xcbb0b07b, 0xfc5454a8, 0xd6bbbb6d, 0x3a16162c);
+
+#Te4	# four copies of Te4 to choose from to avoid L1 aliasing
+	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+
+	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+
+	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+
+	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
+	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
+	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
+	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
+	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
+	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
+	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
+	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
+	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
+	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
+	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
+	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
+	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
+	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
+	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
+	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
+	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
+	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
+	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
+	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
+	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
+	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
+	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
+	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
+	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
+	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
+	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
+	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
+	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
+	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
+	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
+	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
+#rcon:
+$code.=<<___;
+	.long	0x00000001, 0x00000002, 0x00000004, 0x00000008
+	.long	0x00000010, 0x00000020, 0x00000040, 0x00000080
+	.long	0x0000001b, 0x00000036, 0x80808080, 0x80808080
+	.long	0xfefefefe, 0xfefefefe, 0x1b1b1b1b, 0x1b1b1b1b
+___
+$code.=<<___;
+.align	64
+.LAES_Td:
+___
+	&_data_word(0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a);
+	&_data_word(0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b);
+	&_data_word(0x55fa3020, 0xf66d76ad, 0x9176cc88, 0x254c02f5);
+	&_data_word(0xfcd7e54f, 0xd7cb2ac5, 0x80443526, 0x8fa362b5);
+	&_data_word(0x495ab1de, 0x671bba25, 0x980eea45, 0xe1c0fe5d);
+	&_data_word(0x02752fc3, 0x12f04c81, 0xa397468d, 0xc6f9d36b);
+	&_data_word(0xe75f8f03, 0x959c9215, 0xeb7a6dbf, 0xda595295);
+	&_data_word(0x2d83bed4, 0xd3217458, 0x2969e049, 0x44c8c98e);
+	&_data_word(0x6a89c275, 0x78798ef4, 0x6b3e5899, 0xdd71b927);
+	&_data_word(0xb64fe1be, 0x17ad88f0, 0x66ac20c9, 0xb43ace7d);
+	&_data_word(0x184adf63, 0x82311ae5, 0x60335197, 0x457f5362);
+	&_data_word(0xe07764b1, 0x84ae6bbb, 0x1ca081fe, 0x942b08f9);
+	&_data_word(0x58684870, 0x19fd458f, 0x876cde94, 0xb7f87b52);
+	&_data_word(0x23d373ab, 0xe2024b72, 0x578f1fe3, 0x2aab5566);
+	&_data_word(0x0728ebb2, 0x03c2b52f, 0x9a7bc586, 0xa50837d3);
+	&_data_word(0xf2872830, 0xb2a5bf23, 0xba6a0302, 0x5c8216ed);
+	&_data_word(0x2b1ccf8a, 0x92b479a7, 0xf0f207f3, 0xa1e2694e);
+	&_data_word(0xcdf4da65, 0xd5be0506, 0x1f6234d1, 0x8afea6c4);
+	&_data_word(0x9d532e34, 0xa055f3a2, 0x32e18a05, 0x75ebf6a4);
+	&_data_word(0x39ec830b, 0xaaef6040, 0x069f715e, 0x51106ebd);
+	&_data_word(0xf98a213e, 0x3d06dd96, 0xae053edd, 0x46bde64d);
+	&_data_word(0xb58d5491, 0x055dc471, 0x6fd40604, 0xff155060);
+	&_data_word(0x24fb9819, 0x97e9bdd6, 0xcc434089, 0x779ed967);
+	&_data_word(0xbd42e8b0, 0x888b8907, 0x385b19e7, 0xdbeec879);
+	&_data_word(0x470a7ca1, 0xe90f427c, 0xc91e84f8, 0x00000000);
+	&_data_word(0x83868009, 0x48ed2b32, 0xac70111e, 0x4e725a6c);
+	&_data_word(0xfbff0efd, 0x5638850f, 0x1ed5ae3d, 0x27392d36);
+	&_data_word(0x64d90f0a, 0x21a65c68, 0xd1545b9b, 0x3a2e3624);
+	&_data_word(0xb1670a0c, 0x0fe75793, 0xd296eeb4, 0x9e919b1b);
+	&_data_word(0x4fc5c080, 0xa220dc61, 0x694b775a, 0x161a121c);
+	&_data_word(0x0aba93e2, 0xe52aa0c0, 0x43e0223c, 0x1d171b12);
+	&_data_word(0x0b0d090e, 0xadc78bf2, 0xb9a8b62d, 0xc8a91e14);
+	&_data_word(0x8519f157, 0x4c0775af, 0xbbdd99ee, 0xfd607fa3);
+	&_data_word(0x9f2601f7, 0xbcf5725c, 0xc53b6644, 0x347efb5b);
+	&_data_word(0x7629438b, 0xdcc623cb, 0x68fcedb6, 0x63f1e4b8);
+	&_data_word(0xcadc31d7, 0x10856342, 0x40229713, 0x2011c684);
+	&_data_word(0x7d244a85, 0xf83dbbd2, 0x1132f9ae, 0x6da129c7);
+	&_data_word(0x4b2f9e1d, 0xf330b2dc, 0xec52860d, 0xd0e3c177);
+	&_data_word(0x6c16b32b, 0x99b970a9, 0xfa489411, 0x2264e947);
+	&_data_word(0xc48cfca8, 0x1a3ff0a0, 0xd82c7d56, 0xef903322);
+	&_data_word(0xc74e4987, 0xc1d138d9, 0xfea2ca8c, 0x360bd498);
+	&_data_word(0xcf81f5a6, 0x28de7aa5, 0x268eb7da, 0xa4bfad3f);
+	&_data_word(0xe49d3a2c, 0x0d927850, 0x9bcc5f6a, 0x62467e54);
+	&_data_word(0xc2138df6, 0xe8b8d890, 0x5ef7392e, 0xf5afc382);
+	&_data_word(0xbe805d9f, 0x7c93d069, 0xa92dd56f, 0xb31225cf);
+	&_data_word(0x3b99acc8, 0xa77d1810, 0x6e639ce8, 0x7bbb3bdb);
+	&_data_word(0x097826cd, 0xf418596e, 0x01b79aec, 0xa89a4f83);
+	&_data_word(0x656e95e6, 0x7ee6ffaa, 0x08cfbc21, 0xe6e815ef);
+	&_data_word(0xd99be7ba, 0xce366f4a, 0xd4099fea, 0xd67cb029);
+	&_data_word(0xafb2a431, 0x31233f2a, 0x3094a5c6, 0xc066a235);
+	&_data_word(0x37bc4e74, 0xa6ca82fc, 0xb0d090e0, 0x15d8a733);
+	&_data_word(0x4a9804f1, 0xf7daec41, 0x0e50cd7f, 0x2ff69117);
+	&_data_word(0x8dd64d76, 0x4db0ef43, 0x544daacc, 0xdf0496e4);
+	&_data_word(0xe3b5d19e, 0x1b886a4c, 0xb81f2cc1, 0x7f516546);
+	&_data_word(0x04ea5e9d, 0x5d358c01, 0x737487fa, 0x2e410bfb);
+	&_data_word(0x5a1d67b3, 0x52d2db92, 0x335610e9, 0x1347d66d);
+	&_data_word(0x8c61d79a, 0x7a0ca137, 0x8e14f859, 0x893c13eb);
+	&_data_word(0xee27a9ce, 0x35c961b7, 0xede51ce1, 0x3cb1477a);
+	&_data_word(0x59dfd29c, 0x3f73f255, 0x79ce1418, 0xbf37c773);
+	&_data_word(0xeacdf753, 0x5baafd5f, 0x146f3ddf, 0x86db4478);
+	&_data_word(0x81f3afca, 0x3ec468b9, 0x2c342438, 0x5f40a3c2);
+	&_data_word(0x72c31d16, 0x0c25e2bc, 0x8b493c28, 0x41950dff);
+	&_data_word(0x7101a839, 0xdeb30c08, 0x9ce4b4d8, 0x90c15664);
+	&_data_word(0x6184cb7b, 0x70b632d5, 0x745c6c48, 0x4257b8d0);
+
+#Td4:	# four copies of Td4 to choose from to avoid L1 aliasing
+	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+$code.=<<___;
+	.long	0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
+	.long	0x1b1b1b1b, 0x1b1b1b1b, 0, 0
+___
+	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+$code.=<<___;
+	.long	0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
+	.long	0x1b1b1b1b, 0x1b1b1b1b, 0, 0
+___
+	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+$code.=<<___;
+	.long	0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
+	.long	0x1b1b1b1b, 0x1b1b1b1b, 0, 0
+___
+	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
+	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
+	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
+	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
+	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
+	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
+	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
+	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
+	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
+	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
+	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
+	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
+	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
+	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
+	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
+	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
+	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
+	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
+	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
+	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
+	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
+	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
+	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
+	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
+	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
+	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
+	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
+	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
+	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
+	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
+	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
+	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
+$code.=<<___;
+	.long	0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
+	.long	0x1b1b1b1b, 0x1b1b1b1b, 0, 0
+.asciz  "AES for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align	64
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+.type	block_se_handler,\@abi-omnipotent
+.align	16
+block_se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lin_block_prologue
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lin_block_prologue
+
+	mov	24(%rax),%rax		# pull saved real stack pointer
+
+	mov	-8(%rax),%rbx
+	mov	-16(%rax),%rbp
+	mov	-24(%rax),%r12
+	mov	-32(%rax),%r13
+	mov	-40(%rax),%r14
+	mov	-48(%rax),%r15
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+.Lin_block_prologue:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	jmp	.Lcommon_seh_exit
+.size	block_se_handler,.-block_se_handler
+
+.type	key_se_handler,\@abi-omnipotent
+.align	16
+key_se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lin_key_prologue
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lin_key_prologue
+
+	lea	56(%rax),%rax
+
+	mov	-8(%rax),%rbx
+	mov	-16(%rax),%rbp
+	mov	-24(%rax),%r12
+	mov	-32(%rax),%r13
+	mov	-40(%rax),%r14
+	mov	-48(%rax),%r15
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+.Lin_key_prologue:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	jmp	.Lcommon_seh_exit
+.size	key_se_handler,.-key_se_handler
+
+.type	cbc_se_handler,\@abi-omnipotent
+.align	16
+cbc_se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	lea	.Lcbc_prologue(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<.Lcbc_prologue
+	jb	.Lin_cbc_prologue
+
+	lea	.Lcbc_fast_body(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<.Lcbc_fast_body
+	jb	.Lin_cbc_frame_setup
+
+	lea	.Lcbc_slow_prologue(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<.Lcbc_slow_prologue
+	jb	.Lin_cbc_body
+
+	lea	.Lcbc_slow_body(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<.Lcbc_slow_body
+	jb	.Lin_cbc_frame_setup
+
+.Lin_cbc_body:
+	mov	152($context),%rax	# pull context->Rsp
+
+	lea	.Lcbc_epilogue(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip>=.Lcbc_epilogue
+	jae	.Lin_cbc_prologue
+
+	lea	8(%rax),%rax
+
+	lea	.Lcbc_popfq(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip>=.Lcbc_popfq
+	jae	.Lin_cbc_prologue
+
+	mov	`16-8`(%rax),%rax	# biased $_rsp
+	lea	56(%rax),%rax
+
+.Lin_cbc_frame_setup:
+	mov	-16(%rax),%rbx
+	mov	-24(%rax),%rbp
+	mov	-32(%rax),%r12
+	mov	-40(%rax),%r13
+	mov	-48(%rax),%r14
+	mov	-56(%rax),%r15
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+.Lin_cbc_prologue:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+.Lcommon_seh_exit:
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$`1232/8`,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	cbc_se_handler,.-cbc_se_handler
+
+.section	.pdata
+.align	4
+	.rva	.LSEH_begin_asm_AES_encrypt
+	.rva	.LSEH_end_asm_AES_encrypt
+	.rva	.LSEH_info_asm_AES_encrypt
+
+	.rva	.LSEH_begin_asm_AES_decrypt
+	.rva	.LSEH_end_asm_AES_decrypt
+	.rva	.LSEH_info_asm_AES_decrypt
+
+	.rva	.LSEH_begin_asm_AES_set_encrypt_key
+	.rva	.LSEH_end_asm_AES_set_encrypt_key
+	.rva	.LSEH_info_asm_AES_set_encrypt_key
+
+	.rva	.LSEH_begin_asm_AES_set_decrypt_key
+	.rva	.LSEH_end_asm_AES_set_decrypt_key
+	.rva	.LSEH_info_asm_AES_set_decrypt_key
+
+	.rva	.LSEH_begin_asm_AES_cbc_encrypt
+	.rva	.LSEH_end_asm_AES_cbc_encrypt
+	.rva	.LSEH_info_asm_AES_cbc_encrypt
+
+.section	.xdata
+.align	8
+.LSEH_info_asm_AES_encrypt:
+	.byte	9,0,0,0
+	.rva	block_se_handler
+	.rva	.Lenc_prologue,.Lenc_epilogue	# HandlerData[]
+.LSEH_info_asm_AES_decrypt:
+	.byte	9,0,0,0
+	.rva	block_se_handler
+	.rva	.Ldec_prologue,.Ldec_epilogue	# HandlerData[]
+.LSEH_info_asm_AES_set_encrypt_key:
+	.byte	9,0,0,0
+	.rva	key_se_handler
+	.rva	.Lenc_key_prologue,.Lenc_key_epilogue	# HandlerData[]
+.LSEH_info_asm_AES_set_decrypt_key:
+	.byte	9,0,0,0
+	.rva	key_se_handler
+	.rva	.Ldec_key_prologue,.Ldec_key_epilogue	# HandlerData[]
+.LSEH_info_asm_AES_cbc_encrypt:
+	.byte	9,0,0,0
+	.rva	cbc_se_handler
+___
+}
+
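+# Expand arithmetic quoted in backticks before emitting the code, e.g.
+# `1232/8` in the SEH exit path above becomes 154 (sizeof(CONTEXT) in
+# 8-byte quadwords for the rep movsq copy) and `16-8` becomes 8.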
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+print $code;
+
+close STDOUT;
diff --git a/src/crypto/fipsmodule/aes/asm/aesni-x86.pl b/src/crypto/fipsmodule/aes/asm/aesni-x86.pl
new file mode 100644
index 0000000..66af51b
--- /dev/null
+++ b/src/crypto/fipsmodule/aes/asm/aesni-x86.pl
@@ -0,0 +1,2533 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements support for the Intel AES-NI extension. In
+# an OpenSSL context it's used with the Intel engine, but it can also
+# be used as a drop-in replacement for crypto/aes/asm/aes-586.pl [see
+# below for details].
+#
+# Performance.
+#
+# To start with, see the corresponding paragraph in aesni-x86_64.pl...
+# Instead of filling in a table similar to the one found there, I've
+# chosen to summarize *comparison* results for raw ECB, CTR and CBC
+# benchmarks. The simplified table below gives 32-bit performance
+# relative to the 64-bit one at each point. Ratios vary across
+# encryption modes, hence the interval values.
+#
+#	16-byte     64-byte     256-byte    1-KB        8-KB
+#	53-67%      67-84%      91-94%      95-98%      97-99.5%
+#
+# Lower ratios for smaller block sizes are perfectly understandable,
+# because function call overhead is higher in 32-bit mode. For the
+# largest, 8-KB, blocks performance is virtually the same: 32-bit code
+# is less than 1% slower for ECB, CBC and CCM, and ~3% slower otherwise.
+
+# January 2011
+#
+# See aesni-x86_64.pl for details. Unlike the x86_64 version, this
+# module interleaves at most 6 aes[enc|dec] instructions, because
+# there are not enough registers for an 8x interleave [which should be
+# optimal for Sandy Bridge]. In fact, the performance results for the
+# 6x interleave factor presented in aesni-x86_64.pl (except for CTR)
+# are for this module.
+
+# April 2011
+#
+# Add aesni_xts_[en|de]crypt. Westmere spends 1.50 cycles processing
+# one byte out of 8KB with 128-bit key, Sandy Bridge - 1.09.
+
+######################################################################
+# Current large-block performance in cycles per byte processed with
+# 128-bit key (less is better).
+#
+#		CBC en-/decrypt	CTR	XTS	ECB
+# Westmere	3.77/1.37	1.37	1.52	1.27
+# * Bridge	5.07/0.98	0.99	1.09	0.91
+# Haswell	4.44/0.80	0.97	1.03	0.72
+# Skylake	2.68/0.65	0.65	0.66	0.64
+# Silvermont	5.77/3.56	3.67	4.03	3.46
+# Goldmont	3.84/1.39	1.39	1.63	1.31
+# Bulldozer	5.80/0.98	1.05	1.24	0.93
+
+$PREFIX="aesni";	# if $PREFIX is set to "AES", the script
+			# generates drop-in replacement for
+			# crypto/aes/asm/aes-586.pl:-)
+$inline=1;		# inline _aesni_[en|de]crypt
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../../perlasm");
+require "x86asm.pl";
+
+$output = pop;
+open OUT,">$output";
+*STDOUT=*OUT;
+
+&asm_init($ARGV[0],$0);
+
+&external_label("OPENSSL_ia32cap_P");
+&static_label("key_const");
+
+if ($PREFIX eq "aesni")	{ $movekey=\&movups; }
+else			{ $movekey=\&movups; }
+
+$len="eax";
+$rounds="ecx";
+$key="edx";
+$inp="esi";
+$out="edi";
+$rounds_="ebx";	# backup copy for $rounds
+$key_="ebp";	# backup copy for $key
+
+$rndkey0="xmm0";
+$rndkey1="xmm1";
+$inout0="xmm2";
+$inout1="xmm3";
+$inout2="xmm4";
+$inout3="xmm5";	$in1="xmm5";
+$inout4="xmm6";	$in0="xmm6";
+$inout5="xmm7";	$ivec="xmm7";
+
+# AESNI extension
+sub aeskeygenassist
+{ my($dst,$src,$imm)=@_;
+    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
+    {	&data_byte(0x66,0x0f,0x3a,0xdf,0xc0|($1<<3)|$2,$imm);	}
+}
+sub aescommon
+{ my($opcodelet,$dst,$src)=@_;
+    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
+    {	&data_byte(0x66,0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);}
+}
+sub aesimc	{ aescommon(0xdb,@_); }
+sub aesenc	{ aescommon(0xdc,@_); }
+sub aesenclast	{ aescommon(0xdd,@_); }
+sub aesdec	{ aescommon(0xde,@_); }
+sub aesdeclast	{ aescommon(0xdf,@_); }
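+# For example (simply expanding the byte construction above, nothing
+# new): &aesenc("xmm2","xmm1") emits 0x66,0x0f,0x38,0xdc,0xd1, where
+# the ModR/M byte 0xc0|(2<<3)|1 = 0xd1 selects xmm2 as destination and
+# xmm1 as source, i.e. what an AES-NI-aware assembler would spell
+# "aesenc %xmm1,%xmm2".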
+
+# Inline version of internal aesni_[en|de]crypt1
+{ my $sn;
+sub aesni_inline_generate1
+{ my ($p,$inout,$ivec)=@_; $inout=$inout0 if (!defined($inout));
+  $sn++;
+
+    &$movekey		($rndkey0,&QWP(0,$key));
+    &$movekey		($rndkey1,&QWP(16,$key));
+    &xorps		($ivec,$rndkey0)	if (defined($ivec));
+    &lea		($key,&DWP(32,$key));
+    &xorps		($inout,$ivec)		if (defined($ivec));
+    &xorps		($inout,$rndkey0)	if (!defined($ivec));
+    &set_label("${p}1_loop_$sn");
+	eval"&aes${p}	($inout,$rndkey1)";
+	&dec		($rounds);
+	&$movekey	($rndkey1,&QWP(0,$key));
+	&lea		($key,&DWP(16,$key));
+    &jnz		(&label("${p}1_loop_$sn"));
+    eval"&aes${p}last	($inout,$rndkey1)";
+}}
+
+sub aesni_generate1	# fully unrolled loop
+{ my ($p,$inout)=@_; $inout=$inout0 if (!defined($inout));
+
+    &function_begin_B("_aesni_${p}rypt1");
+	&movups		($rndkey0,&QWP(0,$key));
+	&$movekey	($rndkey1,&QWP(0x10,$key));
+	&xorps		($inout,$rndkey0);
+	&$movekey	($rndkey0,&QWP(0x20,$key));
+	&lea		($key,&DWP(0x30,$key));
+	&cmp		($rounds,11);
+	&jb		(&label("${p}128"));
+	&lea		($key,&DWP(0x20,$key));
+	&je		(&label("${p}192"));
+	&lea		($key,&DWP(0x20,$key));
+	eval"&aes${p}	($inout,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(-0x40,$key));
+	eval"&aes${p}	($inout,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(-0x30,$key));
+    &set_label("${p}192");
+	eval"&aes${p}	($inout,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(-0x20,$key));
+	eval"&aes${p}	($inout,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(-0x10,$key));
+    &set_label("${p}128");
+	eval"&aes${p}	($inout,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(0,$key));
+	eval"&aes${p}	($inout,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(0x10,$key));
+	eval"&aes${p}	($inout,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(0x20,$key));
+	eval"&aes${p}	($inout,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(0x30,$key));
+	eval"&aes${p}	($inout,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(0x40,$key));
+	eval"&aes${p}	($inout,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(0x50,$key));
+	eval"&aes${p}	($inout,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(0x60,$key));
+	eval"&aes${p}	($inout,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(0x70,$key));
+	eval"&aes${p}	($inout,$rndkey1)";
+    eval"&aes${p}last	($inout,$rndkey0)";
+    &ret();
+    &function_end_B("_aesni_${p}rypt1");
+}
+
+# void $PREFIX_encrypt (const void *inp,void *out,const AES_KEY *key);
+&aesni_generate1("enc") if (!$inline);
+&function_begin_B("${PREFIX}_encrypt");
+	&mov	("eax",&wparam(0));
+	&mov	($key,&wparam(2));
+	&movups	($inout0,&QWP(0,"eax"));
+	&mov	($rounds,&DWP(240,$key));
+	&mov	("eax",&wparam(1));
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+	&pxor	($rndkey0,$rndkey0);		# clear register bank
+	&pxor	($rndkey1,$rndkey1);
+	&movups	(&QWP(0,"eax"),$inout0);
+	&pxor	($inout0,$inout0);
+	&ret	();
+&function_end_B("${PREFIX}_encrypt");
+
+# void $PREFIX_decrypt (const void *inp,void *out,const AES_KEY *key);
+&aesni_generate1("dec") if(!$inline);
+&function_begin_B("${PREFIX}_decrypt");
+	&mov	("eax",&wparam(0));
+	&mov	($key,&wparam(2));
+	&movups	($inout0,&QWP(0,"eax"));
+	&mov	($rounds,&DWP(240,$key));
+	&mov	("eax",&wparam(1));
+	if ($inline)
+	{   &aesni_inline_generate1("dec");	}
+	else
+	{   &call	("_aesni_decrypt1");	}
+	&pxor	($rndkey0,$rndkey0);		# clear register bank
+	&pxor	($rndkey1,$rndkey1);
+	&movups	(&QWP(0,"eax"),$inout0);
+	&pxor	($inout0,$inout0);
+	&ret	();
+&function_end_B("${PREFIX}_decrypt");
+
+# _aesni_[en|de]cryptN are private interfaces, N denotes the interleave
+# factor. Why were 3x subroutines originally used in loops? Even though
+# aes[enc|dec] latency was originally 6 cycles, it could be scheduled
+# only every *2nd* cycle. Thus 3x interleave was the factor providing
+# optimal utilization, i.e. the interleaved subroutine's throughput is
+# virtually the same as that of the non-interleaved one [for up to 3
+# input blocks]. This is why it originally made no sense to implement
+# a 2x subroutine. But times change, and it became appropriate to
+# spend an extra 192 bytes on a 2x subroutine on account of Atom
+# Silvermont. For processors that can schedule aes[enc|dec] every
+# cycle the optimal interleave factor equals the corresponding
+# instruction latency. 8x is optimal for * Bridge, but it's infeasible
+# to accommodate such an implementation in the XMM registers
+# addressable in 32-bit mode, therefore a maximum of 6x is used
+# instead...
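+#
+# As a rough illustration of the arithmetic above (using only the
+# latency/throughput figures quoted in this comment, not fresh
+# measurements): with a 6-cycle latency and one aes[enc|dec] accepted
+# every 2nd cycle, about 6/2 = 3 independent blocks suffice to keep
+# the unit busy, hence 3x; on cores that accept one such instruction
+# per cycle the break-even factor grows to the full latency, but the
+# register assignments above reserve xmm0/xmm1 for round keys and
+# leave only six $inoutN registers, hence the 6x cap.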
+
+sub aesni_generate2
+{ my $p=shift;
+
+    &function_begin_B("_aesni_${p}rypt2");
+	&$movekey	($rndkey0,&QWP(0,$key));
+	&shl		($rounds,4);
+	&$movekey	($rndkey1,&QWP(16,$key));
+	&xorps		($inout0,$rndkey0);
+	&pxor		($inout1,$rndkey0);
+	&$movekey	($rndkey0,&QWP(32,$key));
+	&lea		($key,&DWP(32,$key,$rounds));
+	&neg		($rounds);
+	&add		($rounds,16);
+
+    &set_label("${p}2_loop");
+	eval"&aes${p}	($inout0,$rndkey1)";
+	eval"&aes${p}	($inout1,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(0,$key,$rounds));
+	&add		($rounds,32);
+	eval"&aes${p}	($inout0,$rndkey0)";
+	eval"&aes${p}	($inout1,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(-16,$key,$rounds));
+	&jnz		(&label("${p}2_loop"));
+    eval"&aes${p}	($inout0,$rndkey1)";
+    eval"&aes${p}	($inout1,$rndkey1)";
+    eval"&aes${p}last	($inout0,$rndkey0)";
+    eval"&aes${p}last	($inout1,$rndkey0)";
+    &ret();
+    &function_end_B("_aesni_${p}rypt2");
+}
+
+sub aesni_generate3
+{ my $p=shift;
+
+    &function_begin_B("_aesni_${p}rypt3");
+	&$movekey	($rndkey0,&QWP(0,$key));
+	&shl		($rounds,4);
+	&$movekey	($rndkey1,&QWP(16,$key));
+	&xorps		($inout0,$rndkey0);
+	&pxor		($inout1,$rndkey0);
+	&pxor		($inout2,$rndkey0);
+	&$movekey	($rndkey0,&QWP(32,$key));
+	&lea		($key,&DWP(32,$key,$rounds));
+	&neg		($rounds);
+	&add		($rounds,16);
+
+    &set_label("${p}3_loop");
+	eval"&aes${p}	($inout0,$rndkey1)";
+	eval"&aes${p}	($inout1,$rndkey1)";
+	eval"&aes${p}	($inout2,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(0,$key,$rounds));
+	&add		($rounds,32);
+	eval"&aes${p}	($inout0,$rndkey0)";
+	eval"&aes${p}	($inout1,$rndkey0)";
+	eval"&aes${p}	($inout2,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(-16,$key,$rounds));
+	&jnz		(&label("${p}3_loop"));
+    eval"&aes${p}	($inout0,$rndkey1)";
+    eval"&aes${p}	($inout1,$rndkey1)";
+    eval"&aes${p}	($inout2,$rndkey1)";
+    eval"&aes${p}last	($inout0,$rndkey0)";
+    eval"&aes${p}last	($inout1,$rndkey0)";
+    eval"&aes${p}last	($inout2,$rndkey0)";
+    &ret();
+    &function_end_B("_aesni_${p}rypt3");
+}
+
+# 4x interleave is implemented to improve small-block performance,
+# most notably [and naturally] 4-block performance by ~30%. One can
+# argue that 5x should have been implemented as well, but the
+# improvement would be <20%, so it's not worth it...
+sub aesni_generate4
+{ my $p=shift;
+
+    &function_begin_B("_aesni_${p}rypt4");
+	&$movekey	($rndkey0,&QWP(0,$key));
+	&$movekey	($rndkey1,&QWP(16,$key));
+	&shl		($rounds,4);
+	&xorps		($inout0,$rndkey0);
+	&pxor		($inout1,$rndkey0);
+	&pxor		($inout2,$rndkey0);
+	&pxor		($inout3,$rndkey0);
+	&$movekey	($rndkey0,&QWP(32,$key));
+	&lea		($key,&DWP(32,$key,$rounds));
+	&neg		($rounds);
+	&data_byte	(0x0f,0x1f,0x40,0x00);
+	&add		($rounds,16);
+
+    &set_label("${p}4_loop");
+	eval"&aes${p}	($inout0,$rndkey1)";
+	eval"&aes${p}	($inout1,$rndkey1)";
+	eval"&aes${p}	($inout2,$rndkey1)";
+	eval"&aes${p}	($inout3,$rndkey1)";
+	&$movekey	($rndkey1,&QWP(0,$key,$rounds));
+	&add		($rounds,32);
+	eval"&aes${p}	($inout0,$rndkey0)";
+	eval"&aes${p}	($inout1,$rndkey0)";
+	eval"&aes${p}	($inout2,$rndkey0)";
+	eval"&aes${p}	($inout3,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(-16,$key,$rounds));
+    &jnz		(&label("${p}4_loop"));
+
+    eval"&aes${p}	($inout0,$rndkey1)";
+    eval"&aes${p}	($inout1,$rndkey1)";
+    eval"&aes${p}	($inout2,$rndkey1)";
+    eval"&aes${p}	($inout3,$rndkey1)";
+    eval"&aes${p}last	($inout0,$rndkey0)";
+    eval"&aes${p}last	($inout1,$rndkey0)";
+    eval"&aes${p}last	($inout2,$rndkey0)";
+    eval"&aes${p}last	($inout3,$rndkey0)";
+    &ret();
+    &function_end_B("_aesni_${p}rypt4");
+}
+
+sub aesni_generate6
+{ my $p=shift;
+
+    &function_begin_B("_aesni_${p}rypt6");
+    &static_label("_aesni_${p}rypt6_enter");
+	&$movekey	($rndkey0,&QWP(0,$key));
+	&shl		($rounds,4);
+	&$movekey	($rndkey1,&QWP(16,$key));
+	&xorps		($inout0,$rndkey0);
+	&pxor		($inout1,$rndkey0);	# pxor does better here
+	&pxor		($inout2,$rndkey0);
+	eval"&aes${p}	($inout0,$rndkey1)";
+	&pxor		($inout3,$rndkey0);
+	&pxor		($inout4,$rndkey0);
+	eval"&aes${p}	($inout1,$rndkey1)";
+	&lea		($key,&DWP(32,$key,$rounds));
+	&neg		($rounds);
+	eval"&aes${p}	($inout2,$rndkey1)";
+	&pxor		($inout5,$rndkey0);
+	&$movekey	($rndkey0,&QWP(0,$key,$rounds));
+	&add		($rounds,16);
+	&jmp		(&label("_aesni_${p}rypt6_inner"));
+
+    &set_label("${p}6_loop",16);
+	eval"&aes${p}	($inout0,$rndkey1)";
+	eval"&aes${p}	($inout1,$rndkey1)";
+	eval"&aes${p}	($inout2,$rndkey1)";
+    &set_label("_aesni_${p}rypt6_inner");
+	eval"&aes${p}	($inout3,$rndkey1)";
+	eval"&aes${p}	($inout4,$rndkey1)";
+	eval"&aes${p}	($inout5,$rndkey1)";
+    &set_label("_aesni_${p}rypt6_enter");
+	&$movekey	($rndkey1,&QWP(0,$key,$rounds));
+	&add		($rounds,32);
+	eval"&aes${p}	($inout0,$rndkey0)";
+	eval"&aes${p}	($inout1,$rndkey0)";
+	eval"&aes${p}	($inout2,$rndkey0)";
+	eval"&aes${p}	($inout3,$rndkey0)";
+	eval"&aes${p}	($inout4,$rndkey0)";
+	eval"&aes${p}	($inout5,$rndkey0)";
+	&$movekey	($rndkey0,&QWP(-16,$key,$rounds));
+    &jnz		(&label("${p}6_loop"));
+
+    eval"&aes${p}	($inout0,$rndkey1)";
+    eval"&aes${p}	($inout1,$rndkey1)";
+    eval"&aes${p}	($inout2,$rndkey1)";
+    eval"&aes${p}	($inout3,$rndkey1)";
+    eval"&aes${p}	($inout4,$rndkey1)";
+    eval"&aes${p}	($inout5,$rndkey1)";
+    eval"&aes${p}last	($inout0,$rndkey0)";
+    eval"&aes${p}last	($inout1,$rndkey0)";
+    eval"&aes${p}last	($inout2,$rndkey0)";
+    eval"&aes${p}last	($inout3,$rndkey0)";
+    eval"&aes${p}last	($inout4,$rndkey0)";
+    eval"&aes${p}last	($inout5,$rndkey0)";
+    &ret();
+    &function_end_B("_aesni_${p}rypt6");
+}
+&aesni_generate2("enc") if ($PREFIX eq "aesni");
+&aesni_generate2("dec");
+&aesni_generate3("enc") if ($PREFIX eq "aesni");
+&aesni_generate3("dec");
+&aesni_generate4("enc") if ($PREFIX eq "aesni");
+&aesni_generate4("dec");
+&aesni_generate6("enc") if ($PREFIX eq "aesni");
+&aesni_generate6("dec");
+
+if ($PREFIX eq "aesni") {
+######################################################################
+# void aesni_ecb_encrypt (const void *in, void *out,
+#                         size_t length, const AES_KEY *key,
+#                         int enc);
+&function_begin("aesni_ecb_encrypt");
+	&mov	($inp,&wparam(0));
+	&mov	($out,&wparam(1));
+	&mov	($len,&wparam(2));
+	&mov	($key,&wparam(3));
+	&mov	($rounds_,&wparam(4));
+	&and	($len,-16);
+	&jz	(&label("ecb_ret"));
+	&mov	($rounds,&DWP(240,$key));
+	&test	($rounds_,$rounds_);
+	&jz	(&label("ecb_decrypt"));
+
+	&mov	($key_,$key);		# backup $key
+	&mov	($rounds_,$rounds);	# backup $rounds
+	&cmp	($len,0x60);
+	&jb	(&label("ecb_enc_tail"));
+
+	&movdqu	($inout0,&QWP(0,$inp));
+	&movdqu	($inout1,&QWP(0x10,$inp));
+	&movdqu	($inout2,&QWP(0x20,$inp));
+	&movdqu	($inout3,&QWP(0x30,$inp));
+	&movdqu	($inout4,&QWP(0x40,$inp));
+	&movdqu	($inout5,&QWP(0x50,$inp));
+	&lea	($inp,&DWP(0x60,$inp));
+	&sub	($len,0x60);
+	&jmp	(&label("ecb_enc_loop6_enter"));
+
+&set_label("ecb_enc_loop6",16);
+	&movups	(&QWP(0,$out),$inout0);
+	&movdqu	($inout0,&QWP(0,$inp));
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movdqu	($inout1,&QWP(0x10,$inp));
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movdqu	($inout2,&QWP(0x20,$inp));
+	&movups	(&QWP(0x30,$out),$inout3);
+	&movdqu	($inout3,&QWP(0x30,$inp));
+	&movups	(&QWP(0x40,$out),$inout4);
+	&movdqu	($inout4,&QWP(0x40,$inp));
+	&movups	(&QWP(0x50,$out),$inout5);
+	&lea	($out,&DWP(0x60,$out));
+	&movdqu	($inout5,&QWP(0x50,$inp));
+	&lea	($inp,&DWP(0x60,$inp));
+&set_label("ecb_enc_loop6_enter");
+
+	&call	("_aesni_encrypt6");
+
+	&mov	($key,$key_);		# restore $key
+	&mov	($rounds,$rounds_);	# restore $rounds
+	&sub	($len,0x60);
+	&jnc	(&label("ecb_enc_loop6"));
+
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&movups	(&QWP(0x40,$out),$inout4);
+	&movups	(&QWP(0x50,$out),$inout5);
+	&lea	($out,&DWP(0x60,$out));
+	&add	($len,0x60);
+	&jz	(&label("ecb_ret"));
+
+&set_label("ecb_enc_tail");
+	&movups	($inout0,&QWP(0,$inp));
+	&cmp	($len,0x20);
+	&jb	(&label("ecb_enc_one"));
+	&movups	($inout1,&QWP(0x10,$inp));
+	&je	(&label("ecb_enc_two"));
+	&movups	($inout2,&QWP(0x20,$inp));
+	&cmp	($len,0x40);
+	&jb	(&label("ecb_enc_three"));
+	&movups	($inout3,&QWP(0x30,$inp));
+	&je	(&label("ecb_enc_four"));
+	&movups	($inout4,&QWP(0x40,$inp));
+	&xorps	($inout5,$inout5);
+	&call	("_aesni_encrypt6");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&movups	(&QWP(0x40,$out),$inout4);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_enc_one",16);
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+	&movups	(&QWP(0,$out),$inout0);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_enc_two",16);
+	&call	("_aesni_encrypt2");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_enc_three",16);
+	&call	("_aesni_encrypt3");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_enc_four",16);
+	&call	("_aesni_encrypt4");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&jmp	(&label("ecb_ret"));
+######################################################################
+&set_label("ecb_decrypt",16);
+	&mov	($key_,$key);		# backup $key
+	&mov	($rounds_,$rounds);	# backup $rounds
+	&cmp	($len,0x60);
+	&jb	(&label("ecb_dec_tail"));
+
+	&movdqu	($inout0,&QWP(0,$inp));
+	&movdqu	($inout1,&QWP(0x10,$inp));
+	&movdqu	($inout2,&QWP(0x20,$inp));
+	&movdqu	($inout3,&QWP(0x30,$inp));
+	&movdqu	($inout4,&QWP(0x40,$inp));
+	&movdqu	($inout5,&QWP(0x50,$inp));
+	&lea	($inp,&DWP(0x60,$inp));
+	&sub	($len,0x60);
+	&jmp	(&label("ecb_dec_loop6_enter"));
+
+&set_label("ecb_dec_loop6",16);
+	&movups	(&QWP(0,$out),$inout0);
+	&movdqu	($inout0,&QWP(0,$inp));
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movdqu	($inout1,&QWP(0x10,$inp));
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movdqu	($inout2,&QWP(0x20,$inp));
+	&movups	(&QWP(0x30,$out),$inout3);
+	&movdqu	($inout3,&QWP(0x30,$inp));
+	&movups	(&QWP(0x40,$out),$inout4);
+	&movdqu	($inout4,&QWP(0x40,$inp));
+	&movups	(&QWP(0x50,$out),$inout5);
+	&lea	($out,&DWP(0x60,$out));
+	&movdqu	($inout5,&QWP(0x50,$inp));
+	&lea	($inp,&DWP(0x60,$inp));
+&set_label("ecb_dec_loop6_enter");
+
+	&call	("_aesni_decrypt6");
+
+	&mov	($key,$key_);		# restore $key
+	&mov	($rounds,$rounds_);	# restore $rounds
+	&sub	($len,0x60);
+	&jnc	(&label("ecb_dec_loop6"));
+
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&movups	(&QWP(0x40,$out),$inout4);
+	&movups	(&QWP(0x50,$out),$inout5);
+	&lea	($out,&DWP(0x60,$out));
+	&add	($len,0x60);
+	&jz	(&label("ecb_ret"));
+
+&set_label("ecb_dec_tail");
+	&movups	($inout0,&QWP(0,$inp));
+	&cmp	($len,0x20);
+	&jb	(&label("ecb_dec_one"));
+	&movups	($inout1,&QWP(0x10,$inp));
+	&je	(&label("ecb_dec_two"));
+	&movups	($inout2,&QWP(0x20,$inp));
+	&cmp	($len,0x40);
+	&jb	(&label("ecb_dec_three"));
+	&movups	($inout3,&QWP(0x30,$inp));
+	&je	(&label("ecb_dec_four"));
+	&movups	($inout4,&QWP(0x40,$inp));
+	&xorps	($inout5,$inout5);
+	&call	("_aesni_decrypt6");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&movups	(&QWP(0x40,$out),$inout4);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_dec_one",16);
+	if ($inline)
+	{   &aesni_inline_generate1("dec");	}
+	else
+	{   &call	("_aesni_decrypt1");	}
+	&movups	(&QWP(0,$out),$inout0);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_dec_two",16);
+	&call	("_aesni_decrypt2");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_dec_three",16);
+	&call	("_aesni_decrypt3");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&jmp	(&label("ecb_ret"));
+
+&set_label("ecb_dec_four",16);
+	&call	("_aesni_decrypt4");
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+
+&set_label("ecb_ret");
+	&pxor	("xmm0","xmm0");		# clear register bank
+	&pxor	("xmm1","xmm1");
+	&pxor	("xmm2","xmm2");
+	&pxor	("xmm3","xmm3");
+	&pxor	("xmm4","xmm4");
+	&pxor	("xmm5","xmm5");
+	&pxor	("xmm6","xmm6");
+	&pxor	("xmm7","xmm7");
+&function_end("aesni_ecb_encrypt");
+
+######################################################################
+# void aesni_ccm64_[en|de]crypt_blocks (const void *in, void *out,
+#                         size_t blocks, const AES_KEY *key,
+#                         const char *ivec,char *cmac);
+#
+# Handles only complete blocks, operates on 64-bit counter and
+# does not update *ivec! Nor does it finalize CMAC value
+# (see engine/eng_aesni.c for details)
+#
+{ my $cmac=$inout1;
+&function_begin("aesni_ccm64_encrypt_blocks");
+	&mov	($inp,&wparam(0));
+	&mov	($out,&wparam(1));
+	&mov	($len,&wparam(2));
+	&mov	($key,&wparam(3));
+	&mov	($rounds_,&wparam(4));
+	&mov	($rounds,&wparam(5));
+	&mov	($key_,"esp");
+	&sub	("esp",60);
+	&and	("esp",-16);			# align stack
+	&mov	(&DWP(48,"esp"),$key_);
+
+	&movdqu	($ivec,&QWP(0,$rounds_));	# load ivec
+	&movdqu	($cmac,&QWP(0,$rounds));	# load cmac
+	&mov	($rounds,&DWP(240,$key));
+
+	# compose byte-swap control mask for pshufb on stack
+	&mov	(&DWP(0,"esp"),0x0c0d0e0f);
+	&mov	(&DWP(4,"esp"),0x08090a0b);
+	&mov	(&DWP(8,"esp"),0x04050607);
+	&mov	(&DWP(12,"esp"),0x00010203);
+
+	# compose counter increment vector on stack
+	&mov	($rounds_,1);
+	&xor	($key_,$key_);
+	&mov	(&DWP(16,"esp"),$rounds_);
+	&mov	(&DWP(20,"esp"),$key_);
+	&mov	(&DWP(24,"esp"),$key_);
+	&mov	(&DWP(28,"esp"),$key_);
+
+	&shl	($rounds,4);
+	&mov	($rounds_,16);
+	&lea	($key_,&DWP(0,$key));
+	&movdqa	($inout3,&QWP(0,"esp"));
+	&movdqa	($inout0,$ivec);
+	&lea	($key,&DWP(32,$key,$rounds));
+	&sub	($rounds_,$rounds);
+	&pshufb	($ivec,$inout3);
+
+&set_label("ccm64_enc_outer");
+	&$movekey	($rndkey0,&QWP(0,$key_));
+	&mov		($rounds,$rounds_);
+	&movups		($in0,&QWP(0,$inp));
+
+	&xorps		($inout0,$rndkey0);
+	&$movekey	($rndkey1,&QWP(16,$key_));
+	&xorps		($rndkey0,$in0);
+	&xorps		($cmac,$rndkey0);		# cmac^=inp
+	&$movekey	($rndkey0,&QWP(32,$key_));
+
+&set_label("ccm64_enc2_loop");
+	&aesenc		($inout0,$rndkey1);
+	&aesenc		($cmac,$rndkey1);
+	&$movekey	($rndkey1,&QWP(0,$key,$rounds));
+	&add		($rounds,32);
+	&aesenc		($inout0,$rndkey0);
+	&aesenc		($cmac,$rndkey0);
+	&$movekey	($rndkey0,&QWP(-16,$key,$rounds));
+	&jnz		(&label("ccm64_enc2_loop"));
+	&aesenc		($inout0,$rndkey1);
+	&aesenc		($cmac,$rndkey1);
+	&paddq		($ivec,&QWP(16,"esp"));
+	&dec		($len);
+	&aesenclast	($inout0,$rndkey0);
+	&aesenclast	($cmac,$rndkey0);
+
+	&lea	($inp,&DWP(16,$inp));
+	&xorps	($in0,$inout0);			# inp^=E(ivec)
+	&movdqa	($inout0,$ivec);
+	&movups	(&QWP(0,$out),$in0);		# save output
+	&pshufb	($inout0,$inout3);
+	&lea	($out,&DWP(16,$out));
+	&jnz	(&label("ccm64_enc_outer"));
+
+	&mov	("esp",&DWP(48,"esp"));
+	&mov	($out,&wparam(5));
+	&movups	(&QWP(0,$out),$cmac);
+
+	&pxor	("xmm0","xmm0");		# clear register bank
+	&pxor	("xmm1","xmm1");
+	&pxor	("xmm2","xmm2");
+	&pxor	("xmm3","xmm3");
+	&pxor	("xmm4","xmm4");
+	&pxor	("xmm5","xmm5");
+	&pxor	("xmm6","xmm6");
+	&pxor	("xmm7","xmm7");
+&function_end("aesni_ccm64_encrypt_blocks");
+
+&function_begin("aesni_ccm64_decrypt_blocks");
+	&mov	($inp,&wparam(0));
+	&mov	($out,&wparam(1));
+	&mov	($len,&wparam(2));
+	&mov	($key,&wparam(3));
+	&mov	($rounds_,&wparam(4));
+	&mov	($rounds,&wparam(5));
+	&mov	($key_,"esp");
+	&sub	("esp",60);
+	&and	("esp",-16);			# align stack
+	&mov	(&DWP(48,"esp"),$key_);
+
+	&movdqu	($ivec,&QWP(0,$rounds_));	# load ivec
+	&movdqu	($cmac,&QWP(0,$rounds));	# load cmac
+	&mov	($rounds,&DWP(240,$key));
+
+	# compose byte-swap control mask for pshufb on stack
+	&mov	(&DWP(0,"esp"),0x0c0d0e0f);
+	&mov	(&DWP(4,"esp"),0x08090a0b);
+	&mov	(&DWP(8,"esp"),0x04050607);
+	&mov	(&DWP(12,"esp"),0x00010203);
+
+	# compose counter increment vector on stack
+	&mov	($rounds_,1);
+	&xor	($key_,$key_);
+	&mov	(&DWP(16,"esp"),$rounds_);
+	&mov	(&DWP(20,"esp"),$key_);
+	&mov	(&DWP(24,"esp"),$key_);
+	&mov	(&DWP(28,"esp"),$key_);
+
+	&movdqa	($inout3,&QWP(0,"esp"));	# bswap mask
+	&movdqa	($inout0,$ivec);
+
+	&mov	($key_,$key);
+	&mov	($rounds_,$rounds);
+
+	&pshufb	($ivec,$inout3);
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+	&shl	($rounds_,4);
+	&mov	($rounds,16);
+	&movups	($in0,&QWP(0,$inp));		# load inp
+	&paddq	($ivec,&QWP(16,"esp"));
+	&lea	($inp,&QWP(16,$inp));
+	&sub	($rounds,$rounds_);
+	&lea	($key,&DWP(32,$key_,$rounds_));
+	&mov	($rounds_,$rounds);
+	&jmp	(&label("ccm64_dec_outer"));
+
+&set_label("ccm64_dec_outer",16);
+	&xorps	($in0,$inout0);			# inp ^= E(ivec)
+	&movdqa	($inout0,$ivec);
+	&movups	(&QWP(0,$out),$in0);		# save output
+	&lea	($out,&DWP(16,$out));
+	&pshufb	($inout0,$inout3);
+
+	&sub	($len,1);
+	&jz	(&label("ccm64_dec_break"));
+
+	&$movekey	($rndkey0,&QWP(0,$key_));
+	&mov		($rounds,$rounds_);
+	&$movekey	($rndkey1,&QWP(16,$key_));
+	&xorps		($in0,$rndkey0);
+	&xorps		($inout0,$rndkey0);
+	&xorps		($cmac,$in0);		# cmac^=out
+	&$movekey	($rndkey0,&QWP(32,$key_));
+
+&set_label("ccm64_dec2_loop");
+	&aesenc		($inout0,$rndkey1);
+	&aesenc		($cmac,$rndkey1);
+	&$movekey	($rndkey1,&QWP(0,$key,$rounds));
+	&add		($rounds,32);
+	&aesenc		($inout0,$rndkey0);
+	&aesenc		($cmac,$rndkey0);
+	&$movekey	($rndkey0,&QWP(-16,$key,$rounds));
+	&jnz		(&label("ccm64_dec2_loop"));
+	&movups		($in0,&QWP(0,$inp));	# load inp
+	&paddq		($ivec,&QWP(16,"esp"));
+	&aesenc		($inout0,$rndkey1);
+	&aesenc		($cmac,$rndkey1);
+	&aesenclast	($inout0,$rndkey0);
+	&aesenclast	($cmac,$rndkey0);
+	&lea		($inp,&QWP(16,$inp));
+	&jmp	(&label("ccm64_dec_outer"));
+
+&set_label("ccm64_dec_break",16);
+	&mov	($rounds,&DWP(240,$key_));
+	&mov	($key,$key_);
+	if ($inline)
+	{   &aesni_inline_generate1("enc",$cmac,$in0);	}
+	else
+	{   &call	("_aesni_encrypt1",$cmac);	}
+
+	&mov	("esp",&DWP(48,"esp"));
+	&mov	($out,&wparam(5));
+	&movups	(&QWP(0,$out),$cmac);
+
+	&pxor	("xmm0","xmm0");		# clear register bank
+	&pxor	("xmm1","xmm1");
+	&pxor	("xmm2","xmm2");
+	&pxor	("xmm3","xmm3");
+	&pxor	("xmm4","xmm4");
+	&pxor	("xmm5","xmm5");
+	&pxor	("xmm6","xmm6");
+	&pxor	("xmm7","xmm7");
+&function_end("aesni_ccm64_decrypt_blocks");
+}
+
+######################################################################
+# void aesni_ctr32_encrypt_blocks (const void *in, void *out,
+#                         size_t blocks, const AES_KEY *key,
+#                         const char *ivec);
+#
+# Handles only complete blocks, operates on 32-bit counter and
+# does not update *ivec! (see crypto/modes/ctr128.c for details)
+#
+# stack layout:
+#	0	pshufb mask
+#	16	vector addend: 0,6,6,6
+# 	32	counter-less ivec
+#	48	1st triplet of counter vector
+#	64	2nd triplet of counter vector
+#	80	saved %esp
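+#
+# For illustration (this merely restates what the code below composes):
+# if the byte-swapped 32-bit counter pulled from *ivec is n, the 1st
+# triplet holds the counters n,n+1,n+2 and the 2nd triplet n+3,n+4,n+5,
+# one per 32-bit lane; each 6-block iteration then bumps both triplets
+# by the addend vector at 16(%esp), i.e. by 6.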
+
+&function_begin("aesni_ctr32_encrypt_blocks");
+	&mov	($inp,&wparam(0));
+	&mov	($out,&wparam(1));
+	&mov	($len,&wparam(2));
+	&mov	($key,&wparam(3));
+	&mov	($rounds_,&wparam(4));
+	&mov	($key_,"esp");
+	&sub	("esp",88);
+	&and	("esp",-16);			# align stack
+	&mov	(&DWP(80,"esp"),$key_);
+
+	&cmp	($len,1);
+	&je	(&label("ctr32_one_shortcut"));
+
+	&movdqu	($inout5,&QWP(0,$rounds_));	# load ivec
+
+	# compose byte-swap control mask for pshufb on stack
+	&mov	(&DWP(0,"esp"),0x0c0d0e0f);
+	&mov	(&DWP(4,"esp"),0x08090a0b);
+	&mov	(&DWP(8,"esp"),0x04050607);
+	&mov	(&DWP(12,"esp"),0x00010203);
+
+	# compose counter increment vector on stack
+	&mov	($rounds,6);
+	&xor	($key_,$key_);
+	&mov	(&DWP(16,"esp"),$rounds);
+	&mov	(&DWP(20,"esp"),$rounds);
+	&mov	(&DWP(24,"esp"),$rounds);
+	&mov	(&DWP(28,"esp"),$key_);
+
+	&pextrd	($rounds_,$inout5,3);		# pull 32-bit counter
+	&pinsrd	($inout5,$key_,3);		# wipe 32-bit counter
+
+	&mov	($rounds,&DWP(240,$key));	# key->rounds
+
+	# compose 2 vectors of 3x32-bit counters
+	&bswap	($rounds_);
+	&pxor	($rndkey0,$rndkey0);
+	&pxor	($rndkey1,$rndkey1);
+	&movdqa	($inout0,&QWP(0,"esp"));	# load byte-swap mask
+	&pinsrd	($rndkey0,$rounds_,0);
+	&lea	($key_,&DWP(3,$rounds_));
+	&pinsrd	($rndkey1,$key_,0);
+	&inc	($rounds_);
+	&pinsrd	($rndkey0,$rounds_,1);
+	&inc	($key_);
+	&pinsrd	($rndkey1,$key_,1);
+	&inc	($rounds_);
+	&pinsrd	($rndkey0,$rounds_,2);
+	&inc	($key_);
+	&pinsrd	($rndkey1,$key_,2);
+	&movdqa	(&QWP(48,"esp"),$rndkey0);	# save 1st triplet
+	&pshufb	($rndkey0,$inout0);		# byte swap
+	&movdqu	($inout4,&QWP(0,$key));		# key[0]
+	&movdqa	(&QWP(64,"esp"),$rndkey1);	# save 2nd triplet
+	&pshufb	($rndkey1,$inout0);		# byte swap
+
+	&pshufd	($inout0,$rndkey0,3<<6);	# place counter to upper dword
+	&pshufd	($inout1,$rndkey0,2<<6);
+	&cmp	($len,6);
+	&jb	(&label("ctr32_tail"));
+	&pxor	($inout5,$inout4);		# counter-less ivec^key[0]
+	&shl	($rounds,4);
+	&mov	($rounds_,16);
+	&movdqa	(&QWP(32,"esp"),$inout5);	# save counter-less ivec^key[0]
+	&mov	($key_,$key);			# backup $key
+	&sub	($rounds_,$rounds);		# backup twisted $rounds
+	&lea	($key,&DWP(32,$key,$rounds));
+	&sub	($len,6);
+	&jmp	(&label("ctr32_loop6"));
+
+&set_label("ctr32_loop6",16);
+	# inlining _aesni_encrypt6's prologue gives ~6% improvement...
+	&pshufd	($inout2,$rndkey0,1<<6);
+	&movdqa	($rndkey0,&QWP(32,"esp"));	# pull counter-less ivec
+	&pshufd	($inout3,$rndkey1,3<<6);
+	&pxor		($inout0,$rndkey0);	# merge counter-less ivec
+	&pshufd	($inout4,$rndkey1,2<<6);
+	&pxor		($inout1,$rndkey0);
+	&pshufd	($inout5,$rndkey1,1<<6);
+	&$movekey	($rndkey1,&QWP(16,$key_));
+	&pxor		($inout2,$rndkey0);
+	&pxor		($inout3,$rndkey0);
+	&aesenc		($inout0,$rndkey1);
+	&pxor		($inout4,$rndkey0);
+	&pxor		($inout5,$rndkey0);
+	&aesenc		($inout1,$rndkey1);
+	&$movekey	($rndkey0,&QWP(32,$key_));
+	&mov		($rounds,$rounds_);
+	&aesenc		($inout2,$rndkey1);
+	&aesenc		($inout3,$rndkey1);
+	&aesenc		($inout4,$rndkey1);
+	&aesenc		($inout5,$rndkey1);
+
+	&call		(&label("_aesni_encrypt6_enter"));
+
+	&movups	($rndkey1,&QWP(0,$inp));
+	&movups	($rndkey0,&QWP(0x10,$inp));
+	&xorps	($inout0,$rndkey1);
+	&movups	($rndkey1,&QWP(0x20,$inp));
+	&xorps	($inout1,$rndkey0);
+	&movups	(&QWP(0,$out),$inout0);
+	&movdqa	($rndkey0,&QWP(16,"esp"));	# load increment
+	&xorps	($inout2,$rndkey1);
+	&movdqa	($rndkey1,&QWP(64,"esp"));	# load 2nd triplet
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+
+	&paddd	($rndkey1,$rndkey0);		# 2nd triplet increment
+	&paddd	($rndkey0,&QWP(48,"esp"));	# 1st triplet increment
+	&movdqa	($inout0,&QWP(0,"esp"));	# load byte swap mask
+
+	&movups	($inout1,&QWP(0x30,$inp));
+	&movups	($inout2,&QWP(0x40,$inp));
+	&xorps	($inout3,$inout1);
+	&movups	($inout1,&QWP(0x50,$inp));
+	&lea	($inp,&DWP(0x60,$inp));
+	&movdqa	(&QWP(48,"esp"),$rndkey0);	# save 1st triplet
+	&pshufb	($rndkey0,$inout0);		# byte swap
+	&xorps	($inout4,$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&xorps	($inout5,$inout1);
+	&movdqa	(&QWP(64,"esp"),$rndkey1);	# save 2nd triplet
+	&pshufb	($rndkey1,$inout0);		# byte swap
+	&movups	(&QWP(0x40,$out),$inout4);
+	&pshufd	($inout0,$rndkey0,3<<6);
+	&movups	(&QWP(0x50,$out),$inout5);
+	&lea	($out,&DWP(0x60,$out));
+
+	&pshufd	($inout1,$rndkey0,2<<6);
+	&sub	($len,6);
+	&jnc	(&label("ctr32_loop6"));
+
+	&add	($len,6);
+	&jz	(&label("ctr32_ret"));
+	&movdqu	($inout5,&QWP(0,$key_));
+	&mov	($key,$key_);
+	&pxor	($inout5,&QWP(32,"esp"));	# restore count-less ivec
+	&mov	($rounds,&DWP(240,$key_));	# restore $rounds
+
+&set_label("ctr32_tail");
+	&por	($inout0,$inout5);
+	&cmp	($len,2);
+	&jb	(&label("ctr32_one"));
+
+	&pshufd	($inout2,$rndkey0,1<<6);
+	&por	($inout1,$inout5);
+	&je	(&label("ctr32_two"));
+
+	&pshufd	($inout3,$rndkey1,3<<6);
+	&por	($inout2,$inout5);
+	&cmp	($len,4);
+	&jb	(&label("ctr32_three"));
+
+	&pshufd	($inout4,$rndkey1,2<<6);
+	&por	($inout3,$inout5);
+	&je	(&label("ctr32_four"));
+
+	&por	($inout4,$inout5);
+	&call	("_aesni_encrypt6");
+	&movups	($rndkey1,&QWP(0,$inp));
+	&movups	($rndkey0,&QWP(0x10,$inp));
+	&xorps	($inout0,$rndkey1);
+	&movups	($rndkey1,&QWP(0x20,$inp));
+	&xorps	($inout1,$rndkey0);
+	&movups	($rndkey0,&QWP(0x30,$inp));
+	&xorps	($inout2,$rndkey1);
+	&movups	($rndkey1,&QWP(0x40,$inp));
+	&xorps	($inout3,$rndkey0);
+	&movups	(&QWP(0,$out),$inout0);
+	&xorps	($inout4,$rndkey1);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&movups	(&QWP(0x40,$out),$inout4);
+	&jmp	(&label("ctr32_ret"));
+
+&set_label("ctr32_one_shortcut",16);
+	&movups	($inout0,&QWP(0,$rounds_));	# load ivec
+	&mov	($rounds,&DWP(240,$key));
+
+&set_label("ctr32_one");
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+	&movups	($in0,&QWP(0,$inp));
+	&xorps	($in0,$inout0);
+	&movups	(&QWP(0,$out),$in0);
+	&jmp	(&label("ctr32_ret"));
+
+&set_label("ctr32_two",16);
+	&call	("_aesni_encrypt2");
+	&movups	($inout3,&QWP(0,$inp));
+	&movups	($inout4,&QWP(0x10,$inp));
+	&xorps	($inout0,$inout3);
+	&xorps	($inout1,$inout4);
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&jmp	(&label("ctr32_ret"));
+
+&set_label("ctr32_three",16);
+	&call	("_aesni_encrypt3");
+	&movups	($inout3,&QWP(0,$inp));
+	&movups	($inout4,&QWP(0x10,$inp));
+	&xorps	($inout0,$inout3);
+	&movups	($inout5,&QWP(0x20,$inp));
+	&xorps	($inout1,$inout4);
+	&movups	(&QWP(0,$out),$inout0);
+	&xorps	($inout2,$inout5);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&jmp	(&label("ctr32_ret"));
+
+&set_label("ctr32_four",16);
+	&call	("_aesni_encrypt4");
+	&movups	($inout4,&QWP(0,$inp));
+	&movups	($inout5,&QWP(0x10,$inp));
+	&movups	($rndkey1,&QWP(0x20,$inp));
+	&xorps	($inout0,$inout4);
+	&movups	($rndkey0,&QWP(0x30,$inp));
+	&xorps	($inout1,$inout5);
+	&movups	(&QWP(0,$out),$inout0);
+	&xorps	($inout2,$rndkey1);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&xorps	($inout3,$rndkey0);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+
+&set_label("ctr32_ret");
+	&pxor	("xmm0","xmm0");		# clear register bank
+	&pxor	("xmm1","xmm1");
+	&pxor	("xmm2","xmm2");
+	&pxor	("xmm3","xmm3");
+	&pxor	("xmm4","xmm4");
+	&movdqa	(&QWP(32,"esp"),"xmm0");	# clear stack
+	&pxor	("xmm5","xmm5");
+	&movdqa	(&QWP(48,"esp"),"xmm0");
+	&pxor	("xmm6","xmm6");
+	&movdqa	(&QWP(64,"esp"),"xmm0");
+	&pxor	("xmm7","xmm7");
+	&mov	("esp",&DWP(80,"esp"));
+&function_end("aesni_ctr32_encrypt_blocks");
+
+######################################################################
+# void aesni_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+#	const AES_KEY *key1, const AES_KEY *key2
+#	const unsigned char iv[16]);
+#
+{ my ($tweak,$twtmp,$twres,$twmask)=($rndkey1,$rndkey0,$inout0,$inout1);
+
+&function_begin("aesni_xts_encrypt");
+	&mov	($key,&wparam(4));		# key2
+	&mov	($inp,&wparam(5));		# clear-text tweak
+
+	&mov	($rounds,&DWP(240,$key));	# key2->rounds
+	&movups	($inout0,&QWP(0,$inp));
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+
+	&mov	($inp,&wparam(0));
+	&mov	($out,&wparam(1));
+	&mov	($len,&wparam(2));
+	&mov	($key,&wparam(3));		# key1
+
+	&mov	($key_,"esp");
+	&sub	("esp",16*7+8);
+	&mov	($rounds,&DWP(240,$key));	# key1->rounds
+	&and	("esp",-16);			# align stack
+
+	&mov	(&DWP(16*6+0,"esp"),0x87);	# compose the magic constant
+	&mov	(&DWP(16*6+4,"esp"),0);
+	&mov	(&DWP(16*6+8,"esp"),1);
+	&mov	(&DWP(16*6+12,"esp"),0);
+	&mov	(&DWP(16*7+0,"esp"),$len);	# save original $len
+	&mov	(&DWP(16*7+4,"esp"),$key_);	# save original %esp
+
+	&movdqa	($tweak,$inout0);
+	&pxor	($twtmp,$twtmp);
+	&movdqa	($twmask,&QWP(6*16,"esp"));	# 0x0...010...87
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+
+	&and	($len,-16);
+	&mov	($key_,$key);			# backup $key
+	&mov	($rounds_,$rounds);		# backup $rounds
+	&sub	($len,16*6);
+	&jc	(&label("xts_enc_short"));
+
+	&shl	($rounds,4);
+	&mov	($rounds_,16);
+	&sub	($rounds_,$rounds);
+	&lea	($key,&DWP(32,$key,$rounds));
+	&jmp	(&label("xts_enc_loop6"));
+
+&set_label("xts_enc_loop6",16);
+	for ($i=0;$i<4;$i++) {
+	    &pshufd	($twres,$twtmp,0x13);
+	    &pxor	($twtmp,$twtmp);
+	    &movdqa	(&QWP(16*$i,"esp"),$tweak);
+	    &paddq	($tweak,$tweak);	# &psllq($tweak,1);
+	    &pand	($twres,$twmask);	# isolate carry and residue
+	    &pcmpgtd	($twtmp,$tweak);	# broadcast upper bits
+	    &pxor	($tweak,$twres);
+	}
+	&pshufd	($inout5,$twtmp,0x13);
+	&movdqa	(&QWP(16*$i++,"esp"),$tweak);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	 &$movekey	($rndkey0,&QWP(0,$key_));
+	&pand	($inout5,$twmask);		# isolate carry and residue
+	 &movups	($inout0,&QWP(0,$inp));	# load input
+	&pxor	($inout5,$tweak);
+
+	# inline _aesni_encrypt6 prologue and flip xor with tweak and key[0]
+	&mov	($rounds,$rounds_);		# restore $rounds
+	&movdqu	($inout1,&QWP(16*1,$inp));
+	 &xorps		($inout0,$rndkey0);	# input^=rndkey[0]
+	&movdqu	($inout2,&QWP(16*2,$inp));
+	 &pxor		($inout1,$rndkey0);
+	&movdqu	($inout3,&QWP(16*3,$inp));
+	 &pxor		($inout2,$rndkey0);
+	&movdqu	($inout4,&QWP(16*4,$inp));
+	 &pxor		($inout3,$rndkey0);
+	&movdqu	($rndkey1,&QWP(16*5,$inp));
+	 &pxor		($inout4,$rndkey0);
+	&lea	($inp,&DWP(16*6,$inp));
+	&pxor	($inout0,&QWP(16*0,"esp"));	# input^=tweak
+	&movdqa	(&QWP(16*$i,"esp"),$inout5);	# save last tweak
+	&pxor	($inout5,$rndkey1);
+
+	 &$movekey	($rndkey1,&QWP(16,$key_));
+	&pxor	($inout1,&QWP(16*1,"esp"));
+	&pxor	($inout2,&QWP(16*2,"esp"));
+	 &aesenc	($inout0,$rndkey1);
+	&pxor	($inout3,&QWP(16*3,"esp"));
+	&pxor	($inout4,&QWP(16*4,"esp"));
+	 &aesenc	($inout1,$rndkey1);
+	&pxor		($inout5,$rndkey0);
+	 &$movekey	($rndkey0,&QWP(32,$key_));
+	 &aesenc	($inout2,$rndkey1);
+	 &aesenc	($inout3,$rndkey1);
+	 &aesenc	($inout4,$rndkey1);
+	 &aesenc	($inout5,$rndkey1);
+	&call		(&label("_aesni_encrypt6_enter"));
+
+	&movdqa	($tweak,&QWP(16*5,"esp"));	# last tweak
+       &pxor	($twtmp,$twtmp);
+	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
+       &pcmpgtd	($twtmp,$tweak);		# broadcast upper bits
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&xorps	($inout2,&QWP(16*2,"esp"));
+	&movups	(&QWP(16*1,$out),$inout1);
+	&xorps	($inout3,&QWP(16*3,"esp"));
+	&movups	(&QWP(16*2,$out),$inout2);
+	&xorps	($inout4,&QWP(16*4,"esp"));
+	&movups	(&QWP(16*3,$out),$inout3);
+	&xorps	($inout5,$tweak);
+	&movups	(&QWP(16*4,$out),$inout4);
+       &pshufd	($twres,$twtmp,0x13);
+	&movups	(&QWP(16*5,$out),$inout5);
+	&lea	($out,&DWP(16*6,$out));
+       &movdqa	($twmask,&QWP(16*6,"esp"));	# 0x0...010...87
+
+	&pxor	($twtmp,$twtmp);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+
+	&sub	($len,16*6);
+	&jnc	(&label("xts_enc_loop6"));
+
+	&mov	($rounds,&DWP(240,$key_));	# restore $rounds
+	&mov	($key,$key_);			# restore $key
+	&mov	($rounds_,$rounds);
+
+&set_label("xts_enc_short");
+	&add	($len,16*6);
+	&jz	(&label("xts_enc_done6x"));
+
+	&movdqa	($inout3,$tweak);		# put aside previous tweak
+	&cmp	($len,0x20);
+	&jb	(&label("xts_enc_one"));
+
+	&pshufd	($twres,$twtmp,0x13);
+	&pxor	($twtmp,$twtmp);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+	&je	(&label("xts_enc_two"));
+
+	&pshufd	($twres,$twtmp,0x13);
+	&pxor	($twtmp,$twtmp);
+	&movdqa	($inout4,$tweak);		# put aside previous tweak
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+	&cmp	($len,0x40);
+	&jb	(&label("xts_enc_three"));
+
+	&pshufd	($twres,$twtmp,0x13);
+	&pxor	($twtmp,$twtmp);
+	&movdqa	($inout5,$tweak);		# put aside previous tweak
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+	&movdqa	(&QWP(16*0,"esp"),$inout3);
+	&movdqa	(&QWP(16*1,"esp"),$inout4);
+	&je	(&label("xts_enc_four"));
+
+	&movdqa	(&QWP(16*2,"esp"),$inout5);
+	&pshufd	($inout5,$twtmp,0x13);
+	&movdqa	(&QWP(16*3,"esp"),$tweak);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($inout5,$twmask);		# isolate carry and residue
+	&pxor	($inout5,$tweak);
+
+	&movdqu	($inout0,&QWP(16*0,$inp));	# load input
+	&movdqu	($inout1,&QWP(16*1,$inp));
+	&movdqu	($inout2,&QWP(16*2,$inp));
+	&pxor	($inout0,&QWP(16*0,"esp"));	# input^=tweak
+	&movdqu	($inout3,&QWP(16*3,$inp));
+	&pxor	($inout1,&QWP(16*1,"esp"));
+	&movdqu	($inout4,&QWP(16*4,$inp));
+	&pxor	($inout2,&QWP(16*2,"esp"));
+	&lea	($inp,&DWP(16*5,$inp));
+	&pxor	($inout3,&QWP(16*3,"esp"));
+	&movdqa	(&QWP(16*4,"esp"),$inout5);	# save last tweak
+	&pxor	($inout4,$inout5);
+
+	&call	("_aesni_encrypt6");
+
+	&movaps	($tweak,&QWP(16*4,"esp"));	# last tweak
+	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&xorps	($inout2,&QWP(16*2,"esp"));
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&xorps	($inout3,&QWP(16*3,"esp"));
+	&movups	(&QWP(16*1,$out),$inout1);
+	&xorps	($inout4,$tweak);
+	&movups	(&QWP(16*2,$out),$inout2);
+	&movups	(&QWP(16*3,$out),$inout3);
+	&movups	(&QWP(16*4,$out),$inout4);
+	&lea	($out,&DWP(16*5,$out));
+	&jmp	(&label("xts_enc_done"));
+
+&set_label("xts_enc_one",16);
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&lea	($inp,&DWP(16*1,$inp));
+	&xorps	($inout0,$inout3);		# input^=tweak
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&lea	($out,&DWP(16*1,$out));
+
+	&movdqa	($tweak,$inout3);		# last tweak
+	&jmp	(&label("xts_enc_done"));
+
+&set_label("xts_enc_two",16);
+	&movaps	($inout4,$tweak);		# put aside last tweak
+
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&movups	($inout1,&QWP(16*1,$inp));
+	&lea	($inp,&DWP(16*2,$inp));
+	&xorps	($inout0,$inout3);		# input^=tweak
+	&xorps	($inout1,$inout4);
+
+	&call	("_aesni_encrypt2");
+
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&xorps	($inout1,$inout4);
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&movups	(&QWP(16*1,$out),$inout1);
+	&lea	($out,&DWP(16*2,$out));
+
+	&movdqa	($tweak,$inout4);		# last tweak
+	&jmp	(&label("xts_enc_done"));
+
+&set_label("xts_enc_three",16);
+	&movaps	($inout5,$tweak);		# put aside last tweak
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&movups	($inout1,&QWP(16*1,$inp));
+	&movups	($inout2,&QWP(16*2,$inp));
+	&lea	($inp,&DWP(16*3,$inp));
+	&xorps	($inout0,$inout3);		# input^=tweak
+	&xorps	($inout1,$inout4);
+	&xorps	($inout2,$inout5);
+
+	&call	("_aesni_encrypt3");
+
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&xorps	($inout1,$inout4);
+	&xorps	($inout2,$inout5);
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&movups	(&QWP(16*1,$out),$inout1);
+	&movups	(&QWP(16*2,$out),$inout2);
+	&lea	($out,&DWP(16*3,$out));
+
+	&movdqa	($tweak,$inout5);		# last tweak
+	&jmp	(&label("xts_enc_done"));
+
+&set_label("xts_enc_four",16);
+	&movaps	($inout4,$tweak);		# put aside last tweak
+
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&movups	($inout1,&QWP(16*1,$inp));
+	&movups	($inout2,&QWP(16*2,$inp));
+	&xorps	($inout0,&QWP(16*0,"esp"));	# input^=tweak
+	&movups	($inout3,&QWP(16*3,$inp));
+	&lea	($inp,&DWP(16*4,$inp));
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&xorps	($inout2,$inout5);
+	&xorps	($inout3,$inout4);
+
+	&call	("_aesni_encrypt4");
+
+	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&xorps	($inout2,$inout5);
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&xorps	($inout3,$inout4);
+	&movups	(&QWP(16*1,$out),$inout1);
+	&movups	(&QWP(16*2,$out),$inout2);
+	&movups	(&QWP(16*3,$out),$inout3);
+	&lea	($out,&DWP(16*4,$out));
+
+	&movdqa	($tweak,$inout4);		# last tweak
+	&jmp	(&label("xts_enc_done"));
+
+&set_label("xts_enc_done6x",16);		# $tweak is pre-calculated
+	&mov	($len,&DWP(16*7+0,"esp"));	# restore original $len
+	&and	($len,15);
+	&jz	(&label("xts_enc_ret"));
+	&movdqa	($inout3,$tweak);
+	&mov	(&DWP(16*7+0,"esp"),$len);	# save $len%16
+	&jmp	(&label("xts_enc_steal"));
+
+&set_label("xts_enc_done",16);
+	&mov	($len,&DWP(16*7+0,"esp"));	# restore original $len
+	&pxor	($twtmp,$twtmp);
+	&and	($len,15);
+	&jz	(&label("xts_enc_ret"));
+
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&mov	(&DWP(16*7+0,"esp"),$len);	# save $len%16
+	&pshufd	($inout3,$twtmp,0x13);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($inout3,&QWP(16*6,"esp"));	# isolate carry and residue
+	&pxor	($inout3,$tweak);
+
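+	# Ciphertext stealing for the final partial block: swap the len%16
+	# plaintext tail bytes with the leading bytes of the last complete
+	# ciphertext block, then re-encrypt that block with the tweak kept
+	# in $inout3 below.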
+&set_label("xts_enc_steal");
+	&movz	($rounds,&BP(0,$inp));
+	&movz	($key,&BP(-16,$out));
+	&lea	($inp,&DWP(1,$inp));
+	&mov	(&BP(-16,$out),&LB($rounds));
+	&mov	(&BP(0,$out),&LB($key));
+	&lea	($out,&DWP(1,$out));
+	&sub	($len,1);
+	&jnz	(&label("xts_enc_steal"));
+
+	&sub	($out,&DWP(16*7+0,"esp"));	# rewind $out
+	&mov	($key,$key_);			# restore $key
+	&mov	($rounds,$rounds_);		# restore $rounds
+
+	&movups	($inout0,&QWP(-16,$out));	# load input
+	&xorps	($inout0,$inout3);		# input^=tweak
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&movups	(&QWP(-16,$out),$inout0);	# write output
+
+&set_label("xts_enc_ret");
+	&pxor	("xmm0","xmm0");		# clear register bank
+	&pxor	("xmm1","xmm1");
+	&pxor	("xmm2","xmm2");
+	&movdqa	(&QWP(16*0,"esp"),"xmm0");	# clear stack
+	&pxor	("xmm3","xmm3");
+	&movdqa	(&QWP(16*1,"esp"),"xmm0");
+	&pxor	("xmm4","xmm4");
+	&movdqa	(&QWP(16*2,"esp"),"xmm0");
+	&pxor	("xmm5","xmm5");
+	&movdqa	(&QWP(16*3,"esp"),"xmm0");
+	&pxor	("xmm6","xmm6");
+	&movdqa	(&QWP(16*4,"esp"),"xmm0");
+	&pxor	("xmm7","xmm7");
+	&movdqa	(&QWP(16*5,"esp"),"xmm0");
+	&mov	("esp",&DWP(16*7+4,"esp"));	# restore %esp
+&function_end("aesni_xts_encrypt");
+
+&function_begin("aesni_xts_decrypt");
+	&mov	($key,&wparam(4));		# key2
+	&mov	($inp,&wparam(5));		# clear-text tweak
+
+	&mov	($rounds,&DWP(240,$key));	# key2->rounds
+	&movups	($inout0,&QWP(0,$inp));
+	if ($inline)
+	{   &aesni_inline_generate1("enc");	}
+	else
+	{   &call	("_aesni_encrypt1");	}
+
+	&mov	($inp,&wparam(0));
+	&mov	($out,&wparam(1));
+	&mov	($len,&wparam(2));
+	&mov	($key,&wparam(3));		# key1
+
+	&mov	($key_,"esp");
+	&sub	("esp",16*7+8);
+	&and	("esp",-16);			# align stack
+
+	&xor	($rounds_,$rounds_);		# if(len%16) len-=16;
+	&test	($len,15);
+	&setnz	(&LB($rounds_));
+	&shl	($rounds_,4);
+	&sub	($len,$rounds_);
+
+	&mov	(&DWP(16*6+0,"esp"),0x87);	# compose the magic constant
+	&mov	(&DWP(16*6+4,"esp"),0);
+	&mov	(&DWP(16*6+8,"esp"),1);
+	&mov	(&DWP(16*6+12,"esp"),0);
+	&mov	(&DWP(16*7+0,"esp"),$len);	# save original $len
+	&mov	(&DWP(16*7+4,"esp"),$key_);	# save original %esp
+
+	&mov	($rounds,&DWP(240,$key));	# key1->rounds
+	&mov	($key_,$key);			# backup $key
+	&mov	($rounds_,$rounds);		# backup $rounds
+
+	&movdqa	($tweak,$inout0);
+	&pxor	($twtmp,$twtmp);
+	&movdqa	($twmask,&QWP(6*16,"esp"));	# 0x0...010...87
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+
+	&and	($len,-16);
+	&sub	($len,16*6);
+	&jc	(&label("xts_dec_short"));
+
+	&shl	($rounds,4);
+	&mov	($rounds_,16);
+	&sub	($rounds_,$rounds);
+	&lea	($key,&DWP(32,$key,$rounds));
+	&jmp	(&label("xts_dec_loop6"));
+
+&set_label("xts_dec_loop6",16);
+	for ($i=0;$i<4;$i++) {
+	    &pshufd	($twres,$twtmp,0x13);
+	    &pxor	($twtmp,$twtmp);
+	    &movdqa	(&QWP(16*$i,"esp"),$tweak);
+	    &paddq	($tweak,$tweak);	# &psllq($tweak,1);
+	    &pand	($twres,$twmask);	# isolate carry and residue
+	    &pcmpgtd	($twtmp,$tweak);	# broadcast upper bits
+	    &pxor	($tweak,$twres);
+	}
+	&pshufd	($inout5,$twtmp,0x13);
+	&movdqa	(&QWP(16*$i++,"esp"),$tweak);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	 &$movekey	($rndkey0,&QWP(0,$key_));
+	&pand	($inout5,$twmask);		# isolate carry and residue
+	 &movups	($inout0,&QWP(0,$inp));	# load input
+	&pxor	($inout5,$tweak);
+
+	# inline _aesni_decrypt6 prologue and flip xor with tweak and key[0]
+	&mov	($rounds,$rounds_);
+	&movdqu	($inout1,&QWP(16*1,$inp));
+	 &xorps		($inout0,$rndkey0);	# input^=rndkey[0]
+	&movdqu	($inout2,&QWP(16*2,$inp));
+	 &pxor		($inout1,$rndkey0);
+	&movdqu	($inout3,&QWP(16*3,$inp));
+	 &pxor		($inout2,$rndkey0);
+	&movdqu	($inout4,&QWP(16*4,$inp));
+	 &pxor		($inout3,$rndkey0);
+	&movdqu	($rndkey1,&QWP(16*5,$inp));
+	 &pxor		($inout4,$rndkey0);
+	&lea	($inp,&DWP(16*6,$inp));
+	&pxor	($inout0,&QWP(16*0,"esp"));	# input^=tweak
+	&movdqa	(&QWP(16*$i,"esp"),$inout5);	# save last tweak
+	&pxor	($inout5,$rndkey1);
+
+	 &$movekey	($rndkey1,&QWP(16,$key_));
+	&pxor	($inout1,&QWP(16*1,"esp"));
+	&pxor	($inout2,&QWP(16*2,"esp"));
+	 &aesdec	($inout0,$rndkey1);
+	&pxor	($inout3,&QWP(16*3,"esp"));
+	&pxor	($inout4,&QWP(16*4,"esp"));
+	 &aesdec	($inout1,$rndkey1);
+	&pxor		($inout5,$rndkey0);
+	 &$movekey	($rndkey0,&QWP(32,$key_));
+	 &aesdec	($inout2,$rndkey1);
+	 &aesdec	($inout3,$rndkey1);
+	 &aesdec	($inout4,$rndkey1);
+	 &aesdec	($inout5,$rndkey1);
+	&call		(&label("_aesni_decrypt6_enter"));
+
+	&movdqa	($tweak,&QWP(16*5,"esp"));	# last tweak
+       &pxor	($twtmp,$twtmp);
+	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
+       &pcmpgtd	($twtmp,$tweak);		# broadcast upper bits
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&xorps	($inout2,&QWP(16*2,"esp"));
+	&movups	(&QWP(16*1,$out),$inout1);
+	&xorps	($inout3,&QWP(16*3,"esp"));
+	&movups	(&QWP(16*2,$out),$inout2);
+	&xorps	($inout4,&QWP(16*4,"esp"));
+	&movups	(&QWP(16*3,$out),$inout3);
+	&xorps	($inout5,$tweak);
+	&movups	(&QWP(16*4,$out),$inout4);
+       &pshufd	($twres,$twtmp,0x13);
+	&movups	(&QWP(16*5,$out),$inout5);
+	&lea	($out,&DWP(16*6,$out));
+       &movdqa	($twmask,&QWP(16*6,"esp"));	# 0x0...010...87
+
+	&pxor	($twtmp,$twtmp);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+
+	&sub	($len,16*6);
+	&jnc	(&label("xts_dec_loop6"));
+
+	&mov	($rounds,&DWP(240,$key_));	# restore $rounds
+	&mov	($key,$key_);			# restore $key
+	&mov	($rounds_,$rounds);
+
+&set_label("xts_dec_short");
+	&add	($len,16*6);
+	&jz	(&label("xts_dec_done6x"));
+
+	&movdqa	($inout3,$tweak);		# put aside previous tweak
+	&cmp	($len,0x20);
+	&jb	(&label("xts_dec_one"));
+
+	&pshufd	($twres,$twtmp,0x13);
+	&pxor	($twtmp,$twtmp);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+	&je	(&label("xts_dec_two"));
+
+	&pshufd	($twres,$twtmp,0x13);
+	&pxor	($twtmp,$twtmp);
+	&movdqa	($inout4,$tweak);		# put aside previous tweak
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+	&cmp	($len,0x40);
+	&jb	(&label("xts_dec_three"));
+
+	&pshufd	($twres,$twtmp,0x13);
+	&pxor	($twtmp,$twtmp);
+	&movdqa	($inout5,$tweak);		# put aside previous tweak
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+	&movdqa	(&QWP(16*0,"esp"),$inout3);
+	&movdqa	(&QWP(16*1,"esp"),$inout4);
+	&je	(&label("xts_dec_four"));
+
+	&movdqa	(&QWP(16*2,"esp"),$inout5);
+	&pshufd	($inout5,$twtmp,0x13);
+	&movdqa	(&QWP(16*3,"esp"),$tweak);
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($inout5,$twmask);		# isolate carry and residue
+	&pxor	($inout5,$tweak);
+
+	&movdqu	($inout0,&QWP(16*0,$inp));	# load input
+	&movdqu	($inout1,&QWP(16*1,$inp));
+	&movdqu	($inout2,&QWP(16*2,$inp));
+	&pxor	($inout0,&QWP(16*0,"esp"));	# input^=tweak
+	&movdqu	($inout3,&QWP(16*3,$inp));
+	&pxor	($inout1,&QWP(16*1,"esp"));
+	&movdqu	($inout4,&QWP(16*4,$inp));
+	&pxor	($inout2,&QWP(16*2,"esp"));
+	&lea	($inp,&DWP(16*5,$inp));
+	&pxor	($inout3,&QWP(16*3,"esp"));
+	&movdqa	(&QWP(16*4,"esp"),$inout5);	# save last tweak
+	&pxor	($inout4,$inout5);
+
+	&call	("_aesni_decrypt6");
+
+	&movaps	($tweak,&QWP(16*4,"esp"));	# last tweak
+	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&xorps	($inout2,&QWP(16*2,"esp"));
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&xorps	($inout3,&QWP(16*3,"esp"));
+	&movups	(&QWP(16*1,$out),$inout1);
+	&xorps	($inout4,$tweak);
+	&movups	(&QWP(16*2,$out),$inout2);
+	&movups	(&QWP(16*3,$out),$inout3);
+	&movups	(&QWP(16*4,$out),$inout4);
+	&lea	($out,&DWP(16*5,$out));
+	&jmp	(&label("xts_dec_done"));
+
+&set_label("xts_dec_one",16);
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&lea	($inp,&DWP(16*1,$inp));
+	&xorps	($inout0,$inout3);		# input^=tweak
+	if ($inline)
+	{   &aesni_inline_generate1("dec");	}
+	else
+	{   &call	("_aesni_decrypt1");	}
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&lea	($out,&DWP(16*1,$out));
+
+	&movdqa	($tweak,$inout3);		# last tweak
+	&jmp	(&label("xts_dec_done"));
+
+&set_label("xts_dec_two",16);
+	&movaps	($inout4,$tweak);		# put aside last tweak
+
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&movups	($inout1,&QWP(16*1,$inp));
+	&lea	($inp,&DWP(16*2,$inp));
+	&xorps	($inout0,$inout3);		# input^=tweak
+	&xorps	($inout1,$inout4);
+
+	&call	("_aesni_decrypt2");
+
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&xorps	($inout1,$inout4);
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&movups	(&QWP(16*1,$out),$inout1);
+	&lea	($out,&DWP(16*2,$out));
+
+	&movdqa	($tweak,$inout4);		# last tweak
+	&jmp	(&label("xts_dec_done"));
+
+&set_label("xts_dec_three",16);
+	&movaps	($inout5,$tweak);		# put aside last tweak
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&movups	($inout1,&QWP(16*1,$inp));
+	&movups	($inout2,&QWP(16*2,$inp));
+	&lea	($inp,&DWP(16*3,$inp));
+	&xorps	($inout0,$inout3);		# input^=tweak
+	&xorps	($inout1,$inout4);
+	&xorps	($inout2,$inout5);
+
+	&call	("_aesni_decrypt3");
+
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&xorps	($inout1,$inout4);
+	&xorps	($inout2,$inout5);
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&movups	(&QWP(16*1,$out),$inout1);
+	&movups	(&QWP(16*2,$out),$inout2);
+	&lea	($out,&DWP(16*3,$out));
+
+	&movdqa	($tweak,$inout5);		# last tweak
+	&jmp	(&label("xts_dec_done"));
+
+&set_label("xts_dec_four",16);
+	&movaps	($inout4,$tweak);		# put aside last tweak
+
+	&movups	($inout0,&QWP(16*0,$inp));	# load input
+	&movups	($inout1,&QWP(16*1,$inp));
+	&movups	($inout2,&QWP(16*2,$inp));
+	&xorps	($inout0,&QWP(16*0,"esp"));	# input^=tweak
+	&movups	($inout3,&QWP(16*3,$inp));
+	&lea	($inp,&DWP(16*4,$inp));
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&xorps	($inout2,$inout5);
+	&xorps	($inout3,$inout4);
+
+	&call	("_aesni_decrypt4");
+
+	&xorps	($inout0,&QWP(16*0,"esp"));	# output^=tweak
+	&xorps	($inout1,&QWP(16*1,"esp"));
+	&xorps	($inout2,$inout5);
+	&movups	(&QWP(16*0,$out),$inout0);	# write output
+	&xorps	($inout3,$inout4);
+	&movups	(&QWP(16*1,$out),$inout1);
+	&movups	(&QWP(16*2,$out),$inout2);
+	&movups	(&QWP(16*3,$out),$inout3);
+	&lea	($out,&DWP(16*4,$out));
+
+	&movdqa	($tweak,$inout4);		# last tweak
+	&jmp	(&label("xts_dec_done"));
+
+&set_label("xts_dec_done6x",16);		# $tweak is pre-calculated
+	&mov	($len,&DWP(16*7+0,"esp"));	# restore original $len
+	&and	($len,15);
+	&jz	(&label("xts_dec_ret"));
+	&mov	(&DWP(16*7+0,"esp"),$len);	# save $len%16
+	&jmp	(&label("xts_dec_only_one_more"));
+
+&set_label("xts_dec_done",16);
+	&mov	($len,&DWP(16*7+0,"esp"));	# restore original $len
+	&pxor	($twtmp,$twtmp);
+	&and	($len,15);
+	&jz	(&label("xts_dec_ret"));
+
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&mov	(&DWP(16*7+0,"esp"),$len);	# save $len%16
+	&pshufd	($twres,$twtmp,0x13);
+	&pxor	($twtmp,$twtmp);
+	&movdqa	($twmask,&QWP(16*6,"esp"));
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($twres,$twmask);		# isolate carry and residue
+	&pcmpgtd($twtmp,$tweak);		# broadcast upper bits
+	&pxor	($tweak,$twres);
+
+&set_label("xts_dec_only_one_more");
+	&pshufd	($inout3,$twtmp,0x13);
+	&movdqa	($inout4,$tweak);		# put aside previous tweak
+	&paddq	($tweak,$tweak);		# &psllq($tweak,1);
+	&pand	($inout3,$twmask);		# isolate carry and residue
+	&pxor	($inout3,$tweak);
+
+	&mov	($key,$key_);			# restore $key
+	&mov	($rounds,$rounds_);		# restore $rounds
+
+	&movups	($inout0,&QWP(0,$inp));		# load input
+	&xorps	($inout0,$inout3);		# input^=tweak
+	if ($inline)
+	{   &aesni_inline_generate1("dec");	}
+	else
+	{   &call	("_aesni_decrypt1");	}
+	&xorps	($inout0,$inout3);		# output^=tweak
+	&movups	(&QWP(0,$out),$inout0);		# write output
+
+&set_label("xts_dec_steal");
+	&movz	($rounds,&BP(16,$inp));
+	&movz	($key,&BP(0,$out));
+	&lea	($inp,&DWP(1,$inp));
+	&mov	(&BP(0,$out),&LB($rounds));
+	&mov	(&BP(16,$out),&LB($key));
+	&lea	($out,&DWP(1,$out));
+	&sub	($len,1);
+	&jnz	(&label("xts_dec_steal"));
+
+	&sub	($out,&DWP(16*7+0,"esp"));	# rewind $out
+	&mov	($key,$key_);			# restore $key
+	&mov	($rounds,$rounds_);		# restore $rounds
+
+	&movups	($inout0,&QWP(0,$out));		# load input
+	&xorps	($inout0,$inout4);		# input^=tweak
+	if ($inline)
+	{   &aesni_inline_generate1("dec");	}
+	else
+	{   &call	("_aesni_decrypt1");	}
+	&xorps	($inout0,$inout4);		# output^=tweak
+	&movups	(&QWP(0,$out),$inout0);		# write output
+
+&set_label("xts_dec_ret");
+	&pxor	("xmm0","xmm0");		# clear register bank
+	&pxor	("xmm1","xmm1");
+	&pxor	("xmm2","xmm2");
+	&movdqa	(&QWP(16*0,"esp"),"xmm0");	# clear stack
+	&pxor	("xmm3","xmm3");
+	&movdqa	(&QWP(16*1,"esp"),"xmm0");
+	&pxor	("xmm4","xmm4");
+	&movdqa	(&QWP(16*2,"esp"),"xmm0");
+	&pxor	("xmm5","xmm5");
+	&movdqa	(&QWP(16*3,"esp"),"xmm0");
+	&pxor	("xmm6","xmm6");
+	&movdqa	(&QWP(16*4,"esp"),"xmm0");
+	&pxor	("xmm7","xmm7");
+	&movdqa	(&QWP(16*5,"esp"),"xmm0");
+	&mov	("esp",&DWP(16*7+4,"esp"));	# restore %esp
+&function_end("aesni_xts_decrypt");
+}
+}
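+
+# The pcmpgtd/pshufd(0x13)/paddq/pand/pxor groups in the XTS code above
+# multiply the 128-bit tweak by x in GF(2^128): pcmpgtd/pshufd(0x13)
+# extract the carry out of each 64-bit half, paddq doubles both halves,
+# and pand with the 0x87/1 constant composed on the stack folds the
+# reduction polynomial x^128+x^7+x^2+x+1 and the cross-half carry back
+# in.  A scalar sketch of the same step, treating the tweak as a single
+# 128-bit integer (illustrative only, not used by this file):
+#
+#	carry = tweak >> 127;			# top bit before shifting
+#	tweak = (tweak << 1) ^ (carry ? 0x87 : 0);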
+
+######################################################################
+# void $PREFIX_cbc_encrypt (const void *inp, void *out,
+#                           size_t length, const AES_KEY *key,
+#                           unsigned char *ivp,const int enc);
+&function_begin("${PREFIX}_cbc_encrypt");
+	&mov	($inp,&wparam(0));
+	&mov	($rounds_,"esp");
+	&mov	($out,&wparam(1));
+	&sub	($rounds_,24);
+	&mov	($len,&wparam(2));
+	&and	($rounds_,-16);
+	&mov	($key,&wparam(3));
+	&mov	($key_,&wparam(4));
+	&test	($len,$len);
+	&jz	(&label("cbc_abort"));
+
+	&cmp	(&wparam(5),0);
+	&xchg	($rounds_,"esp");		# alloca
+	&movups	($ivec,&QWP(0,$key_));		# load IV
+	&mov	($rounds,&DWP(240,$key));
+	&mov	($key_,$key);			# backup $key
+	&mov	(&DWP(16,"esp"),$rounds_);	# save original %esp
+	&mov	($rounds_,$rounds);		# backup $rounds
+	&je	(&label("cbc_decrypt"));
+
+	&movaps	($inout0,$ivec);
+	&cmp	($len,16);
+	&jb	(&label("cbc_enc_tail"));
+	&sub	($len,16);
+	&jmp	(&label("cbc_enc_loop"));
+
+&set_label("cbc_enc_loop",16);
+	&movups	($ivec,&QWP(0,$inp));		# input actually
+	&lea	($inp,&DWP(16,$inp));
+	if ($inline)
+	{   &aesni_inline_generate1("enc",$inout0,$ivec);	}
+	else
+	{   &xorps($inout0,$ivec); &call("_aesni_encrypt1");	}
+	&mov	($rounds,$rounds_);	# restore $rounds
+	&mov	($key,$key_);		# restore $key
+	&movups	(&QWP(0,$out),$inout0);	# store output
+	&lea	($out,&DWP(16,$out));
+	&sub	($len,16);
+	&jnc	(&label("cbc_enc_loop"));
+	&add	($len,16);
+	&jnz	(&label("cbc_enc_tail"));
+	&movaps	($ivec,$inout0);
+	&pxor	($inout0,$inout0);
+	&jmp	(&label("cbc_ret"));
+
+&set_label("cbc_enc_tail");
+	&mov	("ecx",$len);		# zaps $rounds
+	&data_word(0xA4F3F689);		# rep movsb
+	&mov	("ecx",16);		# zero tail
+	&sub	("ecx",$len);
+	&xor	("eax","eax");		# zaps $len
+	&data_word(0xAAF3F689);		# rep stosb
+	&lea	($out,&DWP(-16,$out));	# rewind $out by 1 block
+	&mov	($rounds,$rounds_);	# restore $rounds
+	&mov	($inp,$out);		# $inp and $out are the same
+	&mov	($key,$key_);		# restore $key
+	&jmp	(&label("cbc_enc_loop"));
+######################################################################
+&set_label("cbc_decrypt",16);
+	&cmp	($len,0x50);
+	&jbe	(&label("cbc_dec_tail"));
+	&movaps	(&QWP(0,"esp"),$ivec);		# save IV
+	&sub	($len,0x50);
+	&jmp	(&label("cbc_dec_loop6_enter"));
+
+&set_label("cbc_dec_loop6",16);
+	&movaps	(&QWP(0,"esp"),$rndkey0);	# save IV
+	&movups	(&QWP(0,$out),$inout5);
+	&lea	($out,&DWP(0x10,$out));
+&set_label("cbc_dec_loop6_enter");
+	&movdqu	($inout0,&QWP(0,$inp));
+	&movdqu	($inout1,&QWP(0x10,$inp));
+	&movdqu	($inout2,&QWP(0x20,$inp));
+	&movdqu	($inout3,&QWP(0x30,$inp));
+	&movdqu	($inout4,&QWP(0x40,$inp));
+	&movdqu	($inout5,&QWP(0x50,$inp));
+
+	&call	("_aesni_decrypt6");
+
+	&movups	($rndkey1,&QWP(0,$inp));
+	&movups	($rndkey0,&QWP(0x10,$inp));
+	&xorps	($inout0,&QWP(0,"esp"));	# ^=IV
+	&xorps	($inout1,$rndkey1);
+	&movups	($rndkey1,&QWP(0x20,$inp));
+	&xorps	($inout2,$rndkey0);
+	&movups	($rndkey0,&QWP(0x30,$inp));
+	&xorps	($inout3,$rndkey1);
+	&movups	($rndkey1,&QWP(0x40,$inp));
+	&xorps	($inout4,$rndkey0);
+	&movups	($rndkey0,&QWP(0x50,$inp));	# IV
+	&xorps	($inout5,$rndkey1);
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&lea	($inp,&DWP(0x60,$inp));
+	&movups	(&QWP(0x20,$out),$inout2);
+	&mov	($rounds,$rounds_);		# restore $rounds
+	&movups	(&QWP(0x30,$out),$inout3);
+	&mov	($key,$key_);			# restore $key
+	&movups	(&QWP(0x40,$out),$inout4);
+	&lea	($out,&DWP(0x50,$out));
+	&sub	($len,0x60);
+	&ja	(&label("cbc_dec_loop6"));
+
+	&movaps	($inout0,$inout5);
+	&movaps	($ivec,$rndkey0);
+	&add	($len,0x50);
+	&jle	(&label("cbc_dec_clear_tail_collected"));
+	&movups	(&QWP(0,$out),$inout0);
+	&lea	($out,&DWP(0x10,$out));
+&set_label("cbc_dec_tail");
+	&movups	($inout0,&QWP(0,$inp));
+	&movaps	($in0,$inout0);
+	&cmp	($len,0x10);
+	&jbe	(&label("cbc_dec_one"));
+
+	&movups	($inout1,&QWP(0x10,$inp));
+	&movaps	($in1,$inout1);
+	&cmp	($len,0x20);
+	&jbe	(&label("cbc_dec_two"));
+
+	&movups	($inout2,&QWP(0x20,$inp));
+	&cmp	($len,0x30);
+	&jbe	(&label("cbc_dec_three"));
+
+	&movups	($inout3,&QWP(0x30,$inp));
+	&cmp	($len,0x40);
+	&jbe	(&label("cbc_dec_four"));
+
+	&movups	($inout4,&QWP(0x40,$inp));
+	&movaps	(&QWP(0,"esp"),$ivec);		# save IV
+	&movups	($inout0,&QWP(0,$inp));
+	&xorps	($inout5,$inout5);
+	&call	("_aesni_decrypt6");
+	&movups	($rndkey1,&QWP(0,$inp));
+	&movups	($rndkey0,&QWP(0x10,$inp));
+	&xorps	($inout0,&QWP(0,"esp"));	# ^= IV
+	&xorps	($inout1,$rndkey1);
+	&movups	($rndkey1,&QWP(0x20,$inp));
+	&xorps	($inout2,$rndkey0);
+	&movups	($rndkey0,&QWP(0x30,$inp));
+	&xorps	($inout3,$rndkey1);
+	&movups	($ivec,&QWP(0x40,$inp));	# IV
+	&xorps	($inout4,$rndkey0);
+	&movups	(&QWP(0,$out),$inout0);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&pxor	($inout1,$inout1);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&pxor	($inout2,$inout2);
+	&movups	(&QWP(0x30,$out),$inout3);
+	&pxor	($inout3,$inout3);
+	&lea	($out,&DWP(0x40,$out));
+	&movaps	($inout0,$inout4);
+	&pxor	($inout4,$inout4);
+	&sub	($len,0x50);
+	&jmp	(&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_one",16);
+	if ($inline)
+	{   &aesni_inline_generate1("dec");	}
+	else
+	{   &call	("_aesni_decrypt1");	}
+	&xorps	($inout0,$ivec);
+	&movaps	($ivec,$in0);
+	&sub	($len,0x10);
+	&jmp	(&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_two",16);
+	&call	("_aesni_decrypt2");
+	&xorps	($inout0,$ivec);
+	&xorps	($inout1,$in0);
+	&movups	(&QWP(0,$out),$inout0);
+	&movaps	($inout0,$inout1);
+	&pxor	($inout1,$inout1);
+	&lea	($out,&DWP(0x10,$out));
+	&movaps	($ivec,$in1);
+	&sub	($len,0x20);
+	&jmp	(&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_three",16);
+	&call	("_aesni_decrypt3");
+	&xorps	($inout0,$ivec);
+	&xorps	($inout1,$in0);
+	&xorps	($inout2,$in1);
+	&movups	(&QWP(0,$out),$inout0);
+	&movaps	($inout0,$inout2);
+	&pxor	($inout2,$inout2);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&pxor	($inout1,$inout1);
+	&lea	($out,&DWP(0x20,$out));
+	&movups	($ivec,&QWP(0x20,$inp));
+	&sub	($len,0x30);
+	&jmp	(&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_four",16);
+	&call	("_aesni_decrypt4");
+	&movups	($rndkey1,&QWP(0x10,$inp));
+	&movups	($rndkey0,&QWP(0x20,$inp));
+	&xorps	($inout0,$ivec);
+	&movups	($ivec,&QWP(0x30,$inp));
+	&xorps	($inout1,$in0);
+	&movups	(&QWP(0,$out),$inout0);
+	&xorps	($inout2,$rndkey1);
+	&movups	(&QWP(0x10,$out),$inout1);
+	&pxor	($inout1,$inout1);
+	&xorps	($inout3,$rndkey0);
+	&movups	(&QWP(0x20,$out),$inout2);
+	&pxor	($inout2,$inout2);
+	&lea	($out,&DWP(0x30,$out));
+	&movaps	($inout0,$inout3);
+	&pxor	($inout3,$inout3);
+	&sub	($len,0x40);
+	&jmp	(&label("cbc_dec_tail_collected"));
+
+&set_label("cbc_dec_clear_tail_collected",16);
+	&pxor	($inout1,$inout1);
+	&pxor	($inout2,$inout2);
+	&pxor	($inout3,$inout3);
+	&pxor	($inout4,$inout4);
+&set_label("cbc_dec_tail_collected");
+	&and	($len,15);
+	&jnz	(&label("cbc_dec_tail_partial"));
+	&movups	(&QWP(0,$out),$inout0);
+	&pxor	($rndkey0,$rndkey0);
+	&jmp	(&label("cbc_ret"));
+
+&set_label("cbc_dec_tail_partial",16);
+	&movaps	(&QWP(0,"esp"),$inout0);
+	&pxor	($rndkey0,$rndkey0);
+	&mov	("ecx",16);
+	&mov	($inp,"esp");
+	&sub	("ecx",$len);
+	&data_word(0xA4F3F689);		# rep movsb
+	&movdqa	(&QWP(0,"esp"),$inout0);
+
+&set_label("cbc_ret");
+	&mov	("esp",&DWP(16,"esp"));	# pull original %esp
+	&mov	($key_,&wparam(4));
+	&pxor	($inout0,$inout0);
+	&pxor	($rndkey1,$rndkey1);
+	&movups	(&QWP(0,$key_),$ivec);	# output IV
+	&pxor	($ivec,$ivec);
+&set_label("cbc_abort");
+&function_end("${PREFIX}_cbc_encrypt");
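+
+# A C-level sketch of calling the routine above (a hedged example:
+# signature from the prototype comment, $PREFIX assumed to be "aesni",
+# `enc_key` an AES_KEY prepared elsewhere; per the code, a nonzero `enc`
+# selects encryption and the 16-byte IV buffer is updated in place):
+#
+#	unsigned char iv[16] = {0}, buf[64] = {0};
+#	aesni_cbc_encrypt(buf, buf, sizeof(buf), &enc_key, iv, 1);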
+
+######################################################################
+# Mechanical port from aesni-x86_64.pl.
+#
+# _aesni_set_encrypt_key is a private interface,
+# input:
+#	"eax"	const unsigned char *userKey
+#	$rounds	int bits
+#	$key	AES_KEY *key
+# output:
+#	"eax"	return code
+#	$rounds	rounds
+
+&function_begin_B("_aesni_set_encrypt_key");
+	&push	("ebp");
+	&push	("ebx");
+	&test	("eax","eax");
+	&jz	(&label("bad_pointer"));
+	&test	($key,$key);
+	&jz	(&label("bad_pointer"));
+
+	&call	(&label("pic"));
+&set_label("pic");
+	&blindpop("ebx");
+	&lea	("ebx",&DWP(&label("key_const")."-".&label("pic"),"ebx"));
+
+	&picmeup("ebp","OPENSSL_ia32cap_P","ebx",&label("key_const"));
+	&movups	("xmm0",&QWP(0,"eax"));	# pull first 128 bits of *userKey
+	&xorps	("xmm4","xmm4");	# low dword of xmm4 is assumed 0
+	&mov	("ebp",&DWP(4,"ebp"));
+	&lea	($key,&DWP(16,$key));
+	&and	("ebp",1<<28|1<<11);	# AVX and XOP bits
+	&cmp	($rounds,256);
+	&je	(&label("14rounds"));
+	&cmp	($rounds,192);
+	&je	(&label("12rounds"));
+	&cmp	($rounds,128);
+	&jne	(&label("bad_keybits"));
+
+&set_label("10rounds",16);
+	&cmp		("ebp",1<<28);
+	&je		(&label("10rounds_alt"));
+
+	&mov		($rounds,9);
+	&$movekey	(&QWP(-16,$key),"xmm0");	# round 0
+	&aeskeygenassist("xmm1","xmm0",0x01);		# round 1
+	&call		(&label("key_128_cold"));
+	&aeskeygenassist("xmm1","xmm0",0x2);		# round 2
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x04);		# round 3
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x08);		# round 4
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x10);		# round 5
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x20);		# round 6
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x40);		# round 7
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x80);		# round 8
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x1b);		# round 9
+	&call		(&label("key_128"));
+	&aeskeygenassist("xmm1","xmm0",0x36);		# round 10
+	&call		(&label("key_128"));
+	&$movekey	(&QWP(0,$key),"xmm0");
+	&mov		(&DWP(80,$key),$rounds);
+
+	&jmp	(&label("good_key"));
+
+&set_label("key_128",16);
+	&$movekey	(&QWP(0,$key),"xmm0");
+	&lea		($key,&DWP(16,$key));
+&set_label("key_128_cold");
+	&shufps		("xmm4","xmm0",0b00010000);
+	&xorps		("xmm0","xmm4");
+	&shufps		("xmm4","xmm0",0b10001100);
+	&xorps		("xmm0","xmm4");
+	&shufps		("xmm1","xmm1",0b11111111);	# critical path
+	&xorps		("xmm0","xmm1");
+	&ret();
+
+&set_label("10rounds_alt",16);
+	&movdqa		("xmm5",&QWP(0x00,"ebx"));
+	&mov		($rounds,8);
+	&movdqa		("xmm4",&QWP(0x20,"ebx"));
+	&movdqa		("xmm2","xmm0");
+	&movdqu		(&QWP(-16,$key),"xmm0");
+
+&set_label("loop_key128");
+	&pshufb		("xmm0","xmm5");
+	&aesenclast	("xmm0","xmm4");
+	&pslld		("xmm4",1);
+	&lea		($key,&DWP(16,$key));
+
+	&movdqa		("xmm3","xmm2");
+	&pslldq		("xmm2",4);
+	&pxor		("xmm3","xmm2");
+	&pslldq		("xmm2",4);
+	&pxor		("xmm3","xmm2");
+	&pslldq		("xmm2",4);
+	&pxor		("xmm2","xmm3");
+
+	&pxor		("xmm0","xmm2");
+	&movdqu		(&QWP(-16,$key),"xmm0");
+	&movdqa		("xmm2","xmm0");
+
+	&dec		($rounds);
+	&jnz		(&label("loop_key128"));
+
+	&movdqa		("xmm4",&QWP(0x30,"ebx"));
+
+	&pshufb		("xmm0","xmm5");
+	&aesenclast	("xmm0","xmm4");
+	&pslld		("xmm4",1);
+
+	&movdqa		("xmm3","xmm2");
+	&pslldq		("xmm2",4);
+	&pxor		("xmm3","xmm2");
+	&pslldq		("xmm2",4);
+	&pxor		("xmm3","xmm2");
+	&pslldq		("xmm2",4);
+	&pxor		("xmm2","xmm3");
+
+	&pxor		("xmm0","xmm2");
+	&movdqu		(&QWP(0,$key),"xmm0");
+
+	&movdqa		("xmm2","xmm0");
+	&pshufb		("xmm0","xmm5");
+	&aesenclast	("xmm0","xmm4");
+
+	&movdqa		("xmm3","xmm2");
+	&pslldq		("xmm2",4);
+	&pxor		("xmm3","xmm2");
+	&pslldq		("xmm2",4);
+	&pxor		("xmm3","xmm2");
+	&pslldq		("xmm2",4);
+	&pxor		("xmm2","xmm3");
+
+	&pxor		("xmm0","xmm2");
+	&movdqu		(&QWP(16,$key),"xmm0");
+
+	&mov		($rounds,9);
+	&mov		(&DWP(96,$key),$rounds);
+
+	&jmp	(&label("good_key"));
+
+&set_label("12rounds",16);
+	&movq		("xmm2",&QWP(16,"eax"));	# remaining 1/3 of *userKey
+	&cmp		("ebp",1<<28);
+	&je		(&label("12rounds_alt"));
+
+	&mov		($rounds,11);
+	&$movekey	(&QWP(-16,$key),"xmm0");	# round 0
+	&aeskeygenassist("xmm1","xmm2",0x01);		# round 1,2
+	&call		(&label("key_192a_cold"));
+	&aeskeygenassist("xmm1","xmm2",0x02);		# round 2,3
+	&call		(&label("key_192b"));
+	&aeskeygenassist("xmm1","xmm2",0x04);		# round 4,5
+	&call		(&label("key_192a"));
+	&aeskeygenassist("xmm1","xmm2",0x08);		# round 5,6
+	&call		(&label("key_192b"));
+	&aeskeygenassist("xmm1","xmm2",0x10);		# round 7,8
+	&call		(&label("key_192a"));
+	&aeskeygenassist("xmm1","xmm2",0x20);		# round 8,9
+	&call		(&label("key_192b"));
+	&aeskeygenassist("xmm1","xmm2",0x40);		# round 10,11
+	&call		(&label("key_192a"));
+	&aeskeygenassist("xmm1","xmm2",0x80);		# round 11,12
+	&call		(&label("key_192b"));
+	&$movekey	(&QWP(0,$key),"xmm0");
+	&mov		(&DWP(48,$key),$rounds);
+
+	&jmp	(&label("good_key"));
+
+&set_label("key_192a",16);
+	&$movekey	(&QWP(0,$key),"xmm0");
+	&lea		($key,&DWP(16,$key));
+&set_label("key_192a_cold",16);
+	&movaps		("xmm5","xmm2");
+&set_label("key_192b_warm");
+	&shufps		("xmm4","xmm0",0b00010000);
+	&movdqa		("xmm3","xmm2");
+	&xorps		("xmm0","xmm4");
+	&shufps		("xmm4","xmm0",0b10001100);
+	&pslldq		("xmm3",4);
+	&xorps		("xmm0","xmm4");
+	&pshufd		("xmm1","xmm1",0b01010101);	# critical path
+	&pxor		("xmm2","xmm3");
+	&pxor		("xmm0","xmm1");
+	&pshufd		("xmm3","xmm0",0b11111111);
+	&pxor		("xmm2","xmm3");
+	&ret();
+
+&set_label("key_192b",16);
+	&movaps		("xmm3","xmm0");
+	&shufps		("xmm5","xmm0",0b01000100);
+	&$movekey	(&QWP(0,$key),"xmm5");
+	&shufps		("xmm3","xmm2",0b01001110);
+	&$movekey	(&QWP(16,$key),"xmm3");
+	&lea		($key,&DWP(32,$key));
+	&jmp		(&label("key_192b_warm"));
+
+&set_label("12rounds_alt",16);
+	&movdqa		("xmm5",&QWP(0x10,"ebx"));
+	&movdqa		("xmm4",&QWP(0x20,"ebx"));
+	&mov		($rounds,8);
+	&movdqu		(&QWP(-16,$key),"xmm0");
+
+&set_label("loop_key192");
+	&movq		(&QWP(0,$key),"xmm2");
+	&movdqa		("xmm1","xmm2");
+	&pshufb		("xmm2","xmm5");
+	&aesenclast	("xmm2","xmm4");
+	&pslld		("xmm4",1);
+	&lea		($key,&DWP(24,$key));
+
+	&movdqa		("xmm3","xmm0");
+	&pslldq		("xmm0",4);
+	&pxor		("xmm3","xmm0");
+	&pslldq		("xmm0",4);
+	&pxor		("xmm3","xmm0");
+	&pslldq		("xmm0",4);
+	&pxor		("xmm0","xmm3");
+
+	&pshufd		("xmm3","xmm0",0xff);
+	&pxor		("xmm3","xmm1");
+	&pslldq		("xmm1",4);
+	&pxor		("xmm3","xmm1");
+
+	&pxor		("xmm0","xmm2");
+	&pxor		("xmm2","xmm3");
+	&movdqu		(&QWP(-16,$key),"xmm0");
+
+	&dec		($rounds);
+	&jnz		(&label("loop_key192"));
+
+	&mov	($rounds,11);
+	&mov	(&DWP(32,$key),$rounds);
+
+	&jmp	(&label("good_key"));
+
+&set_label("14rounds",16);
+	&movups		("xmm2",&QWP(16,"eax"));	# remaining half of *userKey
+	&lea		($key,&DWP(16,$key));
+	&cmp		("ebp",1<<28);
+	&je		(&label("14rounds_alt"));
+
+	&mov		($rounds,13);
+	&$movekey	(&QWP(-32,$key),"xmm0");	# round 0
+	&$movekey	(&QWP(-16,$key),"xmm2");	# round 1
+	&aeskeygenassist("xmm1","xmm2",0x01);		# round 2
+	&call		(&label("key_256a_cold"));
+	&aeskeygenassist("xmm1","xmm0",0x01);		# round 3
+	&call		(&label("key_256b"));
+	&aeskeygenassist("xmm1","xmm2",0x02);		# round 4
+	&call		(&label("key_256a"));
+	&aeskeygenassist("xmm1","xmm0",0x02);		# round 5
+	&call		(&label("key_256b"));
+	&aeskeygenassist("xmm1","xmm2",0x04);		# round 6
+	&call		(&label("key_256a"));
+	&aeskeygenassist("xmm1","xmm0",0x04);		# round 7
+	&call		(&label("key_256b"));
+	&aeskeygenassist("xmm1","xmm2",0x08);		# round 8
+	&call		(&label("key_256a"));
+	&aeskeygenassist("xmm1","xmm0",0x08);		# round 9
+	&call		(&label("key_256b"));
+	&aeskeygenassist("xmm1","xmm2",0x10);		# round 10
+	&call		(&label("key_256a"));
+	&aeskeygenassist("xmm1","xmm0",0x10);		# round 11
+	&call		(&label("key_256b"));
+	&aeskeygenassist("xmm1","xmm2",0x20);		# round 12
+	&call		(&label("key_256a"));
+	&aeskeygenassist("xmm1","xmm0",0x20);		# round 13
+	&call		(&label("key_256b"));
+	&aeskeygenassist("xmm1","xmm2",0x40);		# round 14
+	&call		(&label("key_256a"));
+	&$movekey	(&QWP(0,$key),"xmm0");
+	&mov		(&DWP(16,$key),$rounds);
+	&xor		("eax","eax");
+
+	&jmp	(&label("good_key"));
+
+&set_label("key_256a",16);
+	&$movekey	(&QWP(0,$key),"xmm2");
+	&lea		($key,&DWP(16,$key));
+&set_label("key_256a_cold");
+	&shufps		("xmm4","xmm0",0b00010000);
+	&xorps		("xmm0","xmm4");
+	&shufps		("xmm4","xmm0",0b10001100);
+	&xorps		("xmm0","xmm4");
+	&shufps		("xmm1","xmm1",0b11111111);	# critical path
+	&xorps		("xmm0","xmm1");
+	&ret();
+
+&set_label("key_256b",16);
+	&$movekey	(&QWP(0,$key),"xmm0");
+	&lea		($key,&DWP(16,$key));
+
+	&shufps		("xmm4","xmm2",0b00010000);
+	&xorps		("xmm2","xmm4");
+	&shufps		("xmm4","xmm2",0b10001100);
+	&xorps		("xmm2","xmm4");
+	&shufps		("xmm1","xmm1",0b10101010);	# critical path
+	&xorps		("xmm2","xmm1");
+	&ret();
+
+&set_label("14rounds_alt",16);
+	&movdqa		("xmm5",&QWP(0x00,"ebx"));
+	&movdqa		("xmm4",&QWP(0x20,"ebx"));
+	&mov		($rounds,7);
+	&movdqu		(&QWP(-32,$key),"xmm0");
+	&movdqa		("xmm1","xmm2");
+	&movdqu		(&QWP(-16,$key),"xmm2");
+
+&set_label("loop_key256");
+	&pshufb		("xmm2","xmm5");
+	&aesenclast	("xmm2","xmm4");
+
+	&movdqa		("xmm3","xmm0");
+	&pslldq		("xmm0",4);
+	&pxor		("xmm3","xmm0");
+	&pslldq		("xmm0",4);
+	&pxor		("xmm3","xmm0");
+	&pslldq		("xmm0",4);
+	&pxor		("xmm0","xmm3");
+	&pslld		("xmm4",1);
+
+	&pxor		("xmm0","xmm2");
+	&movdqu		(&QWP(0,$key),"xmm0");
+
+	&dec		($rounds);
+	&jz		(&label("done_key256"));
+
+	&pshufd		("xmm2","xmm0",0xff);
+	&pxor		("xmm3","xmm3");
+	&aesenclast	("xmm2","xmm3");
+
+	&movdqa		("xmm3","xmm1");
+	&pslldq		("xmm1",4);
+	&pxor		("xmm3","xmm1");
+	&pslldq		("xmm1",4);
+	&pxor		("xmm3","xmm1");
+	&pslldq		("xmm1",4);
+	&pxor		("xmm1","xmm3");
+
+	&pxor		("xmm2","xmm1");
+	&movdqu		(&QWP(16,$key),"xmm2");
+	&lea		($key,&DWP(32,$key));
+	&movdqa		("xmm1","xmm2");
+	&jmp		(&label("loop_key256"));
+
+&set_label("done_key256");
+	&mov		($rounds,13);
+	&mov		(&DWP(16,$key),$rounds);
+
+&set_label("good_key");
+	&pxor	("xmm0","xmm0");
+	&pxor	("xmm1","xmm1");
+	&pxor	("xmm2","xmm2");
+	&pxor	("xmm3","xmm3");
+	&pxor	("xmm4","xmm4");
+	&pxor	("xmm5","xmm5");
+	&xor	("eax","eax");
+	&pop	("ebx");
+	&pop	("ebp");
+	&ret	();
+
+&set_label("bad_pointer",4);
+	&mov	("eax",-1);
+	&pop	("ebx");
+	&pop	("ebp");
+	&ret	();
+&set_label("bad_keybits",4);
+	&pxor	("xmm0","xmm0");
+	&mov	("eax",-2);
+	&pop	("ebx");
+	&pop	("ebp");
+	&ret	();
+&function_end_B("_aesni_set_encrypt_key");
+
+# int $PREFIX_set_encrypt_key (const unsigned char *userKey, int bits,
+#                              AES_KEY *key)
+&function_begin_B("${PREFIX}_set_encrypt_key");
+	&mov	("eax",&wparam(0));
+	&mov	($rounds,&wparam(1));
+	&mov	($key,&wparam(2));
+	&call	("_aesni_set_encrypt_key");
+	&ret	();
+&function_end_B("${PREFIX}_set_encrypt_key");
+
+# int $PREFIX_set_decrypt_key (const unsigned char *userKey, int bits,
+#                              AES_KEY *key)
+&function_begin_B("${PREFIX}_set_decrypt_key");
+	&mov	("eax",&wparam(0));
+	&mov	($rounds,&wparam(1));
+	&mov	($key,&wparam(2));
+	&call	("_aesni_set_encrypt_key");
+	&mov	($key,&wparam(2));
+	&shl	($rounds,4);	# rounds-1 after _aesni_set_encrypt_key
+	&test	("eax","eax");
+	&jnz	(&label("dec_key_ret"));
+	&lea	("eax",&DWP(16,$key,$rounds));	# end of key schedule
+
+	&$movekey	("xmm0",&QWP(0,$key));	# just swap
+	&$movekey	("xmm1",&QWP(0,"eax"));
+	&$movekey	(&QWP(0,"eax"),"xmm0");
+	&$movekey	(&QWP(0,$key),"xmm1");
+	&lea		($key,&DWP(16,$key));
+	&lea		("eax",&DWP(-16,"eax"));
+
+&set_label("dec_key_inverse");
+	&$movekey	("xmm0",&QWP(0,$key));	# swap and inverse
+	&$movekey	("xmm1",&QWP(0,"eax"));
+	&aesimc		("xmm0","xmm0");
+	&aesimc		("xmm1","xmm1");
+	&lea		($key,&DWP(16,$key));
+	&lea		("eax",&DWP(-16,"eax"));
+	&$movekey	(&QWP(16,"eax"),"xmm0");
+	&$movekey	(&QWP(-16,$key),"xmm1");
+	&cmp		("eax",$key);
+	&ja		(&label("dec_key_inverse"));
+
+	&$movekey	("xmm0",&QWP(0,$key));	# inverse middle
+	&aesimc		("xmm0","xmm0");
+	&$movekey	(&QWP(0,$key),"xmm0");
+
+	&pxor		("xmm0","xmm0");
+	&pxor		("xmm1","xmm1");
+	&xor		("eax","eax");		# return success
+&set_label("dec_key_ret");
+	&ret	();
+&function_end_B("${PREFIX}_set_decrypt_key");
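+
+# A C-level sketch of the two public entry points above (a hedged example:
+# signatures from the prototype comments, $PREFIX assumed to be "aesni";
+# per the code, 0 is returned on success, -1 for a NULL pointer and -2
+# for unsupported key bits):
+#
+#	AES_KEY enc, dec;
+#	unsigned char user_key[16] = {0};
+#	if (aesni_set_encrypt_key(user_key, 128, &enc) != 0) { /* error */ }
+#	if (aesni_set_decrypt_key(user_key, 128, &dec) != 0) { /* error */ }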
+
+&set_label("key_const",64);
+&data_word(0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d);
+&data_word(0x04070605,0x04070605,0x04070605,0x04070605);
+&data_word(1,1,1,1);
+&data_word(0x1b,0x1b,0x1b,0x1b);
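+# (The first two rows are pshufb byte-shuffle masks used by the *_alt key
+#  schedules above to perform the key schedule's RotWord/SubWord step via
+#  aesenclast; the last two rows seed the round constant, which the code
+#  doubles in-register with pslld.)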
+&asciz("AES for Intel AES-NI, CRYPTOGAMS by <appro\@openssl.org>");
+
+&asm_finish();
+
+close STDOUT;
diff --git a/src/crypto/fipsmodule/aes/asm/aesni-x86_64.pl b/src/crypto/fipsmodule/aes/asm/aesni-x86_64.pl
new file mode 100644
index 0000000..5810980
--- /dev/null
+++ b/src/crypto/fipsmodule/aes/asm/aesni-x86_64.pl
@@ -0,0 +1,5067 @@
+#! /usr/bin/env perl
+# Copyright 2009-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements support for Intel AES-NI extension. In
+# OpenSSL context it's used with Intel engine, but can also be used as
+# drop-in replacement for crypto/aes/asm/aes-x86_64.pl [see below for
+# details].
+#
+# Performance.
+#
+# Given aes(enc|dec) instructions' latency, asymptotic performance for
+# non-parallelizable modes such as CBC encrypt is 3.75 cycles per byte
+# processed with a 128-bit key. And given their throughput, asymptotic
+# performance for parallelizable modes is 1.25 cycles per byte. Being an
+# asymptotic limit it's not something you commonly achieve in reality,
+# but how close does one get? Below are results collected for
+# different modes and block sizes. Pairs of numbers are for en-/
+# decryption.
+#
+#	16-byte     64-byte     256-byte    1-KB        8-KB
+# ECB	4.25/4.25   1.38/1.38   1.28/1.28   1.26/1.26	1.26/1.26
+# CTR	5.42/5.42   1.92/1.92   1.44/1.44   1.28/1.28   1.26/1.26
+# CBC	4.38/4.43   4.15/1.43   4.07/1.32   4.07/1.29   4.06/1.28
+# CCM	5.66/9.42   4.42/5.41   4.16/4.40   4.09/4.15   4.06/4.07
+# OFB	5.42/5.42   4.64/4.64   4.44/4.44   4.39/4.39   4.38/4.38
+# CFB	5.73/5.85   5.56/5.62   5.48/5.56   5.47/5.55   5.47/5.55
+#
+# ECB, CTR, CBC and CCM results are free from EVP overhead. This means
+# that otherwise used 'openssl speed -evp aes-128-??? -engine aesni
+# [-decrypt]' will exhibit 10-15% worse results for smaller blocks.
+# The results were collected with specially crafted speed.c benchmark
+# in order to compare them with results reported in "Intel Advanced
+# Encryption Standard (AES) New Instruction Set" White Paper Revision
+# 3.0 dated May 2010. All above results are consistently better. This
+# module also provides better performance for block sizes smaller than
+# 128 bytes in points *not* represented in the above table.
+#
+# Looking at the results for 8-KB buffer.
+#
+# CFB and OFB results are far from the limit, because the implementation
+# uses the "generic" CRYPTO_[c|o]fb128_encrypt interfaces relying on
+# single-block aesni_encrypt, which is not the most optimal way to go.
+# The CBC encrypt result is unexpectedly high and there is no documented
+# explanation for it. Seemingly there is a small penalty for feeding
+# the result back to the AES unit the way it's done in CBC mode. There is
+# nothing one can do about it and the result appears optimal. The CCM
+# result is identical to CBC, because CBC-MAC is essentially CBC encrypt
+# without saving the output. The CCM CTR part "stays invisible," because
+# it's neatly interleaved with CBC-MAC. This provides a ~30% improvement
+# over a "straightforward" CCM implementation with CTR and CBC-MAC
+# performed disjointly. Parallelizable modes practically achieve the
+# theoretical limit.
+#
+# Looking at how results vary with buffer size.
+#
+# Curves are practically saturated at 1-KB buffer size. In most cases
+# "256-byte" performance is >95%, and "64-byte" is ~90% of "8-KB" one.
+# The CTR curve doesn't follow this pattern and is the slowest-changing
+# one, with the "256-byte" result being 87% of the "8-KB" one. This is
+# because the overhead in CTR mode is the most computationally intensive.
+# Small-block CCM decrypt is slower than encrypt, because the first CTR
+# and last CBC-MAC iterations can't be interleaved.
+#
+# Results for 192- and 256-bit keys.
+#
+# EVP-free results were observed to scale perfectly with the number of
+# rounds for larger block sizes, i.e. the 192-bit result being 10/12 times
+# lower and the 256-bit one 10/14. Well, in the CBC encrypt case the
+# differences are a tad smaller, because the above-mentioned penalty
+# biases all results by the same constant value. In a similar way,
+# function-call overhead affects small-block performance, as well as OFB
+# and CFB results. Differences are not large; the most common coefficients
+# are 10/11.7 and 10/13.4 (as opposed to 10/12.0 and 10/14.0), but one
+# observes even 10/11.2 and 10/12.4 (CTR, OFB, CFB)...
+
+# January 2011
+#
+# While Westmere processor features 6 cycles latency for aes[enc|dec]
+# instructions, which can be scheduled every second cycle, Sandy
+# Bridge spends 8 cycles per instruction, but it can schedule them
+# every cycle. This means that code targeting Westmere would perform
+# suboptimally on Sandy Bridge. Therefore this update.
+#
+# In addition, non-parallelizable CBC encrypt (as well as CCM) is
+# optimized. Relative improvement might appear modest, 8% on Westmere,
+# but in absolute terms it's 3.77 cycles per byte encrypted with
+# 128-bit key on Westmere, and 5.07 - on Sandy Bridge. These numbers
+# should be compared to asymptotic limits of 3.75 for Westmere and
+# 5.00 for Sandy Bridge. Actually, the fact that they get this close
+# to asymptotic limits is quite amazing. Indeed, the limit is
+# calculated as latency times number of rounds, 10 for 128-bit key,
+# and divided by 16, the number of bytes in block, or in other words
+# it accounts *solely* for aesenc instructions. But there are extra
+# instructions, and numbers so close to the asymptotic limits mean
+# that it's as if it takes as little as *one* additional cycle to
+# execute all of them. How is it possible? It is possible thanks to
+# out-of-order execution logic, which manages to overlap post-
+# processing of previous block, things like saving the output, with
+# actual encryption of current block, as well as pre-processing of
+# current block, things like fetching input and xor-ing it with
+# 0-round element of the key schedule, with actual encryption of
+# previous block. Keep this in mind...
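+#
+# Spelled out, the limit arithmetic above is simply
+#
+#	limit = aes[enc|dec] latency * rounds / block bytes
+#
+# e.g. 6 cycles * 10 rounds / 16 bytes = 3.75 cpb on Westmere and
+# 8 * 10 / 16 = 5.00 cpb on Sandy Bridge, matching the figures quoted.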
+#
+# For parallelizable modes, such as ECB, CBC decrypt, CTR, higher
+# performance is achieved by interleaving instructions working on
+# independent blocks. In that case the asymptotic limit for such modes
+# can be obtained by dividing the above-mentioned numbers by the AES
+# instructions' interleave factor. Westmere can execute at most 3
+# instructions at a time, meaning that the optimal interleave factor is 3,
+# and that's where the "magic" number of 1.25 comes from. "Optimal
+# interleave factor" means that increasing the interleave factor does
+# not improve performance. The formula has proven to reflect reality
+# pretty well on Westmere... Sandy Bridge on the other hand can
+# execute up to 8 AES instructions at a time, so how does varying
+# interleave factor affect the performance? Here is table for ECB
+# (numbers are cycles per byte processed with 128-bit key):
+#
+# instruction interleave factor		3x	6x	8x
+# theoretical asymptotic limit		1.67	0.83	0.625
+# measured performance for 8KB block	1.05	0.86	0.84
+#
+# "as if" interleave factor		4.7x	5.8x	6.0x
+#
+# Further data for other parallelizable modes:
+#
+# CBC decrypt				1.16	0.93	0.74
+# CTR					1.14	0.91	0.74
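+#
+# (The "theoretical asymptotic limit" row above is just the serial Sandy
+#  Bridge limit divided by the interleave factor: 5.00/3 = 1.67,
+#  5.00/6 = 0.83 and 5.00/8 = 0.625 cycles per byte.)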
+#
+# Well, given the 3x column it's probably inappropriate to call the limit
+# asymptotic if it can be surpassed, isn't it? What happens there?
+# Rewind to the CBC paragraph for the answer. Yes, out-of-order execution
+# magic is responsible for this. The processor overlaps not only the
+# additional instructions with the AES ones, but even AES instructions
+# processing adjacent triplets of independent blocks. In the 6x case the
+# additional instructions still claim a disproportionately small amount
+# of additional cycles, but in the 8x case the number of instructions must
+# be a tad too high for the out-of-order logic to cope with, and the AES
+# unit remains underutilized... As you can see, 8x interleave is hardly
+# justifiable, so there is no need to feel bad that 32-bit aesni-x86.pl
+# utilizes 6x interleave because of limited register bank capacity.
+#
+# Higher interleave factors do have a negative impact on Westmere
+# performance. While for ECB mode it's a negligible ~1.5%, other
+# parallelizable modes perform ~5% worse, which is outweighed by a ~25%
+# improvement on Sandy Bridge. To balance the regression on Westmere,
+# CTR mode was implemented with a 6x aesenc interleave factor.
+
+# April 2011
+#
+# Add aesni_xts_[en|de]crypt. Westmere spends 1.25 cycles processing
+# one byte out of 8KB with 128-bit key, Sandy Bridge - 0.90. Just like
+# in CTR mode AES instruction interleave factor was chosen to be 6x.
+
+# November 2015
+#
+# Add aesni_ocb_[en|de]crypt. AES instruction interleave factor was
+# chosen to be 6x.
+
+######################################################################
+# Current large-block performance in cycles per byte processed with
+# 128-bit key (less is better).
+#
+#		CBC en-/decrypt	CTR	XTS	ECB	OCB
+# Westmere	3.77/1.25	1.25	1.25	1.26
+# * Bridge	5.07/0.74	0.75	0.90	0.85	0.98
+# Haswell	4.44/0.63	0.63	0.73	0.63	0.70
+# Skylake	2.62/0.63	0.63	0.63	0.63
+# Silvermont	5.75/3.54	3.56	4.12	3.87(*)	4.11
+# Goldmont	3.82/1.26	1.26	1.29	1.29	1.50
+# Bulldozer	5.77/0.70	0.72	0.90	0.70	0.95
+#
+# (*)	Atom Silvermont ECB result is suboptimal because of penalties
+#	incurred by operations on %xmm8-15. As ECB is not considered
+#	critical, nothing was done to mitigate the problem.
+
+$PREFIX="aesni";	# if $PREFIX is set to "AES", the script
+			# generates drop-in replacement for
+			# crypto/aes/asm/aes-x86_64.pl:-)
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
+$movkey = $PREFIX eq "aesni" ? "movups" : "movups";
+@_4args=$win64?	("%rcx","%rdx","%r8", "%r9") :	# Win64 order
+		("%rdi","%rsi","%rdx","%rcx");	# Unix order
+
+$code=".text\n";
+$code.=".extern	OPENSSL_ia32cap_addr\n";
+
+$rounds="%eax";	# input to and changed by aesni_[en|de]cryptN !!!
+# this is natural Unix argument order for public $PREFIX_[ecb|cbc]_encrypt ...
+$inp="%rdi";
+$out="%rsi";
+$len="%rdx";
+$key="%rcx";	# input to and changed by aesni_[en|de]cryptN !!!
+$ivp="%r8";	# cbc, ctr, ...
+
+$rnds_="%r10d";	# backup copy for $rounds
+$key_="%r11";	# backup copy for $key
+
+# %xmm register layout
+$rndkey0="%xmm0";	$rndkey1="%xmm1";
+$inout0="%xmm2";	$inout1="%xmm3";
+$inout2="%xmm4";	$inout3="%xmm5";
+$inout4="%xmm6";	$inout5="%xmm7";
+$inout6="%xmm8";	$inout7="%xmm9";
+
+$in2="%xmm6";		$in1="%xmm7";	# used in CBC decrypt, CTR, ...
+$in0="%xmm8";		$iv="%xmm9";
+
+# Inline version of internal aesni_[en|de]crypt1.
+#
+# Why folded loop? Because aes[enc|dec] is slow enough to accommodate
+# cycles which take care of loop variables...
+{ my $sn;
+sub aesni_generate1 {
+my ($p,$key,$rounds,$inout,$ivec)=@_;	$inout=$inout0 if (!defined($inout));
+++$sn;
+$code.=<<___;
+	$movkey	($key),$rndkey0
+	$movkey	16($key),$rndkey1
+___
+$code.=<<___ if (defined($ivec));
+	xorps	$rndkey0,$ivec
+	lea	32($key),$key
+	xorps	$ivec,$inout
+___
+$code.=<<___ if (!defined($ivec));
+	lea	32($key),$key
+	xorps	$rndkey0,$inout
+___
+$code.=<<___;
+.Loop_${p}1_$sn:
+	aes${p}	$rndkey1,$inout
+	dec	$rounds
+	$movkey	($key),$rndkey1
+	lea	16($key),$key
+	jnz	.Loop_${p}1_$sn	# loop body is 16 bytes
+	aes${p}last	$rndkey1,$inout
+___
+}}
+# void $PREFIX_[en|de]crypt (const void *inp,void *out,const AES_KEY *key);
+#
+{ my ($inp,$out,$key) = @_4args;
+
+$code.=<<___;
+.globl	${PREFIX}_encrypt
+.type	${PREFIX}_encrypt,\@abi-omnipotent
+.align	16
+${PREFIX}_encrypt:
+	movups	($inp),$inout0		# load input
+	mov	240($key),$rounds	# key->rounds
+___
+	&aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+	 pxor	$rndkey0,$rndkey0	# clear register bank
+	 pxor	$rndkey1,$rndkey1
+	movups	$inout0,($out)		# output
+	 pxor	$inout0,$inout0
+	ret
+.size	${PREFIX}_encrypt,.-${PREFIX}_encrypt
+
+.globl	${PREFIX}_decrypt
+.type	${PREFIX}_decrypt,\@abi-omnipotent
+.align	16
+${PREFIX}_decrypt:
+	movups	($inp),$inout0		# load input
+	mov	240($key),$rounds	# key->rounds
+___
+	&aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+	 pxor	$rndkey0,$rndkey0	# clear register bank
+	 pxor	$rndkey1,$rndkey1
+	movups	$inout0,($out)		# output
+	 pxor	$inout0,$inout0
+	ret
+.size	${PREFIX}_decrypt, .-${PREFIX}_decrypt
+___
+}
+
+# _aesni_[en|de]cryptN are private interfaces, N denotes interleave
+# factor. Why were 3x subroutines originally used in loops? Even though
+# aes[enc|dec] latency was originally 6, it could be scheduled only
+# every *2nd* cycle. Thus 3x interleave was the one providing optimal
+# utilization, i.e. when the subroutine's throughput is virtually the
+# same as that of the non-interleaved subroutine [for numbers of input
+# blocks up to 3]. This is why it originally made no sense to implement
+# a 2x subroutine. But times change and it became appropriate to spend an
+# extra 192 bytes on a 2x subroutine on Atom Silvermont's account. For
+# processors that can schedule aes[enc|dec] every cycle the optimal
+# interleave factor equals the corresponding instruction's latency. 8x is
+# optimal for * Bridge and "super-optimal" for other Intel CPUs...
+
+sub aesni_generate2 {
+my $dir=shift;
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-1] is cipher/clear text...
+$code.=<<___;
+.type	_aesni_${dir}rypt2,\@abi-omnipotent
+.align	16
+_aesni_${dir}rypt2:
+	$movkey	($key),$rndkey0
+	shl	\$4,$rounds
+	$movkey	16($key),$rndkey1
+	xorps	$rndkey0,$inout0
+	xorps	$rndkey0,$inout1
+	$movkey	32($key),$rndkey0
+	lea	32($key,$rounds),$key
+	neg	%rax				# $rounds
+	add	\$16,%rax
+
+.L${dir}_loop2:
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+	aes${dir}	$rndkey0,$inout0
+	aes${dir}	$rndkey0,$inout1
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.L${dir}_loop2
+
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	aes${dir}last	$rndkey0,$inout0
+	aes${dir}last	$rndkey0,$inout1
+	ret
+.size	_aesni_${dir}rypt2,.-_aesni_${dir}rypt2
+___
+}
+sub aesni_generate3 {
+my $dir=shift;
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-2] is cipher/clear text...
+$code.=<<___;
+.type	_aesni_${dir}rypt3,\@abi-omnipotent
+.align	16
+_aesni_${dir}rypt3:
+	$movkey	($key),$rndkey0
+	shl	\$4,$rounds
+	$movkey	16($key),$rndkey1
+	xorps	$rndkey0,$inout0
+	xorps	$rndkey0,$inout1
+	xorps	$rndkey0,$inout2
+	$movkey	32($key),$rndkey0
+	lea	32($key,$rounds),$key
+	neg	%rax				# $rounds
+	add	\$16,%rax
+
+.L${dir}_loop3:
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	aes${dir}	$rndkey1,$inout2
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+	aes${dir}	$rndkey0,$inout0
+	aes${dir}	$rndkey0,$inout1
+	aes${dir}	$rndkey0,$inout2
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.L${dir}_loop3
+
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	aes${dir}	$rndkey1,$inout2
+	aes${dir}last	$rndkey0,$inout0
+	aes${dir}last	$rndkey0,$inout1
+	aes${dir}last	$rndkey0,$inout2
+	ret
+.size	_aesni_${dir}rypt3,.-_aesni_${dir}rypt3
+___
+}
+# 4x interleave is implemented to improve small-block performance,
+# most notably [and naturally] 4-block by ~30%. One can argue that one
+# should have implemented 5x as well, but the improvement would be <20%,
+# so it's not worth it...
+sub aesni_generate4 {
+my $dir=shift;
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-3] is cipher/clear text...
+$code.=<<___;
+.type	_aesni_${dir}rypt4,\@abi-omnipotent
+.align	16
+_aesni_${dir}rypt4:
+	$movkey	($key),$rndkey0
+	shl	\$4,$rounds
+	$movkey	16($key),$rndkey1
+	xorps	$rndkey0,$inout0
+	xorps	$rndkey0,$inout1
+	xorps	$rndkey0,$inout2
+	xorps	$rndkey0,$inout3
+	$movkey	32($key),$rndkey0
+	lea	32($key,$rounds),$key
+	neg	%rax				# $rounds
+	.byte	0x0f,0x1f,0x00
+	add	\$16,%rax
+
+.L${dir}_loop4:
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	aes${dir}	$rndkey1,$inout2
+	aes${dir}	$rndkey1,$inout3
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+	aes${dir}	$rndkey0,$inout0
+	aes${dir}	$rndkey0,$inout1
+	aes${dir}	$rndkey0,$inout2
+	aes${dir}	$rndkey0,$inout3
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.L${dir}_loop4
+
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	aes${dir}	$rndkey1,$inout2
+	aes${dir}	$rndkey1,$inout3
+	aes${dir}last	$rndkey0,$inout0
+	aes${dir}last	$rndkey0,$inout1
+	aes${dir}last	$rndkey0,$inout2
+	aes${dir}last	$rndkey0,$inout3
+	ret
+.size	_aesni_${dir}rypt4,.-_aesni_${dir}rypt4
+___
+}
+sub aesni_generate6 {
+my $dir=shift;
+# As already mentioned it takes in $key and $rounds, which are *not*
+# preserved. $inout[0-5] is cipher/clear text...
+$code.=<<___;
+.type	_aesni_${dir}rypt6,\@abi-omnipotent
+.align	16
+_aesni_${dir}rypt6:
+	$movkey		($key),$rndkey0
+	shl		\$4,$rounds
+	$movkey		16($key),$rndkey1
+	xorps		$rndkey0,$inout0
+	pxor		$rndkey0,$inout1
+	pxor		$rndkey0,$inout2
+	aes${dir}	$rndkey1,$inout0
+	lea		32($key,$rounds),$key
+	neg		%rax			# $rounds
+	aes${dir}	$rndkey1,$inout1
+	pxor		$rndkey0,$inout3
+	pxor		$rndkey0,$inout4
+	aes${dir}	$rndkey1,$inout2
+	pxor		$rndkey0,$inout5
+	$movkey		($key,%rax),$rndkey0
+	add		\$16,%rax
+	jmp		.L${dir}_loop6_enter
+.align	16
+.L${dir}_loop6:
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	aes${dir}	$rndkey1,$inout2
+.L${dir}_loop6_enter:
+	aes${dir}	$rndkey1,$inout3
+	aes${dir}	$rndkey1,$inout4
+	aes${dir}	$rndkey1,$inout5
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+	aes${dir}	$rndkey0,$inout0
+	aes${dir}	$rndkey0,$inout1
+	aes${dir}	$rndkey0,$inout2
+	aes${dir}	$rndkey0,$inout3
+	aes${dir}	$rndkey0,$inout4
+	aes${dir}	$rndkey0,$inout5
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.L${dir}_loop6
+
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	aes${dir}	$rndkey1,$inout2
+	aes${dir}	$rndkey1,$inout3
+	aes${dir}	$rndkey1,$inout4
+	aes${dir}	$rndkey1,$inout5
+	aes${dir}last	$rndkey0,$inout0
+	aes${dir}last	$rndkey0,$inout1
+	aes${dir}last	$rndkey0,$inout2
+	aes${dir}last	$rndkey0,$inout3
+	aes${dir}last	$rndkey0,$inout4
+	aes${dir}last	$rndkey0,$inout5
+	ret
+.size	_aesni_${dir}rypt6,.-_aesni_${dir}rypt6
+___
+}
+sub aesni_generate8 {
+my $dir=shift;
+# As already mentioned, it takes $key and $rounds, which are *not*
+# preserved. $inout[0-7] holds the cipher/clear text...
+$code.=<<___;
+.type	_aesni_${dir}rypt8,\@abi-omnipotent
+.align	16
+_aesni_${dir}rypt8:
+	$movkey		($key),$rndkey0
+	shl		\$4,$rounds
+	$movkey		16($key),$rndkey1
+	xorps		$rndkey0,$inout0
+	xorps		$rndkey0,$inout1
+	pxor		$rndkey0,$inout2
+	pxor		$rndkey0,$inout3
+	pxor		$rndkey0,$inout4
+	lea		32($key,$rounds),$key
+	neg		%rax			# $rounds
+	aes${dir}	$rndkey1,$inout0
+	pxor		$rndkey0,$inout5
+	pxor		$rndkey0,$inout6
+	aes${dir}	$rndkey1,$inout1
+	pxor		$rndkey0,$inout7
+	$movkey		($key,%rax),$rndkey0
+	add		\$16,%rax
+	jmp		.L${dir}_loop8_inner
+.align	16
+.L${dir}_loop8:
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+.L${dir}_loop8_inner:
+	aes${dir}	$rndkey1,$inout2
+	aes${dir}	$rndkey1,$inout3
+	aes${dir}	$rndkey1,$inout4
+	aes${dir}	$rndkey1,$inout5
+	aes${dir}	$rndkey1,$inout6
+	aes${dir}	$rndkey1,$inout7
+.L${dir}_loop8_enter:
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+	aes${dir}	$rndkey0,$inout0
+	aes${dir}	$rndkey0,$inout1
+	aes${dir}	$rndkey0,$inout2
+	aes${dir}	$rndkey0,$inout3
+	aes${dir}	$rndkey0,$inout4
+	aes${dir}	$rndkey0,$inout5
+	aes${dir}	$rndkey0,$inout6
+	aes${dir}	$rndkey0,$inout7
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.L${dir}_loop8
+
+	aes${dir}	$rndkey1,$inout0
+	aes${dir}	$rndkey1,$inout1
+	aes${dir}	$rndkey1,$inout2
+	aes${dir}	$rndkey1,$inout3
+	aes${dir}	$rndkey1,$inout4
+	aes${dir}	$rndkey1,$inout5
+	aes${dir}	$rndkey1,$inout6
+	aes${dir}	$rndkey1,$inout7
+	aes${dir}last	$rndkey0,$inout0
+	aes${dir}last	$rndkey0,$inout1
+	aes${dir}last	$rndkey0,$inout2
+	aes${dir}last	$rndkey0,$inout3
+	aes${dir}last	$rndkey0,$inout4
+	aes${dir}last	$rndkey0,$inout5
+	aes${dir}last	$rndkey0,$inout6
+	aes${dir}last	$rndkey0,$inout7
+	ret
+.size	_aesni_${dir}rypt8,.-_aesni_${dir}rypt8
+___
+}
+&aesni_generate2("enc") if ($PREFIX eq "aesni");
+&aesni_generate2("dec");
+&aesni_generate3("enc") if ($PREFIX eq "aesni");
+&aesni_generate3("dec");
+&aesni_generate4("enc") if ($PREFIX eq "aesni");
+&aesni_generate4("dec");
+&aesni_generate6("enc") if ($PREFIX eq "aesni");
+&aesni_generate6("dec");
+&aesni_generate8("enc") if ($PREFIX eq "aesni");
+&aesni_generate8("dec");
+
+if ($PREFIX eq "aesni") {
+########################################################################
+# void aesni_ecb_encrypt (const void *in, void *out,
+#			  size_t length, const AES_KEY *key,
+#			  int enc);
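+#
+# In C terms the routine is roughly (a sketch, not a drop-in
+# implementation; AES_encrypt/AES_decrypt are the usual one-block
+# primitives):
+#
+#	for (i = 0; i < length/16; i++)
+#		enc ? AES_encrypt(in+16*i, out+16*i, key)
+#		    : AES_decrypt(in+16*i, out+16*i, key);
+#
+# with the 8/6/4/3/2-block helpers above consuming as many blocks per
+# iteration as remain.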
+$code.=<<___;
+.globl	aesni_ecb_encrypt
+.type	aesni_ecb_encrypt,\@function,5
+.align	16
+aesni_ecb_encrypt:
+___
+$code.=<<___ if ($win64);
+	lea	-0x58(%rsp),%rsp
+	movaps	%xmm6,(%rsp)		# offload $inout4..7
+	movaps	%xmm7,0x10(%rsp)
+	movaps	%xmm8,0x20(%rsp)
+	movaps	%xmm9,0x30(%rsp)
+.Lecb_enc_body:
+___
+$code.=<<___;
+	and	\$-16,$len		# if ($len<16)
+	jz	.Lecb_ret		# return
+
+	mov	240($key),$rounds	# key->rounds
+	$movkey	($key),$rndkey0
+	mov	$key,$key_		# backup $key
+	mov	$rounds,$rnds_		# backup $rounds
+	test	%r8d,%r8d		# 5th argument
+	jz	.Lecb_decrypt
+#--------------------------- ECB ENCRYPT ------------------------------#
+	cmp	\$0x80,$len		# if ($len<8*16)
+	jb	.Lecb_enc_tail		# short input
+
+	movdqu	($inp),$inout0		# load 8 input blocks
+	movdqu	0x10($inp),$inout1
+	movdqu	0x20($inp),$inout2
+	movdqu	0x30($inp),$inout3
+	movdqu	0x40($inp),$inout4
+	movdqu	0x50($inp),$inout5
+	movdqu	0x60($inp),$inout6
+	movdqu	0x70($inp),$inout7
+	lea	0x80($inp),$inp		# $inp+=8*16
+	sub	\$0x80,$len		# $len-=8*16 (can be zero)
+	jmp	.Lecb_enc_loop8_enter
+.align 16
+.Lecb_enc_loop8:
+	movups	$inout0,($out)		# store 8 output blocks
+	mov	$key_,$key		# restore $key
+	movdqu	($inp),$inout0		# load 8 input blocks
+	mov	$rnds_,$rounds		# restore $rounds
+	movups	$inout1,0x10($out)
+	movdqu	0x10($inp),$inout1
+	movups	$inout2,0x20($out)
+	movdqu	0x20($inp),$inout2
+	movups	$inout3,0x30($out)
+	movdqu	0x30($inp),$inout3
+	movups	$inout4,0x40($out)
+	movdqu	0x40($inp),$inout4
+	movups	$inout5,0x50($out)
+	movdqu	0x50($inp),$inout5
+	movups	$inout6,0x60($out)
+	movdqu	0x60($inp),$inout6
+	movups	$inout7,0x70($out)
+	lea	0x80($out),$out		# $out+=8*16
+	movdqu	0x70($inp),$inout7
+	lea	0x80($inp),$inp		# $inp+=8*16
+.Lecb_enc_loop8_enter:
+
+	call	_aesni_encrypt8
+
+	sub	\$0x80,$len
+	jnc	.Lecb_enc_loop8		# loop if $len-=8*16 didn't borrow
+
+	movups	$inout0,($out)		# store 8 output blocks
+	mov	$key_,$key		# restore $key
+	movups	$inout1,0x10($out)
+	mov	$rnds_,$rounds		# restore $rounds
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	movups	$inout4,0x40($out)
+	movups	$inout5,0x50($out)
+	movups	$inout6,0x60($out)
+	movups	$inout7,0x70($out)
+	lea	0x80($out),$out		# $out+=8*16
+	add	\$0x80,$len		# restore real remaining $len
+	jz	.Lecb_ret		# done if ($len==0)
+
+.Lecb_enc_tail:				# $len is less than 8*16
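+	# dispatch on the remaining length: after each cmp, jb picks the
+	# block count below the compared value and je the exact match,
+	# selecting 1..7 blocks without a jump table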
+	movups	($inp),$inout0
+	cmp	\$0x20,$len
+	jb	.Lecb_enc_one
+	movups	0x10($inp),$inout1
+	je	.Lecb_enc_two
+	movups	0x20($inp),$inout2
+	cmp	\$0x40,$len
+	jb	.Lecb_enc_three
+	movups	0x30($inp),$inout3
+	je	.Lecb_enc_four
+	movups	0x40($inp),$inout4
+	cmp	\$0x60,$len
+	jb	.Lecb_enc_five
+	movups	0x50($inp),$inout5
+	je	.Lecb_enc_six
+	movdqu	0x60($inp),$inout6
+	xorps	$inout7,$inout7
+	call	_aesni_encrypt8
+	movups	$inout0,($out)		# store 7 output blocks
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	movups	$inout4,0x40($out)
+	movups	$inout5,0x50($out)
+	movups	$inout6,0x60($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_enc_one:
+___
+	&aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+	movups	$inout0,($out)		# store one output block
+	jmp	.Lecb_ret
+.align	16
+.Lecb_enc_two:
+	call	_aesni_encrypt2
+	movups	$inout0,($out)		# store 2 output blocks
+	movups	$inout1,0x10($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_enc_three:
+	call	_aesni_encrypt3
+	movups	$inout0,($out)		# store 3 output blocks
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_enc_four:
+	call	_aesni_encrypt4
+	movups	$inout0,($out)		# store 4 output blocks
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_enc_five:
+	xorps	$inout5,$inout5
+	call	_aesni_encrypt6
+	movups	$inout0,($out)		# store 5 output blocks
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	movups	$inout4,0x40($out)
+	jmp	.Lecb_ret
+.align	16
+.Lecb_enc_six:
+	call	_aesni_encrypt6
+	movups	$inout0,($out)		# store 6 output blocks
+	movups	$inout1,0x10($out)
+	movups	$inout2,0x20($out)
+	movups	$inout3,0x30($out)
+	movups	$inout4,0x40($out)
+	movups	$inout5,0x50($out)
+	jmp	.Lecb_ret
+#--------------------------- ECB DECRYPT ------------------------------#
+.align	16
+.Lecb_decrypt:
+	cmp	\$0x80,$len		# if ($len<8*16)
+	jb	.Lecb_dec_tail		# short input
+
+	movdqu	($inp),$inout0		# load 8 input blocks
+	movdqu	0x10($inp),$inout1
+	movdqu	0x20($inp),$inout2
+	movdqu	0x30($inp),$inout3
+	movdqu	0x40($inp),$inout4
+	movdqu	0x50($inp),$inout5
+	movdqu	0x60($inp),$inout6
+	movdqu	0x70($inp),$inout7
+	lea	0x80($inp),$inp		# $inp+=8*16
+	sub	\$0x80,$len		# $len-=8*16 (can be zero)
+	jmp	.Lecb_dec_loop8_enter
+.align 16
+.Lecb_dec_loop8:
+	movups	$inout0,($out)		# store 8 output blocks
+	mov	$key_,$key		# restore $key
+	movdqu	($inp),$inout0		# load 8 input blocks
+	mov	$rnds_,$rounds		# restore $rounds
+	movups	$inout1,0x10($out)
+	movdqu	0x10($inp),$inout1
+	movups	$inout2,0x20($out)
+	movdqu	0x20($inp),$inout2
+	movups	$inout3,0x30($out)
+	movdqu	0x30($inp),$inout3
+	movups	$inout4,0x40($out)
+	movdqu	0x40($inp),$inout4
+	movups	$inout5,0x50($out)
+	movdqu	0x50($inp),$inout5
+	movups	$inout6,0x60($out)
+	movdqu	0x60($inp),$inout6
+	movups	$inout7,0x70($out)
+	lea	0x80($out),$out		# $out+=8*16
+	movdqu	0x70($inp),$inout7
+	lea	0x80($inp),$inp		# $inp+=8*16
+.Lecb_dec_loop8_enter:
+
+	call	_aesni_decrypt8
+
+	$movkey	($key_),$rndkey0
+	sub	\$0x80,$len
+	jnc	.Lecb_dec_loop8		# loop if $len-=8*16 didn't borrow
+
+	movups	$inout0,($out)		# store 8 output blocks
+	 pxor	$inout0,$inout0		# clear register bank
+	mov	$key_,$key		# restore $key
+	movups	$inout1,0x10($out)
+	 pxor	$inout1,$inout1
+	mov	$rnds_,$rounds		# restore $rounds
+	movups	$inout2,0x20($out)
+	 pxor	$inout2,$inout2
+	movups	$inout3,0x30($out)
+	 pxor	$inout3,$inout3
+	movups	$inout4,0x40($out)
+	 pxor	$inout4,$inout4
+	movups	$inout5,0x50($out)
+	 pxor	$inout5,$inout5
+	movups	$inout6,0x60($out)
+	 pxor	$inout6,$inout6
+	movups	$inout7,0x70($out)
+	 pxor	$inout7,$inout7
+	lea	0x80($out),$out		# $out+=8*16
+	add	\$0x80,$len		# restore real remaining $len
+	jz	.Lecb_ret		# done if ($len==0)
+
+.Lecb_dec_tail:
+	movups	($inp),$inout0
+	cmp	\$0x20,$len
+	jb	.Lecb_dec_one
+	movups	0x10($inp),$inout1
+	je	.Lecb_dec_two
+	movups	0x20($inp),$inout2
+	cmp	\$0x40,$len
+	jb	.Lecb_dec_three
+	movups	0x30($inp),$inout3
+	je	.Lecb_dec_four
+	movups	0x40($inp),$inout4
+	cmp	\$0x60,$len
+	jb	.Lecb_dec_five
+	movups	0x50($inp),$inout5
+	je	.Lecb_dec_six
+	movups	0x60($inp),$inout6
+	$movkey	($key),$rndkey0
+	xorps	$inout7,$inout7
+	call	_aesni_decrypt8
+	movups	$inout0,($out)		# store 7 output blocks
+	 pxor	$inout0,$inout0		# clear register bank
+	movups	$inout1,0x10($out)
+	 pxor	$inout1,$inout1
+	movups	$inout2,0x20($out)
+	 pxor	$inout2,$inout2
+	movups	$inout3,0x30($out)
+	 pxor	$inout3,$inout3
+	movups	$inout4,0x40($out)
+	 pxor	$inout4,$inout4
+	movups	$inout5,0x50($out)
+	 pxor	$inout5,$inout5
+	movups	$inout6,0x60($out)
+	 pxor	$inout6,$inout6
+	 pxor	$inout7,$inout7
+	jmp	.Lecb_ret
+.align	16
+.Lecb_dec_one:
+___
+	&aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+	movups	$inout0,($out)		# store one output block
+	 pxor	$inout0,$inout0		# clear register bank
+	jmp	.Lecb_ret
+.align	16
+.Lecb_dec_two:
+	call	_aesni_decrypt2
+	movups	$inout0,($out)		# store 2 output blocks
+	 pxor	$inout0,$inout0		# clear register bank
+	movups	$inout1,0x10($out)
+	 pxor	$inout1,$inout1
+	jmp	.Lecb_ret
+.align	16
+.Lecb_dec_three:
+	call	_aesni_decrypt3
+	movups	$inout0,($out)		# store 3 output blocks
+	 pxor	$inout0,$inout0		# clear register bank
+	movups	$inout1,0x10($out)
+	 pxor	$inout1,$inout1
+	movups	$inout2,0x20($out)
+	 pxor	$inout2,$inout2
+	jmp	.Lecb_ret
+.align	16
+.Lecb_dec_four:
+	call	_aesni_decrypt4
+	movups	$inout0,($out)		# store 4 output blocks
+	 pxor	$inout0,$inout0		# clear register bank
+	movups	$inout1,0x10($out)
+	 pxor	$inout1,$inout1
+	movups	$inout2,0x20($out)
+	 pxor	$inout2,$inout2
+	movups	$inout3,0x30($out)
+	 pxor	$inout3,$inout3
+	jmp	.Lecb_ret
+.align	16
+.Lecb_dec_five:
+	xorps	$inout5,$inout5
+	call	_aesni_decrypt6
+	movups	$inout0,($out)		# store 5 output blocks
+	 pxor	$inout0,$inout0		# clear register bank
+	movups	$inout1,0x10($out)
+	 pxor	$inout1,$inout1
+	movups	$inout2,0x20($out)
+	 pxor	$inout2,$inout2
+	movups	$inout3,0x30($out)
+	 pxor	$inout3,$inout3
+	movups	$inout4,0x40($out)
+	 pxor	$inout4,$inout4
+	 pxor	$inout5,$inout5
+	jmp	.Lecb_ret
+.align	16
+.Lecb_dec_six:
+	call	_aesni_decrypt6
+	movups	$inout0,($out)		# store 6 output blocks
+	 pxor	$inout0,$inout0		# clear register bank
+	movups	$inout1,0x10($out)
+	 pxor	$inout1,$inout1
+	movups	$inout2,0x20($out)
+	 pxor	$inout2,$inout2
+	movups	$inout3,0x30($out)
+	 pxor	$inout3,$inout3
+	movups	$inout4,0x40($out)
+	 pxor	$inout4,$inout4
+	movups	$inout5,0x50($out)
+	 pxor	$inout5,$inout5
+
+.Lecb_ret:
+	xorps	$rndkey0,$rndkey0	# %xmm0
+	pxor	$rndkey1,$rndkey1
+___
+$code.=<<___ if ($win64);
+	movaps	(%rsp),%xmm6
+	movaps	%xmm0,(%rsp)		# clear stack
+	movaps	0x10(%rsp),%xmm7
+	movaps	%xmm0,0x10(%rsp)
+	movaps	0x20(%rsp),%xmm8
+	movaps	%xmm0,0x20(%rsp)
+	movaps	0x30(%rsp),%xmm9
+	movaps	%xmm0,0x30(%rsp)
+	lea	0x58(%rsp),%rsp
+.Lecb_enc_ret:
+___
+$code.=<<___;
+	ret
+.size	aesni_ecb_encrypt,.-aesni_ecb_encrypt
+___
+
+{
+######################################################################
+# void aesni_ccm64_[en|de]crypt_blocks (const void *in, void *out,
+#                         size_t blocks, const AES_KEY *key,
+#                         const char *ivec,char *cmac);
+#
+# Handles only complete blocks, operates on a 64-bit counter and
+# does not update *ivec! Nor does it finalize the CMAC value
+# (see engine/eng_aesni.c for details)
+#
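+# Per block, the encrypt path computes, roughly (a sketch; E() is raw
+# AES under $key, with both invocations interleaved in
+# .Lccm64_enc2_loop):
+#
+#	cmac = E(cmac ^ in[i]);			/* CBC-MAC update */
+#	out[i] = in[i] ^ E(ctr); ctr.lo64++;	/* CTR encryption */
+#
+# the decrypt path mirrors this, with cmac absorbing the plaintext.
+#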
+{
+my $cmac="%r9";	# 6th argument
+
+my $increment="%xmm9";
+my $iv="%xmm6";
+my $bswap_mask="%xmm7";
+
+$code.=<<___;
+.globl	aesni_ccm64_encrypt_blocks
+.type	aesni_ccm64_encrypt_blocks,\@function,6
+.align	16
+aesni_ccm64_encrypt_blocks:
+___
+$code.=<<___ if ($win64);
+	lea	-0x58(%rsp),%rsp
+	movaps	%xmm6,(%rsp)		# $iv
+	movaps	%xmm7,0x10(%rsp)	# $bswap_mask
+	movaps	%xmm8,0x20(%rsp)	# $in0
+	movaps	%xmm9,0x30(%rsp)	# $increment
+.Lccm64_enc_body:
+___
+$code.=<<___;
+	mov	240($key),$rounds		# key->rounds
+	movdqu	($ivp),$iv
+	movdqa	.Lincrement64(%rip),$increment
+	movdqa	.Lbswap_mask(%rip),$bswap_mask
+
+	shl	\$4,$rounds
+	mov	\$16,$rnds_
+	lea	0($key),$key_
+	movdqu	($cmac),$inout1
+	movdqa	$iv,$inout0
+	lea	32($key,$rounds),$key		# end of key schedule
+	pshufb	$bswap_mask,$iv
+	sub	%rax,%r10			# twisted $rounds
+	jmp	.Lccm64_enc_outer
+.align	16
+.Lccm64_enc_outer:
+	$movkey	($key_),$rndkey0
+	mov	%r10,%rax
+	movups	($inp),$in0			# load inp
+
+	xorps	$rndkey0,$inout0		# counter
+	$movkey	16($key_),$rndkey1
+	xorps	$in0,$rndkey0
+	xorps	$rndkey0,$inout1		# cmac^=inp
+	$movkey	32($key_),$rndkey0
+
+.Lccm64_enc2_loop:
+	aesenc	$rndkey1,$inout0
+	aesenc	$rndkey1,$inout1
+	$movkey	($key,%rax),$rndkey1
+	add	\$32,%rax
+	aesenc	$rndkey0,$inout0
+	aesenc	$rndkey0,$inout1
+	$movkey	-16($key,%rax),$rndkey0
+	jnz	.Lccm64_enc2_loop
+	aesenc	$rndkey1,$inout0
+	aesenc	$rndkey1,$inout1
+	paddq	$increment,$iv
+	dec	$len				# $len-- ($len is in blocks)
+	aesenclast	$rndkey0,$inout0
+	aesenclast	$rndkey0,$inout1
+
+	lea	16($inp),$inp
+	xorps	$inout0,$in0			# inp ^= E(iv)
+	movdqa	$iv,$inout0
+	movups	$in0,($out)			# save output
+	pshufb	$bswap_mask,$inout0
+	lea	16($out),$out			# $out+=16
+	jnz	.Lccm64_enc_outer		# loop if ($len!=0)
+
+	 pxor	$rndkey0,$rndkey0		# clear register bank
+	 pxor	$rndkey1,$rndkey1
+	 pxor	$inout0,$inout0
+	movups	$inout1,($cmac)			# store resulting mac
+	 pxor	$inout1,$inout1
+	 pxor	$in0,$in0
+	 pxor	$iv,$iv
+___
+$code.=<<___ if ($win64);
+	movaps	(%rsp),%xmm6
+	movaps	%xmm0,(%rsp)			# clear stack
+	movaps	0x10(%rsp),%xmm7
+	movaps	%xmm0,0x10(%rsp)
+	movaps	0x20(%rsp),%xmm8
+	movaps	%xmm0,0x20(%rsp)
+	movaps	0x30(%rsp),%xmm9
+	movaps	%xmm0,0x30(%rsp)
+	lea	0x58(%rsp),%rsp
+.Lccm64_enc_ret:
+___
+$code.=<<___;
+	ret
+.size	aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks
+___
+######################################################################
+$code.=<<___;
+.globl	aesni_ccm64_decrypt_blocks
+.type	aesni_ccm64_decrypt_blocks,\@function,6
+.align	16
+aesni_ccm64_decrypt_blocks:
+___
+$code.=<<___ if ($win64);
+	lea	-0x58(%rsp),%rsp
+	movaps	%xmm6,(%rsp)		# $iv
+	movaps	%xmm7,0x10(%rsp)	# $bswap_mask
+	movaps	%xmm8,0x20(%rsp)	# $in8
+	movaps	%xmm9,0x30(%rsp)	# $increment
+.Lccm64_dec_body:
+___
+$code.=<<___;
+	mov	240($key),$rounds		# key->rounds
+	movups	($ivp),$iv
+	movdqu	($cmac),$inout1
+	movdqa	.Lincrement64(%rip),$increment
+	movdqa	.Lbswap_mask(%rip),$bswap_mask
+
+	movaps	$iv,$inout0
+	mov	$rounds,$rnds_
+	mov	$key,$key_
+	pshufb	$bswap_mask,$iv
+___
+	&aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+	shl	\$4,$rnds_
+	mov	\$16,$rounds
+	movups	($inp),$in0			# load inp
+	paddq	$increment,$iv
+	lea	16($inp),$inp			# $inp+=16
+	sub	%r10,%rax			# twisted $rounds
+	lea	32($key_,$rnds_),$key		# end of key schedule
+	mov	%rax,%r10
+	jmp	.Lccm64_dec_outer
+.align	16
+.Lccm64_dec_outer:
+	xorps	$inout0,$in0			# inp ^= E(iv)
+	movdqa	$iv,$inout0
+	movups	$in0,($out)			# save output
+	lea	16($out),$out			# $out+=16
+	pshufb	$bswap_mask,$inout0
+
+	sub	\$1,$len			# $len-- ($len is in blocks)
+	jz	.Lccm64_dec_break		# if ($len==0) break
+
+	$movkey	($key_),$rndkey0
+	mov	%r10,%rax
+	$movkey	16($key_),$rndkey1
+	xorps	$rndkey0,$in0
+	xorps	$rndkey0,$inout0
+	xorps	$in0,$inout1			# cmac^=out
+	$movkey	32($key_),$rndkey0
+	jmp	.Lccm64_dec2_loop
+.align	16
+.Lccm64_dec2_loop:
+	aesenc	$rndkey1,$inout0
+	aesenc	$rndkey1,$inout1
+	$movkey	($key,%rax),$rndkey1
+	add	\$32,%rax
+	aesenc	$rndkey0,$inout0
+	aesenc	$rndkey0,$inout1
+	$movkey	-16($key,%rax),$rndkey0
+	jnz	.Lccm64_dec2_loop
+	movups	($inp),$in0			# load input
+	paddq	$increment,$iv
+	aesenc	$rndkey1,$inout0
+	aesenc	$rndkey1,$inout1
+	aesenclast	$rndkey0,$inout0
+	aesenclast	$rndkey0,$inout1
+	lea	16($inp),$inp			# $inp+=16
+	jmp	.Lccm64_dec_outer
+
+.align	16
+.Lccm64_dec_break:
+	#xorps	$in0,$inout1			# cmac^=out
+	mov	240($key_),$rounds
+___
+	&aesni_generate1("enc",$key_,$rounds,$inout1,$in0);
+$code.=<<___;
+	 pxor	$rndkey0,$rndkey0		# clear register bank
+	 pxor	$rndkey1,$rndkey1
+	 pxor	$inout0,$inout0
+	movups	$inout1,($cmac)			# store resulting mac
+	 pxor	$inout1,$inout1
+	 pxor	$in0,$in0
+	 pxor	$iv,$iv
+___
+$code.=<<___ if ($win64);
+	movaps	(%rsp),%xmm6
+	movaps	%xmm0,(%rsp)			# clear stack
+	movaps	0x10(%rsp),%xmm7
+	movaps	%xmm0,0x10(%rsp)
+	movaps	0x20(%rsp),%xmm8
+	movaps	%xmm0,0x20(%rsp)
+	movaps	0x30(%rsp),%xmm9
+	movaps	%xmm0,0x30(%rsp)
+	lea	0x58(%rsp),%rsp
+.Lccm64_dec_ret:
+___
+$code.=<<___;
+	ret
+.size	aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks
+___
+}
+######################################################################
+# void aesni_ctr32_encrypt_blocks (const void *in, void *out,
+#                         size_t blocks, const AES_KEY *key,
+#                         const char *ivec);
+#
+# Handles only complete blocks, operates on a 32-bit counter and
+# does not update *ivec! (see crypto/modes/ctr128.c for details)
+#
+# Overhaul based on suggestions from Shay Gueron and Vlad Krasnov,
+# http://rt.openssl.org/Ticket/Display.html?id=3021&user=guest&pass=guest.
+# The key ideas are a full unroll and modulo-scheduled counter
+# calculations with a zero-round key xor.
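+#
+# The zero-round-key trick, sketched (illustrative only): the first
+# AES round is a plain xor with round[0], so counter blocks are parked
+# on the stack already xor-ed with round[0], and the last round folds
+# in the input because aesenclast xors its operand into the state:
+#
+#	stack[i] = ctr_block[i] ^ rk[0];	/* modulo-scheduled */
+#	state = stack[i];
+#	for (r = 1; r < rounds; r++)
+#		state = AESENC(state, rk[r]);
+#	out[i] = AESENCLAST(state, rk[rounds] ^ in[i]);
+#		/* == AESENCLAST(state, rk[rounds]) ^ in[i] */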
+{
+my ($in0,$in1,$in2,$in3,$in4,$in5)=map("%xmm$_",(10..15));
+my ($key0,$ctr)=("%ebp","${ivp}d");
+my $frame_size = 0x80 + ($win64?160:0);
+
+$code.=<<___;
+.globl	aesni_ctr32_encrypt_blocks
+.type	aesni_ctr32_encrypt_blocks,\@function,5
+.align	16
+aesni_ctr32_encrypt_blocks:
+	cmp	\$1,$len
+	jne	.Lctr32_bulk
+
+	# handle single block without allocating stack frame,
+	# useful when handling edges
+	movups	($ivp),$inout0
+	movups	($inp),$inout1
+	mov	240($key),%edx			# key->rounds
+___
+	&aesni_generate1("enc",$key,"%edx");
+$code.=<<___;
+	 pxor	$rndkey0,$rndkey0		# clear register bank
+	 pxor	$rndkey1,$rndkey1
+	xorps	$inout1,$inout0
+	 pxor	$inout1,$inout1
+	movups	$inout0,($out)
+	 xorps	$inout0,$inout0
+	jmp	.Lctr32_epilogue
+
+.align	16
+.Lctr32_bulk:
+	lea	(%rsp),$key_			# use $key_ as frame pointer
+	push	%rbp
+	sub	\$$frame_size,%rsp
+	and	\$-16,%rsp	# Linux kernel stack can be incorrectly seeded
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,-0xa8($key_)		# offload everything
+	movaps	%xmm7,-0x98($key_)
+	movaps	%xmm8,-0x88($key_)
+	movaps	%xmm9,-0x78($key_)
+	movaps	%xmm10,-0x68($key_)
+	movaps	%xmm11,-0x58($key_)
+	movaps	%xmm12,-0x48($key_)
+	movaps	%xmm13,-0x38($key_)
+	movaps	%xmm14,-0x28($key_)
+	movaps	%xmm15,-0x18($key_)
+.Lctr32_body:
+___
+$code.=<<___;
+
+	# 8 16-byte words on top of stack are counter values
+	# xor-ed with zero-round key
+
+	movdqu	($ivp),$inout0
+	movdqu	($key),$rndkey0
+	mov	12($ivp),$ctr			# counter LSB
+	pxor	$rndkey0,$inout0
+	mov	12($key),$key0			# 0-round key LSB
+	movdqa	$inout0,0x00(%rsp)		# populate counter block
+	bswap	$ctr
+	movdqa	$inout0,$inout1
+	movdqa	$inout0,$inout2
+	movdqa	$inout0,$inout3
+	movdqa	$inout0,0x40(%rsp)
+	movdqa	$inout0,0x50(%rsp)
+	movdqa	$inout0,0x60(%rsp)
+	mov	%rdx,%r10			# about to borrow %rdx
+	movdqa	$inout0,0x70(%rsp)
+
+	lea	1($ctr),%rax
+	 lea	2($ctr),%rdx
+	bswap	%eax
+	 bswap	%edx
+	xor	$key0,%eax
+	 xor	$key0,%edx
+	pinsrd	\$3,%eax,$inout1
+	lea	3($ctr),%rax
+	movdqa	$inout1,0x10(%rsp)
+	 pinsrd	\$3,%edx,$inout2
+	bswap	%eax
+	 mov	%r10,%rdx			# restore %rdx
+	 lea	4($ctr),%r10
+	 movdqa	$inout2,0x20(%rsp)
+	xor	$key0,%eax
+	 bswap	%r10d
+	pinsrd	\$3,%eax,$inout3
+	 xor	$key0,%r10d
+	movdqa	$inout3,0x30(%rsp)
+	lea	5($ctr),%r9
+	 mov	%r10d,0x40+12(%rsp)
+	bswap	%r9d
+	 lea	6($ctr),%r10
+	mov	240($key),$rounds		# key->rounds
+	xor	$key0,%r9d
+	 bswap	%r10d
+	mov	%r9d,0x50+12(%rsp)
+	 xor	$key0,%r10d
+	lea	7($ctr),%r9
+	 mov	%r10d,0x60+12(%rsp)
+	bswap	%r9d
+	movq	OPENSSL_ia32cap_addr(%rip),%r10
+	 mov	4(%r10),%r10d
+	xor	$key0,%r9d
+	 and	\$`1<<26|1<<22`,%r10d		# isolate XSAVE+MOVBE
+	mov	%r9d,0x70+12(%rsp)
+
+	$movkey	0x10($key),$rndkey1
+
+	movdqa	0x40(%rsp),$inout4
+	movdqa	0x50(%rsp),$inout5
+
+	cmp	\$8,$len		# $len is in blocks
+	jb	.Lctr32_tail		# short input if ($len<8)
+
+	sub	\$6,$len		# $len is biased by -6
+	cmp	\$`1<<22`,%r10d		# check for MOVBE without XSAVE
+	je	.Lctr32_6x		# [which denotes Atom Silvermont]
+
+	lea	0x80($key),$key		# size optimization
+	sub	\$2,$len		# $len is biased by -8
+	jmp	.Lctr32_loop8
+
+.align	16
+.Lctr32_6x:
+	shl	\$4,$rounds
+	mov	\$48,$rnds_
+	bswap	$key0
+	lea	32($key,$rounds),$key	# end of key schedule
+	sub	%rax,%r10		# twisted $rounds
+	jmp	.Lctr32_loop6
+
+.align	16
+.Lctr32_loop6:
+	 add	\$6,$ctr		# next counter value
+	$movkey	-48($key,$rnds_),$rndkey0
+	aesenc	$rndkey1,$inout0
+	 mov	$ctr,%eax
+	 xor	$key0,%eax
+	aesenc	$rndkey1,$inout1
+	 movbe	%eax,`0x00+12`(%rsp)	# store next counter value
+	 lea	1($ctr),%eax
+	aesenc	$rndkey1,$inout2
+	 xor	$key0,%eax
+	 movbe	%eax,`0x10+12`(%rsp)
+	aesenc	$rndkey1,$inout3
+	 lea	2($ctr),%eax
+	 xor	$key0,%eax
+	aesenc	$rndkey1,$inout4
+	 movbe	%eax,`0x20+12`(%rsp)
+	 lea	3($ctr),%eax
+	aesenc	$rndkey1,$inout5
+	$movkey	-32($key,$rnds_),$rndkey1
+	 xor	$key0,%eax
+
+	aesenc	$rndkey0,$inout0
+	 movbe	%eax,`0x30+12`(%rsp)
+	 lea	4($ctr),%eax
+	aesenc	$rndkey0,$inout1
+	 xor	$key0,%eax
+	 movbe	%eax,`0x40+12`(%rsp)
+	aesenc	$rndkey0,$inout2
+	 lea	5($ctr),%eax
+	 xor	$key0,%eax
+	aesenc	$rndkey0,$inout3
+	 movbe	%eax,`0x50+12`(%rsp)
+	 mov	%r10,%rax		# mov	$rnds_,$rounds
+	aesenc	$rndkey0,$inout4
+	aesenc	$rndkey0,$inout5
+	$movkey	-16($key,$rnds_),$rndkey0
+
+	call	.Lenc_loop6
+
+	movdqu	($inp),$inout6		# load 6 input blocks
+	movdqu	0x10($inp),$inout7
+	movdqu	0x20($inp),$in0
+	movdqu	0x30($inp),$in1
+	movdqu	0x40($inp),$in2
+	movdqu	0x50($inp),$in3
+	lea	0x60($inp),$inp		# $inp+=6*16
+	$movkey	-64($key,$rnds_),$rndkey1
+	pxor	$inout0,$inout6		# inp^=E(ctr)
+	movaps	0x00(%rsp),$inout0	# load next counter [xor-ed with 0 round]
+	pxor	$inout1,$inout7
+	movaps	0x10(%rsp),$inout1
+	pxor	$inout2,$in0
+	movaps	0x20(%rsp),$inout2
+	pxor	$inout3,$in1
+	movaps	0x30(%rsp),$inout3
+	pxor	$inout4,$in2
+	movaps	0x40(%rsp),$inout4
+	pxor	$inout5,$in3
+	movaps	0x50(%rsp),$inout5
+	movdqu	$inout6,($out)		# store 6 output blocks
+	movdqu	$inout7,0x10($out)
+	movdqu	$in0,0x20($out)
+	movdqu	$in1,0x30($out)
+	movdqu	$in2,0x40($out)
+	movdqu	$in3,0x50($out)
+	lea	0x60($out),$out		# $out+=6*16
+
+	sub	\$6,$len
+	jnc	.Lctr32_loop6		# loop if $len-=6 didn't borrow
+
+	add	\$6,$len		# restore real remaining $len
+	jz	.Lctr32_done		# done if ($len==0)
+
+	lea	-48($rnds_),$rounds
+	lea	-80($key,$rnds_),$key	# restore $key
+	neg	$rounds
+	shr	\$4,$rounds		# restore $rounds
+	jmp	.Lctr32_tail
+
+.align	32
+.Lctr32_loop8:
+	 add		\$8,$ctr		# next counter value
+	movdqa		0x60(%rsp),$inout6
+	aesenc		$rndkey1,$inout0
+	 mov		$ctr,%r9d
+	movdqa		0x70(%rsp),$inout7
+	aesenc		$rndkey1,$inout1
+	 bswap		%r9d
+	$movkey		0x20-0x80($key),$rndkey0
+	aesenc		$rndkey1,$inout2
+	 xor		$key0,%r9d
+	 nop
+	aesenc		$rndkey1,$inout3
+	 mov		%r9d,0x00+12(%rsp)	# store next counter value
+	 lea		1($ctr),%r9
+	aesenc		$rndkey1,$inout4
+	aesenc		$rndkey1,$inout5
+	aesenc		$rndkey1,$inout6
+	aesenc		$rndkey1,$inout7
+	$movkey		0x30-0x80($key),$rndkey1
+___
+for($i=2;$i<8;$i++) {
+my $rndkeyx = ($i&1)?$rndkey1:$rndkey0;
+$code.=<<___;
+	 bswap		%r9d
+	aesenc		$rndkeyx,$inout0
+	aesenc		$rndkeyx,$inout1
+	 xor		$key0,%r9d
+	 .byte		0x66,0x90		# 2-byte nop, xchg %ax,%ax
+	aesenc		$rndkeyx,$inout2
+	aesenc		$rndkeyx,$inout3
+	 mov		%r9d,`0x10*($i-1)`+12(%rsp)
+	 lea		$i($ctr),%r9
+	aesenc		$rndkeyx,$inout4
+	aesenc		$rndkeyx,$inout5
+	aesenc		$rndkeyx,$inout6
+	aesenc		$rndkeyx,$inout7
+	$movkey		`0x20+0x10*$i`-0x80($key),$rndkeyx
+___
+}
+$code.=<<___;
+	 bswap		%r9d
+	aesenc		$rndkey0,$inout0
+	aesenc		$rndkey0,$inout1
+	aesenc		$rndkey0,$inout2
+	 xor		$key0,%r9d
+	 movdqu		0x00($inp),$in0		# start loading input
+	aesenc		$rndkey0,$inout3
+	 mov		%r9d,0x70+12(%rsp)
+	 cmp		\$11,$rounds
+	aesenc		$rndkey0,$inout4
+	aesenc		$rndkey0,$inout5
+	aesenc		$rndkey0,$inout6
+	aesenc		$rndkey0,$inout7
+	$movkey		0xa0-0x80($key),$rndkey0
+
+	jb		.Lctr32_enc_done
+
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	aesenc		$rndkey1,$inout4
+	aesenc		$rndkey1,$inout5
+	aesenc		$rndkey1,$inout6
+	aesenc		$rndkey1,$inout7
+	$movkey		0xb0-0x80($key),$rndkey1
+
+	aesenc		$rndkey0,$inout0
+	aesenc		$rndkey0,$inout1
+	aesenc		$rndkey0,$inout2
+	aesenc		$rndkey0,$inout3
+	aesenc		$rndkey0,$inout4
+	aesenc		$rndkey0,$inout5
+	aesenc		$rndkey0,$inout6
+	aesenc		$rndkey0,$inout7
+	$movkey		0xc0-0x80($key),$rndkey0
+	je		.Lctr32_enc_done
+
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	aesenc		$rndkey1,$inout4
+	aesenc		$rndkey1,$inout5
+	aesenc		$rndkey1,$inout6
+	aesenc		$rndkey1,$inout7
+	$movkey		0xd0-0x80($key),$rndkey1
+
+	aesenc		$rndkey0,$inout0
+	aesenc		$rndkey0,$inout1
+	aesenc		$rndkey0,$inout2
+	aesenc		$rndkey0,$inout3
+	aesenc		$rndkey0,$inout4
+	aesenc		$rndkey0,$inout5
+	aesenc		$rndkey0,$inout6
+	aesenc		$rndkey0,$inout7
+	$movkey		0xe0-0x80($key),$rndkey0
+	jmp		.Lctr32_enc_done
+
+.align	16
+.Lctr32_enc_done:
+	movdqu		0x10($inp),$in1
+	pxor		$rndkey0,$in0		# input^=round[last]
+	movdqu		0x20($inp),$in2
+	pxor		$rndkey0,$in1
+	movdqu		0x30($inp),$in3
+	pxor		$rndkey0,$in2
+	movdqu		0x40($inp),$in4
+	pxor		$rndkey0,$in3
+	movdqu		0x50($inp),$in5
+	pxor		$rndkey0,$in4
+	pxor		$rndkey0,$in5
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	aesenc		$rndkey1,$inout4
+	aesenc		$rndkey1,$inout5
+	aesenc		$rndkey1,$inout6
+	aesenc		$rndkey1,$inout7
+	movdqu		0x60($inp),$rndkey1	# borrow $rndkey1 for inp[6]
+	lea		0x80($inp),$inp		# $inp+=8*16
+
+	aesenclast	$in0,$inout0		# $inN is inp[N]^round[last]
+	pxor		$rndkey0,$rndkey1	# borrowed $rndkey
+	movdqu		0x70-0x80($inp),$in0
+	aesenclast	$in1,$inout1
+	pxor		$rndkey0,$in0
+	movdqa		0x00(%rsp),$in1		# load next counter block
+	aesenclast	$in2,$inout2
+	aesenclast	$in3,$inout3
+	movdqa		0x10(%rsp),$in2
+	movdqa		0x20(%rsp),$in3
+	aesenclast	$in4,$inout4
+	aesenclast	$in5,$inout5
+	movdqa		0x30(%rsp),$in4
+	movdqa		0x40(%rsp),$in5
+	aesenclast	$rndkey1,$inout6
+	movdqa		0x50(%rsp),$rndkey0
+	$movkey		0x10-0x80($key),$rndkey1	# real 1st-round key
+	aesenclast	$in0,$inout7
+
+	movups		$inout0,($out)		# store 8 output blocks
+	movdqa		$in1,$inout0
+	movups		$inout1,0x10($out)
+	movdqa		$in2,$inout1
+	movups		$inout2,0x20($out)
+	movdqa		$in3,$inout2
+	movups		$inout3,0x30($out)
+	movdqa		$in4,$inout3
+	movups		$inout4,0x40($out)
+	movdqa		$in5,$inout4
+	movups		$inout5,0x50($out)
+	movdqa		$rndkey0,$inout5
+	movups		$inout6,0x60($out)
+	movups		$inout7,0x70($out)
+	lea		0x80($out),$out		# $out+=8*16
+
+	sub	\$8,$len
+	jnc	.Lctr32_loop8			# loop if $len-=8 didn't borrow
+
+	add	\$8,$len			# restore real remaining $len
+	jz	.Lctr32_done			# done if ($len==0)
+	lea	-0x80($key),$key
+
+.Lctr32_tail:
+	# note that at this point $inout0..5 are populated with
+	# counter values xor-ed with 0-round key
+	lea	16($key),$key
+	cmp	\$4,$len
+	jb	.Lctr32_loop3
+	je	.Lctr32_loop4
+
+	# if ($len>4) compute 7 E(counter)
+	shl		\$4,$rounds
+	movdqa		0x60(%rsp),$inout6
+	pxor		$inout7,$inout7
+
+	$movkey		16($key),$rndkey0
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	lea		32-16($key,$rounds),$key# prepare for .Lenc_loop8_enter
+	neg		%rax
+	aesenc		$rndkey1,$inout2
+	add		\$16,%rax		# prepare for .Lenc_loop8_enter
+	 movups		($inp),$in0
+	aesenc		$rndkey1,$inout3
+	aesenc		$rndkey1,$inout4
+	 movups		0x10($inp),$in1		# pre-load input
+	 movups		0x20($inp),$in2
+	aesenc		$rndkey1,$inout5
+	aesenc		$rndkey1,$inout6
+
+	call	.Lenc_loop8_enter
+
+	movdqu	0x30($inp),$in3
+	pxor	$in0,$inout0
+	movdqu	0x40($inp),$in0
+	pxor	$in1,$inout1
+	movdqu	$inout0,($out)			# store output
+	pxor	$in2,$inout2
+	movdqu	$inout1,0x10($out)
+	pxor	$in3,$inout3
+	movdqu	$inout2,0x20($out)
+	pxor	$in0,$inout4
+	movdqu	$inout3,0x30($out)
+	movdqu	$inout4,0x40($out)
+	cmp	\$6,$len
+	jb	.Lctr32_done			# $len was 5, stop store
+
+	movups	0x50($inp),$in1
+	xorps	$in1,$inout5
+	movups	$inout5,0x50($out)
+	je	.Lctr32_done			# $len was 6, stop store
+
+	movups	0x60($inp),$in2
+	xorps	$in2,$inout6
+	movups	$inout6,0x60($out)
+	jmp	.Lctr32_done			# $len was 7, stop store
+
+.align	32
+.Lctr32_loop4:
+	aesenc		$rndkey1,$inout0
+	lea		16($key),$key
+	dec		$rounds
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	$movkey		($key),$rndkey1
+	jnz		.Lctr32_loop4
+	aesenclast	$rndkey1,$inout0
+	aesenclast	$rndkey1,$inout1
+	 movups		($inp),$in0		# load input
+	 movups		0x10($inp),$in1
+	aesenclast	$rndkey1,$inout2
+	aesenclast	$rndkey1,$inout3
+	 movups		0x20($inp),$in2
+	 movups		0x30($inp),$in3
+
+	xorps	$in0,$inout0
+	movups	$inout0,($out)			# store output
+	xorps	$in1,$inout1
+	movups	$inout1,0x10($out)
+	pxor	$in2,$inout2
+	movdqu	$inout2,0x20($out)
+	pxor	$in3,$inout3
+	movdqu	$inout3,0x30($out)
+	jmp	.Lctr32_done			# $len was 4, stop store
+
+.align	32
+.Lctr32_loop3:
+	aesenc		$rndkey1,$inout0
+	lea		16($key),$key
+	dec		$rounds
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	$movkey		($key),$rndkey1
+	jnz		.Lctr32_loop3
+	aesenclast	$rndkey1,$inout0
+	aesenclast	$rndkey1,$inout1
+	aesenclast	$rndkey1,$inout2
+
+	movups	($inp),$in0			# load input
+	xorps	$in0,$inout0
+	movups	$inout0,($out)			# store output
+	cmp	\$2,$len
+	jb	.Lctr32_done			# $len was 1, stop store
+
+	movups	0x10($inp),$in1
+	xorps	$in1,$inout1
+	movups	$inout1,0x10($out)
+	je	.Lctr32_done			# $len was 2, stop store
+
+	movups	0x20($inp),$in2
+	xorps	$in2,$inout2
+	movups	$inout2,0x20($out)		# $len was 3, stop store
+
+.Lctr32_done:
+	xorps	%xmm0,%xmm0			# clear register bank
+	xor	$key0,$key0
+	pxor	%xmm1,%xmm1
+	pxor	%xmm2,%xmm2
+	pxor	%xmm3,%xmm3
+	pxor	%xmm4,%xmm4
+	pxor	%xmm5,%xmm5
+___
+$code.=<<___ if (!$win64);
+	pxor	%xmm6,%xmm6
+	pxor	%xmm7,%xmm7
+	movaps	%xmm0,0x00(%rsp)		# clear stack
+	pxor	%xmm8,%xmm8
+	movaps	%xmm0,0x10(%rsp)
+	pxor	%xmm9,%xmm9
+	movaps	%xmm0,0x20(%rsp)
+	pxor	%xmm10,%xmm10
+	movaps	%xmm0,0x30(%rsp)
+	pxor	%xmm11,%xmm11
+	movaps	%xmm0,0x40(%rsp)
+	pxor	%xmm12,%xmm12
+	movaps	%xmm0,0x50(%rsp)
+	pxor	%xmm13,%xmm13
+	movaps	%xmm0,0x60(%rsp)
+	pxor	%xmm14,%xmm14
+	movaps	%xmm0,0x70(%rsp)
+	pxor	%xmm15,%xmm15
+___
+$code.=<<___ if ($win64);
+	movaps	-0xa8($key_),%xmm6
+	movaps	%xmm0,-0xa8($key_)		# clear stack
+	movaps	-0x98($key_),%xmm7
+	movaps	%xmm0,-0x98($key_)
+	movaps	-0x88($key_),%xmm8
+	movaps	%xmm0,-0x88($key_)
+	movaps	-0x78($key_),%xmm9
+	movaps	%xmm0,-0x78($key_)
+	movaps	-0x68($key_),%xmm10
+	movaps	%xmm0,-0x68($key_)
+	movaps	-0x58($key_),%xmm11
+	movaps	%xmm0,-0x58($key_)
+	movaps	-0x48($key_),%xmm12
+	movaps	%xmm0,-0x48($key_)
+	movaps	-0x38($key_),%xmm13
+	movaps	%xmm0,-0x38($key_)
+	movaps	-0x28($key_),%xmm14
+	movaps	%xmm0,-0x28($key_)
+	movaps	-0x18($key_),%xmm15
+	movaps	%xmm0,-0x18($key_)
+	movaps	%xmm0,0x00(%rsp)
+	movaps	%xmm0,0x10(%rsp)
+	movaps	%xmm0,0x20(%rsp)
+	movaps	%xmm0,0x30(%rsp)
+	movaps	%xmm0,0x40(%rsp)
+	movaps	%xmm0,0x50(%rsp)
+	movaps	%xmm0,0x60(%rsp)
+	movaps	%xmm0,0x70(%rsp)
+___
+$code.=<<___;
+	mov	-8($key_),%rbp
+	lea	($key_),%rsp
+.Lctr32_epilogue:
+	ret
+.size	aesni_ctr32_encrypt_blocks,.-aesni_ctr32_encrypt_blocks
+___
+}
+
+######################################################################
+# void aesni_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+#	const AES_KEY *key1, const AES_KEY *key2,
+#	const unsigned char iv[16]);
+#
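+# XTS-AES in outline (a sketch, not generated code; alpha is x in
+# GF(2^128) with the x^128+x^7+x^2+x+1 reduction behind .Lxts_magic):
+#
+#	T = E_key2(iv);				/* tweak from sector IV */
+#	for (each 16-byte block i)
+#		out[i] = E_key1(in[i] ^ T) ^ T, T *= alpha;
+#
+# a trailing partial block is handled by ciphertext stealing in
+# .Lxts_[en|de]c_steal.
+#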
+{
+my @tweak=map("%xmm$_",(10..15));
+my ($twmask,$twres,$twtmp)=("%xmm8","%xmm9",@tweak[4]);
+my ($key2,$ivp,$len_)=("%r8","%r9","%r9");
+my $frame_size = 0x70 + ($win64?160:0);
+my $key_ = "%rbp";	# override so that we can use %r11 as FP
+
+$code.=<<___;
+.globl	aesni_xts_encrypt
+.type	aesni_xts_encrypt,\@function,6
+.align	16
+aesni_xts_encrypt:
+	lea	(%rsp),%r11			# frame pointer
+	push	%rbp
+	sub	\$$frame_size,%rsp
+	and	\$-16,%rsp	# Linux kernel stack can be incorrectly seeded
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,-0xa8(%r11)		# offload everything
+	movaps	%xmm7,-0x98(%r11)
+	movaps	%xmm8,-0x88(%r11)
+	movaps	%xmm9,-0x78(%r11)
+	movaps	%xmm10,-0x68(%r11)
+	movaps	%xmm11,-0x58(%r11)
+	movaps	%xmm12,-0x48(%r11)
+	movaps	%xmm13,-0x38(%r11)
+	movaps	%xmm14,-0x28(%r11)
+	movaps	%xmm15,-0x18(%r11)
+.Lxts_enc_body:
+___
+$code.=<<___;
+	movups	($ivp),$inout0			# load clear-text tweak
+	mov	240(%r8),$rounds		# key2->rounds
+	mov	240($key),$rnds_		# key1->rounds
+___
+	# generate the tweak
+	&aesni_generate1("enc",$key2,$rounds,$inout0);
+$code.=<<___;
+	$movkey	($key),$rndkey0			# zero round key
+	mov	$key,$key_			# backup $key
+	mov	$rnds_,$rounds			# backup $rounds
+	shl	\$4,$rnds_
+	mov	$len,$len_			# backup $len
+	and	\$-16,$len
+
+	$movkey	16($key,$rnds_),$rndkey1	# last round key
+
+	movdqa	.Lxts_magic(%rip),$twmask
+	movdqa	$inout0,@tweak[5]
+	pshufd	\$0x5f,$inout0,$twres
+	pxor	$rndkey0,$rndkey1
+___
+    # alternative tweak calculation algorithm is based on suggestions
+    # by Shay Gueron. psrad doesn't conflict with AES-NI instructions
+    # and should help in the future...
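+    # in scalar terms each doubling below is, branch-free (a
+    # descriptive gloss, not new code):
+    #	mask = msb(T) ? 0x87 : 0;	# psrad broadcasts sign bits
+    #	T = (T << 1) ^ mask;		# paddq/pand/pxor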
+    for ($i=0;$i<4;$i++) {
+    $code.=<<___;
+	movdqa	$twres,$twtmp
+	paddd	$twres,$twres
+	movdqa	@tweak[5],@tweak[$i]
+	psrad	\$31,$twtmp			# broadcast upper bits
+	paddq	@tweak[5],@tweak[5]
+	pand	$twmask,$twtmp
+	pxor	$rndkey0,@tweak[$i]
+	pxor	$twtmp,@tweak[5]
+___
+    }
+$code.=<<___;
+	movdqa	@tweak[5],@tweak[4]
+	psrad	\$31,$twres
+	paddq	@tweak[5],@tweak[5]
+	pand	$twmask,$twres
+	pxor	$rndkey0,@tweak[4]
+	pxor	$twres,@tweak[5]
+	movaps	$rndkey1,0x60(%rsp)		# save round[0]^round[last]
+
+	sub	\$16*6,$len
+	jc	.Lxts_enc_short			# if $len-=6*16 borrowed
+
+	mov	\$16+96,$rounds
+	lea	32($key_,$rnds_),$key		# end of key schedule
+	sub	%r10,%rax			# twisted $rounds
+	$movkey	16($key_),$rndkey1
+	mov	%rax,%r10			# backup twisted $rounds
+	lea	.Lxts_magic(%rip),%r8
+	jmp	.Lxts_enc_grandloop
+
+.align	32
+.Lxts_enc_grandloop:
+	movdqu	`16*0`($inp),$inout0		# load input
+	movdqa	$rndkey0,$twmask
+	movdqu	`16*1`($inp),$inout1
+	pxor	@tweak[0],$inout0		# input^=tweak^round[0]
+	movdqu	`16*2`($inp),$inout2
+	pxor	@tweak[1],$inout1
+	 aesenc		$rndkey1,$inout0
+	movdqu	`16*3`($inp),$inout3
+	pxor	@tweak[2],$inout2
+	 aesenc		$rndkey1,$inout1
+	movdqu	`16*4`($inp),$inout4
+	pxor	@tweak[3],$inout3
+	 aesenc		$rndkey1,$inout2
+	movdqu	`16*5`($inp),$inout5
+	pxor	@tweak[5],$twmask		# round[0]^=tweak[5]
+	 movdqa	0x60(%rsp),$twres		# load round[0]^round[last]
+	pxor	@tweak[4],$inout4
+	 aesenc		$rndkey1,$inout3
+	$movkey	32($key_),$rndkey0
+	lea	`16*6`($inp),$inp
+	pxor	$twmask,$inout5
+
+	 pxor	$twres,@tweak[0]		# calculate tweaks^round[last]
+	aesenc		$rndkey1,$inout4
+	 pxor	$twres,@tweak[1]
+	 movdqa	@tweak[0],`16*0`(%rsp)		# put aside tweaks^round[last]
+	aesenc		$rndkey1,$inout5
+	$movkey		48($key_),$rndkey1
+	 pxor	$twres,@tweak[2]
+
+	aesenc		$rndkey0,$inout0
+	 pxor	$twres,@tweak[3]
+	 movdqa	@tweak[1],`16*1`(%rsp)
+	aesenc		$rndkey0,$inout1
+	 pxor	$twres,@tweak[4]
+	 movdqa	@tweak[2],`16*2`(%rsp)
+	aesenc		$rndkey0,$inout2
+	aesenc		$rndkey0,$inout3
+	 pxor	$twres,$twmask
+	 movdqa	@tweak[4],`16*4`(%rsp)
+	aesenc		$rndkey0,$inout4
+	aesenc		$rndkey0,$inout5
+	$movkey		64($key_),$rndkey0
+	 movdqa	$twmask,`16*5`(%rsp)
+	pshufd	\$0x5f,@tweak[5],$twres
+	jmp	.Lxts_enc_loop6
+.align	32
+.Lxts_enc_loop6:
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	aesenc		$rndkey1,$inout4
+	aesenc		$rndkey1,$inout5
+	$movkey		-64($key,%rax),$rndkey1
+	add		\$32,%rax
+
+	aesenc		$rndkey0,$inout0
+	aesenc		$rndkey0,$inout1
+	aesenc		$rndkey0,$inout2
+	aesenc		$rndkey0,$inout3
+	aesenc		$rndkey0,$inout4
+	aesenc		$rndkey0,$inout5
+	$movkey		-80($key,%rax),$rndkey0
+	jnz		.Lxts_enc_loop6
+
+	movdqa	(%r8),$twmask			# start calculating next tweak
+	movdqa	$twres,$twtmp
+	paddd	$twres,$twres
+	 aesenc		$rndkey1,$inout0
+	paddq	@tweak[5],@tweak[5]
+	psrad	\$31,$twtmp
+	 aesenc		$rndkey1,$inout1
+	pand	$twmask,$twtmp
+	$movkey	($key_),@tweak[0]		# load round[0]
+	 aesenc		$rndkey1,$inout2
+	 aesenc		$rndkey1,$inout3
+	 aesenc		$rndkey1,$inout4
+	pxor	$twtmp,@tweak[5]
+	movaps	@tweak[0],@tweak[1]		# copy round[0]
+	 aesenc		$rndkey1,$inout5
+	 $movkey	-64($key),$rndkey1
+
+	movdqa	$twres,$twtmp
+	 aesenc		$rndkey0,$inout0
+	paddd	$twres,$twres
+	pxor	@tweak[5],@tweak[0]
+	 aesenc		$rndkey0,$inout1
+	psrad	\$31,$twtmp
+	paddq	@tweak[5],@tweak[5]
+	 aesenc		$rndkey0,$inout2
+	 aesenc		$rndkey0,$inout3
+	pand	$twmask,$twtmp
+	movaps	@tweak[1],@tweak[2]
+	 aesenc		$rndkey0,$inout4
+	pxor	$twtmp,@tweak[5]
+	movdqa	$twres,$twtmp
+	 aesenc		$rndkey0,$inout5
+	 $movkey	-48($key),$rndkey0
+
+	paddd	$twres,$twres
+	 aesenc		$rndkey1,$inout0
+	pxor	@tweak[5],@tweak[1]
+	psrad	\$31,$twtmp
+	 aesenc		$rndkey1,$inout1
+	paddq	@tweak[5],@tweak[5]
+	pand	$twmask,$twtmp
+	 aesenc		$rndkey1,$inout2
+	 aesenc		$rndkey1,$inout3
+	 movdqa	@tweak[3],`16*3`(%rsp)
+	pxor	$twtmp,@tweak[5]
+	 aesenc		$rndkey1,$inout4
+	movaps	@tweak[2],@tweak[3]
+	movdqa	$twres,$twtmp
+	 aesenc		$rndkey1,$inout5
+	 $movkey	-32($key),$rndkey1
+
+	paddd	$twres,$twres
+	 aesenc		$rndkey0,$inout0
+	pxor	@tweak[5],@tweak[2]
+	psrad	\$31,$twtmp
+	 aesenc		$rndkey0,$inout1
+	paddq	@tweak[5],@tweak[5]
+	pand	$twmask,$twtmp
+	 aesenc		$rndkey0,$inout2
+	 aesenc		$rndkey0,$inout3
+	 aesenc		$rndkey0,$inout4
+	pxor	$twtmp,@tweak[5]
+	movaps	@tweak[3],@tweak[4]
+	 aesenc		$rndkey0,$inout5
+
+	movdqa	$twres,$rndkey0
+	paddd	$twres,$twres
+	 aesenc		$rndkey1,$inout0
+	pxor	@tweak[5],@tweak[3]
+	psrad	\$31,$rndkey0
+	 aesenc		$rndkey1,$inout1
+	paddq	@tweak[5],@tweak[5]
+	pand	$twmask,$rndkey0
+	 aesenc		$rndkey1,$inout2
+	 aesenc		$rndkey1,$inout3
+	pxor	$rndkey0,@tweak[5]
+	$movkey		($key_),$rndkey0
+	 aesenc		$rndkey1,$inout4
+	 aesenc		$rndkey1,$inout5
+	$movkey		16($key_),$rndkey1
+
+	pxor	@tweak[5],@tweak[4]
+	 aesenclast	`16*0`(%rsp),$inout0
+	psrad	\$31,$twres
+	paddq	@tweak[5],@tweak[5]
+	 aesenclast	`16*1`(%rsp),$inout1
+	 aesenclast	`16*2`(%rsp),$inout2
+	pand	$twmask,$twres
+	mov	%r10,%rax			# restore $rounds
+	 aesenclast	`16*3`(%rsp),$inout3
+	 aesenclast	`16*4`(%rsp),$inout4
+	 aesenclast	`16*5`(%rsp),$inout5
+	pxor	$twres,@tweak[5]
+
+	lea	`16*6`($out),$out		# $out+=6*16
+	movups	$inout0,`-16*6`($out)		# store 6 output blocks
+	movups	$inout1,`-16*5`($out)
+	movups	$inout2,`-16*4`($out)
+	movups	$inout3,`-16*3`($out)
+	movups	$inout4,`-16*2`($out)
+	movups	$inout5,`-16*1`($out)
+	sub	\$16*6,$len
+	jnc	.Lxts_enc_grandloop		# loop if $len-=6*16 didn't borrow
+
+	mov	\$16+96,$rounds
+	sub	$rnds_,$rounds
+	mov	$key_,$key			# restore $key
+	shr	\$4,$rounds			# restore original value
+
+.Lxts_enc_short:
+	# at this point @tweak[0..5] are populated with tweak values
+	mov	$rounds,$rnds_			# backup $rounds
+	pxor	$rndkey0,@tweak[0]
+	add	\$16*6,$len			# restore real remaining $len
+	jz	.Lxts_enc_done			# done if ($len==0)
+
+	pxor	$rndkey0,@tweak[1]
+	cmp	\$0x20,$len
+	jb	.Lxts_enc_one			# $len is 1*16
+	pxor	$rndkey0,@tweak[2]
+	je	.Lxts_enc_two			# $len is 2*16
+
+	pxor	$rndkey0,@tweak[3]
+	cmp	\$0x40,$len
+	jb	.Lxts_enc_three			# $len is 3*16
+	pxor	$rndkey0,@tweak[4]
+	je	.Lxts_enc_four			# $len is 4*16
+
+	movdqu	($inp),$inout0			# $len is 5*16
+	movdqu	16*1($inp),$inout1
+	movdqu	16*2($inp),$inout2
+	pxor	@tweak[0],$inout0
+	movdqu	16*3($inp),$inout3
+	pxor	@tweak[1],$inout1
+	movdqu	16*4($inp),$inout4
+	lea	16*5($inp),$inp			# $inp+=5*16
+	pxor	@tweak[2],$inout2
+	pxor	@tweak[3],$inout3
+	pxor	@tweak[4],$inout4
+	pxor	$inout5,$inout5
+
+	call	_aesni_encrypt6
+
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[5],@tweak[0]
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+	movdqu	$inout0,($out)			# store 5 output blocks
+	xorps	@tweak[3],$inout3
+	movdqu	$inout1,16*1($out)
+	xorps	@tweak[4],$inout4
+	movdqu	$inout2,16*2($out)
+	movdqu	$inout3,16*3($out)
+	movdqu	$inout4,16*4($out)
+	lea	16*5($out),$out			# $out+=5*16
+	jmp	.Lxts_enc_done
+
+.align	16
+.Lxts_enc_one:
+	movups	($inp),$inout0
+	lea	16*1($inp),$inp			# inp+=1*16
+	xorps	@tweak[0],$inout0
+___
+	&aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[1],@tweak[0]
+	movups	$inout0,($out)			# store one output block
+	lea	16*1($out),$out			# $out+=1*16
+	jmp	.Lxts_enc_done
+
+.align	16
+.Lxts_enc_two:
+	movups	($inp),$inout0
+	movups	16($inp),$inout1
+	lea	32($inp),$inp			# $inp+=2*16
+	xorps	@tweak[0],$inout0
+	xorps	@tweak[1],$inout1
+
+	call	_aesni_encrypt2
+
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[2],@tweak[0]
+	xorps	@tweak[1],$inout1
+	movups	$inout0,($out)			# store 2 output blocks
+	movups	$inout1,16*1($out)
+	lea	16*2($out),$out			# $out+=2*16
+	jmp	.Lxts_enc_done
+
+.align	16
+.Lxts_enc_three:
+	movups	($inp),$inout0
+	movups	16*1($inp),$inout1
+	movups	16*2($inp),$inout2
+	lea	16*3($inp),$inp			# $inp+=3*16
+	xorps	@tweak[0],$inout0
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+
+	call	_aesni_encrypt3
+
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[3],@tweak[0]
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+	movups	$inout0,($out)			# store 3 output blocks
+	movups	$inout1,16*1($out)
+	movups	$inout2,16*2($out)
+	lea	16*3($out),$out			# $out+=3*16
+	jmp	.Lxts_enc_done
+
+.align	16
+.Lxts_enc_four:
+	movups	($inp),$inout0
+	movups	16*1($inp),$inout1
+	movups	16*2($inp),$inout2
+	xorps	@tweak[0],$inout0
+	movups	16*3($inp),$inout3
+	lea	16*4($inp),$inp			# $inp+=4*16
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+	xorps	@tweak[3],$inout3
+
+	call	_aesni_encrypt4
+
+	pxor	@tweak[0],$inout0
+	movdqa	@tweak[4],@tweak[0]
+	pxor	@tweak[1],$inout1
+	pxor	@tweak[2],$inout2
+	movdqu	$inout0,($out)			# store 4 output blocks
+	pxor	@tweak[3],$inout3
+	movdqu	$inout1,16*1($out)
+	movdqu	$inout2,16*2($out)
+	movdqu	$inout3,16*3($out)
+	lea	16*4($out),$out			# $out+=4*16
+	jmp	.Lxts_enc_done
+
+.align	16
+.Lxts_enc_done:
+	and	\$15,$len_			# see if $len%16 is 0
+	jz	.Lxts_enc_ret
+	mov	$len_,$len
+
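+	# ciphertext stealing: each trailing plaintext byte displaces a
+	# leading byte of the last full ciphertext block; the displaced
+	# bytes become the short final block, and the stitched block is
+	# re-encrypted with the next tweak below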
+.Lxts_enc_steal:
+	movzb	($inp),%eax			# borrow $rounds ...
+	movzb	-16($out),%ecx			# ... and $key
+	lea	1($inp),$inp
+	mov	%al,-16($out)
+	mov	%cl,0($out)
+	lea	1($out),$out
+	sub	\$1,$len
+	jnz	.Lxts_enc_steal
+
+	sub	$len_,$out			# rewind $out
+	mov	$key_,$key			# restore $key
+	mov	$rnds_,$rounds			# restore $rounds
+
+	movups	-16($out),$inout0
+	xorps	@tweak[0],$inout0
+___
+	&aesni_generate1("enc",$key,$rounds);
+$code.=<<___;
+	xorps	@tweak[0],$inout0
+	movups	$inout0,-16($out)
+
+.Lxts_enc_ret:
+	xorps	%xmm0,%xmm0			# clear register bank
+	pxor	%xmm1,%xmm1
+	pxor	%xmm2,%xmm2
+	pxor	%xmm3,%xmm3
+	pxor	%xmm4,%xmm4
+	pxor	%xmm5,%xmm5
+___
+$code.=<<___ if (!$win64);
+	pxor	%xmm6,%xmm6
+	pxor	%xmm7,%xmm7
+	movaps	%xmm0,0x00(%rsp)		# clear stack
+	pxor	%xmm8,%xmm8
+	movaps	%xmm0,0x10(%rsp)
+	pxor	%xmm9,%xmm9
+	movaps	%xmm0,0x20(%rsp)
+	pxor	%xmm10,%xmm10
+	movaps	%xmm0,0x30(%rsp)
+	pxor	%xmm11,%xmm11
+	movaps	%xmm0,0x40(%rsp)
+	pxor	%xmm12,%xmm12
+	movaps	%xmm0,0x50(%rsp)
+	pxor	%xmm13,%xmm13
+	movaps	%xmm0,0x60(%rsp)
+	pxor	%xmm14,%xmm14
+	pxor	%xmm15,%xmm15
+___
+$code.=<<___ if ($win64);
+	movaps	-0xa8(%r11),%xmm6
+	movaps	%xmm0,-0xa8(%r11)		# clear stack
+	movaps	-0x98(%r11),%xmm7
+	movaps	%xmm0,-0x98(%r11)
+	movaps	-0x88(%r11),%xmm8
+	movaps	%xmm0,-0x88(%r11)
+	movaps	-0x78(%r11),%xmm9
+	movaps	%xmm0,-0x78(%r11)
+	movaps	-0x68(%r11),%xmm10
+	movaps	%xmm0,-0x68(%r11)
+	movaps	-0x58(%r11),%xmm11
+	movaps	%xmm0,-0x58(%r11)
+	movaps	-0x48(%r11),%xmm12
+	movaps	%xmm0,-0x48(%r11)
+	movaps	-0x38(%r11),%xmm13
+	movaps	%xmm0,-0x38(%r11)
+	movaps	-0x28(%r11),%xmm14
+	movaps	%xmm0,-0x28(%r11)
+	movaps	-0x18(%r11),%xmm15
+	movaps	%xmm0,-0x18(%r11)
+	movaps	%xmm0,0x00(%rsp)
+	movaps	%xmm0,0x10(%rsp)
+	movaps	%xmm0,0x20(%rsp)
+	movaps	%xmm0,0x30(%rsp)
+	movaps	%xmm0,0x40(%rsp)
+	movaps	%xmm0,0x50(%rsp)
+	movaps	%xmm0,0x60(%rsp)
+___
+$code.=<<___;
+	mov	-8(%r11),%rbp
+	lea	(%r11),%rsp
+.Lxts_enc_epilogue:
+	ret
+.size	aesni_xts_encrypt,.-aesni_xts_encrypt
+___
+
+$code.=<<___;
+.globl	aesni_xts_decrypt
+.type	aesni_xts_decrypt,\@function,6
+.align	16
+aesni_xts_decrypt:
+	lea	(%rsp),%r11			# frame pointer
+	push	%rbp
+	sub	\$$frame_size,%rsp
+	and	\$-16,%rsp	# Linux kernel stack can be incorrectly seeded
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,-0xa8(%r11)		# offload everything
+	movaps	%xmm7,-0x98(%r11)
+	movaps	%xmm8,-0x88(%r11)
+	movaps	%xmm9,-0x78(%r11)
+	movaps	%xmm10,-0x68(%r11)
+	movaps	%xmm11,-0x58(%r11)
+	movaps	%xmm12,-0x48(%r11)
+	movaps	%xmm13,-0x38(%r11)
+	movaps	%xmm14,-0x28(%r11)
+	movaps	%xmm15,-0x18(%r11)
+.Lxts_dec_body:
+___
+$code.=<<___;
+	movups	($ivp),$inout0			# load clear-text tweak
+	mov	240($key2),$rounds		# key2->rounds
+	mov	240($key),$rnds_		# key1->rounds
+___
+	# generate the tweak
+	&aesni_generate1("enc",$key2,$rounds,$inout0);
+$code.=<<___;
+	xor	%eax,%eax			# if ($len%16) len-=16;
+	test	\$15,$len
+	setnz	%al
+	shl	\$4,%rax
+	sub	%rax,$len
+
+	$movkey	($key),$rndkey0			# zero round key
+	mov	$key,$key_			# backup $key
+	mov	$rnds_,$rounds			# backup $rounds
+	shl	\$4,$rnds_
+	mov	$len,$len_			# backup $len
+	and	\$-16,$len
+
+	$movkey	16($key,$rnds_),$rndkey1	# last round key
+
+	movdqa	.Lxts_magic(%rip),$twmask
+	movdqa	$inout0,@tweak[5]
+	pshufd	\$0x5f,$inout0,$twres
+	pxor	$rndkey0,$rndkey1
+___
+    for ($i=0;$i<4;$i++) {
+    $code.=<<___;
+	movdqa	$twres,$twtmp
+	paddd	$twres,$twres
+	movdqa	@tweak[5],@tweak[$i]
+	psrad	\$31,$twtmp			# broadcast upper bits
+	paddq	@tweak[5],@tweak[5]
+	pand	$twmask,$twtmp
+	pxor	$rndkey0,@tweak[$i]
+	pxor	$twtmp,@tweak[5]
+___
+    }
+$code.=<<___;
+	movdqa	@tweak[5],@tweak[4]
+	psrad	\$31,$twres
+	paddq	@tweak[5],@tweak[5]
+	pand	$twmask,$twres
+	pxor	$rndkey0,@tweak[4]
+	pxor	$twres,@tweak[5]
+	movaps	$rndkey1,0x60(%rsp)		# save round[0]^round[last]
+
+	sub	\$16*6,$len
+	jc	.Lxts_dec_short			# if $len-=6*16 borrowed
+
+	mov	\$16+96,$rounds
+	lea	32($key_,$rnds_),$key		# end of key schedule
+	sub	%r10,%rax			# twisted $rounds
+	$movkey	16($key_),$rndkey1
+	mov	%rax,%r10			# backup twisted $rounds
+	lea	.Lxts_magic(%rip),%r8
+	jmp	.Lxts_dec_grandloop
+
+.align	32
+.Lxts_dec_grandloop:
+	movdqu	`16*0`($inp),$inout0		# load input
+	movdqa	$rndkey0,$twmask
+	movdqu	`16*1`($inp),$inout1
+	pxor	@tweak[0],$inout0		# input^=tweak^round[0]
+	movdqu	`16*2`($inp),$inout2
+	pxor	@tweak[1],$inout1
+	 aesdec		$rndkey1,$inout0
+	movdqu	`16*3`($inp),$inout3
+	pxor	@tweak[2],$inout2
+	 aesdec		$rndkey1,$inout1
+	movdqu	`16*4`($inp),$inout4
+	pxor	@tweak[3],$inout3
+	 aesdec		$rndkey1,$inout2
+	movdqu	`16*5`($inp),$inout5
+	pxor	@tweak[5],$twmask		# round[0]^=tweak[5]
+	 movdqa	0x60(%rsp),$twres		# load round[0]^round[last]
+	pxor	@tweak[4],$inout4
+	 aesdec		$rndkey1,$inout3
+	$movkey	32($key_),$rndkey0
+	lea	`16*6`($inp),$inp
+	pxor	$twmask,$inout5
+
+	 pxor	$twres,@tweak[0]		# calculate tweaks^round[last]
+	aesdec		$rndkey1,$inout4
+	 pxor	$twres,@tweak[1]
+	 movdqa	@tweak[0],`16*0`(%rsp)		# put aside tweaks^last round key
+	aesdec		$rndkey1,$inout5
+	$movkey		48($key_),$rndkey1
+	 pxor	$twres,@tweak[2]
+
+	aesdec		$rndkey0,$inout0
+	 pxor	$twres,@tweak[3]
+	 movdqa	@tweak[1],`16*1`(%rsp)
+	aesdec		$rndkey0,$inout1
+	 pxor	$twres,@tweak[4]
+	 movdqa	@tweak[2],`16*2`(%rsp)
+	aesdec		$rndkey0,$inout2
+	aesdec		$rndkey0,$inout3
+	 pxor	$twres,$twmask
+	 movdqa	@tweak[4],`16*4`(%rsp)
+	aesdec		$rndkey0,$inout4
+	aesdec		$rndkey0,$inout5
+	$movkey		64($key_),$rndkey0
+	 movdqa	$twmask,`16*5`(%rsp)
+	pshufd	\$0x5f,@tweak[5],$twres
+	jmp	.Lxts_dec_loop6
+.align	32
+.Lxts_dec_loop6:
+	aesdec		$rndkey1,$inout0
+	aesdec		$rndkey1,$inout1
+	aesdec		$rndkey1,$inout2
+	aesdec		$rndkey1,$inout3
+	aesdec		$rndkey1,$inout4
+	aesdec		$rndkey1,$inout5
+	$movkey		-64($key,%rax),$rndkey1
+	add		\$32,%rax
+
+	aesdec		$rndkey0,$inout0
+	aesdec		$rndkey0,$inout1
+	aesdec		$rndkey0,$inout2
+	aesdec		$rndkey0,$inout3
+	aesdec		$rndkey0,$inout4
+	aesdec		$rndkey0,$inout5
+	$movkey		-80($key,%rax),$rndkey0
+	jnz		.Lxts_dec_loop6
+
+	movdqa	(%r8),$twmask			# start calculating next tweak
+	movdqa	$twres,$twtmp
+	paddd	$twres,$twres
+	 aesdec		$rndkey1,$inout0
+	paddq	@tweak[5],@tweak[5]
+	psrad	\$31,$twtmp
+	 aesdec		$rndkey1,$inout1
+	pand	$twmask,$twtmp
+	$movkey	($key_),@tweak[0]		# load round[0]
+	 aesdec		$rndkey1,$inout2
+	 aesdec		$rndkey1,$inout3
+	 aesdec		$rndkey1,$inout4
+	pxor	$twtmp,@tweak[5]
+	movaps	@tweak[0],@tweak[1]		# copy round[0]
+	 aesdec		$rndkey1,$inout5
+	 $movkey	-64($key),$rndkey1
+
+	movdqa	$twres,$twtmp
+	 aesdec		$rndkey0,$inout0
+	paddd	$twres,$twres
+	pxor	@tweak[5],@tweak[0]
+	 aesdec		$rndkey0,$inout1
+	psrad	\$31,$twtmp
+	paddq	@tweak[5],@tweak[5]
+	 aesdec		$rndkey0,$inout2
+	 aesdec		$rndkey0,$inout3
+	pand	$twmask,$twtmp
+	movaps	@tweak[1],@tweak[2]
+	 aesdec		$rndkey0,$inout4
+	pxor	$twtmp,@tweak[5]
+	movdqa	$twres,$twtmp
+	 aesdec		$rndkey0,$inout5
+	 $movkey	-48($key),$rndkey0
+
+	paddd	$twres,$twres
+	 aesdec		$rndkey1,$inout0
+	pxor	@tweak[5],@tweak[1]
+	psrad	\$31,$twtmp
+	 aesdec		$rndkey1,$inout1
+	paddq	@tweak[5],@tweak[5]
+	pand	$twmask,$twtmp
+	 aesdec		$rndkey1,$inout2
+	 aesdec		$rndkey1,$inout3
+	 movdqa	@tweak[3],`16*3`(%rsp)
+	pxor	$twtmp,@tweak[5]
+	 aesdec		$rndkey1,$inout4
+	movaps	@tweak[2],@tweak[3]
+	movdqa	$twres,$twtmp
+	 aesdec		$rndkey1,$inout5
+	 $movkey	-32($key),$rndkey1
+
+	paddd	$twres,$twres
+	 aesdec		$rndkey0,$inout0
+	pxor	@tweak[5],@tweak[2]
+	psrad	\$31,$twtmp
+	 aesdec		$rndkey0,$inout1
+	paddq	@tweak[5],@tweak[5]
+	pand	$twmask,$twtmp
+	 aesdec		$rndkey0,$inout2
+	 aesdec		$rndkey0,$inout3
+	 aesdec		$rndkey0,$inout4
+	pxor	$twtmp,@tweak[5]
+	movaps	@tweak[3],@tweak[4]
+	 aesdec		$rndkey0,$inout5
+
+	movdqa	$twres,$rndkey0
+	paddd	$twres,$twres
+	 aesdec		$rndkey1,$inout0
+	pxor	@tweak[5],@tweak[3]
+	psrad	\$31,$rndkey0
+	 aesdec		$rndkey1,$inout1
+	paddq	@tweak[5],@tweak[5]
+	pand	$twmask,$rndkey0
+	 aesdec		$rndkey1,$inout2
+	 aesdec		$rndkey1,$inout3
+	pxor	$rndkey0,@tweak[5]
+	$movkey		($key_),$rndkey0
+	 aesdec		$rndkey1,$inout4
+	 aesdec		$rndkey1,$inout5
+	$movkey		16($key_),$rndkey1
+
+	pxor	@tweak[5],@tweak[4]
+	 aesdeclast	`16*0`(%rsp),$inout0
+	psrad	\$31,$twres
+	paddq	@tweak[5],@tweak[5]
+	 aesdeclast	`16*1`(%rsp),$inout1
+	 aesdeclast	`16*2`(%rsp),$inout2
+	pand	$twmask,$twres
+	mov	%r10,%rax			# restore $rounds
+	 aesdeclast	`16*3`(%rsp),$inout3
+	 aesdeclast	`16*4`(%rsp),$inout4
+	 aesdeclast	`16*5`(%rsp),$inout5
+	pxor	$twres,@tweak[5]
+
+	lea	`16*6`($out),$out		# $out+=6*16
+	movups	$inout0,`-16*6`($out)		# store 6 output blocks
+	movups	$inout1,`-16*5`($out)
+	movups	$inout2,`-16*4`($out)
+	movups	$inout3,`-16*3`($out)
+	movups	$inout4,`-16*2`($out)
+	movups	$inout5,`-16*1`($out)
+	sub	\$16*6,$len
+	jnc	.Lxts_dec_grandloop		# loop if $len-=6*16 didn't borrow
+
+	mov	\$16+96,$rounds
+	sub	$rnds_,$rounds
+	mov	$key_,$key			# restore $key
+	shr	\$4,$rounds			# restore original value
+
+.Lxts_dec_short:
+	# at this point @tweak[0..5] are populated with tweak values
+	mov	$rounds,$rnds_			# backup $rounds
+	pxor	$rndkey0,@tweak[0]
+	pxor	$rndkey0,@tweak[1]
+	add	\$16*6,$len			# restore real remaining $len
+	jz	.Lxts_dec_done			# done if ($len==0)
+
+	pxor	$rndkey0,@tweak[2]
+	cmp	\$0x20,$len
+	jb	.Lxts_dec_one			# $len is 1*16
+	pxor	$rndkey0,@tweak[3]
+	je	.Lxts_dec_two			# $len is 2*16
+
+	pxor	$rndkey0,@tweak[4]
+	cmp	\$0x40,$len
+	jb	.Lxts_dec_three			# $len is 3*16
+	je	.Lxts_dec_four			# $len is 4*16
+
+	movdqu	($inp),$inout0			# $len is 5*16
+	movdqu	16*1($inp),$inout1
+	movdqu	16*2($inp),$inout2
+	pxor	@tweak[0],$inout0
+	movdqu	16*3($inp),$inout3
+	pxor	@tweak[1],$inout1
+	movdqu	16*4($inp),$inout4
+	lea	16*5($inp),$inp			# $inp+=5*16
+	pxor	@tweak[2],$inout2
+	pxor	@tweak[3],$inout3
+	pxor	@tweak[4],$inout4
+
+	call	_aesni_decrypt6
+
+	xorps	@tweak[0],$inout0
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+	movdqu	$inout0,($out)			# store 5 output blocks
+	xorps	@tweak[3],$inout3
+	movdqu	$inout1,16*1($out)
+	xorps	@tweak[4],$inout4
+	movdqu	$inout2,16*2($out)
+	 pxor		$twtmp,$twtmp
+	movdqu	$inout3,16*3($out)
+	 pcmpgtd	@tweak[5],$twtmp
+	movdqu	$inout4,16*4($out)
+	lea	16*5($out),$out			# $out+=5*16
+	 pshufd		\$0x13,$twtmp,@tweak[1]	# $twres
+	and	\$15,$len_
+	jz	.Lxts_dec_ret
+
+	movdqa	@tweak[5],@tweak[0]
+	paddq	@tweak[5],@tweak[5]		# psllq 1,$tweak
+	pand	$twmask,@tweak[1]		# isolate carry and residue
+	pxor	@tweak[5],@tweak[1]
+	jmp	.Lxts_dec_done2
+
+.align	16
+.Lxts_dec_one:
+	movups	($inp),$inout0
+	lea	16*1($inp),$inp			# $inp+=1*16
+	xorps	@tweak[0],$inout0
+___
+	&aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[1],@tweak[0]
+	movups	$inout0,($out)			# store one output block
+	movdqa	@tweak[2],@tweak[1]
+	lea	16*1($out),$out			# $out+=1*16
+	jmp	.Lxts_dec_done
+
+.align	16
+.Lxts_dec_two:
+	movups	($inp),$inout0
+	movups	16($inp),$inout1
+	lea	32($inp),$inp			# $inp+=2*16
+	xorps	@tweak[0],$inout0
+	xorps	@tweak[1],$inout1
+
+	call	_aesni_decrypt2
+
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[2],@tweak[0]
+	xorps	@tweak[1],$inout1
+	movdqa	@tweak[3],@tweak[1]
+	movups	$inout0,($out)			# store 2 output blocks
+	movups	$inout1,16*1($out)
+	lea	16*2($out),$out			# $out+=2*16
+	jmp	.Lxts_dec_done
+
+.align	16
+.Lxts_dec_three:
+	movups	($inp),$inout0
+	movups	16*1($inp),$inout1
+	movups	16*2($inp),$inout2
+	lea	16*3($inp),$inp			# $inp+=3*16
+	xorps	@tweak[0],$inout0
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+
+	call	_aesni_decrypt3
+
+	xorps	@tweak[0],$inout0
+	movdqa	@tweak[3],@tweak[0]
+	xorps	@tweak[1],$inout1
+	movdqa	@tweak[4],@tweak[1]
+	xorps	@tweak[2],$inout2
+	movups	$inout0,($out)			# store 3 output blocks
+	movups	$inout1,16*1($out)
+	movups	$inout2,16*2($out)
+	lea	16*3($out),$out			# $out+=3*16
+	jmp	.Lxts_dec_done
+
+.align	16
+.Lxts_dec_four:
+	movups	($inp),$inout0
+	movups	16*1($inp),$inout1
+	movups	16*2($inp),$inout2
+	xorps	@tweak[0],$inout0
+	movups	16*3($inp),$inout3
+	lea	16*4($inp),$inp			# $inp+=4*16
+	xorps	@tweak[1],$inout1
+	xorps	@tweak[2],$inout2
+	xorps	@tweak[3],$inout3
+
+	call	_aesni_decrypt4
+
+	pxor	@tweak[0],$inout0
+	movdqa	@tweak[4],@tweak[0]
+	pxor	@tweak[1],$inout1
+	movdqa	@tweak[5],@tweak[1]
+	pxor	@tweak[2],$inout2
+	movdqu	$inout0,($out)			# store 4 output blocks
+	pxor	@tweak[3],$inout3
+	movdqu	$inout1,16*1($out)
+	movdqu	$inout2,16*2($out)
+	movdqu	$inout3,16*3($out)
+	lea	16*4($out),$out			# $out+=4*16
+	jmp	.Lxts_dec_done
+
+.align	16
+.Lxts_dec_done:
+	and	\$15,$len_			# see if $len%16 is 0
+	jz	.Lxts_dec_ret
+.Lxts_dec_done2:
+	mov	$len_,$len
+	mov	$key_,$key			# restore $key
+	mov	$rnds_,$rounds			# restore $rounds
+
+	movups	($inp),$inout0
+	xorps	@tweak[1],$inout0
+___
+	&aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+	xorps	@tweak[1],$inout0
+	movups	$inout0,($out)
+
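+	# decrypt-side stealing: the block just stored was decrypted with
+	# the *next* tweak; the trailing ciphertext bytes now displace its
+	# leading bytes, the displaced plaintext bytes become the short
+	# final block, and the stitched block is re-decrypted with the
+	# current tweak below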
+.Lxts_dec_steal:
+	movzb	16($inp),%eax			# borrow $rounds ...
+	movzb	($out),%ecx			# ... and $key
+	lea	1($inp),$inp
+	mov	%al,($out)
+	mov	%cl,16($out)
+	lea	1($out),$out
+	sub	\$1,$len
+	jnz	.Lxts_dec_steal
+
+	sub	$len_,$out			# rewind $out
+	mov	$key_,$key			# restore $key
+	mov	$rnds_,$rounds			# restore $rounds
+
+	movups	($out),$inout0
+	xorps	@tweak[0],$inout0
+___
+	&aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+	xorps	@tweak[0],$inout0
+	movups	$inout0,($out)
+
+.Lxts_dec_ret:
+	xorps	%xmm0,%xmm0			# clear register bank
+	pxor	%xmm1,%xmm1
+	pxor	%xmm2,%xmm2
+	pxor	%xmm3,%xmm3
+	pxor	%xmm4,%xmm4
+	pxor	%xmm5,%xmm5
+___
+$code.=<<___ if (!$win64);
+	pxor	%xmm6,%xmm6
+	pxor	%xmm7,%xmm7
+	movaps	%xmm0,0x00(%rsp)		# clear stack
+	pxor	%xmm8,%xmm8
+	movaps	%xmm0,0x10(%rsp)
+	pxor	%xmm9,%xmm9
+	movaps	%xmm0,0x20(%rsp)
+	pxor	%xmm10,%xmm10
+	movaps	%xmm0,0x30(%rsp)
+	pxor	%xmm11,%xmm11
+	movaps	%xmm0,0x40(%rsp)
+	pxor	%xmm12,%xmm12
+	movaps	%xmm0,0x50(%rsp)
+	pxor	%xmm13,%xmm13
+	movaps	%xmm0,0x60(%rsp)
+	pxor	%xmm14,%xmm14
+	pxor	%xmm15,%xmm15
+___
+$code.=<<___ if ($win64);
+	movaps	-0xa8(%r11),%xmm6
+	movaps	%xmm0,-0xa8(%r11)		# clear stack
+	movaps	-0x98(%r11),%xmm7
+	movaps	%xmm0,-0x98(%r11)
+	movaps	-0x88(%r11),%xmm8
+	movaps	%xmm0,-0x88(%r11)
+	movaps	-0x78(%r11),%xmm9
+	movaps	%xmm0,-0x78(%r11)
+	movaps	-0x68(%r11),%xmm10
+	movaps	%xmm0,-0x68(%r11)
+	movaps	-0x58(%r11),%xmm11
+	movaps	%xmm0,-0x58(%r11)
+	movaps	-0x48(%r11),%xmm12
+	movaps	%xmm0,-0x48(%r11)
+	movaps	-0x38(%r11),%xmm13
+	movaps	%xmm0,-0x38(%r11)
+	movaps	-0x28(%r11),%xmm14
+	movaps	%xmm0,-0x28(%r11)
+	movaps	-0x18(%r11),%xmm15
+	movaps	%xmm0,-0x18(%r11)
+	movaps	%xmm0,0x00(%rsp)
+	movaps	%xmm0,0x10(%rsp)
+	movaps	%xmm0,0x20(%rsp)
+	movaps	%xmm0,0x30(%rsp)
+	movaps	%xmm0,0x40(%rsp)
+	movaps	%xmm0,0x50(%rsp)
+	movaps	%xmm0,0x60(%rsp)
+___
+$code.=<<___;
+	mov	-8(%r11),%rbp
+	lea	(%r11),%rsp
+.Lxts_dec_epilogue:
+	ret
+.size	aesni_xts_decrypt,.-aesni_xts_decrypt
+___
+}
+
+######################################################################
+# void aesni_ocb_[en|de]crypt(const char *inp, char *out, size_t blocks,
+#	const AES_KEY *key, unsigned int start_block_num,
+#	unsigned char offset_i[16], const unsigned char L_[][16],
+#	unsigned char checksum[16]);
+#
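+# A brief, non-authoritative note on the calling convention, inferred from
+# the code below: "blocks" counts whole 16-byte blocks, "start_block_num"
+# is the number of the first block being processed (its trailing-zero count
+# selects entries of the precomputed L_[] table), and both offset_i[] and
+# checksum[] are read on entry and written back on return, so a long
+# message can be processed incrementally across several calls.
+#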
+{
+my @offset=map("%xmm$_",(10..15));
+my ($checksum,$rndkey0l)=("%xmm8","%xmm9");
+my ($block_num,$offset_p)=("%r8","%r9");		# 5th and 6th arguments
+my ($L_p,$checksum_p) = ("%rbx","%rbp");
+my ($i1,$i3,$i5) = ("%r12","%r13","%r14");
+my $seventh_arg = $win64 ? 56 : 8;
+my $blocks = $len;
+
+$code.=<<___;
+.globl	aesni_ocb_encrypt
+.type	aesni_ocb_encrypt,\@function,6
+.align	32
+aesni_ocb_encrypt:
+	lea	(%rsp),%rax
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+___
+$code.=<<___ if ($win64);
+	lea	-0xa0(%rsp),%rsp
+	movaps	%xmm6,0x00(%rsp)		# offload everything
+	movaps	%xmm7,0x10(%rsp)
+	movaps	%xmm8,0x20(%rsp)
+	movaps	%xmm9,0x30(%rsp)
+	movaps	%xmm10,0x40(%rsp)
+	movaps	%xmm11,0x50(%rsp)
+	movaps	%xmm12,0x60(%rsp)
+	movaps	%xmm13,0x70(%rsp)
+	movaps	%xmm14,0x80(%rsp)
+	movaps	%xmm15,0x90(%rsp)
+.Locb_enc_body:
+___
+$code.=<<___;
+	mov	$seventh_arg(%rax),$L_p		# 7th argument
+	mov	$seventh_arg+8(%rax),$checksum_p# 8th argument
+
+	mov	240($key),$rnds_
+	mov	$key,$key_
+	shl	\$4,$rnds_
+	$movkey	($key),$rndkey0l		# round[0]
+	$movkey	16($key,$rnds_),$rndkey1	# round[last]
+
+	movdqu	($offset_p),@offset[5]		# load last offset_i
+	pxor	$rndkey1,$rndkey0l		# round[0] ^ round[last]
+	pxor	$rndkey1,@offset[5]		# offset_i ^ round[last]
+
+	mov	\$16+32,$rounds
+	lea	32($key_,$rnds_),$key
+	$movkey	16($key_),$rndkey1		# round[1]
+	sub	%r10,%rax			# twisted $rounds
+	mov	%rax,%r10			# backup twisted $rounds
+
+	movdqu	($L_p),@offset[0]		# L_0 for all odd-numbered blocks
+	movdqu	($checksum_p),$checksum		# load checksum
+
+	test	\$1,$block_num			# is first block number odd?
+	jnz	.Locb_enc_odd
+
+	bsf	$block_num,$i1
+	add	\$1,$block_num
+	shl	\$4,$i1
+	movdqu	($L_p,$i1),$inout5		# borrow
+	movdqu	($inp),$inout0
+	lea	16($inp),$inp
+
+	call	__ocb_encrypt1
+
+	movdqa	$inout5,@offset[5]
+	movups	$inout0,($out)
+	lea	16($out),$out
+	sub	\$1,$blocks
+	jz	.Locb_enc_done
+
+.Locb_enc_odd:
+	lea	1($block_num),$i1		# even-numbered blocks
+	lea	3($block_num),$i3
+	lea	5($block_num),$i5
+	lea	6($block_num),$block_num
+	bsf	$i1,$i1				# ntz(block)
+	bsf	$i3,$i3
+	bsf	$i5,$i5
+	shl	\$4,$i1				# ntz(block) -> table offset
+	shl	\$4,$i3
+	shl	\$4,$i5
+
+	sub	\$6,$blocks
+	jc	.Locb_enc_short
+	jmp	.Locb_enc_grandloop
+
+.align	32
+.Locb_enc_grandloop:
+	movdqu	`16*0`($inp),$inout0		# load input
+	movdqu	`16*1`($inp),$inout1
+	movdqu	`16*2`($inp),$inout2
+	movdqu	`16*3`($inp),$inout3
+	movdqu	`16*4`($inp),$inout4
+	movdqu	`16*5`($inp),$inout5
+	lea	`16*6`($inp),$inp
+
+	call	__ocb_encrypt6
+
+	movups	$inout0,`16*0`($out)		# store output
+	movups	$inout1,`16*1`($out)
+	movups	$inout2,`16*2`($out)
+	movups	$inout3,`16*3`($out)
+	movups	$inout4,`16*4`($out)
+	movups	$inout5,`16*5`($out)
+	lea	`16*6`($out),$out
+	sub	\$6,$blocks
+	jnc	.Locb_enc_grandloop
+
+.Locb_enc_short:
+	add	\$6,$blocks
+	jz	.Locb_enc_done
+
+	movdqu	`16*0`($inp),$inout0
+	cmp	\$2,$blocks
+	jb	.Locb_enc_one
+	movdqu	`16*1`($inp),$inout1
+	je	.Locb_enc_two
+
+	movdqu	`16*2`($inp),$inout2
+	cmp	\$4,$blocks
+	jb	.Locb_enc_three
+	movdqu	`16*3`($inp),$inout3
+	je	.Locb_enc_four
+
+	movdqu	`16*4`($inp),$inout4
+	pxor	$inout5,$inout5
+
+	call	__ocb_encrypt6
+
+	movdqa	@offset[4],@offset[5]
+	movups	$inout0,`16*0`($out)
+	movups	$inout1,`16*1`($out)
+	movups	$inout2,`16*2`($out)
+	movups	$inout3,`16*3`($out)
+	movups	$inout4,`16*4`($out)
+
+	jmp	.Locb_enc_done
+
+.align	16
+.Locb_enc_one:
+	movdqa	@offset[0],$inout5		# borrow
+
+	call	__ocb_encrypt1
+
+	movdqa	$inout5,@offset[5]
+	movups	$inout0,`16*0`($out)
+	jmp	.Locb_enc_done
+
+.align	16
+.Locb_enc_two:
+	pxor	$inout2,$inout2
+	pxor	$inout3,$inout3
+
+	call	__ocb_encrypt4
+
+	movdqa	@offset[1],@offset[5]
+	movups	$inout0,`16*0`($out)
+	movups	$inout1,`16*1`($out)
+
+	jmp	.Locb_enc_done
+
+.align	16
+.Locb_enc_three:
+	pxor	$inout3,$inout3
+
+	call	__ocb_encrypt4
+
+	movdqa	@offset[2],@offset[5]
+	movups	$inout0,`16*0`($out)
+	movups	$inout1,`16*1`($out)
+	movups	$inout2,`16*2`($out)
+
+	jmp	.Locb_enc_done
+
+.align	16
+.Locb_enc_four:
+	call	__ocb_encrypt4
+
+	movdqa	@offset[3],@offset[5]
+	movups	$inout0,`16*0`($out)
+	movups	$inout1,`16*1`($out)
+	movups	$inout2,`16*2`($out)
+	movups	$inout3,`16*3`($out)
+
+.Locb_enc_done:
+	pxor	$rndkey0,@offset[5]		# "remove" round[last]
+	movdqu	$checksum,($checksum_p)		# store checksum
+	movdqu	@offset[5],($offset_p)		# store last offset_i
+
+	xorps	%xmm0,%xmm0			# clear register bank
+	pxor	%xmm1,%xmm1
+	pxor	%xmm2,%xmm2
+	pxor	%xmm3,%xmm3
+	pxor	%xmm4,%xmm4
+	pxor	%xmm5,%xmm5
+___
+$code.=<<___ if (!$win64);
+	pxor	%xmm6,%xmm6
+	pxor	%xmm7,%xmm7
+	pxor	%xmm8,%xmm8
+	pxor	%xmm9,%xmm9
+	pxor	%xmm10,%xmm10
+	pxor	%xmm11,%xmm11
+	pxor	%xmm12,%xmm12
+	pxor	%xmm13,%xmm13
+	pxor	%xmm14,%xmm14
+	pxor	%xmm15,%xmm15
+	lea	0x28(%rsp),%rax
+___
+$code.=<<___ if ($win64);
+	movaps	0x00(%rsp),%xmm6
+	movaps	%xmm0,0x00(%rsp)		# clear stack
+	movaps	0x10(%rsp),%xmm7
+	movaps	%xmm0,0x10(%rsp)
+	movaps	0x20(%rsp),%xmm8
+	movaps	%xmm0,0x20(%rsp)
+	movaps	0x30(%rsp),%xmm9
+	movaps	%xmm0,0x30(%rsp)
+	movaps	0x40(%rsp),%xmm10
+	movaps	%xmm0,0x40(%rsp)
+	movaps	0x50(%rsp),%xmm11
+	movaps	%xmm0,0x50(%rsp)
+	movaps	0x60(%rsp),%xmm12
+	movaps	%xmm0,0x60(%rsp)
+	movaps	0x70(%rsp),%xmm13
+	movaps	%xmm0,0x70(%rsp)
+	movaps	0x80(%rsp),%xmm14
+	movaps	%xmm0,0x80(%rsp)
+	movaps	0x90(%rsp),%xmm15
+	movaps	%xmm0,0x90(%rsp)
+	lea	0xa0+0x28(%rsp),%rax
+.Locb_enc_pop:
+___
+$code.=<<___;
+	mov	-40(%rax),%r14
+	mov	-32(%rax),%r13
+	mov	-24(%rax),%r12
+	mov	-16(%rax),%rbp
+	mov	-8(%rax),%rbx
+	lea	(%rax),%rsp
+.Locb_enc_epilogue:
+	ret
+.size	aesni_ocb_encrypt,.-aesni_ocb_encrypt
+
+.type	__ocb_encrypt6,\@abi-omnipotent
+.align	32
+__ocb_encrypt6:
+	 pxor		$rndkey0l,@offset[5]	# offset_i ^ round[0]
+	 movdqu		($L_p,$i1),@offset[1]
+	 movdqa		@offset[0],@offset[2]
+	 movdqu		($L_p,$i3),@offset[3]
+	 movdqa		@offset[0],@offset[4]
+	 pxor		@offset[5],@offset[0]
+	 movdqu		($L_p,$i5),@offset[5]
+	 pxor		@offset[0],@offset[1]
+	pxor		$inout0,$checksum	# accumulate checksum
+	pxor		@offset[0],$inout0	# input ^ round[0] ^ offset_i
+	 pxor		@offset[1],@offset[2]
+	pxor		$inout1,$checksum
+	pxor		@offset[1],$inout1
+	 pxor		@offset[2],@offset[3]
+	pxor		$inout2,$checksum
+	pxor		@offset[2],$inout2
+	 pxor		@offset[3],@offset[4]
+	pxor		$inout3,$checksum
+	pxor		@offset[3],$inout3
+	 pxor		@offset[4],@offset[5]
+	pxor		$inout4,$checksum
+	pxor		@offset[4],$inout4
+	pxor		$inout5,$checksum
+	pxor		@offset[5],$inout5
+	$movkey		32($key_),$rndkey0
+
+	lea		1($block_num),$i1	# even-numbered blocks
+	lea		3($block_num),$i3
+	lea		5($block_num),$i5
+	add		\$6,$block_num
+	 pxor		$rndkey0l,@offset[0]	# offset_i ^ round[last]
+	bsf		$i1,$i1			# ntz(block)
+	bsf		$i3,$i3
+	bsf		$i5,$i5
+
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	 pxor		$rndkey0l,@offset[1]
+	 pxor		$rndkey0l,@offset[2]
+	aesenc		$rndkey1,$inout4
+	 pxor		$rndkey0l,@offset[3]
+	 pxor		$rndkey0l,@offset[4]
+	aesenc		$rndkey1,$inout5
+	$movkey		48($key_),$rndkey1
+	 pxor		$rndkey0l,@offset[5]
+
+	aesenc		$rndkey0,$inout0
+	aesenc		$rndkey0,$inout1
+	aesenc		$rndkey0,$inout2
+	aesenc		$rndkey0,$inout3
+	aesenc		$rndkey0,$inout4
+	aesenc		$rndkey0,$inout5
+	$movkey		64($key_),$rndkey0
+	shl		\$4,$i1			# ntz(block) -> table offset
+	shl		\$4,$i3
+	jmp		.Locb_enc_loop6
+
+.align	32
+.Locb_enc_loop6:
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	aesenc		$rndkey1,$inout4
+	aesenc		$rndkey1,$inout5
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+
+	aesenc		$rndkey0,$inout0
+	aesenc		$rndkey0,$inout1
+	aesenc		$rndkey0,$inout2
+	aesenc		$rndkey0,$inout3
+	aesenc		$rndkey0,$inout4
+	aesenc		$rndkey0,$inout5
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.Locb_enc_loop6
+
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	aesenc		$rndkey1,$inout4
+	aesenc		$rndkey1,$inout5
+	$movkey		16($key_),$rndkey1
+	shl		\$4,$i5
+
+	aesenclast	@offset[0],$inout0
+	movdqu		($L_p),@offset[0]	# L_0 for all odd-numbered blocks
+	mov		%r10,%rax		# restore twisted rounds
+	aesenclast	@offset[1],$inout1
+	aesenclast	@offset[2],$inout2
+	aesenclast	@offset[3],$inout3
+	aesenclast	@offset[4],$inout4
+	aesenclast	@offset[5],$inout5
+	ret
+.size	__ocb_encrypt6,.-__ocb_encrypt6
+
+.type	__ocb_encrypt4,\@abi-omnipotent
+.align	32
+__ocb_encrypt4:
+	 pxor		$rndkey0l,@offset[5]	# offset_i ^ round[0]
+	 movdqu		($L_p,$i1),@offset[1]
+	 movdqa		@offset[0],@offset[2]
+	 movdqu		($L_p,$i3),@offset[3]
+	 pxor		@offset[5],@offset[0]
+	 pxor		@offset[0],@offset[1]
+	pxor		$inout0,$checksum	# accumulate checksum
+	pxor		@offset[0],$inout0	# input ^ round[0] ^ offset_i
+	 pxor		@offset[1],@offset[2]
+	pxor		$inout1,$checksum
+	pxor		@offset[1],$inout1
+	 pxor		@offset[2],@offset[3]
+	pxor		$inout2,$checksum
+	pxor		@offset[2],$inout2
+	pxor		$inout3,$checksum
+	pxor		@offset[3],$inout3
+	$movkey		32($key_),$rndkey0
+
+	 pxor		$rndkey0l,@offset[0]	# offset_i ^ round[last]
+	 pxor		$rndkey0l,@offset[1]
+	 pxor		$rndkey0l,@offset[2]
+	 pxor		$rndkey0l,@offset[3]
+
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	$movkey		48($key_),$rndkey1
+
+	aesenc		$rndkey0,$inout0
+	aesenc		$rndkey0,$inout1
+	aesenc		$rndkey0,$inout2
+	aesenc		$rndkey0,$inout3
+	$movkey		64($key_),$rndkey0
+	jmp		.Locb_enc_loop4
+
+.align	32
+.Locb_enc_loop4:
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+
+	aesenc		$rndkey0,$inout0
+	aesenc		$rndkey0,$inout1
+	aesenc		$rndkey0,$inout2
+	aesenc		$rndkey0,$inout3
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.Locb_enc_loop4
+
+	aesenc		$rndkey1,$inout0
+	aesenc		$rndkey1,$inout1
+	aesenc		$rndkey1,$inout2
+	aesenc		$rndkey1,$inout3
+	$movkey		16($key_),$rndkey1
+	mov		%r10,%rax		# restore twisted rounds
+
+	aesenclast	@offset[0],$inout0
+	aesenclast	@offset[1],$inout1
+	aesenclast	@offset[2],$inout2
+	aesenclast	@offset[3],$inout3
+	ret
+.size	__ocb_encrypt4,.-__ocb_encrypt4
+
+.type	__ocb_encrypt1,\@abi-omnipotent
+.align	32
+__ocb_encrypt1:
+	 pxor		@offset[5],$inout5	# offset_i
+	 pxor		$rndkey0l,$inout5	# offset_i ^ round[0]
+	pxor		$inout0,$checksum	# accumulate checksum
+	pxor		$inout5,$inout0		# input ^ round[0] ^ offset_i
+	$movkey		32($key_),$rndkey0
+
+	aesenc		$rndkey1,$inout0
+	$movkey		48($key_),$rndkey1
+	pxor		$rndkey0l,$inout5	# offset_i ^ round[last]
+
+	aesenc		$rndkey0,$inout0
+	$movkey		64($key_),$rndkey0
+	jmp		.Locb_enc_loop1
+
+.align	32
+.Locb_enc_loop1:
+	aesenc		$rndkey1,$inout0
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+
+	aesenc		$rndkey0,$inout0
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.Locb_enc_loop1
+
+	aesenc		$rndkey1,$inout0
+	$movkey		16($key_),$rndkey1	# redundant in tail
+	mov		%r10,%rax		# restore twisted rounds
+
+	aesenclast	$inout5,$inout0
+	ret
+.size	__ocb_encrypt1,.-__ocb_encrypt1
+
+.globl	aesni_ocb_decrypt
+.type	aesni_ocb_decrypt,\@function,6
+.align	32
+aesni_ocb_decrypt:
+	lea	(%rsp),%rax
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+___
+$code.=<<___ if ($win64);
+	lea	-0xa0(%rsp),%rsp
+	movaps	%xmm6,0x00(%rsp)		# offload everything
+	movaps	%xmm7,0x10(%rsp)
+	movaps	%xmm8,0x20(%rsp)
+	movaps	%xmm9,0x30(%rsp)
+	movaps	%xmm10,0x40(%rsp)
+	movaps	%xmm11,0x50(%rsp)
+	movaps	%xmm12,0x60(%rsp)
+	movaps	%xmm13,0x70(%rsp)
+	movaps	%xmm14,0x80(%rsp)
+	movaps	%xmm15,0x90(%rsp)
+.Locb_dec_body:
+___
+$code.=<<___;
+	mov	$seventh_arg(%rax),$L_p		# 7th argument
+	mov	$seventh_arg+8(%rax),$checksum_p# 8th argument
+
+	mov	240($key),$rnds_
+	mov	$key,$key_
+	shl	\$4,$rnds_
+	$movkey	($key),$rndkey0l		# round[0]
+	$movkey	16($key,$rnds_),$rndkey1	# round[last]
+
+	movdqu	($offset_p),@offset[5]		# load last offset_i
+	pxor	$rndkey1,$rndkey0l		# round[0] ^ round[last]
+	pxor	$rndkey1,@offset[5]		# offset_i ^ round[last]
+
+	mov	\$16+32,$rounds
+	lea	32($key_,$rnds_),$key
+	$movkey	16($key_),$rndkey1		# round[1]
+	sub	%r10,%rax			# twisted $rounds
+	mov	%rax,%r10			# backup twisted $rounds
+
+	movdqu	($L_p),@offset[0]		# L_0 for all odd-numbered blocks
+	movdqu	($checksum_p),$checksum		# load checksum
+
+	test	\$1,$block_num			# is first block number odd?
+	jnz	.Locb_dec_odd
+
+	bsf	$block_num,$i1
+	add	\$1,$block_num
+	shl	\$4,$i1
+	movdqu	($L_p,$i1),$inout5		# borrow
+	movdqu	($inp),$inout0
+	lea	16($inp),$inp
+
+	call	__ocb_decrypt1
+
+	movdqa	$inout5,@offset[5]
+	movups	$inout0,($out)
+	xorps	$inout0,$checksum		# accumulate checksum
+	lea	16($out),$out
+	sub	\$1,$blocks
+	jz	.Locb_dec_done
+
+.Locb_dec_odd:
+	lea	1($block_num),$i1		# even-numbered blocks
+	lea	3($block_num),$i3
+	lea	5($block_num),$i5
+	lea	6($block_num),$block_num
+	bsf	$i1,$i1				# ntz(block)
+	bsf	$i3,$i3
+	bsf	$i5,$i5
+	shl	\$4,$i1				# ntz(block) -> table offset
+	shl	\$4,$i3
+	shl	\$4,$i5
+
+	sub	\$6,$blocks
+	jc	.Locb_dec_short
+	jmp	.Locb_dec_grandloop
+
+.align	32
+.Locb_dec_grandloop:
+	movdqu	`16*0`($inp),$inout0		# load input
+	movdqu	`16*1`($inp),$inout1
+	movdqu	`16*2`($inp),$inout2
+	movdqu	`16*3`($inp),$inout3
+	movdqu	`16*4`($inp),$inout4
+	movdqu	`16*5`($inp),$inout5
+	lea	`16*6`($inp),$inp
+
+	call	__ocb_decrypt6
+
+	movups	$inout0,`16*0`($out)		# store output
+	pxor	$inout0,$checksum		# accumulate checksum
+	movups	$inout1,`16*1`($out)
+	pxor	$inout1,$checksum
+	movups	$inout2,`16*2`($out)
+	pxor	$inout2,$checksum
+	movups	$inout3,`16*3`($out)
+	pxor	$inout3,$checksum
+	movups	$inout4,`16*4`($out)
+	pxor	$inout4,$checksum
+	movups	$inout5,`16*5`($out)
+	pxor	$inout5,$checksum
+	lea	`16*6`($out),$out
+	sub	\$6,$blocks
+	jnc	.Locb_dec_grandloop
+
+.Locb_dec_short:
+	add	\$6,$blocks
+	jz	.Locb_dec_done
+
+	movdqu	`16*0`($inp),$inout0
+	cmp	\$2,$blocks
+	jb	.Locb_dec_one
+	movdqu	`16*1`($inp),$inout1
+	je	.Locb_dec_two
+
+	movdqu	`16*2`($inp),$inout2
+	cmp	\$4,$blocks
+	jb	.Locb_dec_three
+	movdqu	`16*3`($inp),$inout3
+	je	.Locb_dec_four
+
+	movdqu	`16*4`($inp),$inout4
+	pxor	$inout5,$inout5
+
+	call	__ocb_decrypt6
+
+	movdqa	@offset[4],@offset[5]
+	movups	$inout0,`16*0`($out)		# store output
+	pxor	$inout0,$checksum		# accumulate checksum
+	movups	$inout1,`16*1`($out)
+	pxor	$inout1,$checksum
+	movups	$inout2,`16*2`($out)
+	pxor	$inout2,$checksum
+	movups	$inout3,`16*3`($out)
+	pxor	$inout3,$checksum
+	movups	$inout4,`16*4`($out)
+	pxor	$inout4,$checksum
+
+	jmp	.Locb_dec_done
+
+.align	16
+.Locb_dec_one:
+	movdqa	@offset[0],$inout5		# borrow
+
+	call	__ocb_decrypt1
+
+	movdqa	$inout5,@offset[5]
+	movups	$inout0,`16*0`($out)		# store output
+	xorps	$inout0,$checksum		# accumulate checksum
+	jmp	.Locb_dec_done
+
+.align	16
+.Locb_dec_two:
+	pxor	$inout2,$inout2
+	pxor	$inout3,$inout3
+
+	call	__ocb_decrypt4
+
+	movdqa	@offset[1],@offset[5]
+	movups	$inout0,`16*0`($out)		# store output
+	xorps	$inout0,$checksum		# accumulate checksum
+	movups	$inout1,`16*1`($out)
+	xorps	$inout1,$checksum
+
+	jmp	.Locb_dec_done
+
+.align	16
+.Locb_dec_three:
+	pxor	$inout3,$inout3
+
+	call	__ocb_decrypt4
+
+	movdqa	@offset[2],@offset[5]
+	movups	$inout0,`16*0`($out)		# store output
+	xorps	$inout0,$checksum		# accumulate checksum
+	movups	$inout1,`16*1`($out)
+	xorps	$inout1,$checksum
+	movups	$inout2,`16*2`($out)
+	xorps	$inout2,$checksum
+
+	jmp	.Locb_dec_done
+
+.align	16
+.Locb_dec_four:
+	call	__ocb_decrypt4
+
+	movdqa	@offset[3],@offset[5]
+	movups	$inout0,`16*0`($out)		# store output
+	pxor	$inout0,$checksum		# accumulate checksum
+	movups	$inout1,`16*1`($out)
+	pxor	$inout1,$checksum
+	movups	$inout2,`16*2`($out)
+	pxor	$inout2,$checksum
+	movups	$inout3,`16*3`($out)
+	pxor	$inout3,$checksum
+
+.Locb_dec_done:
+	pxor	$rndkey0,@offset[5]		# "remove" round[last]
+	movdqu	$checksum,($checksum_p)		# store checksum
+	movdqu	@offset[5],($offset_p)		# store last offset_i
+
+	xorps	%xmm0,%xmm0			# clear register bank
+	pxor	%xmm1,%xmm1
+	pxor	%xmm2,%xmm2
+	pxor	%xmm3,%xmm3
+	pxor	%xmm4,%xmm4
+	pxor	%xmm5,%xmm5
+___
+$code.=<<___ if (!$win64);
+	pxor	%xmm6,%xmm6
+	pxor	%xmm7,%xmm7
+	pxor	%xmm8,%xmm8
+	pxor	%xmm9,%xmm9
+	pxor	%xmm10,%xmm10
+	pxor	%xmm11,%xmm11
+	pxor	%xmm12,%xmm12
+	pxor	%xmm13,%xmm13
+	pxor	%xmm14,%xmm14
+	pxor	%xmm15,%xmm15
+	lea	0x28(%rsp),%rax
+___
+$code.=<<___ if ($win64);
+	movaps	0x00(%rsp),%xmm6
+	movaps	%xmm0,0x00(%rsp)		# clear stack
+	movaps	0x10(%rsp),%xmm7
+	movaps	%xmm0,0x10(%rsp)
+	movaps	0x20(%rsp),%xmm8
+	movaps	%xmm0,0x20(%rsp)
+	movaps	0x30(%rsp),%xmm9
+	movaps	%xmm0,0x30(%rsp)
+	movaps	0x40(%rsp),%xmm10
+	movaps	%xmm0,0x40(%rsp)
+	movaps	0x50(%rsp),%xmm11
+	movaps	%xmm0,0x50(%rsp)
+	movaps	0x60(%rsp),%xmm12
+	movaps	%xmm0,0x60(%rsp)
+	movaps	0x70(%rsp),%xmm13
+	movaps	%xmm0,0x70(%rsp)
+	movaps	0x80(%rsp),%xmm14
+	movaps	%xmm0,0x80(%rsp)
+	movaps	0x90(%rsp),%xmm15
+	movaps	%xmm0,0x90(%rsp)
+	lea	0xa0+0x28(%rsp),%rax
+.Locb_dec_pop:
+___
+$code.=<<___;
+	mov	-40(%rax),%r14
+	mov	-32(%rax),%r13
+	mov	-24(%rax),%r12
+	mov	-16(%rax),%rbp
+	mov	-8(%rax),%rbx
+	lea	(%rax),%rsp
+.Locb_dec_epilogue:
+	ret
+.size	aesni_ocb_decrypt,.-aesni_ocb_decrypt
+
+.type	__ocb_decrypt6,\@abi-omnipotent
+.align	32
+__ocb_decrypt6:
+	 pxor		$rndkey0l,@offset[5]	# offset_i ^ round[0]
+	 movdqu		($L_p,$i1),@offset[1]
+	 movdqa		@offset[0],@offset[2]
+	 movdqu		($L_p,$i3),@offset[3]
+	 movdqa		@offset[0],@offset[4]
+	 pxor		@offset[5],@offset[0]
+	 movdqu		($L_p,$i5),@offset[5]
+	 pxor		@offset[0],@offset[1]
+	pxor		@offset[0],$inout0	# input ^ round[0] ^ offset_i
+	 pxor		@offset[1],@offset[2]
+	pxor		@offset[1],$inout1
+	 pxor		@offset[2],@offset[3]
+	pxor		@offset[2],$inout2
+	 pxor		@offset[3],@offset[4]
+	pxor		@offset[3],$inout3
+	 pxor		@offset[4],@offset[5]
+	pxor		@offset[4],$inout4
+	pxor		@offset[5],$inout5
+	$movkey		32($key_),$rndkey0
+
+	lea		1($block_num),$i1	# even-numbered blocks
+	lea		3($block_num),$i3
+	lea		5($block_num),$i5
+	add		\$6,$block_num
+	 pxor		$rndkey0l,@offset[0]	# offset_i ^ round[last]
+	bsf		$i1,$i1			# ntz(block)
+	bsf		$i3,$i3
+	bsf		$i5,$i5
+
+	aesdec		$rndkey1,$inout0
+	aesdec		$rndkey1,$inout1
+	aesdec		$rndkey1,$inout2
+	aesdec		$rndkey1,$inout3
+	 pxor		$rndkey0l,@offset[1]
+	 pxor		$rndkey0l,@offset[2]
+	aesdec		$rndkey1,$inout4
+	 pxor		$rndkey0l,@offset[3]
+	 pxor		$rndkey0l,@offset[4]
+	aesdec		$rndkey1,$inout5
+	$movkey		48($key_),$rndkey1
+	 pxor		$rndkey0l,@offset[5]
+
+	aesdec		$rndkey0,$inout0
+	aesdec		$rndkey0,$inout1
+	aesdec		$rndkey0,$inout2
+	aesdec		$rndkey0,$inout3
+	aesdec		$rndkey0,$inout4
+	aesdec		$rndkey0,$inout5
+	$movkey		64($key_),$rndkey0
+	shl		\$4,$i1			# ntz(block) -> table offset
+	shl		\$4,$i3
+	jmp		.Locb_dec_loop6
+
+.align	32
+.Locb_dec_loop6:
+	aesdec		$rndkey1,$inout0
+	aesdec		$rndkey1,$inout1
+	aesdec		$rndkey1,$inout2
+	aesdec		$rndkey1,$inout3
+	aesdec		$rndkey1,$inout4
+	aesdec		$rndkey1,$inout5
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+
+	aesdec		$rndkey0,$inout0
+	aesdec		$rndkey0,$inout1
+	aesdec		$rndkey0,$inout2
+	aesdec		$rndkey0,$inout3
+	aesdec		$rndkey0,$inout4
+	aesdec		$rndkey0,$inout5
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.Locb_dec_loop6
+
+	aesdec		$rndkey1,$inout0
+	aesdec		$rndkey1,$inout1
+	aesdec		$rndkey1,$inout2
+	aesdec		$rndkey1,$inout3
+	aesdec		$rndkey1,$inout4
+	aesdec		$rndkey1,$inout5
+	$movkey		16($key_),$rndkey1
+	shl		\$4,$i5
+
+	aesdeclast	@offset[0],$inout0
+	movdqu		($L_p),@offset[0]	# L_0 for all odd-numbered blocks
+	mov		%r10,%rax		# restore twisted rounds
+	aesdeclast	@offset[1],$inout1
+	aesdeclast	@offset[2],$inout2
+	aesdeclast	@offset[3],$inout3
+	aesdeclast	@offset[4],$inout4
+	aesdeclast	@offset[5],$inout5
+	ret
+.size	__ocb_decrypt6,.-__ocb_decrypt6
+
+.type	__ocb_decrypt4,\@abi-omnipotent
+.align	32
+__ocb_decrypt4:
+	 pxor		$rndkey0l,@offset[5]	# offset_i ^ round[0]
+	 movdqu		($L_p,$i1),@offset[1]
+	 movdqa		@offset[0],@offset[2]
+	 movdqu		($L_p,$i3),@offset[3]
+	 pxor		@offset[5],@offset[0]
+	 pxor		@offset[0],@offset[1]
+	pxor		@offset[0],$inout0	# input ^ round[0] ^ offset_i
+	 pxor		@offset[1],@offset[2]
+	pxor		@offset[1],$inout1
+	 pxor		@offset[2],@offset[3]
+	pxor		@offset[2],$inout2
+	pxor		@offset[3],$inout3
+	$movkey		32($key_),$rndkey0
+
+	 pxor		$rndkey0l,@offset[0]	# offset_i ^ round[last]
+	 pxor		$rndkey0l,@offset[1]
+	 pxor		$rndkey0l,@offset[2]
+	 pxor		$rndkey0l,@offset[3]
+
+	aesdec		$rndkey1,$inout0
+	aesdec		$rndkey1,$inout1
+	aesdec		$rndkey1,$inout2
+	aesdec		$rndkey1,$inout3
+	$movkey		48($key_),$rndkey1
+
+	aesdec		$rndkey0,$inout0
+	aesdec		$rndkey0,$inout1
+	aesdec		$rndkey0,$inout2
+	aesdec		$rndkey0,$inout3
+	$movkey		64($key_),$rndkey0
+	jmp		.Locb_dec_loop4
+
+.align	32
+.Locb_dec_loop4:
+	aesdec		$rndkey1,$inout0
+	aesdec		$rndkey1,$inout1
+	aesdec		$rndkey1,$inout2
+	aesdec		$rndkey1,$inout3
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+
+	aesdec		$rndkey0,$inout0
+	aesdec		$rndkey0,$inout1
+	aesdec		$rndkey0,$inout2
+	aesdec		$rndkey0,$inout3
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.Locb_dec_loop4
+
+	aesdec		$rndkey1,$inout0
+	aesdec		$rndkey1,$inout1
+	aesdec		$rndkey1,$inout2
+	aesdec		$rndkey1,$inout3
+	$movkey		16($key_),$rndkey1
+	mov		%r10,%rax		# restore twisted rounds
+
+	aesdeclast	@offset[0],$inout0
+	aesdeclast	@offset[1],$inout1
+	aesdeclast	@offset[2],$inout2
+	aesdeclast	@offset[3],$inout3
+	ret
+.size	__ocb_decrypt4,.-__ocb_decrypt4
+
+.type	__ocb_decrypt1,\@abi-omnipotent
+.align	32
+__ocb_decrypt1:
+	 pxor		@offset[5],$inout5	# offset_i
+	 pxor		$rndkey0l,$inout5	# offset_i ^ round[0]
+	pxor		$inout5,$inout0		# input ^ round[0] ^ offset_i
+	$movkey		32($key_),$rndkey0
+
+	aesdec		$rndkey1,$inout0
+	$movkey		48($key_),$rndkey1
+	pxor		$rndkey0l,$inout5	# offset_i ^ round[last]
+
+	aesdec		$rndkey0,$inout0
+	$movkey		64($key_),$rndkey0
+	jmp		.Locb_dec_loop1
+
+.align	32
+.Locb_dec_loop1:
+	aesdec		$rndkey1,$inout0
+	$movkey		($key,%rax),$rndkey1
+	add		\$32,%rax
+
+	aesdec		$rndkey0,$inout0
+	$movkey		-16($key,%rax),$rndkey0
+	jnz		.Locb_dec_loop1
+
+	aesdec		$rndkey1,$inout0
+	$movkey		16($key_),$rndkey1	# redundant in tail
+	mov		%r10,%rax		# restore twisted rounds
+
+	aesdeclast	$inout5,$inout0
+	ret
+.size	__ocb_decrypt1,.-__ocb_decrypt1
+___
+} }}
+
+########################################################################
+# void $PREFIX_cbc_encrypt (const void *inp, void *out,
+#			    size_t length, const AES_KEY *key,
+#			    unsigned char *ivp,const int enc);
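+#
+# A minimal usage sketch from the C side (hypothetical caller, not part of
+# this file; "user_key", "plaintext", "ciphertext", "len" and "iv" are
+# placeholders, and the "aesni_" prefix assumes the default $PREFIX):
+#
+#	AES_KEY ks;
+#	unsigned char iv[16] = {0};
+#	aesni_set_encrypt_key(user_key, 128, &ks);
+#	aesni_cbc_encrypt(plaintext, ciphertext, len, &ks, iv, 1 /* encrypt */);
+#
+# For encryption a trailing partial block, if any, is zero-padded before
+# the final spin; a single-block decrypt takes a fast path that avoids the
+# stack frame (see the comments below).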
+{
+my $frame_size = 0x10 + ($win64?0xa0:0);	# used in decrypt
+my ($iv,$in0,$in1,$in2,$in3,$in4)=map("%xmm$_",(10..15));
+
+$code.=<<___;
+.globl	${PREFIX}_cbc_encrypt
+.type	${PREFIX}_cbc_encrypt,\@function,6
+.align	16
+${PREFIX}_cbc_encrypt:
+	test	$len,$len		# check length
+	jz	.Lcbc_ret
+
+	mov	240($key),$rnds_	# key->rounds
+	mov	$key,$key_		# backup $key
+	test	%r9d,%r9d		# 6th argument
+	jz	.Lcbc_decrypt
+#--------------------------- CBC ENCRYPT ------------------------------#
+	movups	($ivp),$inout0		# load iv as initial state
+	mov	$rnds_,$rounds
+	cmp	\$16,$len
+	jb	.Lcbc_enc_tail
+	sub	\$16,$len
+	jmp	.Lcbc_enc_loop
+.align	16
+.Lcbc_enc_loop:
+	movups	($inp),$inout1		# load input
+	lea	16($inp),$inp
+	#xorps	$inout1,$inout0
+___
+	&aesni_generate1("enc",$key,$rounds,$inout0,$inout1);
+$code.=<<___;
+	mov	$rnds_,$rounds		# restore $rounds
+	mov	$key_,$key		# restore $key
+	movups	$inout0,0($out)		# store output
+	lea	16($out),$out
+	sub	\$16,$len
+	jnc	.Lcbc_enc_loop
+	add	\$16,$len
+	jnz	.Lcbc_enc_tail
+	 pxor	$rndkey0,$rndkey0	# clear register bank
+	 pxor	$rndkey1,$rndkey1
+	movups	$inout0,($ivp)
+	 pxor	$inout0,$inout0
+	 pxor	$inout1,$inout1
+	jmp	.Lcbc_ret
+
+.Lcbc_enc_tail:
+	mov	$len,%rcx	# zaps $key
+	xchg	$inp,$out	# $inp is %rsi and $out is %rdi now
+	.long	0x9066A4F3	# rep movsb
+	mov	\$16,%ecx	# zero tail
+	sub	$len,%rcx
+	xor	%eax,%eax
+	.long	0x9066AAF3	# rep stosb
+	lea	-16(%rdi),%rdi	# rewind $out by 1 block
+	mov	$rnds_,$rounds	# restore $rounds
+	mov	%rdi,%rsi	# $inp and $out are the same
+	mov	$key_,$key	# restore $key
+	xor	$len,$len	# len=16
+	jmp	.Lcbc_enc_loop	# one more spin
+#--------------------------- CBC DECRYPT ------------------------------#
+.align	16
+.Lcbc_decrypt:
+	cmp	\$16,$len
+	jne	.Lcbc_decrypt_bulk
+
+	# handle single block without allocating stack frame,
+	# useful in ciphertext stealing mode
+	movdqu	($inp),$inout0		# load input
+	movdqu	($ivp),$inout1		# load iv
+	movdqa	$inout0,$inout2		# future iv
+___
+	&aesni_generate1("dec",$key,$rnds_);
+$code.=<<___;
+	 pxor	$rndkey0,$rndkey0	# clear register bank
+	 pxor	$rndkey1,$rndkey1
+	movdqu	$inout2,($ivp)		# store iv
+	xorps	$inout1,$inout0		# ^=iv
+	 pxor	$inout1,$inout1
+	movups	$inout0,($out)		# store output
+	 pxor	$inout0,$inout0
+	jmp	.Lcbc_ret
+.align	16
+.Lcbc_decrypt_bulk:
+	lea	(%rsp),%r11		# frame pointer
+	push	%rbp
+	sub	\$$frame_size,%rsp
+	and	\$-16,%rsp	# Linux kernel stack can be incorrectly seeded
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,0x10(%rsp)
+	movaps	%xmm7,0x20(%rsp)
+	movaps	%xmm8,0x30(%rsp)
+	movaps	%xmm9,0x40(%rsp)
+	movaps	%xmm10,0x50(%rsp)
+	movaps	%xmm11,0x60(%rsp)
+	movaps	%xmm12,0x70(%rsp)
+	movaps	%xmm13,0x80(%rsp)
+	movaps	%xmm14,0x90(%rsp)
+	movaps	%xmm15,0xa0(%rsp)
+.Lcbc_decrypt_body:
+___
+
+my $inp_=$key_="%rbp";			# reassign $key_
+
+$code.=<<___;
+	mov	$key,$key_		# [re-]backup $key [after reassignment]
+	movups	($ivp),$iv
+	mov	$rnds_,$rounds
+	cmp	\$0x50,$len
+	jbe	.Lcbc_dec_tail
+
+	$movkey	($key),$rndkey0
+	movdqu	0x00($inp),$inout0	# load input
+	movdqu	0x10($inp),$inout1
+	movdqa	$inout0,$in0
+	movdqu	0x20($inp),$inout2
+	movdqa	$inout1,$in1
+	movdqu	0x30($inp),$inout3
+	movdqa	$inout2,$in2
+	movdqu	0x40($inp),$inout4
+	movdqa	$inout3,$in3
+	movdqu	0x50($inp),$inout5
+	movdqa	$inout4,$in4
+	movq	OPENSSL_ia32cap_addr(%rip),%r9
+	mov	4(%r9),%r9d
+	cmp	\$0x70,$len
+	jbe	.Lcbc_dec_six_or_seven
+
+	and	\$`1<<26|1<<22`,%r9d	# isolate XSAVE+MOVBE
+	sub	\$0x50,$len		# $len is biased by -5*16
+	cmp	\$`1<<22`,%r9d		# check for MOVBE without XSAVE
+	je	.Lcbc_dec_loop6_enter	# [which denotes Atom Silvermont]
+	sub	\$0x20,$len		# $len is biased by -7*16
+	lea	0x70($key),$key		# size optimization
+	jmp	.Lcbc_dec_loop8_enter
+.align	16
+.Lcbc_dec_loop8:
+	movups	$inout7,($out)
+	lea	0x10($out),$out
+.Lcbc_dec_loop8_enter:
+	movdqu		0x60($inp),$inout6
+	pxor		$rndkey0,$inout0
+	movdqu		0x70($inp),$inout7
+	pxor		$rndkey0,$inout1
+	$movkey		0x10-0x70($key),$rndkey1
+	pxor		$rndkey0,$inout2
+	mov		\$-1,$inp_
+	cmp		\$0x70,$len	# are there at least 0x60 bytes ahead?
+	pxor		$rndkey0,$inout3
+	pxor		$rndkey0,$inout4
+	pxor		$rndkey0,$inout5
+	pxor		$rndkey0,$inout6
+
+	aesdec		$rndkey1,$inout0
+	pxor		$rndkey0,$inout7
+	$movkey		0x20-0x70($key),$rndkey0
+	aesdec		$rndkey1,$inout1
+	aesdec		$rndkey1,$inout2
+	aesdec		$rndkey1,$inout3
+	aesdec		$rndkey1,$inout4
+	aesdec		$rndkey1,$inout5
+	aesdec		$rndkey1,$inout6
+	adc		\$0,$inp_
+	and		\$128,$inp_
+	aesdec		$rndkey1,$inout7
+	add		$inp,$inp_
+	$movkey		0x30-0x70($key),$rndkey1
+___
+for($i=1;$i<12;$i++) {
+my $rndkeyx = ($i&1)?$rndkey0:$rndkey1;
+$code.=<<___	if ($i==7);
+	cmp		\$11,$rounds
+___
+$code.=<<___;
+	aesdec		$rndkeyx,$inout0
+	aesdec		$rndkeyx,$inout1
+	aesdec		$rndkeyx,$inout2
+	aesdec		$rndkeyx,$inout3
+	aesdec		$rndkeyx,$inout4
+	aesdec		$rndkeyx,$inout5
+	aesdec		$rndkeyx,$inout6
+	aesdec		$rndkeyx,$inout7
+	$movkey		`0x30+0x10*$i`-0x70($key),$rndkeyx
+___
+$code.=<<___	if ($i<6 || (!($i&1) && $i>7));
+	nop
+___
+$code.=<<___	if ($i==7);
+	jb		.Lcbc_dec_done
+___
+$code.=<<___	if ($i==9);
+	je		.Lcbc_dec_done
+___
+$code.=<<___	if ($i==11);
+	jmp		.Lcbc_dec_done
+___
+}
+$code.=<<___;
+.align	16
+.Lcbc_dec_done:
+	aesdec		$rndkey1,$inout0
+	aesdec		$rndkey1,$inout1
+	pxor		$rndkey0,$iv
+	pxor		$rndkey0,$in0
+	aesdec		$rndkey1,$inout2
+	aesdec		$rndkey1,$inout3
+	pxor		$rndkey0,$in1
+	pxor		$rndkey0,$in2
+	aesdec		$rndkey1,$inout4
+	aesdec		$rndkey1,$inout5
+	pxor		$rndkey0,$in3
+	pxor		$rndkey0,$in4
+	aesdec		$rndkey1,$inout6
+	aesdec		$rndkey1,$inout7
+	movdqu		0x50($inp),$rndkey1
+
+	aesdeclast	$iv,$inout0
+	movdqu		0x60($inp),$iv		# borrow $iv
+	pxor		$rndkey0,$rndkey1
+	aesdeclast	$in0,$inout1
+	pxor		$rndkey0,$iv
+	movdqu		0x70($inp),$rndkey0	# next IV
+	aesdeclast	$in1,$inout2
+	lea		0x80($inp),$inp
+	movdqu		0x00($inp_),$in0
+	aesdeclast	$in2,$inout3
+	aesdeclast	$in3,$inout4
+	movdqu		0x10($inp_),$in1
+	movdqu		0x20($inp_),$in2
+	aesdeclast	$in4,$inout5
+	aesdeclast	$rndkey1,$inout6
+	movdqu		0x30($inp_),$in3
+	movdqu		0x40($inp_),$in4
+	aesdeclast	$iv,$inout7
+	movdqa		$rndkey0,$iv		# return $iv
+	movdqu		0x50($inp_),$rndkey1
+	$movkey		-0x70($key),$rndkey0
+
+	movups		$inout0,($out)		# store output
+	movdqa		$in0,$inout0
+	movups		$inout1,0x10($out)
+	movdqa		$in1,$inout1
+	movups		$inout2,0x20($out)
+	movdqa		$in2,$inout2
+	movups		$inout3,0x30($out)
+	movdqa		$in3,$inout3
+	movups		$inout4,0x40($out)
+	movdqa		$in4,$inout4
+	movups		$inout5,0x50($out)
+	movdqa		$rndkey1,$inout5
+	movups		$inout6,0x60($out)
+	lea		0x70($out),$out
+
+	sub	\$0x80,$len
+	ja	.Lcbc_dec_loop8
+
+	movaps	$inout7,$inout0
+	lea	-0x70($key),$key
+	add	\$0x70,$len
+	jle	.Lcbc_dec_clear_tail_collected
+	movups	$inout7,($out)
+	lea	0x10($out),$out
+	cmp	\$0x50,$len
+	jbe	.Lcbc_dec_tail
+
+	movaps	$in0,$inout0
+.Lcbc_dec_six_or_seven:
+	cmp	\$0x60,$len
+	ja	.Lcbc_dec_seven
+
+	movaps	$inout5,$inout6
+	call	_aesni_decrypt6
+	pxor	$iv,$inout0		# ^= IV
+	movaps	$inout6,$iv
+	pxor	$in0,$inout1
+	movdqu	$inout0,($out)
+	pxor	$in1,$inout2
+	movdqu	$inout1,0x10($out)
+	 pxor	$inout1,$inout1		# clear register bank
+	pxor	$in2,$inout3
+	movdqu	$inout2,0x20($out)
+	 pxor	$inout2,$inout2
+	pxor	$in3,$inout4
+	movdqu	$inout3,0x30($out)
+	 pxor	$inout3,$inout3
+	pxor	$in4,$inout5
+	movdqu	$inout4,0x40($out)
+	 pxor	$inout4,$inout4
+	lea	0x50($out),$out
+	movdqa	$inout5,$inout0
+	 pxor	$inout5,$inout5
+	jmp	.Lcbc_dec_tail_collected
+
+.align	16
+.Lcbc_dec_seven:
+	movups	0x60($inp),$inout6
+	xorps	$inout7,$inout7
+	call	_aesni_decrypt8
+	movups	0x50($inp),$inout7
+	pxor	$iv,$inout0		# ^= IV
+	movups	0x60($inp),$iv
+	pxor	$in0,$inout1
+	movdqu	$inout0,($out)
+	pxor	$in1,$inout2
+	movdqu	$inout1,0x10($out)
+	 pxor	$inout1,$inout1		# clear register bank
+	pxor	$in2,$inout3
+	movdqu	$inout2,0x20($out)
+	 pxor	$inout2,$inout2
+	pxor	$in3,$inout4
+	movdqu	$inout3,0x30($out)
+	 pxor	$inout3,$inout3
+	pxor	$in4,$inout5
+	movdqu	$inout4,0x40($out)
+	 pxor	$inout4,$inout4
+	pxor	$inout7,$inout6
+	movdqu	$inout5,0x50($out)
+	 pxor	$inout5,$inout5
+	lea	0x60($out),$out
+	movdqa	$inout6,$inout0
+	 pxor	$inout6,$inout6
+	 pxor	$inout7,$inout7
+	jmp	.Lcbc_dec_tail_collected
+
+.align	16
+.Lcbc_dec_loop6:
+	movups	$inout5,($out)
+	lea	0x10($out),$out
+	movdqu	0x00($inp),$inout0	# load input
+	movdqu	0x10($inp),$inout1
+	movdqa	$inout0,$in0
+	movdqu	0x20($inp),$inout2
+	movdqa	$inout1,$in1
+	movdqu	0x30($inp),$inout3
+	movdqa	$inout2,$in2
+	movdqu	0x40($inp),$inout4
+	movdqa	$inout3,$in3
+	movdqu	0x50($inp),$inout5
+	movdqa	$inout4,$in4
+.Lcbc_dec_loop6_enter:
+	lea	0x60($inp),$inp
+	movdqa	$inout5,$inout6
+
+	call	_aesni_decrypt6
+
+	pxor	$iv,$inout0		# ^= IV
+	movdqa	$inout6,$iv
+	pxor	$in0,$inout1
+	movdqu	$inout0,($out)
+	pxor	$in1,$inout2
+	movdqu	$inout1,0x10($out)
+	pxor	$in2,$inout3
+	movdqu	$inout2,0x20($out)
+	pxor	$in3,$inout4
+	mov	$key_,$key
+	movdqu	$inout3,0x30($out)
+	pxor	$in4,$inout5
+	mov	$rnds_,$rounds
+	movdqu	$inout4,0x40($out)
+	lea	0x50($out),$out
+	sub	\$0x60,$len
+	ja	.Lcbc_dec_loop6
+
+	movdqa	$inout5,$inout0
+	add	\$0x50,$len
+	jle	.Lcbc_dec_clear_tail_collected
+	movups	$inout5,($out)
+	lea	0x10($out),$out
+
+.Lcbc_dec_tail:
+	movups	($inp),$inout0
+	sub	\$0x10,$len
+	jbe	.Lcbc_dec_one		# $len is 1*16 or less
+
+	movups	0x10($inp),$inout1
+	movaps	$inout0,$in0
+	sub	\$0x10,$len
+	jbe	.Lcbc_dec_two		# $len is 2*16 or less
+
+	movups	0x20($inp),$inout2
+	movaps	$inout1,$in1
+	sub	\$0x10,$len
+	jbe	.Lcbc_dec_three		# $len is 3*16 or less
+
+	movups	0x30($inp),$inout3
+	movaps	$inout2,$in2
+	sub	\$0x10,$len
+	jbe	.Lcbc_dec_four		# $len is 4*16 or less
+
+	movups	0x40($inp),$inout4	# $len is 5*16 or less
+	movaps	$inout3,$in3
+	movaps	$inout4,$in4
+	xorps	$inout5,$inout5
+	call	_aesni_decrypt6
+	pxor	$iv,$inout0
+	movaps	$in4,$iv
+	pxor	$in0,$inout1
+	movdqu	$inout0,($out)
+	pxor	$in1,$inout2
+	movdqu	$inout1,0x10($out)
+	 pxor	$inout1,$inout1		# clear register bank
+	pxor	$in2,$inout3
+	movdqu	$inout2,0x20($out)
+	 pxor	$inout2,$inout2
+	pxor	$in3,$inout4
+	movdqu	$inout3,0x30($out)
+	 pxor	$inout3,$inout3
+	lea	0x40($out),$out
+	movdqa	$inout4,$inout0
+	 pxor	$inout4,$inout4
+	 pxor	$inout5,$inout5
+	sub	\$0x10,$len
+	jmp	.Lcbc_dec_tail_collected
+
+.align	16
+.Lcbc_dec_one:
+	movaps	$inout0,$in0
+___
+	&aesni_generate1("dec",$key,$rounds);
+$code.=<<___;
+	xorps	$iv,$inout0
+	movaps	$in0,$iv
+	jmp	.Lcbc_dec_tail_collected
+.align	16
+.Lcbc_dec_two:
+	movaps	$inout1,$in1
+	call	_aesni_decrypt2
+	pxor	$iv,$inout0
+	movaps	$in1,$iv
+	pxor	$in0,$inout1
+	movdqu	$inout0,($out)
+	movdqa	$inout1,$inout0
+	 pxor	$inout1,$inout1		# clear register bank
+	lea	0x10($out),$out
+	jmp	.Lcbc_dec_tail_collected
+.align	16
+.Lcbc_dec_three:
+	movaps	$inout2,$in2
+	call	_aesni_decrypt3
+	pxor	$iv,$inout0
+	movaps	$in2,$iv
+	pxor	$in0,$inout1
+	movdqu	$inout0,($out)
+	pxor	$in1,$inout2
+	movdqu	$inout1,0x10($out)
+	 pxor	$inout1,$inout1		# clear register bank
+	movdqa	$inout2,$inout0
+	 pxor	$inout2,$inout2
+	lea	0x20($out),$out
+	jmp	.Lcbc_dec_tail_collected
+.align	16
+.Lcbc_dec_four:
+	movaps	$inout3,$in3
+	call	_aesni_decrypt4
+	pxor	$iv,$inout0
+	movaps	$in3,$iv
+	pxor	$in0,$inout1
+	movdqu	$inout0,($out)
+	pxor	$in1,$inout2
+	movdqu	$inout1,0x10($out)
+	 pxor	$inout1,$inout1		# clear register bank
+	pxor	$in2,$inout3
+	movdqu	$inout2,0x20($out)
+	 pxor	$inout2,$inout2
+	movdqa	$inout3,$inout0
+	 pxor	$inout3,$inout3
+	lea	0x30($out),$out
+	jmp	.Lcbc_dec_tail_collected
+
+.align	16
+.Lcbc_dec_clear_tail_collected:
+	pxor	$inout1,$inout1		# clear register bank
+	pxor	$inout2,$inout2
+	pxor	$inout3,$inout3
+___
+$code.=<<___ if (!$win64);
+	pxor	$inout4,$inout4		# %xmm6..9
+	pxor	$inout5,$inout5
+	pxor	$inout6,$inout6
+	pxor	$inout7,$inout7
+___
+$code.=<<___;
+.Lcbc_dec_tail_collected:
+	movups	$iv,($ivp)
+	and	\$15,$len
+	jnz	.Lcbc_dec_tail_partial
+	movups	$inout0,($out)
+	pxor	$inout0,$inout0
+	jmp	.Lcbc_dec_ret
+.align	16
+.Lcbc_dec_tail_partial:
+	movaps	$inout0,(%rsp)
+	pxor	$inout0,$inout0
+	mov	\$16,%rcx
+	mov	$out,%rdi
+	sub	$len,%rcx
+	lea	(%rsp),%rsi
+	.long	0x9066A4F3		# rep movsb
+	movdqa	$inout0,(%rsp)
+
+.Lcbc_dec_ret:
+	xorps	$rndkey0,$rndkey0	# %xmm0
+	pxor	$rndkey1,$rndkey1
+___
+$code.=<<___ if ($win64);
+	movaps	0x10(%rsp),%xmm6
+	movaps	%xmm0,0x10(%rsp)	# clear stack
+	movaps	0x20(%rsp),%xmm7
+	movaps	%xmm0,0x20(%rsp)
+	movaps	0x30(%rsp),%xmm8
+	movaps	%xmm0,0x30(%rsp)
+	movaps	0x40(%rsp),%xmm9
+	movaps	%xmm0,0x40(%rsp)
+	movaps	0x50(%rsp),%xmm10
+	movaps	%xmm0,0x50(%rsp)
+	movaps	0x60(%rsp),%xmm11
+	movaps	%xmm0,0x60(%rsp)
+	movaps	0x70(%rsp),%xmm12
+	movaps	%xmm0,0x70(%rsp)
+	movaps	0x80(%rsp),%xmm13
+	movaps	%xmm0,0x80(%rsp)
+	movaps	0x90(%rsp),%xmm14
+	movaps	%xmm0,0x90(%rsp)
+	movaps	0xa0(%rsp),%xmm15
+	movaps	%xmm0,0xa0(%rsp)
+___
+$code.=<<___;
+	mov	-8(%r11),%rbp
+	lea	(%r11),%rsp
+.Lcbc_ret:
+	ret
+.size	${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
+___
+} 
+# int ${PREFIX}_set_decrypt_key(const unsigned char *inp,
+#				int bits, AES_KEY *key)
+#
+# input:	$inp	user-supplied key
+#		$bits	$inp length in bits
+#		$key	pointer to key schedule
+# output:	%eax	0 on success, -1 or -2 on failure (see C)
+#		*$key	key schedule
+#
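+# The routine below first builds the encryption schedule by calling
+# __aesni_set_encrypt_key and, on success, converts it in place for
+# decryption: the round keys are reversed end-to-end and all but the two
+# outer ones are passed through aesimc (the Equivalent Inverse Cipher
+# transform).
+#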
+{ my ($inp,$bits,$key) = @_4args;
+  $bits =~ s/%r/%e/;
+
+$code.=<<___;
+.globl	${PREFIX}_set_decrypt_key
+.type	${PREFIX}_set_decrypt_key,\@abi-omnipotent
+.align	16
+${PREFIX}_set_decrypt_key:
+	.byte	0x48,0x83,0xEC,0x08	# sub rsp,8
+	call	__aesni_set_encrypt_key
+	shl	\$4,$bits		# rounds-1 after _aesni_set_encrypt_key
+	test	%eax,%eax
+	jnz	.Ldec_key_ret
+	lea	16($key,$bits),$inp	# points at the end of key schedule
+
+	$movkey	($key),%xmm0		# just swap
+	$movkey	($inp),%xmm1
+	$movkey	%xmm0,($inp)
+	$movkey	%xmm1,($key)
+	lea	16($key),$key
+	lea	-16($inp),$inp
+
+.Ldec_key_inverse:
+	$movkey	($key),%xmm0		# swap and inverse
+	$movkey	($inp),%xmm1
+	aesimc	%xmm0,%xmm0
+	aesimc	%xmm1,%xmm1
+	lea	16($key),$key
+	lea	-16($inp),$inp
+	$movkey	%xmm0,16($inp)
+	$movkey	%xmm1,-16($key)
+	cmp	$key,$inp
+	ja	.Ldec_key_inverse
+
+	$movkey	($key),%xmm0		# inverse middle
+	aesimc	%xmm0,%xmm0
+	pxor	%xmm1,%xmm1
+	$movkey	%xmm0,($inp)
+	pxor	%xmm0,%xmm0
+.Ldec_key_ret:
+	add	\$8,%rsp
+	ret
+.LSEH_end_set_decrypt_key:
+.size	${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key
+___
+
+# This is based on submission by
+#
+#	Huang Ying <ying.huang@intel.com>
+#	Vinodh Gopal <vinodh.gopal@intel.com>
+#	Kahraman Akdemir
+#
+# The key expansion is aggressively optimized with respect to
+# aeskeygenassist's critical path and is contained in %xmm0-5 to meet the
+# Win64 ABI requirement.
+#
+# int ${PREFIX}_set_encrypt_key(const unsigned char *inp,
+#				int bits, AES_KEY * const key);
+#
+# input:	$inp	user-supplied key
+#		$bits	$inp length in bits
+#		$key	pointer to key schedule
+# output:	%eax	0 on success, -1 or -2 on failure (see C)
+#		$bits	rounds-1 (used in aesni_set_decrypt_key)
+#		*$key	key schedule
+#		$key	pointer to key schedule (used in
+#			aesni_set_decrypt_key)
+#
+# The subroutine is frame-less, which means that only volatile registers
+# are used. Note that it's declared "abi-omnipotent", which means that
+# the number of volatile registers is smaller on Windows.
+#
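+# A minimal, hypothetical C-side sketch of how the return value is usually
+# checked ("user_key" and "ks" are placeholders; the "aesni_" prefix
+# assumes the default $PREFIX):
+#
+#	AES_KEY ks;
+#	int ret = aesni_set_encrypt_key(user_key, 256, &ks);
+#	if (ret != 0) {
+#		/* -1: NULL input or key pointer, -2: unsupported key length */
+#	}
+#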
+$code.=<<___;
+.globl	${PREFIX}_set_encrypt_key
+.type	${PREFIX}_set_encrypt_key,\@abi-omnipotent
+.align	16
+${PREFIX}_set_encrypt_key:
+__aesni_set_encrypt_key:
+	.byte	0x48,0x83,0xEC,0x08	# sub rsp,8
+	mov	\$-1,%rax
+	test	$inp,$inp
+	jz	.Lenc_key_ret
+	test	$key,$key
+	jz	.Lenc_key_ret
+
+	movups	($inp),%xmm0		# pull first 128 bits of *userKey
+	xorps	%xmm4,%xmm4		# low dword of xmm4 is assumed 0
+	movq	OPENSSL_ia32cap_addr(%rip),%r10
+	movl	4(%r10),%r10d
+	and	\$`1<<28|1<<11`,%r10d	# AVX and XOP bits
+	lea	16($key),%rax		# %rax is used as modifiable copy of $key
+	cmp	\$256,$bits
+	je	.L14rounds
+	cmp	\$192,$bits
+	je	.L12rounds
+	cmp	\$128,$bits
+	jne	.Lbad_keybits
+
+.L10rounds:
+	mov	\$9,$bits			# 10 rounds for 128-bit key
+	cmp	\$`1<<28`,%r10d			# AVX, but no XOP
+	je	.L10rounds_alt
+
+	$movkey	%xmm0,($key)			# round 0
+	aeskeygenassist	\$0x1,%xmm0,%xmm1	# round 1
+	call		.Lkey_expansion_128_cold
+	aeskeygenassist	\$0x2,%xmm0,%xmm1	# round 2
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x4,%xmm0,%xmm1	# round 3
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x8,%xmm0,%xmm1	# round 4
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x10,%xmm0,%xmm1	# round 5
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x20,%xmm0,%xmm1	# round 6
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x40,%xmm0,%xmm1	# round 7
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x80,%xmm0,%xmm1	# round 8
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x1b,%xmm0,%xmm1	# round 9
+	call		.Lkey_expansion_128
+	aeskeygenassist	\$0x36,%xmm0,%xmm1	# round 10
+	call		.Lkey_expansion_128
+	$movkey	%xmm0,(%rax)
+	mov	$bits,80(%rax)	# 240(%rdx)
+	xor	%eax,%eax
+	jmp	.Lenc_key_ret
+
+.align	16
+.L10rounds_alt:
+	movdqa	.Lkey_rotate(%rip),%xmm5
+	mov	\$8,%r10d
+	movdqa	.Lkey_rcon1(%rip),%xmm4
+	movdqa	%xmm0,%xmm2
+	movdqu	%xmm0,($key)
+	jmp	.Loop_key128
+
+.align	16
+.Loop_key128:
+	pshufb		%xmm5,%xmm0
+	aesenclast	%xmm4,%xmm0
+	pslld		\$1,%xmm4
+	lea		16(%rax),%rax
+
+	movdqa		%xmm2,%xmm3
+	pslldq		\$4,%xmm2
+	pxor		%xmm2,%xmm3
+	pslldq		\$4,%xmm2
+	pxor		%xmm2,%xmm3
+	pslldq		\$4,%xmm2
+	pxor		%xmm3,%xmm2
+
+	pxor		%xmm2,%xmm0
+	movdqu		%xmm0,-16(%rax)
+	movdqa		%xmm0,%xmm2
+
+	dec	%r10d
+	jnz	.Loop_key128
+
+	movdqa		.Lkey_rcon1b(%rip),%xmm4
+
+	pshufb		%xmm5,%xmm0
+	aesenclast	%xmm4,%xmm0
+	pslld		\$1,%xmm4
+
+	movdqa		%xmm2,%xmm3
+	pslldq		\$4,%xmm2
+	pxor		%xmm2,%xmm3
+	pslldq		\$4,%xmm2
+	pxor		%xmm2,%xmm3
+	pslldq		\$4,%xmm2
+	pxor		%xmm3,%xmm2
+
+	pxor		%xmm2,%xmm0
+	movdqu		%xmm0,(%rax)
+
+	movdqa		%xmm0,%xmm2
+	pshufb		%xmm5,%xmm0
+	aesenclast	%xmm4,%xmm0
+
+	movdqa		%xmm2,%xmm3
+	pslldq		\$4,%xmm2
+	pxor		%xmm2,%xmm3
+	pslldq		\$4,%xmm2
+	pxor		%xmm2,%xmm3
+	pslldq		\$4,%xmm2
+	pxor		%xmm3,%xmm2
+
+	pxor		%xmm2,%xmm0
+	movdqu		%xmm0,16(%rax)
+
+	mov	$bits,96(%rax)	# 240($key)
+	xor	%eax,%eax
+	jmp	.Lenc_key_ret
+
+.align	16
+.L12rounds:
+	movq	16($inp),%xmm2			# remaining 1/3 of *userKey
+	mov	\$11,$bits			# 12 rounds for 192
+	cmp	\$`1<<28`,%r10d			# AVX, but no XOP
+	je	.L12rounds_alt
+
+	$movkey	%xmm0,($key)			# round 0
+	aeskeygenassist	\$0x1,%xmm2,%xmm1	# round 1,2
+	call		.Lkey_expansion_192a_cold
+	aeskeygenassist	\$0x2,%xmm2,%xmm1	# round 2,3
+	call		.Lkey_expansion_192b
+	aeskeygenassist	\$0x4,%xmm2,%xmm1	# round 4,5
+	call		.Lkey_expansion_192a
+	aeskeygenassist	\$0x8,%xmm2,%xmm1	# round 5,6
+	call		.Lkey_expansion_192b
+	aeskeygenassist	\$0x10,%xmm2,%xmm1	# round 7,8
+	call		.Lkey_expansion_192a
+	aeskeygenassist	\$0x20,%xmm2,%xmm1	# round 8,9
+	call		.Lkey_expansion_192b
+	aeskeygenassist	\$0x40,%xmm2,%xmm1	# round 10,11
+	call		.Lkey_expansion_192a
+	aeskeygenassist	\$0x80,%xmm2,%xmm1	# round 11,12
+	call		.Lkey_expansion_192b
+	$movkey	%xmm0,(%rax)
+	mov	$bits,48(%rax)	# 240(%rdx)
+	xor	%rax, %rax
+	jmp	.Lenc_key_ret
+
+.align	16
+.L12rounds_alt:
+	movdqa	.Lkey_rotate192(%rip),%xmm5
+	movdqa	.Lkey_rcon1(%rip),%xmm4
+	mov	\$8,%r10d
+	movdqu	%xmm0,($key)
+	jmp	.Loop_key192
+
+.align	16
+.Loop_key192:
+	movq		%xmm2,0(%rax)
+	movdqa		%xmm2,%xmm1
+	pshufb		%xmm5,%xmm2
+	aesenclast	%xmm4,%xmm2
+	pslld		\$1, %xmm4
+	lea		24(%rax),%rax
+
+	movdqa		%xmm0,%xmm3
+	pslldq		\$4,%xmm0
+	pxor		%xmm0,%xmm3
+	pslldq		\$4,%xmm0
+	pxor		%xmm0,%xmm3
+	pslldq		\$4,%xmm0
+	pxor		%xmm3,%xmm0
+
+	pshufd		\$0xff,%xmm0,%xmm3
+	pxor		%xmm1,%xmm3
+	pslldq		\$4,%xmm1
+	pxor		%xmm1,%xmm3
+
+	pxor		%xmm2,%xmm0
+	pxor		%xmm3,%xmm2
+	movdqu		%xmm0,-16(%rax)
+
+	dec	%r10d
+	jnz	.Loop_key192
+
+	mov	$bits,32(%rax)	# 240($key)
+	xor	%eax,%eax
+	jmp	.Lenc_key_ret
+
+.align	16
+.L14rounds:
+	movups	16($inp),%xmm2			# remaining half of *userKey
+	mov	\$13,$bits			# 14 rounds for 256
+	lea	16(%rax),%rax
+	cmp	\$`1<<28`,%r10d			# AVX, but no XOP
+	je	.L14rounds_alt
+
+	$movkey	%xmm0,($key)			# round 0
+	$movkey	%xmm2,16($key)			# round 1
+	aeskeygenassist	\$0x1,%xmm2,%xmm1	# round 2
+	call		.Lkey_expansion_256a_cold
+	aeskeygenassist	\$0x1,%xmm0,%xmm1	# round 3
+	call		.Lkey_expansion_256b
+	aeskeygenassist	\$0x2,%xmm2,%xmm1	# round 4
+	call		.Lkey_expansion_256a
+	aeskeygenassist	\$0x2,%xmm0,%xmm1	# round 5
+	call		.Lkey_expansion_256b
+	aeskeygenassist	\$0x4,%xmm2,%xmm1	# round 6
+	call		.Lkey_expansion_256a
+	aeskeygenassist	\$0x4,%xmm0,%xmm1	# round 7
+	call		.Lkey_expansion_256b
+	aeskeygenassist	\$0x8,%xmm2,%xmm1	# round 8
+	call		.Lkey_expansion_256a
+	aeskeygenassist	\$0x8,%xmm0,%xmm1	# round 9
+	call		.Lkey_expansion_256b
+	aeskeygenassist	\$0x10,%xmm2,%xmm1	# round 10
+	call		.Lkey_expansion_256a
+	aeskeygenassist	\$0x10,%xmm0,%xmm1	# round 11
+	call		.Lkey_expansion_256b
+	aeskeygenassist	\$0x20,%xmm2,%xmm1	# round 12
+	call		.Lkey_expansion_256a
+	aeskeygenassist	\$0x20,%xmm0,%xmm1	# round 13
+	call		.Lkey_expansion_256b
+	aeskeygenassist	\$0x40,%xmm2,%xmm1	# round 14
+	call		.Lkey_expansion_256a
+	$movkey	%xmm0,(%rax)
+	mov	$bits,16(%rax)	# 240(%rdx)
+	xor	%rax,%rax
+	jmp	.Lenc_key_ret
+
+.align	16
+.L14rounds_alt:
+	movdqa	.Lkey_rotate(%rip),%xmm5
+	movdqa	.Lkey_rcon1(%rip),%xmm4
+	mov	\$7,%r10d
+	movdqu	%xmm0,0($key)
+	movdqa	%xmm2,%xmm1
+	movdqu	%xmm2,16($key)
+	jmp	.Loop_key256
+
+.align	16
+.Loop_key256:
+	pshufb		%xmm5,%xmm2
+	aesenclast	%xmm4,%xmm2
+
+	movdqa		%xmm0,%xmm3
+	pslldq		\$4,%xmm0
+	pxor		%xmm0,%xmm3
+	pslldq		\$4,%xmm0
+	pxor		%xmm0,%xmm3
+	pslldq		\$4,%xmm0
+	pxor		%xmm3,%xmm0
+	pslld		\$1,%xmm4
+
+	pxor		%xmm2,%xmm0
+	movdqu		%xmm0,(%rax)
+
+	dec	%r10d
+	jz	.Ldone_key256
+
+	pshufd		\$0xff,%xmm0,%xmm2
+	pxor		%xmm3,%xmm3
+	aesenclast	%xmm3,%xmm2
+
+	movdqa		%xmm1,%xmm3
+	pslldq		\$4,%xmm1
+	pxor		%xmm1,%xmm3
+	pslldq		\$4,%xmm1
+	pxor		%xmm1,%xmm3
+	pslldq		\$4,%xmm1
+	pxor		%xmm3,%xmm1
+
+	pxor		%xmm1,%xmm2
+	movdqu		%xmm2,16(%rax)
+	lea		32(%rax),%rax
+	movdqa		%xmm2,%xmm1
+
+	jmp	.Loop_key256
+
+.Ldone_key256:
+	mov	$bits,16(%rax)	# 240($key)
+	xor	%eax,%eax
+	jmp	.Lenc_key_ret
+
+.align	16
+.Lbad_keybits:
+	mov	\$-2,%rax
+.Lenc_key_ret:
+	pxor	%xmm0,%xmm0
+	pxor	%xmm1,%xmm1
+	pxor	%xmm2,%xmm2
+	pxor	%xmm3,%xmm3
+	pxor	%xmm4,%xmm4
+	pxor	%xmm5,%xmm5
+	add	\$8,%rsp
+	ret
+.LSEH_end_set_encrypt_key:
+
+.align	16
+.Lkey_expansion_128:
+	$movkey	%xmm0,(%rax)
+	lea	16(%rax),%rax
+.Lkey_expansion_128_cold:
+	shufps	\$0b00010000,%xmm0,%xmm4
+	xorps	%xmm4, %xmm0
+	shufps	\$0b10001100,%xmm0,%xmm4
+	xorps	%xmm4, %xmm0
+	shufps	\$0b11111111,%xmm1,%xmm1	# critical path
+	xorps	%xmm1,%xmm0
+	ret
+
+.align 16
+.Lkey_expansion_192a:
+	$movkey	%xmm0,(%rax)
+	lea	16(%rax),%rax
+.Lkey_expansion_192a_cold:
+	movaps	%xmm2, %xmm5
+.Lkey_expansion_192b_warm:
+	shufps	\$0b00010000,%xmm0,%xmm4
+	movdqa	%xmm2,%xmm3
+	xorps	%xmm4,%xmm0
+	shufps	\$0b10001100,%xmm0,%xmm4
+	pslldq	\$4,%xmm3
+	xorps	%xmm4,%xmm0
+	pshufd	\$0b01010101,%xmm1,%xmm1	# critical path
+	pxor	%xmm3,%xmm2
+	pxor	%xmm1,%xmm0
+	pshufd	\$0b11111111,%xmm0,%xmm3
+	pxor	%xmm3,%xmm2
+	ret
+
+.align 16
+.Lkey_expansion_192b:
+	movaps	%xmm0,%xmm3
+	shufps	\$0b01000100,%xmm0,%xmm5
+	$movkey	%xmm5,(%rax)
+	shufps	\$0b01001110,%xmm2,%xmm3
+	$movkey	%xmm3,16(%rax)
+	lea	32(%rax),%rax
+	jmp	.Lkey_expansion_192b_warm
+
+.align	16
+.Lkey_expansion_256a:
+	$movkey	%xmm2,(%rax)
+	lea	16(%rax),%rax
+.Lkey_expansion_256a_cold:
+	shufps	\$0b00010000,%xmm0,%xmm4
+	xorps	%xmm4,%xmm0
+	shufps	\$0b10001100,%xmm0,%xmm4
+	xorps	%xmm4,%xmm0
+	shufps	\$0b11111111,%xmm1,%xmm1	# critical path
+	xorps	%xmm1,%xmm0
+	ret
+
+.align 16
+.Lkey_expansion_256b:
+	$movkey	%xmm0,(%rax)
+	lea	16(%rax),%rax
+
+	shufps	\$0b00010000,%xmm2,%xmm4
+	xorps	%xmm4,%xmm2
+	shufps	\$0b10001100,%xmm2,%xmm4
+	xorps	%xmm4,%xmm2
+	shufps	\$0b10101010,%xmm1,%xmm1	# critical path
+	xorps	%xmm1,%xmm2
+	ret
+.size	${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key
+.size	__aesni_set_encrypt_key,.-__aesni_set_encrypt_key
+___
+}
+
+$code.=<<___;
+.align	64
+.Lbswap_mask:
+	.byte	15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
+.Lincrement32:
+	.long	6,6,6,0
+.Lincrement64:
+	.long	1,0,0,0
+.Lxts_magic:
+	.long	0x87,0,1,0
+.Lincrement1:
+	.byte	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1
+.Lkey_rotate:
+	.long	0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d
+.Lkey_rotate192:
+	.long	0x04070605,0x04070605,0x04070605,0x04070605
+.Lkey_rcon1:
+	.long	1,1,1,1
+.Lkey_rcon1b:
+	.long	0x1b,0x1b,0x1b,0x1b
+
+.asciz  "AES for Intel AES-NI, CRYPTOGAMS by <appro\@openssl.org>"
+.align	64
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
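+#
+# The handlers below let the Windows unwinder recover from an exception
+# raised inside these routines: depending on where context->Rip points,
+# they copy the saved %xmm6-15 back into the CONTEXT record, restore the
+# callee-saved general-purpose registers and the stack pointer, and then
+# hand off to RtlVirtualUnwind in the common tail.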
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+___
+$code.=<<___ if ($PREFIX eq "aesni");
+.type	ecb_ccm64_se_handler,\@abi-omnipotent
+.align	16
+ecb_ccm64_se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lcommon_seh_tail
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lcommon_seh_tail
+
+	lea	0(%rax),%rsi		# %xmm save area
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$8,%ecx		# 4*sizeof(%xmm0)/sizeof(%rax)
+	.long	0xa548f3fc		# cld; rep movsq
+	lea	0x58(%rax),%rax		# adjust stack pointer
+
+	jmp	.Lcommon_seh_tail
+.size	ecb_ccm64_se_handler,.-ecb_ccm64_se_handler
+
+.type	ctr_xts_se_handler,\@abi-omnipotent
+.align	16
+ctr_xts_se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lcommon_seh_tail
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lcommon_seh_tail
+
+	mov	208($context),%rax	# pull context->R11
+
+	lea	-0xa8(%rax),%rsi	# %xmm save area
+	lea	512($context),%rdi	# & context.Xmm6
+	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	-8(%rax),%rbp		# restore saved %rbp
+	mov	%rbp,160($context)	# restore context->Rbp
+	jmp	.Lcommon_seh_tail
+.size	ctr_xts_se_handler,.-ctr_xts_se_handler
+
+.type	ocb_se_handler,\@abi-omnipotent
+.align	16
+ocb_se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lcommon_seh_tail
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lcommon_seh_tail
+
+	mov	8(%r11),%r10d		# HandlerData[2]
+	lea	(%rsi,%r10),%r10
+	cmp	%r10,%rbx		# context->Rip>=pop label
+	jae	.Locb_no_xmm
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	lea	(%rax),%rsi		# %xmm save area
+	lea	512($context),%rdi	# & context.Xmm6
+	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
+	.long	0xa548f3fc		# cld; rep movsq
+	lea	0xa0+0x28(%rax),%rax
+
+.Locb_no_xmm:
+	mov	-8(%rax),%rbx
+	mov	-16(%rax),%rbp
+	mov	-24(%rax),%r12
+	mov	-32(%rax),%r13
+	mov	-40(%rax),%r14
+
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+
+	jmp	.Lcommon_seh_tail
+.size	ocb_se_handler,.-ocb_se_handler
+___
+$code.=<<___;
+.type	cbc_se_handler,\@abi-omnipotent
+.align	16
+cbc_se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	152($context),%rax	# pull context->Rsp
+	mov	248($context),%rbx	# pull context->Rip
+
+	lea	.Lcbc_decrypt_bulk(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<"prologue" label
+	jb	.Lcommon_seh_tail
+
+	mov	120($context),%rax	# pull context->Rax
+
+	lea	.Lcbc_decrypt_body(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<cbc_decrypt_body
+	jb	.Lcommon_seh_tail
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	lea	.Lcbc_ret(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip>="epilogue" label
+	jae	.Lcommon_seh_tail
+
+	lea	16(%rax),%rsi		# %xmm save area
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	208($context),%rax	# pull context->R11
+
+	mov	-8(%rax),%rbp		# restore saved %rbp
+	mov	%rbp,160($context)	# restore context->Rbp
+
+.Lcommon_seh_tail:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$154,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	cbc_se_handler,.-cbc_se_handler
+
+.section	.pdata
+.align	4
+___
+$code.=<<___ if ($PREFIX eq "aesni");
+	.rva	.LSEH_begin_aesni_ecb_encrypt
+	.rva	.LSEH_end_aesni_ecb_encrypt
+	.rva	.LSEH_info_ecb
+
+	.rva	.LSEH_begin_aesni_ccm64_encrypt_blocks
+	.rva	.LSEH_end_aesni_ccm64_encrypt_blocks
+	.rva	.LSEH_info_ccm64_enc
+
+	.rva	.LSEH_begin_aesni_ccm64_decrypt_blocks
+	.rva	.LSEH_end_aesni_ccm64_decrypt_blocks
+	.rva	.LSEH_info_ccm64_dec
+
+	.rva	.LSEH_begin_aesni_ctr32_encrypt_blocks
+	.rva	.LSEH_end_aesni_ctr32_encrypt_blocks
+	.rva	.LSEH_info_ctr32
+
+	.rva	.LSEH_begin_aesni_xts_encrypt
+	.rva	.LSEH_end_aesni_xts_encrypt
+	.rva	.LSEH_info_xts_enc
+
+	.rva	.LSEH_begin_aesni_xts_decrypt
+	.rva	.LSEH_end_aesni_xts_decrypt
+	.rva	.LSEH_info_xts_dec
+
+	.rva	.LSEH_begin_aesni_ocb_encrypt
+	.rva	.LSEH_end_aesni_ocb_encrypt
+	.rva	.LSEH_info_ocb_enc
+
+	.rva	.LSEH_begin_aesni_ocb_decrypt
+	.rva	.LSEH_end_aesni_ocb_decrypt
+	.rva	.LSEH_info_ocb_dec
+___
+$code.=<<___;
+	.rva	.LSEH_begin_${PREFIX}_cbc_encrypt
+	.rva	.LSEH_end_${PREFIX}_cbc_encrypt
+	.rva	.LSEH_info_cbc
+
+	.rva	${PREFIX}_set_decrypt_key
+	.rva	.LSEH_end_set_decrypt_key
+	.rva	.LSEH_info_key
+
+	.rva	${PREFIX}_set_encrypt_key
+	.rva	.LSEH_end_set_encrypt_key
+	.rva	.LSEH_info_key
+.section	.xdata
+.align	8
+___
+$code.=<<___ if ($PREFIX eq "aesni");
+.LSEH_info_ecb:
+	.byte	9,0,0,0
+	.rva	ecb_ccm64_se_handler
+	.rva	.Lecb_enc_body,.Lecb_enc_ret		# HandlerData[]
+.LSEH_info_ccm64_enc:
+	.byte	9,0,0,0
+	.rva	ecb_ccm64_se_handler
+	.rva	.Lccm64_enc_body,.Lccm64_enc_ret	# HandlerData[]
+.LSEH_info_ccm64_dec:
+	.byte	9,0,0,0
+	.rva	ecb_ccm64_se_handler
+	.rva	.Lccm64_dec_body,.Lccm64_dec_ret	# HandlerData[]
+.LSEH_info_ctr32:
+	.byte	9,0,0,0
+	.rva	ctr_xts_se_handler
+	.rva	.Lctr32_body,.Lctr32_epilogue		# HandlerData[]
+.LSEH_info_xts_enc:
+	.byte	9,0,0,0
+	.rva	ctr_xts_se_handler
+	.rva	.Lxts_enc_body,.Lxts_enc_epilogue	# HandlerData[]
+.LSEH_info_xts_dec:
+	.byte	9,0,0,0
+	.rva	ctr_xts_se_handler
+	.rva	.Lxts_dec_body,.Lxts_dec_epilogue	# HandlerData[]
+.LSEH_info_ocb_enc:
+	.byte	9,0,0,0
+	.rva	ocb_se_handler
+	.rva	.Locb_enc_body,.Locb_enc_epilogue	# HandlerData[]
+	.rva	.Locb_enc_pop
+	.long	0
+.LSEH_info_ocb_dec:
+	.byte	9,0,0,0
+	.rva	ocb_se_handler
+	.rva	.Locb_dec_body,.Locb_dec_epilogue	# HandlerData[]
+	.rva	.Locb_dec_pop
+	.long	0
+___
+$code.=<<___;
+.LSEH_info_cbc:
+	.byte	9,0,0,0
+	.rva	cbc_se_handler
+.LSEH_info_key:
+	.byte	0x01,0x04,0x01,0x00
+	.byte	0x04,0x02,0x00,0x00	# sub rsp,8
+___
+}
+
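+# The helpers below hand-encode AES-NI and MOVBE instructions as .byte
+# sequences, presumably so that the generated source still assembles with
+# toolchains whose assemblers predate these mnemonics; rex() prepends the
+# REX prefix whenever either operand is an extended register (xmm8 and up).
+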
+sub rex {
+  local *opcode=shift;
+  my ($dst,$src)=@_;
+  my $rex=0;
+
+    $rex|=0x04			if($dst>=8);
+    $rex|=0x01			if($src>=8);
+    push @opcode,$rex|0x40	if($rex);
+}
+
+sub aesni {
+  my $line=shift;
+  my @opcode=(0x66);
+
+    if ($line=~/(aeskeygenassist)\s+\$([x0-9a-f]+),\s*%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+	rex(\@opcode,$4,$3);
+	push @opcode,0x0f,0x3a,0xdf;
+	push @opcode,0xc0|($3&7)|(($4&7)<<3);	# ModR/M
+	my $c=$2;
+	push @opcode,$c=~/^0/?oct($c):$c;
+	return ".byte\t".join(',',@opcode);
+    }
+    elsif ($line=~/(aes[a-z]+)\s+%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+	my %opcodelet = (
+		"aesimc" => 0xdb,
+		"aesenc" => 0xdc,	"aesenclast" => 0xdd,
+		"aesdec" => 0xde,	"aesdeclast" => 0xdf
+	);
+	return undef if (!defined($opcodelet{$1}));
+	rex(\@opcode,$3,$2);
+	push @opcode,0x0f,0x38,$opcodelet{$1};
+	push @opcode,0xc0|($2&7)|(($3&7)<<3);	# ModR/M
+	return ".byte\t".join(',',@opcode);
+    }
+    elsif ($line=~/(aes[a-z]+)\s+([0x1-9a-fA-F]*)\(%rsp\),\s*%xmm([0-9]+)/) {
+	my %opcodelet = (
+		"aesenc" => 0xdc,	"aesenclast" => 0xdd,
+		"aesdec" => 0xde,	"aesdeclast" => 0xdf
+	);
+	return undef if (!defined($opcodelet{$1}));
+	my $off = $2;
+	push @opcode,0x44 if ($3>=8);
+	push @opcode,0x0f,0x38,$opcodelet{$1};
+	push @opcode,0x44|(($3&7)<<3),0x24;	# ModR/M
+	push @opcode,($off=~/^0/?oct($off):$off)&0xff;
+	return ".byte\t".join(',',@opcode);
+    }
+    return $line;
+}
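+
+# A worked example of the encoder above (the input is illustrative, not a
+# line from this file): aesni("aesenc\t%xmm1,%xmm0") returns
+# ".byte\t102,15,56,220,193", i.e. 0x66,0x0f,0x38,0xdc,0xc1 - the raw
+# AESENC encoding - so the output assembles even with assemblers that
+# predate the AES-NI mnemonics.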
+
+sub movbe {
+	".byte	0x0f,0x38,0xf1,0x44,0x24,".shift;
+}
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+$code =~ s/\b(aes.*%xmm[0-9]+).*$/aesni($1)/gem;
+#$code =~ s/\bmovbe\s+%eax/bswap %eax; mov %eax/gm;	# debugging artefact
+$code =~ s/\bmovbe\s+%eax,\s*([0-9]+)\(%rsp\)/movbe($1)/gem;
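+# The three active substitutions above post-process $code: the first
+# interpolates `...` spans as Perl expressions, the second feeds whole
+# aes* instructions through aesni() to turn them into .byte sequences
+# (unrecognized ones pass through unchanged), and the third does the
+# same for movbe stores to the stack.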
+
+print $code;
+
+close STDOUT;
diff --git a/src/crypto/fipsmodule/aes/asm/aesp8-ppc.pl b/src/crypto/fipsmodule/aes/asm/aesp8-ppc.pl
new file mode 100644
index 0000000..fb76d28
--- /dev/null
+++ b/src/crypto/fipsmodule/aes/asm/aesp8-ppc.pl
@@ -0,0 +1,3805 @@
+#! /usr/bin/env perl
+# Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements support for AES instructions as per PowerISA
+# specification version 2.07, first implemented by the POWER8 processor.
+# The module is endian-agnostic in the sense that it supports both big-
+# and little-endian cases. Data alignment in parallelizable modes is
+# handled with VSX loads and stores, which implies the MSR.VSX flag being
+# set. It should also be noted that the ISA specification doesn't prohibit
+# alignment exceptions for these instructions on page boundaries.
+# Initially alignment was handled in a pure AltiVec/VMX way [with data
+# aligned programmatically, which in turn guarantees exception-
+# free execution], but that turned out to hamper performance when vcipher
+# instructions are interleaved. It's reckoned that the occasional
+# misalignment penalties at page boundaries are on average lower
+# than the additional overhead of the pure AltiVec approach.
+#
+# May 2016
+#
+# Added XTS subroutine; 9x improvement on little- and 12x on big-endian
+# systems was measured.
+#
+######################################################################
+# Current large-block performance in cycles per byte processed with
+# 128-bit key (less is better).
+#
+#		CBC en-/decrypt	CTR	XTS
+# POWER8[le]	3.96/0.72	0.74	1.1
+# POWER8[be]	3.75/0.65	0.66	1.0
+
+$flavour = shift;
+
+if ($flavour =~ /64/) {
+	$SIZE_T	=8;
+	$LRSAVE	=2*$SIZE_T;
+	$STU	="stdu";
+	$POP	="ld";
+	$PUSH	="std";
+	$UCMP	="cmpld";
+	$SHL	="sldi";
+} elsif ($flavour =~ /32/) {
+	$SIZE_T	=4;
+	$LRSAVE	=$SIZE_T;
+	$STU	="stwu";
+	$POP	="lwz";
+	$PUSH	="stw";
+	$UCMP	="cmplw";
+	$SHL	="slwi";
+} else { die "nonsense $flavour"; }
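+
+# $flavour is e.g. "linux64", "linux32" or "linux64le" (whatever the
+# build passes to this script); the 64-bit flavours select doubleword
+# load/store/compare mnemonics, the 32-bit ones their word-sized
+# counterparts.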
+
+$LITTLE_ENDIAN = ($flavour=~/le$/) ? $SIZE_T : 0;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift or die "can't call $xlate: $!";
+
+$FRAME=8*$SIZE_T;
+$prefix="aes_hw";
+
+$sp="r1";
+$vrsave="r12";
+
+#########################################################################
+{{{	# Key setup procedures						#
+my ($inp,$bits,$out,$ptr,$cnt,$rounds)=map("r$_",(3..8));
+my ($zero,$in0,$in1,$key,$rcon,$mask,$tmp)=map("v$_",(0..6));
+my ($stage,$outperm,$outmask,$outhead,$outtail)=map("v$_",(7..11));
+
+$code.=<<___;
+.machine	"any"
+
+.text
+
+.align	7
+rcon:
+.long	0x01000000, 0x01000000, 0x01000000, 0x01000000	?rev
+.long	0x1b000000, 0x1b000000, 0x1b000000, 0x1b000000	?rev
+.long	0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c	?rev
+.long	0,0,0,0						?asis
+Lconsts:
+	mflr	r0
+	bcl	20,31,\$+4
+	mflr	$ptr	 #vvvvv "distance between . and rcon"
+	addi	$ptr,$ptr,-0x48
+	mtlr	r0
+	blr
+	.long	0
+	.byte	0,12,0x14,0,0,0,0,0
+.asciz	"AES for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
+
+.globl	.${prefix}_set_encrypt_key
+.align	5
+.${prefix}_set_encrypt_key:
+Lset_encrypt_key:
+	mflr		r11
+	$PUSH		r11,$LRSAVE($sp)
+
+	li		$ptr,-1
+	${UCMP}i	$inp,0
+	beq-		Lenc_key_abort		# if ($inp==0) return -1;
+	${UCMP}i	$out,0
+	beq-		Lenc_key_abort		# if ($out==0) return -1;
+	li		$ptr,-2
+	cmpwi		$bits,128
+	blt-		Lenc_key_abort
+	cmpwi		$bits,256
+	bgt-		Lenc_key_abort
+	andi.		r0,$bits,0x3f
+	bne-		Lenc_key_abort
+
+	lis		r0,0xfff0
+	mfspr		$vrsave,256
+	mtspr		256,r0
+
+	bl		Lconsts
+	mtlr		r11
+
+	neg		r9,$inp
+	lvx		$in0,0,$inp
+	addi		$inp,$inp,15		# 15 is not a typo
+	lvsr		$key,0,r9		# borrow $key
+	li		r8,0x20
+	cmpwi		$bits,192
+	lvx		$in1,0,$inp
+	le?vspltisb	$mask,0x0f		# borrow $mask
+	lvx		$rcon,0,$ptr
+	le?vxor		$key,$key,$mask		# adjust for byte swap
+	lvx		$mask,r8,$ptr
+	addi		$ptr,$ptr,0x10
+	vperm		$in0,$in0,$in1,$key	# align [and byte swap in LE]
+	li		$cnt,8
+	vxor		$zero,$zero,$zero
+	mtctr		$cnt
+
+	?lvsr		$outperm,0,$out
+	vspltisb	$outmask,-1
+	lvx		$outhead,0,$out
+	?vperm		$outmask,$zero,$outmask,$outperm
+
+	blt		Loop128
+	addi		$inp,$inp,8
+	beq		L192
+	addi		$inp,$inp,8
+	b		L256
+
+.align	4
+Loop128:
+	vperm		$key,$in0,$in0,$mask	# rotate-n-splat
+	vsldoi		$tmp,$zero,$in0,12	# >>32
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	vcipherlast	$key,$key,$rcon
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	 vadduwm	$rcon,$rcon,$rcon
+	vxor		$in0,$in0,$key
+	bdnz		Loop128
+
+	lvx		$rcon,0,$ptr		# last two round keys
+
+	vperm		$key,$in0,$in0,$mask	# rotate-n-splat
+	vsldoi		$tmp,$zero,$in0,12	# >>32
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	vcipherlast	$key,$key,$rcon
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	 vadduwm	$rcon,$rcon,$rcon
+	vxor		$in0,$in0,$key
+
+	vperm		$key,$in0,$in0,$mask	# rotate-n-splat
+	vsldoi		$tmp,$zero,$in0,12	# >>32
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	vcipherlast	$key,$key,$rcon
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	vxor		$in0,$in0,$key
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	 stvx		$stage,0,$out
+
+	addi		$inp,$out,15		# 15 is not a typo
+	addi		$out,$out,0x50
+
+	li		$rounds,10
+	b		Ldone
+
+.align	4
+L192:
+	lvx		$tmp,0,$inp
+	li		$cnt,4
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+	vperm		$in1,$in1,$tmp,$key	# align [and byte swap in LE]
+	vspltisb	$key,8			# borrow $key
+	mtctr		$cnt
+	vsububm		$mask,$mask,$key	# adjust the mask
+
+Loop192:
+	vperm		$key,$in1,$in1,$mask	# rotate-n-splat
+	vsldoi		$tmp,$zero,$in0,12	# >>32
+	vcipherlast	$key,$key,$rcon
+
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+
+	 vsldoi		$stage,$zero,$in1,8
+	vspltw		$tmp,$in0,3
+	vxor		$tmp,$tmp,$in1
+	vsldoi		$in1,$zero,$in1,12	# >>32
+	 vadduwm	$rcon,$rcon,$rcon
+	vxor		$in1,$in1,$tmp
+	vxor		$in0,$in0,$key
+	vxor		$in1,$in1,$key
+	 vsldoi		$stage,$stage,$in0,8
+
+	vperm		$key,$in1,$in1,$mask	# rotate-n-splat
+	vsldoi		$tmp,$zero,$in0,12	# >>32
+	 vperm		$outtail,$stage,$stage,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	vcipherlast	$key,$key,$rcon
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+
+	 vsldoi		$stage,$in0,$in1,8
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	 vperm		$outtail,$stage,$stage,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+
+	vspltw		$tmp,$in0,3
+	vxor		$tmp,$tmp,$in1
+	vsldoi		$in1,$zero,$in1,12	# >>32
+	 vadduwm	$rcon,$rcon,$rcon
+	vxor		$in1,$in1,$tmp
+	vxor		$in0,$in0,$key
+	vxor		$in1,$in1,$key
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	 stvx		$stage,0,$out
+	 addi		$inp,$out,15		# 15 is not a typo
+	 addi		$out,$out,16
+	bdnz		Loop192
+
+	li		$rounds,12
+	addi		$out,$out,0x20
+	b		Ldone
+
+.align	4
+L256:
+	lvx		$tmp,0,$inp
+	li		$cnt,7
+	li		$rounds,14
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+	vperm		$in1,$in1,$tmp,$key	# align [and byte swap in LE]
+	mtctr		$cnt
+
+Loop256:
+	vperm		$key,$in1,$in1,$mask	# rotate-n-splat
+	vsldoi		$tmp,$zero,$in0,12	# >>32
+	 vperm		$outtail,$in1,$in1,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	vcipherlast	$key,$key,$rcon
+	 stvx		$stage,0,$out
+	 addi		$out,$out,16
+
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in0,$in0,$tmp
+	 vadduwm	$rcon,$rcon,$rcon
+	vxor		$in0,$in0,$key
+	 vperm		$outtail,$in0,$in0,$outperm	# rotate
+	 vsel		$stage,$outhead,$outtail,$outmask
+	 vmr		$outhead,$outtail
+	 stvx		$stage,0,$out
+	 addi		$inp,$out,15		# 15 is not a typo
+	 addi		$out,$out,16
+	bdz		Ldone
+
+	vspltw		$key,$in0,3		# just splat
+	vsldoi		$tmp,$zero,$in1,12	# >>32
+	vsbox		$key,$key
+
+	vxor		$in1,$in1,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in1,$in1,$tmp
+	vsldoi		$tmp,$zero,$tmp,12	# >>32
+	vxor		$in1,$in1,$tmp
+
+	vxor		$in1,$in1,$key
+	b		Loop256
+
+.align	4
+Ldone:
+	lvx		$in1,0,$inp		# redundant in aligned case
+	vsel		$in1,$outhead,$in1,$outmask
+	stvx		$in1,0,$inp
+	li		$ptr,0
+	mtspr		256,$vrsave
+	stw		$rounds,0($out)
+
+Lenc_key_abort:
+	mr		r3,$ptr
+	blr
+	.long		0
+	.byte		0,12,0x14,1,0,0,3,0
+	.long		0
+.size	.${prefix}_set_encrypt_key,.-.${prefix}_set_encrypt_key
+
+.globl	.${prefix}_set_decrypt_key
+.align	5
+.${prefix}_set_decrypt_key:
+	$STU		$sp,-$FRAME($sp)
+	mflr		r10
+	$PUSH		r10,$FRAME+$LRSAVE($sp)
+	bl		Lset_encrypt_key
+	mtlr		r10
+
+	cmpwi		r3,0
+	bne-		Ldec_key_abort
+
+	slwi		$cnt,$rounds,4
+	subi		$inp,$out,240		# first round key
+	srwi		$rounds,$rounds,1
+	add		$out,$inp,$cnt		# last round key
+	mtctr		$rounds
+
+Ldeckey:
+	lwz		r0, 0($inp)
+	lwz		r6, 4($inp)
+	lwz		r7, 8($inp)
+	lwz		r8, 12($inp)
+	addi		$inp,$inp,16
+	lwz		r9, 0($out)
+	lwz		r10,4($out)
+	lwz		r11,8($out)
+	lwz		r12,12($out)
+	stw		r0, 0($out)
+	stw		r6, 4($out)
+	stw		r7, 8($out)
+	stw		r8, 12($out)
+	subi		$out,$out,16
+	stw		r9, -16($inp)
+	stw		r10,-12($inp)
+	stw		r11,-8($inp)
+	stw		r12,-4($inp)
+	bdnz		Ldeckey
+
+	xor		r3,r3,r3		# return value
+Ldec_key_abort:
+	addi		$sp,$sp,$FRAME
+	blr
+	.long		0
+	.byte		0,12,4,1,0x80,0,3,0
+	.long		0
+.size	.${prefix}_set_decrypt_key,.-.${prefix}_set_decrypt_key
+___
+}}}
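+
+# Reference model for the key setup above (plain-Perl pseudo-code for one
+# AES-128 schedule step; not executed by this script):
+#
+#   # w[] are 32-bit words, w[0..3] = cipher key
+#   # w[i] = w[i-4] ^ ($i % 4 ? w[i-1]
+#   #                         : SubWord(RotWord(w[i-1])) ^ rcon[i/4]);
+#
+# Loop128 performs the same derivation on whole 128-bit round keys: vperm
+# is the rotate-and-splat, vcipherlast supplies SubWord (ShiftRows is a
+# no-op on a splatted state, and its AddRoundKey step applies $rcon), and
+# the vsldoi/vxor chain computes the running XOR across the four words.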
+#########################################################################
+{{{	# Single block en- and decrypt procedures			#
+sub gen_block () {
+my $dir = shift;
+my $n   = $dir eq "de" ? "n" : "";
+my ($inp,$out,$key,$rounds,$idx)=map("r$_",(3..7));
+
+$code.=<<___;
+.globl	.${prefix}_${dir}crypt
+.align	5
+.${prefix}_${dir}crypt:
+	lwz		$rounds,240($key)
+	lis		r0,0xfc00
+	mfspr		$vrsave,256
+	li		$idx,15			# 15 is not a typo
+	mtspr		256,r0
+
+	lvx		v0,0,$inp
+	neg		r11,$out
+	lvx		v1,$idx,$inp
+	lvsl		v2,0,$inp		# inpperm
+	le?vspltisb	v4,0x0f
+	?lvsl		v3,0,r11		# outperm
+	le?vxor		v2,v2,v4
+	li		$idx,16
+	vperm		v0,v0,v1,v2		# align [and byte swap in LE]
+	lvx		v1,0,$key
+	?lvsl		v5,0,$key		# keyperm
+	srwi		$rounds,$rounds,1
+	lvx		v2,$idx,$key
+	addi		$idx,$idx,16
+	subi		$rounds,$rounds,1
+	?vperm		v1,v1,v2,v5		# align round key
+
+	vxor		v0,v0,v1
+	lvx		v1,$idx,$key
+	addi		$idx,$idx,16
+	mtctr		$rounds
+
+Loop_${dir}c:
+	?vperm		v2,v2,v1,v5
+	v${n}cipher	v0,v0,v2
+	lvx		v2,$idx,$key
+	addi		$idx,$idx,16
+	?vperm		v1,v1,v2,v5
+	v${n}cipher	v0,v0,v1
+	lvx		v1,$idx,$key
+	addi		$idx,$idx,16
+	bdnz		Loop_${dir}c
+
+	?vperm		v2,v2,v1,v5
+	v${n}cipher	v0,v0,v2
+	lvx		v2,$idx,$key
+	?vperm		v1,v1,v2,v5
+	v${n}cipherlast	v0,v0,v1
+
+	vspltisb	v2,-1
+	vxor		v1,v1,v1
+	li		$idx,15			# 15 is not a typo
+	?vperm		v2,v1,v2,v3		# outmask
+	le?vxor		v3,v3,v4
+	lvx		v1,0,$out		# outhead
+	vperm		v0,v0,v0,v3		# rotate [and byte swap in LE]
+	vsel		v1,v1,v0,v2
+	lvx		v4,$idx,$out
+	stvx		v1,0,$out
+	vsel		v0,v0,v4,v2
+	stvx		v0,$idx,$out
+
+	mtspr		256,$vrsave
+	blr
+	.long		0
+	.byte		0,12,0x14,0,0,0,3,0
+	.long		0
+.size	.${prefix}_${dir}crypt,.-.${prefix}_${dir}crypt
+___
+}
+&gen_block("en");
+&gen_block("de");
+}}}
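+
+# gen_block() stamps both entry points from one template; the only
+# difference between .aes_hw_encrypt and .aes_hw_decrypt is the "n" infix
+# selecting vcipher/vcipherlast versus vncipher/vncipherlast.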
+#########################################################################
+{{{	# CBC en- and decrypt procedures				#
+my ($inp,$out,$len,$key,$ivp,$enc,$rounds,$idx)=map("r$_",(3..10));
+my ($rndkey0,$rndkey1,$inout,$tmp)=		map("v$_",(0..3));
+my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm)=
+						map("v$_",(4..10));
+$code.=<<___;
+.globl	.${prefix}_cbc_encrypt
+.align	5
+.${prefix}_cbc_encrypt:
+	${UCMP}i	$len,16
+	bltlr-
+
+	cmpwi		$enc,0			# test direction
+	lis		r0,0xffe0
+	mfspr		$vrsave,256
+	mtspr		256,r0
+
+	li		$idx,15
+	vxor		$rndkey0,$rndkey0,$rndkey0
+	le?vspltisb	$tmp,0x0f
+
+	lvx		$ivec,0,$ivp		# load [unaligned] iv
+	lvsl		$inpperm,0,$ivp
+	lvx		$inptail,$idx,$ivp
+	le?vxor		$inpperm,$inpperm,$tmp
+	vperm		$ivec,$ivec,$inptail,$inpperm
+
+	neg		r11,$inp
+	?lvsl		$keyperm,0,$key		# prepare for unaligned key
+	lwz		$rounds,240($key)
+
+	lvsr		$inpperm,0,r11		# prepare for unaligned load
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,15		# 15 is not a typo
+	le?vxor		$inpperm,$inpperm,$tmp
+
+	?lvsr		$outperm,0,$out		# prepare for unaligned store
+	vspltisb	$outmask,-1
+	lvx		$outhead,0,$out
+	?vperm		$outmask,$rndkey0,$outmask,$outperm
+	le?vxor		$outperm,$outperm,$tmp
+
+	srwi		$rounds,$rounds,1
+	li		$idx,16
+	subi		$rounds,$rounds,1
+	beq		Lcbc_dec
+
+Lcbc_enc:
+	vmr		$inout,$inptail
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,16
+	mtctr		$rounds
+	subi		$len,$len,16		# len-=16
+
+	lvx		$rndkey0,0,$key
+	 vperm		$inout,$inout,$inptail,$inpperm
+	lvx		$rndkey1,$idx,$key
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key
+	addi		$idx,$idx,16
+	vxor		$inout,$inout,$ivec
+
+Loop_cbc_enc:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipher		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key
+	addi		$idx,$idx,16
+	bdnz		Loop_cbc_enc
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key
+	li		$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipherlast	$ivec,$inout,$rndkey0
+	${UCMP}i	$len,16
+
+	vperm		$tmp,$ivec,$ivec,$outperm
+	vsel		$inout,$outhead,$tmp,$outmask
+	vmr		$outhead,$tmp
+	stvx		$inout,0,$out
+	addi		$out,$out,16
+	bge		Lcbc_enc
+
+	b		Lcbc_done
+
+.align	4
+Lcbc_dec:
+	${UCMP}i	$len,128
+	bge		_aesp8_cbc_decrypt8x
+	vmr		$tmp,$inptail
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,16
+	mtctr		$rounds
+	subi		$len,$len,16		# len-=16
+
+	lvx		$rndkey0,0,$key
+	 vperm		$tmp,$tmp,$inptail,$inpperm
+	lvx		$rndkey1,$idx,$key
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$inout,$tmp,$rndkey0
+	lvx		$rndkey0,$idx,$key
+	addi		$idx,$idx,16
+
+Loop_cbc_dec:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vncipher	$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vncipher	$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key
+	addi		$idx,$idx,16
+	bdnz		Loop_cbc_dec
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vncipher	$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key
+	li		$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vncipherlast	$inout,$inout,$rndkey0
+	${UCMP}i	$len,16
+
+	vxor		$inout,$inout,$ivec
+	vmr		$ivec,$tmp
+	vperm		$tmp,$inout,$inout,$outperm
+	vsel		$inout,$outhead,$tmp,$outmask
+	vmr		$outhead,$tmp
+	stvx		$inout,0,$out
+	addi		$out,$out,16
+	bge		Lcbc_dec
+
+Lcbc_done:
+	addi		$out,$out,-1
+	lvx		$inout,0,$out		# redundant in aligned case
+	vsel		$inout,$outhead,$inout,$outmask
+	stvx		$inout,0,$out
+
+	neg		$enc,$ivp		# write [unaligned] iv
+	li		$idx,15			# 15 is not a typo
+	vxor		$rndkey0,$rndkey0,$rndkey0
+	vspltisb	$outmask,-1
+	le?vspltisb	$tmp,0x0f
+	?lvsl		$outperm,0,$enc
+	?vperm		$outmask,$rndkey0,$outmask,$outperm
+	le?vxor		$outperm,$outperm,$tmp
+	lvx		$outhead,0,$ivp
+	vperm		$ivec,$ivec,$ivec,$outperm
+	vsel		$inout,$outhead,$ivec,$outmask
+	lvx		$inptail,$idx,$ivp
+	stvx		$inout,0,$ivp
+	vsel		$inout,$ivec,$inptail,$outmask
+	stvx		$inout,$idx,$ivp
+
+	mtspr		256,$vrsave
+	blr
+	.long		0
+	.byte		0,12,0x14,0,0,0,6,0
+	.long		0
+___
+#########################################################################
+{{	# Optimized CBC decrypt procedure				#
+my $key_="r11";
+my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,8,26..31));
+    $x00=0 if ($flavour =~ /osx/);
+my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10..13));
+my ($out0,$out1,$out2,$out3,$out4,$out5,$out6,$out7)=map("v$_",(14..21));
+my $rndkey0="v23";	# v24-v25 rotating buffer for first found keys
+			# v26-v31 last 6 round keys
+my ($tmp,$keyperm)=($in3,$in4);	# aliases with "caller", redundant assignment
+
+$code.=<<___;
+.align	5
+_aesp8_cbc_decrypt8x:
+	$STU		$sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+	li		r10,`$FRAME+8*16+15`
+	li		r11,`$FRAME+8*16+31`
+	stvx		v20,r10,$sp		# ABI says so
+	addi		r10,r10,32
+	stvx		v21,r11,$sp
+	addi		r11,r11,32
+	stvx		v22,r10,$sp
+	addi		r10,r10,32
+	stvx		v23,r11,$sp
+	addi		r11,r11,32
+	stvx		v24,r10,$sp
+	addi		r10,r10,32
+	stvx		v25,r11,$sp
+	addi		r11,r11,32
+	stvx		v26,r10,$sp
+	addi		r10,r10,32
+	stvx		v27,r11,$sp
+	addi		r11,r11,32
+	stvx		v28,r10,$sp
+	addi		r10,r10,32
+	stvx		v29,r11,$sp
+	addi		r11,r11,32
+	stvx		v30,r10,$sp
+	stvx		v31,r11,$sp
+	li		r0,-1
+	stw		$vrsave,`$FRAME+21*16-4`($sp)	# save vrsave
+	li		$x10,0x10
+	$PUSH		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	li		$x20,0x20
+	$PUSH		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	li		$x30,0x30
+	$PUSH		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	li		$x40,0x40
+	$PUSH		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	li		$x50,0x50
+	$PUSH		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	li		$x60,0x60
+	$PUSH		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	li		$x70,0x70
+	mtspr		256,r0
+
+	subi		$rounds,$rounds,3	# -4 in total
+	subi		$len,$len,128		# bias
+
+	lvx		$rndkey0,$x00,$key	# load key schedule
+	lvx		v30,$x10,$key
+	addi		$key,$key,0x20
+	lvx		v31,$x00,$key
+	?vperm		$rndkey0,$rndkey0,v30,$keyperm
+	addi		$key_,$sp,$FRAME+15
+	mtctr		$rounds
+
+Load_cbc_dec_key:
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v30,$x10,$key
+	addi		$key,$key,0x20
+	stvx		v24,$x00,$key_		# off-load round[1]
+	?vperm		v25,v31,v30,$keyperm
+	lvx		v31,$x00,$key
+	stvx		v25,$x10,$key_		# off-load round[2]
+	addi		$key_,$key_,0x20
+	bdnz		Load_cbc_dec_key
+
+	lvx		v26,$x10,$key
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v27,$x20,$key
+	stvx		v24,$x00,$key_		# off-load round[3]
+	?vperm		v25,v31,v26,$keyperm
+	lvx		v28,$x30,$key
+	stvx		v25,$x10,$key_		# off-load round[4]
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	?vperm		v26,v26,v27,$keyperm
+	lvx		v29,$x40,$key
+	?vperm		v27,v27,v28,$keyperm
+	lvx		v30,$x50,$key
+	?vperm		v28,v28,v29,$keyperm
+	lvx		v31,$x60,$key
+	?vperm		v29,v29,v30,$keyperm
+	lvx		$out0,$x70,$key		# borrow $out0
+	?vperm		v30,v30,v31,$keyperm
+	lvx		v24,$x00,$key_		# pre-load round[1]
+	?vperm		v31,v31,$out0,$keyperm
+	lvx		v25,$x10,$key_		# pre-load round[2]
+
+	#lvx		$inptail,0,$inp		# "caller" already did this
+	#addi		$inp,$inp,15		# 15 is not a typo
+	subi		$inp,$inp,15		# undo "caller"
+
+	 le?li		$idx,8
+	lvx_u		$in0,$x00,$inp		# load first 8 "words"
+	 le?lvsl	$inpperm,0,$idx
+	 le?vspltisb	$tmp,0x0f
+	lvx_u		$in1,$x10,$inp
+	 le?vxor	$inpperm,$inpperm,$tmp	# transform for lvx_u/stvx_u
+	lvx_u		$in2,$x20,$inp
+	 le?vperm	$in0,$in0,$in0,$inpperm
+	lvx_u		$in3,$x30,$inp
+	 le?vperm	$in1,$in1,$in1,$inpperm
+	lvx_u		$in4,$x40,$inp
+	 le?vperm	$in2,$in2,$in2,$inpperm
+	vxor		$out0,$in0,$rndkey0
+	lvx_u		$in5,$x50,$inp
+	 le?vperm	$in3,$in3,$in3,$inpperm
+	vxor		$out1,$in1,$rndkey0
+	lvx_u		$in6,$x60,$inp
+	 le?vperm	$in4,$in4,$in4,$inpperm
+	vxor		$out2,$in2,$rndkey0
+	lvx_u		$in7,$x70,$inp
+	addi		$inp,$inp,0x80
+	 le?vperm	$in5,$in5,$in5,$inpperm
+	vxor		$out3,$in3,$rndkey0
+	 le?vperm	$in6,$in6,$in6,$inpperm
+	vxor		$out4,$in4,$rndkey0
+	 le?vperm	$in7,$in7,$in7,$inpperm
+	vxor		$out5,$in5,$rndkey0
+	vxor		$out6,$in6,$rndkey0
+	vxor		$out7,$in7,$rndkey0
+
+	mtctr		$rounds
+	b		Loop_cbc_dec8x
+.align	5
+Loop_cbc_dec8x:
+	vncipher	$out0,$out0,v24
+	vncipher	$out1,$out1,v24
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	vncipher	$out4,$out4,v24
+	vncipher	$out5,$out5,v24
+	vncipher	$out6,$out6,v24
+	vncipher	$out7,$out7,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vncipher	$out0,$out0,v25
+	vncipher	$out1,$out1,v25
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	vncipher	$out4,$out4,v25
+	vncipher	$out5,$out5,v25
+	vncipher	$out6,$out6,v25
+	vncipher	$out7,$out7,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Loop_cbc_dec8x
+
+	subic		$len,$len,128		# $len-=128
+	vncipher	$out0,$out0,v24
+	vncipher	$out1,$out1,v24
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	vncipher	$out4,$out4,v24
+	vncipher	$out5,$out5,v24
+	vncipher	$out6,$out6,v24
+	vncipher	$out7,$out7,v24
+
+	subfe.		r0,r0,r0		# borrow?-1:0
+	vncipher	$out0,$out0,v25
+	vncipher	$out1,$out1,v25
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	vncipher	$out4,$out4,v25
+	vncipher	$out5,$out5,v25
+	vncipher	$out6,$out6,v25
+	vncipher	$out7,$out7,v25
+
+	and		r0,r0,$len
+	vncipher	$out0,$out0,v26
+	vncipher	$out1,$out1,v26
+	vncipher	$out2,$out2,v26
+	vncipher	$out3,$out3,v26
+	vncipher	$out4,$out4,v26
+	vncipher	$out5,$out5,v26
+	vncipher	$out6,$out6,v26
+	vncipher	$out7,$out7,v26
+
+	add		$inp,$inp,r0		# $inp is adjusted in such
+						# way that at exit from the
+						# loop inX-in7 are loaded
+						# with last "words"
+	vncipher	$out0,$out0,v27
+	vncipher	$out1,$out1,v27
+	vncipher	$out2,$out2,v27
+	vncipher	$out3,$out3,v27
+	vncipher	$out4,$out4,v27
+	vncipher	$out5,$out5,v27
+	vncipher	$out6,$out6,v27
+	vncipher	$out7,$out7,v27
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	vncipher	$out0,$out0,v28
+	vncipher	$out1,$out1,v28
+	vncipher	$out2,$out2,v28
+	vncipher	$out3,$out3,v28
+	vncipher	$out4,$out4,v28
+	vncipher	$out5,$out5,v28
+	vncipher	$out6,$out6,v28
+	vncipher	$out7,$out7,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+
+	vncipher	$out0,$out0,v29
+	vncipher	$out1,$out1,v29
+	vncipher	$out2,$out2,v29
+	vncipher	$out3,$out3,v29
+	vncipher	$out4,$out4,v29
+	vncipher	$out5,$out5,v29
+	vncipher	$out6,$out6,v29
+	vncipher	$out7,$out7,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+
+	vncipher	$out0,$out0,v30
+	 vxor		$ivec,$ivec,v31		# xor with last round key
+	vncipher	$out1,$out1,v30
+	 vxor		$in0,$in0,v31
+	vncipher	$out2,$out2,v30
+	 vxor		$in1,$in1,v31
+	vncipher	$out3,$out3,v30
+	 vxor		$in2,$in2,v31
+	vncipher	$out4,$out4,v30
+	 vxor		$in3,$in3,v31
+	vncipher	$out5,$out5,v30
+	 vxor		$in4,$in4,v31
+	vncipher	$out6,$out6,v30
+	 vxor		$in5,$in5,v31
+	vncipher	$out7,$out7,v30
+	 vxor		$in6,$in6,v31
+
+	vncipherlast	$out0,$out0,$ivec
+	vncipherlast	$out1,$out1,$in0
+	 lvx_u		$in0,$x00,$inp		# load next input block
+	vncipherlast	$out2,$out2,$in1
+	 lvx_u		$in1,$x10,$inp
+	vncipherlast	$out3,$out3,$in2
+	 le?vperm	$in0,$in0,$in0,$inpperm
+	 lvx_u		$in2,$x20,$inp
+	vncipherlast	$out4,$out4,$in3
+	 le?vperm	$in1,$in1,$in1,$inpperm
+	 lvx_u		$in3,$x30,$inp
+	vncipherlast	$out5,$out5,$in4
+	 le?vperm	$in2,$in2,$in2,$inpperm
+	 lvx_u		$in4,$x40,$inp
+	vncipherlast	$out6,$out6,$in5
+	 le?vperm	$in3,$in3,$in3,$inpperm
+	 lvx_u		$in5,$x50,$inp
+	vncipherlast	$out7,$out7,$in6
+	 le?vperm	$in4,$in4,$in4,$inpperm
+	 lvx_u		$in6,$x60,$inp
+	vmr		$ivec,$in7
+	 le?vperm	$in5,$in5,$in5,$inpperm
+	 lvx_u		$in7,$x70,$inp
+	 addi		$inp,$inp,0x80
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	 le?vperm	$in6,$in6,$in6,$inpperm
+	 vxor		$out0,$in0,$rndkey0
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x10,$out
+	 le?vperm	$in7,$in7,$in7,$inpperm
+	 vxor		$out1,$in1,$rndkey0
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x20,$out
+	 vxor		$out2,$in2,$rndkey0
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x30,$out
+	 vxor		$out3,$in3,$rndkey0
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x40,$out
+	 vxor		$out4,$in4,$rndkey0
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x50,$out
+	 vxor		$out5,$in5,$rndkey0
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x60,$out
+	 vxor		$out6,$in6,$rndkey0
+	stvx_u		$out7,$x70,$out
+	addi		$out,$out,0x80
+	 vxor		$out7,$in7,$rndkey0
+
+	mtctr		$rounds
+	beq		Loop_cbc_dec8x		# did $len-=128 borrow?
+
+	addic.		$len,$len,128
+	beq		Lcbc_dec8x_done
+	nop
+	nop
+
+Loop_cbc_dec8x_tail:				# up to 7 "words" tail...
+	vncipher	$out1,$out1,v24
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	vncipher	$out4,$out4,v24
+	vncipher	$out5,$out5,v24
+	vncipher	$out6,$out6,v24
+	vncipher	$out7,$out7,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vncipher	$out1,$out1,v25
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	vncipher	$out4,$out4,v25
+	vncipher	$out5,$out5,v25
+	vncipher	$out6,$out6,v25
+	vncipher	$out7,$out7,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Loop_cbc_dec8x_tail
+
+	vncipher	$out1,$out1,v24
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	vncipher	$out4,$out4,v24
+	vncipher	$out5,$out5,v24
+	vncipher	$out6,$out6,v24
+	vncipher	$out7,$out7,v24
+
+	vncipher	$out1,$out1,v25
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	vncipher	$out4,$out4,v25
+	vncipher	$out5,$out5,v25
+	vncipher	$out6,$out6,v25
+	vncipher	$out7,$out7,v25
+
+	vncipher	$out1,$out1,v26
+	vncipher	$out2,$out2,v26
+	vncipher	$out3,$out3,v26
+	vncipher	$out4,$out4,v26
+	vncipher	$out5,$out5,v26
+	vncipher	$out6,$out6,v26
+	vncipher	$out7,$out7,v26
+
+	vncipher	$out1,$out1,v27
+	vncipher	$out2,$out2,v27
+	vncipher	$out3,$out3,v27
+	vncipher	$out4,$out4,v27
+	vncipher	$out5,$out5,v27
+	vncipher	$out6,$out6,v27
+	vncipher	$out7,$out7,v27
+
+	vncipher	$out1,$out1,v28
+	vncipher	$out2,$out2,v28
+	vncipher	$out3,$out3,v28
+	vncipher	$out4,$out4,v28
+	vncipher	$out5,$out5,v28
+	vncipher	$out6,$out6,v28
+	vncipher	$out7,$out7,v28
+
+	vncipher	$out1,$out1,v29
+	vncipher	$out2,$out2,v29
+	vncipher	$out3,$out3,v29
+	vncipher	$out4,$out4,v29
+	vncipher	$out5,$out5,v29
+	vncipher	$out6,$out6,v29
+	vncipher	$out7,$out7,v29
+
+	vncipher	$out1,$out1,v30
+	 vxor		$ivec,$ivec,v31		# last round key
+	vncipher	$out2,$out2,v30
+	 vxor		$in1,$in1,v31
+	vncipher	$out3,$out3,v30
+	 vxor		$in2,$in2,v31
+	vncipher	$out4,$out4,v30
+	 vxor		$in3,$in3,v31
+	vncipher	$out5,$out5,v30
+	 vxor		$in4,$in4,v31
+	vncipher	$out6,$out6,v30
+	 vxor		$in5,$in5,v31
+	vncipher	$out7,$out7,v30
+	 vxor		$in6,$in6,v31
+
+	cmplwi		$len,32			# switch($len)
+	blt		Lcbc_dec8x_one
+	nop
+	beq		Lcbc_dec8x_two
+	cmplwi		$len,64
+	blt		Lcbc_dec8x_three
+	nop
+	beq		Lcbc_dec8x_four
+	cmplwi		$len,96
+	blt		Lcbc_dec8x_five
+	nop
+	beq		Lcbc_dec8x_six
+
+Lcbc_dec8x_seven:
+	vncipherlast	$out1,$out1,$ivec
+	vncipherlast	$out2,$out2,$in1
+	vncipherlast	$out3,$out3,$in2
+	vncipherlast	$out4,$out4,$in3
+	vncipherlast	$out5,$out5,$in4
+	vncipherlast	$out6,$out6,$in5
+	vncipherlast	$out7,$out7,$in6
+	vmr		$ivec,$in7
+
+	le?vperm	$out1,$out1,$out1,$inpperm
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x00,$out
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x10,$out
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x20,$out
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x30,$out
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x40,$out
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x50,$out
+	stvx_u		$out7,$x60,$out
+	addi		$out,$out,0x70
+	b		Lcbc_dec8x_done
+
+.align	5
+Lcbc_dec8x_six:
+	vncipherlast	$out2,$out2,$ivec
+	vncipherlast	$out3,$out3,$in2
+	vncipherlast	$out4,$out4,$in3
+	vncipherlast	$out5,$out5,$in4
+	vncipherlast	$out6,$out6,$in5
+	vncipherlast	$out7,$out7,$in6
+	vmr		$ivec,$in7
+
+	le?vperm	$out2,$out2,$out2,$inpperm
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x00,$out
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x10,$out
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x20,$out
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x30,$out
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x40,$out
+	stvx_u		$out7,$x50,$out
+	addi		$out,$out,0x60
+	b		Lcbc_dec8x_done
+
+.align	5
+Lcbc_dec8x_five:
+	vncipherlast	$out3,$out3,$ivec
+	vncipherlast	$out4,$out4,$in3
+	vncipherlast	$out5,$out5,$in4
+	vncipherlast	$out6,$out6,$in5
+	vncipherlast	$out7,$out7,$in6
+	vmr		$ivec,$in7
+
+	le?vperm	$out3,$out3,$out3,$inpperm
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x00,$out
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x10,$out
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x20,$out
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x30,$out
+	stvx_u		$out7,$x40,$out
+	addi		$out,$out,0x50
+	b		Lcbc_dec8x_done
+
+.align	5
+Lcbc_dec8x_four:
+	vncipherlast	$out4,$out4,$ivec
+	vncipherlast	$out5,$out5,$in4
+	vncipherlast	$out6,$out6,$in5
+	vncipherlast	$out7,$out7,$in6
+	vmr		$ivec,$in7
+
+	le?vperm	$out4,$out4,$out4,$inpperm
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x00,$out
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x10,$out
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x20,$out
+	stvx_u		$out7,$x30,$out
+	addi		$out,$out,0x40
+	b		Lcbc_dec8x_done
+
+.align	5
+Lcbc_dec8x_three:
+	vncipherlast	$out5,$out5,$ivec
+	vncipherlast	$out6,$out6,$in5
+	vncipherlast	$out7,$out7,$in6
+	vmr		$ivec,$in7
+
+	le?vperm	$out5,$out5,$out5,$inpperm
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x00,$out
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x10,$out
+	stvx_u		$out7,$x20,$out
+	addi		$out,$out,0x30
+	b		Lcbc_dec8x_done
+
+.align	5
+Lcbc_dec8x_two:
+	vncipherlast	$out6,$out6,$ivec
+	vncipherlast	$out7,$out7,$in6
+	vmr		$ivec,$in7
+
+	le?vperm	$out6,$out6,$out6,$inpperm
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x00,$out
+	stvx_u		$out7,$x10,$out
+	addi		$out,$out,0x20
+	b		Lcbc_dec8x_done
+
+.align	5
+Lcbc_dec8x_one:
+	vncipherlast	$out7,$out7,$ivec
+	vmr		$ivec,$in7
+
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out7,0,$out
+	addi		$out,$out,0x10
+
+Lcbc_dec8x_done:
+	le?vperm	$ivec,$ivec,$ivec,$inpperm
+	stvx_u		$ivec,0,$ivp		# write [unaligned] iv
+
+	li		r10,`$FRAME+15`
+	li		r11,`$FRAME+31`
+	stvx		$inpperm,r10,$sp	# wipe copies of round keys
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+	stvx		$inpperm,r10,$sp
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+	stvx		$inpperm,r10,$sp
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+	stvx		$inpperm,r10,$sp
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+
+	mtspr		256,$vrsave
+	lvx		v20,r10,$sp		# ABI says so
+	addi		r10,r10,32
+	lvx		v21,r11,$sp
+	addi		r11,r11,32
+	lvx		v22,r10,$sp
+	addi		r10,r10,32
+	lvx		v23,r11,$sp
+	addi		r11,r11,32
+	lvx		v24,r10,$sp
+	addi		r10,r10,32
+	lvx		v25,r11,$sp
+	addi		r11,r11,32
+	lvx		v26,r10,$sp
+	addi		r10,r10,32
+	lvx		v27,r11,$sp
+	addi		r11,r11,32
+	lvx		v28,r10,$sp
+	addi		r10,r10,32
+	lvx		v29,r11,$sp
+	addi		r11,r11,32
+	lvx		v30,r10,$sp
+	lvx		v31,r11,$sp
+	$POP		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	$POP		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	$POP		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	$POP		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	$POP		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	$POP		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	addi		$sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+	blr
+	.long		0
+	.byte		0,12,0x04,0,0x80,6,6,0
+	.long		0
+.size	.${prefix}_cbc_encrypt,.-.${prefix}_cbc_encrypt
+___
+}}	}}}
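+
+# A note on the branchless steering inside _aesp8_cbc_decrypt8x: after
+# "subic $len,$len,128", "subfe r0,r0,r0" yields r0 = -1 if the
+# subtraction borrowed ($len went negative) and 0 otherwise, so the
+# following and/add pair rewinds $inp on the final short iteration only.
+# In plain-Perl terms, roughly:
+#
+#   $r0   = ($len -= 128) < 0 ? -1 : 0;	# subic + subfe
+#   $inp += $r0 & $len;			# reload last blocks, no branch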
+
+#########################################################################
+{{{	# CTR procedure[s]						#
+my ($inp,$out,$len,$key,$ivp,$x10,$rounds,$idx)=map("r$_",(3..10));
+my ($rndkey0,$rndkey1,$inout,$tmp)=		map("v$_",(0..3));
+my ($ivec,$inptail,$inpperm,$outhead,$outperm,$outmask,$keyperm,$one)=
+						map("v$_",(4..11));
+my $dat=$tmp;
+
+$code.=<<___;
+.globl	.${prefix}_ctr32_encrypt_blocks
+.align	5
+.${prefix}_ctr32_encrypt_blocks:
+	${UCMP}i	$len,1
+	bltlr-
+
+	lis		r0,0xfff0
+	mfspr		$vrsave,256
+	mtspr		256,r0
+
+	li		$idx,15
+	vxor		$rndkey0,$rndkey0,$rndkey0
+	le?vspltisb	$tmp,0x0f
+
+	lvx		$ivec,0,$ivp		# load [unaligned] iv
+	lvsl		$inpperm,0,$ivp
+	lvx		$inptail,$idx,$ivp
+	 vspltisb	$one,1
+	le?vxor		$inpperm,$inpperm,$tmp
+	vperm		$ivec,$ivec,$inptail,$inpperm
+	 vsldoi		$one,$rndkey0,$one,1
+
+	neg		r11,$inp
+	?lvsl		$keyperm,0,$key		# prepare for unaligned key
+	lwz		$rounds,240($key)
+
+	lvsr		$inpperm,0,r11		# prepare for unaligned load
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,15		# 15 is not a typo
+	le?vxor		$inpperm,$inpperm,$tmp
+
+	srwi		$rounds,$rounds,1
+	li		$idx,16
+	subi		$rounds,$rounds,1
+
+	${UCMP}i	$len,8
+	bge		_aesp8_ctr32_encrypt8x
+
+	?lvsr		$outperm,0,$out		# prepare for unaligned store
+	vspltisb	$outmask,-1
+	lvx		$outhead,0,$out
+	?vperm		$outmask,$rndkey0,$outmask,$outperm
+	le?vxor		$outperm,$outperm,$tmp
+
+	lvx		$rndkey0,0,$key
+	mtctr		$rounds
+	lvx		$rndkey1,$idx,$key
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$inout,$ivec,$rndkey0
+	lvx		$rndkey0,$idx,$key
+	addi		$idx,$idx,16
+	b		Loop_ctr32_enc
+
+.align	5
+Loop_ctr32_enc:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipher		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key
+	addi		$idx,$idx,16
+	bdnz		Loop_ctr32_enc
+
+	vadduwm		$ivec,$ivec,$one
+	 vmr		$dat,$inptail
+	 lvx		$inptail,0,$inp
+	 addi		$inp,$inp,16
+	 subic.		$len,$len,1		# blocks--
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key
+	 vperm		$dat,$dat,$inptail,$inpperm
+	 li		$idx,16
+	?vperm		$rndkey1,$rndkey0,$rndkey1,$keyperm
+	 lvx		$rndkey0,0,$key
+	vxor		$dat,$dat,$rndkey1	# last round key
+	vcipherlast	$inout,$inout,$dat
+
+	 lvx		$rndkey1,$idx,$key
+	 addi		$idx,$idx,16
+	vperm		$inout,$inout,$inout,$outperm
+	vsel		$dat,$outhead,$inout,$outmask
+	 mtctr		$rounds
+	 ?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vmr		$outhead,$inout
+	 vxor		$inout,$ivec,$rndkey0
+	 lvx		$rndkey0,$idx,$key
+	 addi		$idx,$idx,16
+	stvx		$dat,0,$out
+	addi		$out,$out,16
+	bne		Loop_ctr32_enc
+
+	addi		$out,$out,-1
+	lvx		$inout,0,$out		# redundant in aligned case
+	vsel		$inout,$outhead,$inout,$outmask
+	stvx		$inout,0,$out
+
+	mtspr		256,$vrsave
+	blr
+	.long		0
+	.byte		0,12,0x14,0,0,0,6,0
+	.long		0
+___
+#########################################################################
+{{	# Optimized CTR procedure					#
+my $key_="r11";
+my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,8,26..31));
+    $x00=0 if ($flavour =~ /osx/);
+my ($in0, $in1, $in2, $in3, $in4, $in5, $in6, $in7 )=map("v$_",(0..3,10,12..14));
+my ($out0,$out1,$out2,$out3,$out4,$out5,$out6,$out7)=map("v$_",(15..22));
+my $rndkey0="v23";	# v24-v25 rotating buffer for first found keys
+			# v26-v31 last 6 round keys
+my ($tmp,$keyperm)=($in3,$in4);	# aliases with "caller", redundant assignment
+my ($two,$three,$four)=($outhead,$outperm,$outmask);
+
+$code.=<<___;
+.align	5
+_aesp8_ctr32_encrypt8x:
+	$STU		$sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+	li		r10,`$FRAME+8*16+15`
+	li		r11,`$FRAME+8*16+31`
+	stvx		v20,r10,$sp		# ABI says so
+	addi		r10,r10,32
+	stvx		v21,r11,$sp
+	addi		r11,r11,32
+	stvx		v22,r10,$sp
+	addi		r10,r10,32
+	stvx		v23,r11,$sp
+	addi		r11,r11,32
+	stvx		v24,r10,$sp
+	addi		r10,r10,32
+	stvx		v25,r11,$sp
+	addi		r11,r11,32
+	stvx		v26,r10,$sp
+	addi		r10,r10,32
+	stvx		v27,r11,$sp
+	addi		r11,r11,32
+	stvx		v28,r10,$sp
+	addi		r10,r10,32
+	stvx		v29,r11,$sp
+	addi		r11,r11,32
+	stvx		v30,r10,$sp
+	stvx		v31,r11,$sp
+	li		r0,-1
+	stw		$vrsave,`$FRAME+21*16-4`($sp)	# save vrsave
+	li		$x10,0x10
+	$PUSH		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	li		$x20,0x20
+	$PUSH		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	li		$x30,0x30
+	$PUSH		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	li		$x40,0x40
+	$PUSH		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	li		$x50,0x50
+	$PUSH		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	li		$x60,0x60
+	$PUSH		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	li		$x70,0x70
+	mtspr		256,r0
+
+	subi		$rounds,$rounds,3	# -4 in total
+
+	lvx		$rndkey0,$x00,$key	# load key schedule
+	lvx		v30,$x10,$key
+	addi		$key,$key,0x20
+	lvx		v31,$x00,$key
+	?vperm		$rndkey0,$rndkey0,v30,$keyperm
+	addi		$key_,$sp,$FRAME+15
+	mtctr		$rounds
+
+Load_ctr32_enc_key:
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v30,$x10,$key
+	addi		$key,$key,0x20
+	stvx		v24,$x00,$key_		# off-load round[1]
+	?vperm		v25,v31,v30,$keyperm
+	lvx		v31,$x00,$key
+	stvx		v25,$x10,$key_		# off-load round[2]
+	addi		$key_,$key_,0x20
+	bdnz		Load_ctr32_enc_key
+
+	lvx		v26,$x10,$key
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v27,$x20,$key
+	stvx		v24,$x00,$key_		# off-load round[3]
+	?vperm		v25,v31,v26,$keyperm
+	lvx		v28,$x30,$key
+	stvx		v25,$x10,$key_		# off-load round[4]
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	?vperm		v26,v26,v27,$keyperm
+	lvx		v29,$x40,$key
+	?vperm		v27,v27,v28,$keyperm
+	lvx		v30,$x50,$key
+	?vperm		v28,v28,v29,$keyperm
+	lvx		v31,$x60,$key
+	?vperm		v29,v29,v30,$keyperm
+	lvx		$out0,$x70,$key		# borrow $out0
+	?vperm		v30,v30,v31,$keyperm
+	lvx		v24,$x00,$key_		# pre-load round[1]
+	?vperm		v31,v31,$out0,$keyperm
+	lvx		v25,$x10,$key_		# pre-load round[2]
+
+	vadduwm		$two,$one,$one
+	subi		$inp,$inp,15		# undo "caller"
+	$SHL		$len,$len,4
+
+	vadduwm		$out1,$ivec,$one	# counter values ...
+	vadduwm		$out2,$ivec,$two
+	vxor		$out0,$ivec,$rndkey0	# ... xored with rndkey[0]
+	 le?li		$idx,8
+	vadduwm		$out3,$out1,$two
+	vxor		$out1,$out1,$rndkey0
+	 le?lvsl	$inpperm,0,$idx
+	vadduwm		$out4,$out2,$two
+	vxor		$out2,$out2,$rndkey0
+	 le?vspltisb	$tmp,0x0f
+	vadduwm		$out5,$out3,$two
+	vxor		$out3,$out3,$rndkey0
+	 le?vxor	$inpperm,$inpperm,$tmp	# transform for lvx_u/stvx_u
+	vadduwm		$out6,$out4,$two
+	vxor		$out4,$out4,$rndkey0
+	vadduwm		$out7,$out5,$two
+	vxor		$out5,$out5,$rndkey0
+	vadduwm		$ivec,$out6,$two	# next counter value
+	vxor		$out6,$out6,$rndkey0
+	vxor		$out7,$out7,$rndkey0
+
+	mtctr		$rounds
+	b		Loop_ctr32_enc8x
+.align	5
+Loop_ctr32_enc8x:
+	vcipher 	$out0,$out0,v24
+	vcipher 	$out1,$out1,v24
+	vcipher 	$out2,$out2,v24
+	vcipher 	$out3,$out3,v24
+	vcipher 	$out4,$out4,v24
+	vcipher 	$out5,$out5,v24
+	vcipher 	$out6,$out6,v24
+	vcipher 	$out7,$out7,v24
+Loop_ctr32_enc8x_middle:
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vcipher 	$out0,$out0,v25
+	vcipher 	$out1,$out1,v25
+	vcipher 	$out2,$out2,v25
+	vcipher 	$out3,$out3,v25
+	vcipher 	$out4,$out4,v25
+	vcipher 	$out5,$out5,v25
+	vcipher 	$out6,$out6,v25
+	vcipher 	$out7,$out7,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Loop_ctr32_enc8x
+
+	subic		r11,$len,256		# $len-256, borrow $key_
+	vcipher 	$out0,$out0,v24
+	vcipher 	$out1,$out1,v24
+	vcipher 	$out2,$out2,v24
+	vcipher 	$out3,$out3,v24
+	vcipher 	$out4,$out4,v24
+	vcipher 	$out5,$out5,v24
+	vcipher 	$out6,$out6,v24
+	vcipher 	$out7,$out7,v24
+
+	subfe		r0,r0,r0		# borrow?-1:0
+	vcipher 	$out0,$out0,v25
+	vcipher 	$out1,$out1,v25
+	vcipher 	$out2,$out2,v25
+	vcipher 	$out3,$out3,v25
+	vcipher 	$out4,$out4,v25
+	vcipher		$out5,$out5,v25
+	vcipher		$out6,$out6,v25
+	vcipher		$out7,$out7,v25
+
+	and		r0,r0,r11
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	vcipher		$out0,$out0,v26
+	vcipher		$out1,$out1,v26
+	vcipher		$out2,$out2,v26
+	vcipher		$out3,$out3,v26
+	vcipher		$out4,$out4,v26
+	vcipher		$out5,$out5,v26
+	vcipher		$out6,$out6,v26
+	vcipher		$out7,$out7,v26
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+
+	subic		$len,$len,129		# $len-=129
+	vcipher		$out0,$out0,v27
+	addi		$len,$len,1		# $len-=128 really
+	vcipher		$out1,$out1,v27
+	vcipher		$out2,$out2,v27
+	vcipher		$out3,$out3,v27
+	vcipher		$out4,$out4,v27
+	vcipher		$out5,$out5,v27
+	vcipher		$out6,$out6,v27
+	vcipher		$out7,$out7,v27
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+
+	vcipher		$out0,$out0,v28
+	 lvx_u		$in0,$x00,$inp		# load input
+	vcipher		$out1,$out1,v28
+	 lvx_u		$in1,$x10,$inp
+	vcipher		$out2,$out2,v28
+	 lvx_u		$in2,$x20,$inp
+	vcipher		$out3,$out3,v28
+	 lvx_u		$in3,$x30,$inp
+	vcipher		$out4,$out4,v28
+	 lvx_u		$in4,$x40,$inp
+	vcipher		$out5,$out5,v28
+	 lvx_u		$in5,$x50,$inp
+	vcipher		$out6,$out6,v28
+	 lvx_u		$in6,$x60,$inp
+	vcipher		$out7,$out7,v28
+	 lvx_u		$in7,$x70,$inp
+	 addi		$inp,$inp,0x80
+
+	vcipher		$out0,$out0,v29
+	 le?vperm	$in0,$in0,$in0,$inpperm
+	vcipher		$out1,$out1,v29
+	 le?vperm	$in1,$in1,$in1,$inpperm
+	vcipher		$out2,$out2,v29
+	 le?vperm	$in2,$in2,$in2,$inpperm
+	vcipher		$out3,$out3,v29
+	 le?vperm	$in3,$in3,$in3,$inpperm
+	vcipher		$out4,$out4,v29
+	 le?vperm	$in4,$in4,$in4,$inpperm
+	vcipher		$out5,$out5,v29
+	 le?vperm	$in5,$in5,$in5,$inpperm
+	vcipher		$out6,$out6,v29
+	 le?vperm	$in6,$in6,$in6,$inpperm
+	vcipher		$out7,$out7,v29
+	 le?vperm	$in7,$in7,$in7,$inpperm
+
+	add		$inp,$inp,r0		# $inp is adjusted in such
+						# way that at exit from the
+						# loop inX-in7 are loaded
+						# with last "words"
+	subfe.		r0,r0,r0		# borrow?-1:0
+	vcipher		$out0,$out0,v30
+	 vxor		$in0,$in0,v31		# xor with last round key
+	vcipher		$out1,$out1,v30
+	 vxor		$in1,$in1,v31
+	vcipher		$out2,$out2,v30
+	 vxor		$in2,$in2,v31
+	vcipher		$out3,$out3,v30
+	 vxor		$in3,$in3,v31
+	vcipher		$out4,$out4,v30
+	 vxor		$in4,$in4,v31
+	vcipher		$out5,$out5,v30
+	 vxor		$in5,$in5,v31
+	vcipher		$out6,$out6,v30
+	 vxor		$in6,$in6,v31
+	vcipher		$out7,$out7,v30
+	 vxor		$in7,$in7,v31
+
+	bne		Lctr32_enc8x_break	# did $len-129 borrow?
+
+	vcipherlast	$in0,$out0,$in0
+	vcipherlast	$in1,$out1,$in1
+	 vadduwm	$out1,$ivec,$one	# counter values ...
+	vcipherlast	$in2,$out2,$in2
+	 vadduwm	$out2,$ivec,$two
+	 vxor		$out0,$ivec,$rndkey0	# ... xored with rndkey[0]
+	vcipherlast	$in3,$out3,$in3
+	 vadduwm	$out3,$out1,$two
+	 vxor		$out1,$out1,$rndkey0
+	vcipherlast	$in4,$out4,$in4
+	 vadduwm	$out4,$out2,$two
+	 vxor		$out2,$out2,$rndkey0
+	vcipherlast	$in5,$out5,$in5
+	 vadduwm	$out5,$out3,$two
+	 vxor		$out3,$out3,$rndkey0
+	vcipherlast	$in6,$out6,$in6
+	 vadduwm	$out6,$out4,$two
+	 vxor		$out4,$out4,$rndkey0
+	vcipherlast	$in7,$out7,$in7
+	 vadduwm	$out7,$out5,$two
+	 vxor		$out5,$out5,$rndkey0
+	le?vperm	$in0,$in0,$in0,$inpperm
+	 vadduwm	$ivec,$out6,$two	# next counter value
+	 vxor		$out6,$out6,$rndkey0
+	le?vperm	$in1,$in1,$in1,$inpperm
+	 vxor		$out7,$out7,$rndkey0
+	mtctr		$rounds
+
+	 vcipher	$out0,$out0,v24
+	stvx_u		$in0,$x00,$out
+	le?vperm	$in2,$in2,$in2,$inpperm
+	 vcipher	$out1,$out1,v24
+	stvx_u		$in1,$x10,$out
+	le?vperm	$in3,$in3,$in3,$inpperm
+	 vcipher	$out2,$out2,v24
+	stvx_u		$in2,$x20,$out
+	le?vperm	$in4,$in4,$in4,$inpperm
+	 vcipher	$out3,$out3,v24
+	stvx_u		$in3,$x30,$out
+	le?vperm	$in5,$in5,$in5,$inpperm
+	 vcipher	$out4,$out4,v24
+	stvx_u		$in4,$x40,$out
+	le?vperm	$in6,$in6,$in6,$inpperm
+	 vcipher	$out5,$out5,v24
+	stvx_u		$in5,$x50,$out
+	le?vperm	$in7,$in7,$in7,$inpperm
+	 vcipher	$out6,$out6,v24
+	stvx_u		$in6,$x60,$out
+	 vcipher	$out7,$out7,v24
+	stvx_u		$in7,$x70,$out
+	addi		$out,$out,0x80
+
+	b		Loop_ctr32_enc8x_middle
+
+.align	5
+Lctr32_enc8x_break:
+	cmpwi		$len,-0x60
+	blt		Lctr32_enc8x_one
+	nop
+	beq		Lctr32_enc8x_two
+	cmpwi		$len,-0x40
+	blt		Lctr32_enc8x_three
+	nop
+	beq		Lctr32_enc8x_four
+	cmpwi		$len,-0x20
+	blt		Lctr32_enc8x_five
+	nop
+	beq		Lctr32_enc8x_six
+	cmpwi		$len,0x00
+	blt		Lctr32_enc8x_seven
+
+Lctr32_enc8x_eight:
+	vcipherlast	$out0,$out0,$in0
+	vcipherlast	$out1,$out1,$in1
+	vcipherlast	$out2,$out2,$in2
+	vcipherlast	$out3,$out3,$in3
+	vcipherlast	$out4,$out4,$in4
+	vcipherlast	$out5,$out5,$in5
+	vcipherlast	$out6,$out6,$in6
+	vcipherlast	$out7,$out7,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x20,$out
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x30,$out
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x40,$out
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x50,$out
+	le?vperm	$out7,$out7,$out7,$inpperm
+	stvx_u		$out6,$x60,$out
+	stvx_u		$out7,$x70,$out
+	addi		$out,$out,0x80
+	b		Lctr32_enc8x_done
+
+.align	5
+Lctr32_enc8x_seven:
+	vcipherlast	$out0,$out0,$in1
+	vcipherlast	$out1,$out1,$in2
+	vcipherlast	$out2,$out2,$in3
+	vcipherlast	$out3,$out3,$in4
+	vcipherlast	$out4,$out4,$in5
+	vcipherlast	$out5,$out5,$in6
+	vcipherlast	$out6,$out6,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x20,$out
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x30,$out
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x40,$out
+	le?vperm	$out6,$out6,$out6,$inpperm
+	stvx_u		$out5,$x50,$out
+	stvx_u		$out6,$x60,$out
+	addi		$out,$out,0x70
+	b		Lctr32_enc8x_done
+
+.align	5
+Lctr32_enc8x_six:
+	vcipherlast	$out0,$out0,$in2
+	vcipherlast	$out1,$out1,$in3
+	vcipherlast	$out2,$out2,$in4
+	vcipherlast	$out3,$out3,$in5
+	vcipherlast	$out4,$out4,$in6
+	vcipherlast	$out5,$out5,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x20,$out
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x30,$out
+	le?vperm	$out5,$out5,$out5,$inpperm
+	stvx_u		$out4,$x40,$out
+	stvx_u		$out5,$x50,$out
+	addi		$out,$out,0x60
+	b		Lctr32_enc8x_done
+
+.align	5
+Lctr32_enc8x_five:
+	vcipherlast	$out0,$out0,$in3
+	vcipherlast	$out1,$out1,$in4
+	vcipherlast	$out2,$out2,$in5
+	vcipherlast	$out3,$out3,$in6
+	vcipherlast	$out4,$out4,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x20,$out
+	le?vperm	$out4,$out4,$out4,$inpperm
+	stvx_u		$out3,$x30,$out
+	stvx_u		$out4,$x40,$out
+	addi		$out,$out,0x50
+	b		Lctr32_enc8x_done
+
+.align	5
+Lctr32_enc8x_four:
+	vcipherlast	$out0,$out0,$in4
+	vcipherlast	$out1,$out1,$in5
+	vcipherlast	$out2,$out2,$in6
+	vcipherlast	$out3,$out3,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$inpperm
+	stvx_u		$out2,$x20,$out
+	stvx_u		$out3,$x30,$out
+	addi		$out,$out,0x40
+	b		Lctr32_enc8x_done
+
+.align	5
+Lctr32_enc8x_three:
+	vcipherlast	$out0,$out0,$in5
+	vcipherlast	$out1,$out1,$in6
+	vcipherlast	$out2,$out2,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	le?vperm	$out2,$out2,$out2,$inpperm
+	stvx_u		$out1,$x10,$out
+	stvx_u		$out2,$x20,$out
+	addi		$out,$out,0x30
+	b		Lctr32_enc8x_done
+
+.align	5
+Lctr32_enc8x_two:
+	vcipherlast	$out0,$out0,$in6
+	vcipherlast	$out1,$out1,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	le?vperm	$out1,$out1,$out1,$inpperm
+	stvx_u		$out0,$x00,$out
+	stvx_u		$out1,$x10,$out
+	addi		$out,$out,0x20
+	b		Lctr32_enc8x_done
+
+.align	5
+Lctr32_enc8x_one:
+	vcipherlast	$out0,$out0,$in7
+
+	le?vperm	$out0,$out0,$out0,$inpperm
+	stvx_u		$out0,0,$out
+	addi		$out,$out,0x10
+
+Lctr32_enc8x_done:
+	li		r10,`$FRAME+15`
+	li		r11,`$FRAME+31`
+	stvx		$inpperm,r10,$sp	# wipe copies of round keys
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+	stvx		$inpperm,r10,$sp
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+	stvx		$inpperm,r10,$sp
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+	stvx		$inpperm,r10,$sp
+	addi		r10,r10,32
+	stvx		$inpperm,r11,$sp
+	addi		r11,r11,32
+
+	mtspr		256,$vrsave
+	lvx		v20,r10,$sp		# ABI says so
+	addi		r10,r10,32
+	lvx		v21,r11,$sp
+	addi		r11,r11,32
+	lvx		v22,r10,$sp
+	addi		r10,r10,32
+	lvx		v23,r11,$sp
+	addi		r11,r11,32
+	lvx		v24,r10,$sp
+	addi		r10,r10,32
+	lvx		v25,r11,$sp
+	addi		r11,r11,32
+	lvx		v26,r10,$sp
+	addi		r10,r10,32
+	lvx		v27,r11,$sp
+	addi		r11,r11,32
+	lvx		v28,r10,$sp
+	addi		r10,r10,32
+	lvx		v29,r11,$sp
+	addi		r11,r11,32
+	lvx		v30,r10,$sp
+	lvx		v31,r11,$sp
+	$POP		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	$POP		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	$POP		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	$POP		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	$POP		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	$POP		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	addi		$sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+	blr
+	.long		0
+	.byte		0,12,0x04,0,0x80,6,6,0
+	.long		0
+.size	.${prefix}_ctr32_encrypt_blocks,.-.${prefix}_ctr32_encrypt_blocks
+___
+}}	}}}
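+
+# Counter-handling note: $one is assembled by vspltisb/vsldoi as the
+# 128-bit constant 0x00..01, so "vadduwm $ivec,$ivec,$one" bumps only
+# the last 32-bit word of the IV, wrapping modulo 2^32 with no carry
+# into the upper words - the conventional CTR32 counter.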
+
+#########################################################################
+{{{	# XTS procedures						#
+# int aes_p8_xts_[en|de]crypt(const char *inp, char *out, size_t len,	#
+#                             const AES_KEY *key1, const AES_KEY *key2,	#
+#                             [const] unsigned char iv[16]);		#
+# If $key2 is NULL, then a "tweak chaining" mode is engaged, in which	#
+# the input tweak value is assumed to be encrypted already, and the	#
+# last tweak value, suitable for a consecutive call on the same chunk	#
+# of data, is written back to the original buffer. In addition, in	#
+# "tweak chaining" mode only complete input blocks are processed.	#
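+#
+# The per-block tweak update below (vsrab/vaddubm/vsldoi/vand/vxor) is
+# multiplication by x in GF(2^128) mod x^128+x^7+x^2+x+1. As a plain-Perl
+# reference model (128-bit pseudo-code, not executed here):
+#
+#   my $msb = $tweak >> 127;		# vsrab builds per-byte sign masks
+#   $tweak  = ($tweak << 1) % 2**128;	# vaddubm doubles byte-wise; the
+#					# 0x01 bytes of $eighty7 re-inject
+#					# each byte's carry into its neighbour
+#   $tweak ^= 0x87 if $msb;		# the 0x87 byte folds the top carry
+#					# back in (the field reduction)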
+
+my ($inp,$out,$len,$key1,$key2,$ivp,$rounds,$idx) =	map("r$_",(3..10));
+my ($rndkey0,$rndkey1,$inout) =				map("v$_",(0..2));
+my ($output,$inptail,$inpperm,$leperm,$keyperm) =	map("v$_",(3..7));
+my ($tweak,$seven,$eighty7,$tmp,$tweak1) =		map("v$_",(8..12));
+my $taillen = $key2;
+
+   ($inp,$idx) = ($idx,$inp);				# reassign
+
+$code.=<<___;
+.globl	.${prefix}_xts_encrypt
+.align	5
+.${prefix}_xts_encrypt:
+	mr		$inp,r3				# reassign
+	li		r3,-1
+	${UCMP}i	$len,16
+	bltlr-
+
+	lis		r0,0xfff0
+	mfspr		r12,256				# save vrsave
+	li		r11,0
+	mtspr		256,r0
+
+	vspltisb	$seven,0x07			# 0x070707..07
+	le?lvsl		$leperm,r11,r11
+	le?vspltisb	$tmp,0x0f
+	le?vxor		$leperm,$leperm,$seven
+
+	li		$idx,15
+	lvx		$tweak,0,$ivp			# load [unaligned] iv
+	lvsl		$inpperm,0,$ivp
+	lvx		$inptail,$idx,$ivp
+	le?vxor		$inpperm,$inpperm,$tmp
+	vperm		$tweak,$tweak,$inptail,$inpperm
+
+	neg		r11,$inp
+	lvsr		$inpperm,0,r11			# prepare for unaligned load
+	lvx		$inout,0,$inp
+	addi		$inp,$inp,15			# 15 is not a typo
+	le?vxor		$inpperm,$inpperm,$tmp
+
+	${UCMP}i	$key2,0				# key2==NULL?
+	beq		Lxts_enc_no_key2
+
+	?lvsl		$keyperm,0,$key2		# prepare for unaligned key
+	lwz		$rounds,240($key2)
+	srwi		$rounds,$rounds,1
+	subi		$rounds,$rounds,1
+	li		$idx,16
+
+	lvx		$rndkey0,0,$key2
+	lvx		$rndkey1,$idx,$key2
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$tweak,$tweak,$rndkey0
+	lvx		$rndkey0,$idx,$key2
+	addi		$idx,$idx,16
+	mtctr		$rounds
+
+Ltweak_xts_enc:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$tweak,$tweak,$rndkey1
+	lvx		$rndkey1,$idx,$key2
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipher		$tweak,$tweak,$rndkey0
+	lvx		$rndkey0,$idx,$key2
+	addi		$idx,$idx,16
+	bdnz		Ltweak_xts_enc
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$tweak,$tweak,$rndkey1
+	lvx		$rndkey1,$idx,$key2
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipherlast	$tweak,$tweak,$rndkey0
+
+	li		$ivp,0				# don't chain the tweak
+	b		Lxts_enc
+
+Lxts_enc_no_key2:
+	li		$idx,-16
+	and		$len,$len,$idx			# in "tweak chaining"
+							# mode only complete
+							# blocks are processed
+Lxts_enc:
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,16
+
+	?lvsl		$keyperm,0,$key1		# prepare for unaligned key
+	lwz		$rounds,240($key1)
+	srwi		$rounds,$rounds,1
+	subi		$rounds,$rounds,1
+	li		$idx,16
+
+	vslb		$eighty7,$seven,$seven		# 0x808080..80
+	vor		$eighty7,$eighty7,$seven	# 0x878787..87
+	vspltisb	$tmp,1				# 0x010101..01
+	vsldoi		$eighty7,$eighty7,$tmp,15	# 0x870101..01
+
+	${UCMP}i	$len,96
+	bge		_aesp8_xts_encrypt6x
+
+	andi.		$taillen,$len,15
+	subic		r0,$len,32
+	subi		$taillen,$taillen,16
+	subfe		r0,r0,r0
+	and		r0,r0,$taillen
+	add		$inp,$inp,r0
+
+	lvx		$rndkey0,0,$key1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+	vperm		$inout,$inout,$inptail,$inpperm
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$inout,$inout,$tweak
+	vxor		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+	mtctr		$rounds
+	b		Loop_xts_enc
+
+.align	5
+Loop_xts_enc:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipher		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+	bdnz		Loop_xts_enc
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key1
+	li		$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$rndkey0,$rndkey0,$tweak
+	vcipherlast	$output,$inout,$rndkey0
+
+	le?vperm	$tmp,$output,$output,$leperm
+	be?nop
+	le?stvx_u	$tmp,0,$out
+	be?stvx_u	$output,0,$out
+	addi		$out,$out,16
+
+	subic.		$len,$len,16
+	beq		Lxts_enc_done
+
+	vmr		$inout,$inptail
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,16
+	lvx		$rndkey0,0,$key1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+
+	subic		r0,$len,32
+	subfe		r0,r0,r0
+	and		r0,r0,$taillen
+	add		$inp,$inp,r0
+
+	vsrab		$tmp,$tweak,$seven		# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	vand		$tmp,$tmp,$eighty7
+	vxor		$tweak,$tweak,$tmp
+
+	vperm		$inout,$inout,$inptail,$inpperm
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$inout,$inout,$tweak
+	vxor		$output,$output,$rndkey0	# just in case $len<16
+	vxor		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+
+	mtctr		$rounds
+	${UCMP}i	$len,16
+	bge		Loop_xts_enc
+
+	vxor		$output,$output,$tweak
+	lvsr		$inpperm,0,$len			# $inpperm is no longer needed
+	vxor		$inptail,$inptail,$inptail	# $inptail is no longer needed
+	vspltisb	$tmp,-1
+	vperm		$inptail,$inptail,$tmp,$inpperm
+	vsel		$inout,$inout,$output,$inptail
+
+	subi		r11,$out,17
+	subi		$out,$out,16
+	mtctr		$len
+	li		$len,16
+Loop_xts_enc_steal:
+	lbzu		r0,1(r11)
+	stb		r0,16(r11)
+	bdnz		Loop_xts_enc_steal
+
+	mtctr		$rounds
+	b		Loop_xts_enc			# one more time...
+
+Lxts_enc_done:
+	${UCMP}i	$ivp,0
+	beq		Lxts_enc_ret
+
+	vsrab		$tmp,$tweak,$seven		# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	vand		$tmp,$tmp,$eighty7
+	vxor		$tweak,$tweak,$tmp
+
+	le?vperm	$tweak,$tweak,$tweak,$leperm
+	stvx_u		$tweak,0,$ivp
+
+Lxts_enc_ret:
+	mtspr		256,r12				# restore vrsave
+	li		r3,0
+	blr
+	.long		0
+	.byte		0,12,0x04,0,0x80,6,6,0
+	.long		0
+.size	.${prefix}_xts_encrypt,.-.${prefix}_xts_encrypt
+
+.globl	.${prefix}_xts_decrypt
+.align	5
+.${prefix}_xts_decrypt:
+	mr		$inp,r3				# reassign
+	li		r3,-1
+	${UCMP}i	$len,16
+	bltlr-
+
+	lis		r0,0xfff8
+	mfspr		r12,256				# save vrsave
+	li		r11,0
+	mtspr		256,r0
+
+	andi.		r0,$len,15
+	neg		r0,r0
+	andi.		r0,r0,16
+	sub		$len,$len,r0
+
+	vspltisb	$seven,0x07			# 0x070707..07
+	le?lvsl		$leperm,r11,r11
+	le?vspltisb	$tmp,0x0f
+	le?vxor		$leperm,$leperm,$seven
+
+	li		$idx,15
+	lvx		$tweak,0,$ivp			# load [unaligned] iv
+	lvsl		$inpperm,0,$ivp
+	lvx		$inptail,$idx,$ivp
+	le?vxor		$inpperm,$inpperm,$tmp
+	vperm		$tweak,$tweak,$inptail,$inpperm
+
+	neg		r11,$inp
+	lvsr		$inpperm,0,r11			# prepare for unaligned load
+	lvx		$inout,0,$inp
+	addi		$inp,$inp,15			# 15 is not a typo
+	le?vxor		$inpperm,$inpperm,$tmp
+
+	${UCMP}i	$key2,0				# key2==NULL?
+	beq		Lxts_dec_no_key2
+
+	?lvsl		$keyperm,0,$key2		# prepare for unaligned key
+	lwz		$rounds,240($key2)
+	srwi		$rounds,$rounds,1
+	subi		$rounds,$rounds,1
+	li		$idx,16
+
+	lvx		$rndkey0,0,$key2
+	lvx		$rndkey1,$idx,$key2
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$tweak,$tweak,$rndkey0
+	lvx		$rndkey0,$idx,$key2
+	addi		$idx,$idx,16
+	mtctr		$rounds
+
+Ltweak_xts_dec:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$tweak,$tweak,$rndkey1
+	lvx		$rndkey1,$idx,$key2
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipher		$tweak,$tweak,$rndkey0
+	lvx		$rndkey0,$idx,$key2
+	addi		$idx,$idx,16
+	bdnz		Ltweak_xts_dec
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vcipher		$tweak,$tweak,$rndkey1
+	lvx		$rndkey1,$idx,$key2
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vcipherlast	$tweak,$tweak,$rndkey0
+
+	li		$ivp,0				# don't chain the tweak
+	b		Lxts_dec
+
+Lxts_dec_no_key2:
+	neg		$idx,$len
+	andi.		$idx,$idx,15
+	add		$len,$len,$idx			# in "tweak chaining"
+							# mode only complete
+							# blocks are processed
+Lxts_dec:
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,16
+
+	?lvsl		$keyperm,0,$key1		# prepare for unaligned key
+	lwz		$rounds,240($key1)
+	srwi		$rounds,$rounds,1
+	subi		$rounds,$rounds,1
+	li		$idx,16
+
+	vslb		$eighty7,$seven,$seven		# 0x808080..80
+	vor		$eighty7,$eighty7,$seven	# 0x878787..87
+	vspltisb	$tmp,1				# 0x010101..01
+	vsldoi		$eighty7,$eighty7,$tmp,15	# 0x870101..01
+
+	${UCMP}i	$len,96
+	bge		_aesp8_xts_decrypt6x
+
+	lvx		$rndkey0,0,$key1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+	vperm		$inout,$inout,$inptail,$inpperm
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$inout,$inout,$tweak
+	vxor		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+	mtctr		$rounds
+
+	${UCMP}i	$len,16
+	blt		Ltail_xts_dec
+	be?b		Loop_xts_dec
+
+.align	5
+Loop_xts_dec:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vncipher	$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vncipher	$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+	bdnz		Loop_xts_dec
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vncipher	$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key1
+	li		$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$rndkey0,$rndkey0,$tweak
+	vncipherlast	$output,$inout,$rndkey0
+
+	le?vperm	$tmp,$output,$output,$leperm
+	be?nop
+	le?stvx_u	$tmp,0,$out
+	be?stvx_u	$output,0,$out
+	addi		$out,$out,16
+
+	subic.		$len,$len,16
+	beq		Lxts_dec_done
+
+	vmr		$inout,$inptail
+	lvx		$inptail,0,$inp
+	addi		$inp,$inp,16
+	lvx		$rndkey0,0,$key1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+
+	vsrab		$tmp,$tweak,$seven		# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	vand		$tmp,$tmp,$eighty7
+	vxor		$tweak,$tweak,$tmp
+
+	vperm		$inout,$inout,$inptail,$inpperm
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$inout,$inout,$tweak
+	vxor		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+
+	mtctr		$rounds
+	${UCMP}i	$len,16
+	bge		Loop_xts_dec
+
+Ltail_xts_dec:
+	vsrab		$tmp,$tweak,$seven		# next tweak value
+	vaddubm		$tweak1,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	vand		$tmp,$tmp,$eighty7
+	vxor		$tweak1,$tweak1,$tmp
+
+	subi		$inp,$inp,16
+	add		$inp,$inp,$len
+
+	vxor		$inout,$inout,$tweak		# :-(
+	vxor		$inout,$inout,$tweak1		# :-)
+
+Loop_xts_dec_short:
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vncipher	$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vncipher	$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+	bdnz		Loop_xts_dec_short
+
+	?vperm		$rndkey1,$rndkey1,$rndkey0,$keyperm
+	vncipher	$inout,$inout,$rndkey1
+	lvx		$rndkey1,$idx,$key1
+	li		$idx,16
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+	vxor		$rndkey0,$rndkey0,$tweak1
+	vncipherlast	$output,$inout,$rndkey0
+
+	le?vperm	$tmp,$output,$output,$leperm
+	be?nop
+	le?stvx_u	$tmp,0,$out
+	be?stvx_u	$output,0,$out
+
+	vmr		$inout,$inptail
+	lvx		$inptail,0,$inp
+	#addi		$inp,$inp,16
+	lvx		$rndkey0,0,$key1
+	lvx		$rndkey1,$idx,$key1
+	addi		$idx,$idx,16
+	vperm		$inout,$inout,$inptail,$inpperm
+	?vperm		$rndkey0,$rndkey0,$rndkey1,$keyperm
+
+	lvsr		$inpperm,0,$len			# $inpperm is no longer needed
+	vxor		$inptail,$inptail,$inptail	# $inptail is no longer needed
+	vspltisb	$tmp,-1
+	vperm		$inptail,$inptail,$tmp,$inpperm
+	vsel		$inout,$inout,$output,$inptail
+
+	vxor		$rndkey0,$rndkey0,$tweak
+	vxor		$inout,$inout,$rndkey0
+	lvx		$rndkey0,$idx,$key1
+	addi		$idx,$idx,16
+
+	subi		r11,$out,1
+	mtctr		$len
+	li		$len,16
+Loop_xts_dec_steal:
+	lbzu		r0,1(r11)
+	stb		r0,16(r11)
+	bdnz		Loop_xts_dec_steal
+
+	mtctr		$rounds
+	b		Loop_xts_dec			# one more time...
+
+Lxts_dec_done:
+	${UCMP}i	$ivp,0
+	beq		Lxts_dec_ret
+
+	vsrab		$tmp,$tweak,$seven		# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	vand		$tmp,$tmp,$eighty7
+	vxor		$tweak,$tweak,$tmp
+
+	le?vperm	$tweak,$tweak,$tweak,$leperm
+	stvx_u		$tweak,0,$ivp
+
+Lxts_dec_ret:
+	mtspr		256,r12				# restore vrsave
+	li		r3,0
+	blr
+	.long		0
+	.byte		0,12,0x04,0,0x80,6,6,0
+	.long		0
+.size	.${prefix}_xts_decrypt,.-.${prefix}_xts_decrypt
+___
+#########################################################################
+{{	# Optimized XTS procedures					#
+my $key_=$key2;
+my ($x00,$x10,$x20,$x30,$x40,$x50,$x60,$x70)=map("r$_",(0,3,26..31));
+    $x00=0 if ($flavour =~ /osx/);
+my ($in0,  $in1,  $in2,  $in3,  $in4,  $in5 )=map("v$_",(0..5));
+my ($out0, $out1, $out2, $out3, $out4, $out5)=map("v$_",(7,12..16));
+my ($twk0, $twk1, $twk2, $twk3, $twk4, $twk5)=map("v$_",(17..22));
+my $rndkey0="v23";	# v24-v25 rotating buffer for the first round keys
+			# v26-v31 last 6 round keys
+my ($keyperm)=($out0);	# aliases with "caller", redundant assignment
+my $taillen=$x70;
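+# The 6x procedures below keep six blocks in flight and interleave tweak
+# generation and input loading with the vcipher/vncipher rounds to hide
+# instruction latency; shorter tails fall through to dedicated 5x..1x exits.
+# As a plain-Perl reference for what each vectorized tweak update computes
+# (a reader's sketch only, never executed by this generator): treat the 16
+# tweak bytes as a little-endian 128-bit value and multiply by x modulo
+# x^128+x^7+x^2+x+1, e.g.
+#
+#	sub xts_double {
+#		my @t = @_;			# 16 bytes, $t[0] = least significant
+#		my $carry = 0;
+#		for my $i (0..15) {
+#			my $msb = ($t[$i] >> 7) & 1;
+#			$t[$i] = (($t[$i] << 1) | $carry) & 0xff;
+#			$carry = $msb;
+#		}
+#		$t[0] ^= 0x87 if ($carry);	# reduce when bit 128 falls out
+#		return @t;
+#	}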
+
+$code.=<<___;
+.align	5
+_aesp8_xts_encrypt6x:
+	$STU		$sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+	mflr		r11
+	li		r7,`$FRAME+8*16+15`
+	li		r3,`$FRAME+8*16+31`
+	$PUSH		r11,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp)
+	stvx		v20,r7,$sp		# ABI says so
+	addi		r7,r7,32
+	stvx		v21,r3,$sp
+	addi		r3,r3,32
+	stvx		v22,r7,$sp
+	addi		r7,r7,32
+	stvx		v23,r3,$sp
+	addi		r3,r3,32
+	stvx		v24,r7,$sp
+	addi		r7,r7,32
+	stvx		v25,r3,$sp
+	addi		r3,r3,32
+	stvx		v26,r7,$sp
+	addi		r7,r7,32
+	stvx		v27,r3,$sp
+	addi		r3,r3,32
+	stvx		v28,r7,$sp
+	addi		r7,r7,32
+	stvx		v29,r3,$sp
+	addi		r3,r3,32
+	stvx		v30,r7,$sp
+	stvx		v31,r3,$sp
+	li		r0,-1
+	stw		$vrsave,`$FRAME+21*16-4`($sp)	# save vrsave
+	li		$x10,0x10
+	$PUSH		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	li		$x20,0x20
+	$PUSH		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	li		$x30,0x30
+	$PUSH		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	li		$x40,0x40
+	$PUSH		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	li		$x50,0x50
+	$PUSH		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	li		$x60,0x60
+	$PUSH		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	li		$x70,0x70
+	mtspr		256,r0
+
+	subi		$rounds,$rounds,3	# -4 in total
+
+	lvx		$rndkey0,$x00,$key1	# load key schedule
+	lvx		v30,$x10,$key1
+	addi		$key1,$key1,0x20
+	lvx		v31,$x00,$key1
+	?vperm		$rndkey0,$rndkey0,v30,$keyperm
+	addi		$key_,$sp,$FRAME+15
+	mtctr		$rounds
+
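+						# the aligned early round keys are
+						# stashed in an on-stack buffer and
+						# cycled through the rotating v24/v25
+						# pair; the last six round keys stay
+						# resident in v26-v31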
+Load_xts_enc_key:
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v30,$x10,$key1
+	addi		$key1,$key1,0x20
+	stvx		v24,$x00,$key_		# off-load round[1]
+	?vperm		v25,v31,v30,$keyperm
+	lvx		v31,$x00,$key1
+	stvx		v25,$x10,$key_		# off-load round[2]
+	addi		$key_,$key_,0x20
+	bdnz		Load_xts_enc_key
+
+	lvx		v26,$x10,$key1
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v27,$x20,$key1
+	stvx		v24,$x00,$key_		# off-load round[3]
+	?vperm		v25,v31,v26,$keyperm
+	lvx		v28,$x30,$key1
+	stvx		v25,$x10,$key_		# off-load round[4]
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	?vperm		v26,v26,v27,$keyperm
+	lvx		v29,$x40,$key1
+	?vperm		v27,v27,v28,$keyperm
+	lvx		v30,$x50,$key1
+	?vperm		v28,v28,v29,$keyperm
+	lvx		v31,$x60,$key1
+	?vperm		v29,v29,v30,$keyperm
+	lvx		$twk5,$x70,$key1	# borrow $twk5
+	?vperm		v30,v30,v31,$keyperm
+	lvx		v24,$x00,$key_		# pre-load round[1]
+	?vperm		v31,v31,$twk5,$keyperm
+	lvx		v25,$x10,$key_		# pre-load round[2]
+
+	 vperm		$in0,$inout,$inptail,$inpperm
+	 subi		$inp,$inp,31		# undo "caller"
+	vxor		$twk0,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out0,$in0,$twk0
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in1,$x10,$inp
+	vxor		$twk1,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in1,$in1,$in1,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out1,$in1,$twk1
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in2,$x20,$inp
+	 andi.		$taillen,$len,15
+	vxor		$twk2,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in2,$in2,$in2,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out2,$in2,$twk2
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in3,$x30,$inp
+	 sub		$len,$len,$taillen
+	vxor		$twk3,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in3,$in3,$in3,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out3,$in3,$twk3
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in4,$x40,$inp
+	 subi		$len,$len,0x60
+	vxor		$twk4,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in4,$in4,$in4,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out4,$in4,$twk4
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in5,$x50,$inp
+	 addi		$inp,$inp,0x60
+	vxor		$twk5,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in5,$in5,$in5,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out5,$in5,$twk5
+	vxor		$tweak,$tweak,$tmp
+
+	vxor		v31,v31,$rndkey0
+	mtctr		$rounds
+	b		Loop_xts_enc6x
+
+.align	5
+Loop_xts_enc6x:
+	vcipher		$out0,$out0,v24
+	vcipher		$out1,$out1,v24
+	vcipher		$out2,$out2,v24
+	vcipher		$out3,$out3,v24
+	vcipher		$out4,$out4,v24
+	vcipher		$out5,$out5,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vcipher		$out0,$out0,v25
+	vcipher		$out1,$out1,v25
+	vcipher		$out2,$out2,v25
+	vcipher		$out3,$out3,v25
+	vcipher		$out4,$out4,v25
+	vcipher		$out5,$out5,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Loop_xts_enc6x
+
+	subic		$len,$len,96		# $len-=96
+	 vxor		$in0,$twk0,v31		# xor with last round key
+	vcipher		$out0,$out0,v24
+	vcipher		$out1,$out1,v24
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk0,$tweak,$rndkey0
+	 vaddubm	$tweak,$tweak,$tweak
+	vcipher		$out2,$out2,v24
+	vcipher		$out3,$out3,v24
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vcipher		$out4,$out4,v24
+	vcipher		$out5,$out5,v24
+
+	subfe.		r0,r0,r0		# borrow?-1:0
+	 vand		$tmp,$tmp,$eighty7
+	vcipher		$out0,$out0,v25
+	vcipher		$out1,$out1,v25
+	 vxor		$tweak,$tweak,$tmp
+	vcipher		$out2,$out2,v25
+	vcipher		$out3,$out3,v25
+	 vxor		$in1,$twk1,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk1,$tweak,$rndkey0
+	vcipher		$out4,$out4,v25
+	vcipher		$out5,$out5,v25
+
+	and		r0,r0,$len
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vcipher		$out0,$out0,v26
+	vcipher		$out1,$out1,v26
+	 vand		$tmp,$tmp,$eighty7
+	vcipher		$out2,$out2,v26
+	vcipher		$out3,$out3,v26
+	 vxor		$tweak,$tweak,$tmp
+	vcipher		$out4,$out4,v26
+	vcipher		$out5,$out5,v26
+
+	add		$inp,$inp,r0		# $inp is adjusted in such a
+						# way that at exit from the
+						# loop inX-in5 are loaded
+						# with the last "words"
+	 vxor		$in2,$twk2,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk2,$tweak,$rndkey0
+	 vaddubm	$tweak,$tweak,$tweak
+	vcipher		$out0,$out0,v27
+	vcipher		$out1,$out1,v27
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vcipher		$out2,$out2,v27
+	vcipher		$out3,$out3,v27
+	 vand		$tmp,$tmp,$eighty7
+	vcipher		$out4,$out4,v27
+	vcipher		$out5,$out5,v27
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	 vxor		$tweak,$tweak,$tmp
+	vcipher		$out0,$out0,v28
+	vcipher		$out1,$out1,v28
+	 vxor		$in3,$twk3,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk3,$tweak,$rndkey0
+	vcipher		$out2,$out2,v28
+	vcipher		$out3,$out3,v28
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vcipher		$out4,$out4,v28
+	vcipher		$out5,$out5,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+	 vand		$tmp,$tmp,$eighty7
+
+	vcipher		$out0,$out0,v29
+	vcipher		$out1,$out1,v29
+	 vxor		$tweak,$tweak,$tmp
+	vcipher		$out2,$out2,v29
+	vcipher		$out3,$out3,v29
+	 vxor		$in4,$twk4,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk4,$tweak,$rndkey0
+	vcipher		$out4,$out4,v29
+	vcipher		$out5,$out5,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+
+	vcipher		$out0,$out0,v30
+	vcipher		$out1,$out1,v30
+	 vand		$tmp,$tmp,$eighty7
+	vcipher		$out2,$out2,v30
+	vcipher		$out3,$out3,v30
+	 vxor		$tweak,$tweak,$tmp
+	vcipher		$out4,$out4,v30
+	vcipher		$out5,$out5,v30
+	 vxor		$in5,$twk5,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk5,$tweak,$rndkey0
+
+	vcipherlast	$out0,$out0,$in0
+	 lvx_u		$in0,$x00,$inp		# load next input block
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vcipherlast	$out1,$out1,$in1
+	 lvx_u		$in1,$x10,$inp
+	vcipherlast	$out2,$out2,$in2
+	 le?vperm	$in0,$in0,$in0,$leperm
+	 lvx_u		$in2,$x20,$inp
+	 vand		$tmp,$tmp,$eighty7
+	vcipherlast	$out3,$out3,$in3
+	 le?vperm	$in1,$in1,$in1,$leperm
+	 lvx_u		$in3,$x30,$inp
+	vcipherlast	$out4,$out4,$in4
+	 le?vperm	$in2,$in2,$in2,$leperm
+	 lvx_u		$in4,$x40,$inp
+	 vxor		$tweak,$tweak,$tmp
+	vcipherlast	$tmp,$out5,$in5		# last block might be needed
+						# in stealing mode
+	 le?vperm	$in3,$in3,$in3,$leperm
+	 lvx_u		$in5,$x50,$inp
+	 addi		$inp,$inp,0x60
+	 le?vperm	$in4,$in4,$in4,$leperm
+	 le?vperm	$in5,$in5,$in5,$leperm
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	 vxor		$out0,$in0,$twk0
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	 vxor		$out1,$in1,$twk1
+	le?vperm	$out3,$out3,$out3,$leperm
+	stvx_u		$out2,$x20,$out
+	 vxor		$out2,$in2,$twk2
+	le?vperm	$out4,$out4,$out4,$leperm
+	stvx_u		$out3,$x30,$out
+	 vxor		$out3,$in3,$twk3
+	le?vperm	$out5,$tmp,$tmp,$leperm
+	stvx_u		$out4,$x40,$out
+	 vxor		$out4,$in4,$twk4
+	le?stvx_u	$out5,$x50,$out
+	be?stvx_u	$tmp, $x50,$out
+	 vxor		$out5,$in5,$twk5
+	addi		$out,$out,0x60
+
+	mtctr		$rounds
+	beq		Loop_xts_enc6x		# did $len-=96 borrow?
+
+	addic.		$len,$len,0x60
+	beq		Lxts_enc6x_zero
+	cmpwi		$len,0x20
+	blt		Lxts_enc6x_one
+	nop
+	beq		Lxts_enc6x_two
+	cmpwi		$len,0x40
+	blt		Lxts_enc6x_three
+	nop
+	beq		Lxts_enc6x_four
+
+Lxts_enc6x_five:
+	vxor		$out0,$in1,$twk0
+	vxor		$out1,$in2,$twk1
+	vxor		$out2,$in3,$twk2
+	vxor		$out3,$in4,$twk3
+	vxor		$out4,$in5,$twk4
+
+	bl		_aesp8_xts_enc5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk5		# unused tweak
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$leperm
+	stvx_u		$out2,$x20,$out
+	vxor		$tmp,$out4,$twk5	# last block prep for stealing
+	le?vperm	$out4,$out4,$out4,$leperm
+	stvx_u		$out3,$x30,$out
+	stvx_u		$out4,$x40,$out
+	addi		$out,$out,0x50
+	bne		Lxts_enc6x_steal
+	b		Lxts_enc6x_done
+
+.align	4
+Lxts_enc6x_four:
+	vxor		$out0,$in2,$twk0
+	vxor		$out1,$in3,$twk1
+	vxor		$out2,$in4,$twk2
+	vxor		$out3,$in5,$twk3
+	vxor		$out4,$out4,$out4
+
+	bl		_aesp8_xts_enc5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk4		# unused tweak
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	vxor		$tmp,$out3,$twk4	# last block prep for stealing
+	le?vperm	$out3,$out3,$out3,$leperm
+	stvx_u		$out2,$x20,$out
+	stvx_u		$out3,$x30,$out
+	addi		$out,$out,0x40
+	bne		Lxts_enc6x_steal
+	b		Lxts_enc6x_done
+
+.align	4
+Lxts_enc6x_three:
+	vxor		$out0,$in3,$twk0
+	vxor		$out1,$in4,$twk1
+	vxor		$out2,$in5,$twk2
+	vxor		$out3,$out3,$out3
+	vxor		$out4,$out4,$out4
+
+	bl		_aesp8_xts_enc5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk3		# unused tweak
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	vxor		$tmp,$out2,$twk3	# last block prep for stealing
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	stvx_u		$out2,$x20,$out
+	addi		$out,$out,0x30
+	bne		Lxts_enc6x_steal
+	b		Lxts_enc6x_done
+
+.align	4
+Lxts_enc6x_two:
+	vxor		$out0,$in4,$twk0
+	vxor		$out1,$in5,$twk1
+	vxor		$out2,$out2,$out2
+	vxor		$out3,$out3,$out3
+	vxor		$out4,$out4,$out4
+
+	bl		_aesp8_xts_enc5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk2		# unused tweak
+	vxor		$tmp,$out1,$twk2	# last block prep for stealing
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	stvx_u		$out1,$x10,$out
+	addi		$out,$out,0x20
+	bne		Lxts_enc6x_steal
+	b		Lxts_enc6x_done
+
+.align	4
+Lxts_enc6x_one:
+	vxor		$out0,$in5,$twk0
+	nop
+Loop_xts_enc1x:
+	vcipher		$out0,$out0,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vcipher		$out0,$out0,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Loop_xts_enc1x
+
+	add		$inp,$inp,$taillen
+	cmpwi		$taillen,0
+	vcipher		$out0,$out0,v24
+
+	subi		$inp,$inp,16
+	vcipher		$out0,$out0,v25
+
+	lvsr		$inpperm,0,$taillen
+	vcipher		$out0,$out0,v26
+
+	lvx_u		$in0,0,$inp
+	vcipher		$out0,$out0,v27
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	vcipher		$out0,$out0,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+
+	vcipher		$out0,$out0,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+	 vxor		$twk0,$twk0,v31
+
+	le?vperm	$in0,$in0,$in0,$leperm
+	vcipher		$out0,$out0,v30
+
+	vperm		$in0,$in0,$in0,$inpperm
+	vcipherlast	$out0,$out0,$twk0
+
+	vmr		$twk0,$twk1		# unused tweak
+	vxor		$tmp,$out0,$twk1	# last block prep for stealing
+	le?vperm	$out0,$out0,$out0,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	addi		$out,$out,0x10
+	bne		Lxts_enc6x_steal
+	b		Lxts_enc6x_done
+
+.align	4
+Lxts_enc6x_zero:
+	cmpwi		$taillen,0
+	beq		Lxts_enc6x_done
+
+	add		$inp,$inp,$taillen
+	subi		$inp,$inp,16
+	lvx_u		$in0,0,$inp
+	lvsr		$inpperm,0,$taillen	# $in5 is no more
+	le?vperm	$in0,$in0,$in0,$leperm
+	vperm		$in0,$in0,$in0,$inpperm
+	vxor		$tmp,$tmp,$twk0
+Lxts_enc6x_steal:
+	vxor		$in0,$in0,$twk0
+	vxor		$out0,$out0,$out0
+	vspltisb	$out1,-1
+	vperm		$out0,$out0,$out1,$inpperm
+	vsel		$out0,$in0,$tmp,$out0	# $tmp is last block, remember?
+
+	subi		r30,$out,17
+	subi		$out,$out,16
+	mtctr		$taillen
+Loop_xts_enc6x_steal:
+	lbzu		r0,1(r30)
+	stb		r0,16(r30)
+	bdnz		Loop_xts_enc6x_steal
+
+	li		$taillen,0
+	mtctr		$rounds
+	b		Loop_xts_enc1x		# one more time...
+
+.align	4
+Lxts_enc6x_done:
+	${UCMP}i	$ivp,0
+	beq		Lxts_enc6x_ret
+
+	vxor		$tweak,$twk0,$rndkey0
+	le?vperm	$tweak,$tweak,$tweak,$leperm
+	stvx_u		$tweak,0,$ivp
+
+Lxts_enc6x_ret:
+	mtlr		r11
+	li		r10,`$FRAME+15`
+	li		r11,`$FRAME+31`
+	stvx		$seven,r10,$sp		# wipe copies of round keys
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+	stvx		$seven,r10,$sp
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+	stvx		$seven,r10,$sp
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+	stvx		$seven,r10,$sp
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+
+	mtspr		256,$vrsave
+	lvx		v20,r10,$sp		# ABI says so
+	addi		r10,r10,32
+	lvx		v21,r11,$sp
+	addi		r11,r11,32
+	lvx		v22,r10,$sp
+	addi		r10,r10,32
+	lvx		v23,r11,$sp
+	addi		r11,r11,32
+	lvx		v24,r10,$sp
+	addi		r10,r10,32
+	lvx		v25,r11,$sp
+	addi		r11,r11,32
+	lvx		v26,r10,$sp
+	addi		r10,r10,32
+	lvx		v27,r11,$sp
+	addi		r11,r11,32
+	lvx		v28,r10,$sp
+	addi		r10,r10,32
+	lvx		v29,r11,$sp
+	addi		r11,r11,32
+	lvx		v30,r10,$sp
+	lvx		v31,r11,$sp
+	$POP		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	$POP		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	$POP		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	$POP		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	$POP		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	$POP		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	addi		$sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+	blr
+	.long		0
+	.byte		0,12,0x04,1,0x80,6,6,0
+	.long		0
+
+.align	5
+_aesp8_xts_enc5x:
+	vcipher		$out0,$out0,v24
+	vcipher		$out1,$out1,v24
+	vcipher		$out2,$out2,v24
+	vcipher		$out3,$out3,v24
+	vcipher		$out4,$out4,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vcipher		$out0,$out0,v25
+	vcipher		$out1,$out1,v25
+	vcipher		$out2,$out2,v25
+	vcipher		$out3,$out3,v25
+	vcipher		$out4,$out4,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		_aesp8_xts_enc5x
+
+	add		$inp,$inp,$taillen
+	cmpwi		$taillen,0
+	vcipher		$out0,$out0,v24
+	vcipher		$out1,$out1,v24
+	vcipher		$out2,$out2,v24
+	vcipher		$out3,$out3,v24
+	vcipher		$out4,$out4,v24
+
+	subi		$inp,$inp,16
+	vcipher		$out0,$out0,v25
+	vcipher		$out1,$out1,v25
+	vcipher		$out2,$out2,v25
+	vcipher		$out3,$out3,v25
+	vcipher		$out4,$out4,v25
+	 vxor		$twk0,$twk0,v31
+
+	vcipher		$out0,$out0,v26
+	lvsr		$inpperm,0,$taillen	# $in5 is no more
+	vcipher		$out1,$out1,v26
+	vcipher		$out2,$out2,v26
+	vcipher		$out3,$out3,v26
+	vcipher		$out4,$out4,v26
+	 vxor		$in1,$twk1,v31
+
+	vcipher		$out0,$out0,v27
+	lvx_u		$in0,0,$inp
+	vcipher		$out1,$out1,v27
+	vcipher		$out2,$out2,v27
+	vcipher		$out3,$out3,v27
+	vcipher		$out4,$out4,v27
+	 vxor		$in2,$twk2,v31
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	vcipher		$out0,$out0,v28
+	vcipher		$out1,$out1,v28
+	vcipher		$out2,$out2,v28
+	vcipher		$out3,$out3,v28
+	vcipher		$out4,$out4,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+	 vxor		$in3,$twk3,v31
+
+	vcipher		$out0,$out0,v29
+	le?vperm	$in0,$in0,$in0,$leperm
+	vcipher		$out1,$out1,v29
+	vcipher		$out2,$out2,v29
+	vcipher		$out3,$out3,v29
+	vcipher		$out4,$out4,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+	 vxor		$in4,$twk4,v31
+
+	vcipher		$out0,$out0,v30
+	vperm		$in0,$in0,$in0,$inpperm
+	vcipher		$out1,$out1,v30
+	vcipher		$out2,$out2,v30
+	vcipher		$out3,$out3,v30
+	vcipher		$out4,$out4,v30
+
+	vcipherlast	$out0,$out0,$twk0
+	vcipherlast	$out1,$out1,$in1
+	vcipherlast	$out2,$out2,$in2
+	vcipherlast	$out3,$out3,$in3
+	vcipherlast	$out4,$out4,$in4
+	blr
+	.long		0
+	.byte		0,12,0x14,0,0,0,0,0
+
+.align	5
+_aesp8_xts_decrypt6x:
+	$STU		$sp,-`($FRAME+21*16+6*$SIZE_T)`($sp)
+	mflr		r11
+	li		r7,`$FRAME+8*16+15`
+	li		r3,`$FRAME+8*16+31`
+	$PUSH		r11,`$FRAME+21*16+6*$SIZE_T+$LRSAVE`($sp)
+	stvx		v20,r7,$sp		# ABI says so
+	addi		r7,r7,32
+	stvx		v21,r3,$sp
+	addi		r3,r3,32
+	stvx		v22,r7,$sp
+	addi		r7,r7,32
+	stvx		v23,r3,$sp
+	addi		r3,r3,32
+	stvx		v24,r7,$sp
+	addi		r7,r7,32
+	stvx		v25,r3,$sp
+	addi		r3,r3,32
+	stvx		v26,r7,$sp
+	addi		r7,r7,32
+	stvx		v27,r3,$sp
+	addi		r3,r3,32
+	stvx		v28,r7,$sp
+	addi		r7,r7,32
+	stvx		v29,r3,$sp
+	addi		r3,r3,32
+	stvx		v30,r7,$sp
+	stvx		v31,r3,$sp
+	li		r0,-1
+	stw		$vrsave,`$FRAME+21*16-4`($sp)	# save vrsave
+	li		$x10,0x10
+	$PUSH		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	li		$x20,0x20
+	$PUSH		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	li		$x30,0x30
+	$PUSH		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	li		$x40,0x40
+	$PUSH		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	li		$x50,0x50
+	$PUSH		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	li		$x60,0x60
+	$PUSH		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	li		$x70,0x70
+	mtspr		256,r0
+
+	subi		$rounds,$rounds,3	# -4 in total
+
+	lvx		$rndkey0,$x00,$key1	# load key schedule
+	lvx		v30,$x10,$key1
+	addi		$key1,$key1,0x20
+	lvx		v31,$x00,$key1
+	?vperm		$rndkey0,$rndkey0,v30,$keyperm
+	addi		$key_,$sp,$FRAME+15
+	mtctr		$rounds
+
+Load_xts_dec_key:
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v30,$x10,$key1
+	addi		$key1,$key1,0x20
+	stvx		v24,$x00,$key_		# off-load round[1]
+	?vperm		v25,v31,v30,$keyperm
+	lvx		v31,$x00,$key1
+	stvx		v25,$x10,$key_		# off-load round[2]
+	addi		$key_,$key_,0x20
+	bdnz		Load_xts_dec_key
+
+	lvx		v26,$x10,$key1
+	?vperm		v24,v30,v31,$keyperm
+	lvx		v27,$x20,$key1
+	stvx		v24,$x00,$key_		# off-load round[3]
+	?vperm		v25,v31,v26,$keyperm
+	lvx		v28,$x30,$key1
+	stvx		v25,$x10,$key_		# off-load round[4]
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	?vperm		v26,v26,v27,$keyperm
+	lvx		v29,$x40,$key1
+	?vperm		v27,v27,v28,$keyperm
+	lvx		v30,$x50,$key1
+	?vperm		v28,v28,v29,$keyperm
+	lvx		v31,$x60,$key1
+	?vperm		v29,v29,v30,$keyperm
+	lvx		$twk5,$x70,$key1	# borrow $twk5
+	?vperm		v30,v30,v31,$keyperm
+	lvx		v24,$x00,$key_		# pre-load round[1]
+	?vperm		v31,v31,$twk5,$keyperm
+	lvx		v25,$x10,$key_		# pre-load round[2]
+
+	 vperm		$in0,$inout,$inptail,$inpperm
+	 subi		$inp,$inp,31		# undo "caller"
+	vxor		$twk0,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out0,$in0,$twk0
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in1,$x10,$inp
+	vxor		$twk1,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in1,$in1,$in1,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out1,$in1,$twk1
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in2,$x20,$inp
+	 andi.		$taillen,$len,15
+	vxor		$twk2,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in2,$in2,$in2,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out2,$in2,$twk2
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in3,$x30,$inp
+	 sub		$len,$len,$taillen
+	vxor		$twk3,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in3,$in3,$in3,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out3,$in3,$twk3
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in4,$x40,$inp
+	 subi		$len,$len,0x60
+	vxor		$twk4,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in4,$in4,$in4,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out4,$in4,$twk4
+	vxor		$tweak,$tweak,$tmp
+
+	 lvx_u		$in5,$x50,$inp
+	 addi		$inp,$inp,0x60
+	vxor		$twk5,$tweak,$rndkey0
+	vsrab		$tmp,$tweak,$seven	# next tweak value
+	vaddubm		$tweak,$tweak,$tweak
+	vsldoi		$tmp,$tmp,$tmp,15
+	 le?vperm	$in5,$in5,$in5,$leperm
+	vand		$tmp,$tmp,$eighty7
+	 vxor		$out5,$in5,$twk5
+	vxor		$tweak,$tweak,$tmp
+
+	vxor		v31,v31,$rndkey0
+	mtctr		$rounds
+	b		Loop_xts_dec6x
+
+.align	5
+Loop_xts_dec6x:
+	vncipher	$out0,$out0,v24
+	vncipher	$out1,$out1,v24
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	vncipher	$out4,$out4,v24
+	vncipher	$out5,$out5,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vncipher	$out0,$out0,v25
+	vncipher	$out1,$out1,v25
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	vncipher	$out4,$out4,v25
+	vncipher	$out5,$out5,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Loop_xts_dec6x
+
+	subic		$len,$len,96		# $len-=96
+	 vxor		$in0,$twk0,v31		# xor with last round key
+	vncipher	$out0,$out0,v24
+	vncipher	$out1,$out1,v24
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk0,$tweak,$rndkey0
+	 vaddubm	$tweak,$tweak,$tweak
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vncipher	$out4,$out4,v24
+	vncipher	$out5,$out5,v24
+
+	subfe.		r0,r0,r0		# borrow?-1:0
+	 vand		$tmp,$tmp,$eighty7
+	vncipher	$out0,$out0,v25
+	vncipher	$out1,$out1,v25
+	 vxor		$tweak,$tweak,$tmp
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	 vxor		$in1,$twk1,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk1,$tweak,$rndkey0
+	vncipher	$out4,$out4,v25
+	vncipher	$out5,$out5,v25
+
+	and		r0,r0,$len
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vncipher	$out0,$out0,v26
+	vncipher	$out1,$out1,v26
+	 vand		$tmp,$tmp,$eighty7
+	vncipher	$out2,$out2,v26
+	vncipher	$out3,$out3,v26
+	 vxor		$tweak,$tweak,$tmp
+	vncipher	$out4,$out4,v26
+	vncipher	$out5,$out5,v26
+
+	add		$inp,$inp,r0		# $inp is adjusted in such a
+						# way that at exit from the
+						# loop inX-in5 are loaded
+						# with the last "words"
+	 vxor		$in2,$twk2,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk2,$tweak,$rndkey0
+	 vaddubm	$tweak,$tweak,$tweak
+	vncipher	$out0,$out0,v27
+	vncipher	$out1,$out1,v27
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vncipher	$out2,$out2,v27
+	vncipher	$out3,$out3,v27
+	 vand		$tmp,$tmp,$eighty7
+	vncipher	$out4,$out4,v27
+	vncipher	$out5,$out5,v27
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	 vxor		$tweak,$tweak,$tmp
+	vncipher	$out0,$out0,v28
+	vncipher	$out1,$out1,v28
+	 vxor		$in3,$twk3,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk3,$tweak,$rndkey0
+	vncipher	$out2,$out2,v28
+	vncipher	$out3,$out3,v28
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vncipher	$out4,$out4,v28
+	vncipher	$out5,$out5,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+	 vand		$tmp,$tmp,$eighty7
+
+	vncipher	$out0,$out0,v29
+	vncipher	$out1,$out1,v29
+	 vxor		$tweak,$tweak,$tmp
+	vncipher	$out2,$out2,v29
+	vncipher	$out3,$out3,v29
+	 vxor		$in4,$twk4,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk4,$tweak,$rndkey0
+	vncipher	$out4,$out4,v29
+	vncipher	$out5,$out5,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+
+	vncipher	$out0,$out0,v30
+	vncipher	$out1,$out1,v30
+	 vand		$tmp,$tmp,$eighty7
+	vncipher	$out2,$out2,v30
+	vncipher	$out3,$out3,v30
+	 vxor		$tweak,$tweak,$tmp
+	vncipher	$out4,$out4,v30
+	vncipher	$out5,$out5,v30
+	 vxor		$in5,$twk5,v31
+	 vsrab		$tmp,$tweak,$seven	# next tweak value
+	 vxor		$twk5,$tweak,$rndkey0
+
+	vncipherlast	$out0,$out0,$in0
+	 lvx_u		$in0,$x00,$inp		# load next input block
+	 vaddubm	$tweak,$tweak,$tweak
+	 vsldoi		$tmp,$tmp,$tmp,15
+	vncipherlast	$out1,$out1,$in1
+	 lvx_u		$in1,$x10,$inp
+	vncipherlast	$out2,$out2,$in2
+	 le?vperm	$in0,$in0,$in0,$leperm
+	 lvx_u		$in2,$x20,$inp
+	 vand		$tmp,$tmp,$eighty7
+	vncipherlast	$out3,$out3,$in3
+	 le?vperm	$in1,$in1,$in1,$leperm
+	 lvx_u		$in3,$x30,$inp
+	vncipherlast	$out4,$out4,$in4
+	 le?vperm	$in2,$in2,$in2,$leperm
+	 lvx_u		$in4,$x40,$inp
+	 vxor		$tweak,$tweak,$tmp
+	vncipherlast	$out5,$out5,$in5
+	 le?vperm	$in3,$in3,$in3,$leperm
+	 lvx_u		$in5,$x50,$inp
+	 addi		$inp,$inp,0x60
+	 le?vperm	$in4,$in4,$in4,$leperm
+	 le?vperm	$in5,$in5,$in5,$leperm
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	 vxor		$out0,$in0,$twk0
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	 vxor		$out1,$in1,$twk1
+	le?vperm	$out3,$out3,$out3,$leperm
+	stvx_u		$out2,$x20,$out
+	 vxor		$out2,$in2,$twk2
+	le?vperm	$out4,$out4,$out4,$leperm
+	stvx_u		$out3,$x30,$out
+	 vxor		$out3,$in3,$twk3
+	le?vperm	$out5,$out5,$out5,$leperm
+	stvx_u		$out4,$x40,$out
+	 vxor		$out4,$in4,$twk4
+	stvx_u		$out5,$x50,$out
+	 vxor		$out5,$in5,$twk5
+	addi		$out,$out,0x60
+
+	mtctr		$rounds
+	beq		Loop_xts_dec6x		# did $len-=96 borrow?
+
+	addic.		$len,$len,0x60
+	beq		Lxts_dec6x_zero
+	cmpwi		$len,0x20
+	blt		Lxts_dec6x_one
+	nop
+	beq		Lxts_dec6x_two
+	cmpwi		$len,0x40
+	blt		Lxts_dec6x_three
+	nop
+	beq		Lxts_dec6x_four
+
+Lxts_dec6x_five:
+	vxor		$out0,$in1,$twk0
+	vxor		$out1,$in2,$twk1
+	vxor		$out2,$in3,$twk2
+	vxor		$out3,$in4,$twk3
+	vxor		$out4,$in5,$twk4
+
+	bl		_aesp8_xts_dec5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk5		# unused tweak
+	vxor		$twk1,$tweak,$rndkey0
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	vxor		$out0,$in0,$twk1
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$leperm
+	stvx_u		$out2,$x20,$out
+	le?vperm	$out4,$out4,$out4,$leperm
+	stvx_u		$out3,$x30,$out
+	stvx_u		$out4,$x40,$out
+	addi		$out,$out,0x50
+	bne		Lxts_dec6x_steal
+	b		Lxts_dec6x_done
+
+.align	4
+Lxts_dec6x_four:
+	vxor		$out0,$in2,$twk0
+	vxor		$out1,$in3,$twk1
+	vxor		$out2,$in4,$twk2
+	vxor		$out3,$in5,$twk3
+	vxor		$out4,$out4,$out4
+
+	bl		_aesp8_xts_dec5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk4		# unused tweak
+	vmr		$twk1,$twk5
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	vxor		$out0,$in0,$twk5
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	le?vperm	$out3,$out3,$out3,$leperm
+	stvx_u		$out2,$x20,$out
+	stvx_u		$out3,$x30,$out
+	addi		$out,$out,0x40
+	bne		Lxts_dec6x_steal
+	b		Lxts_dec6x_done
+
+.align	4
+Lxts_dec6x_three:
+	vxor		$out0,$in3,$twk0
+	vxor		$out1,$in4,$twk1
+	vxor		$out2,$in5,$twk2
+	vxor		$out3,$out3,$out3
+	vxor		$out4,$out4,$out4
+
+	bl		_aesp8_xts_dec5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk3		# unused tweak
+	vmr		$twk1,$twk4
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	vxor		$out0,$in0,$twk4
+	le?vperm	$out2,$out2,$out2,$leperm
+	stvx_u		$out1,$x10,$out
+	stvx_u		$out2,$x20,$out
+	addi		$out,$out,0x30
+	bne		Lxts_dec6x_steal
+	b		Lxts_dec6x_done
+
+.align	4
+Lxts_dec6x_two:
+	vxor		$out0,$in4,$twk0
+	vxor		$out1,$in5,$twk1
+	vxor		$out2,$out2,$out2
+	vxor		$out3,$out3,$out3
+	vxor		$out4,$out4,$out4
+
+	bl		_aesp8_xts_dec5x
+
+	le?vperm	$out0,$out0,$out0,$leperm
+	vmr		$twk0,$twk2		# unused tweak
+	vmr		$twk1,$twk3
+	le?vperm	$out1,$out1,$out1,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	vxor		$out0,$in0,$twk3
+	stvx_u		$out1,$x10,$out
+	addi		$out,$out,0x20
+	bne		Lxts_dec6x_steal
+	b		Lxts_dec6x_done
+
+.align	4
+Lxts_dec6x_one:
+	vxor		$out0,$in5,$twk0
+	nop
+Loop_xts_dec1x:
+	vncipher	$out0,$out0,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vncipher	$out0,$out0,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Loop_xts_dec1x
+
+	subi		r0,$taillen,1
+	vncipher	$out0,$out0,v24
+
+	andi.		r0,r0,16
+	cmpwi		$taillen,0
+	vncipher	$out0,$out0,v25
+
+	sub		$inp,$inp,r0
+	vncipher	$out0,$out0,v26
+
+	lvx_u		$in0,0,$inp
+	vncipher	$out0,$out0,v27
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	vncipher	$out0,$out0,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+
+	vncipher	$out0,$out0,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+	 vxor		$twk0,$twk0,v31
+
+	le?vperm	$in0,$in0,$in0,$leperm
+	vncipher	$out0,$out0,v30
+
+	mtctr		$rounds
+	vncipherlast	$out0,$out0,$twk0
+
+	vmr		$twk0,$twk1		# unused tweak
+	vmr		$twk1,$twk2
+	le?vperm	$out0,$out0,$out0,$leperm
+	stvx_u		$out0,$x00,$out		# store output
+	addi		$out,$out,0x10
+	vxor		$out0,$in0,$twk2
+	bne		Lxts_dec6x_steal
+	b		Lxts_dec6x_done
+
+.align	4
+Lxts_dec6x_zero:
+	cmpwi		$taillen,0
+	beq		Lxts_dec6x_done
+
+	lvx_u		$in0,0,$inp
+	le?vperm	$in0,$in0,$in0,$leperm
+	vxor		$out0,$in0,$twk1
+Lxts_dec6x_steal:
+	vncipher	$out0,$out0,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vncipher	$out0,$out0,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		Lxts_dec6x_steal
+
+	add		$inp,$inp,$taillen
+	vncipher	$out0,$out0,v24
+
+	cmpwi		$taillen,0
+	vncipher	$out0,$out0,v25
+
+	lvx_u		$in0,0,$inp
+	vncipher	$out0,$out0,v26
+
+	lvsr		$inpperm,0,$taillen	# $in5 is no more
+	vncipher	$out0,$out0,v27
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	vncipher	$out0,$out0,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+
+	vncipher	$out0,$out0,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+	 vxor		$twk1,$twk1,v31
+
+	le?vperm	$in0,$in0,$in0,$leperm
+	vncipher	$out0,$out0,v30
+
+	vperm		$in0,$in0,$in0,$inpperm
+	vncipherlast	$tmp,$out0,$twk1
+
+	le?vperm	$out0,$tmp,$tmp,$leperm
+	le?stvx_u	$out0,0,$out
+	be?stvx_u	$tmp,0,$out
+
+	vxor		$out0,$out0,$out0
+	vspltisb	$out1,-1
+	vperm		$out0,$out0,$out1,$inpperm
+	vsel		$out0,$in0,$tmp,$out0
+	vxor		$out0,$out0,$twk0
+
+	subi		r30,$out,1
+	mtctr		$taillen
+Loop_xts_dec6x_steal:
+	lbzu		r0,1(r30)
+	stb		r0,16(r30)
+	bdnz		Loop_xts_dec6x_steal
+
+	li		$taillen,0
+	mtctr		$rounds
+	b		Loop_xts_dec1x		# one more time...
+
+.align	4
+Lxts_dec6x_done:
+	${UCMP}i	$ivp,0
+	beq		Lxts_dec6x_ret
+
+	vxor		$tweak,$twk0,$rndkey0
+	le?vperm	$tweak,$tweak,$tweak,$leperm
+	stvx_u		$tweak,0,$ivp
+
+Lxts_dec6x_ret:
+	mtlr		r11
+	li		r10,`$FRAME+15`
+	li		r11,`$FRAME+31`
+	stvx		$seven,r10,$sp		# wipe copies of round keys
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+	stvx		$seven,r10,$sp
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+	stvx		$seven,r10,$sp
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+	stvx		$seven,r10,$sp
+	addi		r10,r10,32
+	stvx		$seven,r11,$sp
+	addi		r11,r11,32
+
+	mtspr		256,$vrsave
+	lvx		v20,r10,$sp		# ABI says so
+	addi		r10,r10,32
+	lvx		v21,r11,$sp
+	addi		r11,r11,32
+	lvx		v22,r10,$sp
+	addi		r10,r10,32
+	lvx		v23,r11,$sp
+	addi		r11,r11,32
+	lvx		v24,r10,$sp
+	addi		r10,r10,32
+	lvx		v25,r11,$sp
+	addi		r11,r11,32
+	lvx		v26,r10,$sp
+	addi		r10,r10,32
+	lvx		v27,r11,$sp
+	addi		r11,r11,32
+	lvx		v28,r10,$sp
+	addi		r10,r10,32
+	lvx		v29,r11,$sp
+	addi		r11,r11,32
+	lvx		v30,r10,$sp
+	lvx		v31,r11,$sp
+	$POP		r26,`$FRAME+21*16+0*$SIZE_T`($sp)
+	$POP		r27,`$FRAME+21*16+1*$SIZE_T`($sp)
+	$POP		r28,`$FRAME+21*16+2*$SIZE_T`($sp)
+	$POP		r29,`$FRAME+21*16+3*$SIZE_T`($sp)
+	$POP		r30,`$FRAME+21*16+4*$SIZE_T`($sp)
+	$POP		r31,`$FRAME+21*16+5*$SIZE_T`($sp)
+	addi		$sp,$sp,`$FRAME+21*16+6*$SIZE_T`
+	blr
+	.long		0
+	.byte		0,12,0x04,1,0x80,6,6,0
+	.long		0
+
+.align	5
+_aesp8_xts_dec5x:
+	vncipher	$out0,$out0,v24
+	vncipher	$out1,$out1,v24
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	vncipher	$out4,$out4,v24
+	lvx		v24,$x20,$key_		# round[3]
+	addi		$key_,$key_,0x20
+
+	vncipher	$out0,$out0,v25
+	vncipher	$out1,$out1,v25
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	vncipher	$out4,$out4,v25
+	lvx		v25,$x10,$key_		# round[4]
+	bdnz		_aesp8_xts_dec5x
+
+	subi		r0,$taillen,1
+	vncipher	$out0,$out0,v24
+	vncipher	$out1,$out1,v24
+	vncipher	$out2,$out2,v24
+	vncipher	$out3,$out3,v24
+	vncipher	$out4,$out4,v24
+
+	andi.		r0,r0,16
+	cmpwi		$taillen,0
+	vncipher	$out0,$out0,v25
+	vncipher	$out1,$out1,v25
+	vncipher	$out2,$out2,v25
+	vncipher	$out3,$out3,v25
+	vncipher	$out4,$out4,v25
+	 vxor		$twk0,$twk0,v31
+
+	sub		$inp,$inp,r0
+	vncipher	$out0,$out0,v26
+	vncipher	$out1,$out1,v26
+	vncipher	$out2,$out2,v26
+	vncipher	$out3,$out3,v26
+	vncipher	$out4,$out4,v26
+	 vxor		$in1,$twk1,v31
+
+	vncipher	$out0,$out0,v27
+	lvx_u		$in0,0,$inp
+	vncipher	$out1,$out1,v27
+	vncipher	$out2,$out2,v27
+	vncipher	$out3,$out3,v27
+	vncipher	$out4,$out4,v27
+	 vxor		$in2,$twk2,v31
+
+	addi		$key_,$sp,$FRAME+15	# rewind $key_
+	vncipher	$out0,$out0,v28
+	vncipher	$out1,$out1,v28
+	vncipher	$out2,$out2,v28
+	vncipher	$out3,$out3,v28
+	vncipher	$out4,$out4,v28
+	lvx		v24,$x00,$key_		# re-pre-load round[1]
+	 vxor		$in3,$twk3,v31
+
+	vncipher	$out0,$out0,v29
+	le?vperm	$in0,$in0,$in0,$leperm
+	vncipher	$out1,$out1,v29
+	vncipher	$out2,$out2,v29
+	vncipher	$out3,$out3,v29
+	vncipher	$out4,$out4,v29
+	lvx		v25,$x10,$key_		# re-pre-load round[2]
+	 vxor		$in4,$twk4,v31
+
+	vncipher	$out0,$out0,v30
+	vncipher	$out1,$out1,v30
+	vncipher	$out2,$out2,v30
+	vncipher	$out3,$out3,v30
+	vncipher	$out4,$out4,v30
+
+	vncipherlast	$out0,$out0,$twk0
+	vncipherlast	$out1,$out1,$in1
+	vncipherlast	$out2,$out2,$in2
+	vncipherlast	$out3,$out3,$in3
+	vncipherlast	$out4,$out4,$in4
+	mtctr		$rounds
+	blr
+	.long		0
+	.byte		0,12,0x14,0,0,0,0,0
+___
+}}	}}}
+
+my $consts=1;
+foreach(split("\n",$code)) {
+        s/\`([^\`]*)\`/eval($1)/geo;
+
+	# constants table endian-specific conversion
+	if ($consts && m/\.(long|byte)\s+(.+)\s+(\?[a-z]*)$/o) {
+	    my $conv=$3;
+	    my @bytes=();
+
+	    # convert to endian-agnostic format
+	    if ($1 eq "long") {
+	      foreach (split(/,\s*/,$2)) {
+		my $l = /^0/?oct:int;
+		push @bytes,($l>>24)&0xff,($l>>16)&0xff,($l>>8)&0xff,$l&0xff;
+	      }
+	    } else {
+		@bytes = map(/^0/?oct:int,split(/,\s*/,$2));
+	    }
+
+	    # little-endian conversion
+	    if ($flavour =~ /le$/o) {
+		SWITCH: for($conv)  {
+		    /\?inv/ && do   { @bytes=map($_^0xf,@bytes); last; };
+		    /\?rev/ && do   { @bytes=reverse(@bytes);    last; };
+		}
+	    }
+
+	    #emit
+	    print ".byte\t",join(',',map (sprintf("0x%02x",$_),@bytes)),"\n";
+	    next;
+	}
+	$consts=0 if (m/Lconsts:/o);	# end of table
+
+	# instructions prefixed with '?' are endian-specific and need
+	# to be adjusted accordingly...
+	if ($flavour =~ /le$/o) {	# little-endian
+	    s/le\?//o		or
+	    s/be\?/#be#/o	or
+	    s/\?lvsr/lvsl/o	or
+	    s/\?lvsl/lvsr/o	or
+	    s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/o or
+	    s/\?(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/o or
+	    s/\?(vspltw\s+v[0-9]+,\s*)(v[0-9]+,)\s*([0-9])/$1$2 3-$3/o;
+	} else {			# big-endian
+	    s/le\?/#le#/o	or
+	    s/be\?//o		or
+	    s/\?([a-z]+)/$1/o;
+	}
+
+        print $_,"\n";
+}
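+# For example, when emitting a little-endian flavour a line such as
+#	?vperm	v0,v1,v2,v3
+# is printed with its first two source operands swapped (vperm v0,v2,v1,v3),
+# "le?" prefixes are dropped and "be?" lines are commented out as "#be#";
+# big-endian output keeps "be?" lines, comments out "le?" ones and simply
+# strips the '?' markers.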
+
+close STDOUT;
diff --git a/src/crypto/fipsmodule/aes/asm/aesv8-armx.pl b/src/crypto/fipsmodule/aes/asm/aesv8-armx.pl
new file mode 100644
index 0000000..325bdeb
--- /dev/null
+++ b/src/crypto/fipsmodule/aes/asm/aesv8-armx.pl
@@ -0,0 +1,1001 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# This module implements support for ARMv8 AES instructions. The
+# module is endian-agnostic in the sense that it supports both big- and
+# little-endian cases. It also supports both 32- and 64-bit modes of
+# operation; the latter is achieved by limiting the number of utilized
+# registers to 16, which implies additional NEON load and integer
+# instructions. This has no effect on the mighty Apple A7, where results
+# are literally equal to the theoretical estimates based on AES
+# instruction latencies and issue rates. On Cortex-A53, an in-order
+# execution core, this costs up to 10-15%, which is partially
+# compensated for by a dedicated code path for the 128-bit CBC encrypt
+# case. On Cortex-A57, parallelizable-mode performance seems to be
+# limited by the sheer amount of NEON instructions...
+#
+# Performance in cycles per byte processed with 128-bit key:
+#
+#		CBC enc		CBC dec		CTR
+# Apple A7	2.39		1.20		1.20
+# Cortex-A53	1.32		1.29		1.46
+# Cortex-A57(*)	1.95		0.85		0.93
+# Denver	1.96		0.86		0.80
+#
+# (*)	the original 3.64/1.34/1.32 results were for the r0p0 revision
+#	and are still the same even for the updated module;
+
+$flavour = shift;
+$output  = shift;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../../perlasm/arm-xlate.pl" and -f $xlate) or
+die "can't locate arm-xlate.pl";
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+$prefix="aes_hw";
+
+$code=<<___;
+#include <openssl/arm_arch.h>
+
+#if __ARM_MAX_ARCH__>=7
+.text
+___
+$code.=<<___ if ($flavour =~ /64/);
+#if !defined(__clang__) || defined(BORINGSSL_CLANG_SUPPORTS_DOT_ARCH)
+.arch  armv8-a+crypto
+#endif
+___
+$code.=".arch	armv7-a\n.fpu	neon\n.code	32\n"	if ($flavour !~ /64/);
+		#^^^^^^ this is done to simplify adoption by not depending
+		#	on the latest binutils.
+
+# Assembler mnemonics are an eclectic mix of 32- and 64-bit syntax:
+# NEON is written mostly with 32-bit mnemonics, integer code mostly with
+# 64-bit ones. The goal is to maintain both 32- and 64-bit code within a
+# single module and transliterate common code to either flavour with
+# regex voodoo.
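+# (For instance, the same "vld1.32 {q8},[x3]" line is meant to be rewritten
+# by arm-xlate.pl into the native AArch64 "ld1" form for the 64-bit flavour
+# and left as a NEON load for the 32-bit one; the rewriting itself lives in
+# arm-xlate.pl, not in this file.)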
+#
+{{{
+my ($inp,$bits,$out,$ptr,$rounds)=("x0","w1","x2","x3","w12");
+my ($zero,$rcon,$mask,$in0,$in1,$tmp,$key)=
+	$flavour=~/64/? map("q$_",(0..6)) : map("q$_",(0..3,8..10));
+
+
+$code.=<<___;
+.align	5
+.Lrcon:
+.long	0x01,0x01,0x01,0x01
+.long	0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d	// rotate-n-splat
+.long	0x1b,0x1b,0x1b,0x1b
+
+.globl	${prefix}_set_encrypt_key
+.type	${prefix}_set_encrypt_key,%function
+.align	5
+${prefix}_set_encrypt_key:
+.Lenc_key:
+___
+$code.=<<___	if ($flavour =~ /64/);
+	stp	x29,x30,[sp,#-16]!
+	add	x29,sp,#0
+___
+$code.=<<___;
+	mov	$ptr,#-1
+	cmp	$inp,#0
+	b.eq	.Lenc_key_abort
+	cmp	$out,#0
+	b.eq	.Lenc_key_abort
+	mov	$ptr,#-2
+	cmp	$bits,#128
+	b.lt	.Lenc_key_abort
+	cmp	$bits,#256
+	b.gt	.Lenc_key_abort
+	tst	$bits,#0x3f
+	b.ne	.Lenc_key_abort
+
+	adr	$ptr,.Lrcon
+	cmp	$bits,#192
+
+	veor	$zero,$zero,$zero
+	vld1.8	{$in0},[$inp],#16
+	mov	$bits,#8		// reuse $bits
+	vld1.32	{$rcon,$mask},[$ptr],#32
+
+	b.lt	.Loop128
+	b.eq	.L192
+	b	.L256
+
+.align	4
+.Loop128:
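+	// the rotate-n-splat table broadcasts the rotated last word of the
+	// previous round key to every lane; with all four columns equal,
+	// ShiftRows is a no-op, so "aese" against the all-zero vector reduces
+	// to plain SubBytes, i.e. the SubWord step of the key schedule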
+	vtbl.8	$key,{$in0},$mask
+	vext.8	$tmp,$zero,$in0,#12
+	vst1.32	{$in0},[$out],#16
+	aese	$key,$zero
+	subs	$bits,$bits,#1
+
+	veor	$in0,$in0,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	veor	$in0,$in0,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	 veor	$key,$key,$rcon
+	veor	$in0,$in0,$tmp
+	vshl.u8	$rcon,$rcon,#1
+	veor	$in0,$in0,$key
+	b.ne	.Loop128
+
+	vld1.32	{$rcon},[$ptr]
+
+	vtbl.8	$key,{$in0},$mask
+	vext.8	$tmp,$zero,$in0,#12
+	vst1.32	{$in0},[$out],#16
+	aese	$key,$zero
+
+	veor	$in0,$in0,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	veor	$in0,$in0,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	 veor	$key,$key,$rcon
+	veor	$in0,$in0,$tmp
+	vshl.u8	$rcon,$rcon,#1
+	veor	$in0,$in0,$key
+
+	vtbl.8	$key,{$in0},$mask
+	vext.8	$tmp,$zero,$in0,#12
+	vst1.32	{$in0},[$out],#16
+	aese	$key,$zero
+
+	veor	$in0,$in0,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	veor	$in0,$in0,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	 veor	$key,$key,$rcon
+	veor	$in0,$in0,$tmp
+	veor	$in0,$in0,$key
+	vst1.32	{$in0},[$out]
+	add	$out,$out,#0x50
+
+	mov	$rounds,#10
+	b	.Ldone
+
+.align	4
+.L192:
+	vld1.8	{$in1},[$inp],#8
+	vmov.i8	$key,#8			// borrow $key
+	vst1.32	{$in0},[$out],#16
+	vsub.i8	$mask,$mask,$key	// adjust the mask
+
+.Loop192:
+	vtbl.8	$key,{$in1},$mask
+	vext.8	$tmp,$zero,$in0,#12
+	vst1.32	{$in1},[$out],#8
+	aese	$key,$zero
+	subs	$bits,$bits,#1
+
+	veor	$in0,$in0,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	veor	$in0,$in0,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	veor	$in0,$in0,$tmp
+
+	vdup.32	$tmp,${in0}[3]
+	veor	$tmp,$tmp,$in1
+	 veor	$key,$key,$rcon
+	vext.8	$in1,$zero,$in1,#12
+	vshl.u8	$rcon,$rcon,#1
+	veor	$in1,$in1,$tmp
+	veor	$in0,$in0,$key
+	veor	$in1,$in1,$key
+	vst1.32	{$in0},[$out],#16
+	b.ne	.Loop192
+
+	mov	$rounds,#12
+	add	$out,$out,#0x20
+	b	.Ldone
+
+.align	4
+.L256:
+	vld1.8	{$in1},[$inp]
+	mov	$bits,#7
+	mov	$rounds,#14
+	vst1.32	{$in0},[$out],#16
+
+.Loop256:
+	vtbl.8	$key,{$in1},$mask
+	vext.8	$tmp,$zero,$in0,#12
+	vst1.32	{$in1},[$out],#16
+	aese	$key,$zero
+	subs	$bits,$bits,#1
+
+	veor	$in0,$in0,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	veor	$in0,$in0,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	 veor	$key,$key,$rcon
+	veor	$in0,$in0,$tmp
+	vshl.u8	$rcon,$rcon,#1
+	veor	$in0,$in0,$key
+	vst1.32	{$in0},[$out],#16
+	b.eq	.Ldone
+
+	vdup.32	$key,${in0}[3]		// just splat
+	vext.8	$tmp,$zero,$in1,#12
+	aese	$key,$zero
+
+	veor	$in1,$in1,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	veor	$in1,$in1,$tmp
+	vext.8	$tmp,$zero,$tmp,#12
+	veor	$in1,$in1,$tmp
+
+	veor	$in1,$in1,$key
+	b	.Loop256
+
+.Ldone:
+	str	$rounds,[$out]
+	mov	$ptr,#0
+
+.Lenc_key_abort:
+	mov	x0,$ptr			// return value
+	`"ldr	x29,[sp],#16"		if ($flavour =~ /64/)`
+	ret
+.size	${prefix}_set_encrypt_key,.-${prefix}_set_encrypt_key
+
+.globl	${prefix}_set_decrypt_key
+.type	${prefix}_set_decrypt_key,%function
+.align	5
+${prefix}_set_decrypt_key:
+___
+$code.=<<___	if ($flavour =~ /64/);
+	stp	x29,x30,[sp,#-16]!
+	add	x29,sp,#0
+___
+$code.=<<___	if ($flavour !~ /64/);
+	stmdb	sp!,{r4,lr}
+___
+$code.=<<___;
+	bl	.Lenc_key
+
+	cmp	x0,#0
+	b.ne	.Ldec_key_abort
+
+	sub	$out,$out,#240		// restore original $out
+	mov	x4,#-16
+	add	$inp,$out,x12,lsl#4	// end of key schedule
+
+	vld1.32	{v0.16b},[$out]
+	vld1.32	{v1.16b},[$inp]
+	vst1.32	{v0.16b},[$inp],x4
+	vst1.32	{v1.16b},[$out],#16
+
+.Loop_imc:
+	vld1.32	{v0.16b},[$out]
+	vld1.32	{v1.16b},[$inp]
+	aesimc	v0.16b,v0.16b
+	aesimc	v1.16b,v1.16b
+	vst1.32	{v0.16b},[$inp],x4
+	vst1.32	{v1.16b},[$out],#16
+	cmp	$inp,$out
+	b.hi	.Loop_imc
+
+	vld1.32	{v0.16b},[$out]
+	aesimc	v0.16b,v0.16b
+	vst1.32	{v0.16b},[$inp]
+
+	eor	x0,x0,x0		// return value
+.Ldec_key_abort:
+___
+$code.=<<___	if ($flavour !~ /64/);
+	ldmia	sp!,{r4,pc}
+___
+$code.=<<___	if ($flavour =~ /64/);
+	ldp	x29,x30,[sp],#16
+	ret
+___
+$code.=<<___;
+.size	${prefix}_set_decrypt_key,.-${prefix}_set_decrypt_key
+___
+}}}
+{{{
+sub gen_block () {
+my $dir = shift;
+my ($e,$mc) = $dir eq "en" ? ("e","mc") : ("d","imc");
+my ($inp,$out,$key)=map("x$_",(0..2));
+my $rounds="w3";
+my ($rndkey0,$rndkey1,$inout)=map("q$_",(0..3));
+
+$code.=<<___;
+.globl	${prefix}_${dir}crypt
+.type	${prefix}_${dir}crypt,%function
+.align	5
+${prefix}_${dir}crypt:
+	ldr	$rounds,[$key,#240]
+	vld1.32	{$rndkey0},[$key],#16
+	vld1.8	{$inout},[$inp]
+	sub	$rounds,$rounds,#2
+	vld1.32	{$rndkey1},[$key],#16
+
+.Loop_${dir}c:
+	aes$e	$inout,$rndkey0
+	aes$mc	$inout,$inout
+	vld1.32	{$rndkey0},[$key],#16
+	subs	$rounds,$rounds,#2
+	aes$e	$inout,$rndkey1
+	aes$mc	$inout,$inout
+	vld1.32	{$rndkey1},[$key],#16
+	b.gt	.Loop_${dir}c
+
+	aes$e	$inout,$rndkey0
+	aes$mc	$inout,$inout
+	vld1.32	{$rndkey0},[$key]
+	aes$e	$inout,$rndkey1
+	veor	$inout,$inout,$rndkey0
+
+	vst1.8	{$inout},[$out]
+	ret
+.size	${prefix}_${dir}crypt,.-${prefix}_${dir}crypt
+___
+}
+&gen_block("en");
+&gen_block("de");
+}}}
+{{{
+my ($inp,$out,$len,$key,$ivp)=map("x$_",(0..4)); my $enc="w5";
+my ($rounds,$cnt,$key_,$step,$step1)=($enc,"w6","x7","x8","x12");
+my ($dat0,$dat1,$in0,$in1,$tmp0,$tmp1,$ivec,$rndlast)=map("q$_",(0..7));
+
+my ($dat,$tmp,$rndzero_n_last)=($dat0,$tmp0,$tmp1);
+my ($key4,$key5,$key6,$key7)=("x6","x12","x14",$key);
+
+### q8-q15	preloaded key schedule
+
+$code.=<<___;
+.globl	${prefix}_cbc_encrypt
+.type	${prefix}_cbc_encrypt,%function
+.align	5
+${prefix}_cbc_encrypt:
+___
+$code.=<<___	if ($flavour =~ /64/);
+	stp	x29,x30,[sp,#-16]!
+	add	x29,sp,#0
+___
+$code.=<<___	if ($flavour !~ /64/);
+	mov	ip,sp
+	stmdb	sp!,{r4-r8,lr}
+	vstmdb	sp!,{d8-d15}            @ ABI specification says so
+	ldmia	ip,{r4-r5}		@ load remaining args
+___
+$code.=<<___;
+	subs	$len,$len,#16
+	mov	$step,#16
+	b.lo	.Lcbc_abort
+	cclr	$step,eq
+
+	cmp	$enc,#0			// en- or decrypting?
+	ldr	$rounds,[$key,#240]
+	and	$len,$len,#-16
+	vld1.8	{$ivec},[$ivp]
+	vld1.8	{$dat},[$inp],$step
+
+	vld1.32	{q8-q9},[$key]		// load key schedule...
+	sub	$rounds,$rounds,#6
+	add	$key_,$key,x5,lsl#4	// pointer to last 7 round keys
+	sub	$rounds,$rounds,#2
+	vld1.32	{q10-q11},[$key_],#32
+	vld1.32	{q12-q13},[$key_],#32
+	vld1.32	{q14-q15},[$key_],#32
+	vld1.32	{$rndlast},[$key_]
+
+	add	$key_,$key,#32
+	mov	$cnt,$rounds
+	b.eq	.Lcbc_dec
+
+	cmp	$rounds,#2
+	veor	$dat,$dat,$ivec
+	veor	$rndzero_n_last,q8,$rndlast
+	b.eq	.Lcbc_enc128
+
+	vld1.32	{$in0-$in1},[$key_]
+	add	$key_,$key,#16
+	add	$key4,$key,#16*4
+	add	$key5,$key,#16*5
+	aese	$dat,q8
+	aesmc	$dat,$dat
+	add	$key6,$key,#16*6
+	add	$key7,$key,#16*7
+	b	.Lenter_cbc_enc
+
+.align	4
+.Loop_cbc_enc:
+	aese	$dat,q8
+	aesmc	$dat,$dat
+	 vst1.8	{$ivec},[$out],#16
+.Lenter_cbc_enc:
+	aese	$dat,q9
+	aesmc	$dat,$dat
+	aese	$dat,$in0
+	aesmc	$dat,$dat
+	vld1.32	{q8},[$key4]
+	cmp	$rounds,#4
+	aese	$dat,$in1
+	aesmc	$dat,$dat
+	vld1.32	{q9},[$key5]
+	b.eq	.Lcbc_enc192
+
+	aese	$dat,q8
+	aesmc	$dat,$dat
+	vld1.32	{q8},[$key6]
+	aese	$dat,q9
+	aesmc	$dat,$dat
+	vld1.32	{q9},[$key7]
+	nop
+
+.Lcbc_enc192:
+	aese	$dat,q8
+	aesmc	$dat,$dat
+	 subs	$len,$len,#16
+	aese	$dat,q9
+	aesmc	$dat,$dat
+	 cclr	$step,eq
+	aese	$dat,q10
+	aesmc	$dat,$dat
+	aese	$dat,q11
+	aesmc	$dat,$dat
+	 vld1.8	{q8},[$inp],$step
+	aese	$dat,q12
+	aesmc	$dat,$dat
+	 veor	q8,q8,$rndzero_n_last
+	aese	$dat,q13
+	aesmc	$dat,$dat
+	 vld1.32 {q9},[$key_]		// re-pre-load rndkey[1]
+	aese	$dat,q14
+	aesmc	$dat,$dat
+	aese	$dat,q15
+	veor	$ivec,$dat,$rndlast
+	b.hs	.Loop_cbc_enc
+
+	vst1.8	{$ivec},[$out],#16
+	b	.Lcbc_done
+
+.align	5
+.Lcbc_enc128:
+	vld1.32	{$in0-$in1},[$key_]
+	aese	$dat,q8
+	aesmc	$dat,$dat
+	b	.Lenter_cbc_enc128
+.Loop_cbc_enc128:
+	aese	$dat,q8
+	aesmc	$dat,$dat
+	 vst1.8	{$ivec},[$out],#16
+.Lenter_cbc_enc128:
+	aese	$dat,q9
+	aesmc	$dat,$dat
+	 subs	$len,$len,#16
+	aese	$dat,$in0
+	aesmc	$dat,$dat
+	 cclr	$step,eq
+	aese	$dat,$in1
+	aesmc	$dat,$dat
+	aese	$dat,q10
+	aesmc	$dat,$dat
+	aese	$dat,q11
+	aesmc	$dat,$dat
+	 vld1.8	{q8},[$inp],$step
+	aese	$dat,q12
+	aesmc	$dat,$dat
+	aese	$dat,q13
+	aesmc	$dat,$dat
+	aese	$dat,q14
+	aesmc	$dat,$dat
+	 veor	q8,q8,$rndzero_n_last
+	aese	$dat,q15
+	veor	$ivec,$dat,$rndlast
+	b.hs	.Loop_cbc_enc128
+
+	vst1.8	{$ivec},[$out],#16
+	b	.Lcbc_done
+___
+{
+my ($dat2,$in2,$tmp2)=map("q$_",(10,11,9));
+$code.=<<___;
+.align	5
+.Lcbc_dec:
+	vld1.8	{$dat2},[$inp],#16
+	subs	$len,$len,#32		// bias
+	add	$cnt,$rounds,#2
+	vorr	$in1,$dat,$dat
+	vorr	$dat1,$dat,$dat
+	vorr	$in2,$dat2,$dat2
+	b.lo	.Lcbc_dec_tail
+
+	vorr	$dat1,$dat2,$dat2
+	vld1.8	{$dat2},[$inp],#16
+	vorr	$in0,$dat,$dat
+	vorr	$in1,$dat1,$dat1
+	vorr	$in2,$dat2,$dat2
+
+.Loop3x_cbc_dec:
+	aesd	$dat0,q8
+	aesimc	$dat0,$dat0
+	aesd	$dat1,q8
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q8
+	aesimc	$dat2,$dat2
+	vld1.32	{q8},[$key_],#16
+	subs	$cnt,$cnt,#2
+	aesd	$dat0,q9
+	aesimc	$dat0,$dat0
+	aesd	$dat1,q9
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q9
+	aesimc	$dat2,$dat2
+	vld1.32	{q9},[$key_],#16
+	b.gt	.Loop3x_cbc_dec
+
+	aesd	$dat0,q8
+	aesimc	$dat0,$dat0
+	aesd	$dat1,q8
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q8
+	aesimc	$dat2,$dat2
+	 veor	$tmp0,$ivec,$rndlast
+	 subs	$len,$len,#0x30
+	 veor	$tmp1,$in0,$rndlast
+	 mov.lo	x6,$len			// x6, $cnt, is zero at this point
+	aesd	$dat0,q9
+	aesimc	$dat0,$dat0
+	aesd	$dat1,q9
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q9
+	aesimc	$dat2,$dat2
+	 veor	$tmp2,$in1,$rndlast
+	 add	$inp,$inp,x6		// $inp is adjusted in such a way
+					// that at exit from the loop $dat1-$dat2
+					// are loaded with the last "words"
+	 vorr	$ivec,$in2,$in2
+	 mov	$key_,$key
+	aesd	$dat0,q12
+	aesimc	$dat0,$dat0
+	aesd	$dat1,q12
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q12
+	aesimc	$dat2,$dat2
+	 vld1.8	{$in0},[$inp],#16
+	aesd	$dat0,q13
+	aesimc	$dat0,$dat0
+	aesd	$dat1,q13
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q13
+	aesimc	$dat2,$dat2
+	 vld1.8	{$in1},[$inp],#16
+	aesd	$dat0,q14
+	aesimc	$dat0,$dat0
+	aesd	$dat1,q14
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q14
+	aesimc	$dat2,$dat2
+	 vld1.8	{$in2},[$inp],#16
+	aesd	$dat0,q15
+	aesd	$dat1,q15
+	aesd	$dat2,q15
+	 vld1.32 {q8},[$key_],#16	// re-pre-load rndkey[0]
+	 add	$cnt,$rounds,#2
+	veor	$tmp0,$tmp0,$dat0
+	veor	$tmp1,$tmp1,$dat1
+	veor	$dat2,$dat2,$tmp2
+	 vld1.32 {q9},[$key_],#16	// re-pre-load rndkey[1]
+	vst1.8	{$tmp0},[$out],#16
+	 vorr	$dat0,$in0,$in0
+	vst1.8	{$tmp1},[$out],#16
+	 vorr	$dat1,$in1,$in1
+	vst1.8	{$dat2},[$out],#16
+	 vorr	$dat2,$in2,$in2
+	b.hs	.Loop3x_cbc_dec
+
+	cmn	$len,#0x30
+	b.eq	.Lcbc_done
+	nop
+
+.Lcbc_dec_tail:
+	aesd	$dat1,q8
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q8
+	aesimc	$dat2,$dat2
+	vld1.32	{q8},[$key_],#16
+	subs	$cnt,$cnt,#2
+	aesd	$dat1,q9
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q9
+	aesimc	$dat2,$dat2
+	vld1.32	{q9},[$key_],#16
+	b.gt	.Lcbc_dec_tail
+
+	aesd	$dat1,q8
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q8
+	aesimc	$dat2,$dat2
+	aesd	$dat1,q9
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q9
+	aesimc	$dat2,$dat2
+	aesd	$dat1,q12
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q12
+	aesimc	$dat2,$dat2
+	 cmn	$len,#0x20
+	aesd	$dat1,q13
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q13
+	aesimc	$dat2,$dat2
+	 veor	$tmp1,$ivec,$rndlast
+	aesd	$dat1,q14
+	aesimc	$dat1,$dat1
+	aesd	$dat2,q14
+	aesimc	$dat2,$dat2
+	 veor	$tmp2,$in1,$rndlast
+	aesd	$dat1,q15
+	aesd	$dat2,q15
+	b.eq	.Lcbc_dec_one
+	veor	$tmp1,$tmp1,$dat1
+	veor	$tmp2,$tmp2,$dat2
+	 vorr	$ivec,$in2,$in2
+	vst1.8	{$tmp1},[$out],#16
+	vst1.8	{$tmp2},[$out],#16
+	b	.Lcbc_done
+
+.Lcbc_dec_one:
+	veor	$tmp1,$tmp1,$dat2
+	 vorr	$ivec,$in2,$in2
+	vst1.8	{$tmp1},[$out],#16
+
+.Lcbc_done:
+	vst1.8	{$ivec},[$ivp]
+.Lcbc_abort:
+___
+}
+$code.=<<___	if ($flavour !~ /64/);
+	vldmia	sp!,{d8-d15}
+	ldmia	sp!,{r4-r8,pc}
+___
+$code.=<<___	if ($flavour =~ /64/);
+	ldr	x29,[sp],#16
+	ret
+___
+$code.=<<___;
+.size	${prefix}_cbc_encrypt,.-${prefix}_cbc_encrypt
+___
+}}}
+{{{
+my ($inp,$out,$len,$key,$ivp)=map("x$_",(0..4));
+my ($rounds,$cnt,$key_)=("w5","w6","x7");
+my ($ctr,$tctr0,$tctr1,$tctr2)=map("w$_",(8..10,12));
+my $step="x12";		# aliases with $tctr2
+
+my ($dat0,$dat1,$in0,$in1,$tmp0,$tmp1,$ivec,$rndlast)=map("q$_",(0..7));
+my ($dat2,$in2,$tmp2)=map("q$_",(10,11,9));
+
+my ($dat,$tmp)=($dat0,$tmp0);
+
+### q8-q15	preloaded key schedule
+
+$code.=<<___;
+.globl	${prefix}_ctr32_encrypt_blocks
+.type	${prefix}_ctr32_encrypt_blocks,%function
+.align	5
+${prefix}_ctr32_encrypt_blocks:
+___
+$code.=<<___	if ($flavour =~ /64/);
+	stp		x29,x30,[sp,#-16]!
+	add		x29,sp,#0
+___
+$code.=<<___	if ($flavour !~ /64/);
+	mov		ip,sp
+	stmdb		sp!,{r4-r10,lr}
+	vstmdb		sp!,{d8-d15}            @ ABI specification says so
+	ldr		r4, [ip]		@ load remaining arg
+___
+$code.=<<___;
+	ldr		$rounds,[$key,#240]
+
+	ldr		$ctr, [$ivp, #12]
+	vld1.32		{$dat0},[$ivp]
+
+	vld1.32		{q8-q9},[$key]		// load key schedule...
+	sub		$rounds,$rounds,#4
+	mov		$step,#16
+	cmp		$len,#2
+	add		$key_,$key,x5,lsl#4	// pointer to last 5 round keys
+	sub		$rounds,$rounds,#2
+	vld1.32		{q12-q13},[$key_],#32
+	vld1.32		{q14-q15},[$key_],#32
+	vld1.32		{$rndlast},[$key_]
+	add		$key_,$key,#32
+	mov		$cnt,$rounds
+	cclr		$step,lo
+#ifndef __ARMEB__
+	rev		$ctr, $ctr
+#endif
+	vorr		$dat1,$dat0,$dat0
+	add		$tctr1, $ctr, #1
+	vorr		$dat2,$dat0,$dat0
+	add		$ctr, $ctr, #2
+	vorr		$ivec,$dat0,$dat0
+	rev		$tctr1, $tctr1
+	vmov.32		${dat1}[3],$tctr1
+	b.ls		.Lctr32_tail
+	rev		$tctr2, $ctr
+	sub		$len,$len,#3		// bias
+	vmov.32		${dat2}[3],$tctr2
+	b		.Loop3x_ctr32
+
+.align	4
+.Loop3x_ctr32:
+	aese		$dat0,q8
+	aesmc		$dat0,$dat0
+	aese		$dat1,q8
+	aesmc		$dat1,$dat1
+	aese		$dat2,q8
+	aesmc		$dat2,$dat2
+	vld1.32		{q8},[$key_],#16
+	subs		$cnt,$cnt,#2
+	aese		$dat0,q9
+	aesmc		$dat0,$dat0
+	aese		$dat1,q9
+	aesmc		$dat1,$dat1
+	aese		$dat2,q9
+	aesmc		$dat2,$dat2
+	vld1.32		{q9},[$key_],#16
+	b.gt		.Loop3x_ctr32
+
+	aese		$dat0,q8
+	aesmc		$tmp0,$dat0
+	aese		$dat1,q8
+	aesmc		$tmp1,$dat1
+	 vld1.8		{$in0},[$inp],#16
+	 vorr		$dat0,$ivec,$ivec
+	aese		$dat2,q8
+	aesmc		$dat2,$dat2
+	 vld1.8		{$in1},[$inp],#16
+	 vorr		$dat1,$ivec,$ivec
+	aese		$tmp0,q9
+	aesmc		$tmp0,$tmp0
+	aese		$tmp1,q9
+	aesmc		$tmp1,$tmp1
+	 vld1.8		{$in2},[$inp],#16
+	 mov		$key_,$key
+	aese		$dat2,q9
+	aesmc		$tmp2,$dat2
+	 vorr		$dat2,$ivec,$ivec
+	 add		$tctr0,$ctr,#1
+	aese		$tmp0,q12
+	aesmc		$tmp0,$tmp0
+	aese		$tmp1,q12
+	aesmc		$tmp1,$tmp1
+	 veor		$in0,$in0,$rndlast
+	 add		$tctr1,$ctr,#2
+	aese		$tmp2,q12
+	aesmc		$tmp2,$tmp2
+	 veor		$in1,$in1,$rndlast
+	 add		$ctr,$ctr,#3
+	aese		$tmp0,q13
+	aesmc		$tmp0,$tmp0
+	aese		$tmp1,q13
+	aesmc		$tmp1,$tmp1
+	 veor		$in2,$in2,$rndlast
+	 rev		$tctr0,$tctr0
+	aese		$tmp2,q13
+	aesmc		$tmp2,$tmp2
+	 vmov.32	${dat0}[3], $tctr0
+	 rev		$tctr1,$tctr1
+	aese		$tmp0,q14
+	aesmc		$tmp0,$tmp0
+	aese		$tmp1,q14
+	aesmc		$tmp1,$tmp1
+	 vmov.32	${dat1}[3], $tctr1
+	 rev		$tctr2,$ctr
+	aese		$tmp2,q14
+	aesmc		$tmp2,$tmp2
+	 vmov.32	${dat2}[3], $tctr2
+	 subs		$len,$len,#3
+	aese		$tmp0,q15
+	aese		$tmp1,q15
+	aese		$tmp2,q15
+
+	veor		$in0,$in0,$tmp0
+	 vld1.32	 {q8},[$key_],#16	// re-pre-load rndkey[0]
+	vst1.8		{$in0},[$out],#16
+	veor		$in1,$in1,$tmp1
+	 mov		$cnt,$rounds
+	vst1.8		{$in1},[$out],#16
+	veor		$in2,$in2,$tmp2
+	 vld1.32	 {q9},[$key_],#16	// re-pre-load rndkey[1]
+	vst1.8		{$in2},[$out],#16
+	b.hs		.Loop3x_ctr32
+
+	adds		$len,$len,#3
+	b.eq		.Lctr32_done
+	cmp		$len,#1
+	mov		$step,#16
+	cclr		$step,eq
+
+.Lctr32_tail:
+	aese		$dat0,q8
+	aesmc		$dat0,$dat0
+	aese		$dat1,q8
+	aesmc		$dat1,$dat1
+	vld1.32		{q8},[$key_],#16
+	subs		$cnt,$cnt,#2
+	aese		$dat0,q9
+	aesmc		$dat0,$dat0
+	aese		$dat1,q9
+	aesmc		$dat1,$dat1
+	vld1.32		{q9},[$key_],#16
+	b.gt		.Lctr32_tail
+
+	aese		$dat0,q8
+	aesmc		$dat0,$dat0
+	aese		$dat1,q8
+	aesmc		$dat1,$dat1
+	aese		$dat0,q9
+	aesmc		$dat0,$dat0
+	aese		$dat1,q9
+	aesmc		$dat1,$dat1
+	 vld1.8		{$in0},[$inp],$step
+	aese		$dat0,q12
+	aesmc		$dat0,$dat0
+	aese		$dat1,q12
+	aesmc		$dat1,$dat1
+	 vld1.8		{$in1},[$inp]
+	aese		$dat0,q13
+	aesmc		$dat0,$dat0
+	aese		$dat1,q13
+	aesmc		$dat1,$dat1
+	 veor		$in0,$in0,$rndlast
+	aese		$dat0,q14
+	aesmc		$dat0,$dat0
+	aese		$dat1,q14
+	aesmc		$dat1,$dat1
+	 veor		$in1,$in1,$rndlast
+	aese		$dat0,q15
+	aese		$dat1,q15
+
+	cmp		$len,#1
+	veor		$in0,$in0,$dat0
+	veor		$in1,$in1,$dat1
+	vst1.8		{$in0},[$out],#16
+	b.eq		.Lctr32_done
+	vst1.8		{$in1},[$out]
+
+.Lctr32_done:
+___
+$code.=<<___	if ($flavour !~ /64/);
+	vldmia		sp!,{d8-d15}
+	ldmia		sp!,{r4-r10,pc}
+___
+$code.=<<___	if ($flavour =~ /64/);
+	ldr		x29,[sp],#16
+	ret
+___
+$code.=<<___;
+.size	${prefix}_ctr32_encrypt_blocks,.-${prefix}_ctr32_encrypt_blocks
+___
+}}}
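+
+# (Orientation note, not part of the original commentary: the CBC-decrypt
+# and CTR loops above keep three blocks in flight so that the result
+# latency of each aesd/aese+aesimc/aesmc pair is hidden behind the other
+# two blocks; this is also why $len is biased by three blocks and the
+# tail paths re-bias it.)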
+$code.=<<___;
+#endif
+___
+########################################
+if ($flavour =~ /64/) {			######## 64-bit code
+    my %opcode = (
+	"aesd"	=>	0x4e285800,	"aese"	=>	0x4e284800,
+	"aesimc"=>	0x4e287800,	"aesmc"	=>	0x4e286800	);
+
+    local *unaes = sub {
+	my ($mnemonic,$arg)=@_;
+
+	$arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)/o	&&
+	sprintf ".inst\t0x%08x\t//%s %s",
+			$opcode{$mnemonic}|$1|($2<<5),
+			$mnemonic,$arg;
+    };
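+
+    # For instance (illustrative only): "aesd q0,q8" would assemble to
+    #   0x4e285800 | 0 | (8<<5) = 0x4e285900,
+    # but the substitution that would invoke unaes is commented out below,
+    # presumably because current assemblers accept these mnemonics natively.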
+
+    foreach(split("\n",$code)) {
+	s/\`([^\`]*)\`/eval($1)/geo;
+
+	s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo;	# old->new registers
+	s/@\s/\/\//o;			# old->new style commentary
+
+	#s/[v]?(aes\w+)\s+([qv].*)/unaes($1,$2)/geo	or
+	s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel	$1$2,$1zr,$1$2,$3/o	or
+	s/mov\.([a-z]+)\s+([wx][0-9]+),\s*([wx][0-9]+)/csel	$2,$3,$2,$1/o	or
+	s/vmov\.i8/movi/o	or	# fix up legacy mnemonics
+	s/vext\.8/ext/o		or
+	s/vrev32\.8/rev32/o	or
+	s/vtst\.8/cmtst/o	or
+	s/vshr/ushr/o		or
+	s/^(\s+)v/$1/o		or	# strip off v prefix
+	s/\bbx\s+lr\b/ret/o;
+
+	# fix up remaining legacy suffixes
+	s/\.[ui]?8//o;
+	m/\],#8/o and s/\.16b/\.8b/go;
+	s/\.[ui]?32//o and s/\.16b/\.4s/go;
+	s/\.[ui]?64//o and s/\.16b/\.2d/go;
+	s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;
+
+	print $_,"\n";
+    }
+} else {				######## 32-bit code
+    my %opcode = (
+	"aesd"	=>	0xf3b00340,	"aese"	=>	0xf3b00300,
+	"aesimc"=>	0xf3b003c0,	"aesmc"	=>	0xf3b00380	);
+
+    local *unaes = sub {
+	my ($mnemonic,$arg)=@_;
+
+	if ($arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)/o) {
+	    my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
+					 |(($2&7)<<1) |(($2&8)<<2);
+	    # ARMv7 instructions are always encoded little-endian, so emit
+	    # the raw bytes directly; the correct solution is the .inst
+	    # directive, but older assemblers don't implement it :-(
+	    sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
+			$word&0xff,($word>>8)&0xff,
+			($word>>16)&0xff,($word>>24)&0xff,
+			$mnemonic,$arg;
+	}
+    };
+
+    sub unvtbl {
+	my $arg=shift;
+
+	$arg =~ m/q([0-9]+),\s*\{q([0-9]+)\},\s*q([0-9]+)/o &&
+	sprintf	"vtbl.8	d%d,{q%d},d%d\n\t".
+		"vtbl.8	d%d,{q%d},d%d", 2*$1,$2,2*$3, 2*$1+1,$2,2*$3+1;
+    }
+
+    sub unvdup32 {
+	my $arg=shift;
+
+	$arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
+	sprintf	"vdup.32	q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
+    }
+
+    sub unvmov32 {
+	my $arg=shift;
+
+	$arg =~ m/q([0-9]+)\[([0-3])\],(.*)/o &&
+	sprintf	"vmov.32	d%d[%d],%s",2*$1+($2>>1),$2&1,$3;
+    }
+
+    foreach(split("\n",$code)) {
+	s/\`([^\`]*)\`/eval($1)/geo;
+
+	s/\b[wx]([0-9]+)\b/r$1/go;		# new->old registers
+	s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go;	# new->old registers
+	s/\/\/\s?/@ /o;				# new->old style commentary
+
+	# fix up remaining new-style suffixes
+	s/\{q([0-9]+)\},\s*\[(.+)\],#8/sprintf "{d%d},[$2]!",2*$1/eo	or
+	s/\],#[0-9]+/]!/o;
+
+	s/[v]?(aes\w+)\s+([qv].*)/unaes($1,$2)/geo	or
+	s/cclr\s+([^,]+),\s*([a-z]+)/mov$2	$1,#0/o	or
+	s/vtbl\.8\s+(.*)/unvtbl($1)/geo			or
+	s/vdup\.32\s+(.*)/unvdup32($1)/geo		or
+	s/vmov\.32\s+(.*)/unvmov32($1)/geo		or
+	s/^(\s+)b\./$1b/o				or
+	s/^(\s+)mov\./$1mov/o				or
+	s/^(\s+)ret/$1bx\tlr/o;
+
+	print $_,"\n";
+    }
+}
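+
+# Illustrative check of unaes() above (not executed by this script):
+# "aese q0,q8" plugs into the 32-bit formula as
+#   0xf3b00300 | ((0&7)<<13) | ((0&8)<<19) | ((8&7)<<1) | ((8&8)<<2)
+# = 0xf3b00320, emitted LSB-first as ".byte 0x20,0x03,0xb0,0xf3", so the
+# encoding comes out right on any assembler regardless of .inst support.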
+
+close STDOUT;
diff --git a/src/crypto/fipsmodule/aes/asm/bsaes-armv7.pl b/src/crypto/fipsmodule/aes/asm/bsaes-armv7.pl
new file mode 100644
index 0000000..895a269
--- /dev/null
+++ b/src/crypto/fipsmodule/aes/asm/bsaes-armv7.pl
@@ -0,0 +1,2495 @@
+#! /usr/bin/env perl
+# Copyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+#
+# Specific modes and adaptation for Linux kernel by Ard Biesheuvel
+# <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is
+# granted.
+# ====================================================================
+
+# Bit-sliced AES for ARM NEON
+#
+# February 2012.
+#
+# This implementation is a direct adaptation of the bsaes-x86_64 module
+# for ARM NEON, except that this module is endian-neutral [in the sense
+# that it can be compiled for either endianness] courtesy of vld1.8's
+# neutrality. The initial version doesn't implement an interface to
+# OpenSSL, only low-level primitives and unsupported entry points, just
+# enough to collect performance results, which for a Cortex-A8 core are:
+#
+# encrypt	19.5 cycles per byte processed with 128-bit key
+# decrypt	22.1 cycles per byte processed with 128-bit key
+# key conv.	440  cycles per 128-bit key/0.18 of 8x block
+#
+# Snapdragon S4 encrypts a byte in 17.6 cycles and decrypts one in 19.7,
+# which is [much] worse than anticipated (for further details see
+# http://www.openssl.org/~appro/Snapdragon-S4.html).
+#
+# Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
+# manages in 20.0 cycles].
+#
+# When comparing to x86_64 results keep in mind that the NEON unit is
+# [mostly] single-issue and thus can't [fully] benefit from
+# instruction-level parallelism. And when comparing to aes-armv4
+# results keep in mind the key schedule conversion overhead (see
+# bsaes-x86_64.pl for further details)...
+#
+#						<appro@openssl.org>
+
+# April-August 2013
+#
+# Add CBC, CTR and XTS subroutines, adapt for kernel use.
+#
+#					<ard.biesheuvel@linaro.org>
+
+$flavour = shift;
+if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
+else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }
+
+if ($flavour && $flavour ne "void") {
+    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+    ( $xlate="${dir}../../../perlasm/arm-xlate.pl" and -f $xlate) or
+    die "can't locate arm-xlate.pl";
+
+    open STDOUT,"| \"$^X\" $xlate $flavour $output";
+} else {
+    open STDOUT,">$output";
+}
+
+my ($inp,$out,$len,$key)=("r0","r1","r2","r3");
+my @XMM=map("q$_",(0..15));
+
+{
+my ($key,$rounds,$const)=("r4","r5","r6");
+
+sub Dlo()   { shift=~m|q([1]?[0-9])|?"d".($1*2):"";     }
+sub Dhi()   { shift=~m|q([1]?[0-9])|?"d".($1*2+1):"";   }
+
+sub Sbox {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b0, b1, b4, b6, b3, b7, b2, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+	&InBasisChange	(@b);
+	&Inv_GF256	(@b[6,5,0,3,7,1,4,2],@t,@s);
+	&OutBasisChange	(@b[7,1,4,2,6,5,0,3]);
+}
+
+sub InBasisChange {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb
+my @b=@_[0..7];
+$code.=<<___;
+	veor	@b[2], @b[2], @b[1]
+	veor	@b[5], @b[5], @b[6]
+	veor	@b[3], @b[3], @b[0]
+	veor	@b[6], @b[6], @b[2]
+	veor	@b[5], @b[5], @b[0]
+
+	veor	@b[6], @b[6], @b[3]
+	veor	@b[3], @b[3], @b[7]
+	veor	@b[7], @b[7], @b[5]
+	veor	@b[3], @b[3], @b[4]
+	veor	@b[4], @b[4], @b[5]
+
+	veor	@b[2], @b[2], @b[7]
+	veor	@b[3], @b[3], @b[1]
+	veor	@b[1], @b[1], @b[5]
+___
+}
+
+sub OutBasisChange {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b1, b2, b4, b7, b0, b3, b5] < msb
+my @b=@_[0..7];
+$code.=<<___;
+	veor	@b[0], @b[0], @b[6]
+	veor	@b[1], @b[1], @b[4]
+	veor	@b[4], @b[4], @b[6]
+	veor	@b[2], @b[2], @b[0]
+	veor	@b[6], @b[6], @b[1]
+
+	veor	@b[1], @b[1], @b[5]
+	veor	@b[5], @b[5], @b[3]
+	veor	@b[3], @b[3], @b[7]
+	veor	@b[7], @b[7], @b[5]
+	veor	@b[2], @b[2], @b[5]
+
+	veor	@b[4], @b[4], @b[7]
+___
+}
+
+sub InvSbox {
+# input in lsb 	> [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb	> [b0, b1, b6, b4, b2, b7, b3, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+	&InvInBasisChange	(@b);
+	&Inv_GF256		(@b[5,1,2,6,3,7,0,4],@t,@s);
+	&InvOutBasisChange	(@b[3,7,0,4,5,1,2,6]);
+}
+
+sub InvInBasisChange {		# OutBasisChange in reverse (with a twist)
+my @b=@_[5,1,2,6,3,7,0,4];
+$code.=<<___;
+	 veor	@b[1], @b[1], @b[7]
+	veor	@b[4], @b[4], @b[7]
+
+	veor	@b[7], @b[7], @b[5]
+	 veor	@b[1], @b[1], @b[3]
+	veor	@b[2], @b[2], @b[5]
+	veor	@b[3], @b[3], @b[7]
+
+	veor	@b[6], @b[6], @b[1]
+	veor	@b[2], @b[2], @b[0]
+	 veor	@b[5], @b[5], @b[3]
+	veor	@b[4], @b[4], @b[6]
+	veor	@b[0], @b[0], @b[6]
+	veor	@b[1], @b[1], @b[4]
+___
+}
+
+sub InvOutBasisChange {		# InBasisChange in reverse
+my @b=@_[2,5,7,3,6,1,0,4];
+$code.=<<___;
+	veor	@b[1], @b[1], @b[5]
+	veor	@b[2], @b[2], @b[7]
+
+	veor	@b[3], @b[3], @b[1]
+	veor	@b[4], @b[4], @b[5]
+	veor	@b[7], @b[7], @b[5]
+	veor	@b[3], @b[3], @b[4]
+	 veor 	@b[5], @b[5], @b[0]
+	veor	@b[3], @b[3], @b[7]
+	 veor	@b[6], @b[6], @b[2]
+	 veor	@b[2], @b[2], @b[1]
+	veor	@b[6], @b[6], @b[3]
+
+	veor	@b[3], @b[3], @b[0]
+	veor	@b[5], @b[5], @b[6]
+___
+}
+
+sub Mul_GF4 {
+#;*************************************************************
+#;* Mul_GF4: Input x0-x1,y0-y1 Output x0-x1 Temp t0 (8) *
+#;*************************************************************
+my ($x0,$x1,$y0,$y1,$t0,$t1)=@_;
+$code.=<<___;
+	veor 	$t0, $y0, $y1
+	vand	$t0, $t0, $x0
+	veor	$x0, $x0, $x1
+	vand	$t1, $x1, $y0
+	vand	$x0, $x0, $y1
+	veor	$x1, $t1, $t0
+	veor	$x0, $x0, $t1
+___
+}
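+
+# One way to read the seven instructions above (offered as orientation,
+# derived mechanically from the veor/vand sequence): each q register holds
+# one bit of 128 parallel GF(2^2) elements, and the subroutine computes
+#   x1' = (x1 & y0) ^ ((y0 ^ y1) & x0)
+#   x0' = ((x0 ^ x1) & y1) ^ (x1 & y0)
+# a Karatsuba-style product that spends only three ANDs -- the expensive
+# gate in a bit-sliced design -- per multiplication.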
+
+sub Mul_GF4_N {				# not used, see next subroutine
+# multiply and scale by N
+my ($x0,$x1,$y0,$y1,$t0)=@_;
+$code.=<<___;
+	veor	$t0, $y0, $y1
+	vand	$t0, $t0, $x0
+	veor	$x0, $x0, $x1
+	vand	$x1, $x1, $y0
+	vand	$x0, $x0, $y1
+	veor	$x1, $x1, $x0
+	veor	$x0, $x0, $t0
+___
+}
+
+sub Mul_GF4_N_GF4 {
+# interleaved Mul_GF4_N and Mul_GF4
+my ($x0,$x1,$y0,$y1,$t0,
+    $x2,$x3,$y2,$y3,$t1)=@_;
+$code.=<<___;
+	veor	$t0, $y0, $y1
+	 veor 	$t1, $y2, $y3
+	vand	$t0, $t0, $x0
+	 vand	$t1, $t1, $x2
+	veor	$x0, $x0, $x1
+	 veor	$x2, $x2, $x3
+	vand	$x1, $x1, $y0
+	 vand	$x3, $x3, $y2
+	vand	$x0, $x0, $y1
+	 vand	$x2, $x2, $y3
+	veor	$x1, $x1, $x0
+	 veor	$x2, $x2, $x3
+	veor	$x0, $x0, $t0
+	 veor	$x3, $x3, $t1
+___
+}
+sub Mul_GF16_2 {
+my @x=@_[0..7];
+my @y=@_[8..11];
+my @t=@_[12..15];
+$code.=<<___;
+	veor	@t[0], @x[0], @x[2]
+	veor	@t[1], @x[1], @x[3]
+___
+	&Mul_GF4  	(@x[0], @x[1], @y[0], @y[1], @t[2..3]);
+$code.=<<___;
+	veor	@y[0], @y[0], @y[2]
+	veor	@y[1], @y[1], @y[3]
+___
+	Mul_GF4_N_GF4	(@t[0], @t[1], @y[0], @y[1], @t[3],
+			 @x[2], @x[3], @y[2], @y[3], @t[2]);
+$code.=<<___;
+	veor	@x[0], @x[0], @t[0]
+	veor	@x[2], @x[2], @t[0]
+	veor	@x[1], @x[1], @t[1]
+	veor	@x[3], @x[3], @t[1]
+
+	veor	@t[0], @x[4], @x[6]
+	veor	@t[1], @x[5], @x[7]
+___
+	&Mul_GF4_N_GF4	(@t[0], @t[1], @y[0], @y[1], @t[3],
+			 @x[6], @x[7], @y[2], @y[3], @t[2]);
+$code.=<<___;
+	veor	@y[0], @y[0], @y[2]
+	veor	@y[1], @y[1], @y[3]
+___
+	&Mul_GF4  	(@x[4], @x[5], @y[0], @y[1], @t[2..3]);
+$code.=<<___;
+	veor	@x[4], @x[4], @t[0]
+	veor	@x[6], @x[6], @t[0]
+	veor	@x[5], @x[5], @t[1]
+	veor	@x[7], @x[7], @t[1]
+___
+}
+sub Inv_GF256 {
+#;********************************************************************
+#;* Inv_GF256: Input x0-x7 Output x0-x7 Temp t0-t3,s0-s3 (144)       *
+#;********************************************************************
+my @x=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+# direct optimizations from hardware
+$code.=<<___;
+	veor	@t[3], @x[4], @x[6]
+	veor	@t[2], @x[5], @x[7]
+	veor	@t[1], @x[1], @x[3]
+	veor	@s[1], @x[7], @x[6]
+	 vmov	@t[0], @t[2]
+	veor	@s[0], @x[0], @x[2]
+
+	vorr	@t[2], @t[2], @t[1]
+	veor	@s[3], @t[3], @t[0]
+	vand	@s[2], @t[3], @s[0]
+	vorr	@t[3], @t[3], @s[0]
+	veor	@s[0], @s[0], @t[1]
+	vand	@t[0], @t[0], @t[1]
+	veor	@t[1], @x[3], @x[2]
+	vand	@s[3], @s[3], @s[0]
+	vand	@s[1], @s[1], @t[1]
+	veor	@t[1], @x[4], @x[5]
+	veor	@s[0], @x[1], @x[0]
+	veor	@t[3], @t[3], @s[1]
+	veor	@t[2], @t[2], @s[1]
+	vand	@s[1], @t[1], @s[0]
+	vorr	@t[1], @t[1], @s[0]
+	veor	@t[3], @t[3], @s[3]
+	veor	@t[0], @t[0], @s[1]
+	veor	@t[2], @t[2], @s[2]
+	veor	@t[1], @t[1], @s[3]
+	veor	@t[0], @t[0], @s[2]
+	vand	@s[0], @x[7], @x[3]
+	veor	@t[1], @t[1], @s[2]
+	vand	@s[1], @x[6], @x[2]
+	vand	@s[2], @x[5], @x[1]
+	vorr	@s[3], @x[4], @x[0]
+	veor	@t[3], @t[3], @s[0]
+	veor	@t[1], @t[1], @s[2]
+	veor	@t[0], @t[0], @s[3]
+	veor	@t[2], @t[2], @s[1]
+
+	@ Inv_GF16 t0, t1, t2, t3, s0, s1, s2, s3
+
+	@ new smaller inversion
+
+	vand	@s[2], @t[3], @t[1]
+	vmov	@s[0], @t[0]
+
+	veor	@s[1], @t[2], @s[2]
+	veor	@s[3], @t[0], @s[2]
+	veor	@s[2], @t[0], @s[2]	@ @s[2]=@s[3]
+
+	vbsl	@s[1], @t[1], @t[0]
+	vbsl	@s[3], @t[3], @t[2]
+	veor	@t[3], @t[3], @t[2]
+
+	vbsl	@s[0], @s[1], @s[2]
+	vbsl	@t[0], @s[2], @s[1]
+
+	vand	@s[2], @s[0], @s[3]
+	veor	@t[1], @t[1], @t[0]
+
+	veor	@s[2], @s[2], @t[3]
+___
+# output in s3, s2, s1, t1
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \t2, \t3, \t0, \t1, \s0, \s1, \s2, \s3
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3
+	&Mul_GF16_2(@x,@s[3,2,1],@t[1],@s[0],@t[0,2,3]);
+
+### output msb > [x3,x2,x1,x0,x7,x6,x5,x4] < lsb
+}
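+
+# (Orientation, hedged: this is the usual tower-field inversion -- GF(2^8)
+# viewed as a quadratic extension of GF(2^4), itself built over GF(2^2) --
+# so a full GF(2^8) inverse reduces to the "new smaller inversion" block
+# above plus the closing Mul_GF16_2 call.)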
+
+# AES linear components
+
+sub ShiftRows {
+my @x=@_[0..7];
+my @t=@_[8..11];
+my $mask=pop;
+$code.=<<___;
+	vldmia	$key!, {@t[0]-@t[3]}
+	veor	@t[0], @t[0], @x[0]
+	veor	@t[1], @t[1], @x[1]
+	vtbl.8	`&Dlo(@x[0])`, {@t[0]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[0])`, {@t[0]}, `&Dhi($mask)`
+	vldmia	$key!, {@t[0]}
+	veor	@t[2], @t[2], @x[2]
+	vtbl.8	`&Dlo(@x[1])`, {@t[1]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[1])`, {@t[1]}, `&Dhi($mask)`
+	vldmia	$key!, {@t[1]}
+	veor	@t[3], @t[3], @x[3]
+	vtbl.8	`&Dlo(@x[2])`, {@t[2]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[2])`, {@t[2]}, `&Dhi($mask)`
+	vldmia	$key!, {@t[2]}
+	vtbl.8	`&Dlo(@x[3])`, {@t[3]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[3])`, {@t[3]}, `&Dhi($mask)`
+	vldmia	$key!, {@t[3]}
+	veor	@t[0], @t[0], @x[4]
+	veor	@t[1], @t[1], @x[5]
+	vtbl.8	`&Dlo(@x[4])`, {@t[0]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[4])`, {@t[0]}, `&Dhi($mask)`
+	veor	@t[2], @t[2], @x[6]
+	vtbl.8	`&Dlo(@x[5])`, {@t[1]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[5])`, {@t[1]}, `&Dhi($mask)`
+	veor	@t[3], @t[3], @x[7]
+	vtbl.8	`&Dlo(@x[6])`, {@t[2]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[6])`, {@t[2]}, `&Dhi($mask)`
+	vtbl.8	`&Dlo(@x[7])`, {@t[3]}, `&Dlo($mask)`
+	vtbl.8	`&Dhi(@x[7])`, {@t[3]}, `&Dhi($mask)`
+___
+}
+
+sub MixColumns {
+# modified to emit output in an order suitable for feeding back to aesenc[last]
+my @x=@_[0..7];
+my @t=@_[8..15];
+my $inv=@_[16];	# optional
+$code.=<<___;
+	vext.8	@t[0], @x[0], @x[0], #12	@ x0 <<< 32
+	vext.8	@t[1], @x[1], @x[1], #12
+	 veor	@x[0], @x[0], @t[0]		@ x0 ^ (x0 <<< 32)
+	vext.8	@t[2], @x[2], @x[2], #12
+	 veor	@x[1], @x[1], @t[1]
+	vext.8	@t[3], @x[3], @x[3], #12
+	 veor	@x[2], @x[2], @t[2]
+	vext.8	@t[4], @x[4], @x[4], #12
+	 veor	@x[3], @x[3], @t[3]
+	vext.8	@t[5], @x[5], @x[5], #12
+	 veor	@x[4], @x[4], @t[4]
+	vext.8	@t[6], @x[6], @x[6], #12
+	 veor	@x[5], @x[5], @t[5]
+	vext.8	@t[7], @x[7], @x[7], #12
+	 veor	@x[6], @x[6], @t[6]
+
+	veor	@t[1], @t[1], @x[0]
+	 veor	@x[7], @x[7], @t[7]
+	 vext.8	@x[0], @x[0], @x[0], #8		@ (x0 ^ (x0 <<< 32)) <<< 64)
+	veor	@t[2], @t[2], @x[1]
+	veor	@t[0], @t[0], @x[7]
+	veor	@t[1], @t[1], @x[7]
+	 vext.8	@x[1], @x[1], @x[1], #8
+	veor	@t[5], @t[5], @x[4]
+	 veor	@x[0], @x[0], @t[0]
+	veor	@t[6], @t[6], @x[5]
+	 veor	@x[1], @x[1], @t[1]
+	 vext.8	@t[0], @x[4], @x[4], #8
+	veor	@t[4], @t[4], @x[3]
+	 vext.8	@t[1], @x[5], @x[5], #8
+	veor	@t[7], @t[7], @x[6]
+	 vext.8	@x[4], @x[3], @x[3], #8
+	veor	@t[3], @t[3], @x[2]
+	 vext.8	@x[5], @x[7], @x[7], #8
+	veor	@t[4], @t[4], @x[7]
+	 vext.8	@x[3], @x[6], @x[6], #8
+	veor	@t[3], @t[3], @x[7]
+	 vext.8	@x[6], @x[2], @x[2], #8
+	veor	@x[7], @t[1], @t[5]
+___
+$code.=<<___ if (!$inv);
+	veor	@x[2], @t[0], @t[4]
+	veor	@x[4], @x[4], @t[3]
+	veor	@x[5], @x[5], @t[7]
+	veor	@x[3], @x[3], @t[6]
+	 @ vmov	@x[2], @t[0]
+	veor	@x[6], @x[6], @t[2]
+	 @ vmov	@x[7], @t[1]
+___
+$code.=<<___ if ($inv);
+	veor	@t[3], @t[3], @x[4]
+	veor	@x[5], @x[5], @t[7]
+	veor	@x[2], @x[3], @t[6]
+	veor	@x[3], @t[0], @t[4]
+	veor	@x[4], @x[6], @t[2]
+	vmov	@x[6], @t[3]
+	 @ vmov	@x[7], @t[1]
+___
+}
+
+sub InvMixColumns_orig {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+$code.=<<___;
+	@ multiplication by 0x0e
+	vext.8	@t[7], @x[7], @x[7], #12
+	vmov	@t[2], @x[2]
+	veor	@x[2], @x[2], @x[5]		@ 2 5
+	veor	@x[7], @x[7], @x[5]		@ 7 5
+	vext.8	@t[0], @x[0], @x[0], #12
+	vmov	@t[5], @x[5]
+	veor	@x[5], @x[5], @x[0]		@ 5 0		[1]
+	veor	@x[0], @x[0], @x[1]		@ 0 1
+	vext.8	@t[1], @x[1], @x[1], #12
+	veor	@x[1], @x[1], @x[2]		@ 1 25
+	veor	@x[0], @x[0], @x[6]		@ 01 6		[2]
+	vext.8	@t[3], @x[3], @x[3], #12
+	veor	@x[1], @x[1], @x[3]		@ 125 3		[4]
+	veor	@x[2], @x[2], @x[0]		@ 25 016	[3]
+	veor	@x[3], @x[3], @x[7]		@ 3 75
+	veor	@x[7], @x[7], @x[6]		@ 75 6		[0]
+	vext.8	@t[6], @x[6], @x[6], #12
+	vmov	@t[4], @x[4]
+	veor	@x[6], @x[6], @x[4]		@ 6 4
+	veor	@x[4], @x[4], @x[3]		@ 4 375		[6]
+	veor	@x[3], @x[3], @x[7]		@ 375 756=36
+	veor	@x[6], @x[6], @t[5]		@ 64 5		[7]
+	veor	@x[3], @x[3], @t[2]		@ 36 2
+	vext.8	@t[5], @t[5], @t[5], #12
+	veor	@x[3], @x[3], @t[4]		@ 362 4		[5]
+___
+					my @y = @x[7,5,0,2,1,3,4,6];
+$code.=<<___;
+	@ multiplication by 0x0b
+	veor	@y[1], @y[1], @y[0]
+	veor	@y[0], @y[0], @t[0]
+	vext.8	@t[2], @t[2], @t[2], #12
+	veor	@y[1], @y[1], @t[1]
+	veor	@y[0], @y[0], @t[5]
+	vext.8	@t[4], @t[4], @t[4], #12
+	veor	@y[1], @y[1], @t[6]
+	veor	@y[0], @y[0], @t[7]
+	veor	@t[7], @t[7], @t[6]		@ clobber t[7]
+
+	veor	@y[3], @y[3], @t[0]
+	 veor	@y[1], @y[1], @y[0]
+	vext.8	@t[0], @t[0], @t[0], #12
+	veor	@y[2], @y[2], @t[1]
+	veor	@y[4], @y[4], @t[1]
+	vext.8	@t[1], @t[1], @t[1], #12
+	veor	@y[2], @y[2], @t[2]
+	veor	@y[3], @y[3], @t[2]
+	veor	@y[5], @y[5], @t[2]
+	veor	@y[2], @y[2], @t[7]
+	vext.8	@t[2], @t[2], @t[2], #12
+	veor	@y[3], @y[3], @t[3]
+	veor	@y[6], @y[6], @t[3]
+	veor	@y[4], @y[4], @t[3]
+	veor	@y[7], @y[7], @t[4]
+	vext.8	@t[3], @t[3], @t[3], #12
+	veor	@y[5], @y[5], @t[4]
+	veor	@y[7], @y[7], @t[7]
+	veor	@t[7], @t[7], @t[5]		@ clobber t[7] even more
+	veor	@y[3], @y[3], @t[5]
+	veor	@y[4], @y[4], @t[4]
+
+	veor	@y[5], @y[5], @t[7]
+	vext.8	@t[4], @t[4], @t[4], #12
+	veor	@y[6], @y[6], @t[7]
+	veor	@y[4], @y[4], @t[7]
+
+	veor	@t[7], @t[7], @t[5]
+	vext.8	@t[5], @t[5], @t[5], #12
+
+	@ multiplication by 0x0d
+	veor	@y[4], @y[4], @y[7]
+	 veor	@t[7], @t[7], @t[6]		@ restore t[7]
+	veor	@y[7], @y[7], @t[4]
+	vext.8	@t[6], @t[6], @t[6], #12
+	veor	@y[2], @y[2], @t[0]
+	veor	@y[7], @y[7], @t[5]
+	vext.8	@t[7], @t[7], @t[7], #12
+	veor	@y[2], @y[2], @t[2]
+
+	veor	@y[3], @y[3], @y[1]
+	veor	@y[1], @y[1], @t[1]
+	veor	@y[0], @y[0], @t[0]
+	veor	@y[3], @y[3], @t[0]
+	veor	@y[1], @y[1], @t[5]
+	veor	@y[0], @y[0], @t[5]
+	vext.8	@t[0], @t[0], @t[0], #12
+	veor	@y[1], @y[1], @t[7]
+	veor	@y[0], @y[0], @t[6]
+	veor	@y[3], @y[3], @y[1]
+	veor	@y[4], @y[4], @t[1]
+	vext.8	@t[1], @t[1], @t[1], #12
+
+	veor	@y[7], @y[7], @t[7]
+	veor	@y[4], @y[4], @t[2]
+	veor	@y[5], @y[5], @t[2]
+	veor	@y[2], @y[2], @t[6]
+	veor	@t[6], @t[6], @t[3]		@ clobber t[6]
+	vext.8	@t[2], @t[2], @t[2], #12
+	veor	@y[4], @y[4], @y[7]
+	veor	@y[3], @y[3], @t[6]
+
+	veor	@y[6], @y[6], @t[6]
+	veor	@y[5], @y[5], @t[5]
+	vext.8	@t[5], @t[5], @t[5], #12
+	veor	@y[6], @y[6], @t[4]
+	vext.8	@t[4], @t[4], @t[4], #12
+	veor	@y[5], @y[5], @t[6]
+	veor	@y[6], @y[6], @t[7]
+	vext.8	@t[7], @t[7], @t[7], #12
+	veor	@t[6], @t[6], @t[3]		@ restore t[6]
+	vext.8	@t[3], @t[3], @t[3], #12
+
+	@ multiplication by 0x09
+	veor	@y[4], @y[4], @y[1]
+	veor	@t[1], @t[1], @y[1]		@ t[1]=y[1]
+	veor	@t[0], @t[0], @t[5]		@ clobber t[0]
+	vext.8	@t[6], @t[6], @t[6], #12
+	veor	@t[1], @t[1], @t[5]
+	veor	@y[3], @y[3], @t[0]
+	veor	@t[0], @t[0], @y[0]		@ t[0]=y[0]
+	veor	@t[1], @t[1], @t[6]
+	veor	@t[6], @t[6], @t[7]		@ clobber t[6]
+	veor	@y[4], @y[4], @t[1]
+	veor	@y[7], @y[7], @t[4]
+	veor	@y[6], @y[6], @t[3]
+	veor	@y[5], @y[5], @t[2]
+	veor	@t[4], @t[4], @y[4]		@ t[4]=y[4]
+	veor	@t[3], @t[3], @y[3]		@ t[3]=y[3]
+	veor	@t[5], @t[5], @y[5]		@ t[5]=y[5]
+	veor	@t[2], @t[2], @y[2]		@ t[2]=y[2]
+	veor	@t[3], @t[3], @t[7]
+	veor	@XMM[5], @t[5], @t[6]
+	veor	@XMM[6], @t[6], @y[6]		@ t[6]=y[6]
+	veor	@XMM[2], @t[2], @t[6]
+	veor	@XMM[7], @t[7], @y[7]		@ t[7]=y[7]
+
+	vmov	@XMM[0], @t[0]
+	vmov	@XMM[1], @t[1]
+	@ vmov	@XMM[2], @t[2]
+	vmov	@XMM[3], @t[3]
+	vmov	@XMM[4], @t[4]
+	@ vmov	@XMM[5], @t[5]
+	@ vmov	@XMM[6], @t[6]
+	@ vmov	@XMM[7], @t[7]
+___
+}
+
+sub InvMixColumns {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+# Thanks to Jussi Kivilinna for providing a pointer to
+#
+# | 0e 0b 0d 09 |   | 02 03 01 01 |   | 05 00 04 00 |
+# | 09 0e 0b 0d | = | 01 02 03 01 | x | 00 05 00 04 |
+# | 0d 09 0e 0b |   | 01 01 02 03 |   | 04 00 05 00 |
+# | 0b 0d 09 0e |   | 03 01 01 02 |   | 00 04 00 05 |
+
+$code.=<<___;
+	@ multiplication by 0x05-0x00-0x04-0x00
+	vext.8	@t[0], @x[0], @x[0], #8
+	vext.8	@t[6], @x[6], @x[6], #8
+	vext.8	@t[7], @x[7], @x[7], #8
+	veor	@t[0], @t[0], @x[0]
+	vext.8	@t[1], @x[1], @x[1], #8
+	veor	@t[6], @t[6], @x[6]
+	vext.8	@t[2], @x[2], @x[2], #8
+	veor	@t[7], @t[7], @x[7]
+	vext.8	@t[3], @x[3], @x[3], #8
+	veor	@t[1], @t[1], @x[1]
+	vext.8	@t[4], @x[4], @x[4], #8
+	veor	@t[2], @t[2], @x[2]
+	vext.8	@t[5], @x[5], @x[5], #8
+	veor	@t[3], @t[3], @x[3]
+	veor	@t[4], @t[4], @x[4]
+	veor	@t[5], @t[5], @x[5]
+
+	 veor	@x[0], @x[0], @t[6]
+	 veor	@x[1], @x[1], @t[6]
+	 veor	@x[2], @x[2], @t[0]
+	 veor	@x[4], @x[4], @t[2]
+	 veor	@x[3], @x[3], @t[1]
+	 veor	@x[1], @x[1], @t[7]
+	 veor	@x[2], @x[2], @t[7]
+	 veor	@x[4], @x[4], @t[6]
+	 veor	@x[5], @x[5], @t[3]
+	 veor	@x[3], @x[3], @t[6]
+	 veor	@x[6], @x[6], @t[4]
+	 veor	@x[4], @x[4], @t[7]
+	 veor	@x[5], @x[5], @t[7]
+	 veor	@x[7], @x[7], @t[5]
+___
+	&MixColumns	(@x,@t,1);	# flipped 2<->3 and 4<->6
+}
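+
+# A minimal sanity check of the factorization above (hypothetical helper,
+# not part of this module) -- multiply in GF(2^8) modulo x^8+x^4+x^3+x+1
+# and confirm, e.g., the top-left entry of the product:
+#
+#   sub _gmul { my ($a,$b)=@_; my $r=0;
+#       while ($b) { $r ^= $a if $b & 1;
+#           $a <<= 1; $a ^= 0x11b if $a & 0x100;
+#           $b >>= 1; }
+#       return $r; }
+#   # _gmul(0x02,0x05) ^ _gmul(0x01,0x04) == 0x0e  (row 02 03 01 01 x col 05 00 04 00)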
+
+sub swapmove {
+my ($a,$b,$n,$mask,$t)=@_;
+$code.=<<___;
+	vshr.u64	$t, $b, #$n
+	veor		$t, $t, $a
+	vand		$t, $t, $mask
+	veor		$a, $a, $t
+	vshl.u64	$t, $t, #$n
+	veor		$b, $b, $t
+___
+}
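+
+# Scalar reading of the exchange above (illustrative only):
+#   t  = ((b >> n) ^ a) & mask;
+#   a ^= t;
+#   b ^= t << n;
+# i.e. the mask-selected bits of a are swapped with the bits of b sitting
+# n positions higher; swapmove2x below is two such swaps interleaved.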
+sub swapmove2x {
+my ($a0,$b0,$a1,$b1,$n,$mask,$t0,$t1)=@_;
+$code.=<<___;
+	vshr.u64	$t0, $b0, #$n
+	 vshr.u64	$t1, $b1, #$n
+	veor		$t0, $t0, $a0
+	 veor		$t1, $t1, $a1
+	vand		$t0, $t0, $mask
+	 vand		$t1, $t1, $mask
+	veor		$a0, $a0, $t0
+	vshl.u64	$t0, $t0, #$n
+	 veor		$a1, $a1, $t1
+	 vshl.u64	$t1, $t1, #$n
+	veor		$b0, $b0, $t0
+	 veor		$b1, $b1, $t1
+___
+}
+
+sub bitslice {
+my @x=reverse(@_[0..7]);
+my ($t0,$t1,$t2,$t3)=@_[8..11];
+$code.=<<___;
+	vmov.i8	$t0,#0x55			@ compose .LBS0
+	vmov.i8	$t1,#0x33			@ compose .LBS1
+___
+	&swapmove2x(@x[0,1,2,3],1,$t0,$t2,$t3);
+	&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+$code.=<<___;
+	vmov.i8	$t0,#0x0f			@ compose .LBS2
+___
+	&swapmove2x(@x[0,2,1,3],2,$t1,$t2,$t3);
+	&swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+
+	&swapmove2x(@x[0,4,1,5],4,$t0,$t2,$t3);
+	&swapmove2x(@x[2,6,3,7],4,$t0,$t2,$t3);
+}
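+
+# (Orientation, hedged: the three swapmove2x rounds above -- masks
+# 0x55/0x33/0x0f with shifts 1/2/4 -- perform the classic 8x8 bit-matrix
+# transpose, so that on exit register i holds bit i of every state byte;
+# applying the same sequence again undoes it, which is presumably why one
+# sub serves for both entering and leaving the bit-sliced domain.)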
+
+$code.=<<___;
+#ifndef __KERNEL__
+# include <openssl/arm_arch.h>
+
+# define VFP_ABI_PUSH	vstmdb	sp!,{d8-d15}
+# define VFP_ABI_POP	vldmia	sp!,{d8-d15}
+# define VFP_ABI_FRAME	0x40
+#else
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+# define VFP_ABI_FRAME	0
+# define BSAES_ASM_EXTENDED_KEY
+# define XTS_CHAIN_TWEAK
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
+#endif
+
+#ifdef __thumb__
+# define adrl adr
+#endif
+
+#if __ARM_MAX_ARCH__>=7
+.arch	armv7-a
+.fpu	neon
+
+.text
+.syntax	unified 	@ ARMv7-capable assembler is expected to handle this
+#if defined(__thumb2__) && !defined(__APPLE__)
+.thumb
+#else
+.code   32
+# undef __thumb2__
+#endif
+
+.type	_bsaes_decrypt8,%function
+.align	4
+_bsaes_decrypt8:
+	adr	$const,_bsaes_decrypt8
+	vldmia	$key!, {@XMM[9]}		@ round 0 key
+#ifdef	__APPLE__
+	adr	$const,.LM0ISR
+#else
+	add	$const,$const,#.LM0ISR-_bsaes_decrypt8
+#endif
+
+	vldmia	$const!, {@XMM[8]}		@ .LM0ISR
+	veor	@XMM[10], @XMM[0], @XMM[9]	@ xor with round0 key
+	veor	@XMM[11], @XMM[1], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+	veor	@XMM[12], @XMM[2], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+	veor	@XMM[13], @XMM[3], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
+	veor	@XMM[14], @XMM[4], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
+	veor	@XMM[15], @XMM[5], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
+	veor	@XMM[10], @XMM[6], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
+	veor	@XMM[11], @XMM[7], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+	 vtbl.8	`&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+___
+	&bitslice	(@XMM[0..7, 8..11]);
+$code.=<<___;
+	sub	$rounds,$rounds,#1
+	b	.Ldec_sbox
+.align	4
+.Ldec_loop:
+___
+	&ShiftRows	(@XMM[0..7, 8..12]);
+$code.=".Ldec_sbox:\n";
+	&InvSbox	(@XMM[0..7, 8..15]);
+$code.=<<___;
+	subs	$rounds,$rounds,#1
+	bcc	.Ldec_done
+___
+	&InvMixColumns	(@XMM[0,1,6,4,2,7,3,5, 8..15]);
+$code.=<<___;
+	vldmia	$const, {@XMM[12]}		@ .LISR
+	ite	eq				@ Thumb2 thing, sanity check in ARM
+	addeq	$const,$const,#0x10
+	bne	.Ldec_loop
+	vldmia	$const, {@XMM[12]}		@ .LISRM0
+	b	.Ldec_loop
+.align	4
+.Ldec_done:
+___
+	&bitslice	(@XMM[0,1,6,4,2,7,3,5, 8..11]);
+$code.=<<___;
+	vldmia	$key, {@XMM[8]}			@ last round key
+	veor	@XMM[6], @XMM[6], @XMM[8]
+	veor	@XMM[4], @XMM[4], @XMM[8]
+	veor	@XMM[2], @XMM[2], @XMM[8]
+	veor	@XMM[7], @XMM[7], @XMM[8]
+	veor	@XMM[3], @XMM[3], @XMM[8]
+	veor	@XMM[5], @XMM[5], @XMM[8]
+	veor	@XMM[0], @XMM[0], @XMM[8]
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	bx	lr
+.size	_bsaes_decrypt8,.-_bsaes_decrypt8
+
+.type	_bsaes_const,%object
+.align	6
+_bsaes_const:
+.LM0ISR:	@ InvShiftRows constants
+	.quad	0x0a0e0206070b0f03, 0x0004080c0d010509
+.LISR:
+	.quad	0x0504070602010003, 0x0f0e0d0c080b0a09
+.LISRM0:
+	.quad	0x01040b0e0205080f, 0x0306090c00070a0d
+.LM0SR:		@ ShiftRows constants
+	.quad	0x0a0e02060f03070b, 0x0004080c05090d01
+.LSR:
+	.quad	0x0504070600030201, 0x0f0e0d0c0a09080b
+.LSRM0:
+	.quad	0x0304090e00050a0f, 0x01060b0c0207080d
+.LM0:
+	.quad	0x02060a0e03070b0f, 0x0004080c0105090d
+.LREVM0SR:
+	.quad	0x090d01050c000408, 0x03070b0f060a0e02
+.asciz	"Bit-sliced AES for NEON, CRYPTOGAMS by <appro\@openssl.org>"
+.align	6
+.size	_bsaes_const,.-_bsaes_const
+
+.type	_bsaes_encrypt8,%function
+.align	4
+_bsaes_encrypt8:
+	adr	$const,_bsaes_encrypt8
+	vldmia	$key!, {@XMM[9]}		@ round 0 key
+#ifdef	__APPLE__
+	adr	$const,.LM0SR
+#else
+	sub	$const,$const,#_bsaes_encrypt8-.LM0SR
+#endif
+
+	vldmia	$const!, {@XMM[8]}		@ .LM0SR
+_bsaes_encrypt8_alt:
+	veor	@XMM[10], @XMM[0], @XMM[9]	@ xor with round0 key
+	veor	@XMM[11], @XMM[1], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+	veor	@XMM[12], @XMM[2], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+	veor	@XMM[13], @XMM[3], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
+	veor	@XMM[14], @XMM[4], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
+	veor	@XMM[15], @XMM[5], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
+	veor	@XMM[10], @XMM[6], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
+	veor	@XMM[11], @XMM[7], @XMM[9]
+	 vtbl.8	`&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+	 vtbl.8	`&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+	 vtbl.8	`&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+_bsaes_encrypt8_bitslice:
+___
+	&bitslice	(@XMM[0..7, 8..11]);
+$code.=<<___;
+	sub	$rounds,$rounds,#1
+	b	.Lenc_sbox
+.align	4
+.Lenc_loop:
+___
+	&ShiftRows	(@XMM[0..7, 8..12]);
+$code.=".Lenc_sbox:\n";
+	&Sbox		(@XMM[0..7, 8..15]);
+$code.=<<___;
+	subs	$rounds,$rounds,#1
+	bcc	.Lenc_done
+___
+	&MixColumns	(@XMM[0,1,4,6,3,7,2,5, 8..15]);
+$code.=<<___;
+	vldmia	$const, {@XMM[12]}		@ .LSR
+	ite	eq				@ Thumb2 thing, sanity check in ARM
+	addeq	$const,$const,#0x10
+	bne	.Lenc_loop
+	vldmia	$const, {@XMM[12]}		@ .LSRM0
+	b	.Lenc_loop
+.align	4
+.Lenc_done:
+___
+	# output in lsb > [t0, t1, t4, t6, t3, t7, t2, t5] < msb
+	&bitslice	(@XMM[0,1,4,6,3,7,2,5, 8..11]);
+$code.=<<___;
+	vldmia	$key, {@XMM[8]}			@ last round key
+	veor	@XMM[4], @XMM[4], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[8]
+	veor	@XMM[3], @XMM[3], @XMM[8]
+	veor	@XMM[7], @XMM[7], @XMM[8]
+	veor	@XMM[2], @XMM[2], @XMM[8]
+	veor	@XMM[5], @XMM[5], @XMM[8]
+	veor	@XMM[0], @XMM[0], @XMM[8]
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	bx	lr
+.size	_bsaes_encrypt8,.-_bsaes_encrypt8
+___
+}
+{
+my ($out,$inp,$rounds,$const)=("r12","r4","r5","r6");
+
+sub bitslice_key {
+my @x=reverse(@_[0..7]);
+my ($bs0,$bs1,$bs2,$t2,$t3)=@_[8..12];
+
+	&swapmove	(@x[0,1],1,$bs0,$t2,$t3);
+$code.=<<___;
+	@ &swapmove(@x[2,3],1,$t0,$t2,$t3);
+	vmov	@x[2], @x[0]
+	vmov	@x[3], @x[1]
+___
+	#&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+
+	&swapmove2x	(@x[0,2,1,3],2,$bs1,$t2,$t3);
+$code.=<<___;
+	@ &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+	vmov	@x[4], @x[0]
+	vmov	@x[6], @x[2]
+	vmov	@x[5], @x[1]
+	vmov	@x[7], @x[3]
+___
+	&swapmove2x	(@x[0,4,1,5],4,$bs2,$t2,$t3);
+	&swapmove2x	(@x[2,6,3,7],4,$bs2,$t2,$t3);
+}
+
+$code.=<<___;
+.type	_bsaes_key_convert,%function
+.align	4
+_bsaes_key_convert:
+	adr	$const,_bsaes_key_convert
+	vld1.8	{@XMM[7]},  [$inp]!		@ load round 0 key
+#ifdef	__APPLE__
+	adr	$const,.LM0
+#else
+	sub	$const,$const,#_bsaes_key_convert-.LM0
+#endif
+	vld1.8	{@XMM[15]}, [$inp]!		@ load round 1 key
+
+	vmov.i8	@XMM[8],  #0x01			@ bit masks
+	vmov.i8	@XMM[9],  #0x02
+	vmov.i8	@XMM[10], #0x04
+	vmov.i8	@XMM[11], #0x08
+	vmov.i8	@XMM[12], #0x10
+	vmov.i8	@XMM[13], #0x20
+	vldmia	$const, {@XMM[14]}		@ .LM0
+
+#ifdef __ARMEL__
+	vrev32.8	@XMM[7],  @XMM[7]
+	vrev32.8	@XMM[15], @XMM[15]
+#endif
+	sub	$rounds,$rounds,#1
+	vstmia	$out!, {@XMM[7]}		@ save round 0 key
+	b	.Lkey_loop
+
+.align	4
+.Lkey_loop:
+	vtbl.8	`&Dlo(@XMM[7])`,{@XMM[15]},`&Dlo(@XMM[14])`
+	vtbl.8	`&Dhi(@XMM[7])`,{@XMM[15]},`&Dhi(@XMM[14])`
+	vmov.i8	@XMM[6],  #0x40
+	vmov.i8	@XMM[15], #0x80
+
+	vtst.8	@XMM[0], @XMM[7], @XMM[8]
+	vtst.8	@XMM[1], @XMM[7], @XMM[9]
+	vtst.8	@XMM[2], @XMM[7], @XMM[10]
+	vtst.8	@XMM[3], @XMM[7], @XMM[11]
+	vtst.8	@XMM[4], @XMM[7], @XMM[12]
+	vtst.8	@XMM[5], @XMM[7], @XMM[13]
+	vtst.8	@XMM[6], @XMM[7], @XMM[6]
+	vtst.8	@XMM[7], @XMM[7], @XMM[15]
+	vld1.8	{@XMM[15]}, [$inp]!		@ load next round key
+	vmvn	@XMM[0], @XMM[0]		@ "pnot"
+	vmvn	@XMM[1], @XMM[1]
+	vmvn	@XMM[5], @XMM[5]
+	vmvn	@XMM[6], @XMM[6]
+#ifdef __ARMEL__
+	vrev32.8	@XMM[15], @XMM[15]
+#endif
+	subs	$rounds,$rounds,#1
+	vstmia	$out!,{@XMM[0]-@XMM[7]}		@ write bit-sliced round key
+	bne	.Lkey_loop
+
+	vmov.i8	@XMM[7],#0x63			@ compose .L63
+	@ don't save last round key
+	bx	lr
+.size	_bsaes_key_convert,.-_bsaes_key_convert
+___
+}
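+
+# A reading of the conversion above (illustrative): each vtst.8 against a
+# power-of-two mask turns bit k of every key byte into a full 0x00/0xff
+# lane, producing the eight bit planes of the bit-sliced representation.
+# Inverting planes 0, 1, 5 and 6 (the vmvn "pnot") XORs 0x63 -- the bits
+# set in the AES Sbox affine constant -- into every byte, folding that
+# constant into the key schedule; hence the callers' 0x63 fix-up of the
+# last round key only.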
+
+if (0) {		# the following four functions are an unsupported
+			# interface used for benchmarking...
+$code.=<<___;
+.globl	bsaes_enc_key_convert
+.type	bsaes_enc_key_convert,%function
+.align	4
+bsaes_enc_key_convert:
+	stmdb	sp!,{r4-r6,lr}
+	vstmdb	sp!,{d8-d15}		@ ABI specification says so
+
+	ldr	r5,[$inp,#240]			@ pass rounds
+	mov	r4,$inp				@ pass key
+	mov	r12,$out			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	@XMM[7],@XMM[7],@XMM[15]	@ fix up last round key
+	vstmia	r12, {@XMM[7]}			@ save last round key
+
+	vldmia	sp!,{d8-d15}
+	ldmia	sp!,{r4-r6,pc}
+.size	bsaes_enc_key_convert,.-bsaes_enc_key_convert
+
+.globl	bsaes_encrypt_128
+.type	bsaes_encrypt_128,%function
+.align	4
+bsaes_encrypt_128:
+	stmdb	sp!,{r4-r6,lr}
+	vstmdb	sp!,{d8-d15}		@ ABI specification says so
+.Lenc128_loop:
+	vld1.8	{@XMM[0]-@XMM[1]}, [$inp]!	@ load input
+	vld1.8	{@XMM[2]-@XMM[3]}, [$inp]!
+	mov	r4,$key				@ pass the key
+	vld1.8	{@XMM[4]-@XMM[5]}, [$inp]!
+	mov	r5,#10				@ pass rounds
+	vld1.8	{@XMM[6]-@XMM[7]}, [$inp]!
+
+	bl	_bsaes_encrypt8
+
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[3]}, [$out]!
+	vst1.8	{@XMM[7]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	subs	$len,$len,#0x80
+	vst1.8	{@XMM[5]}, [$out]!
+	bhi	.Lenc128_loop
+
+	vldmia	sp!,{d8-d15}
+	ldmia	sp!,{r4-r6,pc}
+.size	bsaes_encrypt_128,.-bsaes_encrypt_128
+
+.globl	bsaes_dec_key_convert
+.type	bsaes_dec_key_convert,%function
+.align	4
+bsaes_dec_key_convert:
+	stmdb	sp!,{r4-r6,lr}
+	vstmdb	sp!,{d8-d15}		@ ABI specification says so
+
+	ldr	r5,[$inp,#240]			@ pass rounds
+	mov	r4,$inp				@ pass key
+	mov	r12,$out			@ pass key schedule
+	bl	_bsaes_key_convert
+	vldmia	$out, {@XMM[6]}
+	vstmia	r12,  {@XMM[15]}		@ save last round key
+	veor	@XMM[7], @XMM[7], @XMM[6]	@ fix up round 0 key
+	vstmia	$out, {@XMM[7]}
+
+	vldmia	sp!,{d8-d15}
+	ldmia	sp!,{r4-r6,pc}
+.size	bsaes_dec_key_convert,.-bsaes_dec_key_convert
+
+.globl	bsaes_decrypt_128
+.type	bsaes_decrypt_128,%function
+.align	4
+bsaes_decrypt_128:
+	stmdb	sp!,{r4-r6,lr}
+	vstmdb	sp!,{d8-d15}		@ ABI specification says so
+.Ldec128_loop:
+	vld1.8	{@XMM[0]-@XMM[1]}, [$inp]!	@ load input
+	vld1.8	{@XMM[2]-@XMM[3]}, [$inp]!
+	mov	r4,$key				@ pass the key
+	vld1.8	{@XMM[4]-@XMM[5]}, [$inp]!
+	mov	r5,#10				@ pass rounds
+	vld1.8	{@XMM[6]-@XMM[7]}, [$inp]!
+
+	bl	_bsaes_decrypt8
+
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	vst1.8	{@XMM[7]}, [$out]!
+	vst1.8	{@XMM[3]}, [$out]!
+	subs	$len,$len,#0x80
+	vst1.8	{@XMM[5]}, [$out]!
+	bhi	.Ldec128_loop
+
+	vldmia	sp!,{d8-d15}
+	ldmia	sp!,{r4-r6,pc}
+.size	bsaes_decrypt_128,.-bsaes_decrypt_128
+___
+}
+{
+my ($inp,$out,$len,$key, $ivp,$fp,$rounds)=map("r$_",(0..3,8..10));
+my ($keysched)=("sp");
+
+$code.=<<___;
+.extern AES_cbc_encrypt
+.extern AES_decrypt
+
+.global	bsaes_cbc_encrypt
+.type	bsaes_cbc_encrypt,%function
+.align	5
+bsaes_cbc_encrypt:
+#ifndef	__KERNEL__
+	cmp	$len, #128
+#ifndef	__thumb__
+	blo	AES_cbc_encrypt
+#else
+	bhs	1f
+	b	AES_cbc_encrypt
+1:
+#endif
+#endif
+
+	@ it is up to the caller to make sure we are called with enc == 0
+
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}
+	VFP_ABI_PUSH
+	ldr	$ivp, [ip]			@ IV is 1st arg on the stack
+	mov	$len, $len, lsr#4		@ len in 16 byte blocks
+	sub	sp, #0x10			@ scratch space to carry over the IV
+	mov	$fp, sp				@ save sp
+
+	ldr	$rounds, [$key, #240]		@ get # of rounds
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, $rounds, lsl#7		@ 128 bytes per inner round key
+	add	r12, #`128-32`			@ size of bit-sliced key schedule
+
+	@ populate the key schedule
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	mov	sp, r12				@ sp is $keysched
+	bl	_bsaes_key_convert
+	vldmia	$keysched, {@XMM[6]}
+	vstmia	r12,  {@XMM[15]}		@ save last round key
+	veor	@XMM[7], @XMM[7], @XMM[6]	@ fix up round 0 key
+	vstmia	$keysched, {@XMM[7]}
+#else
+	ldr	r12, [$key, #244]
+	eors	r12, #1
+	beq	0f
+
+	@ populate the key schedule
+	str	r12, [$key, #244]
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	add	r12, $key, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	add	r4, $key, #248
+	vldmia	r4, {@XMM[6]}
+	vstmia	r12, {@XMM[15]}			@ save last round key
+	veor	@XMM[7], @XMM[7], @XMM[6]	@ fix up round 0 key
+	vstmia	r4, {@XMM[7]}
+
+.align	2
+0:
+#endif
+
+	vld1.8	{@XMM[15]}, [$ivp]		@ load IV
+	b	.Lcbc_dec_loop
+
+.align	4
+.Lcbc_dec_loop:
+	subs	$len, $len, #0x8
+	bmi	.Lcbc_dec_loop_finish
+
+	vld1.8	{@XMM[0]-@XMM[1]}, [$inp]!	@ load input
+	vld1.8	{@XMM[2]-@XMM[3]}, [$inp]!
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	mov	r4, $keysched			@ pass the key
+#else
+	add	r4, $key, #248
+#endif
+	vld1.8	{@XMM[4]-@XMM[5]}, [$inp]!
+	mov	r5, $rounds
+	vld1.8	{@XMM[6]-@XMM[7]}, [$inp]
+	sub	$inp, $inp, #0x60
+	vstmia	$fp, {@XMM[15]}			@ put aside IV
+
+	bl	_bsaes_decrypt8
+
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[10]-@XMM[11]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vld1.8	{@XMM[12]-@XMM[13]}, [$inp]!
+	veor	@XMM[4], @XMM[4], @XMM[10]
+	veor	@XMM[2], @XMM[2], @XMM[11]
+	vld1.8	{@XMM[14]-@XMM[15]}, [$inp]!
+	veor	@XMM[7], @XMM[7], @XMM[12]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	veor	@XMM[3], @XMM[3], @XMM[13]
+	vst1.8	{@XMM[6]}, [$out]!
+	veor	@XMM[5], @XMM[5], @XMM[14]
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	vst1.8	{@XMM[7]}, [$out]!
+	vst1.8	{@XMM[3]}, [$out]!
+	vst1.8	{@XMM[5]}, [$out]!
+
+	b	.Lcbc_dec_loop
+
+.Lcbc_dec_loop_finish:
+	adds	$len, $len, #8
+	beq	.Lcbc_dec_done
+
+	vld1.8	{@XMM[0]}, [$inp]!		@ load input
+	cmp	$len, #2
+	blo	.Lcbc_dec_one
+	vld1.8	{@XMM[1]}, [$inp]!
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	mov	r4, $keysched			@ pass the key
+#else
+	add	r4, $key, #248
+#endif
+	mov	r5, $rounds
+	vstmia	$fp, {@XMM[15]}			@ put aside IV
+	beq	.Lcbc_dec_two
+	vld1.8	{@XMM[2]}, [$inp]!
+	cmp	$len, #4
+	blo	.Lcbc_dec_three
+	vld1.8	{@XMM[3]}, [$inp]!
+	beq	.Lcbc_dec_four
+	vld1.8	{@XMM[4]}, [$inp]!
+	cmp	$len, #6
+	blo	.Lcbc_dec_five
+	vld1.8	{@XMM[5]}, [$inp]!
+	beq	.Lcbc_dec_six
+	vld1.8	{@XMM[6]}, [$inp]!
+	sub	$inp, $inp, #0x70
+
+	bl	_bsaes_decrypt8
+
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[10]-@XMM[11]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vld1.8	{@XMM[12]-@XMM[13]}, [$inp]!
+	veor	@XMM[4], @XMM[4], @XMM[10]
+	veor	@XMM[2], @XMM[2], @XMM[11]
+	vld1.8	{@XMM[15]}, [$inp]!
+	veor	@XMM[7], @XMM[7], @XMM[12]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	veor	@XMM[3], @XMM[3], @XMM[13]
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	vst1.8	{@XMM[7]}, [$out]!
+	vst1.8	{@XMM[3]}, [$out]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_six:
+	sub	$inp, $inp, #0x60
+	bl	_bsaes_decrypt8
+	vldmia	$fp,{@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[10]-@XMM[11]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vld1.8	{@XMM[12]}, [$inp]!
+	veor	@XMM[4], @XMM[4], @XMM[10]
+	veor	@XMM[2], @XMM[2], @XMM[11]
+	vld1.8	{@XMM[15]}, [$inp]!
+	veor	@XMM[7], @XMM[7], @XMM[12]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	vst1.8	{@XMM[7]}, [$out]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_five:
+	sub	$inp, $inp, #0x50
+	bl	_bsaes_decrypt8
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[10]-@XMM[11]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vld1.8	{@XMM[15]}, [$inp]!
+	veor	@XMM[4], @XMM[4], @XMM[10]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	veor	@XMM[2], @XMM[2], @XMM[11]
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[4]}, [$out]!
+	vst1.8	{@XMM[2]}, [$out]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_four:
+	sub	$inp, $inp, #0x40
+	bl	_bsaes_decrypt8
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[10]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vld1.8	{@XMM[15]}, [$inp]!
+	veor	@XMM[4], @XMM[4], @XMM[10]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	vst1.8	{@XMM[6]}, [$out]!
+	vst1.8	{@XMM[4]}, [$out]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_three:
+	sub	$inp, $inp, #0x30
+	bl	_bsaes_decrypt8
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]-@XMM[9]}, [$inp]!	@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[15]}, [$inp]!
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	veor	@XMM[6], @XMM[6], @XMM[9]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	vst1.8	{@XMM[6]}, [$out]!
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_two:
+	sub	$inp, $inp, #0x20
+	bl	_bsaes_decrypt8
+	vldmia	$fp, {@XMM[14]}			@ reload IV
+	vld1.8	{@XMM[8]}, [$inp]!		@ reload input
+	veor	@XMM[0], @XMM[0], @XMM[14]	@ ^= IV
+	vld1.8	{@XMM[15]}, [$inp]!		@ reload input
+	veor	@XMM[1], @XMM[1], @XMM[8]
+	vst1.8	{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	b	.Lcbc_dec_done
+.align	4
+.Lcbc_dec_one:
+	sub	$inp, $inp, #0x10
+	mov	$rounds, $out			@ save original out pointer
+	mov	$out, $fp			@ use the iv scratch space as out buffer
+	mov	r2, $key
+	vmov	@XMM[4],@XMM[15]		@ just in case ensure that IV
+	vmov	@XMM[5],@XMM[0]			@ and input are preserved
+	bl	AES_decrypt
+	vld1.8	{@XMM[0]}, [$fp]		@ load result
+	veor	@XMM[0], @XMM[0], @XMM[4]	@ ^= IV
+	vmov	@XMM[15], @XMM[5]		@ @XMM[5] holds input
+	vst1.8	{@XMM[0]}, [$rounds]		@ write output
+
+.Lcbc_dec_done:
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+.Lcbc_dec_bzero:				@ wipe key schedule [if any]
+	vstmia		$keysched!, {q0-q1}
+	cmp		$keysched, $fp
+	bne		.Lcbc_dec_bzero
+#endif
+
+	mov	sp, $fp
+	add	sp, #0x10			@ add sp,$fp,#0x10 is no good for thumb
+	vst1.8	{@XMM[15]}, [$ivp]		@ return IV
+	VFP_ABI_POP
+	ldmia	sp!, {r4-r10, pc}
+.size	bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
+___
+}
+{
+my ($inp,$out,$len,$key, $ctr,$fp,$rounds)=(map("r$_",(0..3,8..10)));
+my $const = "r6";	# shared with _bsaes_encrypt8_alt
+my $keysched = "sp";
+
+$code.=<<___;
+.extern	AES_encrypt
+.global	bsaes_ctr32_encrypt_blocks
+.type	bsaes_ctr32_encrypt_blocks,%function
+.align	5
+bsaes_ctr32_encrypt_blocks:
+	cmp	$len, #8			@ use plain AES for
+	blo	.Lctr_enc_short			@ small sizes
+
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}
+	VFP_ABI_PUSH
+	ldr	$ctr, [ip]			@ ctr is 1st arg on the stack
+	sub	sp, sp, #0x10			@ scratch space to carry over the ctr
+	mov	$fp, sp				@ save sp
+
+	ldr	$rounds, [$key, #240]		@ get # of rounds
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, $rounds, lsl#7		@ 128 bytes per inner round key
+	add	r12, #`128-32`			@ size of bit-sliced key schedule
+
+	@ populate the key schedule
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	mov	sp, r12				@ sp is $keysched
+	bl	_bsaes_key_convert
+	veor	@XMM[7],@XMM[7],@XMM[15]	@ fix up last round key
+	vstmia	r12, {@XMM[7]}			@ save last round key
+
+	vld1.8	{@XMM[0]}, [$ctr]		@ load counter
+#ifdef	__APPLE__
+	mov	$ctr, #:lower16:(.LREVM0SR-.LM0)
+	add	$ctr, $const, $ctr
+#else
+	add	$ctr, $const, #.LREVM0SR-.LM0	@ borrow $ctr
+#endif
+	vldmia	$keysched, {@XMM[4]}		@ load round0 key
+#else
+	ldr	r12, [$key, #244]
+	eors	r12, #1
+	beq	0f
+
+	@ populate the key schedule
+	str	r12, [$key, #244]
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	add	r12, $key, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	@XMM[7],@XMM[7],@XMM[15]	@ fix up last round key
+	vstmia	r12, {@XMM[7]}			@ save last round key
+
+.align	2
+0:	add	r12, $key, #248
+	vld1.8	{@XMM[0]}, [$ctr]		@ load counter
+	adrl	$ctr, .LREVM0SR			@ borrow $ctr
+	vldmia	r12, {@XMM[4]}			@ load round0 key
+	sub	sp, #0x10			@ place for adjusted round0 key
+#endif
+
+	vmov.i32	@XMM[8],#1		@ compose 1<<96
+	veor		@XMM[9],@XMM[9],@XMM[9]
+	vrev32.8	@XMM[0],@XMM[0]
+	vext.8		@XMM[8],@XMM[9],@XMM[8],#4
+	vrev32.8	@XMM[4],@XMM[4]
+	vadd.u32	@XMM[9],@XMM[8],@XMM[8]	@ compose 2<<96
+	vstmia	$keysched, {@XMM[4]}		@ save adjusted round0 key
+	b	.Lctr_enc_loop
+
+.align	4
+.Lctr_enc_loop:
+	vadd.u32	@XMM[10], @XMM[8], @XMM[9]	@ compose 3<<96
+	vadd.u32	@XMM[1], @XMM[0], @XMM[8]	@ +1
+	vadd.u32	@XMM[2], @XMM[0], @XMM[9]	@ +2
+	vadd.u32	@XMM[3], @XMM[0], @XMM[10]	@ +3
+	vadd.u32	@XMM[4], @XMM[1], @XMM[10]
+	vadd.u32	@XMM[5], @XMM[2], @XMM[10]
+	vadd.u32	@XMM[6], @XMM[3], @XMM[10]
+	vadd.u32	@XMM[7], @XMM[4], @XMM[10]
+	vadd.u32	@XMM[10], @XMM[5], @XMM[10]	@ next counter
+
+	@ Borrow the prologue from _bsaes_encrypt8 to take the opportunity
+	@ to flip the byte order in the 32-bit counter
+
+	vldmia		$keysched, {@XMM[9]}		@ load round0 key
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, $keysched, #0x10		@ pass next round key
+#else
+	add		r4, $key, #`248+16`
+#endif
+	vldmia		$ctr, {@XMM[8]}			@ .LREVM0SR
+	mov		r5, $rounds			@ pass rounds
+	vstmia		$fp, {@XMM[10]}			@ save next counter
+#ifdef	__APPLE__
+	mov		$const, #:lower16:(.LREVM0SR-.LSR)
+	sub		$const, $ctr, $const
+#else
+	sub		$const, $ctr, #.LREVM0SR-.LSR	@ pass constants
+#endif
+
+	bl		_bsaes_encrypt8_alt
+
+	subs		$len, $len, #8
+	blo		.Lctr_enc_loop_done
+
+	vld1.8		{@XMM[8]-@XMM[9]}, [$inp]!	@ load input
+	vld1.8		{@XMM[10]-@XMM[11]}, [$inp]!
+	veor		@XMM[0], @XMM[8]
+	veor		@XMM[1], @XMM[9]
+	vld1.8		{@XMM[12]-@XMM[13]}, [$inp]!
+	veor		@XMM[4], @XMM[10]
+	veor		@XMM[6], @XMM[11]
+	vld1.8		{@XMM[14]-@XMM[15]}, [$inp]!
+	veor		@XMM[3], @XMM[12]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!	@ write output
+	veor		@XMM[7], @XMM[13]
+	veor		@XMM[2], @XMM[14]
+	vst1.8		{@XMM[4]}, [$out]!
+	veor		@XMM[5], @XMM[15]
+	vst1.8		{@XMM[6]}, [$out]!
+	vmov.i32	@XMM[8], #1			@ compose 1<<96
+	vst1.8		{@XMM[3]}, [$out]!
+	veor		@XMM[9], @XMM[9], @XMM[9]
+	vst1.8		{@XMM[7]}, [$out]!
+	vext.8		@XMM[8], @XMM[9], @XMM[8], #4
+	vst1.8		{@XMM[2]}, [$out]!
+	vadd.u32	@XMM[9],@XMM[8],@XMM[8]		@ compose 2<<96
+	vst1.8		{@XMM[5]}, [$out]!
+	vldmia		$fp, {@XMM[0]}			@ load counter
+
+	bne		.Lctr_enc_loop
+	b		.Lctr_enc_done
+
+.align	4
+.Lctr_enc_loop_done:
+	add		$len, $len, #8
+	vld1.8		{@XMM[8]}, [$inp]!	@ load input
+	veor		@XMM[0], @XMM[8]
+	vst1.8		{@XMM[0]}, [$out]!	@ write output
+	cmp		$len, #2
+	blo		.Lctr_enc_done
+	vld1.8		{@XMM[9]}, [$inp]!
+	veor		@XMM[1], @XMM[9]
+	vst1.8		{@XMM[1]}, [$out]!
+	beq		.Lctr_enc_done
+	vld1.8		{@XMM[10]}, [$inp]!
+	veor		@XMM[4], @XMM[10]
+	vst1.8		{@XMM[4]}, [$out]!
+	cmp		$len, #4
+	blo		.Lctr_enc_done
+	vld1.8		{@XMM[11]}, [$inp]!
+	veor		@XMM[6], @XMM[11]
+	vst1.8		{@XMM[6]}, [$out]!
+	beq		.Lctr_enc_done
+	vld1.8		{@XMM[12]}, [$inp]!
+	veor		@XMM[3], @XMM[12]
+	vst1.8		{@XMM[3]}, [$out]!
+	cmp		$len, #6
+	blo		.Lctr_enc_done
+	vld1.8		{@XMM[13]}, [$inp]!
+	veor		@XMM[7], @XMM[13]
+	vst1.8		{@XMM[7]}, [$out]!
+	beq		.Lctr_enc_done
+	vld1.8		{@XMM[14]}, [$inp]
+	veor		@XMM[2], @XMM[14]
+	vst1.8		{@XMM[2]}, [$out]!
+
+.Lctr_enc_done:
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+#ifndef	BSAES_ASM_EXTENDED_KEY
+.Lctr_enc_bzero:			@ wipe key schedule [if any]
+	vstmia		$keysched!, {q0-q1}
+	cmp		$keysched, $fp
+	bne		.Lctr_enc_bzero
+#else
+	vstmia		$keysched, {q0-q1}
+#endif
+
+	mov	sp, $fp
+	add	sp, #0x10		@ add sp,$fp,#0x10 is no good for thumb
+	VFP_ABI_POP
+	ldmia	sp!, {r4-r10, pc}	@ return
+
+.align	4
+.Lctr_enc_short:
+	ldr	ip, [sp]		@ ctr pointer is passed on stack
+	stmdb	sp!, {r4-r8, lr}
+
+	mov	r4, $inp		@ copy arguments
+	mov	r5, $out
+	mov	r6, $len
+	mov	r7, $key
+	ldr	r8, [ip, #12]		@ load counter LSW
+	vld1.8	{@XMM[1]}, [ip]		@ load whole counter value
+#ifdef __ARMEL__
+	rev	r8, r8
+#endif
+	sub	sp, sp, #0x10
+	vst1.8	{@XMM[1]}, [sp]		@ copy counter value
+	sub	sp, sp, #0x10
+
+.Lctr_enc_short_loop:
+	add	r0, sp, #0x10		@ input counter value
+	mov	r1, sp			@ output on the stack
+	mov	r2, r7			@ key
+
+	bl	AES_encrypt
+
+	vld1.8	{@XMM[0]}, [r4]!	@ load input
+	vld1.8	{@XMM[1]}, [sp]		@ load encrypted counter
+	add	r8, r8, #1
+#ifdef __ARMEL__
+	rev	r0, r8
+	str	r0, [sp, #0x1c]		@ next counter value
+#else
+	str	r8, [sp, #0x1c]		@ next counter value
+#endif
+	veor	@XMM[0],@XMM[0],@XMM[1]
+	vst1.8	{@XMM[0]}, [r5]!	@ store output
+	subs	r6, r6, #1
+	bne	.Lctr_enc_short_loop
+
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+	vstmia		sp!, {q0-q1}
+
+	ldmia	sp!, {r4-r8, pc}
+.size	bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
+___
+}
+{
+######################################################################
+# void bsaes_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+#	const AES_KEY *key1, const AES_KEY *key2,
+#	const unsigned char iv[16]);
+#
+my ($inp,$out,$len,$key,$rounds,$magic,$fp)=(map("r$_",(7..10,1..3)));
+my $const="r6";		# returned by _bsaes_key_convert
+my $twmask=@XMM[5];
+my @T=@XMM[6..7];
+
+$code.=<<___;
+.globl	bsaes_xts_encrypt
+.type	bsaes_xts_encrypt,%function
+.align	4
+bsaes_xts_encrypt:
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}		@ 0x20
+	VFP_ABI_PUSH
+	mov	r6, sp				@ future $fp
+
+	mov	$inp, r0
+	mov	$out, r1
+	mov	$len, r2
+	mov	$key, r3
+
+	sub	r0, sp, #0x10			@ 0x10
+	bic	r0, #0xf			@ align at 16 bytes
+	mov	sp, r0
+
+#ifdef	XTS_CHAIN_TWEAK
+	ldr	r0, [ip]			@ pointer to input tweak
+#else
+	@ generate initial tweak
+	ldr	r0, [ip, #4]			@ iv[]
+	mov	r1, sp
+	ldr	r2, [ip, #0]			@ key2
+	bl	AES_encrypt
+	mov	r0,sp				@ pointer to initial tweak
+#endif
+
+	ldr	$rounds, [$key, #240]		@ get # of rounds
+	mov	$fp, r6
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, $rounds, lsl#7		@ 128 bytes per inner round key
+	@ add	r12, #`128-32`			@ size of bit-sliced key schedule
+	sub	r12, #`32+16`			@ place for tweak[9]
+
+	@ populate the key schedule
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	mov	sp, r12
+	add	r12, #0x90			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	@XMM[7], @XMM[7], @XMM[15]	@ fix up last round key
+	vstmia	r12, {@XMM[7]}			@ save last round key
+#else
+	ldr	r12, [$key, #244]
+	eors	r12, #1
+	beq	0f
+
+	str	r12, [$key, #244]
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	add	r12, $key, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	veor	@XMM[7], @XMM[7], @XMM[15]	@ fix up last round key
+	vstmia	r12, {@XMM[7]}
+
+.align	2
+0:	sub	sp, #0x90			@ place for tweak[9]
+#endif
+
+	vld1.8	{@XMM[8]}, [r0]			@ initial tweak
+	adr	$magic, .Lxts_magic
+
+	subs	$len, #0x80
+	blo	.Lxts_enc_short
+	b	.Lxts_enc_loop
+
+.align	4
+.Lxts_enc_loop:
+	vldmia		$magic, {$twmask}	@ load XTS magic
+	vshr.s64	@T[0], @XMM[8], #63
+	mov		r0, sp
+	vand		@T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+	vadd.u64	@XMM[$i], @XMM[$i-1], @XMM[$i-1]
+	vst1.64		{@XMM[$i-1]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	vshr.s64	@T[1], @XMM[$i], #63
+	veor		@XMM[$i], @XMM[$i], @T[0]
+	vand		@T[1], @T[1], $twmask
+___
+	@T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+	vld1.8		{@XMM[$i-10]}, [$inp]!
+___
+$code.=<<___ if ($i>=11);
+	veor		@XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
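+
+# Scalar sketch of the tweak update the loop above emits (illustrative;
+# lo/hi are the little-endian 64-bit halves): each tweak is the previous
+# one times x in GF(2^128) mod x^128 + x^7 + x^2 + x + 1, i.e.
+#   carry = hi >> 63;
+#   hi    = (hi << 1) | (lo >> 63);
+#   lo    = (lo << 1) ^ (carry ? 0x87 : 0);
+# The vshr.s64 #63 / vand {1,0x87} / vswp trio routes each half's carry
+# into the other half without a branch.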
+$code.=<<___;
+	vadd.u64	@XMM[8], @XMM[15], @XMM[15]
+	vst1.64		{@XMM[15]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	veor		@XMM[8], @XMM[8], @T[0]
+	vst1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+
+	vld1.8		{@XMM[6]-@XMM[7]}, [$inp]!
+	veor		@XMM[5], @XMM[5], @XMM[13]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[6], @XMM[6], @XMM[14]
+	mov		r5, $rounds			@ pass rounds
+	veor		@XMM[7], @XMM[7], @XMM[15]
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[6], @XMM[11]
+	vld1.64		{@XMM[14]-@XMM[15]}, [r0,:128]!
+	veor		@XMM[10], @XMM[3], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	veor		@XMM[12], @XMM[2], @XMM[14]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+	veor		@XMM[13], @XMM[5], @XMM[15]
+	vst1.8		{@XMM[12]-@XMM[13]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+
+	subs		$len, #0x80
+	bpl		.Lxts_enc_loop
+
+.Lxts_enc_short:
+	adds		$len, #0x70
+	bmi		.Lxts_enc_done
+
+	vldmia		$magic, {$twmask}	@ load XTS magic
+	vshr.s64	@T[0], @XMM[8], #63
+	mov		r0, sp
+	vand		@T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+	vadd.u64	@XMM[$i], @XMM[$i-1], @XMM[$i-1]
+	vst1.64		{@XMM[$i-1]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	vshr.s64	@T[1], @XMM[$i], #63
+	veor		@XMM[$i], @XMM[$i], @T[0]
+	vand		@T[1], @T[1], $twmask
+___
+	@T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+	vld1.8		{@XMM[$i-10]}, [$inp]!
+	subs		$len, #0x10
+	bmi		.Lxts_enc_`$i-9`
+___
+$code.=<<___ if ($i>=11);
+	veor		@XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+	sub		$len, #0x10
+	vst1.64		{@XMM[15]}, [r0,:128]		@ next round tweak
+
+	vld1.8		{@XMM[6]}, [$inp]!
+	veor		@XMM[5], @XMM[5], @XMM[13]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[6], @XMM[6], @XMM[14]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[6], @XMM[11]
+	vld1.64		{@XMM[14]}, [r0,:128]!
+	veor		@XMM[10], @XMM[3], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	veor		@XMM[12], @XMM[2], @XMM[14]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+	vst1.8		{@XMM[12]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_6:
+	veor		@XMM[4], @XMM[4], @XMM[12]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[5], @XMM[5], @XMM[13]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[6], @XMM[11]
+	veor		@XMM[10], @XMM[3], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+
+@ put this in range for both ARM and Thumb mode adr instructions
+.align	5
+.Lxts_magic:
+	.quad	1, 0x87
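+	@ {1, 0x87} implements multiplication by x modulo the XTS polynomial
+	@ x^128 + x^7 + x^2 + x + 1 (0x87 = x^7 + x^2 + x + 1); the 1 is the
+	@ inter-lane carry of bit 63.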
+
+.align	5
+.Lxts_enc_5:
+	veor		@XMM[3], @XMM[3], @XMM[11]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[4], @XMM[4], @XMM[12]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[6], @XMM[11]
+	veor		@XMM[10], @XMM[3], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	vst1.8		{@XMM[10]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_4:
+	veor		@XMM[2], @XMM[2], @XMM[10]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[3], @XMM[3], @XMM[11]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[6], @XMM[11]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_3:
+	veor		@XMM[1], @XMM[1], @XMM[9]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[2], @XMM[2], @XMM[10]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[8]-@XMM[9]}, [r0,:128]!
+	vld1.64		{@XMM[10]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[4], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	vst1.8		{@XMM[8]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_2:
+	veor		@XMM[0], @XMM[0], @XMM[8]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[1], @XMM[1], @XMM[9]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_encrypt8
+
+	vld1.64		{@XMM[8]-@XMM[9]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_enc_done
+.align	4
+.Lxts_enc_1:
+	mov		r0, sp
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	mov		r1, sp
+	vst1.8		{@XMM[0]}, [sp,:128]
+	mov		r2, $key
+	mov		r4, $fp				@ preserve fp
+
+	bl		AES_encrypt
+
+	vld1.8		{@XMM[0]}, [sp,:128]
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	vst1.8		{@XMM[0]}, [$out]!
+	mov		$fp, r4
+
+	vmov		@XMM[8], @XMM[9]		@ next round tweak
+
+.Lxts_enc_done:
+#ifndef	XTS_CHAIN_TWEAK
+	adds		$len, #0x10
+	beq		.Lxts_enc_ret
+	sub		r6, $out, #0x10
+
+.Lxts_enc_steal:
+	ldrb		r0, [$inp], #1
+	ldrb		r1, [$out, #-0x10]
+	strb		r0, [$out, #-0x10]
+	strb		r1, [$out], #1
+
+	subs		$len, #1
+	bhi		.Lxts_enc_steal
+
+	vld1.8		{@XMM[0]}, [r6]
+	mov		r0, sp
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	mov		r1, sp
+	vst1.8		{@XMM[0]}, [sp,:128]
+	mov		r2, $key
+	mov		r4, $fp			@ preserve fp
+
+	bl		AES_encrypt
+
+	vld1.8		{@XMM[0]}, [sp,:128]
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	vst1.8		{@XMM[0]}, [r6]
+	mov		$fp, r4
+#endif
+
+.Lxts_enc_ret:
+	bic		r0, $fp, #0xf
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+#ifdef	XTS_CHAIN_TWEAK
+	ldr		r1, [$fp, #0x20+VFP_ABI_FRAME]	@ chain tweak
+#endif
+.Lxts_enc_bzero:				@ wipe key schedule [if any]
+	vstmia		sp!, {q0-q1}
+	cmp		sp, r0
+	bne		.Lxts_enc_bzero
+
+	mov		sp, $fp
+#ifdef	XTS_CHAIN_TWEAK
+	vst1.8		{@XMM[8]}, [r1]
+#endif
+	VFP_ABI_POP
+	ldmia		sp!, {r4-r10, pc}	@ return
+
+.size	bsaes_xts_encrypt,.-bsaes_xts_encrypt
+
+.globl	bsaes_xts_decrypt
+.type	bsaes_xts_decrypt,%function
+.align	4
+bsaes_xts_decrypt:
+	mov	ip, sp
+	stmdb	sp!, {r4-r10, lr}		@ 0x20
+	VFP_ABI_PUSH
+	mov	r6, sp				@ future $fp
+
+	mov	$inp, r0
+	mov	$out, r1
+	mov	$len, r2
+	mov	$key, r3
+
+	sub	r0, sp, #0x10			@ 0x10
+	bic	r0, #0xf			@ align at 16 bytes
+	mov	sp, r0
+
+#ifdef	XTS_CHAIN_TWEAK
+	ldr	r0, [ip]			@ pointer to input tweak
+#else
+	@ generate initial tweak
+	ldr	r0, [ip, #4]			@ iv[]
+	mov	r1, sp
+	ldr	r2, [ip, #0]			@ key2
+	bl	AES_encrypt
+	mov	r0, sp				@ pointer to initial tweak
+#endif
+
+	ldr	$rounds, [$key, #240]		@ get # of rounds
+	mov	$fp, r6
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	@ allocate the key schedule on the stack
+	sub	r12, sp, $rounds, lsl#7		@ 128 bytes per inner round key
+	@ add	r12, #`128-32`			@ size of bit-sliced key schedule
+	sub	r12, #`32+16`			@ place for tweak[9]
+
+	@ populate the key schedule
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	mov	sp, r12
+	add	r12, #0x90			@ pass key schedule
+	bl	_bsaes_key_convert
+	add	r4, sp, #0x90
+	vldmia	r4, {@XMM[6]}
+	vstmia	r12,  {@XMM[15]}		@ save last round key
+	veor	@XMM[7], @XMM[7], @XMM[6]	@ fix up round 0 key
+	vstmia	r4, {@XMM[7]}
+#else
+	ldr	r12, [$key, #244]
+	eors	r12, #1
+	beq	0f
+
+	str	r12, [$key, #244]
+	mov	r4, $key			@ pass key
+	mov	r5, $rounds			@ pass # of rounds
+	add	r12, $key, #248			@ pass key schedule
+	bl	_bsaes_key_convert
+	add	r4, $key, #248
+	vldmia	r4, {@XMM[6]}
+	vstmia	r12,  {@XMM[15]}		@ save last round key
+	veor	@XMM[7], @XMM[7], @XMM[6]	@ fix up round 0 key
+	vstmia	r4, {@XMM[7]}
+
+.align	2
+0:	sub	sp, #0x90			@ place for tweak[9]
+#endif
+	vld1.8	{@XMM[8]}, [r0]			@ initial tweak
+	adr	$magic, .Lxts_magic
+
+#ifndef	XTS_CHAIN_TWEAK
+	tst	$len, #0xf			@ if not multiple of 16
+	it	ne				@ Thumb2 thing, sanity check in ARM
+	subne	$len, #0x10			@ subtract another 16 bytes
+#endif
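+	@ the block reserved here, together with the partial tail, is
+	@ handled by the ciphertext-stealing code at .Lxts_dec_done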
+	subs	$len, #0x80
+
+	blo	.Lxts_dec_short
+	b	.Lxts_dec_loop
+
+.align	4
+.Lxts_dec_loop:
+	vldmia		$magic, {$twmask}	@ load XTS magic
+	vshr.s64	@T[0], @XMM[8], #63
+	mov		r0, sp
+	vand		@T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+	vadd.u64	@XMM[$i], @XMM[$i-1], @XMM[$i-1]
+	vst1.64		{@XMM[$i-1]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	vshr.s64	@T[1], @XMM[$i], #63
+	veor		@XMM[$i], @XMM[$i], @T[0]
+	vand		@T[1], @T[1], $twmask
+___
+	@T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+	vld1.8		{@XMM[$i-10]}, [$inp]!
+___
+$code.=<<___ if ($i>=11);
+	veor		@XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+	vadd.u64	@XMM[8], @XMM[15], @XMM[15]
+	vst1.64		{@XMM[15]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	veor		@XMM[8], @XMM[8], @T[0]
+	vst1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+
+	vld1.8		{@XMM[6]-@XMM[7]}, [$inp]!
+	veor		@XMM[5], @XMM[5], @XMM[13]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[6], @XMM[6], @XMM[14]
+	mov		r5, $rounds			@ pass rounds
+	veor		@XMM[7], @XMM[7], @XMM[15]
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[4], @XMM[11]
+	vld1.64		{@XMM[14]-@XMM[15]}, [r0,:128]!
+	veor		@XMM[10], @XMM[2], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	veor		@XMM[12], @XMM[3], @XMM[14]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+	veor		@XMM[13], @XMM[5], @XMM[15]
+	vst1.8		{@XMM[12]-@XMM[13]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+
+	subs		$len, #0x80
+	bpl		.Lxts_dec_loop
+
+.Lxts_dec_short:
+	adds		$len, #0x70
+	bmi		.Lxts_dec_done
+
+	vldmia		$magic, {$twmask}	@ load XTS magic
+	vshr.s64	@T[0], @XMM[8], #63
+	mov		r0, sp
+	vand		@T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+	vadd.u64	@XMM[$i], @XMM[$i-1], @XMM[$i-1]
+	vst1.64		{@XMM[$i-1]}, [r0,:128]!
+	vswp		`&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+	vshr.s64	@T[1], @XMM[$i], #63
+	veor		@XMM[$i], @XMM[$i], @T[0]
+	vand		@T[1], @T[1], $twmask
+___
+	@T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+	vld1.8		{@XMM[$i-10]}, [$inp]!
+	subs		$len, #0x10
+	bmi		.Lxts_dec_`$i-9`
+___
+$code.=<<___ if ($i>=11);
+	veor		@XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+	sub		$len, #0x10
+	vst1.64		{@XMM[15]}, [r0,:128]		@ next round tweak
+
+	vld1.8		{@XMM[6]}, [$inp]!
+	veor		@XMM[5], @XMM[5], @XMM[13]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[6], @XMM[6], @XMM[14]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[4], @XMM[11]
+	vld1.64		{@XMM[14]}, [r0,:128]!
+	veor		@XMM[10], @XMM[2], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	veor		@XMM[12], @XMM[3], @XMM[14]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+	vst1.8		{@XMM[12]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_6:
+	vst1.64		{@XMM[14]}, [r0,:128]		@ next round tweak
+
+	veor		@XMM[4], @XMM[4], @XMM[12]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[5], @XMM[5], @XMM[13]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]-@XMM[13]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[4], @XMM[11]
+	veor		@XMM[10], @XMM[2], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	veor		@XMM[11], @XMM[7], @XMM[13]
+	vst1.8		{@XMM[10]-@XMM[11]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_5:
+	veor		@XMM[3], @XMM[3], @XMM[11]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[4], @XMM[4], @XMM[12]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	vld1.64		{@XMM[12]}, [r0,:128]!
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[4], @XMM[11]
+	veor		@XMM[10], @XMM[2], @XMM[12]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+	vst1.8		{@XMM[10]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_4:
+	veor		@XMM[2], @XMM[2], @XMM[10]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[3], @XMM[3], @XMM[11]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+	vld1.64		{@XMM[10]-@XMM[11]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	veor		@XMM[9], @XMM[4], @XMM[11]
+	vst1.8		{@XMM[8]-@XMM[9]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_3:
+	veor		@XMM[1], @XMM[1], @XMM[9]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[2], @XMM[2], @XMM[10]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[8]-@XMM[9]}, [r0,:128]!
+	vld1.64		{@XMM[10]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	veor		@XMM[8], @XMM[6], @XMM[10]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+	vst1.8		{@XMM[8]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_2:
+	veor		@XMM[0], @XMM[0], @XMM[8]
+#ifndef	BSAES_ASM_EXTENDED_KEY
+	add		r4, sp, #0x90			@ pass key schedule
+#else
+	add		r4, $key, #248			@ pass key schedule
+#endif
+	veor		@XMM[1], @XMM[1], @XMM[9]
+	mov		r5, $rounds			@ pass rounds
+	mov		r0, sp
+
+	bl		_bsaes_decrypt8
+
+	vld1.64		{@XMM[8]-@XMM[9]}, [r0,:128]!
+	veor		@XMM[0], @XMM[0], @XMM[ 8]
+	veor		@XMM[1], @XMM[1], @XMM[ 9]
+	vst1.8		{@XMM[0]-@XMM[1]}, [$out]!
+
+	vld1.64		{@XMM[8]}, [r0,:128]		@ next round tweak
+	b		.Lxts_dec_done
+.align	4
+.Lxts_dec_1:
+	mov		r0, sp
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	mov		r1, sp
+	vst1.8		{@XMM[0]}, [sp,:128]
+	mov		r5, $magic			@ preserve magic
+	mov		r2, $key
+	mov		r4, $fp				@ preserve fp
+
+	bl		AES_decrypt
+
+	vld1.8		{@XMM[0]}, [sp,:128]
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	vst1.8		{@XMM[0]}, [$out]!
+	mov		$fp, r4
+	mov		$magic, r5
+
+	vmov		@XMM[8], @XMM[9]		@ next round tweak
+
+.Lxts_dec_done:
+#ifndef	XTS_CHAIN_TWEAK
+	adds		$len, #0x10
+	beq		.Lxts_dec_ret
+
+	@ calculate one round of extra tweak for the stolen ciphertext
+	vldmia		$magic, {$twmask}
+	vshr.s64	@XMM[6], @XMM[8], #63
+	vand		@XMM[6], @XMM[6], $twmask
+	vadd.u64	@XMM[9], @XMM[8], @XMM[8]
+	vswp		`&Dhi("@XMM[6]")`,`&Dlo("@XMM[6]")`
+	veor		@XMM[9], @XMM[9], @XMM[6]
+
+	@ perform the final decryption with the last tweak value
+	vld1.8		{@XMM[0]}, [$inp]!
+	mov		r0, sp
+	veor		@XMM[0], @XMM[0], @XMM[9]
+	mov		r1, sp
+	vst1.8		{@XMM[0]}, [sp,:128]
+	mov		r2, $key
+	mov		r4, $fp			@ preserve fp
+
+	bl		AES_decrypt
+
+	vld1.8		{@XMM[0]}, [sp,:128]
+	veor		@XMM[0], @XMM[0], @XMM[9]
+	vst1.8		{@XMM[0]}, [$out]
+
+	mov		r6, $out
+.Lxts_dec_steal:
+	ldrb		r1, [$out]
+	ldrb		r0, [$inp], #1
+	strb		r1, [$out, #0x10]
+	strb		r0, [$out], #1
+
+	subs		$len, #1
+	bhi		.Lxts_dec_steal
+
+	vld1.8		{@XMM[0]}, [r6]
+	mov		r0, sp
+	veor		@XMM[0], @XMM[8]
+	mov		r1, sp
+	vst1.8		{@XMM[0]}, [sp,:128]
+	mov		r2, $key
+
+	bl		AES_decrypt
+
+	vld1.8		{@XMM[0]}, [sp,:128]
+	veor		@XMM[0], @XMM[0], @XMM[8]
+	vst1.8		{@XMM[0]}, [r6]
+	mov		$fp, r4
+#endif
+
+.Lxts_dec_ret:
+	bic		r0, $fp, #0xf
+	vmov.i32	q0, #0
+	vmov.i32	q1, #0
+#ifdef	XTS_CHAIN_TWEAK
+	ldr		r1, [$fp, #0x20+VFP_ABI_FRAME]	@ chain tweak
+#endif
+.Lxts_dec_bzero:				@ wipe key schedule [if any]
+	vstmia		sp!, {q0-q1}
+	cmp		sp, r0
+	bne		.Lxts_dec_bzero
+
+	mov		sp, $fp
+#ifdef	XTS_CHAIN_TWEAK
+	vst1.8		{@XMM[8]}, [r1]
+#endif
+	VFP_ABI_POP
+	ldmia		sp!, {r4-r10, pc}	@ return
+
+.size	bsaes_xts_decrypt,.-bsaes_xts_decrypt
+___
+}
+$code.=<<___;
+#endif
+___
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+open SELF,$0;
+while(<SELF>) {
+	next if (/^#!/);
+	last if (!s/^#/@/ and !/^$/);
+	print;
+}
+close SELF;
+
+print $code;
+
+close STDOUT;
diff --git a/src/crypto/fipsmodule/aes/asm/bsaes-x86_64.pl b/src/crypto/fipsmodule/aes/asm/bsaes-x86_64.pl
new file mode 100644
index 0000000..8d9ee6b
--- /dev/null
+++ b/src/crypto/fipsmodule/aes/asm/bsaes-x86_64.pl
@@ -0,0 +1,3119 @@
+#!/usr/bin/env perl
+
+###################################################################
+### AES-128 [originally in CTR mode]				###
+### bitsliced implementation for Intel Core 2 processors	###
+### requires support of SSE extensions up to SSSE3		###
+### Author: Emilia Käsper and Peter Schwabe			###
+### Date: 2009-03-19						###
+### Public domain						###
+###								###
+### See http://homes.esat.kuleuven.be/~ekasper/#software for	###
+### further information.					###
+###################################################################
+#
+# September 2011.
+#
+# Started as a transliteration to "perlasm", the original code has
+# undergone the following changes:
+#
+# - code was made position-independent;
+# - rounds were folded into a loop, resulting in a >5x size reduction
+#   from 12.5KB to 2.2KB;
+# - the above was possible thanks to a mixcolumns() modification that
+#   allowed its output to be fed back to aesenc[last]; this was
+#   achieved at the cost of two additional inter-register moves;
+# - some instruction reordering and interleaving;
+# - this module doesn't implement a key setup subroutine; instead it
+#   relies on conversion of the "conventional" key schedule as returned
+#   by AES_set_encrypt_key (see discussion below);
+# - first and last round keys are treated differently, which made it
+#   possible to skip one shiftrows(), reduce the bit-sliced key
+#   schedule and speed up conversion by 22%;
+# - support for 192- and 256-bit keys was added;
+#
+# The resulting performance, in CPU cycles spent to encrypt one byte
+# of a 4096-byte buffer with a 128-bit key, is:
+#
+#		Emilia's	this(*)		difference
+#
+# Core 2    	9.30		8.69		+7%
+# Nehalem(**) 	7.63		6.88		+11%
+# Atom	    	17.1		16.4		+4%
+# Silvermont	-		12.9
+# Goldmont	-		8.85
+#
+# (*)	The comparison is not completely fair, because "this" is ECB,
+#	i.e. no extra processing such as the counter-value calculation
+#	and input xor-ing of Emilia's CTR implementation is performed.
+#	However, the CTR calculations account for no more than 1% of
+#	total time, so the comparison is *rather* fair.
+#
+# (**)	Results were collected on Westmere, which is considered to
+#	be equivalent to Nehalem for this code.
+#
+# As for the key schedule conversion subroutine: the OpenSSL interface
+# relies on per-invocation on-the-fly conversion. This naturally has
+# an impact on performance, especially for short inputs. Conversion
+# time in CPU cycles, and its ratio to the CPU cycles spent in the 8x
+# block function, is:
+#
+# 		conversion	conversion/8x block
+# Core 2	240		0.22
+# Nehalem	180		0.20
+# Atom		430		0.20
+#
+# The ratio values mean that 128-byte blocks will be processed
+# 16-18% slower, 256-byte blocks 9-10%, 384-byte blocks 6-7%, etc.
+# Also keep in mind that input sizes not divisible by 128 are
+# *effectively* slower, especially the shortest ones: e.g. consecutive
+# 144-byte blocks are processed 44% slower than one would expect,
+# 272-byte ones 29%, 400-byte ones 22%, etc. Yet, despite all these
+# "shortcomings", it's still faster than [the "hyper-threading-safe"
+# code path in] aes-x86_64.pl on all lengths above 64 bytes...
+#
+# October 2011.
+#
+# Add decryption procedure. Performance, in CPU cycles spent to decrypt
+# one byte of a 4096-byte buffer with a 128-bit key, is:
+#
+# Core 2	9.98
+# Nehalem	7.80
+# Atom		17.9
+# Silvermont	14.0
+# Goldmont	10.2
+#
+# November 2011.
+#
+# Add bsaes_xts_[en|de]crypt. Performance on blocks of less than 80
+# bytes is suboptimal, but XTS is meant to be used with larger blocks...
+#
+#						<appro@openssl.org>
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
+my ($inp,$out,$len,$key,$ivp)=("%rdi","%rsi","%rdx","%rcx","%r8");
+my @XMM=map("%xmm$_",(15,0..14));	# best on Atom, +10% over (0..15)
+my $ecb=0;	# suppress unreferenced ECB subroutines, spare some space...
+
+{
+my ($key,$rounds,$const)=("%rax","%r10d","%r11");
+
+sub Sbox {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b0, b1, b4, b6, b3, b7, b2, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
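+	# Käsper-Schwabe S-box: change into the tower-field basis, invert in
+	# GF(2^8) via the GF(((2^2)^2)^2) tower (Inv_GF256), then change back,
+	# absorbing the linear part of the AES affine map; the 0x63 constant
+	# is folded into the round keys by _bsaes_key_convert.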
+	&InBasisChange	(@b);
+	&Inv_GF256	(@b[6,5,0,3,7,1,4,2],@t,@s);
+	&OutBasisChange	(@b[7,1,4,2,6,5,0,3]);
+}
+
+sub InBasisChange {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb
+my @b=@_[0..7];
+$code.=<<___;
+	pxor	@b[6], @b[5]
+	pxor	@b[1], @b[2]
+	pxor	@b[0], @b[3]
+	pxor	@b[2], @b[6]
+	pxor 	@b[0], @b[5]
+
+	pxor	@b[3], @b[6]
+	pxor	@b[7], @b[3]
+	pxor	@b[5], @b[7]
+	pxor	@b[4], @b[3]
+	pxor	@b[5], @b[4]
+	pxor	@b[1], @b[3]
+
+	pxor	@b[7], @b[2]
+	pxor	@b[5], @b[1]
+___
+}
+
+sub OutBasisChange {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b1, b2, b4, b7, b0, b3, b5] < msb
+my @b=@_[0..7];
+$code.=<<___;
+	pxor	@b[6], @b[0]
+	pxor	@b[4], @b[1]
+	pxor	@b[0], @b[2]
+	pxor	@b[6], @b[4]
+	pxor	@b[1], @b[6]
+
+	pxor	@b[5], @b[1]
+	pxor	@b[3], @b[5]
+	pxor	@b[7], @b[3]
+	pxor	@b[5], @b[7]
+	pxor	@b[5], @b[2]
+
+	pxor	@b[7], @b[4]
+___
+}
+
+sub InvSbox {
+# input in lsb 	> [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb	> [b0, b1, b6, b4, b2, b7, b3, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+	&InvInBasisChange	(@b);
+	&Inv_GF256		(@b[5,1,2,6,3,7,0,4],@t,@s);
+	&InvOutBasisChange	(@b[3,7,0,4,5,1,2,6]);
+}
+
+sub InvInBasisChange {		# OutBasisChange in reverse
+my @b=@_[5,1,2,6,3,7,0,4];
+$code.=<<___;
+	pxor	@b[7], @b[4]
+
+	pxor	@b[5], @b[7]
+	pxor	@b[5], @b[2]
+	pxor	@b[7], @b[3]
+	pxor	@b[3], @b[5]
+	pxor	@b[5], @b[1]
+
+	pxor	@b[1], @b[6]
+	pxor	@b[0], @b[2]
+	pxor	@b[6], @b[4]
+	pxor	@b[6], @b[0]
+	pxor	@b[4], @b[1]
+___
+}
+
+sub InvOutBasisChange {		# InBasisChange in reverse
+my @b=@_[2,5,7,3,6,1,0,4];
+$code.=<<___;
+	pxor	@b[5], @b[1]
+	pxor	@b[7], @b[2]
+
+	pxor	@b[1], @b[3]
+	pxor	@b[5], @b[4]
+	pxor	@b[5], @b[7]
+	pxor	@b[4], @b[3]
+	 pxor 	@b[0], @b[5]
+	pxor	@b[7], @b[3]
+	 pxor	@b[2], @b[6]
+	 pxor	@b[1], @b[2]
+	pxor	@b[3], @b[6]
+
+	pxor	@b[0], @b[3]
+	pxor	@b[6], @b[5]
+___
+}
+
+sub Mul_GF4 {
+#;*************************************************************
+#;* Mul_GF4: Input x0-x1,y0-y1 Output x0-x1 Temp t0 (8) *
+#;*************************************************************
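+# A GF(2^2) multiplication in 3 ANDs and 4 XORs; in terms of the input
+# values, on exit $x0 = (x1 & y0) ^ ((x0 ^ x1) & y1) and
+# $x1 = (x1 & y0) ^ (x0 & (y0 ^ y1)).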
+my ($x0,$x1,$y0,$y1,$t0)=@_;
+$code.=<<___;
+	movdqa	$y0, $t0
+	pxor 	$y1, $t0
+	pand	$x0, $t0
+	pxor	$x1, $x0
+	pand	$y0, $x1
+	pand	$y1, $x0
+	pxor	$x1, $x0
+	pxor	$t0, $x1
+___
+}
+
+sub Mul_GF4_N {				# not used, see next subroutine
+# multiply and scale by N
+my ($x0,$x1,$y0,$y1,$t0)=@_;
+$code.=<<___;
+	movdqa	$y0, $t0
+	pxor	$y1, $t0
+	pand	$x0, $t0
+	pxor	$x1, $x0
+	pand	$y0, $x1
+	pand	$y1, $x0
+	pxor	$x0, $x1
+	pxor	$t0, $x0
+___
+}
+
+sub Mul_GF4_N_GF4 {
+# interleaved Mul_GF4_N and Mul_GF4
+my ($x0,$x1,$y0,$y1,$t0,
+    $x2,$x3,$y2,$y3,$t1)=@_;
+$code.=<<___;
+	movdqa	$y0, $t0
+	 movdqa	$y2, $t1
+	pxor	$y1, $t0
+	 pxor 	$y3, $t1
+	pand	$x0, $t0
+	 pand	$x2, $t1
+	pxor	$x1, $x0
+	 pxor	$x3, $x2
+	pand	$y0, $x1
+	 pand	$y2, $x3
+	pand	$y1, $x0
+	 pand	$y3, $x2
+	pxor	$x0, $x1
+	 pxor	$x3, $x2
+	pxor	$t0, $x0
+	 pxor	$t1, $x3
+___
+}
+sub Mul_GF16_2 {
+my @x=@_[0..7];
+my @y=@_[8..11];
+my @t=@_[12..15];
+$code.=<<___;
+	movdqa	@x[0], @t[0]
+	movdqa	@x[1], @t[1]
+___
+	&Mul_GF4  	(@x[0], @x[1], @y[0], @y[1], @t[2]);
+$code.=<<___;
+	pxor	@x[2], @t[0]
+	pxor	@x[3], @t[1]
+	pxor	@y[2], @y[0]
+	pxor	@y[3], @y[1]
+___
+	&Mul_GF4_N_GF4	(@t[0], @t[1], @y[0], @y[1], @t[3],
+			 @x[2], @x[3], @y[2], @y[3], @t[2]);
+$code.=<<___;
+	pxor	@t[0], @x[0]
+	pxor	@t[0], @x[2]
+	pxor	@t[1], @x[1]
+	pxor	@t[1], @x[3]
+
+	movdqa	@x[4], @t[0]
+	movdqa	@x[5], @t[1]
+	pxor	@x[6], @t[0]
+	pxor	@x[7], @t[1]
+___
+	&Mul_GF4_N_GF4	(@t[0], @t[1], @y[0], @y[1], @t[3],
+			 @x[6], @x[7], @y[2], @y[3], @t[2]);
+$code.=<<___;
+	pxor	@y[2], @y[0]
+	pxor	@y[3], @y[1]
+___
+	&Mul_GF4  	(@x[4], @x[5], @y[0], @y[1], @t[3]);
+$code.=<<___;
+	pxor	@t[0], @x[4]
+	pxor	@t[0], @x[6]
+	pxor	@t[1], @x[5]
+	pxor	@t[1], @x[7]
+___
+}
+sub Inv_GF256 {
+#;********************************************************************
+#;* Inv_GF256: Input x0-x7 Output x0-x7 Temp t0-t3,s0-s3 (144)       *
+#;********************************************************************
+my @x=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+# direct optimizations from hardware
+$code.=<<___;
+	movdqa	@x[4], @t[3]
+	movdqa	@x[5], @t[2]
+	movdqa	@x[1], @t[1]
+	movdqa	@x[7], @s[1]
+	movdqa	@x[0], @s[0]
+
+	pxor	@x[6], @t[3]
+	pxor	@x[7], @t[2]
+	pxor	@x[3], @t[1]
+	 movdqa	@t[3], @s[2]
+	pxor	@x[6], @s[1]
+	 movdqa	@t[2], @t[0]
+	pxor	@x[2], @s[0]
+	 movdqa	@t[3], @s[3]
+
+	por	@t[1], @t[2]
+	por	@s[0], @t[3]
+	pxor	@t[0], @s[3]
+	pand	@s[0], @s[2]
+	pxor	@t[1], @s[0]
+	pand	@t[1], @t[0]
+	pand	@s[0], @s[3]
+	movdqa	@x[3], @s[0]
+	pxor	@x[2], @s[0]
+	pand	@s[0], @s[1]
+	pxor	@s[1], @t[3]
+	pxor	@s[1], @t[2]
+	movdqa	@x[4], @s[1]
+	movdqa	@x[1], @s[0]
+	pxor	@x[5], @s[1]
+	pxor	@x[0], @s[0]
+	movdqa	@s[1], @t[1]
+	pand	@s[0], @s[1]
+	por	@s[0], @t[1]
+	pxor	@s[1], @t[0]
+	pxor	@s[3], @t[3]
+	pxor	@s[2], @t[2]
+	pxor	@s[3], @t[1]
+	movdqa	@x[7], @s[0]
+	pxor	@s[2], @t[0]
+	movdqa	@x[6], @s[1]
+	pxor	@s[2], @t[1]
+	movdqa	@x[5], @s[2]
+	pand	@x[3], @s[0]
+	movdqa	@x[4], @s[3]
+	pand	@x[2], @s[1]
+	pand	@x[1], @s[2]
+	por	@x[0], @s[3]
+	pxor	@s[0], @t[3]
+	pxor	@s[1], @t[2]
+	pxor	@s[2], @t[1]
+	pxor	@s[3], @t[0]
+
+	#Inv_GF16 \t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3
+
+	# new smaller inversion
+
+	movdqa	@t[3], @s[0]
+	pand	@t[1], @t[3]
+	pxor	@t[2], @s[0]
+
+	movdqa	@t[0], @s[2]
+	movdqa	@s[0], @s[3]
+	pxor	@t[3], @s[2]
+	pand	@s[2], @s[3]
+
+	movdqa	@t[1], @s[1]
+	pxor	@t[2], @s[3]
+	pxor	@t[0], @s[1]
+
+	pxor	@t[2], @t[3]
+
+	pand	@t[3], @s[1]
+
+	movdqa	@s[2], @t[2]
+	pxor	@t[0], @s[1]
+
+	pxor	@s[1], @t[2]
+	pxor	@s[1], @t[1]
+
+	pand	@t[0], @t[2]
+
+	pxor	@t[2], @s[2]
+	pxor	@t[2], @t[1]
+
+	pand	@s[3], @s[2]
+
+	pxor	@s[0], @s[2]
+___
+# output in s3, s2, s1, t1
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \t2, \t3, \t0, \t1, \s0, \s1, \s2, \s3
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3
+	&Mul_GF16_2(@x,@s[3,2,1],@t[1],@s[0],@t[0,2,3]);
+
+### output msb > [x3,x2,x1,x0,x7,x6,x5,x4] < lsb
+}
+
+# AES linear components
+
+sub ShiftRows {
+my @x=@_[0..7];
+my $mask=pop;
+$code.=<<___;
+	pxor	0x00($key),@x[0]
+	pxor	0x10($key),@x[1]
+	pxor	0x20($key),@x[2]
+	pxor	0x30($key),@x[3]
+	pshufb	$mask,@x[0]
+	pshufb	$mask,@x[1]
+	pxor	0x40($key),@x[4]
+	pxor	0x50($key),@x[5]
+	pshufb	$mask,@x[2]
+	pshufb	$mask,@x[3]
+	pxor	0x60($key),@x[6]
+	pxor	0x70($key),@x[7]
+	pshufb	$mask,@x[4]
+	pshufb	$mask,@x[5]
+	pshufb	$mask,@x[6]
+	pshufb	$mask,@x[7]
+	lea	0x80($key),$key
+___
+}
+
+sub MixColumns {
+# modified to emit output in order suitable for feeding back to aesenc[last]
+my @x=@_[0..7];
+my @t=@_[8..15];
+my $inv=@_[16];	# optional
+$code.=<<___;
+	pshufd	\$0x93, @x[0], @t[0]	# x0 <<< 32
+	pshufd	\$0x93, @x[1], @t[1]
+	 pxor	@t[0], @x[0]		# x0 ^ (x0 <<< 32)
+	pshufd	\$0x93, @x[2], @t[2]
+	 pxor	@t[1], @x[1]
+	pshufd	\$0x93, @x[3], @t[3]
+	 pxor	@t[2], @x[2]
+	pshufd	\$0x93, @x[4], @t[4]
+	 pxor	@t[3], @x[3]
+	pshufd	\$0x93, @x[5], @t[5]
+	 pxor	@t[4], @x[4]
+	pshufd	\$0x93, @x[6], @t[6]
+	 pxor	@t[5], @x[5]
+	pshufd	\$0x93, @x[7], @t[7]
+	 pxor	@t[6], @x[6]
+	 pxor	@t[7], @x[7]
+
+	pxor	@x[0], @t[1]
+	pxor	@x[7], @t[0]
+	pxor	@x[7], @t[1]
+	 pshufd	\$0x4E, @x[0], @x[0] 	# (x0 ^ (x0 <<< 32)) <<< 64)
+	pxor	@x[1], @t[2]
+	 pshufd	\$0x4E, @x[1], @x[1]
+	pxor	@x[4], @t[5]
+	 pxor	@t[0], @x[0]
+	pxor	@x[5], @t[6]
+	 pxor	@t[1], @x[1]
+	pxor	@x[3], @t[4]
+	 pshufd	\$0x4E, @x[4], @t[0]
+	pxor	@x[6], @t[7]
+	 pshufd	\$0x4E, @x[5], @t[1]
+	pxor	@x[2], @t[3]
+	 pshufd	\$0x4E, @x[3], @x[4]
+	pxor	@x[7], @t[3]
+	 pshufd	\$0x4E, @x[7], @x[5]
+	pxor	@x[7], @t[4]
+	 pshufd	\$0x4E, @x[6], @x[3]
+	pxor	@t[4], @t[0]
+	 pshufd	\$0x4E, @x[2], @x[6]
+	pxor	@t[5], @t[1]
+___
+$code.=<<___ if (!$inv);
+	pxor	@t[3], @x[4]
+	pxor	@t[7], @x[5]
+	pxor	@t[6], @x[3]
+	 movdqa	@t[0], @x[2]
+	pxor	@t[2], @x[6]
+	 movdqa	@t[1], @x[7]
+___
+$code.=<<___ if ($inv);
+	pxor	@x[4], @t[3]
+	pxor	@t[7], @x[5]
+	pxor	@x[3], @t[6]
+	 movdqa	@t[0], @x[3]
+	pxor	@t[2], @x[6]
+	 movdqa	@t[6], @x[2]
+	 movdqa	@t[1], @x[7]
+	 movdqa	@x[6], @x[4]
+	 movdqa	@t[3], @x[6]
+___
+}
+
+sub InvMixColumns_orig {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+$code.=<<___;
+	# multiplication by 0x0e
+	pshufd	\$0x93, @x[7], @t[7]
+	movdqa	@x[2], @t[2]
+	pxor	@x[5], @x[7]		# 7 5
+	pxor	@x[5], @x[2]		# 2 5
+	pshufd	\$0x93, @x[0], @t[0]
+	movdqa	@x[5], @t[5]
+	pxor	@x[0], @x[5]		# 5 0		[1]
+	pxor	@x[1], @x[0]		# 0 1
+	pshufd	\$0x93, @x[1], @t[1]
+	pxor	@x[2], @x[1]		# 1 25
+	pxor	@x[6], @x[0]		# 01 6		[2]
+	pxor	@x[3], @x[1]		# 125 3		[4]
+	pshufd	\$0x93, @x[3], @t[3]
+	pxor	@x[0], @x[2]		# 25 016	[3]
+	pxor	@x[7], @x[3]		# 3 75
+	pxor	@x[6], @x[7]		# 75 6		[0]
+	pshufd	\$0x93, @x[6], @t[6]
+	movdqa	@x[4], @t[4]
+	pxor	@x[4], @x[6]		# 6 4
+	pxor	@x[3], @x[4]		# 4 375		[6]
+	pxor	@x[7], @x[3]		# 375 756=36
+	pxor	@t[5], @x[6]		# 64 5		[7]
+	pxor	@t[2], @x[3]		# 36 2
+	pxor	@t[4], @x[3]		# 362 4		[5]
+	pshufd	\$0x93, @t[5], @t[5]
+___
+					my @y = @x[7,5,0,2,1,3,4,6];
+$code.=<<___;
+	# multiplication by 0x0b
+	pxor	@y[0], @y[1]
+	pxor	@t[0], @y[0]
+	pxor	@t[1], @y[1]
+	pshufd	\$0x93, @t[2], @t[2]
+	pxor	@t[5], @y[0]
+	pxor	@t[6], @y[1]
+	pxor	@t[7], @y[0]
+	pshufd	\$0x93, @t[4], @t[4]
+	pxor	@t[6], @t[7]		# clobber t[7]
+	pxor	@y[0], @y[1]
+
+	pxor	@t[0], @y[3]
+	pshufd	\$0x93, @t[0], @t[0]
+	pxor	@t[1], @y[2]
+	pxor	@t[1], @y[4]
+	pxor	@t[2], @y[2]
+	pshufd	\$0x93, @t[1], @t[1]
+	pxor	@t[2], @y[3]
+	pxor	@t[2], @y[5]
+	pxor	@t[7], @y[2]
+	pshufd	\$0x93, @t[2], @t[2]
+	pxor	@t[3], @y[3]
+	pxor	@t[3], @y[6]
+	pxor	@t[3], @y[4]
+	pshufd	\$0x93, @t[3], @t[3]
+	pxor	@t[4], @y[7]
+	pxor	@t[4], @y[5]
+	pxor	@t[7], @y[7]
+	pxor	@t[5], @y[3]
+	pxor	@t[4], @y[4]
+	pxor	@t[5], @t[7]		# clobber t[7] even more
+
+	pxor	@t[7], @y[5]
+	pshufd	\$0x93, @t[4], @t[4]
+	pxor	@t[7], @y[6]
+	pxor	@t[7], @y[4]
+
+	pxor	@t[5], @t[7]
+	pshufd	\$0x93, @t[5], @t[5]
+	pxor	@t[6], @t[7]		# restore t[7]
+
+	# multiplication by 0x0d
+	pxor	@y[7], @y[4]
+	pxor	@t[4], @y[7]
+	pshufd	\$0x93, @t[6], @t[6]
+	pxor	@t[0], @y[2]
+	pxor	@t[5], @y[7]
+	pxor	@t[2], @y[2]
+	pshufd	\$0x93, @t[7], @t[7]
+
+	pxor	@y[1], @y[3]
+	pxor	@t[1], @y[1]
+	pxor	@t[0], @y[0]
+	pxor	@t[0], @y[3]
+	pxor	@t[5], @y[1]
+	pxor	@t[5], @y[0]
+	pxor	@t[7], @y[1]
+	pshufd	\$0x93, @t[0], @t[0]
+	pxor	@t[6], @y[0]
+	pxor	@y[1], @y[3]
+	pxor	@t[1], @y[4]
+	pshufd	\$0x93, @t[1], @t[1]
+
+	pxor	@t[7], @y[7]
+	pxor	@t[2], @y[4]
+	pxor	@t[2], @y[5]
+	pshufd	\$0x93, @t[2], @t[2]
+	pxor	@t[6], @y[2]
+	pxor	@t[3], @t[6]		# clobber t[6]
+	pxor	@y[7], @y[4]
+	pxor	@t[6], @y[3]
+
+	pxor	@t[6], @y[6]
+	pxor	@t[5], @y[5]
+	pxor	@t[4], @y[6]
+	pshufd	\$0x93, @t[4], @t[4]
+	pxor	@t[6], @y[5]
+	pxor	@t[7], @y[6]
+	pxor	@t[3], @t[6]		# restore t[6]
+
+	pshufd	\$0x93, @t[5], @t[5]
+	pshufd	\$0x93, @t[6], @t[6]
+	pshufd	\$0x93, @t[7], @t[7]
+	pshufd	\$0x93, @t[3], @t[3]
+
+	# multiplication by 0x09
+	pxor	@y[1], @y[4]
+	pxor	@y[1], @t[1]		# t[1]=y[1]
+	pxor	@t[5], @t[0]		# clobber t[0]
+	pxor	@t[5], @t[1]
+	pxor	@t[0], @y[3]
+	pxor	@y[0], @t[0]		# t[0]=y[0]
+	pxor	@t[6], @t[1]
+	pxor	@t[7], @t[6]		# clobber t[6]
+	pxor	@t[1], @y[4]
+	pxor	@t[4], @y[7]
+	pxor	@y[4], @t[4]		# t[4]=y[4]
+	pxor	@t[3], @y[6]
+	pxor	@y[3], @t[3]		# t[3]=y[3]
+	pxor	@t[2], @y[5]
+	pxor	@y[2], @t[2]		# t[2]=y[2]
+	pxor	@t[7], @t[3]
+	pxor	@y[5], @t[5]		# t[5]=y[5]
+	pxor	@t[6], @t[2]
+	pxor	@t[6], @t[5]
+	pxor	@y[6], @t[6]		# t[6]=y[6]
+	pxor	@y[7], @t[7]		# t[7]=y[7]
+
+	movdqa	@t[0],@XMM[0]
+	movdqa	@t[1],@XMM[1]
+	movdqa	@t[2],@XMM[2]
+	movdqa	@t[3],@XMM[3]
+	movdqa	@t[4],@XMM[4]
+	movdqa	@t[5],@XMM[5]
+	movdqa	@t[6],@XMM[6]
+	movdqa	@t[7],@XMM[7]
+___
+}
+
+sub InvMixColumns {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+# Thanks to Jussi Kivilinna for providing pointer to
+#
+# | 0e 0b 0d 09 |   | 02 03 01 01 |   | 05 00 04 00 |
+# | 09 0e 0b 0d | = | 01 02 03 01 | x | 00 05 00 04 |
+# | 0d 09 0e 0b |   | 01 01 02 03 |   | 04 00 05 00 |
+# | 0b 0d 09 0e |   | 03 01 01 02 |   | 00 04 00 05 |
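+# The right-hand factor is the sparse circulant (05,00,04,00); in the
+# bit-sliced layout it amounts to t = x ^ (x <<< 64) style operations,
+# since a pshufd with 0x4E rotates a slice by two 32-bit columns.
+# MixColumns is then reused on the result (with registers flipped below).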
+
+$code.=<<___;
+	# multiplication by 0x05-0x00-0x04-0x00
+	pshufd	\$0x4E, @x[0], @t[0]
+	pshufd	\$0x4E, @x[6], @t[6]
+	pxor	@x[0], @t[0]
+	pshufd	\$0x4E, @x[7], @t[7]
+	pxor	@x[6], @t[6]
+	pshufd	\$0x4E, @x[1], @t[1]
+	pxor	@x[7], @t[7]
+	pshufd	\$0x4E, @x[2], @t[2]
+	pxor	@x[1], @t[1]
+	pshufd	\$0x4E, @x[3], @t[3]
+	pxor	@x[2], @t[2]
+	 pxor	@t[6], @x[0]
+	 pxor	@t[6], @x[1]
+	pshufd	\$0x4E, @x[4], @t[4]
+	pxor	@x[3], @t[3]
+	 pxor	@t[0], @x[2]
+	 pxor	@t[1], @x[3]
+	pshufd	\$0x4E, @x[5], @t[5]
+	pxor	@x[4], @t[4]
+	 pxor	@t[7], @x[1]
+	 pxor	@t[2], @x[4]
+	pxor	@x[5], @t[5]
+
+	 pxor	@t[7], @x[2]
+	 pxor	@t[6], @x[3]
+	 pxor	@t[6], @x[4]
+	 pxor	@t[3], @x[5]
+	 pxor	@t[4], @x[6]
+	 pxor	@t[7], @x[4]
+	 pxor	@t[7], @x[5]
+	 pxor	@t[5], @x[7]
+___
+	&MixColumns	(@x,@t,1);	# flipped 2<->3 and 4<->6
+}
+
+sub aesenc {				# not used
+my @b=@_[0..7];
+my @t=@_[8..15];
+$code.=<<___;
+	movdqa	0x30($const),@t[0]	# .LSR
+___
+	&ShiftRows	(@b,@t[0]);
+	&Sbox		(@b,@t);
+	&MixColumns	(@b[0,1,4,6,3,7,2,5],@t);
+}
+
+sub aesenclast {			# not used
+my @b=@_[0..7];
+my @t=@_[8..15];
+$code.=<<___;
+	movdqa	0x40($const),@t[0]	# .LSRM0
+___
+	&ShiftRows	(@b,@t[0]);
+	&Sbox		(@b,@t);
+$code.=<<___;
+	pxor	0x00($key),@b[0]
+	pxor	0x10($key),@b[1]
+	pxor	0x20($key),@b[4]
+	pxor	0x30($key),@b[6]
+	pxor	0x40($key),@b[3]
+	pxor	0x50($key),@b[7]
+	pxor	0x60($key),@b[2]
+	pxor	0x70($key),@b[5]
+___
+}
+
+sub swapmove {
+my ($a,$b,$n,$mask,$t)=@_;
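+# Classic delta-swap: t = (a ^ (b >> n)) & mask; a ^= t; b ^= t << n.
+# It exchanges the bits of $a selected by $mask with the bits of $b at
+# positions $mask << $n, using $t as scratch.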
+$code.=<<___;
+	movdqa	$b,$t
+	psrlq	\$$n,$b
+	pxor  	$a,$b
+	pand	$mask,$b
+	pxor	$b,$a
+	psllq	\$$n,$b
+	pxor	$t,$b
+___
+}
+sub swapmove2x {
+my ($a0,$b0,$a1,$b1,$n,$mask,$t0,$t1)=@_;
+$code.=<<___;
+	movdqa	$b0,$t0
+	psrlq	\$$n,$b0
+	 movdqa	$b1,$t1
+	 psrlq	\$$n,$b1
+	pxor  	$a0,$b0
+	 pxor  	$a1,$b1
+	pand	$mask,$b0
+	 pand	$mask,$b1
+	pxor	$b0,$a0
+	psllq	\$$n,$b0
+	 pxor	$b1,$a1
+	 psllq	\$$n,$b1
+	pxor	$t0,$b0
+	 pxor	$t1,$b1
+___
+}
+
+sub bitslice {
+my @x=reverse(@_[0..7]);
+my ($t0,$t1,$t2,$t3)=@_[8..11];
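+# In effect, the three delta-swap passes at strides 1, 2 and 4 transpose
+# the 8x8 bit matrix formed by corresponding bytes of the eight input
+# registers, leaving register i holding bit i of every byte (the
+# bit-sliced representation).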
+$code.=<<___;
+	movdqa	0x00($const),$t0	# .LBS0
+	movdqa	0x10($const),$t1	# .LBS1
+___
+	&swapmove2x(@x[0,1,2,3],1,$t0,$t2,$t3);
+	&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+$code.=<<___;
+	movdqa	0x20($const),$t0	# .LBS2
+___
+	&swapmove2x(@x[0,2,1,3],2,$t1,$t2,$t3);
+	&swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+
+	&swapmove2x(@x[0,4,1,5],4,$t0,$t2,$t3);
+	&swapmove2x(@x[2,6,3,7],4,$t0,$t2,$t3);
+}
+
+$code.=<<___;
+.text
+
+.extern	asm_AES_encrypt
+.extern	asm_AES_decrypt
+
+.type	_bsaes_encrypt8,\@abi-omnipotent
+.align	64
+_bsaes_encrypt8:
+	lea	.LBS0(%rip), $const	# constants table
+
+	movdqa	($key), @XMM[9]		# round 0 key
+	lea	0x10($key), $key
+	movdqa	0x50($const), @XMM[8]	# .LM0SR
+	pxor	@XMM[9], @XMM[0]	# xor with round0 key
+	pxor	@XMM[9], @XMM[1]
+	pxor	@XMM[9], @XMM[2]
+	pxor	@XMM[9], @XMM[3]
+	 pshufb	@XMM[8], @XMM[0]
+	 pshufb	@XMM[8], @XMM[1]
+	pxor	@XMM[9], @XMM[4]
+	pxor	@XMM[9], @XMM[5]
+	 pshufb	@XMM[8], @XMM[2]
+	 pshufb	@XMM[8], @XMM[3]
+	pxor	@XMM[9], @XMM[6]
+	pxor	@XMM[9], @XMM[7]
+	 pshufb	@XMM[8], @XMM[4]
+	 pshufb	@XMM[8], @XMM[5]
+	 pshufb	@XMM[8], @XMM[6]
+	 pshufb	@XMM[8], @XMM[7]
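+	# (.LM0SR appears to combine the bit-slice byte ordering .LM0 with
+	# the first round's ShiftRows, saving one shiftrows() per call; cf.
+	# the header notes.)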
+_bsaes_encrypt8_bitslice:
+___
+	&bitslice	(@XMM[0..7, 8..11]);
+$code.=<<___;
+	dec	$rounds
+	jmp	.Lenc_sbox
+.align	16
+.Lenc_loop:
+___
+	&ShiftRows	(@XMM[0..7, 8]);
+$code.=".Lenc_sbox:\n";
+	&Sbox		(@XMM[0..7, 8..15]);
+$code.=<<___;
+	dec	$rounds
+	jl	.Lenc_done
+___
+	&MixColumns	(@XMM[0,1,4,6,3,7,2,5, 8..15]);
+$code.=<<___;
+	movdqa	0x30($const), @XMM[8]	# .LSR
+	jnz	.Lenc_loop
+	movdqa	0x40($const), @XMM[8]	# .LSRM0
+	jmp	.Lenc_loop
+.align	16
+.Lenc_done:
+___
+	# output in lsb > [t0, t1, t4, t6, t3, t7, t2, t5] < msb
+	&bitslice	(@XMM[0,1,4,6,3,7,2,5, 8..11]);
+$code.=<<___;
+	movdqa	($key), @XMM[8]		# last round key
+	pxor	@XMM[8], @XMM[4]
+	pxor	@XMM[8], @XMM[6]
+	pxor	@XMM[8], @XMM[3]
+	pxor	@XMM[8], @XMM[7]
+	pxor	@XMM[8], @XMM[2]
+	pxor	@XMM[8], @XMM[5]
+	pxor	@XMM[8], @XMM[0]
+	pxor	@XMM[8], @XMM[1]
+	ret
+.size	_bsaes_encrypt8,.-_bsaes_encrypt8
+
+.type	_bsaes_decrypt8,\@abi-omnipotent
+.align	64
+_bsaes_decrypt8:
+	lea	.LBS0(%rip), $const	# constants table
+
+	movdqa	($key), @XMM[9]		# round 0 key
+	lea	0x10($key), $key
+	movdqa	-0x30($const), @XMM[8]	# .LM0ISR
+	pxor	@XMM[9], @XMM[0]	# xor with round0 key
+	pxor	@XMM[9], @XMM[1]
+	pxor	@XMM[9], @XMM[2]
+	pxor	@XMM[9], @XMM[3]
+	 pshufb	@XMM[8], @XMM[0]
+	 pshufb	@XMM[8], @XMM[1]
+	pxor	@XMM[9], @XMM[4]
+	pxor	@XMM[9], @XMM[5]
+	 pshufb	@XMM[8], @XMM[2]
+	 pshufb	@XMM[8], @XMM[3]
+	pxor	@XMM[9], @XMM[6]
+	pxor	@XMM[9], @XMM[7]
+	 pshufb	@XMM[8], @XMM[4]
+	 pshufb	@XMM[8], @XMM[5]
+	 pshufb	@XMM[8], @XMM[6]
+	 pshufb	@XMM[8], @XMM[7]
+___
+	&bitslice	(@XMM[0..7, 8..11]);
+$code.=<<___;
+	dec	$rounds
+	jmp	.Ldec_sbox
+.align	16
+.Ldec_loop:
+___
+	&ShiftRows	(@XMM[0..7, 8]);
+$code.=".Ldec_sbox:\n";
+	&InvSbox	(@XMM[0..7, 8..15]);
+$code.=<<___;
+	dec	$rounds
+	jl	.Ldec_done
+___
+	&InvMixColumns	(@XMM[0,1,6,4,2,7,3,5, 8..15]);
+$code.=<<___;
+	movdqa	-0x10($const), @XMM[8]	# .LISR
+	jnz	.Ldec_loop
+	movdqa	-0x20($const), @XMM[8]	# .LISRM0
+	jmp	.Ldec_loop
+.align	16
+.Ldec_done:
+___
+	&bitslice	(@XMM[0,1,6,4,2,7,3,5, 8..11]);
+$code.=<<___;
+	movdqa	($key), @XMM[8]		# last round key
+	pxor	@XMM[8], @XMM[6]
+	pxor	@XMM[8], @XMM[4]
+	pxor	@XMM[8], @XMM[2]
+	pxor	@XMM[8], @XMM[7]
+	pxor	@XMM[8], @XMM[3]
+	pxor	@XMM[8], @XMM[5]
+	pxor	@XMM[8], @XMM[0]
+	pxor	@XMM[8], @XMM[1]
+	ret
+.size	_bsaes_decrypt8,.-_bsaes_decrypt8
+___
+}
+{
+my ($out,$inp,$rounds,$const)=("%rax","%rcx","%r10d","%r11");
+
+sub bitslice_key {
+my @x=reverse(@_[0..7]);
+my ($bs0,$bs1,$bs2,$t2,$t3)=@_[8..12];
+
+	&swapmove	(@x[0,1],1,$bs0,$t2,$t3);
+$code.=<<___;
+	#&swapmove(@x[2,3],1,$t0,$t2,$t3);
+	movdqa	@x[0], @x[2]
+	movdqa	@x[1], @x[3]
+___
+	#&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+
+	&swapmove2x	(@x[0,2,1,3],2,$bs1,$t2,$t3);
+$code.=<<___;
+	#&swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+	movdqa	@x[0], @x[4]
+	movdqa	@x[2], @x[6]
+	movdqa	@x[1], @x[5]
+	movdqa	@x[3], @x[7]
+___
+	&swapmove2x	(@x[0,4,1,5],4,$bs2,$t2,$t3);
+	&swapmove2x	(@x[2,6,3,7],4,$bs2,$t2,$t3);
+}
+
+$code.=<<___;
+.type	_bsaes_key_convert,\@abi-omnipotent
+.align	16
+_bsaes_key_convert:
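+	# Expand each round key into eight bit-sliced mask registers: pand
+	# with the 0x01..0x80 masks isolates bit i of every byte and pcmpeqb
+	# broadcasts it to a full byte.  Slices 0, 1, 5 and 6 are
+	# complemented ("pnot"), folding the S-box affine constant 0x63
+	# into the round keys.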
+	lea	.Lmasks(%rip), $const
+	movdqu	($inp), %xmm7		# load round 0 key
+	lea	0x10($inp), $inp
+	movdqa	0x00($const), %xmm0	# 0x01...
+	movdqa	0x10($const), %xmm1	# 0x02...
+	movdqa	0x20($const), %xmm2	# 0x04...
+	movdqa	0x30($const), %xmm3	# 0x08...
+	movdqa	0x40($const), %xmm4	# .LM0
+	pcmpeqd	%xmm5, %xmm5		# .LNOT
+
+	movdqu	($inp), %xmm6		# load round 1 key
+	movdqa	%xmm7, ($out)		# save round 0 key
+	lea	0x10($out), $out
+	dec	$rounds
+	jmp	.Lkey_loop
+.align	16
+.Lkey_loop:
+	pshufb	%xmm4, %xmm6		# .LM0
+
+	movdqa	%xmm0,	%xmm8
+	movdqa	%xmm1,	%xmm9
+
+	pand	%xmm6,	%xmm8
+	pand	%xmm6,	%xmm9
+	movdqa	%xmm2,	%xmm10
+	pcmpeqb	%xmm0,	%xmm8
+	psllq	\$4,	%xmm0		# 0x10...
+	movdqa	%xmm3,	%xmm11
+	pcmpeqb	%xmm1,	%xmm9
+	psllq	\$4,	%xmm1		# 0x20...
+
+	pand	%xmm6,	%xmm10
+	pand	%xmm6,	%xmm11
+	movdqa	%xmm0,	%xmm12
+	pcmpeqb	%xmm2,	%xmm10
+	psllq	\$4,	%xmm2		# 0x40...
+	movdqa	%xmm1,	%xmm13
+	pcmpeqb	%xmm3,	%xmm11
+	psllq	\$4,	%xmm3		# 0x80...
+
+	movdqa	%xmm2,	%xmm14
+	movdqa	%xmm3,	%xmm15
+	 pxor	%xmm5,	%xmm8		# "pnot"
+	 pxor	%xmm5,	%xmm9
+
+	pand	%xmm6,	%xmm12
+	pand	%xmm6,	%xmm13
+	 movdqa	%xmm8, 0x00($out)	# write bit-sliced round key
+	pcmpeqb	%xmm0,	%xmm12
+	psrlq	\$4,	%xmm0		# 0x01...
+	 movdqa	%xmm9, 0x10($out)
+	pcmpeqb	%xmm1,	%xmm13
+	psrlq	\$4,	%xmm1		# 0x02...
+	 lea	0x10($inp), $inp
+
+	pand	%xmm6,	%xmm14
+	pand	%xmm6,	%xmm15
+	 movdqa	%xmm10, 0x20($out)
+	pcmpeqb	%xmm2,	%xmm14
+	psrlq	\$4,	%xmm2		# 0x04...
+	 movdqa	%xmm11, 0x30($out)
+	pcmpeqb	%xmm3,	%xmm15
+	psrlq	\$4,	%xmm3		# 0x08...
+	 movdqu	($inp), %xmm6		# load next round key
+
+	pxor	%xmm5, %xmm13		# "pnot"
+	pxor	%xmm5, %xmm14
+	movdqa	%xmm12, 0x40($out)
+	movdqa	%xmm13, 0x50($out)
+	movdqa	%xmm14, 0x60($out)
+	movdqa	%xmm15, 0x70($out)
+	lea	0x80($out),$out
+	dec	$rounds
+	jnz	.Lkey_loop
+
+	movdqa	0x50($const), %xmm7	# .L63
+	#movdqa	%xmm6, ($out)		# don't save last round key
+	ret
+.size	_bsaes_key_convert,.-_bsaes_key_convert
+___
+}
+
+if (0 && !$win64) {	# the following four functions are an unsupported
+			# interface used for benchmarking...
+$code.=<<___;
+.globl	bsaes_enc_key_convert
+.type	bsaes_enc_key_convert,\@function,2
+.align	16
+bsaes_enc_key_convert:
+	mov	240($inp),%r10d		# pass rounds
+	mov	$inp,%rcx		# pass key
+	mov	$out,%rax		# pass key schedule
+	call	_bsaes_key_convert
+	pxor	%xmm6,%xmm7		# fix up last round key
+	movdqa	%xmm7,(%rax)		# save last round key
+	ret
+.size	bsaes_enc_key_convert,.-bsaes_enc_key_convert
+
+.globl	bsaes_encrypt_128
+.type	bsaes_encrypt_128,\@function,4
+.align	16
+bsaes_encrypt_128:
+.Lenc128_loop:
+	movdqu	0x00($inp), @XMM[0]	# load input
+	movdqu	0x10($inp), @XMM[1]
+	movdqu	0x20($inp), @XMM[2]
+	movdqu	0x30($inp), @XMM[3]
+	movdqu	0x40($inp), @XMM[4]
+	movdqu	0x50($inp), @XMM[5]
+	movdqu	0x60($inp), @XMM[6]
+	movdqu	0x70($inp), @XMM[7]
+	mov	$key, %rax		# pass the $key
+	lea	0x80($inp), $inp
+	mov	\$10,%r10d
+
+	call	_bsaes_encrypt8
+
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[4], 0x20($out)
+	movdqu	@XMM[6], 0x30($out)
+	movdqu	@XMM[3], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	movdqu	@XMM[2], 0x60($out)
+	movdqu	@XMM[5], 0x70($out)
+	lea	0x80($out), $out
+	sub	\$0x80,$len
+	ja	.Lenc128_loop
+	ret
+.size	bsaes_encrypt_128,.-bsaes_encrypt_128
+
+.globl	bsaes_dec_key_convert
+.type	bsaes_dec_key_convert,\@function,2
+.align	16
+bsaes_dec_key_convert:
+	mov	240($inp),%r10d		# pass rounds
+	mov	$inp,%rcx		# pass key
+	mov	$out,%rax		# pass key schedule
+	call	_bsaes_key_convert
+	pxor	($out),%xmm7		# fix up round 0 key
+	movdqa	%xmm6,(%rax)		# save last round key
+	movdqa	%xmm7,($out)
+	ret
+.size	bsaes_dec_key_convert,.-bsaes_dec_key_convert
+
+.globl	bsaes_decrypt_128
+.type	bsaes_decrypt_128,\@function,4
+.align	16
+bsaes_decrypt_128:
+.Ldec128_loop:
+	movdqu	0x00($inp), @XMM[0]	# load input
+	movdqu	0x10($inp), @XMM[1]
+	movdqu	0x20($inp), @XMM[2]
+	movdqu	0x30($inp), @XMM[3]
+	movdqu	0x40($inp), @XMM[4]
+	movdqu	0x50($inp), @XMM[5]
+	movdqu	0x60($inp), @XMM[6]
+	movdqu	0x70($inp), @XMM[7]
+	mov	$key, %rax		# pass the $key
+	lea	0x80($inp), $inp
+	mov	\$10,%r10d
+
+	call	_bsaes_decrypt8
+
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	movdqu	@XMM[2], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	movdqu	@XMM[3], 0x60($out)
+	movdqu	@XMM[5], 0x70($out)
+	lea	0x80($out), $out
+	sub	\$0x80,$len
+	ja	.Ldec128_loop
+	ret
+.size	bsaes_decrypt_128,.-bsaes_decrypt_128
+___
+}
+{
+######################################################################
+#
+# OpenSSL interface
+#
+my ($arg1,$arg2,$arg3,$arg4,$arg5,$arg6)=$win64	? ("%rcx","%rdx","%r8","%r9","%r10","%r11d")
+						: ("%rdi","%rsi","%rdx","%rcx","%r8","%r9d");
+my ($inp,$out,$len,$key)=("%r12","%r13","%r14","%r15");
+
+if ($ecb) {
+$code.=<<___;
+.globl	bsaes_ecb_encrypt_blocks
+.type	bsaes_ecb_encrypt_blocks,\@abi-omnipotent
+.align	16
+bsaes_ecb_encrypt_blocks:
+	mov	%rsp, %rax
+.Lecb_enc_prologue:
+	push	%rbp
+	push	%rbx
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	lea	-0x48(%rsp),%rsp
+___
+$code.=<<___ if ($win64);
+	lea	-0xa0(%rsp), %rsp
+	movaps	%xmm6, 0x40(%rsp)
+	movaps	%xmm7, 0x50(%rsp)
+	movaps	%xmm8, 0x60(%rsp)
+	movaps	%xmm9, 0x70(%rsp)
+	movaps	%xmm10, 0x80(%rsp)
+	movaps	%xmm11, 0x90(%rsp)
+	movaps	%xmm12, 0xa0(%rsp)
+	movaps	%xmm13, 0xb0(%rsp)
+	movaps	%xmm14, 0xc0(%rsp)
+	movaps	%xmm15, 0xd0(%rsp)
+.Lecb_enc_body:
+___
+$code.=<<___;
+	mov	%rsp,%rbp		# backup %rsp
+	mov	240($arg4),%eax		# rounds
+	mov	$arg1,$inp		# backup arguments
+	mov	$arg2,$out
+	mov	$arg3,$len
+	mov	$arg4,$key
+	cmp	\$8,$arg3
+	jb	.Lecb_enc_short
+
+	mov	%eax,%ebx		# backup rounds
+	shl	\$7,%rax		# 128 bytes per inner round key
+	sub	\$`128-32`,%rax		# size of bit-sliced key schedule
+	sub	%rax,%rsp
+	mov	%rsp,%rax		# pass key schedule
+	mov	$key,%rcx		# pass key
+	mov	%ebx,%r10d		# pass rounds
+	call	_bsaes_key_convert
+	pxor	%xmm6,%xmm7		# fix up last round key
+	movdqa	%xmm7,(%rax)		# save last round key
+
+	sub	\$8,$len
+.Lecb_enc_loop:
+	movdqu	0x00($inp), @XMM[0]	# load input
+	movdqu	0x10($inp), @XMM[1]
+	movdqu	0x20($inp), @XMM[2]
+	movdqu	0x30($inp), @XMM[3]
+	movdqu	0x40($inp), @XMM[4]
+	movdqu	0x50($inp), @XMM[5]
+	mov	%rsp, %rax		# pass key schedule
+	movdqu	0x60($inp), @XMM[6]
+	mov	%ebx,%r10d		# pass rounds
+	movdqu	0x70($inp), @XMM[7]
+	lea	0x80($inp), $inp
+
+	call	_bsaes_encrypt8
+
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[4], 0x20($out)
+	movdqu	@XMM[6], 0x30($out)
+	movdqu	@XMM[3], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	movdqu	@XMM[2], 0x60($out)
+	movdqu	@XMM[5], 0x70($out)
+	lea	0x80($out), $out
+	sub	\$8,$len
+	jnc	.Lecb_enc_loop
+
+	add	\$8,$len
+	jz	.Lecb_enc_done
+
+	movdqu	0x00($inp), @XMM[0]	# load input
+	mov	%rsp, %rax		# pass key schedule
+	mov	%ebx,%r10d		# pass rounds
+	cmp	\$2,$len
+	jb	.Lecb_enc_one
+	movdqu	0x10($inp), @XMM[1]
+	je	.Lecb_enc_two
+	movdqu	0x20($inp), @XMM[2]
+	cmp	\$4,$len
+	jb	.Lecb_enc_three
+	movdqu	0x30($inp), @XMM[3]
+	je	.Lecb_enc_four
+	movdqu	0x40($inp), @XMM[4]
+	cmp	\$6,$len
+	jb	.Lecb_enc_five
+	movdqu	0x50($inp), @XMM[5]
+	je	.Lecb_enc_six
+	movdqu	0x60($inp), @XMM[6]
+	call	_bsaes_encrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[4], 0x20($out)
+	movdqu	@XMM[6], 0x30($out)
+	movdqu	@XMM[3], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	movdqu	@XMM[2], 0x60($out)
+	jmp	.Lecb_enc_done
+.align	16
+.Lecb_enc_six:
+	call	_bsaes_encrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[4], 0x20($out)
+	movdqu	@XMM[6], 0x30($out)
+	movdqu	@XMM[3], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	jmp	.Lecb_enc_done
+.align	16
+.Lecb_enc_five:
+	call	_bsaes_encrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[4], 0x20($out)
+	movdqu	@XMM[6], 0x30($out)
+	movdqu	@XMM[3], 0x40($out)
+	jmp	.Lecb_enc_done
+.align	16
+.Lecb_enc_four:
+	call	_bsaes_encrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[4], 0x20($out)
+	movdqu	@XMM[6], 0x30($out)
+	jmp	.Lecb_enc_done
+.align	16
+.Lecb_enc_three:
+	call	_bsaes_encrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[4], 0x20($out)
+	jmp	.Lecb_enc_done
+.align	16
+.Lecb_enc_two:
+	call	_bsaes_encrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	jmp	.Lecb_enc_done
+.align	16
+.Lecb_enc_one:
+	call	_bsaes_encrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	jmp	.Lecb_enc_done
+.align	16
+.Lecb_enc_short:
+	lea	($inp), $arg1
+	lea	($out), $arg2
+	lea	($key), $arg3
+	call	asm_AES_encrypt
+	lea	16($inp), $inp
+	lea	16($out), $out
+	dec	$len
+	jnz	.Lecb_enc_short
+
+.Lecb_enc_done:
+	lea	(%rsp),%rax
+	pxor	%xmm0, %xmm0
+.Lecb_enc_bzero:			# wipe key schedule [if any]
+	movdqa	%xmm0, 0x00(%rax)
+	movdqa	%xmm0, 0x10(%rax)
+	lea	0x20(%rax), %rax
+	cmp	%rax, %rbp
+	jb	.Lecb_enc_bzero
+
+	lea	0x78(%rbp),%rax
+___
+$code.=<<___ if ($win64);
+	movaps	0x40(%rbp), %xmm6
+	movaps	0x50(%rbp), %xmm7
+	movaps	0x60(%rbp), %xmm8
+	movaps	0x70(%rbp), %xmm9
+	movaps	0x80(%rbp), %xmm10
+	movaps	0x90(%rbp), %xmm11
+	movaps	0xa0(%rbp), %xmm12
+	movaps	0xb0(%rbp), %xmm13
+	movaps	0xc0(%rbp), %xmm14
+	movaps	0xd0(%rbp), %xmm15
+	lea	0xa0(%rax), %rax
+.Lecb_enc_tail:
+___
+$code.=<<___;
+	mov	-48(%rax), %r15
+	mov	-40(%rax), %r14
+	mov	-32(%rax), %r13
+	mov	-24(%rax), %r12
+	mov	-16(%rax), %rbx
+	mov	-8(%rax), %rbp
+	lea	(%rax), %rsp		# restore %rsp
+.Lecb_enc_epilogue:
+	ret
+.size	bsaes_ecb_encrypt_blocks,.-bsaes_ecb_encrypt_blocks
+
+.globl	bsaes_ecb_decrypt_blocks
+.type	bsaes_ecb_decrypt_blocks,\@abi-omnipotent
+.align	16
+bsaes_ecb_decrypt_blocks:
+	mov	%rsp, %rax
+.Lecb_dec_prologue:
+	push	%rbp
+	push	%rbx
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	lea	-0x48(%rsp),%rsp
+___
+$code.=<<___ if ($win64);
+	lea	-0xa0(%rsp), %rsp
+	movaps	%xmm6, 0x40(%rsp)
+	movaps	%xmm7, 0x50(%rsp)
+	movaps	%xmm8, 0x60(%rsp)
+	movaps	%xmm9, 0x70(%rsp)
+	movaps	%xmm10, 0x80(%rsp)
+	movaps	%xmm11, 0x90(%rsp)
+	movaps	%xmm12, 0xa0(%rsp)
+	movaps	%xmm13, 0xb0(%rsp)
+	movaps	%xmm14, 0xc0(%rsp)
+	movaps	%xmm15, 0xd0(%rsp)
+.Lecb_dec_body:
+___
+$code.=<<___;
+	mov	%rsp,%rbp		# backup %rsp
+	mov	240($arg4),%eax		# rounds
+	mov	$arg1,$inp		# backup arguments
+	mov	$arg2,$out
+	mov	$arg3,$len
+	mov	$arg4,$key
+	cmp	\$8,$arg3
+	jb	.Lecb_dec_short
+
+	mov	%eax,%ebx		# backup rounds
+	shl	\$7,%rax		# 128 bytes per inner round key
+	sub	\$`128-32`,%rax		# size of bit-sliced key schedule
+	sub	%rax,%rsp
+	mov	%rsp,%rax		# pass key schedule
+	mov	$key,%rcx		# pass key
+	mov	%ebx,%r10d		# pass rounds
+	call	_bsaes_key_convert
+	pxor	(%rsp),%xmm7		# fix up 0 round key
+	movdqa	%xmm6,(%rax)		# save last round key
+	movdqa	%xmm7,(%rsp)
+
+	sub	\$8,$len
+.Lecb_dec_loop:
+	movdqu	0x00($inp), @XMM[0]	# load input
+	movdqu	0x10($inp), @XMM[1]
+	movdqu	0x20($inp), @XMM[2]
+	movdqu	0x30($inp), @XMM[3]
+	movdqu	0x40($inp), @XMM[4]
+	movdqu	0x50($inp), @XMM[5]
+	mov	%rsp, %rax		# pass key schedule
+	movdqu	0x60($inp), @XMM[6]
+	mov	%ebx,%r10d		# pass rounds
+	movdqu	0x70($inp), @XMM[7]
+	lea	0x80($inp), $inp
+
+	call	_bsaes_decrypt8
+
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	movdqu	@XMM[2], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	movdqu	@XMM[3], 0x60($out)
+	movdqu	@XMM[5], 0x70($out)
+	lea	0x80($out), $out
+	sub	\$8,$len
+	jnc	.Lecb_dec_loop
+
+	add	\$8,$len
+	jz	.Lecb_dec_done
+
+	movdqu	0x00($inp), @XMM[0]	# load input
+	mov	%rsp, %rax		# pass key schedule
+	mov	%ebx,%r10d		# pass rounds
+	cmp	\$2,$len
+	jb	.Lecb_dec_one
+	movdqu	0x10($inp), @XMM[1]
+	je	.Lecb_dec_two
+	movdqu	0x20($inp), @XMM[2]
+	cmp	\$4,$len
+	jb	.Lecb_dec_three
+	movdqu	0x30($inp), @XMM[3]
+	je	.Lecb_dec_four
+	movdqu	0x40($inp), @XMM[4]
+	cmp	\$6,$len
+	jb	.Lecb_dec_five
+	movdqu	0x50($inp), @XMM[5]
+	je	.Lecb_dec_six
+	movdqu	0x60($inp), @XMM[6]
+	call	_bsaes_decrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	movdqu	@XMM[2], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	movdqu	@XMM[3], 0x60($out)
+	jmp	.Lecb_dec_done
+.align	16
+.Lecb_dec_six:
+	call	_bsaes_decrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	movdqu	@XMM[2], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	jmp	.Lecb_dec_done
+.align	16
+.Lecb_dec_five:
+	call	_bsaes_decrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	movdqu	@XMM[2], 0x40($out)
+	jmp	.Lecb_dec_done
+.align	16
+.Lecb_dec_four:
+	call	_bsaes_decrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	jmp	.Lecb_dec_done
+.align	16
+.Lecb_dec_three:
+	call	_bsaes_decrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	jmp	.Lecb_dec_done
+.align	16
+.Lecb_dec_two:
+	call	_bsaes_decrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	jmp	.Lecb_dec_done
+.align	16
+.Lecb_dec_one:
+	call	_bsaes_decrypt8
+	movdqu	@XMM[0], 0x00($out)	# write output
+	jmp	.Lecb_dec_done
+.align	16
+.Lecb_dec_short:
+	lea	($inp), $arg1
+	lea	($out), $arg2
+	lea	($key), $arg3
+	call	asm_AES_decrypt
+	lea	16($inp), $inp
+	lea	16($out), $out
+	dec	$len
+	jnz	.Lecb_dec_short
+
+.Lecb_dec_done:
+	lea	(%rsp),%rax
+	pxor	%xmm0, %xmm0
+.Lecb_dec_bzero:			# wipe key schedule [if any]
+	movdqa	%xmm0, 0x00(%rax)
+	movdqa	%xmm0, 0x10(%rax)
+	lea	0x20(%rax), %rax
+	cmp	%rax, %rbp
+	jb	.Lecb_dec_bzero
+
+	lea	0x78(%rbp),%rax
+___
+$code.=<<___ if ($win64);
+	movaps	0x40(%rbp), %xmm6
+	movaps	0x50(%rbp), %xmm7
+	movaps	0x60(%rbp), %xmm8
+	movaps	0x70(%rbp), %xmm9
+	movaps	0x80(%rbp), %xmm10
+	movaps	0x90(%rbp), %xmm11
+	movaps	0xa0(%rbp), %xmm12
+	movaps	0xb0(%rbp), %xmm13
+	movaps	0xc0(%rbp), %xmm14
+	movaps	0xd0(%rbp), %xmm15
+	lea	0xa0(%rax), %rax
+.Lecb_dec_tail:
+___
+$code.=<<___;
+	mov	-48(%rax), %r15
+	mov	-40(%rax), %r14
+	mov	-32(%rax), %r13
+	mov	-24(%rax), %r12
+	mov	-16(%rax), %rbx
+	mov	-8(%rax), %rbp
+	lea	(%rax), %rsp		# restore %rsp
+.Lecb_dec_epilogue:
+	ret
+.size	bsaes_ecb_decrypt_blocks,.-bsaes_ecb_decrypt_blocks
+___
+}
+$code.=<<___;
+.extern	asm_AES_cbc_encrypt
+.globl	bsaes_cbc_encrypt
+.type	bsaes_cbc_encrypt,\@abi-omnipotent
+.align	16
+bsaes_cbc_encrypt:
+___
+$code.=<<___ if ($win64);
+	mov	48(%rsp),$arg6		# pull direction flag
+___
+$code.=<<___;
+	cmp	\$0,$arg6
+	jne	asm_AES_cbc_encrypt
+	cmp	\$128,$arg3
+	jb	asm_AES_cbc_encrypt
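+	# CBC encryption is inherently serial (each block depends on the
+	# previous ciphertext block), so it can't use the 8-way bit-sliced
+	# core; encryption and inputs shorter than 8 blocks are delegated
+	# to asm_AES_cbc_encrypt above.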
+
+	mov	%rsp, %rax
+.Lcbc_dec_prologue:
+	push	%rbp
+	push	%rbx
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	lea	-0x48(%rsp), %rsp
+___
+$code.=<<___ if ($win64);
+	mov	0xa0(%rsp),$arg5	# pull ivp
+	lea	-0xa0(%rsp), %rsp
+	movaps	%xmm6, 0x40(%rsp)
+	movaps	%xmm7, 0x50(%rsp)
+	movaps	%xmm8, 0x60(%rsp)
+	movaps	%xmm9, 0x70(%rsp)
+	movaps	%xmm10, 0x80(%rsp)
+	movaps	%xmm11, 0x90(%rsp)
+	movaps	%xmm12, 0xa0(%rsp)
+	movaps	%xmm13, 0xb0(%rsp)
+	movaps	%xmm14, 0xc0(%rsp)
+	movaps	%xmm15, 0xd0(%rsp)
+.Lcbc_dec_body:
+___
+$code.=<<___;
+	mov	%rsp, %rbp		# backup %rsp
+	mov	240($arg4), %eax	# rounds
+	mov	$arg1, $inp		# backup arguments
+	mov	$arg2, $out
+	mov	$arg3, $len
+	mov	$arg4, $key
+	mov	$arg5, %rbx
+	shr	\$4, $len		# bytes to blocks
+
+	mov	%eax, %edx		# rounds
+	shl	\$7, %rax		# 128 bytes per inner round key
+	sub	\$`128-32`, %rax	# size of bit-sliced key schedule
+	sub	%rax, %rsp
+
+	mov	%rsp, %rax		# pass key schedule
+	mov	$key, %rcx		# pass key
+	mov	%edx, %r10d		# pass rounds
+	call	_bsaes_key_convert
+	pxor	(%rsp),%xmm7		# fix up 0 round key
+	movdqa	%xmm6,(%rax)		# save last round key
+	movdqa	%xmm7,(%rsp)
+
+	movdqu	(%rbx), @XMM[15]	# load IV
+	sub	\$8,$len
+.Lcbc_dec_loop:
+	movdqu	0x00($inp), @XMM[0]	# load input
+	movdqu	0x10($inp), @XMM[1]
+	movdqu	0x20($inp), @XMM[2]
+	movdqu	0x30($inp), @XMM[3]
+	movdqu	0x40($inp), @XMM[4]
+	movdqu	0x50($inp), @XMM[5]
+	mov	%rsp, %rax		# pass key schedule
+	movdqu	0x60($inp), @XMM[6]
+	mov	%edx,%r10d		# pass rounds
+	movdqu	0x70($inp), @XMM[7]
+	movdqa	@XMM[15], 0x20(%rbp)	# put aside IV
+
+	call	_bsaes_decrypt8
+
+	pxor	0x20(%rbp), @XMM[0]	# ^= IV
+	movdqu	0x00($inp), @XMM[8]	# re-load input
+	movdqu	0x10($inp), @XMM[9]
+	pxor	@XMM[8], @XMM[1]
+	movdqu	0x20($inp), @XMM[10]
+	pxor	@XMM[9], @XMM[6]
+	movdqu	0x30($inp), @XMM[11]
+	pxor	@XMM[10], @XMM[4]
+	movdqu	0x40($inp), @XMM[12]
+	pxor	@XMM[11], @XMM[2]
+	movdqu	0x50($inp), @XMM[13]
+	pxor	@XMM[12], @XMM[7]
+	movdqu	0x60($inp), @XMM[14]
+	pxor	@XMM[13], @XMM[3]
+	movdqu	0x70($inp), @XMM[15]	# IV
+	pxor	@XMM[14], @XMM[5]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	lea	0x80($inp), $inp
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	movdqu	@XMM[2], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	movdqu	@XMM[3], 0x60($out)
+	movdqu	@XMM[5], 0x70($out)
+	lea	0x80($out), $out
+	sub	\$8,$len
+	jnc	.Lcbc_dec_loop
+
+	add	\$8,$len
+	jz	.Lcbc_dec_done
+
+	movdqu	0x00($inp), @XMM[0]	# load input
+	mov	%rsp, %rax		# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+	cmp	\$2,$len
+	jb	.Lcbc_dec_one
+	movdqu	0x10($inp), @XMM[1]
+	je	.Lcbc_dec_two
+	movdqu	0x20($inp), @XMM[2]
+	cmp	\$4,$len
+	jb	.Lcbc_dec_three
+	movdqu	0x30($inp), @XMM[3]
+	je	.Lcbc_dec_four
+	movdqu	0x40($inp), @XMM[4]
+	cmp	\$6,$len
+	jb	.Lcbc_dec_five
+	movdqu	0x50($inp), @XMM[5]
+	je	.Lcbc_dec_six
+	movdqu	0x60($inp), @XMM[6]
+	movdqa	@XMM[15], 0x20(%rbp)	# put aside IV
+	call	_bsaes_decrypt8
+	pxor	0x20(%rbp), @XMM[0]	# ^= IV
+	movdqu	0x00($inp), @XMM[8]	# re-load input
+	movdqu	0x10($inp), @XMM[9]
+	pxor	@XMM[8], @XMM[1]
+	movdqu	0x20($inp), @XMM[10]
+	pxor	@XMM[9], @XMM[6]
+	movdqu	0x30($inp), @XMM[11]
+	pxor	@XMM[10], @XMM[4]
+	movdqu	0x40($inp), @XMM[12]
+	pxor	@XMM[11], @XMM[2]
+	movdqu	0x50($inp), @XMM[13]
+	pxor	@XMM[12], @XMM[7]
+	movdqu	0x60($inp), @XMM[15]	# IV
+	pxor	@XMM[13], @XMM[3]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	movdqu	@XMM[2], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	movdqu	@XMM[3], 0x60($out)
+	jmp	.Lcbc_dec_done
+.align	16
+.Lcbc_dec_six:
+	movdqa	@XMM[15], 0x20(%rbp)	# put aside IV
+	call	_bsaes_decrypt8
+	pxor	0x20(%rbp), @XMM[0]	# ^= IV
+	movdqu	0x00($inp), @XMM[8]	# re-load input
+	movdqu	0x10($inp), @XMM[9]
+	pxor	@XMM[8], @XMM[1]
+	movdqu	0x20($inp), @XMM[10]
+	pxor	@XMM[9], @XMM[6]
+	movdqu	0x30($inp), @XMM[11]
+	pxor	@XMM[10], @XMM[4]
+	movdqu	0x40($inp), @XMM[12]
+	pxor	@XMM[11], @XMM[2]
+	movdqu	0x50($inp), @XMM[15]	# IV
+	pxor	@XMM[12], @XMM[7]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	movdqu	@XMM[2], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	jmp	.Lcbc_dec_done
+.align	16
+.Lcbc_dec_five:
+	movdqa	@XMM[15], 0x20(%rbp)	# put aside IV
+	call	_bsaes_decrypt8
+	pxor	0x20(%rbp), @XMM[0]	# ^= IV
+	movdqu	0x00($inp), @XMM[8]	# re-load input
+	movdqu	0x10($inp), @XMM[9]
+	pxor	@XMM[8], @XMM[1]
+	movdqu	0x20($inp), @XMM[10]
+	pxor	@XMM[9], @XMM[6]
+	movdqu	0x30($inp), @XMM[11]
+	pxor	@XMM[10], @XMM[4]
+	movdqu	0x40($inp), @XMM[15]	# IV
+	pxor	@XMM[11], @XMM[2]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	movdqu	@XMM[2], 0x40($out)
+	jmp	.Lcbc_dec_done
+.align	16
+.Lcbc_dec_four:
+	movdqa	@XMM[15], 0x20(%rbp)	# put aside IV
+	call	_bsaes_decrypt8
+	pxor	0x20(%rbp), @XMM[0]	# ^= IV
+	movdqu	0x00($inp), @XMM[8]	# re-load input
+	movdqu	0x10($inp), @XMM[9]
+	pxor	@XMM[8], @XMM[1]
+	movdqu	0x20($inp), @XMM[10]
+	pxor	@XMM[9], @XMM[6]
+	movdqu	0x30($inp), @XMM[15]	# IV
+	pxor	@XMM[10], @XMM[4]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	jmp	.Lcbc_dec_done
+.align	16
+.Lcbc_dec_three:
+	movdqa	@XMM[15], 0x20(%rbp)	# put aside IV
+	call	_bsaes_decrypt8
+	pxor	0x20(%rbp), @XMM[0]	# ^= IV
+	movdqu	0x00($inp), @XMM[8]	# re-load input
+	movdqu	0x10($inp), @XMM[9]
+	pxor	@XMM[8], @XMM[1]
+	movdqu	0x20($inp), @XMM[15]	# IV
+	pxor	@XMM[9], @XMM[6]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	jmp	.Lcbc_dec_done
+.align	16
+.Lcbc_dec_two:
+	movdqa	@XMM[15], 0x20(%rbp)	# put aside IV
+	call	_bsaes_decrypt8
+	pxor	0x20(%rbp), @XMM[0]	# ^= IV
+	movdqu	0x00($inp), @XMM[8]	# re-load input
+	movdqu	0x10($inp), @XMM[15]	# IV
+	pxor	@XMM[8], @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	jmp	.Lcbc_dec_done
+.align	16
+.Lcbc_dec_one:
+	lea	($inp), $arg1
+	lea	0x20(%rbp), $arg2	# buffer output
+	lea	($key), $arg3
+	call	asm_AES_decrypt		# doesn't touch %xmm
+	pxor	0x20(%rbp), @XMM[15]	# ^= IV
+	movdqu	@XMM[15], ($out)	# write output
+	movdqa	@XMM[0], @XMM[15]	# IV
+
+.Lcbc_dec_done:
+	movdqu	@XMM[15], (%rbx)	# return IV
+	lea	(%rsp), %rax
+	pxor	%xmm0, %xmm0
+.Lcbc_dec_bzero:			# wipe key schedule [if any]
+	movdqa	%xmm0, 0x00(%rax)
+	movdqa	%xmm0, 0x10(%rax)
+	lea	0x20(%rax), %rax
+	cmp	%rax, %rbp
+	ja	.Lcbc_dec_bzero
+
+	lea	0x78(%rbp),%rax
+___
+$code.=<<___ if ($win64);
+	movaps	0x40(%rbp), %xmm6
+	movaps	0x50(%rbp), %xmm7
+	movaps	0x60(%rbp), %xmm8
+	movaps	0x70(%rbp), %xmm9
+	movaps	0x80(%rbp), %xmm10
+	movaps	0x90(%rbp), %xmm11
+	movaps	0xa0(%rbp), %xmm12
+	movaps	0xb0(%rbp), %xmm13
+	movaps	0xc0(%rbp), %xmm14
+	movaps	0xd0(%rbp), %xmm15
+	lea	0xa0(%rax), %rax
+.Lcbc_dec_tail:
+___
+$code.=<<___;
+	mov	-48(%rax), %r15
+	mov	-40(%rax), %r14
+	mov	-32(%rax), %r13
+	mov	-24(%rax), %r12
+	mov	-16(%rax), %rbx
+	mov	-8(%rax), %rbp
+	lea	(%rax), %rsp		# restore %rsp
+.Lcbc_dec_epilogue:
+	ret
+.size	bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
+
+.globl	bsaes_ctr32_encrypt_blocks
+.type	bsaes_ctr32_encrypt_blocks,\@abi-omnipotent
+.align	16
+bsaes_ctr32_encrypt_blocks:
+	mov	%rsp, %rax
+.Lctr_enc_prologue:
+	push	%rbp
+	push	%rbx
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	lea	-0x48(%rsp), %rsp
+___
+$code.=<<___ if ($win64);
+	mov	0xa0(%rsp),$arg5	# pull ivp
+	lea	-0xa0(%rsp), %rsp
+	movaps	%xmm6, 0x40(%rsp)
+	movaps	%xmm7, 0x50(%rsp)
+	movaps	%xmm8, 0x60(%rsp)
+	movaps	%xmm9, 0x70(%rsp)
+	movaps	%xmm10, 0x80(%rsp)
+	movaps	%xmm11, 0x90(%rsp)
+	movaps	%xmm12, 0xa0(%rsp)
+	movaps	%xmm13, 0xb0(%rsp)
+	movaps	%xmm14, 0xc0(%rsp)
+	movaps	%xmm15, 0xd0(%rsp)
+.Lctr_enc_body:
+___
+$code.=<<___;
+	mov	%rsp, %rbp		# backup %rsp
+	movdqu	($arg5), %xmm0		# load counter
+	mov	240($arg4), %eax	# rounds
+	mov	$arg1, $inp		# backup arguments
+	mov	$arg2, $out
+	mov	$arg3, $len
+	mov	$arg4, $key
+	movdqa	%xmm0, 0x20(%rbp)	# copy counter
+	cmp	\$8, $arg3
+	jb	.Lctr_enc_short
+
+	mov	%eax, %ebx		# rounds
+	shl	\$7, %rax		# 128 bytes per inner round key
+	sub	\$`128-32`, %rax	# size of bit-sliced key schedule
+	sub	%rax, %rsp
+
+	mov	%rsp, %rax		# pass key schedule
+	mov	$key, %rcx		# pass key
+	mov	%ebx, %r10d		# pass rounds
+	call	_bsaes_key_convert
+	pxor	%xmm6,%xmm7		# fix up last round key
+	movdqa	%xmm7,(%rax)		# save last round key
+
+	movdqa	(%rsp), @XMM[9]		# load round0 key
+	lea	.LADD1(%rip), %r11
+	movdqa	0x20(%rbp), @XMM[0]	# counter copy
+	movdqa	-0x20(%r11), @XMM[8]	# .LSWPUP
+	pshufb	@XMM[8], @XMM[9]	# byte swap upper part
+	pshufb	@XMM[8], @XMM[0]
+	movdqa	@XMM[9], (%rsp)		# save adjusted round0 key
+	jmp	.Lctr_enc_loop
+.align	16
+.Lctr_enc_loop:
+	movdqa	@XMM[0], 0x20(%rbp)	# save counter
+	movdqa	@XMM[0], @XMM[1]	# prepare 8 counter values
+	movdqa	@XMM[0], @XMM[2]
+	paddd	0x00(%r11), @XMM[1]	# .LADD1
+	movdqa	@XMM[0], @XMM[3]
+	paddd	0x10(%r11), @XMM[2]	# .LADD2
+	movdqa	@XMM[0], @XMM[4]
+	paddd	0x20(%r11), @XMM[3]	# .LADD3
+	movdqa	@XMM[0], @XMM[5]
+	paddd	0x30(%r11), @XMM[4]	# .LADD4
+	movdqa	@XMM[0], @XMM[6]
+	paddd	0x40(%r11), @XMM[5]	# .LADD5
+	movdqa	@XMM[0], @XMM[7]
+	paddd	0x50(%r11), @XMM[6]	# .LADD6
+	paddd	0x60(%r11), @XMM[7]	# .LADD7
+
+	# Borrow prologue from _bsaes_encrypt8 to use the opportunity
+	# to flip byte order in 32-bit counter
+	movdqa	(%rsp), @XMM[9]		# round 0 key
+	lea	0x10(%rsp), %rax	# pass key schedule
+	movdqa	-0x10(%r11), @XMM[8]	# .LSWPUPM0SR
+	pxor	@XMM[9], @XMM[0]	# xor with round0 key
+	pxor	@XMM[9], @XMM[1]
+	pxor	@XMM[9], @XMM[2]
+	pxor	@XMM[9], @XMM[3]
+	 pshufb	@XMM[8], @XMM[0]
+	 pshufb	@XMM[8], @XMM[1]
+	pxor	@XMM[9], @XMM[4]
+	pxor	@XMM[9], @XMM[5]
+	 pshufb	@XMM[8], @XMM[2]
+	 pshufb	@XMM[8], @XMM[3]
+	pxor	@XMM[9], @XMM[6]
+	pxor	@XMM[9], @XMM[7]
+	 pshufb	@XMM[8], @XMM[4]
+	 pshufb	@XMM[8], @XMM[5]
+	 pshufb	@XMM[8], @XMM[6]
+	 pshufb	@XMM[8], @XMM[7]
+	lea	.LBS0(%rip), %r11	# constants table
+	mov	%ebx,%r10d		# pass rounds
+
+	call	_bsaes_encrypt8_bitslice
+
+	sub	\$8,$len
+	jc	.Lctr_enc_loop_done
+
+	movdqu	0x00($inp), @XMM[8]	# load input
+	movdqu	0x10($inp), @XMM[9]
+	movdqu	0x20($inp), @XMM[10]
+	movdqu	0x30($inp), @XMM[11]
+	movdqu	0x40($inp), @XMM[12]
+	movdqu	0x50($inp), @XMM[13]
+	movdqu	0x60($inp), @XMM[14]
+	movdqu	0x70($inp), @XMM[15]
+	lea	0x80($inp),$inp
+	pxor	@XMM[0], @XMM[8]
+	movdqa	0x20(%rbp), @XMM[0]	# load counter
+	pxor	@XMM[9], @XMM[1]
+	movdqu	@XMM[8], 0x00($out)	# write output
+	pxor	@XMM[10], @XMM[4]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	@XMM[11], @XMM[6]
+	movdqu	@XMM[4], 0x20($out)
+	pxor	@XMM[12], @XMM[3]
+	movdqu	@XMM[6], 0x30($out)
+	pxor	@XMM[13], @XMM[7]
+	movdqu	@XMM[3], 0x40($out)
+	pxor	@XMM[14], @XMM[2]
+	movdqu	@XMM[7], 0x50($out)
+	pxor	@XMM[15], @XMM[5]
+	movdqu	@XMM[2], 0x60($out)
+	lea	.LADD1(%rip), %r11
+	movdqu	@XMM[5], 0x70($out)
+	lea	0x80($out), $out
+	paddd	0x70(%r11), @XMM[0]	# .LADD8
+	jnz	.Lctr_enc_loop
+
+	jmp	.Lctr_enc_done
+.align	16
+.Lctr_enc_loop_done:
+	add	\$8, $len
+	movdqu	0x00($inp), @XMM[8]	# load input
+	pxor	@XMM[8], @XMM[0]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	cmp	\$2,$len
+	jb	.Lctr_enc_done
+	movdqu	0x10($inp), @XMM[9]
+	pxor	@XMM[9], @XMM[1]
+	movdqu	@XMM[1], 0x10($out)
+	je	.Lctr_enc_done
+	movdqu	0x20($inp), @XMM[10]
+	pxor	@XMM[10], @XMM[4]
+	movdqu	@XMM[4], 0x20($out)
+	cmp	\$4,$len
+	jb	.Lctr_enc_done
+	movdqu	0x30($inp), @XMM[11]
+	pxor	@XMM[11], @XMM[6]
+	movdqu	@XMM[6], 0x30($out)
+	je	.Lctr_enc_done
+	movdqu	0x40($inp), @XMM[12]
+	pxor	@XMM[12], @XMM[3]
+	movdqu	@XMM[3], 0x40($out)
+	cmp	\$6,$len
+	jb	.Lctr_enc_done
+	movdqu	0x50($inp), @XMM[13]
+	pxor	@XMM[13], @XMM[7]
+	movdqu	@XMM[7], 0x50($out)
+	je	.Lctr_enc_done
+	movdqu	0x60($inp), @XMM[14]
+	pxor	@XMM[14], @XMM[2]
+	movdqu	@XMM[2], 0x60($out)
+	jmp	.Lctr_enc_done
+
+.align	16
+.Lctr_enc_short:
+	lea	0x20(%rbp), $arg1
+	lea	0x30(%rbp), $arg2
+	lea	($key), $arg3
+	call	asm_AES_encrypt
+	movdqu	($inp), @XMM[1]
+	lea	16($inp), $inp
+	mov	0x2c(%rbp), %eax	# load 32-bit counter
+	bswap	%eax
+	pxor	0x30(%rbp), @XMM[1]
+	inc	%eax			# increment
+	movdqu	@XMM[1], ($out)
+	bswap	%eax
+	lea	16($out), $out
+	mov	%eax, 0x2c(%rsp)	# save 32-bit counter
+	dec	$len
+	jnz	.Lctr_enc_short
+
+.Lctr_enc_done:
+	lea	(%rsp), %rax
+	pxor	%xmm0, %xmm0
+.Lctr_enc_bzero:			# wipe key schedule [if any]
+	movdqa	%xmm0, 0x00(%rax)
+	movdqa	%xmm0, 0x10(%rax)
+	lea	0x20(%rax), %rax
+	cmp	%rax, %rbp
+	ja	.Lctr_enc_bzero
+
+	lea	0x78(%rbp),%rax
+___
+$code.=<<___ if ($win64);
+	movaps	0x40(%rbp), %xmm6
+	movaps	0x50(%rbp), %xmm7
+	movaps	0x60(%rbp), %xmm8
+	movaps	0x70(%rbp), %xmm9
+	movaps	0x80(%rbp), %xmm10
+	movaps	0x90(%rbp), %xmm11
+	movaps	0xa0(%rbp), %xmm12
+	movaps	0xb0(%rbp), %xmm13
+	movaps	0xc0(%rbp), %xmm14
+	movaps	0xd0(%rbp), %xmm15
+	lea	0xa0(%rax), %rax
+.Lctr_enc_tail:
+___
+$code.=<<___;
+	mov	-48(%rax), %r15
+	mov	-40(%rax), %r14
+	mov	-32(%rax), %r13
+	mov	-24(%rax), %r12
+	mov	-16(%rax), %rbx
+	mov	-8(%rax), %rbp
+	lea	(%rax), %rsp		# restore %rsp
+.Lctr_enc_epilogue:
+	ret
+.size	bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
+___
+######################################################################
+# void bsaes_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+#	const AES_KEY *key1, const AES_KEY *key2,
+#	const unsigned char iv[16]);
+#
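+# The tweak update inside the loops below multiplies the 128-bit tweak by x
+# in GF(2^128) modulo x^128 + x^7 + x^2 + x + 1 (the 0x87 in .Lxts_magic).
+# As a rough scalar sketch of the same step, with the tweak viewed as two
+# 64-bit halves (hi:lo) -- illustrative only, not part of the emitted code:
+#
+#	carry_hi = hi >> 63;		# bit 127, reduced back in via 0x87
+#	carry_lo = lo >> 63;		# bit 63, carries into hi
+#	hi = (hi << 1) ^ carry_lo;
+#	lo = (lo << 1) ^ (carry_hi ? 0x87 : 0);
+#
+# The vector code gets the same effect with pcmpgtd (broadcast the sign
+# bits), pshufd 0x13 and pand .Lxts_magic (build the 0x87/1 correction),
+# paddq (shift both halves left by one) and pxor (apply the correction).
+#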
+my ($twmask,$twres,$twtmp)=@XMM[13..15];
+$arg6=~s/d$//;
+
+$code.=<<___;
+.globl	bsaes_xts_encrypt
+.type	bsaes_xts_encrypt,\@abi-omnipotent
+.align	16
+bsaes_xts_encrypt:
+	mov	%rsp, %rax
+.Lxts_enc_prologue:
+	push	%rbp
+	push	%rbx
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	lea	-0x48(%rsp), %rsp
+___
+$code.=<<___ if ($win64);
+	mov	0xa0(%rsp),$arg5	# pull key2
+	mov	0xa8(%rsp),$arg6	# pull ivp
+	lea	-0xa0(%rsp), %rsp
+	movaps	%xmm6, 0x40(%rsp)
+	movaps	%xmm7, 0x50(%rsp)
+	movaps	%xmm8, 0x60(%rsp)
+	movaps	%xmm9, 0x70(%rsp)
+	movaps	%xmm10, 0x80(%rsp)
+	movaps	%xmm11, 0x90(%rsp)
+	movaps	%xmm12, 0xa0(%rsp)
+	movaps	%xmm13, 0xb0(%rsp)
+	movaps	%xmm14, 0xc0(%rsp)
+	movaps	%xmm15, 0xd0(%rsp)
+.Lxts_enc_body:
+___
+$code.=<<___;
+	mov	%rsp, %rbp		# backup %rsp
+	mov	$arg1, $inp		# backup arguments
+	mov	$arg2, $out
+	mov	$arg3, $len
+	mov	$arg4, $key
+
+	lea	($arg6), $arg1
+	lea	0x20(%rbp), $arg2
+	lea	($arg5), $arg3
+	call	asm_AES_encrypt		# generate initial tweak
+
+	mov	240($key), %eax		# rounds
+	mov	$len, %rbx		# backup $len
+
+	mov	%eax, %edx		# rounds
+	shl	\$7, %rax		# 128 bytes per inner round key
+	sub	\$`128-32`, %rax	# size of bit-sliced key schedule
+	sub	%rax, %rsp
+
+	mov	%rsp, %rax		# pass key schedule
+	mov	$key, %rcx		# pass key
+	mov	%edx, %r10d		# pass rounds
+	call	_bsaes_key_convert
+	pxor	%xmm6, %xmm7		# fix up last round key
+	movdqa	%xmm7, (%rax)		# save last round key
+
+	and	\$-16, $len
+	sub	\$0x80, %rsp		# place for tweak[8]
+	movdqa	0x20(%rbp), @XMM[7]	# initial tweak
+
+	pxor	$twtmp, $twtmp
+	movdqa	.Lxts_magic(%rip), $twmask
+	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
+
+	sub	\$0x80, $len
+	jc	.Lxts_enc_short
+	jmp	.Lxts_enc_loop
+
+.align	16
+.Lxts_enc_loop:
+___
+    for ($i=0;$i<7;$i++) {
+    $code.=<<___;
+	pshufd	\$0x13, $twtmp, $twres
+	pxor	$twtmp, $twtmp
+	movdqa	@XMM[7], @XMM[$i]
+	movdqa	@XMM[7], `0x10*$i`(%rsp)# save tweak[$i]
+	paddq	@XMM[7], @XMM[7]	# psllq	1,$tweak
+	pand	$twmask, $twres		# isolate carry and residue
+	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
+	pxor	$twres, @XMM[7]
+___
+    $code.=<<___ if ($i>=1);
+	movdqu	`0x10*($i-1)`($inp), @XMM[8+$i-1]
+___
+    $code.=<<___ if ($i>=2);
+	pxor	@XMM[8+$i-2], @XMM[$i-2]# input[] ^ tweak[]
+___
+    }
+$code.=<<___;
+	movdqu	0x60($inp), @XMM[8+6]
+	pxor	@XMM[8+5], @XMM[5]
+	movdqu	0x70($inp), @XMM[8+7]
+	lea	0x80($inp), $inp
+	movdqa	@XMM[7], 0x70(%rsp)
+	pxor	@XMM[8+6], @XMM[6]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	pxor	@XMM[8+7], @XMM[7]
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_encrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[4]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	0x30(%rsp), @XMM[6]
+	movdqu	@XMM[4], 0x20($out)
+	pxor	0x40(%rsp), @XMM[3]
+	movdqu	@XMM[6], 0x30($out)
+	pxor	0x50(%rsp), @XMM[7]
+	movdqu	@XMM[3], 0x40($out)
+	pxor	0x60(%rsp), @XMM[2]
+	movdqu	@XMM[7], 0x50($out)
+	pxor	0x70(%rsp), @XMM[5]
+	movdqu	@XMM[2], 0x60($out)
+	movdqu	@XMM[5], 0x70($out)
+	lea	0x80($out), $out
+
+	movdqa	0x70(%rsp), @XMM[7]	# prepare next iteration tweak
+	pxor	$twtmp, $twtmp
+	movdqa	.Lxts_magic(%rip), $twmask
+	pcmpgtd	@XMM[7], $twtmp
+	pshufd	\$0x13, $twtmp, $twres
+	pxor	$twtmp, $twtmp
+	paddq	@XMM[7], @XMM[7]	# psllq	1,$tweak
+	pand	$twmask, $twres		# isolate carry and residue
+	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
+	pxor	$twres, @XMM[7]
+
+	sub	\$0x80,$len
+	jnc	.Lxts_enc_loop
+
+.Lxts_enc_short:
+	add	\$0x80, $len
+	jz	.Lxts_enc_done
+___
+    for ($i=0;$i<7;$i++) {
+    $code.=<<___;
+	pshufd	\$0x13, $twtmp, $twres
+	pxor	$twtmp, $twtmp
+	movdqa	@XMM[7], @XMM[$i]
+	movdqa	@XMM[7], `0x10*$i`(%rsp)# save tweak[$i]
+	paddq	@XMM[7], @XMM[7]	# psllq	1,$tweak
+	pand	$twmask, $twres		# isolate carry and residue
+	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
+	pxor	$twres, @XMM[7]
+___
+    $code.=<<___ if ($i>=1);
+	movdqu	`0x10*($i-1)`($inp), @XMM[8+$i-1]
+	cmp	\$`0x10*$i`,$len
+	je	.Lxts_enc_$i
+___
+    $code.=<<___ if ($i>=2);
+	pxor	@XMM[8+$i-2], @XMM[$i-2]# input[] ^ tweak[]
+___
+    }
+$code.=<<___;
+	movdqu	0x60($inp), @XMM[8+6]
+	pxor	@XMM[8+5], @XMM[5]
+	movdqa	@XMM[7], 0x70(%rsp)
+	lea	0x70($inp), $inp
+	pxor	@XMM[8+6], @XMM[6]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_encrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[4]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	0x30(%rsp), @XMM[6]
+	movdqu	@XMM[4], 0x20($out)
+	pxor	0x40(%rsp), @XMM[3]
+	movdqu	@XMM[6], 0x30($out)
+	pxor	0x50(%rsp), @XMM[7]
+	movdqu	@XMM[3], 0x40($out)
+	pxor	0x60(%rsp), @XMM[2]
+	movdqu	@XMM[7], 0x50($out)
+	movdqu	@XMM[2], 0x60($out)
+	lea	0x70($out), $out
+
+	movdqa	0x70(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_enc_done
+.align	16
+.Lxts_enc_6:
+	pxor	@XMM[8+4], @XMM[4]
+	lea	0x60($inp), $inp
+	pxor	@XMM[8+5], @XMM[5]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_encrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[4]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	0x30(%rsp), @XMM[6]
+	movdqu	@XMM[4], 0x20($out)
+	pxor	0x40(%rsp), @XMM[3]
+	movdqu	@XMM[6], 0x30($out)
+	pxor	0x50(%rsp), @XMM[7]
+	movdqu	@XMM[3], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	lea	0x60($out), $out
+
+	movdqa	0x60(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_enc_done
+.align	16
+.Lxts_enc_5:
+	pxor	@XMM[8+3], @XMM[3]
+	lea	0x50($inp), $inp
+	pxor	@XMM[8+4], @XMM[4]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_encrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[4]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	0x30(%rsp), @XMM[6]
+	movdqu	@XMM[4], 0x20($out)
+	pxor	0x40(%rsp), @XMM[3]
+	movdqu	@XMM[6], 0x30($out)
+	movdqu	@XMM[3], 0x40($out)
+	lea	0x50($out), $out
+
+	movdqa	0x50(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_enc_done
+.align	16
+.Lxts_enc_4:
+	pxor	@XMM[8+2], @XMM[2]
+	lea	0x40($inp), $inp
+	pxor	@XMM[8+3], @XMM[3]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_encrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[4]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	0x30(%rsp), @XMM[6]
+	movdqu	@XMM[4], 0x20($out)
+	movdqu	@XMM[6], 0x30($out)
+	lea	0x40($out), $out
+
+	movdqa	0x40(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_enc_done
+.align	16
+.Lxts_enc_3:
+	pxor	@XMM[8+1], @XMM[1]
+	lea	0x30($inp), $inp
+	pxor	@XMM[8+2], @XMM[2]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_encrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[4]
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[4], 0x20($out)
+	lea	0x30($out), $out
+
+	movdqa	0x30(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_enc_done
+.align	16
+.Lxts_enc_2:
+	pxor	@XMM[8+0], @XMM[0]
+	lea	0x20($inp), $inp
+	pxor	@XMM[8+1], @XMM[1]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_encrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	lea	0x20($out), $out
+
+	movdqa	0x20(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_enc_done
+.align	16
+.Lxts_enc_1:
+	pxor	@XMM[0], @XMM[8]
+	lea	0x10($inp), $inp
+	movdqa	@XMM[8], 0x20(%rbp)
+	lea	0x20(%rbp), $arg1
+	lea	0x20(%rbp), $arg2
+	lea	($key), $arg3
+	call	asm_AES_encrypt		# doesn't touch %xmm
+	pxor	0x20(%rbp), @XMM[0]	# ^= tweak[]
+	#pxor	@XMM[8], @XMM[0]
+	#lea	0x80(%rsp), %rax	# pass key schedule
+	#mov	%edx, %r10d		# pass rounds
+	#call	_bsaes_encrypt8
+	#pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	lea	0x10($out), $out
+
+	movdqa	0x10(%rsp), @XMM[7]	# next iteration tweak
+
+.Lxts_enc_done:
+	and	\$15, %ebx
+	jz	.Lxts_enc_ret
+	mov	$out, %rdx
+
+.Lxts_enc_steal:
+	movzb	($inp), %eax
+	movzb	-16(%rdx), %ecx
+	lea	1($inp), $inp
+	mov	%al, -16(%rdx)
+	mov	%cl, 0(%rdx)
+	lea	1(%rdx), %rdx
+	sub	\$1,%ebx
+	jnz	.Lxts_enc_steal
+
+	movdqu	-16($out), @XMM[0]
+	lea	0x20(%rbp), $arg1
+	pxor	@XMM[7], @XMM[0]
+	lea	0x20(%rbp), $arg2
+	movdqa	@XMM[0], 0x20(%rbp)
+	lea	($key), $arg3
+	call	asm_AES_encrypt		# doesn't touch %xmm
+	pxor	0x20(%rbp), @XMM[7]
+	movdqu	@XMM[7], -16($out)
+
+.Lxts_enc_ret:
+	lea	(%rsp), %rax
+	pxor	%xmm0, %xmm0
+.Lxts_enc_bzero:			# wipe key schedule [if any]
+	movdqa	%xmm0, 0x00(%rax)
+	movdqa	%xmm0, 0x10(%rax)
+	lea	0x20(%rax), %rax
+	cmp	%rax, %rbp
+	ja	.Lxts_enc_bzero
+
+	lea	0x78(%rbp),%rax
+___
+$code.=<<___ if ($win64);
+	movaps	0x40(%rbp), %xmm6
+	movaps	0x50(%rbp), %xmm7
+	movaps	0x60(%rbp), %xmm8
+	movaps	0x70(%rbp), %xmm9
+	movaps	0x80(%rbp), %xmm10
+	movaps	0x90(%rbp), %xmm11
+	movaps	0xa0(%rbp), %xmm12
+	movaps	0xb0(%rbp), %xmm13
+	movaps	0xc0(%rbp), %xmm14
+	movaps	0xd0(%rbp), %xmm15
+	lea	0xa0(%rax), %rax
+.Lxts_enc_tail:
+___
+$code.=<<___;
+	mov	-48(%rax), %r15
+	mov	-40(%rax), %r14
+	mov	-32(%rax), %r13
+	mov	-24(%rax), %r12
+	mov	-16(%rax), %rbx
+	mov	-8(%rax), %rbp
+	lea	(%rax), %rsp		# restore %rsp
+.Lxts_enc_epilogue:
+	ret
+.size	bsaes_xts_encrypt,.-bsaes_xts_encrypt
+
+.globl	bsaes_xts_decrypt
+.type	bsaes_xts_decrypt,\@abi-omnipotent
+.align	16
+bsaes_xts_decrypt:
+	mov	%rsp, %rax
+.Lxts_dec_prologue:
+	push	%rbp
+	push	%rbx
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	lea	-0x48(%rsp), %rsp
+___
+$code.=<<___ if ($win64);
+	mov	0xa0(%rsp),$arg5	# pull key2
+	mov	0xa8(%rsp),$arg6	# pull ivp
+	lea	-0xa0(%rsp), %rsp
+	movaps	%xmm6, 0x40(%rsp)
+	movaps	%xmm7, 0x50(%rsp)
+	movaps	%xmm8, 0x60(%rsp)
+	movaps	%xmm9, 0x70(%rsp)
+	movaps	%xmm10, 0x80(%rsp)
+	movaps	%xmm11, 0x90(%rsp)
+	movaps	%xmm12, 0xa0(%rsp)
+	movaps	%xmm13, 0xb0(%rsp)
+	movaps	%xmm14, 0xc0(%rsp)
+	movaps	%xmm15, 0xd0(%rsp)
+.Lxts_dec_body:
+___
+$code.=<<___;
+	mov	%rsp, %rbp		# backup %rsp
+	mov	$arg1, $inp		# backup arguments
+	mov	$arg2, $out
+	mov	$arg3, $len
+	mov	$arg4, $key
+
+	lea	($arg6), $arg1
+	lea	0x20(%rbp), $arg2
+	lea	($arg5), $arg3
+	call	asm_AES_encrypt		# generate initial tweak
+
+	mov	240($key), %eax		# rounds
+	mov	$len, %rbx		# backup $len
+
+	mov	%eax, %edx		# rounds
+	shl	\$7, %rax		# 128 bytes per inner round key
+	sub	\$`128-32`, %rax	# size of bit-sliced key schedule
+	sub	%rax, %rsp
+
+	mov	%rsp, %rax		# pass key schedule
+	mov	$key, %rcx		# pass key
+	mov	%edx, %r10d		# pass rounds
+	call	_bsaes_key_convert
+	pxor	(%rsp), %xmm7		# fix up round 0 key
+	movdqa	%xmm6, (%rax)		# save last round key
+	movdqa	%xmm7, (%rsp)
+
+	xor	%eax, %eax		# if ($len%16) len-=16;
+	and	\$-16, $len
+	test	\$15, %ebx
+	setnz	%al
+	shl	\$4, %rax
+	sub	%rax, $len
+
+	sub	\$0x80, %rsp		# place for tweak[8]
+	movdqa	0x20(%rbp), @XMM[7]	# initial tweak
+
+	pxor	$twtmp, $twtmp
+	movdqa	.Lxts_magic(%rip), $twmask
+	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
+
+	sub	\$0x80, $len
+	jc	.Lxts_dec_short
+	jmp	.Lxts_dec_loop
+
+.align	16
+.Lxts_dec_loop:
+___
+    for ($i=0;$i<7;$i++) {
+    $code.=<<___;
+	pshufd	\$0x13, $twtmp, $twres
+	pxor	$twtmp, $twtmp
+	movdqa	@XMM[7], @XMM[$i]
+	movdqa	@XMM[7], `0x10*$i`(%rsp)# save tweak[$i]
+	paddq	@XMM[7], @XMM[7]	# psllq	1,$tweak
+	pand	$twmask, $twres		# isolate carry and residue
+	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
+	pxor	$twres, @XMM[7]
+___
+    $code.=<<___ if ($i>=1);
+	movdqu	`0x10*($i-1)`($inp), @XMM[8+$i-1]
+___
+    $code.=<<___ if ($i>=2);
+	pxor	@XMM[8+$i-2], @XMM[$i-2]# input[] ^ tweak[]
+___
+    }
+$code.=<<___;
+	movdqu	0x60($inp), @XMM[8+6]
+	pxor	@XMM[8+5], @XMM[5]
+	movdqu	0x70($inp), @XMM[8+7]
+	lea	0x80($inp), $inp
+	movdqa	@XMM[7], 0x70(%rsp)
+	pxor	@XMM[8+6], @XMM[6]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	pxor	@XMM[8+7], @XMM[7]
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_decrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[6]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	0x30(%rsp), @XMM[4]
+	movdqu	@XMM[6], 0x20($out)
+	pxor	0x40(%rsp), @XMM[2]
+	movdqu	@XMM[4], 0x30($out)
+	pxor	0x50(%rsp), @XMM[7]
+	movdqu	@XMM[2], 0x40($out)
+	pxor	0x60(%rsp), @XMM[3]
+	movdqu	@XMM[7], 0x50($out)
+	pxor	0x70(%rsp), @XMM[5]
+	movdqu	@XMM[3], 0x60($out)
+	movdqu	@XMM[5], 0x70($out)
+	lea	0x80($out), $out
+
+	movdqa	0x70(%rsp), @XMM[7]	# prepare next iteration tweak
+	pxor	$twtmp, $twtmp
+	movdqa	.Lxts_magic(%rip), $twmask
+	pcmpgtd	@XMM[7], $twtmp
+	pshufd	\$0x13, $twtmp, $twres
+	pxor	$twtmp, $twtmp
+	paddq	@XMM[7], @XMM[7]	# psllq	1,$tweak
+	pand	$twmask, $twres		# isolate carry and residue
+	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
+	pxor	$twres, @XMM[7]
+
+	sub	\$0x80,$len
+	jnc	.Lxts_dec_loop
+
+.Lxts_dec_short:
+	add	\$0x80, $len
+	jz	.Lxts_dec_done
+___
+    for ($i=0;$i<7;$i++) {
+    $code.=<<___;
+	pshufd	\$0x13, $twtmp, $twres
+	pxor	$twtmp, $twtmp
+	movdqa	@XMM[7], @XMM[$i]
+	movdqa	@XMM[7], `0x10*$i`(%rsp)# save tweak[$i]
+	paddq	@XMM[7], @XMM[7]	# psllq	1,$tweak
+	pand	$twmask, $twres		# isolate carry and residue
+	pcmpgtd	@XMM[7], $twtmp		# broadcast upper bits
+	pxor	$twres, @XMM[7]
+___
+    $code.=<<___ if ($i>=1);
+	movdqu	`0x10*($i-1)`($inp), @XMM[8+$i-1]
+	cmp	\$`0x10*$i`,$len
+	je	.Lxts_dec_$i
+___
+    $code.=<<___ if ($i>=2);
+	pxor	@XMM[8+$i-2], @XMM[$i-2]# input[] ^ tweak[]
+___
+    }
+$code.=<<___;
+	movdqu	0x60($inp), @XMM[8+6]
+	pxor	@XMM[8+5], @XMM[5]
+	movdqa	@XMM[7], 0x70(%rsp)
+	lea	0x70($inp), $inp
+	pxor	@XMM[8+6], @XMM[6]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_decrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[6]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	0x30(%rsp), @XMM[4]
+	movdqu	@XMM[6], 0x20($out)
+	pxor	0x40(%rsp), @XMM[2]
+	movdqu	@XMM[4], 0x30($out)
+	pxor	0x50(%rsp), @XMM[7]
+	movdqu	@XMM[2], 0x40($out)
+	pxor	0x60(%rsp), @XMM[3]
+	movdqu	@XMM[7], 0x50($out)
+	movdqu	@XMM[3], 0x60($out)
+	lea	0x70($out), $out
+
+	movdqa	0x70(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_dec_done
+.align	16
+.Lxts_dec_6:
+	pxor	@XMM[8+4], @XMM[4]
+	lea	0x60($inp), $inp
+	pxor	@XMM[8+5], @XMM[5]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_decrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[6]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	0x30(%rsp), @XMM[4]
+	movdqu	@XMM[6], 0x20($out)
+	pxor	0x40(%rsp), @XMM[2]
+	movdqu	@XMM[4], 0x30($out)
+	pxor	0x50(%rsp), @XMM[7]
+	movdqu	@XMM[2], 0x40($out)
+	movdqu	@XMM[7], 0x50($out)
+	lea	0x60($out), $out
+
+	movdqa	0x60(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_dec_done
+.align	16
+.Lxts_dec_5:
+	pxor	@XMM[8+3], @XMM[3]
+	lea	0x50($inp), $inp
+	pxor	@XMM[8+4], @XMM[4]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_decrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[6]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	0x30(%rsp), @XMM[4]
+	movdqu	@XMM[6], 0x20($out)
+	pxor	0x40(%rsp), @XMM[2]
+	movdqu	@XMM[4], 0x30($out)
+	movdqu	@XMM[2], 0x40($out)
+	lea	0x50($out), $out
+
+	movdqa	0x50(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_dec_done
+.align	16
+.Lxts_dec_4:
+	pxor	@XMM[8+2], @XMM[2]
+	lea	0x40($inp), $inp
+	pxor	@XMM[8+3], @XMM[3]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_decrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[6]
+	movdqu	@XMM[1], 0x10($out)
+	pxor	0x30(%rsp), @XMM[4]
+	movdqu	@XMM[6], 0x20($out)
+	movdqu	@XMM[4], 0x30($out)
+	lea	0x40($out), $out
+
+	movdqa	0x40(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_dec_done
+.align	16
+.Lxts_dec_3:
+	pxor	@XMM[8+1], @XMM[1]
+	lea	0x30($inp), $inp
+	pxor	@XMM[8+2], @XMM[2]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_decrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	pxor	0x20(%rsp), @XMM[6]
+	movdqu	@XMM[1], 0x10($out)
+	movdqu	@XMM[6], 0x20($out)
+	lea	0x30($out), $out
+
+	movdqa	0x30(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_dec_done
+.align	16
+.Lxts_dec_2:
+	pxor	@XMM[8+0], @XMM[0]
+	lea	0x20($inp), $inp
+	pxor	@XMM[8+1], @XMM[1]
+	lea	0x80(%rsp), %rax	# pass key schedule
+	mov	%edx, %r10d		# pass rounds
+
+	call	_bsaes_decrypt8
+
+	pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	pxor	0x10(%rsp), @XMM[1]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	movdqu	@XMM[1], 0x10($out)
+	lea	0x20($out), $out
+
+	movdqa	0x20(%rsp), @XMM[7]	# next iteration tweak
+	jmp	.Lxts_dec_done
+.align	16
+.Lxts_dec_1:
+	pxor	@XMM[0], @XMM[8]
+	lea	0x10($inp), $inp
+	movdqa	@XMM[8], 0x20(%rbp)
+	lea	0x20(%rbp), $arg1
+	lea	0x20(%rbp), $arg2
+	lea	($key), $arg3
+	call	asm_AES_decrypt		# doesn't touch %xmm
+	pxor	0x20(%rbp), @XMM[0]	# ^= tweak[]
+	#pxor	@XMM[8], @XMM[0]
+	#lea	0x80(%rsp), %rax	# pass key schedule
+	#mov	%edx, %r10d		# pass rounds
+	#call	_bsaes_decrypt8
+	#pxor	0x00(%rsp), @XMM[0]	# ^= tweak[]
+	movdqu	@XMM[0], 0x00($out)	# write output
+	lea	0x10($out), $out
+
+	movdqa	0x10(%rsp), @XMM[7]	# next iteration tweak
+
+.Lxts_dec_done:
+	and	\$15, %ebx
+	jz	.Lxts_dec_ret
+
+	pxor	$twtmp, $twtmp
+	movdqa	.Lxts_magic(%rip), $twmask
+	pcmpgtd	@XMM[7], $twtmp
+	pshufd	\$0x13, $twtmp, $twres
+	movdqa	@XMM[7], @XMM[6]
+	paddq	@XMM[7], @XMM[7]	# psllq 1,$tweak
+	pand	$twmask, $twres		# isolate carry and residue
+	movdqu	($inp), @XMM[0]
+	pxor	$twres, @XMM[7]
+
+	lea	0x20(%rbp), $arg1
+	pxor	@XMM[7], @XMM[0]
+	lea	0x20(%rbp), $arg2
+	movdqa	@XMM[0], 0x20(%rbp)
+	lea	($key), $arg3
+	call	asm_AES_decrypt		# doesn't touch %xmm
+	pxor	0x20(%rbp), @XMM[7]
+	mov	$out, %rdx
+	movdqu	@XMM[7], ($out)
+
+.Lxts_dec_steal:
+	movzb	16($inp), %eax
+	movzb	(%rdx), %ecx
+	lea	1($inp), $inp
+	mov	%al, (%rdx)
+	mov	%cl, 16(%rdx)
+	lea	1(%rdx), %rdx
+	sub	\$1,%ebx
+	jnz	.Lxts_dec_steal
+
+	movdqu	($out), @XMM[0]
+	lea	0x20(%rbp), $arg1
+	pxor	@XMM[6], @XMM[0]
+	lea	0x20(%rbp), $arg2
+	movdqa	@XMM[0], 0x20(%rbp)
+	lea	($key), $arg3
+	call	asm_AES_decrypt		# doesn't touch %xmm
+	pxor	0x20(%rbp), @XMM[6]
+	movdqu	@XMM[6], ($out)
+
+.Lxts_dec_ret:
+	lea	(%rsp), %rax
+	pxor	%xmm0, %xmm0
+.Lxts_dec_bzero:			# wipe key schedule [if any]
+	movdqa	%xmm0, 0x00(%rax)
+	movdqa	%xmm0, 0x10(%rax)
+	lea	0x20(%rax), %rax
+	cmp	%rax, %rbp
+	ja	.Lxts_dec_bzero
+
+	lea	0x78(%rbp),%rax
+___
+$code.=<<___ if ($win64);
+	movaps	0x40(%rbp), %xmm6
+	movaps	0x50(%rbp), %xmm7
+	movaps	0x60(%rbp), %xmm8
+	movaps	0x70(%rbp), %xmm9
+	movaps	0x80(%rbp), %xmm10
+	movaps	0x90(%rbp), %xmm11
+	movaps	0xa0(%rbp), %xmm12
+	movaps	0xb0(%rbp), %xmm13
+	movaps	0xc0(%rbp), %xmm14
+	movaps	0xd0(%rbp), %xmm15
+	lea	0xa0(%rax), %rax
+.Lxts_dec_tail:
+___
+$code.=<<___;
+	mov	-48(%rax), %r15
+	mov	-40(%rax), %r14
+	mov	-32(%rax), %r13
+	mov	-24(%rax), %r12
+	mov	-16(%rax), %rbx
+	mov	-8(%rax), %rbp
+	lea	(%rax), %rsp		# restore %rsp
+.Lxts_dec_epilogue:
+	ret
+.size	bsaes_xts_decrypt,.-bsaes_xts_decrypt
+___
+}
+$code.=<<___;
+.type	_bsaes_const,\@object
+.align	64
+_bsaes_const:
+.LM0ISR:	# InvShiftRows constants
+	.quad	0x0a0e0206070b0f03, 0x0004080c0d010509
+.LISRM0:
+	.quad	0x01040b0e0205080f, 0x0306090c00070a0d
+.LISR:
+	.quad	0x0504070602010003, 0x0f0e0d0c080b0a09
+.LBS0:		# bit-slice constants
+	.quad	0x5555555555555555, 0x5555555555555555
+.LBS1:
+	.quad	0x3333333333333333, 0x3333333333333333
+.LBS2:
+	.quad	0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f
+.LSR:		# shiftrows constants
+	.quad	0x0504070600030201, 0x0f0e0d0c0a09080b
+.LSRM0:
+	.quad	0x0304090e00050a0f, 0x01060b0c0207080d
+.LM0SR:
+	.quad	0x0a0e02060f03070b, 0x0004080c05090d01
+.LSWPUP:	# byte-swap upper dword
+	.quad	0x0706050403020100, 0x0c0d0e0f0b0a0908
+.LSWPUPM0SR:
+	.quad	0x0a0d02060c03070b, 0x0004080f05090e01
+.LADD1:		# counter increment constants
+	.quad	0x0000000000000000, 0x0000000100000000
+.LADD2:
+	.quad	0x0000000000000000, 0x0000000200000000
+.LADD3:
+	.quad	0x0000000000000000, 0x0000000300000000
+.LADD4:
+	.quad	0x0000000000000000, 0x0000000400000000
+.LADD5:
+	.quad	0x0000000000000000, 0x0000000500000000
+.LADD6:
+	.quad	0x0000000000000000, 0x0000000600000000
+.LADD7:
+	.quad	0x0000000000000000, 0x0000000700000000
+.LADD8:
+	.quad	0x0000000000000000, 0x0000000800000000
+.Lxts_magic:
+	.long	0x87,0,1,0
+.Lmasks:
+	.quad	0x0101010101010101, 0x0101010101010101
+	.quad	0x0202020202020202, 0x0202020202020202
+	.quad	0x0404040404040404, 0x0404040404040404
+	.quad	0x0808080808080808, 0x0808080808080808
+.LM0:
+	.quad	0x02060a0e03070b0f, 0x0004080c0105090d
+.L63:
+	.quad	0x6363636363636363, 0x6363636363636363
+.asciz	"Bit-sliced AES for x86_64/SSSE3, Emilia Käsper, Peter Schwabe, Andy Polyakov"
+.align	64
+.size	_bsaes_const,.-_bsaes_const
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
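+# (Sketch of how the unwind data below is consumed, assuming the usual
+#  perlasm SEH layout: HandlerData[0..2] are the _body, _epilogue and _tail
+#  labels emitted in the .Lxxx_info blocks further down.  se_handler compares
+#  context->Rip against them to decide how much of the frame is live, i.e.
+#  whether the saved %xmm6-%xmm15 and the pushed GPRs still have to be copied
+#  back into the CONTEXT record, before handing off to RtlVirtualUnwind.)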
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+.type	se_handler,\@abi-omnipotent
+.align	16
+se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<=prologue label
+	jbe	.Lin_prologue
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lin_prologue
+
+	mov	8(%r11),%r10d		# HandlerData[2]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=tail label
+	jae	.Lin_tail
+
+	mov	160($context),%rax	# pull context->Rbp
+
+	lea	0x40(%rax),%rsi		# %xmm save area
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
+	.long	0xa548f3fc		# cld; rep movsq
+	lea	0xa0+0x78(%rax),%rax	# adjust stack pointer
+
+.Lin_tail:
+	mov	-48(%rax),%rbp
+	mov	-40(%rax),%rbx
+	mov	-32(%rax),%r12
+	mov	-24(%rax),%r13
+	mov	-16(%rax),%r14
+	mov	-8(%rax),%r15
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+.Lin_prologue:
+	mov	%rax,152($context)	# restore context->Rsp
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$`1232/8`,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	se_handler,.-se_handler
+
+.section	.pdata
+.align	4
+___
+$code.=<<___ if ($ecb);
+	.rva	.Lecb_enc_prologue
+	.rva	.Lecb_enc_epilogue
+	.rva	.Lecb_enc_info
+
+	.rva	.Lecb_dec_prologue
+	.rva	.Lecb_dec_epilogue
+	.rva	.Lecb_dec_info
+___
+$code.=<<___;
+	.rva	.Lcbc_dec_prologue
+	.rva	.Lcbc_dec_epilogue
+	.rva	.Lcbc_dec_info
+
+	.rva	.Lctr_enc_prologue
+	.rva	.Lctr_enc_epilogue
+	.rva	.Lctr_enc_info
+
+	.rva	.Lxts_enc_prologue
+	.rva	.Lxts_enc_epilogue
+	.rva	.Lxts_enc_info
+
+	.rva	.Lxts_dec_prologue
+	.rva	.Lxts_dec_epilogue
+	.rva	.Lxts_dec_info
+
+.section	.xdata
+.align	8
+___
+$code.=<<___ if ($ecb);
+.Lecb_enc_info:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lecb_enc_body,.Lecb_enc_epilogue	# HandlerData[]
+	.rva	.Lecb_enc_tail
+	.long	0
+.Lecb_dec_info:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lecb_dec_body,.Lecb_dec_epilogue	# HandlerData[]
+	.rva	.Lecb_dec_tail
+	.long	0
+___
+$code.=<<___;
+.Lcbc_dec_info:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lcbc_dec_body,.Lcbc_dec_epilogue	# HandlerData[]
+	.rva	.Lcbc_dec_tail
+	.long	0
+.Lctr_enc_info:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lctr_enc_body,.Lctr_enc_epilogue	# HandlerData[]
+	.rva	.Lctr_enc_tail
+	.long	0
+.Lxts_enc_info:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lxts_enc_body,.Lxts_enc_epilogue	# HandlerData[]
+	.rva	.Lxts_enc_tail
+	.long	0
+.Lxts_dec_info:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lxts_dec_body,.Lxts_dec_epilogue	# HandlerData[]
+	.rva	.Lxts_dec_tail
+	.long	0
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+print $code;
+
+close STDOUT;
diff --git a/src/crypto/fipsmodule/aes/asm/vpaes-x86.pl b/src/crypto/fipsmodule/aes/asm/vpaes-x86.pl
new file mode 100644
index 0000000..58b90a3
--- /dev/null
+++ b/src/crypto/fipsmodule/aes/asm/vpaes-x86.pl
@@ -0,0 +1,909 @@
+#!/usr/bin/env perl
+
+######################################################################
+## Constant-time SSSE3 AES core implementation.
+## version 0.1
+##
+## By Mike Hamburg (Stanford University), 2009
+## Public domain.
+##
+## For details see http://shiftleft.org/papers/vector_aes/ and
+## http://crypto.stanford.edu/vpaes/.
+
+######################################################################
+# September 2011.
+#
+# Port vpaes-x86_64.pl as 32-bit "almost" drop-in replacement for
+# aes-586.pl. "Almost" refers to the fact that AES_cbc_encrypt
+# doesn't handle partial vectors (doesn't have to if called from
+# EVP only). "Drop-in" implies that this module doesn't share key
+# schedule structure with the original nor does it make assumption
+# about its alignment...
+#
+# Performance summary. The aes-586.pl column lists large-block CBC
+# encrypt/decrypt/with-hyper-threading-off(*) results in cycles per
+# byte processed with a 128-bit key, and the vpaes-x86.pl column lists
+# the corresponding [also large-block CBC] encrypt/decrypt results.
+#
+#		aes-586.pl		vpaes-x86.pl
+#
+# Core 2(**)	28.1/41.4/18.3		21.9/25.2(***)
+# Nehalem	27.9/40.4/18.1		10.2/11.9
+# Atom		70.7/92.1/60.1		61.1/75.4(***)
+# Silvermont	45.4/62.9/24.1		49.2/61.1(***)
+#
+# (*)	"Hyper-threading" in this context refers to cache shared among
+#	multiple cores rather than specifically to Intel HTT. As the vast
+#	majority of contemporary cores share cache, the slower code path
+#	is commonplace. In other words, "with-hyper-threading-off"
+#	results are presented mostly for reference purposes.
+#
+# (**)	"Core 2" refers to initial 65nm design, a.k.a. Conroe.
+#
+# (***)	The less impressive improvement on Core 2 and Atom is due to slow
+#	pshufb, yet it is still a respectable +28%/64% improvement on Core 2
+#	and +15% on Atom (as implied, over the "hyper-threading-safe"
+#	code path).
+#
+#						<appro@openssl.org>
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../../perlasm");
+require "x86asm.pl";
+
+$output = pop;
+open OUT,">$output";
+*STDOUT=*OUT;
+
+&asm_init($ARGV[0],"vpaes-x86.pl",$x86only = $ARGV[$#ARGV] eq "386");
+
+$PREFIX="vpaes";
+
+my  ($round, $base, $magic, $key, $const, $inp, $out)=
+    ("eax",  "ebx", "ecx",  "edx","ebp",  "esi","edi");
+
+&static_label("_vpaes_consts");
+&static_label("_vpaes_schedule_low_round");
+
+&set_label("_vpaes_consts",64);
+$k_inv=-0x30;		# inv, inva
+	&data_word(0x0D080180,0x0E05060F,0x0A0B0C02,0x04070309);
+	&data_word(0x0F0B0780,0x01040A06,0x02050809,0x030D0E0C);
+
+$k_s0F=-0x10;		# s0F
+	&data_word(0x0F0F0F0F,0x0F0F0F0F,0x0F0F0F0F,0x0F0F0F0F);
+
+$k_ipt=0x00;		# input transform (lo, hi)
+	&data_word(0x5A2A7000,0xC2B2E898,0x52227808,0xCABAE090);
+	&data_word(0x317C4D00,0x4C01307D,0xB0FDCC81,0xCD80B1FC);
+
+$k_sb1=0x20;		# sb1u, sb1t
+	&data_word(0xCB503E00,0xB19BE18F,0x142AF544,0xA5DF7A6E);
+	&data_word(0xFAE22300,0x3618D415,0x0D2ED9EF,0x3BF7CCC1);
+$k_sb2=0x40;		# sb2u, sb2t
+	&data_word(0x0B712400,0xE27A93C6,0xBC982FCD,0x5EB7E955);
+	&data_word(0x0AE12900,0x69EB8840,0xAB82234A,0xC2A163C8);
+$k_sbo=0x60;		# sbou, sbot
+	&data_word(0x6FBDC700,0xD0D26D17,0xC502A878,0x15AABF7A);
+	&data_word(0x5FBB6A00,0xCFE474A5,0x412B35FA,0x8E1E90D1);
+
+$k_mc_forward=0x80;	# mc_forward
+	&data_word(0x00030201,0x04070605,0x080B0A09,0x0C0F0E0D);
+	&data_word(0x04070605,0x080B0A09,0x0C0F0E0D,0x00030201);
+	&data_word(0x080B0A09,0x0C0F0E0D,0x00030201,0x04070605);
+	&data_word(0x0C0F0E0D,0x00030201,0x04070605,0x080B0A09);
+
+$k_mc_backward=0xc0;	# mc_backward
+	&data_word(0x02010003,0x06050407,0x0A09080B,0x0E0D0C0F);
+	&data_word(0x0E0D0C0F,0x02010003,0x06050407,0x0A09080B);
+	&data_word(0x0A09080B,0x0E0D0C0F,0x02010003,0x06050407);
+	&data_word(0x06050407,0x0A09080B,0x0E0D0C0F,0x02010003);
+
+$k_sr=0x100;		# sr
+	&data_word(0x03020100,0x07060504,0x0B0A0908,0x0F0E0D0C);
+	&data_word(0x0F0A0500,0x030E0904,0x07020D08,0x0B06010C);
+	&data_word(0x0B020900,0x0F060D04,0x030A0108,0x070E050C);
+	&data_word(0x070A0D00,0x0B0E0104,0x0F020508,0x0306090C);
+
+$k_rcon=0x140;		# rcon
+	&data_word(0xAF9DEEB6,0x1F8391B9,0x4D7C7D81,0x702A9808);
+
+$k_s63=0x150;		# s63: all equal to 0x63 transformed
+	&data_word(0x5B5B5B5B,0x5B5B5B5B,0x5B5B5B5B,0x5B5B5B5B);
+
+$k_opt=0x160;		# output transform
+	&data_word(0xD6B66000,0xFF9F4929,0xDEBE6808,0xF7974121);
+	&data_word(0x50BCEC00,0x01EDBD51,0xB05C0CE0,0xE10D5DB1);
+
+$k_deskew=0x180;	# deskew tables: inverts the sbox's "skew"
+	&data_word(0x47A4E300,0x07E4A340,0x5DBEF91A,0x1DFEB95A);
+	&data_word(0x83EA6900,0x5F36B5DC,0xF49D1E77,0x2841C2AB);
+##
+##  Decryption stuff
+##  Key schedule constants
+##
+$k_dksd=0x1a0;		# decryption key schedule: invskew x*D
+	&data_word(0xA3E44700,0xFEB91A5D,0x5A1DBEF9,0x0740E3A4);
+	&data_word(0xB5368300,0x41C277F4,0xAB289D1E,0x5FDC69EA);
+$k_dksb=0x1c0;		# decryption key schedule: invskew x*B
+	&data_word(0x8550D500,0x9A4FCA1F,0x1CC94C99,0x03D65386);
+	&data_word(0xB6FC4A00,0x115BEDA7,0x7E3482C8,0xD993256F);
+$k_dkse=0x1e0;		# decryption key schedule: invskew x*E + 0x63
+	&data_word(0x1FC9D600,0xD5031CCA,0x994F5086,0x53859A4C);
+	&data_word(0x4FDC7BE8,0xA2319605,0x20B31487,0xCD5EF96A);
+$k_dks9=0x200;		# decryption key schedule: invskew x*9
+	&data_word(0x7ED9A700,0xB6116FC8,0x82255BFC,0x4AED9334);
+	&data_word(0x27143300,0x45765162,0xE9DAFDCE,0x8BB89FAC);
+
+##
+##  Decryption stuff
+##  Round function constants
+##
+$k_dipt=0x220;		# decryption input transform
+	&data_word(0x0B545F00,0x0F505B04,0x114E451A,0x154A411E);
+	&data_word(0x60056500,0x86E383E6,0xF491F194,0x12771772);
+
+$k_dsb9=0x240;		# decryption sbox output *9*u, *9*t
+	&data_word(0x9A86D600,0x851C0353,0x4F994CC9,0xCAD51F50);
+	&data_word(0xECD74900,0xC03B1789,0xB2FBA565,0x725E2C9E);
+$k_dsbd=0x260;		# decryption sbox output *D*u, *D*t
+	&data_word(0xE6B1A200,0x7D57CCDF,0x882A4439,0xF56E9B13);
+	&data_word(0x24C6CB00,0x3CE2FAF7,0x15DEEFD3,0x2931180D);
+$k_dsbb=0x280;		# decryption sbox output *B*u, *B*t
+	&data_word(0x96B44200,0xD0226492,0xB0F2D404,0x602646F6);
+	&data_word(0xCD596700,0xC19498A6,0x3255AA6B,0xF3FF0C3E);
+$k_dsbe=0x2a0;		# decryption sbox output *E*u, *E*t
+	&data_word(0x26D4D000,0x46F29296,0x64B4F6B0,0x22426004);
+	&data_word(0xFFAAC100,0x0C55A6CD,0x98593E32,0x9467F36B);
+$k_dsbo=0x2c0;		# decryption sbox final output
+	&data_word(0x7EF94000,0x1387EA53,0xD4943E2D,0xC7AA6DB9);
+	&data_word(0x93441D00,0x12D7560F,0xD8C58E9C,0xCA4B8159);
+&asciz	("Vector Permutation AES for x86/SSSE3, Mike Hamburg (Stanford University)");
+&align	(64);
+
+&function_begin_B("_vpaes_preheat");
+	&add	($const,&DWP(0,"esp"));
+	&movdqa	("xmm7",&QWP($k_inv,$const));
+	&movdqa	("xmm6",&QWP($k_s0F,$const));
+	&ret	();
+&function_end_B("_vpaes_preheat");
+
+##
+##  _aes_encrypt_core
+##
+##  AES-encrypt %xmm0.
+##
+##  Inputs:
+##     %xmm0 = input
+##     %xmm6-%xmm7 as in _vpaes_preheat
+##    (%edx) = scheduled keys
+##
+##  Output in %xmm0
+##  Clobbers  %xmm1-%xmm5, %eax, %ebx, %ecx, %edx
+##
+##
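+##  As a rough scalar sketch of the lookup trick used throughout this file
+##  (illustrative pseudocode only, not the register allocation used below):
+##  every byte x is split into nibbles and run through two 16-entry tables,
+##
+##      lo = x & 0x0F;  hi = (x >> 4) & 0x0F;
+##      y  = tbl_lo[lo] ^ tbl_hi[hi];	# two pshufb lookups + one pxor
+##
+##  pshufb performs sixteen such lookups in parallel, so an S-box-like step
+##  costs a few byte shuffles and xors and never makes a data-dependent
+##  memory access, which is what keeps the implementation constant-time.
+##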
+&function_begin_B("_vpaes_encrypt_core");
+	&mov	($magic,16);
+	&mov	($round,&DWP(240,$key));
+	&movdqa	("xmm1","xmm6");
+	&movdqa	("xmm2",&QWP($k_ipt,$const));
+	&pandn	("xmm1","xmm0");
+	&pand	("xmm0","xmm6");
+	&movdqu	("xmm5",&QWP(0,$key));
+	&pshufb	("xmm2","xmm0");
+	&movdqa	("xmm0",&QWP($k_ipt+16,$const));
+	&pxor	("xmm2","xmm5");
+	&psrld	("xmm1",4);
+	&add	($key,16);
+	&pshufb	("xmm0","xmm1");
+	&lea	($base,&DWP($k_mc_backward,$const));
+	&pxor	("xmm0","xmm2");
+	&jmp	(&label("enc_entry"));
+
+
+&set_label("enc_loop",16);
+	# middle of middle round
+	&movdqa	("xmm4",&QWP($k_sb1,$const));	# 4 : sb1u
+	&movdqa	("xmm0",&QWP($k_sb1+16,$const));# 0 : sb1t
+	&pshufb	("xmm4","xmm2");		# 4 = sb1u
+	&pshufb	("xmm0","xmm3");		# 0 = sb1t
+	&pxor	("xmm4","xmm5");		# 4 = sb1u + k
+	&movdqa	("xmm5",&QWP($k_sb2,$const));	# 4 : sb2u
+	&pxor	("xmm0","xmm4");		# 0 = A
+	&movdqa	("xmm1",&QWP(-0x40,$base,$magic));# .Lk_mc_forward[]
+	&pshufb	("xmm5","xmm2");		# 4 = sb2u
+	&movdqa	("xmm2",&QWP($k_sb2+16,$const));# 2 : sb2t
+	&movdqa	("xmm4",&QWP(0,$base,$magic));	# .Lk_mc_backward[]
+	&pshufb	("xmm2","xmm3");		# 2 = sb2t
+	&movdqa	("xmm3","xmm0");		# 3 = A
+	&pxor	("xmm2","xmm5");		# 2 = 2A
+	&pshufb	("xmm0","xmm1");		# 0 = B
+	&add	($key,16);			# next key
+	&pxor	("xmm0","xmm2");		# 0 = 2A+B
+	&pshufb	("xmm3","xmm4");		# 3 = D
+	&add	($magic,16);			# next mc
+	&pxor	("xmm3","xmm0");		# 3 = 2A+B+D
+	&pshufb	("xmm0","xmm1");		# 0 = 2B+C
+	&and	($magic,0x30);			# ... mod 4
+	&sub	($round,1);			# nr--
+	&pxor	("xmm0","xmm3");		# 0 = 2A+3B+C+D
+
+&set_label("enc_entry");
+	# top of round
+	&movdqa	("xmm1","xmm6");		# 1 : i
+	&movdqa	("xmm5",&QWP($k_inv+16,$const));# 2 : a/k
+	&pandn	("xmm1","xmm0");		# 1 = i<<4
+	&psrld	("xmm1",4);			# 1 = i
+	&pand	("xmm0","xmm6");		# 0 = k
+	&pshufb	("xmm5","xmm0");		# 2 = a/k
+	&movdqa	("xmm3","xmm7");		# 3 : 1/i
+	&pxor	("xmm0","xmm1");		# 0 = j
+	&pshufb	("xmm3","xmm1");		# 3 = 1/i
+	&movdqa	("xmm4","xmm7");		# 4 : 1/j
+	&pxor	("xmm3","xmm5");		# 3 = iak = 1/i + a/k
+	&pshufb	("xmm4","xmm0");		# 4 = 1/j
+	&movdqa	("xmm2","xmm7");		# 2 : 1/iak
+	&pxor	("xmm4","xmm5");		# 4 = jak = 1/j + a/k
+	&pshufb	("xmm2","xmm3");		# 2 = 1/iak
+	&movdqa	("xmm3","xmm7");		# 3 : 1/jak
+	&pxor	("xmm2","xmm0");		# 2 = io
+	&pshufb	("xmm3","xmm4");		# 3 = 1/jak
+	&movdqu	("xmm5",&QWP(0,$key));
+	&pxor	("xmm3","xmm1");		# 3 = jo
+	&jnz	(&label("enc_loop"));
+
+	# middle of last round
+	&movdqa	("xmm4",&QWP($k_sbo,$const));	# 3 : sbou      .Lk_sbo
+	&movdqa	("xmm0",&QWP($k_sbo+16,$const));# 3 : sbot      .Lk_sbo+16
+	&pshufb	("xmm4","xmm2");		# 4 = sbou
+	&pxor	("xmm4","xmm5");		# 4 = sb1u + k
+	&pshufb	("xmm0","xmm3");		# 0 = sb1t
+	&movdqa	("xmm1",&QWP(0x40,$base,$magic));# .Lk_sr[]
+	&pxor	("xmm0","xmm4");		# 0 = A
+	&pshufb	("xmm0","xmm1");
+	&ret	();
+&function_end_B("_vpaes_encrypt_core");
+
+##
+##  Decryption core
+##
+##  Same API as encryption core.
+##
+&function_begin_B("_vpaes_decrypt_core");
+	&lea	($base,&DWP($k_dsbd,$const));
+	&mov	($round,&DWP(240,$key));
+	&movdqa	("xmm1","xmm6");
+	&movdqa	("xmm2",&QWP($k_dipt-$k_dsbd,$base));
+	&pandn	("xmm1","xmm0");
+	&mov	($magic,$round);
+	&psrld	("xmm1",4);
+	&movdqu	("xmm5",&QWP(0,$key));
+	&shl	($magic,4);
+	&pand	("xmm0","xmm6");
+	&pshufb	("xmm2","xmm0");
+	&movdqa	("xmm0",&QWP($k_dipt-$k_dsbd+16,$base));
+	&xor	($magic,0x30);
+	&pshufb	("xmm0","xmm1");
+	&and	($magic,0x30);
+	&pxor	("xmm2","xmm5");
+	&movdqa	("xmm5",&QWP($k_mc_forward+48,$const));
+	&pxor	("xmm0","xmm2");
+	&add	($key,16);
+	&lea	($magic,&DWP($k_sr-$k_dsbd,$base,$magic));
+	&jmp	(&label("dec_entry"));
+
+&set_label("dec_loop",16);
+##
+##  Inverse mix columns
+##
+	&movdqa	("xmm4",&QWP(-0x20,$base));	# 4 : sb9u
+	&movdqa	("xmm1",&QWP(-0x10,$base));	# 0 : sb9t
+	&pshufb	("xmm4","xmm2");		# 4 = sb9u
+	&pshufb	("xmm1","xmm3");		# 0 = sb9t
+	&pxor	("xmm0","xmm4");
+	&movdqa	("xmm4",&QWP(0,$base));		# 4 : sbdu
+	&pxor	("xmm0","xmm1");		# 0 = ch
+	&movdqa	("xmm1",&QWP(0x10,$base));	# 0 : sbdt
+
+	&pshufb	("xmm4","xmm2");		# 4 = sbdu
+	&pshufb	("xmm0","xmm5");		# MC ch
+	&pshufb	("xmm1","xmm3");		# 0 = sbdt
+	&pxor	("xmm0","xmm4");		# 4 = ch
+	&movdqa	("xmm4",&QWP(0x20,$base));	# 4 : sbbu
+	&pxor	("xmm0","xmm1");		# 0 = ch
+	&movdqa	("xmm1",&QWP(0x30,$base));	# 0 : sbbt
+
+	&pshufb	("xmm4","xmm2");		# 4 = sbbu
+	&pshufb	("xmm0","xmm5");		# MC ch
+	&pshufb	("xmm1","xmm3");		# 0 = sbbt
+	&pxor	("xmm0","xmm4");		# 4 = ch
+	&movdqa	("xmm4",&QWP(0x40,$base));	# 4 : sbeu
+	&pxor	("xmm0","xmm1");		# 0 = ch
+	&movdqa	("xmm1",&QWP(0x50,$base));	# 0 : sbet
+
+	&pshufb	("xmm4","xmm2");		# 4 = sbeu
+	&pshufb	("xmm0","xmm5");		# MC ch
+	&pshufb	("xmm1","xmm3");		# 0 = sbet
+	&pxor	("xmm0","xmm4");		# 4 = ch
+	&add	($key,16);			# next round key
+	&palignr("xmm5","xmm5",12);
+	&pxor	("xmm0","xmm1");		# 0 = ch
+	&sub	($round,1);			# nr--
+
+&set_label("dec_entry");
+	# top of round
+	&movdqa	("xmm1","xmm6");		# 1 : i
+	&movdqa	("xmm2",&QWP($k_inv+16,$const));# 2 : a/k
+	&pandn	("xmm1","xmm0");		# 1 = i<<4
+	&pand	("xmm0","xmm6");		# 0 = k
+	&psrld	("xmm1",4);			# 1 = i
+	&pshufb	("xmm2","xmm0");		# 2 = a/k
+	&movdqa	("xmm3","xmm7");		# 3 : 1/i
+	&pxor	("xmm0","xmm1");		# 0 = j
+	&pshufb	("xmm3","xmm1");		# 3 = 1/i
+	&movdqa	("xmm4","xmm7");		# 4 : 1/j
+	&pxor	("xmm3","xmm2");		# 3 = iak = 1/i + a/k
+	&pshufb	("xmm4","xmm0");		# 4 = 1/j
+	&pxor	("xmm4","xmm2");		# 4 = jak = 1/j + a/k
+	&movdqa	("xmm2","xmm7");		# 2 : 1/iak
+	&pshufb	("xmm2","xmm3");		# 2 = 1/iak
+	&movdqa	("xmm3","xmm7");		# 3 : 1/jak
+	&pxor	("xmm2","xmm0");		# 2 = io
+	&pshufb	("xmm3","xmm4");		# 3 = 1/jak
+	&movdqu	("xmm0",&QWP(0,$key));
+	&pxor	("xmm3","xmm1");		# 3 = jo
+	&jnz	(&label("dec_loop"));
+
+	# middle of last round
+	&movdqa	("xmm4",&QWP(0x60,$base));	# 3 : sbou
+	&pshufb	("xmm4","xmm2");		# 4 = sbou
+	&pxor	("xmm4","xmm0");		# 4 = sb1u + k
+	&movdqa	("xmm0",&QWP(0x70,$base));	# 0 : sbot
+	&movdqa	("xmm2",&QWP(0,$magic));
+	&pshufb	("xmm0","xmm3");		# 0 = sb1t
+	&pxor	("xmm0","xmm4");		# 0 = A
+	&pshufb	("xmm0","xmm2");
+	&ret	();
+&function_end_B("_vpaes_decrypt_core");
+
+########################################################
+##                                                    ##
+##                  AES key schedule                  ##
+##                                                    ##
+########################################################
+&function_begin_B("_vpaes_schedule_core");
+	&add	($const,&DWP(0,"esp"));
+	&movdqu	("xmm0",&QWP(0,$inp));		# load key (unaligned)
+	&movdqa	("xmm2",&QWP($k_rcon,$const));	# load rcon
+
+	# input transform
+	&movdqa	("xmm3","xmm0");
+	&lea	($base,&DWP($k_ipt,$const));
+	&movdqa	(&QWP(4,"esp"),"xmm2");		# xmm8
+	&call	("_vpaes_schedule_transform");
+	&movdqa	("xmm7","xmm0");
+
+	&test	($out,$out);
+	&jnz	(&label("schedule_am_decrypting"));
+
+	# encrypting, output zeroth round key after transform
+	&movdqu	(&QWP(0,$key),"xmm0");
+	&jmp	(&label("schedule_go"));
+
+&set_label("schedule_am_decrypting");
+	# decrypting, output zeroth round key after shiftrows
+	&movdqa	("xmm1",&QWP($k_sr,$const,$magic));
+	&pshufb	("xmm3","xmm1");
+	&movdqu	(&QWP(0,$key),"xmm3");
+	&xor	($magic,0x30);
+
+&set_label("schedule_go");
+	&cmp	($round,192);
+	&ja	(&label("schedule_256"));
+	&je	(&label("schedule_192"));
+	# 128: fall through
+
+##
+##  .schedule_128
+##
+##  128-bit specific part of key schedule.
+##
+##  This schedule is really simple, because all its parts
+##  are accomplished by the subroutines.
+##
+&set_label("schedule_128");
+	&mov	($round,10);
+
+&set_label("loop_schedule_128");
+	&call	("_vpaes_schedule_round");
+	&dec	($round);
+	&jz	(&label("schedule_mangle_last"));
+	&call	("_vpaes_schedule_mangle");	# write output
+	&jmp	(&label("loop_schedule_128"));
+
+##
+##  .aes_schedule_192
+##
+##  192-bit specific part of key schedule.
+##
+##  The main body of this schedule is the same as the 128-bit
+##  schedule, but with more smearing.  The long, high side is
+##  stored in %xmm7 as before, and the short, low side is in
+##  the high bits of %xmm6.
+##
+##  This schedule is somewhat nastier, however, because each
+##  round produces 192 bits of key material, or 1.5 round keys.
+##  Therefore, on each cycle we do 2 rounds and produce 3 round
+##  keys.
+##
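+##  (Worked out: 2 rounds x 192 bits = 384 bits = 3 x 128-bit round keys per
+##  cycle, so the four cycles below -- the last key coming out of the final
+##  mangle -- plus the zeroth key written earlier give the 13 round keys
+##  that 12-round AES-192 requires.)
+##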
+&set_label("schedule_192",16);
+	&movdqu	("xmm0",&QWP(8,$inp));		# load key part 2 (very unaligned)
+	&call	("_vpaes_schedule_transform");	# input transform
+	&movdqa	("xmm6","xmm0");		# save short part
+	&pxor	("xmm4","xmm4");		# clear 4
+	&movhlps("xmm6","xmm4");		# clobber low side with zeros
+	&mov	($round,4);
+
+&set_label("loop_schedule_192");
+	&call	("_vpaes_schedule_round");
+	&palignr("xmm0","xmm6",8);
+	&call	("_vpaes_schedule_mangle");	# save key n
+	&call	("_vpaes_schedule_192_smear");
+	&call	("_vpaes_schedule_mangle");	# save key n+1
+	&call	("_vpaes_schedule_round");
+	&dec	($round);
+	&jz	(&label("schedule_mangle_last"));
+	&call	("_vpaes_schedule_mangle");	# save key n+2
+	&call	("_vpaes_schedule_192_smear");
+	&jmp	(&label("loop_schedule_192"));
+
+##
+##  .aes_schedule_256
+##
+##  256-bit specific part of key schedule.
+##
+##  The structure here is very similar to the 128-bit
+##  schedule, but with an additional "low side" in
+##  %xmm6.  The low side's rounds are the same as the
+##  high side's, except no rcon and no rotation.
+##
+&set_label("schedule_256",16);
+	&movdqu	("xmm0",&QWP(16,$inp));		# load key part 2 (unaligned)
+	&call	("_vpaes_schedule_transform");	# input transform
+	&mov	($round,7);
+
+&set_label("loop_schedule_256");
+	&call	("_vpaes_schedule_mangle");	# output low result
+	&movdqa	("xmm6","xmm0");		# save cur_lo in xmm6
+
+	# high round
+	&call	("_vpaes_schedule_round");
+	&dec	($round);
+	&jz	(&label("schedule_mangle_last"));
+	&call	("_vpaes_schedule_mangle");
+
+	# low round. swap xmm7 and xmm6
+	&pshufd	("xmm0","xmm0",0xFF);
+	&movdqa	(&QWP(20,"esp"),"xmm7");
+	&movdqa	("xmm7","xmm6");
+	&call	("_vpaes_schedule_low_round");
+	&movdqa	("xmm7",&QWP(20,"esp"));
+
+	&jmp	(&label("loop_schedule_256"));
+
+##
+##  .aes_schedule_mangle_last
+##
+##  Mangler for last round of key schedule
+##  Mangles %xmm0
+##    when encrypting, outputs out(%xmm0) ^ 63
+##    when decrypting, outputs unskew(%xmm0)
+##
+##  Always called right before return... jumps to cleanup and exits
+##
+&set_label("schedule_mangle_last",16);
+	# schedule last round key from xmm0
+	&lea	($base,&DWP($k_deskew,$const));
+	&test	($out,$out);
+	&jnz	(&label("schedule_mangle_last_dec"));
+
+	# encrypting
+	&movdqa	("xmm1",&QWP($k_sr,$const,$magic));
+	&pshufb	("xmm0","xmm1");		# output permute
+	&lea	($base,&DWP($k_opt,$const));	# prepare to output transform
+	&add	($key,32);
+
+&set_label("schedule_mangle_last_dec");
+	&add	($key,-16);
+	&pxor	("xmm0",&QWP($k_s63,$const));
+	&call	("_vpaes_schedule_transform");	# output transform
+	&movdqu	(&QWP(0,$key),"xmm0");		# save last key
+
+	# cleanup
+	&pxor	("xmm0","xmm0");
+	&pxor	("xmm1","xmm1");
+	&pxor	("xmm2","xmm2");
+	&pxor	("xmm3","xmm3");
+	&pxor	("xmm4","xmm4");
+	&pxor	("xmm5","xmm5");
+	&pxor	("xmm6","xmm6");
+	&pxor	("xmm7","xmm7");
+	&ret	();
+&function_end_B("_vpaes_schedule_core");
+
+##
+##  .aes_schedule_192_smear
+##
+##  Smear the short, low side in the 192-bit key schedule.
+##
+##  Inputs:
+##    %xmm7: high side, b  a  x  y
+##    %xmm6:  low side, d  c  0  0
+##    %xmm13: 0
+##
+##  Outputs:
+##    %xmm6: b+c+d  b+c  0  0
+##    %xmm0: b+c+d  b+c  b  a
+##
+&function_begin_B("_vpaes_schedule_192_smear");
+	&pshufd	("xmm1","xmm6",0x80);		# d c 0 0 -> c 0 0 0
+	&pshufd	("xmm0","xmm7",0xFE);		# b a _ _ -> b b b a
+	&pxor	("xmm6","xmm1");		# -> c+d c 0 0
+	&pxor	("xmm1","xmm1");
+	&pxor	("xmm6","xmm0");		# -> b+c+d b+c b a
+	&movdqa	("xmm0","xmm6");
+	&movhlps("xmm6","xmm1");		# clobber low side with zeros
+	&ret	();
+&function_end_B("_vpaes_schedule_192_smear");
+
+##
+##  .aes_schedule_round
+##
+##  Runs one main round of the key schedule on %xmm0, %xmm7
+##
+##  Specifically, runs subbytes on the high dword of %xmm0
+##  then rotates it by one byte and xors into the low dword of
+##  %xmm7.
+##
+##  Adds rcon from low byte of %xmm8, then rotates %xmm8 for
+##  next rcon.
+##
+##  Smears the dwords of %xmm7 by xoring the low into the
+##  second low, result into third, result into highest.
+##
+##  Returns results in %xmm7 = %xmm0.
+##  Clobbers %xmm1-%xmm5.
+##
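+##  In textbook key-expansion notation this is roughly (illustrative only;
+##  the code below keeps four words of the schedule per %xmm register):
+##
+##      temp   = SubWord(RotWord(w[i-1])) ^ Rcon
+##      w[i]   = w[i-4] ^ temp
+##      w[i+1] = w[i-3] ^ w[i],  ... and so on for the remaining words,
+##
+##  with the cumulative xor chain realised by the pslldq/pxor "smear" of
+##  %xmm7.
+##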
+&function_begin_B("_vpaes_schedule_round");
+	# extract rcon from xmm8
+	&movdqa	("xmm2",&QWP(8,"esp"));		# xmm8
+	&pxor	("xmm1","xmm1");
+	&palignr("xmm1","xmm2",15);
+	&palignr("xmm2","xmm2",15);
+	&pxor	("xmm7","xmm1");
+
+	# rotate
+	&pshufd	("xmm0","xmm0",0xFF);
+	&palignr("xmm0","xmm0",1);
+
+	# fall through...
+	&movdqa	(&QWP(8,"esp"),"xmm2");		# xmm8
+
+	# low round: same as high round, but no rotation and no rcon.
+&set_label("_vpaes_schedule_low_round");
+	# smear xmm7
+	&movdqa	("xmm1","xmm7");
+	&pslldq	("xmm7",4);
+	&pxor	("xmm7","xmm1");
+	&movdqa	("xmm1","xmm7");
+	&pslldq	("xmm7",8);
+	&pxor	("xmm7","xmm1");
+	&pxor	("xmm7",&QWP($k_s63,$const));
+
+	# subbyte
+	&movdqa	("xmm4",&QWP($k_s0F,$const));
+	&movdqa	("xmm5",&QWP($k_inv,$const));	# 4 : 1/j
+	&movdqa	("xmm1","xmm4");
+	&pandn	("xmm1","xmm0");
+	&psrld	("xmm1",4);			# 1 = i
+	&pand	("xmm0","xmm4");		# 0 = k
+	&movdqa	("xmm2",&QWP($k_inv+16,$const));# 2 : a/k
+	&pshufb	("xmm2","xmm0");		# 2 = a/k
+	&pxor	("xmm0","xmm1");		# 0 = j
+	&movdqa	("xmm3","xmm5");		# 3 : 1/i
+	&pshufb	("xmm3","xmm1");		# 3 = 1/i
+	&pxor	("xmm3","xmm2");		# 3 = iak = 1/i + a/k
+	&movdqa	("xmm4","xmm5");		# 4 : 1/j
+	&pshufb	("xmm4","xmm0");		# 4 = 1/j
+	&pxor	("xmm4","xmm2");		# 4 = jak = 1/j + a/k
+	&movdqa	("xmm2","xmm5");		# 2 : 1/iak
+	&pshufb	("xmm2","xmm3");		# 2 = 1/iak
+	&pxor	("xmm2","xmm0");		# 2 = io
+	&movdqa	("xmm3","xmm5");		# 3 : 1/jak
+	&pshufb	("xmm3","xmm4");		# 3 = 1/jak
+	&pxor	("xmm3","xmm1");		# 3 = jo
+	&movdqa	("xmm4",&QWP($k_sb1,$const));	# 4 : sbou
+	&pshufb	("xmm4","xmm2");		# 4 = sbou
+	&movdqa	("xmm0",&QWP($k_sb1+16,$const));# 0 : sbot
+	&pshufb	("xmm0","xmm3");		# 0 = sb1t
+	&pxor	("xmm0","xmm4");		# 0 = sbox output
+
+	# add in smeared stuff
+	&pxor	("xmm0","xmm7");
+	&movdqa	("xmm7","xmm0");
+	&ret	();
+&function_end_B("_vpaes_schedule_round");
+
+##
+##  .aes_schedule_transform
+##
+##  Linear-transform %xmm0 according to tables at (%ebx)
+##
+##  Output in %xmm0
+##  Clobbers %xmm1, %xmm2
+##
+&function_begin_B("_vpaes_schedule_transform");
+	&movdqa	("xmm2",&QWP($k_s0F,$const));
+	&movdqa	("xmm1","xmm2");
+	&pandn	("xmm1","xmm0");
+	&psrld	("xmm1",4);
+	&pand	("xmm0","xmm2");
+	&movdqa	("xmm2",&QWP(0,$base));
+	&pshufb	("xmm2","xmm0");
+	&movdqa	("xmm0",&QWP(16,$base));
+	&pshufb	("xmm0","xmm1");
+	&pxor	("xmm0","xmm2");
+	&ret	();
+&function_end_B("_vpaes_schedule_transform");
+
+##
+##  .aes_schedule_mangle
+##
+##  Mangle xmm0 from (basis-transformed) standard version
+##  to our version.
+##
+##  On encrypt,
+##    xor with 0x63
+##    multiply by circulant 0,1,1,1
+##    apply shiftrows transform
+##
+##  On decrypt,
+##    xor with 0x63
+##    multiply by "inverse mixcolumns" circulant E,B,D,9
+##    deskew
+##    apply shiftrows transform
+##
+##
+##  Writes out to (%edx), and increments or decrements it
+##  Keeps track of round number mod 4 in %ecx
+##  Preserves xmm0
+##  Clobbers xmm1-xmm5
+##
+&function_begin_B("_vpaes_schedule_mangle");
+	&movdqa	("xmm4","xmm0");	# save xmm0 for later
+	&movdqa	("xmm5",&QWP($k_mc_forward,$const));
+	&test	($out,$out);
+	&jnz	(&label("schedule_mangle_dec"));
+
+	# encrypting
+	&add	($key,16);
+	&pxor	("xmm4",&QWP($k_s63,$const));
+	&pshufb	("xmm4","xmm5");
+	&movdqa	("xmm3","xmm4");
+	&pshufb	("xmm4","xmm5");
+	&pxor	("xmm3","xmm4");
+	&pshufb	("xmm4","xmm5");
+	&pxor	("xmm3","xmm4");
+
+	&jmp	(&label("schedule_mangle_both"));
+
+&set_label("schedule_mangle_dec",16);
+	# inverse mix columns
+	&movdqa	("xmm2",&QWP($k_s0F,$const));
+	&lea	($inp,&DWP($k_dksd,$const));
+	&movdqa	("xmm1","xmm2");
+	&pandn	("xmm1","xmm4");
+	&psrld	("xmm1",4);			# 1 = hi
+	&pand	("xmm4","xmm2");		# 4 = lo
+
+	&movdqa	("xmm2",&QWP(0,$inp));
+	&pshufb	("xmm2","xmm4");
+	&movdqa	("xmm3",&QWP(0x10,$inp));
+	&pshufb	("xmm3","xmm1");
+	&pxor	("xmm3","xmm2");
+	&pshufb	("xmm3","xmm5");
+
+	&movdqa	("xmm2",&QWP(0x20,$inp));
+	&pshufb	("xmm2","xmm4");
+	&pxor	("xmm2","xmm3");
+	&movdqa	("xmm3",&QWP(0x30,$inp));
+	&pshufb	("xmm3","xmm1");
+	&pxor	("xmm3","xmm2");
+	&pshufb	("xmm3","xmm5");
+
+	&movdqa	("xmm2",&QWP(0x40,$inp));
+	&pshufb	("xmm2","xmm4");
+	&pxor	("xmm2","xmm3");
+	&movdqa	("xmm3",&QWP(0x50,$inp));
+	&pshufb	("xmm3","xmm1");
+	&pxor	("xmm3","xmm2");
+	&pshufb	("xmm3","xmm5");
+
+	&movdqa	("xmm2",&QWP(0x60,$inp));
+	&pshufb	("xmm2","xmm4");
+	&pxor	("xmm2","xmm3");
+	&movdqa	("xmm3",&QWP(0x70,$inp));
+	&pshufb	("xmm3","xmm1");
+	&pxor	("xmm3","xmm2");
+
+	&add	($key,-16);
+
+&set_label("schedule_mangle_both");
+	&movdqa	("xmm1",&QWP($k_sr,$const,$magic));
+	&pshufb	("xmm3","xmm1");
+	&add	($magic,-16);
+	&and	($magic,0x30);
+	&movdqu	(&QWP(0,$key),"xmm3");
+	&ret	();
+&function_end_B("_vpaes_schedule_mangle");
+
+#
+# Interface to OpenSSL
+#
+&function_begin("${PREFIX}_set_encrypt_key");
+	&mov	($inp,&wparam(0));		# inp
+	&lea	($base,&DWP(-56,"esp"));
+	&mov	($round,&wparam(1));		# bits
+	&and	($base,-16);
+	&mov	($key,&wparam(2));		# key
+	&xchg	($base,"esp");			# alloca
+	&mov	(&DWP(48,"esp"),$base);
+
+	&mov	($base,$round);
+	&shr	($base,5);
+	&add	($base,5);
+	&mov	(&DWP(240,$key),$base);		# AES_KEY->rounds = nbits/32+5;
+	&mov	($magic,0x30);
+	&mov	($out,0);
+
+	&lea	($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+	&call	("_vpaes_schedule_core");
+&set_label("pic_point");
+
+	&mov	("esp",&DWP(48,"esp"));
+	&xor	("eax","eax");
+&function_end("${PREFIX}_set_encrypt_key");
+
+&function_begin("${PREFIX}_set_decrypt_key");
+	&mov	($inp,&wparam(0));		# inp
+	&lea	($base,&DWP(-56,"esp"));
+	&mov	($round,&wparam(1));		# bits
+	&and	($base,-16);
+	&mov	($key,&wparam(2));		# key
+	&xchg	($base,"esp");			# alloca
+	&mov	(&DWP(48,"esp"),$base);
+
+	&mov	($base,$round);
+	&shr	($base,5);
+	&add	($base,5);
+	&mov	(&DWP(240,$key),$base);	# AES_KEY->rounds = nbits/32+5;
+	&shl	($base,4);
+	&lea	($key,&DWP(16,$key,$base));
+
+	&mov	($out,1);
+	&mov	($magic,$round);
+	&shr	($magic,1);
+	&and	($magic,32);
+	&xor	($magic,32);			# nbits==192?0:32;
+
+	&lea	($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+	&call	("_vpaes_schedule_core");
+&set_label("pic_point");
+
+	&mov	("esp",&DWP(48,"esp"));
+	&xor	("eax","eax");
+&function_end("${PREFIX}_set_decrypt_key");
+
+&function_begin("${PREFIX}_encrypt");
+	&lea	($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+	&call	("_vpaes_preheat");
+&set_label("pic_point");
+	&mov	($inp,&wparam(0));		# inp
+	&lea	($base,&DWP(-56,"esp"));
+	&mov	($out,&wparam(1));		# out
+	&and	($base,-16);
+	&mov	($key,&wparam(2));		# key
+	&xchg	($base,"esp");			# alloca
+	&mov	(&DWP(48,"esp"),$base);
+
+	&movdqu	("xmm0",&QWP(0,$inp));
+	&call	("_vpaes_encrypt_core");
+	&movdqu	(&QWP(0,$out),"xmm0");
+
+	&mov	("esp",&DWP(48,"esp"));
+&function_end("${PREFIX}_encrypt");
+
+&function_begin("${PREFIX}_decrypt");
+	&lea	($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+	&call	("_vpaes_preheat");
+&set_label("pic_point");
+	&mov	($inp,&wparam(0));		# inp
+	&lea	($base,&DWP(-56,"esp"));
+	&mov	($out,&wparam(1));		# out
+	&and	($base,-16);
+	&mov	($key,&wparam(2));		# key
+	&xchg	($base,"esp");			# alloca
+	&mov	(&DWP(48,"esp"),$base);
+
+	&movdqu	("xmm0",&QWP(0,$inp));
+	&call	("_vpaes_decrypt_core");
+	&movdqu	(&QWP(0,$out),"xmm0");
+
+	&mov	("esp",&DWP(48,"esp"));
+&function_end("${PREFIX}_decrypt");
+
+&function_begin("${PREFIX}_cbc_encrypt");
+	&mov	($inp,&wparam(0));		# inp
+	&mov	($out,&wparam(1));		# out
+	&mov	($round,&wparam(2));		# len
+	&mov	($key,&wparam(3));		# key
+	&sub	($round,16);
+	&jc	(&label("cbc_abort"));
+	&lea	($base,&DWP(-56,"esp"));
+	&mov	($const,&wparam(4));		# ivp
+	&and	($base,-16);
+	&mov	($magic,&wparam(5));		# enc
+	&xchg	($base,"esp");			# alloca
+	&movdqu	("xmm1",&QWP(0,$const));	# load IV
+	&sub	($out,$inp);
+	&mov	(&DWP(48,"esp"),$base);
+
+	&mov	(&DWP(0,"esp"),$out);		# save out
+	&mov	(&DWP(4,"esp"),$key);		# save key
+	&mov	(&DWP(8,"esp"),$const);		# save ivp
+	&mov	($out,$round);			# $out works as $len
+
+	&lea	($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
+	&call	("_vpaes_preheat");
+&set_label("pic_point");
+	&cmp	($magic,0);
+	&je	(&label("cbc_dec_loop"));
+	&jmp	(&label("cbc_enc_loop"));
+
+&set_label("cbc_enc_loop",16);
+	&movdqu	("xmm0",&QWP(0,$inp));		# load input
+	&pxor	("xmm0","xmm1");		# inp^=iv
+	&call	("_vpaes_encrypt_core");
+	&mov	($base,&DWP(0,"esp"));		# restore out
+	&mov	($key,&DWP(4,"esp"));		# restore key
+	&movdqa	("xmm1","xmm0");
+	&movdqu	(&QWP(0,$base,$inp),"xmm0");	# write output
+	&lea	($inp,&DWP(16,$inp));
+	&sub	($out,16);
+	&jnc	(&label("cbc_enc_loop"));
+	&jmp	(&label("cbc_done"));
+
+&set_label("cbc_dec_loop",16);
+	&movdqu	("xmm0",&QWP(0,$inp));		# load input
+	&movdqa	(&QWP(16,"esp"),"xmm1");	# save IV
+	&movdqa	(&QWP(32,"esp"),"xmm0");	# save future IV
+	&call	("_vpaes_decrypt_core");
+	&mov	($base,&DWP(0,"esp"));		# restore out
+	&mov	($key,&DWP(4,"esp"));		# restore key
+	&pxor	("xmm0",&QWP(16,"esp"));	# out^=iv
+	&movdqa	("xmm1",&QWP(32,"esp"));	# load next IV
+	&movdqu	(&QWP(0,$base,$inp),"xmm0");	# write output
+	&lea	($inp,&DWP(16,$inp));
+	&sub	($out,16);
+	&jnc	(&label("cbc_dec_loop"));
+
+&set_label("cbc_done");
+	&mov	($base,&DWP(8,"esp"));		# restore ivp
+	&mov	("esp",&DWP(48,"esp"));
+	&movdqu	(&QWP(0,$base),"xmm1");		# write IV
+&set_label("cbc_abort");
+&function_end("${PREFIX}_cbc_encrypt");
+
+&asm_finish();
+
+close STDOUT;
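
The CBC chaining that the cbc_enc_loop/cbc_dec_loop blocks above implement can be summarized by the following C sketch. It is a simplified model for reference only, not code from this change: AES_encrypt/AES_decrypt stand in for _vpaes_encrypt_core/_vpaes_decrypt_core and, like the assembly, only whole 16-byte blocks are handled.

#include <stdint.h>
#include <string.h>
#include <openssl/aes.h>

/* Simplified model of the vpaes CBC loops: whole 16-byte blocks only.
 * AES_encrypt/AES_decrypt stand in for the vpaes core routines. */
static void cbc_sketch(const uint8_t *in, uint8_t *out, size_t len,
                       const AES_KEY *key, uint8_t iv[16], int enc) {
  uint8_t block[16], prev[16];
  memcpy(prev, iv, 16);
  for (size_t off = 0; off + 16 <= len; off += 16) {
    if (enc) {
      for (int i = 0; i < 16; i++) {
        block[i] = in[off + i] ^ prev[i];    /* inp ^= iv              */
      }
      AES_encrypt(block, out + off, key);
      memcpy(prev, out + off, 16);           /* ciphertext is next IV  */
    } else {
      memcpy(block, in + off, 16);           /* save future IV         */
      AES_decrypt(in + off, out + off, key);
      for (int i = 0; i < 16; i++) {
        out[off + i] ^= prev[i];             /* out ^= iv              */
      }
      memcpy(prev, block, 16);               /* load next IV           */
    }
  }
  memcpy(iv, prev, 16);                      /* write IV back          */
}
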
diff --git a/src/crypto/fipsmodule/aes/asm/vpaes-x86_64.pl b/src/crypto/fipsmodule/aes/asm/vpaes-x86_64.pl
new file mode 100644
index 0000000..eb4c724
--- /dev/null
+++ b/src/crypto/fipsmodule/aes/asm/vpaes-x86_64.pl
@@ -0,0 +1,1208 @@
+#!/usr/bin/env perl
+
+######################################################################
+## Constant-time SSSE3 AES core implementation.
+## version 0.1
+##
+## By Mike Hamburg (Stanford University), 2009
+## Public domain.
+##
+## For details see http://shiftleft.org/papers/vector_aes/ and
+## http://crypto.stanford.edu/vpaes/.
+
+######################################################################
+# September 2011.
+#
+# Interface to OpenSSL as "almost" drop-in replacement for
+# aes-x86_64.pl. "Almost" refers to the fact that AES_cbc_encrypt
+# doesn't handle partial vectors (doesn't have to if called from
+# EVP only). "Drop-in" implies that this module doesn't share key
+# schedule structure with the original, nor does it make assumptions
+# about its alignment...
+#
+# Performance summary. The aes-x86_64.pl column lists large-block CBC
+# encrypt/decrypt/with-hyper-threading-off(*) results in cycles per
+# byte processed with a 128-bit key; the vpaes-x86_64.pl column lists
+# the corresponding large-block CBC encrypt/decrypt results.
+#
+#		aes-x86_64.pl		vpaes-x86_64.pl
+#
+# Core 2(**)	29.6/41.1/14.3		21.9/25.2(***)
+# Nehalem	29.6/40.3/14.6		10.0/11.8
+# Atom		57.3/74.2/32.1		60.9/77.2(***)
+# Silvermont	52.7/64.0/19.5		48.8/60.8(***)
+# Goldmont	38.9/49.0/17.8		10.6/12.6
+#
+# (*)	"Hyper-threading" in the context refers rather to cache shared
+#	among multiple cores, than to specifically Intel HTT. As vast
+#	majority of contemporary cores share cache, slower code path
+#	is common place. In other words "with-hyper-threading-off"
+#	results are presented mostly for reference purposes.
+#
+# (**)	"Core 2" refers to initial 65nm design, a.k.a. Conroe.
+#
+# (***)	The less impressive improvement on Core 2 and Atom is due to slow
+#	pshufb; it is nevertheless a respectable +36%/62% improvement on
+#	Core 2 (as implied, over the "hyper-threading-safe" code path).
+#
+#						<appro@openssl.org>
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
+$PREFIX="vpaes";
+
+$code.=<<___;
+.text
+
+##
+##  _aes_encrypt_core
+##
+##  AES-encrypt %xmm0.
+##
+##  Inputs:
+##     %xmm0 = input
+##     %xmm9-%xmm15 as in _vpaes_preheat
+##    (%rdx) = scheduled keys
+##
+##  Output in %xmm0
+##  Clobbers  %xmm1-%xmm5, %r9, %r10, %r11, %rax
+##  Preserves %xmm6 - %xmm8 so you get some local vectors
+##
+##
+.type	_vpaes_encrypt_core,\@abi-omnipotent
+.align 16
+_vpaes_encrypt_core:
+	mov	%rdx,	%r9
+	mov	\$16,	%r11
+	mov	240(%rdx),%eax
+	movdqa	%xmm9,	%xmm1
+	movdqa	.Lk_ipt(%rip), %xmm2	# iptlo
+	pandn	%xmm0,	%xmm1
+	movdqu	(%r9),	%xmm5		# round0 key
+	psrld	\$4,	%xmm1
+	pand	%xmm9,	%xmm0
+	pshufb	%xmm0,	%xmm2
+	movdqa	.Lk_ipt+16(%rip), %xmm0	# ipthi
+	pshufb	%xmm1,	%xmm0
+	pxor	%xmm5,	%xmm2
+	add	\$16,	%r9
+	pxor	%xmm2,	%xmm0
+	lea	.Lk_mc_backward(%rip),%r10
+	jmp	.Lenc_entry
+
+.align 16
+.Lenc_loop:
+	# middle of middle round
+	movdqa  %xmm13,	%xmm4	# 4 : sb1u
+	movdqa  %xmm12,	%xmm0	# 0 : sb1t
+	pshufb  %xmm2,	%xmm4	# 4 = sb1u
+	pshufb  %xmm3,	%xmm0	# 0 = sb1t
+	pxor	%xmm5,	%xmm4	# 4 = sb1u + k
+	movdqa  %xmm15,	%xmm5	# 4 : sb2u
+	pxor	%xmm4,	%xmm0	# 0 = A
+	movdqa	-0x40(%r11,%r10), %xmm1		# .Lk_mc_forward[]
+	pshufb	%xmm2,	%xmm5	# 4 = sb2u
+	movdqa	(%r11,%r10), %xmm4		# .Lk_mc_backward[]
+	movdqa	%xmm14, %xmm2	# 2 : sb2t
+	pshufb	%xmm3,  %xmm2	# 2 = sb2t
+	movdqa	%xmm0,  %xmm3	# 3 = A
+	pxor	%xmm5,	%xmm2	# 2 = 2A
+	pshufb  %xmm1,  %xmm0	# 0 = B
+	add	\$16,	%r9	# next key
+	pxor	%xmm2,  %xmm0	# 0 = 2A+B
+	pshufb	%xmm4,	%xmm3	# 3 = D
+	add	\$16,	%r11	# next mc
+	pxor	%xmm0,	%xmm3	# 3 = 2A+B+D
+	pshufb  %xmm1,	%xmm0	# 0 = 2B+C
+	and	\$0x30,	%r11	# ... mod 4
+	sub	\$1,%rax	# nr--
+	pxor	%xmm3,	%xmm0	# 0 = 2A+3B+C+D
+
+.Lenc_entry:
+	# top of round
+	movdqa  %xmm9, 	%xmm1	# 1 : i
+	movdqa	%xmm11, %xmm5	# 2 : a/k
+	pandn	%xmm0, 	%xmm1	# 1 = i<<4
+	psrld	\$4,   	%xmm1   # 1 = i
+	pand	%xmm9, 	%xmm0   # 0 = k
+	pshufb  %xmm0,  %xmm5	# 2 = a/k
+	movdqa	%xmm10,	%xmm3  	# 3 : 1/i
+	pxor	%xmm1,	%xmm0	# 0 = j
+	pshufb  %xmm1, 	%xmm3  	# 3 = 1/i
+	movdqa	%xmm10,	%xmm4  	# 4 : 1/j
+	pxor	%xmm5, 	%xmm3  	# 3 = iak = 1/i + a/k
+	pshufb	%xmm0, 	%xmm4  	# 4 = 1/j
+	movdqa	%xmm10,	%xmm2  	# 2 : 1/iak
+	pxor	%xmm5, 	%xmm4  	# 4 = jak = 1/j + a/k
+	pshufb  %xmm3,	%xmm2  	# 2 = 1/iak
+	movdqa	%xmm10, %xmm3   # 3 : 1/jak
+	pxor	%xmm0, 	%xmm2  	# 2 = io
+	pshufb  %xmm4,  %xmm3   # 3 = 1/jak
+	movdqu	(%r9),	%xmm5
+	pxor	%xmm1,  %xmm3   # 3 = jo
+	jnz	.Lenc_loop
+
+	# middle of last round
+	movdqa	-0x60(%r10), %xmm4	# 3 : sbou	.Lk_sbo
+	movdqa	-0x50(%r10), %xmm0	# 0 : sbot	.Lk_sbo+16
+	pshufb  %xmm2,  %xmm4	# 4 = sbou
+	pxor	%xmm5,  %xmm4	# 4 = sb1u + k
+	pshufb  %xmm3,	%xmm0	# 0 = sb1t
+	movdqa	0x40(%r11,%r10), %xmm1		# .Lk_sr[]
+	pxor	%xmm4,	%xmm0	# 0 = A
+	pshufb	%xmm1,	%xmm0
+	ret
+.size	_vpaes_encrypt_core,.-_vpaes_encrypt_core
+
+##
+##  Decryption core
+##
+##  Same API as encryption core.
+##
+.type	_vpaes_decrypt_core,\@abi-omnipotent
+.align	16
+_vpaes_decrypt_core:
+	mov	%rdx,	%r9		# load key
+	mov	240(%rdx),%eax
+	movdqa	%xmm9,	%xmm1
+	movdqa	.Lk_dipt(%rip), %xmm2	# iptlo
+	pandn	%xmm0,	%xmm1
+	mov	%rax,	%r11
+	psrld	\$4,	%xmm1
+	movdqu	(%r9),	%xmm5		# round0 key
+	shl	\$4,	%r11
+	pand	%xmm9,	%xmm0
+	pshufb	%xmm0,	%xmm2
+	movdqa	.Lk_dipt+16(%rip), %xmm0 # ipthi
+	xor	\$0x30,	%r11
+	lea	.Lk_dsbd(%rip),%r10
+	pshufb	%xmm1,	%xmm0
+	and	\$0x30,	%r11
+	pxor	%xmm5,	%xmm2
+	movdqa	.Lk_mc_forward+48(%rip), %xmm5
+	pxor	%xmm2,	%xmm0
+	add	\$16,	%r9
+	add	%r10,	%r11
+	jmp	.Ldec_entry
+
+.align 16
+.Ldec_loop:
+##
+##  Inverse mix columns
+##
+	movdqa  -0x20(%r10),%xmm4	# 4 : sb9u
+	movdqa  -0x10(%r10),%xmm1	# 0 : sb9t
+	pshufb	%xmm2,	%xmm4		# 4 = sb9u
+	pshufb	%xmm3,	%xmm1		# 0 = sb9t
+	pxor	%xmm4,	%xmm0
+	movdqa  0x00(%r10),%xmm4	# 4 : sbdu
+	pxor	%xmm1,	%xmm0		# 0 = ch
+	movdqa  0x10(%r10),%xmm1	# 0 : sbdt
+
+	pshufb	%xmm2,	%xmm4		# 4 = sbdu
+	pshufb	%xmm5,	%xmm0		# MC ch
+	pshufb	%xmm3,	%xmm1		# 0 = sbdt
+	pxor	%xmm4,	%xmm0		# 4 = ch
+	movdqa  0x20(%r10),%xmm4	# 4 : sbbu
+	pxor	%xmm1,	%xmm0		# 0 = ch
+	movdqa  0x30(%r10),%xmm1	# 0 : sbbt
+
+	pshufb	%xmm2,	%xmm4		# 4 = sbbu
+	pshufb	%xmm5,	%xmm0		# MC ch
+	pshufb	%xmm3,	%xmm1		# 0 = sbbt
+	pxor	%xmm4,	%xmm0		# 4 = ch
+	movdqa  0x40(%r10),%xmm4	# 4 : sbeu
+	pxor	%xmm1,	%xmm0		# 0 = ch
+	movdqa  0x50(%r10),%xmm1	# 0 : sbet
+
+	pshufb	%xmm2,	%xmm4		# 4 = sbeu
+	pshufb	%xmm5,	%xmm0		# MC ch
+	pshufb	%xmm3,	%xmm1		# 0 = sbet
+	pxor	%xmm4,	%xmm0		# 4 = ch
+	add	\$16, %r9		# next round key
+	palignr	\$12,	%xmm5,	%xmm5
+	pxor	%xmm1,	%xmm0		# 0 = ch
+	sub	\$1,%rax		# nr--
+
+.Ldec_entry:
+	# top of round
+	movdqa  %xmm9, 	%xmm1	# 1 : i
+	pandn	%xmm0, 	%xmm1	# 1 = i<<4
+	movdqa	%xmm11, %xmm2	# 2 : a/k
+	psrld	\$4,    %xmm1	# 1 = i
+	pand	%xmm9, 	%xmm0	# 0 = k
+	pshufb  %xmm0,  %xmm2	# 2 = a/k
+	movdqa	%xmm10,	%xmm3	# 3 : 1/i
+	pxor	%xmm1,	%xmm0	# 0 = j
+	pshufb  %xmm1, 	%xmm3	# 3 = 1/i
+	movdqa	%xmm10,	%xmm4	# 4 : 1/j
+	pxor	%xmm2, 	%xmm3	# 3 = iak = 1/i + a/k
+	pshufb	%xmm0, 	%xmm4	# 4 = 1/j
+	pxor	%xmm2, 	%xmm4	# 4 = jak = 1/j + a/k
+	movdqa	%xmm10,	%xmm2	# 2 : 1/iak
+	pshufb  %xmm3,	%xmm2	# 2 = 1/iak
+	movdqa	%xmm10, %xmm3	# 3 : 1/jak
+	pxor	%xmm0, 	%xmm2	# 2 = io
+	pshufb  %xmm4,  %xmm3	# 3 = 1/jak
+	movdqu	(%r9),	%xmm0
+	pxor	%xmm1,  %xmm3	# 3 = jo
+	jnz	.Ldec_loop
+
+	# middle of last round
+	movdqa	0x60(%r10), %xmm4	# 3 : sbou
+	pshufb  %xmm2,  %xmm4	# 4 = sbou
+	pxor	%xmm0,  %xmm4	# 4 = sb1u + k
+	movdqa	0x70(%r10), %xmm0	# 0 : sbot
+	movdqa	-0x160(%r11), %xmm2	# .Lk_sr-.Lk_dsbd=-0x160
+	pshufb  %xmm3,	%xmm0	# 0 = sb1t
+	pxor	%xmm4,	%xmm0	# 0 = A
+	pshufb	%xmm2,	%xmm0
+	ret
+.size	_vpaes_decrypt_core,.-_vpaes_decrypt_core
+
+########################################################
+##                                                    ##
+##                  AES key schedule                  ##
+##                                                    ##
+########################################################
+.type	_vpaes_schedule_core,\@abi-omnipotent
+.align	16
+_vpaes_schedule_core:
+	# rdi = key
+	# rsi = size in bits
+	# rdx = buffer
+	# rcx = direction.  0=encrypt, 1=decrypt
+
+	call	_vpaes_preheat		# load the tables
+	movdqa	.Lk_rcon(%rip), %xmm8	# load rcon
+	movdqu	(%rdi),	%xmm0		# load key (unaligned)
+
+	# input transform
+	movdqa	%xmm0,	%xmm3
+	lea	.Lk_ipt(%rip), %r11
+	call	_vpaes_schedule_transform
+	movdqa	%xmm0,	%xmm7
+
+	lea	.Lk_sr(%rip),%r10
+	test	%rcx,	%rcx
+	jnz	.Lschedule_am_decrypting
+
+	# encrypting, output zeroth round key after transform
+	movdqu	%xmm0,	(%rdx)
+	jmp	.Lschedule_go
+
+.Lschedule_am_decrypting:
+	# decrypting, output zeroth round key after shiftrows
+	movdqa	(%r8,%r10),%xmm1
+	pshufb  %xmm1,	%xmm3
+	movdqu	%xmm3,	(%rdx)
+	xor	\$0x30, %r8
+
+.Lschedule_go:
+	cmp	\$192,	%esi
+	ja	.Lschedule_256
+	je	.Lschedule_192
+	# 128: fall through
+
+##
+##  .schedule_128
+##
+##  128-bit specific part of key schedule.
+##
+##  This schedule is really simple, because all its parts
+##  are accomplished by the subroutines.
+##
+.Lschedule_128:
+	mov	\$10, %esi
+
+.Loop_schedule_128:
+	call 	_vpaes_schedule_round
+	dec	%rsi
+	jz 	.Lschedule_mangle_last
+	call	_vpaes_schedule_mangle	# write output
+	jmp 	.Loop_schedule_128
+
+##
+##  .aes_schedule_192
+##
+##  192-bit specific part of key schedule.
+##
+##  The main body of this schedule is the same as the 128-bit
+##  schedule, but with more smearing.  The long, high side is
+##  stored in %xmm7 as before, and the short, low side is in
+##  the high bits of %xmm6.
+##
+##  This schedule is somewhat nastier, however, because each
+##  round produces 192 bits of key material, or 1.5 round keys.
+##  Therefore, on each cycle we do 2 rounds and produce 3 round
+##  keys.
+##
+.align	16
+.Lschedule_192:
+	movdqu	8(%rdi),%xmm0		# load key part 2 (very unaligned)
+	call	_vpaes_schedule_transform	# input transform
+	movdqa	%xmm0,	%xmm6		# save short part
+	pxor	%xmm4,	%xmm4		# clear 4
+	movhlps	%xmm4,	%xmm6		# clobber low side with zeros
+	mov	\$4,	%esi
+
+.Loop_schedule_192:
+	call	_vpaes_schedule_round
+	palignr	\$8,%xmm6,%xmm0
+	call	_vpaes_schedule_mangle	# save key n
+	call	_vpaes_schedule_192_smear
+	call	_vpaes_schedule_mangle	# save key n+1
+	call	_vpaes_schedule_round
+	dec	%rsi
+	jz 	.Lschedule_mangle_last
+	call	_vpaes_schedule_mangle	# save key n+2
+	call	_vpaes_schedule_192_smear
+	jmp	.Loop_schedule_192
+
+##
+##  .aes_schedule_256
+##
+##  256-bit specific part of key schedule.
+##
+##  The structure here is very similar to the 128-bit
+##  schedule, but with an additional "low side" in
+##  %xmm6.  The low side's rounds are the same as the
+##  high side's, except no rcon and no rotation.
+##
+.align	16
+.Lschedule_256:
+	movdqu	16(%rdi),%xmm0		# load key part 2 (unaligned)
+	call	_vpaes_schedule_transform	# input transform
+	mov	\$7, %esi
+
+.Loop_schedule_256:
+	call	_vpaes_schedule_mangle	# output low result
+	movdqa	%xmm0,	%xmm6		# save cur_lo in xmm6
+
+	# high round
+	call	_vpaes_schedule_round
+	dec	%rsi
+	jz 	.Lschedule_mangle_last
+	call	_vpaes_schedule_mangle
+
+	# low round. swap xmm7 and xmm6
+	pshufd	\$0xFF,	%xmm0,	%xmm0
+	movdqa	%xmm7,	%xmm5
+	movdqa	%xmm6,	%xmm7
+	call	_vpaes_schedule_low_round
+	movdqa	%xmm5,	%xmm7
+
+	jmp	.Loop_schedule_256
+
+
+##
+##  .aes_schedule_mangle_last
+##
+##  Mangler for last round of key schedule
+##  Mangles %xmm0
+##    when encrypting, outputs out(%xmm0) ^ 63
+##    when decrypting, outputs unskew(%xmm0)
+##
+##  Always called right before return... jumps to cleanup and exits
+##
+.align	16
+.Lschedule_mangle_last:
+	# schedule last round key from xmm0
+	lea	.Lk_deskew(%rip),%r11	# prepare to deskew
+	test	%rcx, 	%rcx
+	jnz	.Lschedule_mangle_last_dec
+
+	# encrypting
+	movdqa	(%r8,%r10),%xmm1
+	pshufb	%xmm1,	%xmm0		# output permute
+	lea	.Lk_opt(%rip),	%r11	# prepare to output transform
+	add	\$32,	%rdx
+
+.Lschedule_mangle_last_dec:
+	add	\$-16,	%rdx
+	pxor	.Lk_s63(%rip),	%xmm0
+	call	_vpaes_schedule_transform # output transform
+	movdqu	%xmm0,	(%rdx)		# save last key
+
+	# cleanup
+	pxor	%xmm0,  %xmm0
+	pxor	%xmm1,  %xmm1
+	pxor	%xmm2,  %xmm2
+	pxor	%xmm3,  %xmm3
+	pxor	%xmm4,  %xmm4
+	pxor	%xmm5,  %xmm5
+	pxor	%xmm6,  %xmm6
+	pxor	%xmm7,  %xmm7
+	ret
+.size	_vpaes_schedule_core,.-_vpaes_schedule_core
+
+##
+##  .aes_schedule_192_smear
+##
+##  Smear the short, low side in the 192-bit key schedule.
+##
+##  Inputs:
+##    %xmm7: high side, b  a  x  y
+##    %xmm6:  low side, d  c  0  0
+##    %xmm13: 0
+##
+##  Outputs:
+##    %xmm6: b+c+d  b+c  0  0
+##    %xmm0: b+c+d  b+c  b  a
+##
+.type	_vpaes_schedule_192_smear,\@abi-omnipotent
+.align	16
+_vpaes_schedule_192_smear:
+	pshufd	\$0x80,	%xmm6,	%xmm1	# d c 0 0 -> c 0 0 0
+	pshufd	\$0xFE,	%xmm7,	%xmm0	# b a _ _ -> b b b a
+	pxor	%xmm1,	%xmm6		# -> c+d c 0 0
+	pxor	%xmm1,	%xmm1
+	pxor	%xmm0,	%xmm6		# -> b+c+d b+c b a
+	movdqa	%xmm6,	%xmm0
+	movhlps	%xmm1,	%xmm6		# clobber low side with zeros
+	ret
+.size	_vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
+
+##
+##  .aes_schedule_round
+##
+##  Runs one main round of the key schedule on %xmm0, %xmm7
+##
+##  Specifically, runs subbytes on the high dword of %xmm0
+##  then rotates it by one byte and xors into the low dword of
+##  %xmm7.
+##
+##  Adds rcon from low byte of %xmm8, then rotates %xmm8 for
+##  next rcon.
+##
+##  Smears the dwords of %xmm7 by xoring the low into the
+##  second low, result into third, result into highest.
+##
+##  Returns results in %xmm7 = %xmm0.
+##  Clobbers %xmm1-%xmm4, %r11.
+##
+.type	_vpaes_schedule_round,\@abi-omnipotent
+.align	16
+_vpaes_schedule_round:
+	# extract rcon from xmm8
+	pxor	%xmm1,	%xmm1
+	palignr	\$15,	%xmm8,	%xmm1
+	palignr	\$15,	%xmm8,	%xmm8
+	pxor	%xmm1,	%xmm7
+
+	# rotate
+	pshufd	\$0xFF,	%xmm0,	%xmm0
+	palignr	\$1,	%xmm0,	%xmm0
+
+	# fall through...
+
+	# low round: same as high round, but no rotation and no rcon.
+_vpaes_schedule_low_round:
+	# smear xmm7
+	movdqa	%xmm7,	%xmm1
+	pslldq	\$4,	%xmm7
+	pxor	%xmm1,	%xmm7
+	movdqa	%xmm7,	%xmm1
+	pslldq	\$8,	%xmm7
+	pxor	%xmm1,	%xmm7
+	pxor	.Lk_s63(%rip), %xmm7
+
+	# subbytes
+	movdqa  %xmm9, 	%xmm1
+	pandn	%xmm0, 	%xmm1
+	psrld	\$4,    %xmm1		# 1 = i
+	pand	%xmm9, 	%xmm0		# 0 = k
+	movdqa	%xmm11, %xmm2		# 2 : a/k
+	pshufb  %xmm0,  %xmm2		# 2 = a/k
+	pxor	%xmm1,	%xmm0		# 0 = j
+	movdqa	%xmm10,	%xmm3		# 3 : 1/i
+	pshufb  %xmm1, 	%xmm3		# 3 = 1/i
+	pxor	%xmm2, 	%xmm3		# 3 = iak = 1/i + a/k
+	movdqa	%xmm10,	%xmm4		# 4 : 1/j
+	pshufb	%xmm0, 	%xmm4		# 4 = 1/j
+	pxor	%xmm2, 	%xmm4		# 4 = jak = 1/j + a/k
+	movdqa	%xmm10,	%xmm2		# 2 : 1/iak
+	pshufb  %xmm3,	%xmm2		# 2 = 1/iak
+	pxor	%xmm0, 	%xmm2		# 2 = io
+	movdqa	%xmm10, %xmm3		# 3 : 1/jak
+	pshufb  %xmm4,  %xmm3		# 3 = 1/jak
+	pxor	%xmm1,  %xmm3		# 3 = jo
+	movdqa	%xmm13, %xmm4		# 4 : sbou
+	pshufb  %xmm2,  %xmm4		# 4 = sbou
+	movdqa	%xmm12, %xmm0		# 0 : sbot
+	pshufb  %xmm3,	%xmm0		# 0 = sb1t
+	pxor	%xmm4, 	%xmm0		# 0 = sbox output
+
+	# add in smeared stuff
+	pxor	%xmm7,	%xmm0
+	movdqa	%xmm0,	%xmm7
+	ret
+.size	_vpaes_schedule_round,.-_vpaes_schedule_round
+
+##
+##  .aes_schedule_transform
+##
+##  Linear-transform %xmm0 according to tables at (%r11)
+##
+##  Requires that %xmm9 = 0x0F0F... as in preheat
+##  Output in %xmm0
+##  Clobbers %xmm1, %xmm2
+##
+.type	_vpaes_schedule_transform,\@abi-omnipotent
+.align	16
+_vpaes_schedule_transform:
+	movdqa	%xmm9,	%xmm1
+	pandn	%xmm0,	%xmm1
+	psrld	\$4,	%xmm1
+	pand	%xmm9,	%xmm0
+	movdqa	(%r11), %xmm2 	# lo
+	pshufb	%xmm0,	%xmm2
+	movdqa	16(%r11), %xmm0 # hi
+	pshufb	%xmm1,	%xmm0
+	pxor	%xmm2,	%xmm0
+	ret
+.size	_vpaes_schedule_transform,.-_vpaes_schedule_transform
+
+##
+##  .aes_schedule_mangle
+##
+##  Mangle xmm0 from (basis-transformed) standard version
+##  to our version.
+##
+##  On encrypt,
+##    xor with 0x63
+##    multiply by circulant 0,1,1,1
+##    apply shiftrows transform
+##
+##  On decrypt,
+##    xor with 0x63
+##    multiply by "inverse mixcolumns" circulant E,B,D,9
+##    deskew
+##    apply shiftrows transform
+##
+##
+##  Writes out to (%rdx), and increments or decrements it
+##  Keeps track of round number mod 4 in %r8
+##  Preserves xmm0
+##  Clobbers xmm1-xmm5
+##
+.type	_vpaes_schedule_mangle,\@abi-omnipotent
+.align	16
+_vpaes_schedule_mangle:
+	movdqa	%xmm0,	%xmm4	# save xmm0 for later
+	movdqa	.Lk_mc_forward(%rip),%xmm5
+	test	%rcx, 	%rcx
+	jnz	.Lschedule_mangle_dec
+
+	# encrypting
+	add	\$16,	%rdx
+	pxor	.Lk_s63(%rip),%xmm4
+	pshufb	%xmm5,	%xmm4
+	movdqa	%xmm4,	%xmm3
+	pshufb	%xmm5,	%xmm4
+	pxor	%xmm4,	%xmm3
+	pshufb	%xmm5,	%xmm4
+	pxor	%xmm4,	%xmm3
+
+	jmp	.Lschedule_mangle_both
+.align	16
+.Lschedule_mangle_dec:
+	# inverse mix columns
+	lea	.Lk_dksd(%rip),%r11
+	movdqa	%xmm9,	%xmm1
+	pandn	%xmm4,	%xmm1
+	psrld	\$4,	%xmm1	# 1 = hi
+	pand	%xmm9,	%xmm4	# 4 = lo
+
+	movdqa	0x00(%r11), %xmm2
+	pshufb	%xmm4,	%xmm2
+	movdqa	0x10(%r11), %xmm3
+	pshufb	%xmm1,	%xmm3
+	pxor	%xmm2,	%xmm3
+	pshufb	%xmm5,	%xmm3
+
+	movdqa	0x20(%r11), %xmm2
+	pshufb	%xmm4,	%xmm2
+	pxor	%xmm3,	%xmm2
+	movdqa	0x30(%r11), %xmm3
+	pshufb	%xmm1,	%xmm3
+	pxor	%xmm2,	%xmm3
+	pshufb	%xmm5,	%xmm3
+
+	movdqa	0x40(%r11), %xmm2
+	pshufb	%xmm4,	%xmm2
+	pxor	%xmm3,	%xmm2
+	movdqa	0x50(%r11), %xmm3
+	pshufb	%xmm1,	%xmm3
+	pxor	%xmm2,	%xmm3
+	pshufb	%xmm5,	%xmm3
+
+	movdqa	0x60(%r11), %xmm2
+	pshufb	%xmm4,	%xmm2
+	pxor	%xmm3,	%xmm2
+	movdqa	0x70(%r11), %xmm3
+	pshufb	%xmm1,	%xmm3
+	pxor	%xmm2,	%xmm3
+
+	add	\$-16,	%rdx
+
+.Lschedule_mangle_both:
+	movdqa	(%r8,%r10),%xmm1
+	pshufb	%xmm1,%xmm3
+	add	\$-16,	%r8
+	and	\$0x30,	%r8
+	movdqu	%xmm3,	(%rdx)
+	ret
+.size	_vpaes_schedule_mangle,.-_vpaes_schedule_mangle
+
+#
+# Interface to OpenSSL
+#
+.globl	${PREFIX}_set_encrypt_key
+.type	${PREFIX}_set_encrypt_key,\@function,3
+.align	16
+${PREFIX}_set_encrypt_key:
+___
+$code.=<<___ if ($win64);
+	lea	-0xb8(%rsp),%rsp
+	movaps	%xmm6,0x10(%rsp)
+	movaps	%xmm7,0x20(%rsp)
+	movaps	%xmm8,0x30(%rsp)
+	movaps	%xmm9,0x40(%rsp)
+	movaps	%xmm10,0x50(%rsp)
+	movaps	%xmm11,0x60(%rsp)
+	movaps	%xmm12,0x70(%rsp)
+	movaps	%xmm13,0x80(%rsp)
+	movaps	%xmm14,0x90(%rsp)
+	movaps	%xmm15,0xa0(%rsp)
+.Lenc_key_body:
+___
+$code.=<<___;
+	mov	%esi,%eax
+	shr	\$5,%eax
+	add	\$5,%eax
+	mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
+
+	mov	\$0,%ecx
+	mov	\$0x30,%r8d
+	call	_vpaes_schedule_core
+___
+$code.=<<___ if ($win64);
+	movaps	0x10(%rsp),%xmm6
+	movaps	0x20(%rsp),%xmm7
+	movaps	0x30(%rsp),%xmm8
+	movaps	0x40(%rsp),%xmm9
+	movaps	0x50(%rsp),%xmm10
+	movaps	0x60(%rsp),%xmm11
+	movaps	0x70(%rsp),%xmm12
+	movaps	0x80(%rsp),%xmm13
+	movaps	0x90(%rsp),%xmm14
+	movaps	0xa0(%rsp),%xmm15
+	lea	0xb8(%rsp),%rsp
+.Lenc_key_epilogue:
+___
+$code.=<<___;
+	xor	%eax,%eax
+	ret
+.size	${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key
+
+.globl	${PREFIX}_set_decrypt_key
+.type	${PREFIX}_set_decrypt_key,\@function,3
+.align	16
+${PREFIX}_set_decrypt_key:
+___
+$code.=<<___ if ($win64);
+	lea	-0xb8(%rsp),%rsp
+	movaps	%xmm6,0x10(%rsp)
+	movaps	%xmm7,0x20(%rsp)
+	movaps	%xmm8,0x30(%rsp)
+	movaps	%xmm9,0x40(%rsp)
+	movaps	%xmm10,0x50(%rsp)
+	movaps	%xmm11,0x60(%rsp)
+	movaps	%xmm12,0x70(%rsp)
+	movaps	%xmm13,0x80(%rsp)
+	movaps	%xmm14,0x90(%rsp)
+	movaps	%xmm15,0xa0(%rsp)
+.Ldec_key_body:
+___
+$code.=<<___;
+	mov	%esi,%eax
+	shr	\$5,%eax
+	add	\$5,%eax
+	mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
+	shl	\$4,%eax
+	lea	16(%rdx,%rax),%rdx
+
+	mov	\$1,%ecx
+	mov	%esi,%r8d
+	shr	\$1,%r8d
+	and	\$32,%r8d
+	xor	\$32,%r8d	# nbits==192?0:32
+	call	_vpaes_schedule_core
+___
+$code.=<<___ if ($win64);
+	movaps	0x10(%rsp),%xmm6
+	movaps	0x20(%rsp),%xmm7
+	movaps	0x30(%rsp),%xmm8
+	movaps	0x40(%rsp),%xmm9
+	movaps	0x50(%rsp),%xmm10
+	movaps	0x60(%rsp),%xmm11
+	movaps	0x70(%rsp),%xmm12
+	movaps	0x80(%rsp),%xmm13
+	movaps	0x90(%rsp),%xmm14
+	movaps	0xa0(%rsp),%xmm15
+	lea	0xb8(%rsp),%rsp
+.Ldec_key_epilogue:
+___
+$code.=<<___;
+	xor	%eax,%eax
+	ret
+.size	${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key
+
+.globl	${PREFIX}_encrypt
+.type	${PREFIX}_encrypt,\@function,3
+.align	16
+${PREFIX}_encrypt:
+___
+$code.=<<___ if ($win64);
+	lea	-0xb8(%rsp),%rsp
+	movaps	%xmm6,0x10(%rsp)
+	movaps	%xmm7,0x20(%rsp)
+	movaps	%xmm8,0x30(%rsp)
+	movaps	%xmm9,0x40(%rsp)
+	movaps	%xmm10,0x50(%rsp)
+	movaps	%xmm11,0x60(%rsp)
+	movaps	%xmm12,0x70(%rsp)
+	movaps	%xmm13,0x80(%rsp)
+	movaps	%xmm14,0x90(%rsp)
+	movaps	%xmm15,0xa0(%rsp)
+.Lenc_body:
+___
+$code.=<<___;
+	movdqu	(%rdi),%xmm0
+	call	_vpaes_preheat
+	call	_vpaes_encrypt_core
+	movdqu	%xmm0,(%rsi)
+___
+$code.=<<___ if ($win64);
+	movaps	0x10(%rsp),%xmm6
+	movaps	0x20(%rsp),%xmm7
+	movaps	0x30(%rsp),%xmm8
+	movaps	0x40(%rsp),%xmm9
+	movaps	0x50(%rsp),%xmm10
+	movaps	0x60(%rsp),%xmm11
+	movaps	0x70(%rsp),%xmm12
+	movaps	0x80(%rsp),%xmm13
+	movaps	0x90(%rsp),%xmm14
+	movaps	0xa0(%rsp),%xmm15
+	lea	0xb8(%rsp),%rsp
+.Lenc_epilogue:
+___
+$code.=<<___;
+	ret
+.size	${PREFIX}_encrypt,.-${PREFIX}_encrypt
+
+.globl	${PREFIX}_decrypt
+.type	${PREFIX}_decrypt,\@function,3
+.align	16
+${PREFIX}_decrypt:
+___
+$code.=<<___ if ($win64);
+	lea	-0xb8(%rsp),%rsp
+	movaps	%xmm6,0x10(%rsp)
+	movaps	%xmm7,0x20(%rsp)
+	movaps	%xmm8,0x30(%rsp)
+	movaps	%xmm9,0x40(%rsp)
+	movaps	%xmm10,0x50(%rsp)
+	movaps	%xmm11,0x60(%rsp)
+	movaps	%xmm12,0x70(%rsp)
+	movaps	%xmm13,0x80(%rsp)
+	movaps	%xmm14,0x90(%rsp)
+	movaps	%xmm15,0xa0(%rsp)
+.Ldec_body:
+___
+$code.=<<___;
+	movdqu	(%rdi),%xmm0
+	call	_vpaes_preheat
+	call	_vpaes_decrypt_core
+	movdqu	%xmm0,(%rsi)
+___
+$code.=<<___ if ($win64);
+	movaps	0x10(%rsp),%xmm6
+	movaps	0x20(%rsp),%xmm7
+	movaps	0x30(%rsp),%xmm8
+	movaps	0x40(%rsp),%xmm9
+	movaps	0x50(%rsp),%xmm10
+	movaps	0x60(%rsp),%xmm11
+	movaps	0x70(%rsp),%xmm12
+	movaps	0x80(%rsp),%xmm13
+	movaps	0x90(%rsp),%xmm14
+	movaps	0xa0(%rsp),%xmm15
+	lea	0xb8(%rsp),%rsp
+.Ldec_epilogue:
+___
+$code.=<<___;
+	ret
+.size	${PREFIX}_decrypt,.-${PREFIX}_decrypt
+___
+{
+my ($inp,$out,$len,$key,$ivp,$enc)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9");
+# void AES_cbc_encrypt (const unsigned char *inp, unsigned char *out,
+#                       size_t length, const AES_KEY *key,
+#                       unsigned char *ivp,const int enc);
+$code.=<<___;
+.globl	${PREFIX}_cbc_encrypt
+.type	${PREFIX}_cbc_encrypt,\@function,6
+.align	16
+${PREFIX}_cbc_encrypt:
+	xchg	$key,$len
+___
+($len,$key)=($key,$len);
+$code.=<<___;
+	sub	\$16,$len
+	jc	.Lcbc_abort
+___
+$code.=<<___ if ($win64);
+	lea	-0xb8(%rsp),%rsp
+	movaps	%xmm6,0x10(%rsp)
+	movaps	%xmm7,0x20(%rsp)
+	movaps	%xmm8,0x30(%rsp)
+	movaps	%xmm9,0x40(%rsp)
+	movaps	%xmm10,0x50(%rsp)
+	movaps	%xmm11,0x60(%rsp)
+	movaps	%xmm12,0x70(%rsp)
+	movaps	%xmm13,0x80(%rsp)
+	movaps	%xmm14,0x90(%rsp)
+	movaps	%xmm15,0xa0(%rsp)
+.Lcbc_body:
+___
+$code.=<<___;
+	movdqu	($ivp),%xmm6		# load IV
+	sub	$inp,$out
+	call	_vpaes_preheat
+	cmp	\$0,${enc}d
+	je	.Lcbc_dec_loop
+	jmp	.Lcbc_enc_loop
+.align	16
+.Lcbc_enc_loop:
+	movdqu	($inp),%xmm0
+	pxor	%xmm6,%xmm0
+	call	_vpaes_encrypt_core
+	movdqa	%xmm0,%xmm6
+	movdqu	%xmm0,($out,$inp)
+	lea	16($inp),$inp
+	sub	\$16,$len
+	jnc	.Lcbc_enc_loop
+	jmp	.Lcbc_done
+.align	16
+.Lcbc_dec_loop:
+	movdqu	($inp),%xmm0
+	movdqa	%xmm0,%xmm7
+	call	_vpaes_decrypt_core
+	pxor	%xmm6,%xmm0
+	movdqa	%xmm7,%xmm6
+	movdqu	%xmm0,($out,$inp)
+	lea	16($inp),$inp
+	sub	\$16,$len
+	jnc	.Lcbc_dec_loop
+.Lcbc_done:
+	movdqu	%xmm6,($ivp)		# save IV
+___
+$code.=<<___ if ($win64);
+	movaps	0x10(%rsp),%xmm6
+	movaps	0x20(%rsp),%xmm7
+	movaps	0x30(%rsp),%xmm8
+	movaps	0x40(%rsp),%xmm9
+	movaps	0x50(%rsp),%xmm10
+	movaps	0x60(%rsp),%xmm11
+	movaps	0x70(%rsp),%xmm12
+	movaps	0x80(%rsp),%xmm13
+	movaps	0x90(%rsp),%xmm14
+	movaps	0xa0(%rsp),%xmm15
+	lea	0xb8(%rsp),%rsp
+.Lcbc_epilogue:
+___
+$code.=<<___;
+.Lcbc_abort:
+	ret
+.size	${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
+___
+}
+$code.=<<___;
+##
+##  _aes_preheat
+##
+##  Fills register %r10 -> .aes_consts (so you can -fPIC)
+##  and %xmm9-%xmm15 as specified below.
+##
+.type	_vpaes_preheat,\@abi-omnipotent
+.align	16
+_vpaes_preheat:
+	lea	.Lk_s0F(%rip), %r10
+	movdqa	-0x20(%r10), %xmm10	# .Lk_inv
+	movdqa	-0x10(%r10), %xmm11	# .Lk_inv+16
+	movdqa	0x00(%r10), %xmm9	# .Lk_s0F
+	movdqa	0x30(%r10), %xmm13	# .Lk_sb1
+	movdqa	0x40(%r10), %xmm12	# .Lk_sb1+16
+	movdqa	0x50(%r10), %xmm15	# .Lk_sb2
+	movdqa	0x60(%r10), %xmm14	# .Lk_sb2+16
+	ret
+.size	_vpaes_preheat,.-_vpaes_preheat
+########################################################
+##                                                    ##
+##                     Constants                      ##
+##                                                    ##
+########################################################
+.type	_vpaes_consts,\@object
+.align	64
+_vpaes_consts:
+.Lk_inv:	# inv, inva
+	.quad	0x0E05060F0D080180, 0x040703090A0B0C02
+	.quad	0x01040A060F0B0780, 0x030D0E0C02050809
+
+.Lk_s0F:	# s0F
+	.quad	0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F
+
+.Lk_ipt:	# input transform (lo, hi)
+	.quad	0xC2B2E8985A2A7000, 0xCABAE09052227808
+	.quad	0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
+
+.Lk_sb1:	# sb1u, sb1t
+	.quad	0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
+	.quad	0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
+.Lk_sb2:	# sb2u, sb2t
+	.quad	0xE27A93C60B712400, 0x5EB7E955BC982FCD
+	.quad	0x69EB88400AE12900, 0xC2A163C8AB82234A
+.Lk_sbo:	# sbou, sbot
+	.quad	0xD0D26D176FBDC700, 0x15AABF7AC502A878
+	.quad	0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
+
+.Lk_mc_forward:	# mc_forward
+	.quad	0x0407060500030201, 0x0C0F0E0D080B0A09
+	.quad	0x080B0A0904070605, 0x000302010C0F0E0D
+	.quad	0x0C0F0E0D080B0A09, 0x0407060500030201
+	.quad	0x000302010C0F0E0D, 0x080B0A0904070605
+
+.Lk_mc_backward:# mc_backward
+	.quad	0x0605040702010003, 0x0E0D0C0F0A09080B
+	.quad	0x020100030E0D0C0F, 0x0A09080B06050407
+	.quad	0x0E0D0C0F0A09080B, 0x0605040702010003
+	.quad	0x0A09080B06050407, 0x020100030E0D0C0F
+
+.Lk_sr:		# sr
+	.quad	0x0706050403020100, 0x0F0E0D0C0B0A0908
+	.quad	0x030E09040F0A0500, 0x0B06010C07020D08
+	.quad	0x0F060D040B020900, 0x070E050C030A0108
+	.quad	0x0B0E0104070A0D00, 0x0306090C0F020508
+
+.Lk_rcon:	# rcon
+	.quad	0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
+
+.Lk_s63:	# s63: all equal to 0x63 transformed
+	.quad	0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B
+
+.Lk_opt:	# output transform
+	.quad	0xFF9F4929D6B66000, 0xF7974121DEBE6808
+	.quad	0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
+
+.Lk_deskew:	# deskew tables: inverts the sbox's "skew"
+	.quad	0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
+	.quad	0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
+
+##
+##  Decryption stuff
+##  Key schedule constants
+##
+.Lk_dksd:	# decryption key schedule: invskew x*D
+	.quad	0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
+	.quad	0x41C277F4B5368300, 0x5FDC69EAAB289D1E
+.Lk_dksb:	# decryption key schedule: invskew x*B
+	.quad	0x9A4FCA1F8550D500, 0x03D653861CC94C99
+	.quad	0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
+.Lk_dkse:	# decryption key schedule: invskew x*E + 0x63
+	.quad	0xD5031CCA1FC9D600, 0x53859A4C994F5086
+	.quad	0xA23196054FDC7BE8, 0xCD5EF96A20B31487
+.Lk_dks9:	# decryption key schedule: invskew x*9
+	.quad	0xB6116FC87ED9A700, 0x4AED933482255BFC
+	.quad	0x4576516227143300, 0x8BB89FACE9DAFDCE
+
+##
+##  Decryption stuff
+##  Round function constants
+##
+.Lk_dipt:	# decryption input transform
+	.quad	0x0F505B040B545F00, 0x154A411E114E451A
+	.quad	0x86E383E660056500, 0x12771772F491F194
+
+.Lk_dsb9:	# decryption sbox output *9*u, *9*t
+	.quad	0x851C03539A86D600, 0xCAD51F504F994CC9
+	.quad	0xC03B1789ECD74900, 0x725E2C9EB2FBA565
+.Lk_dsbd:	# decryption sbox output *D*u, *D*t
+	.quad	0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
+	.quad	0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
+.Lk_dsbb:	# decryption sbox output *B*u, *B*t
+	.quad	0xD022649296B44200, 0x602646F6B0F2D404
+	.quad	0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
+.Lk_dsbe:	# decryption sbox output *E*u, *E*t
+	.quad	0x46F2929626D4D000, 0x2242600464B4F6B0
+	.quad	0x0C55A6CDFFAAC100, 0x9467F36B98593E32
+.Lk_dsbo:	# decryption sbox final output
+	.quad	0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
+	.quad	0x12D7560F93441D00, 0xCA4B8159D8C58E9C
+.asciz	"Vector Permutation AES for x86_64/SSSE3, Mike Hamburg (Stanford University)"
+.align	64
+.size	_vpaes_consts,.-_vpaes_consts
+___
+
+if ($win64) {
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+.type	se_handler,\@abi-omnipotent
+.align	16
+se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lin_prologue
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lin_prologue
+
+	lea	16(%rax),%rsi		# %xmm save area
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
+	.long	0xa548f3fc		# cld; rep movsq
+	lea	0xb8(%rax),%rax		# adjust stack pointer
+
+.Lin_prologue:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$`1232/8`,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	se_handler,.-se_handler
+
+.section	.pdata
+.align	4
+	.rva	.LSEH_begin_${PREFIX}_set_encrypt_key
+	.rva	.LSEH_end_${PREFIX}_set_encrypt_key
+	.rva	.LSEH_info_${PREFIX}_set_encrypt_key
+
+	.rva	.LSEH_begin_${PREFIX}_set_decrypt_key
+	.rva	.LSEH_end_${PREFIX}_set_decrypt_key
+	.rva	.LSEH_info_${PREFIX}_set_decrypt_key
+
+	.rva	.LSEH_begin_${PREFIX}_encrypt
+	.rva	.LSEH_end_${PREFIX}_encrypt
+	.rva	.LSEH_info_${PREFIX}_encrypt
+
+	.rva	.LSEH_begin_${PREFIX}_decrypt
+	.rva	.LSEH_end_${PREFIX}_decrypt
+	.rva	.LSEH_info_${PREFIX}_decrypt
+
+	.rva	.LSEH_begin_${PREFIX}_cbc_encrypt
+	.rva	.LSEH_end_${PREFIX}_cbc_encrypt
+	.rva	.LSEH_info_${PREFIX}_cbc_encrypt
+
+.section	.xdata
+.align	8
+.LSEH_info_${PREFIX}_set_encrypt_key:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lenc_key_body,.Lenc_key_epilogue	# HandlerData[]
+.LSEH_info_${PREFIX}_set_decrypt_key:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Ldec_key_body,.Ldec_key_epilogue	# HandlerData[]
+.LSEH_info_${PREFIX}_encrypt:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lenc_body,.Lenc_epilogue		# HandlerData[]
+.LSEH_info_${PREFIX}_decrypt:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Ldec_body,.Ldec_epilogue		# HandlerData[]
+.LSEH_info_${PREFIX}_cbc_encrypt:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lcbc_body,.Lcbc_epilogue		# HandlerData[]
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+print $code;
+
+close STDOUT;
diff --git a/src/crypto/fipsmodule/aes/key_wrap.c b/src/crypto/fipsmodule/aes/key_wrap.c
new file mode 100644
index 0000000..73de17f
--- /dev/null
+++ b/src/crypto/fipsmodule/aes/key_wrap.c
@@ -0,0 +1,138 @@
+/* ====================================================================
+ * Copyright (c) 2001-2011 The OpenSSL Project.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ *    software must display the following acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ *    endorse or promote products derived from this software without
+ *    prior written permission. For written permission, please contact
+ *    openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ *    nor may "OpenSSL" appear in their names without prior written
+ *    permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ *    acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ==================================================================== */
+
+#include <openssl/aes.h>
+
+#include <limits.h>
+#include <string.h>
+
+#include <openssl/mem.h>
+
+#include "../../internal.h"
+
+
+/* kDefaultIV is the default IV value given in RFC 3394, 2.2.3.1. */
+static const uint8_t kDefaultIV[] = {
+    0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6,
+};
+
+static const unsigned kBound = 6;
+
+int AES_wrap_key(const AES_KEY *key, const uint8_t *iv, uint8_t *out,
+                 const uint8_t *in, size_t in_len) {
+  /* See RFC 3394, section 2.2.1. */
+
+  if (in_len > INT_MAX - 8 || in_len < 8 || in_len % 8 != 0) {
+    return -1;
+  }
+
+  if (iv == NULL) {
+    iv = kDefaultIV;
+  }
+
+  OPENSSL_memmove(out + 8, in, in_len);
+  uint8_t A[AES_BLOCK_SIZE];
+  OPENSSL_memcpy(A, iv, 8);
+
+  size_t n = in_len / 8;
+
+  for (unsigned j = 0; j < kBound; j++) {
+    for (size_t i = 1; i <= n; i++) {
+      OPENSSL_memcpy(A + 8, out + 8 * i, 8);
+      AES_encrypt(A, A, key);
+
+      uint32_t t = (uint32_t)(n * j + i);
+      A[7] ^= t & 0xff;
+      A[6] ^= (t >> 8) & 0xff;
+      A[5] ^= (t >> 16) & 0xff;
+      A[4] ^= (t >> 24) & 0xff;
+      OPENSSL_memcpy(out + 8 * i, A + 8, 8);
+    }
+  }
+
+  OPENSSL_memcpy(out, A, 8);
+  return (int)in_len + 8;
+}
+
+int AES_unwrap_key(const AES_KEY *key, const uint8_t *iv, uint8_t *out,
+                   const uint8_t *in, size_t in_len) {
+  /* See RFC 3394, section 2.2.2. */
+
+  if (in_len > INT_MAX || in_len < 16 || in_len % 8 != 0) {
+    return -1;
+  }
+
+  if (iv == NULL) {
+    iv = kDefaultIV;
+  }
+
+  uint8_t A[AES_BLOCK_SIZE];
+  OPENSSL_memcpy(A, in, 8);
+  OPENSSL_memmove(out, in + 8, in_len - 8);
+
+  size_t n = (in_len / 8) - 1;
+
+  for (unsigned j = kBound - 1; j < kBound; j--) {
+    for (size_t i = n; i > 0; i--) {
+      uint32_t t = (uint32_t)(n * j + i);
+      A[7] ^= t & 0xff;
+      A[6] ^= (t >> 8) & 0xff;
+      A[5] ^= (t >> 16) & 0xff;
+      A[4] ^= (t >> 24) & 0xff;
+      OPENSSL_memcpy(A + 8, out + 8 * (i - 1), 8);
+      AES_decrypt(A, A, key);
+      OPENSSL_memcpy(out + 8 * (i - 1), A + 8, 8);
+    }
+  }
+
+  if (CRYPTO_memcmp(A, iv, 8) != 0) {
+    return -1;
+  }
+
+  return (int)in_len - 8;
+}
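
A short, hedged usage sketch of the two functions above (not part of this change; the key-encryption key and lengths are purely illustrative): the wrapped output is always in_len + 8 bytes, a NULL iv selects the RFC 3394 default IV, and unwrapping returns the plaintext length or -1 if the integrity check fails.

#include <stdint.h>
#include <stdio.h>
#include <openssl/aes.h>

int main(void) {
  /* Hypothetical 128-bit key-encryption key and a 16-byte key to wrap. */
  static const uint8_t kek[16] = {0};
  static const uint8_t plain[16] = {1, 2, 3, 4};
  uint8_t wrapped[16 + 8];   /* output is in_len + 8 bytes */
  uint8_t unwrapped[16];

  AES_KEY wrap_key, unwrap_key;
  AES_set_encrypt_key(kek, 128, &wrap_key);   /* wrapping uses AES_encrypt   */
  AES_set_decrypt_key(kek, 128, &unwrap_key); /* unwrapping uses AES_decrypt */

  /* NULL iv selects the RFC 3394 default IV (0xA6 repeated). */
  int wrapped_len = AES_wrap_key(&wrap_key, NULL, wrapped, plain, sizeof(plain));
  if (wrapped_len < 0) {
    return 1;
  }

  int unwrapped_len =
      AES_unwrap_key(&unwrap_key, NULL, unwrapped, wrapped, (size_t)wrapped_len);
  if (unwrapped_len != (int)sizeof(plain)) {
    return 1;  /* IV integrity check failed or bad length */
  }
  printf("round-tripped %d bytes\n", unwrapped_len);
  return 0;
}
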
diff --git a/src/crypto/fipsmodule/aes/mode_wrappers.c b/src/crypto/fipsmodule/aes/mode_wrappers.c
new file mode 100644
index 0000000..81e77a4
--- /dev/null
+++ b/src/crypto/fipsmodule/aes/mode_wrappers.c
@@ -0,0 +1,112 @@
+/* ====================================================================
+ * Copyright (c) 2002-2006 The OpenSSL Project.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer. 
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ *    software must display the following acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ *    endorse or promote products derived from this software without
+ *    prior written permission. For written permission, please contact
+ *    openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ *    nor may "OpenSSL" appear in their names without prior written
+ *    permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ *    acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ==================================================================== */
+
+#include <openssl/aes.h>
+
+#include <assert.h>
+
+#include "../../modes/internal.h"
+
+
+void AES_ctr128_encrypt(const uint8_t *in, uint8_t *out, size_t len,
+                        const AES_KEY *key, uint8_t ivec[AES_BLOCK_SIZE],
+                        uint8_t ecount_buf[AES_BLOCK_SIZE], unsigned int *num) {
+  CRYPTO_ctr128_encrypt(in, out, len, key, ivec, ecount_buf, num,
+                        (block128_f)AES_encrypt);
+}
+
+void AES_ecb_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key,
+                     const int enc) {
+  assert(in && out && key);
+  assert((AES_ENCRYPT == enc) || (AES_DECRYPT == enc));
+
+  if (AES_ENCRYPT == enc) {
+    AES_encrypt(in, out, key);
+  } else {
+    AES_decrypt(in, out, key);
+  }
+}
+
+#if defined(OPENSSL_NO_ASM) || \
+    (!defined(OPENSSL_X86_64) && !defined(OPENSSL_X86))
+void AES_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len,
+                     const AES_KEY *key, uint8_t *ivec, const int enc) {
+
+  if (enc) {
+    CRYPTO_cbc128_encrypt(in, out, len, key, ivec, (block128_f)AES_encrypt);
+  } else {
+    CRYPTO_cbc128_decrypt(in, out, len, key, ivec, (block128_f)AES_decrypt);
+  }
+}
+#else
+
+void asm_AES_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len,
+                         const AES_KEY *key, uint8_t *ivec, const int enc);
+void AES_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t len,
+                     const AES_KEY *key, uint8_t *ivec, const int enc) {
+  asm_AES_cbc_encrypt(in, out, len, key, ivec, enc);
+}
+
+#endif  /* OPENSSL_NO_ASM || (!OPENSSL_X86_64 && !OPENSSL_X86) */
+
+void AES_ofb128_encrypt(const uint8_t *in, uint8_t *out, size_t length,
+                        const AES_KEY *key, uint8_t *ivec, int *num) {
+  unsigned num_u = (unsigned)(*num);
+  CRYPTO_ofb128_encrypt(in, out, length, key, ivec, &num_u,
+                        (block128_f)AES_encrypt);
+  *num = (int)num_u;
+}
+
+void AES_cfb128_encrypt(const uint8_t *in, uint8_t *out, size_t length,
+                        const AES_KEY *key, uint8_t *ivec, int *num,
+                        int enc) {
+  unsigned num_u = (unsigned)(*num);
+  CRYPTO_cfb128_encrypt(in, out, length, key, ivec, &num_u, enc,
+                        (block128_f)AES_encrypt);
+  *num = (int)num_u;
+}
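
For the CTR wrapper above, the expected calling convention is that the caller owns the counter block, the ecount_buf keystream buffer, and the num offset, all zeroed for a fresh stream and carried over unchanged between calls to continue it. A minimal sketch, with the key and buffer names purely illustrative:

#include <stdint.h>
#include <string.h>
#include <openssl/aes.h>

/* Encrypts (or, identically, decrypts) a buffer with AES-128-CTR via the
 * wrapper above. ivec holds the initial counter block and is updated in place. */
static void ctr_example(const uint8_t key_bytes[16], uint8_t ivec[16],
                        const uint8_t *in, uint8_t *out, size_t len) {
  AES_KEY key;
  AES_set_encrypt_key(key_bytes, 128, &key);  /* CTR only ever encrypts */

  uint8_t ecount_buf[AES_BLOCK_SIZE];
  unsigned num = 0;
  memset(ecount_buf, 0, sizeof(ecount_buf));

  AES_ctr128_encrypt(in, out, len, &key, ivec, ecount_buf, &num);
  /* ivec, ecount_buf and num now describe the keystream position; passing
   * them unchanged to another call continues the same stream. */
}
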
diff --git a/src/crypto/fipsmodule/ar.go b/src/crypto/fipsmodule/ar.go
new file mode 100644
index 0000000..51e7aa5
--- /dev/null
+++ b/src/crypto/fipsmodule/ar.go
@@ -0,0 +1,120 @@
+// Copyright (c) 2017, Google Inc.
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// ar.go contains functions for parsing .a archive files.
+
+package main
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"strconv"
+	"strings"
+)
+
+// ParseAR parses an archive file from r and returns a map from filename to
+// contents, or else an error.
+func ParseAR(r io.Reader) (map[string][]byte, error) {
+	// See https://en.wikipedia.org/wiki/Ar_(Unix)#File_format_details
+	const expectedMagic = "!<arch>\n"
+	var magic [len(expectedMagic)]byte
+	if _, err := io.ReadFull(r, magic[:]); err != nil {
+		return nil, err
+	}
+	if string(magic[:]) != expectedMagic {
+		return nil, errors.New("ar: not an archive file")
+	}
+
+	const filenameTableName = "//"
+	const symbolTableName = "/"
+	var longFilenameTable []byte
+	ret := make(map[string][]byte)
+
+	for {
+		var header [60]byte
+		if _, err := io.ReadFull(r, header[:]); err != nil {
+			if err == io.EOF {
+				break
+			}
+			return nil, errors.New("ar: error reading file header: " + err.Error())
+		}
+
+		name := strings.TrimRight(string(header[:16]), " ")
+		sizeStr := strings.TrimRight(string(header[48:58]), "\x00 ")
+		size, err := strconv.ParseUint(sizeStr, 10, 64)
+		if err != nil {
+			return nil, errors.New("ar: failed to parse file size: " + err.Error())
+		}
+
+		// File contents are padded to a multiple of two bytes
+		storedSize := size
+		if storedSize%2 == 1 {
+			storedSize++
+		}
+
+		contents := make([]byte, storedSize)
+		if _, err := io.ReadFull(r, contents); err != nil {
+			return nil, errors.New("ar: error reading file contents: " + err.Error())
+		}
+		contents = contents[:size]
+
+		switch {
+		case name == filenameTableName:
+			if longFilenameTable != nil {
+				return nil, errors.New("ar: two filename tables found")
+			}
+			longFilenameTable = contents
+			continue
+
+		case name == symbolTableName:
+			continue
+
+		case len(name) > 1 && name[0] == '/':
+			if longFilenameTable == nil {
+				return nil, errors.New("ar: long filename reference found before filename table")
+			}
+
+			// A long filename is stored as "/" followed by a
+			// base-10 offset in the filename table.
+			offset, err := strconv.ParseUint(name[1:], 10, 64)
+			if err != nil {
+				return nil, errors.New("ar: failed to parse filename offset: " + err.Error())
+			}
+			if offset > uint64((^uint(0))>>1) {
+				return nil, errors.New("ar: filename offset overflow")
+			}
+
+			if int(offset) > len(longFilenameTable) {
+				return nil, errors.New("ar: filename offset out of bounds")
+			}
+
+			filename := longFilenameTable[offset:]
+			if i := bytes.IndexByte(filename, '/'); i < 0 {
+				return nil, errors.New("ar: unterminated filename in table")
+			} else {
+				filename = filename[:i]
+			}
+
+			name = string(filename)
+
+		default:
+			name = strings.TrimRight(name, "/")
+		}
+
+		ret[name] = contents
+	}
+
+	return ret, nil
+}
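
For reference, the 60-byte member header that ParseAR slices (the name from bytes [0,16) and the decimal size from bytes [48,58)) follows the usual System V ar layout. A C sketch of that layout, not code from this change:

#include <stdlib.h>
#include <string.h>

/* Fixed 60-byte member header of a System V .a archive; all fields are ASCII
 * and space padded. ParseAR above uses only ar_name and ar_size. */
struct ar_hdr {
  char ar_name[16]; /* member name, '/'-terminated, or "/<offset>" into the
                       long-filename table */
  char ar_date[12]; /* modification time, decimal seconds */
  char ar_uid[6];   /* owner uid, decimal */
  char ar_gid[6];   /* group gid, decimal */
  char ar_mode[8];  /* file mode, octal */
  char ar_size[10]; /* member size in bytes, decimal */
  char ar_fmag[2];  /* header terminator, "`\n" */
};

/* Mirrors the Go code's header[48:58] parse: the size field is decimal text. */
static unsigned long ar_member_size(const struct ar_hdr *hdr) {
  char buf[11];
  memcpy(buf, hdr->ar_size, 10);
  buf[10] = '\0';
  return strtoul(buf, NULL, 10);
}
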
diff --git a/src/crypto/fipsmodule/bcm.c b/src/crypto/fipsmodule/bcm.c
new file mode 100644
index 0000000..8a05ea2
--- /dev/null
+++ b/src/crypto/fipsmodule/bcm.c
@@ -0,0 +1,97 @@
+/* Copyright (c) 2017, Google Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
+
+#include <openssl/base.h>
+#include <openssl/crypto.h>
+#include <openssl/hmac.h>
+
+#include "../internal.h"
+
+#include "aes/aes.c"
+#include "aes/key_wrap.c"
+#include "aes/mode_wrappers.c"
+#include "digest/digest.c"
+#include "digest/digests.c"
+#include "hmac/hmac.c"
+#include "md4/md4.c"
+#include "md5/md5.c"
+#include "sha/sha1-altivec.c"
+#include "sha/sha1.c"
+#include "sha/sha256.c"
+#include "sha/sha512.c"
+
+
+#if defined(BORINGSSL_FIPS)
+static void hexdump(const uint8_t *in, size_t len) {
+  for (size_t i = 0; i < len; i++) {
+    printf("%02x", in[i]);
+  }
+}
+
+/* These functions are removed by delocate.go and references to them are
+ * rewritten to point to the start and end of the module, and the location of
+ * the integrity hash. */
+static void BORINGSSL_bcm_text_dummy_start(void) {}
+static void BORINGSSL_bcm_text_dummy_end(void) {}
+static void BORINGSSL_bcm_text_dummy_hash(void) {}
+
+static void BORINGSSL_bcm_power_on_self_test(void) __attribute__((constructor));
+
+static void BORINGSSL_bcm_power_on_self_test(void) {
+  CRYPTO_library_init();
+
+#if defined(__has_feature)
+#if __has_feature(address_sanitizer)
+  /* Power-on tests cannot run under ASAN because they involve reading the
+   * full .text section, which triggers global-buffer-overflow detection. */
+  return;
+#endif
+#endif
+
+  const uint8_t *const start = (const uint8_t *)BORINGSSL_bcm_text_dummy_start;
+  const uint8_t *const end = (const uint8_t *)BORINGSSL_bcm_text_dummy_end;
+
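+  /* The all-zero HMAC key is public and fixed: this is an integrity check,
+   * not an authentication, so no secret is needed. */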
+  static const uint8_t kHMACKey[32] = {0};
+  uint8_t result[SHA256_DIGEST_LENGTH];
+
+  unsigned result_len;
+  if (!HMAC(EVP_sha256(), kHMACKey, sizeof(kHMACKey), start, end - start,
+            result, &result_len) ||
+      result_len != sizeof(result)) {
+    goto err;
+  }
+
+  const uint8_t *const expected =
+      (const uint8_t *)BORINGSSL_bcm_text_dummy_hash;
+
+  if (OPENSSL_memcmp(expected, result, sizeof(result)) != 0) {
+    printf("FIPS integrity test failed.\nExpected: ");
+    hexdump(expected, sizeof(result));
+    printf("\nCalculated: ");
+    hexdump(result, sizeof(result));
+    printf("\n");
+    goto err;
+  }
+
+  /* TODO(fips): KAT tests go here. */
+
+  return;
+
+err:
+  for (;;) {
+    exit(1);
+    abort();
+  }
+}
+#endif  /* BORINGSSL_FIPS */
diff --git a/src/crypto/fipsmodule/const.go b/src/crypto/fipsmodule/const.go
new file mode 100644
index 0000000..aec10c7
--- /dev/null
+++ b/src/crypto/fipsmodule/const.go
@@ -0,0 +1,22 @@
+// Copyright (c) 2017, Google Inc.
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+package main
+
+// uninitHashValue is the default hash value that we inject into the module.
+// This value need only be distinctive so that we can safely
+// search-and-replace it in an object file.
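+// (inject-hash.go later finds these bytes in the built object and overwrites
+// them with the real hash of the module.)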
+var uninitHashValue = [32]byte{
+	0x5f, 0x30, 0xd1, 0x80, 0xe7, 0x9e, 0x8f, 0x8f, 0xdf, 0x8b, 0x93, 0xd4, 0x96, 0x36, 0x30, 0xcc, 0x30, 0xea, 0x38, 0x0f, 0x75, 0x56, 0x9a, 0x1b, 0x23, 0x2f, 0x7c, 0x79, 0xff, 0x1b, 0x2b, 0xca,
+}
diff --git a/src/crypto/fipsmodule/delocate.go b/src/crypto/fipsmodule/delocate.go
new file mode 100644
index 0000000..d8b5fb3
--- /dev/null
+++ b/src/crypto/fipsmodule/delocate.go
@@ -0,0 +1,473 @@
+// Copyright (c) 2017, Google Inc.
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// delocate performs several transformations of textual assembly code. See
+// FIPS.md in this directory for an overview.
+package main
+
+import (
+	"bufio"
+	"bytes"
+	"flag"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+func main() {
+	// The .a file, if given, is expected to be an archive of textual
+	// assembly sources. That's odd, but CMake really wants to create
+	// archive files so it's the only way that we can make it work.
+	arInput := flag.String("a", "", "Path to a .a file containing assembly sources")
+
+	outFile := flag.String("o", "", "Path to output assembly")
+	asmFiles := flag.String("as", "", "Comma separated list of assembly inputs")
+
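+	// A typical invocation, with purely illustrative file names, is:
+	//
+	//	delocate -a bcm-and-asm.a -as aesv8-armx.S,sha256-armv8.S -o bcm.S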
+	flag.Parse()
+
+	var lines []string
+	var err error
+	if len(*arInput) > 0 {
+		if lines, err = arLines(lines, *arInput); err != nil {
+			panic(err)
+		}
+	}
+
+	asPaths := strings.Split(*asmFiles, ",")
+	for i, path := range asPaths {
+		if len(path) == 0 {
+			continue
+		}
+
+		if lines, err = asLines(lines, path, i); err != nil {
+			panic(err)
+		}
+	}
+
+	symbols := definedSymbols(lines)
+	lines = transform(lines, symbols)
+
+	out, err := os.OpenFile(*outFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
+	if err != nil {
+		panic(err)
+	}
+	defer out.Close()
+
+	for _, line := range lines {
+		out.WriteString(line)
+		out.WriteString("\n")
+	}
+}
+
+// isSymbolDef detects whether line contains a (non-local) symbol
+// definition. If so, it returns the symbol and true. Otherwise it returns ""
+// and false.
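+//
+// For example, "main:" yields ("main", true), while a local label such as
+// ".L3:" or a directive line such as ".globl foo" yields ("", false).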
+func isSymbolDef(line string) (string, bool) {
+	line = strings.TrimSpace(line)
+
+	if len(line) > 0 && line[len(line)-1] == ':' && line[0] != '.' {
+		symbol := line[:len(line)-1]
+		if validSymbolName(symbol) {
+			return symbol, true
+		}
+	}
+
+	return "", false
+}
+
+// definedSymbols finds all (non-local) symbols from lines and returns a map
+// from symbol name to whether or not that symbol is global.
+func definedSymbols(lines []string) map[string]bool {
+	globalSymbols := make(map[string]struct{})
+	symbols := make(map[string]bool)
+
+	for _, line := range lines {
+		if len(line) == 0 {
+			continue
+		}
+
+		if symbol, ok := isSymbolDef(line); ok {
+			_, isGlobal := globalSymbols[symbol]
+			symbols[symbol] = isGlobal
+		}
+
+		parts := strings.Fields(strings.TrimSpace(line))
+		// Guard against whitespace-only lines and bare directives so
+		// that indexing into parts cannot panic.
+		if len(parts) >= 2 && parts[0] == ".globl" {
+			globalSymbols[parts[1]] = struct{}{}
+		}
+	}
+
+	return symbols
+}
+
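+// referencesIA32CapDirectly returns true iff line contains a direct
+// reference to OPENSSL_ia32cap_P, e.g. "OPENSSL_ia32cap_P(%rip)" or
+// "OPENSSL_ia32cap_P+4". Such references must instead be made via
+// OPENSSL_ia32cap_addr, so transform panics on them.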
+func referencesIA32CapDirectly(line string) bool {
+	const symbol = "OPENSSL_ia32cap_P"
+	i := strings.Index(line, symbol)
+	if i < 0 {
+		return false
+	}
+	i += len(symbol)
+	return i == len(line) || line[i] == '+' || line[i] == '('
+}
+
+// transform performs a number of transformations on the given assembly code.
+// See FIPS.md in the current directory for an overview.
+func transform(lines []string, symbols map[string]bool) (ret []string) {
+	ret = append(ret, ".text", "BORINGSSL_bcm_text_start:")
+
+	// redirectors maps from out-call symbol name to the name of a
+	// redirector function for that symbol.
+	redirectors := make(map[string]string)
+
+	// ia32capAddrNeeded is true iff OPENSSL_ia32cap_addr has been
+	// referenced and thus needs to be emitted outside the module.
+	ia32capAddrNeeded := false
+
+	// extractedText contains lines that have been extracted from the
+	// assembly and need to be moved outside of the module. (This is used
+	// for the .init_array section that specifies constructors.)
+	var extractedText []string
+	extractingText := false
+
+	// bssAccessorsNeeded contains the names of BSS symbols for which
+	// accessor functions need to be emitted outside of the module.
+	var bssAccessorsNeeded []string
+
+	for lineNo, line := range lines {
+		if referencesIA32CapDirectly(line) {
+			panic("reference to OPENSSL_ia32cap_P needs to be changed to indirect via OPENSSL_ia32cap_addr")
+		}
+
+		if strings.Contains(line, "OPENSSL_ia32cap_addr(%rip)") {
+			ia32capAddrNeeded = true
+		}
+
+		parts := strings.Fields(strings.TrimSpace(line))
+
+		if len(parts) == 0 {
+			ret = append(ret, line)
+			continue
+		}
+
+		switch parts[0] {
+		case "call", "callq", "jmp", "jne", "jb", "jz", "jnz", "ja":
+			target := parts[1]
+			// indirect via register or local label
+			if strings.HasPrefix(target, "*") || strings.HasPrefix(target, ".L") {
+				ret = append(ret, line)
+				continue
+			}
+
+			if isGlobal, ok := symbols[target]; ok {
+				newTarget := target
+				if isGlobal {
+					newTarget = localTargetName(target)
+				}
+				ret = append(ret, fmt.Sprintf("\t%s %s", parts[0], newTarget))
+				continue
+			}
+
+			redirectorName := "bcm_redirector_" + target
+
+			if strings.HasSuffix(target, "@PLT") {
+				withoutPLT := target[:len(target)-4]
+				if isGlobal, ok := symbols[withoutPLT]; ok {
+					newTarget := withoutPLT
+					if isGlobal {
+						newTarget = localTargetName(withoutPLT)
+					}
+					ret = append(ret, fmt.Sprintf("\t%s %s", parts[0], newTarget))
+					continue
+				}
+
+				redirectorName = redirectorName[:len(redirectorName)-4]
+			}
+
+			ret = append(ret, fmt.Sprintf("\t%s %s", parts[0], redirectorName))
+			redirectors[redirectorName] = target
+			continue
+
+		case "leaq":
+			if strings.Contains(line, "BORINGSSL_bcm_text_dummy_") {
+				line = strings.Replace(line, "BORINGSSL_bcm_text_dummy_", "BORINGSSL_bcm_text_", -1)
+			}
+
+			target := strings.SplitN(parts[1], ",", 2)[0]
+			if strings.HasSuffix(target, "(%rip)") {
+				target = target[:len(target)-6]
+				if isGlobal := symbols[target]; isGlobal {
+					line = strings.Replace(line, target, localTargetName(target), 1)
+				}
+			}
+
+			ret = append(ret, line)
+			continue
+
+		case ".file":
+			// Do not reorder .file directives. These define
+			// numbered files which are referenced by other debug
+			// directives which may not be reordered.
+			ret = append(ret, line)
+			continue
+
+		case ".comm":
+			p := strings.Split(parts[1], ",")
+			name := p[0]
+			bssAccessorsNeeded = append(bssAccessorsNeeded, name)
+			symbols[accessorName(name)] = false
+			ret = append(ret, line)
+
+		case ".section":
+			extractingText = false
+
+			p := strings.Split(parts[1], ",")
+			section := p[0]
+
+			if section == ".rodata" || section == ".text.startup" || strings.HasPrefix(section, ".rodata.") {
+				// Move .rodata to .text so it may be accessed
+				// without a relocation. GCC with
+				// -fmerge-constants will place strings into
+				// separate sections, so we move all sections
+				// named like .rodata. Also move .text.startup
+				// so the self-test function is also in the
+				// module.
+				ret = append(ret, ".text  # "+section)
+				break
+			}
+
+			switch section {
+			case ".data", ".data.rel.ro.local":
+				panic(fmt.Sprintf("bad section %q on line %d", parts[1], lineNo+1))
+
+			case ".init_array":
+				// init_array contains function pointers to
+				// constructor functions. Since these must be
+				// relocated, this section is moved to the end
+				// of the file.
+				extractedText = append(extractedText, line)
+				extractingText = true
+
+			default:
+				ret = append(ret, line)
+			}
+
+		case ".text":
+			extractingText = false
+			fallthrough
+
+		default:
+			if extractingText {
+				extractedText = append(extractedText, line)
+				continue
+			}
+
+			if symbol, ok := isSymbolDef(line); ok {
+				if isGlobal := symbols[symbol]; isGlobal {
+					ret = append(ret, localTargetName(symbol)+":")
+				}
+			}
+
+			ret = append(ret, line)
+		}
+	}
+
+	ret = append(ret, ".text")
+	ret = append(ret, "BORINGSSL_bcm_text_end:")
+
+	// Emit redirector functions. Each is a single JMP instruction.
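+	// For example, a module call to "memcpy@PLT" has been rewritten above
+	// to call "bcm_redirector_memcpy", which is emitted here (outside the
+	// module) as:
+	//
+	//	bcm_redirector_memcpy:
+	//		jmp memcpy@PLT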
+	var redirectorNames []string
+	for name := range redirectors {
+		redirectorNames = append(redirectorNames, name)
+	}
+	sort.Strings(redirectorNames)
+
+	for _, name := range redirectorNames {
+		ret = append(ret, ".type "+name+", @function")
+		ret = append(ret, name+":")
+		ret = append(ret, "\tjmp "+redirectors[name])
+	}
+
+	// Emit BSS accessor functions. Each is a single LEA followed by RET.
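+	// For a symbol declared with ".comm foo,4,4", for example, this
+	// emits:
+	//
+	//	foo_bss_get:
+	//		leaq foo(%rip), %rax
+	//		ret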
+	for _, name := range bssAccessorsNeeded {
+		funcName := accessorName(name)
+		ret = append(ret, ".type "+funcName+", @function")
+		ret = append(ret, funcName+":")
+		ret = append(ret, "\tleaq "+name+"(%rip), %rax")
+		ret = append(ret, "\tret")
+	}
+
+	// Emit an indirect reference to OPENSSL_ia32cap_P.
+	if ia32capAddrNeeded {
+		ret = append(ret, ".extern OPENSSL_ia32cap_P")
+		ret = append(ret, ".type OPENSSL_ia32cap_addr,@object")
+		ret = append(ret, ".size OPENSSL_ia32cap_addr,8")
+		ret = append(ret, "OPENSSL_ia32cap_addr:")
+		ret = append(ret, "\t.quad OPENSSL_ia32cap_P")
+	}
+
+	// Emit an array for storing the module hash.
+	ret = append(ret, ".type BORINGSSL_bcm_text_hash,@object")
+	ret = append(ret, ".size BORINGSSL_bcm_text_hash,32")
+	ret = append(ret, "BORINGSSL_bcm_text_hash:")
+	for _, b := range uninitHashValue {
+		ret = append(ret, ".byte 0x"+strconv.FormatUint(uint64(b), 16))
+	}
+
+	ret = append(ret, extractedText...)
+
+	return ret
+}
+
+// accessorName returns the name of the accessor function for a BSS symbol
+// named name.
+func accessorName(name string) string {
+	return name + "_bss_get"
+}
+
+// localTargetName returns the name of the local target label for a global
+// symbol named name.
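+// For example, localTargetName("AES_encrypt") is ".LAES_encrypt_local_target".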
+func localTargetName(name string) string {
+	return ".L" + name + "_local_target"
+}
+
+// asLines appends the contents of path to lines. Local symbols are renamed
+// using uniqueId to avoid collisions.
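+//
+// For example, a label ".L3" in an input file named "foo.S" with uniqueId 1
+// is renamed to ".Lfoo_S_1_3".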
+func asLines(lines []string, path string, uniqueId int) ([]string, error) {
+	basename := symbolRuneOrUnderscore(filepath.Base(path))
+
+	asFile, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer asFile.Close()
+
+	var contents []string
+
+	// localSymbols maps from the symbol name used in the input, to a
+	// unique symbol name.
+	localSymbols := make(map[string]string)
+
+	scanner := bufio.NewScanner(asFile)
+	for scanner.Scan() {
+		line := scanner.Text()
+		trimmed := strings.TrimSpace(line)
+		if strings.HasPrefix(trimmed, ".L") && strings.HasSuffix(trimmed, ":") {
+			symbol := trimmed[:len(trimmed)-1]
+			mappedSymbol := fmt.Sprintf(".L%s_%d_%s", basename, uniqueId, symbol[2:])
+			localSymbols[symbol] = mappedSymbol
+			contents = append(contents, mappedSymbol+":")
+			continue
+		}
+
+		contents = append(contents, scanner.Text())
+	}
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+
+	for _, line := range contents {
+		for symbol, mappedSymbol := range localSymbols {
+			for i := strings.Index(line, symbol); i >= 0; {
+				before := ' '
+				if i > 0 {
+					before, _ = utf8.DecodeLastRuneInString(line[:i])
+				}
+
+				after, _ := utf8.DecodeRuneInString(line[i+len(symbol):])
+
+				if !symbolRune(before) && !symbolRune(after) {
+					// Replace the occurrence at offset i, not
+					// merely the first occurrence in line.
+					line = line[:i] + mappedSymbol + line[i+len(symbol):]
+					i += len(mappedSymbol)
+				} else {
+					i += len(symbol)
+				}
+
+				// strings.Index is relative to line[i:], so
+				// convert it to an absolute offset into line.
+				next := strings.Index(line[i:], symbol)
+				if next < 0 {
+					break
+				}
+				i += next
+			}
+		}
+
+		lines = append(lines, line)
+	}
+
+	return lines, nil
+}
+
+func arLines(lines []string, arPath string) ([]string, error) {
+	arFile, err := os.Open(arPath)
+	if err != nil {
+		return nil, err
+	}
+	defer arFile.Close()
+
+	ar, err := ParseAR(arFile)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(ar) != 1 {
+		return nil, fmt.Errorf("expected one file in archive, but found %d", len(ar))
+	}
+
+	for _, contents := range ar {
+		scanner := bufio.NewScanner(bytes.NewBuffer(contents))
+		for scanner.Scan() {
+			lines = append(lines, scanner.Text())
+		}
+		if err := scanner.Err(); err != nil {
+			return nil, err
+		}
+	}
+
+	return lines, nil
+}
+
+// validSymbolName returns true if s is a valid (non-local) name for a symbol.
+func validSymbolName(s string) bool {
+	if len(s) == 0 {
+		return false
+	}
+
+	r, n := utf8.DecodeRuneInString(s)
+	// symbols don't start with a digit.
+	if r == utf8.RuneError || !symbolRune(r) || ('0' <= s[0] && s[0] <= '9') {
+		return false
+	}
+
+	return strings.IndexFunc(s[n:], func(r rune) bool {
+		return !symbolRune(r)
+	}) == -1
+}
+
+// symbolRune returns true if r is valid in a symbol name.
+func symbolRune(r rune) bool {
+	return (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') || r == '$' || r == '_'
+}
+
+// symbolRuneOrUnderscore maps s to a copy in which runes that are valid in a
+// symbol name map to themselves and all other runes map to underscore.
+func symbolRuneOrUnderscore(s string) string {
+	runes := make([]rune, 0, len(s))
+
+	for _, r := range s {
+		if symbolRune(r) {
+			runes = append(runes, r)
+		} else {
+			runes = append(runes, '_')
+		}
+	}
+
+	return string(runes)
+}
diff --git a/src/crypto/fipsmodule/delocate.h b/src/crypto/fipsmodule/delocate.h
new file mode 100644
index 0000000..676073c
--- /dev/null
+++ b/src/crypto/fipsmodule/delocate.h
@@ -0,0 +1,66 @@
+/* Copyright (c) 2017, Google Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
+
+#ifndef OPENSSL_HEADER_FIPSMODULE_DELOCATE_H
+#define OPENSSL_HEADER_FIPSMODULE_DELOCATE_H
+
+#include <openssl/base.h>
+
+#include "../internal.h"
+
+
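+/* DEFINE_BSS_GET defines a static variable |name| of type |type| together
+ * with a function, |name##_bss_get|, that returns its address. In FIPS mode
+ * the function is only declared: delocate.go emits its definition outside
+ * the module so that the module's code contains no relocations against the
+ * BSS. */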
+#if defined(BORINGSSL_FIPS)
+#define DEFINE_BSS_GET(type, name)        \
+  static type name __attribute__((used)); \
+  type *name##_bss_get(void);
+#else
+#define DEFINE_BSS_GET(type, name)                   \
+  static type name;                                  \
+  static type *name##_bss_get(void) { return &name; }
+#endif
+
+/* DEFINE_METHOD_FUNCTION defines a function named |name| which returns a
+ * method table of type const |type|*. In FIPS mode, to avoid rel.ro data, it
+ * is split into a CRYPTO_once_t-guarded initializer in the module and
+ * unhashed, non-module accessor functions to space reserved in the BSS. The
+ * method table is initialized by a caller-supplied function which takes a
+ * parameter named |out| of type |type|*. The caller should follow the macro
+ * invocation with the body of this function:
+ *
+ *     DEFINE_METHOD_FUNCTION(EVP_MD, EVP_md4) {
+ *       out->type = NID_md4;
+ *       out->md_size = MD4_DIGEST_LENGTH;
+ *       out->flags = 0;
+ *       out->init = md4_init;
+ *       out->update = md4_update;
+ *       out->final = md4_final;
+ *       out->block_size = 64;
+ *       out->ctx_size = sizeof(MD4_CTX);
+ *     }
+ *
+ * This mechanism does not use a static initializer because their execution
+ * order is undefined. See FIPS.md for more details. */
+#define DEFINE_METHOD_FUNCTION(type, name)                                    \
+  DEFINE_BSS_GET(type, name##_storage)                                        \
+  DEFINE_BSS_GET(CRYPTO_once_t, name##_once)                                  \
+  static void name##_do_init(type *out);                                      \
+  static void name##_init(void) { name##_do_init(name##_storage_bss_get()); } \
+  const type *name(void) {                                                    \
+    CRYPTO_once(name##_once_bss_get(), name##_init);                          \
+    return name##_storage_bss_get();                                          \
+  }                                                                           \
+  static void name##_do_init(type *out)
+
+
+#endif /* OPENSSL_HEADER_FIPSMODULE_DELOCATE_H */
diff --git a/src/crypto/fipsmodule/digest/digest.c b/src/crypto/fipsmodule/digest/digest.c
new file mode 100644
index 0000000..00e6d4b
--- /dev/null
+++ b/src/crypto/fipsmodule/digest/digest.c
@@ -0,0 +1,251 @@
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *    "This product includes cryptographic software written by
+ *     Eric Young (eay@cryptsoft.com)"
+ *    The word 'cryptographic' can be left out if the rouines from the library
+ *    being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ *    the apps directory (application code) you must include an acknowledgement:
+ *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed.  i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.] */
+
+#include <openssl/digest.h>
+
+#include <assert.h>
+#include <string.h>
+
+#include <openssl/err.h>
+#include <openssl/mem.h>
+
+#include "internal.h"
+#include "../../internal.h"
+
+
+int EVP_MD_type(const EVP_MD *md) { return md->type; }
+
+uint32_t EVP_MD_flags(const EVP_MD *md) { return md->flags; }
+
+size_t EVP_MD_size(const EVP_MD *md) { return md->md_size; }
+
+size_t EVP_MD_block_size(const EVP_MD *md) { return md->block_size; }
+
+
+void EVP_MD_CTX_init(EVP_MD_CTX *ctx) {
+  OPENSSL_memset(ctx, 0, sizeof(EVP_MD_CTX));
+}
+
+EVP_MD_CTX *EVP_MD_CTX_create(void) {
+  EVP_MD_CTX *ctx = OPENSSL_malloc(sizeof(EVP_MD_CTX));
+
+  if (ctx) {
+    EVP_MD_CTX_init(ctx);
+  }
+
+  return ctx;
+}
+
+int EVP_MD_CTX_cleanup(EVP_MD_CTX *ctx) {
+  if (ctx->digest && ctx->digest->ctx_size && ctx->md_data) {
+    OPENSSL_cleanse(ctx->md_data, ctx->digest->ctx_size);
+    OPENSSL_free(ctx->md_data);
+  }
+
+  assert(ctx->pctx == NULL || ctx->pctx_ops != NULL);
+  if (ctx->pctx_ops) {
+    ctx->pctx_ops->free(ctx->pctx);
+  }
+
+  EVP_MD_CTX_init(ctx);
+
+  return 1;
+}
+
+void EVP_MD_CTX_destroy(EVP_MD_CTX *ctx) {
+  if (!ctx) {
+    return;
+  }
+
+  EVP_MD_CTX_cleanup(ctx);
+  OPENSSL_free(ctx);
+}
+
+int EVP_MD_CTX_copy_ex(EVP_MD_CTX *out, const EVP_MD_CTX *in) {
+  uint8_t *tmp_buf = NULL;
+
+  if (in == NULL || in->digest == NULL) {
+    OPENSSL_PUT_ERROR(DIGEST, DIGEST_R_INPUT_NOT_INITIALIZED);
+    return 0;
+  }
+
+  if (out->digest == in->digest) {
+    /* |md_data| will be the correct size in this case. It's removed from
+     * |out| here so that |EVP_MD_CTX_cleanup| doesn't free it, and then it's
+     * reused below. */
+    tmp_buf = out->md_data;
+    out->md_data = NULL;
+  }
+
+  EVP_MD_CTX_cleanup(out);
+
+  out->digest = in->digest;
+  if (in->md_data && in->digest->ctx_size) {
+    if (tmp_buf) {
+      out->md_data = tmp_buf;
+    } else {
+      out->md_data = OPENSSL_malloc(in->digest->ctx_size);
+      if (!out->md_data) {
+        OPENSSL_PUT_ERROR(DIGEST, ERR_R_MALLOC_FAILURE);
+        return 0;
+      }
+    }
+    OPENSSL_memcpy(out->md_data, in->md_data, in->digest->ctx_size);
+  }
+
+  assert(in->pctx == NULL || in->pctx_ops != NULL);
+  out->pctx_ops = in->pctx_ops;
+  if (in->pctx && in->pctx_ops) {
+    out->pctx = in->pctx_ops->dup(in->pctx);
+    if (!out->pctx) {
+      EVP_MD_CTX_cleanup(out);
+      return 0;
+    }
+  }
+
+  return 1;
+}
+
+int EVP_MD_CTX_copy(EVP_MD_CTX *out, const EVP_MD_CTX *in) {
+  EVP_MD_CTX_init(out);
+  return EVP_MD_CTX_copy_ex(out, in);
+}
+
+int EVP_DigestInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *engine) {
+  if (ctx->digest != type) {
+    if (ctx->digest && ctx->digest->ctx_size > 0) {
+      OPENSSL_free(ctx->md_data);
+      ctx->md_data = NULL;
+    }
+    ctx->digest = type;
+    if (type->ctx_size > 0) {
+      ctx->md_data = OPENSSL_malloc(type->ctx_size);
+      if (ctx->md_data == NULL) {
+        OPENSSL_PUT_ERROR(DIGEST, ERR_R_MALLOC_FAILURE);
+        return 0;
+      }
+    }
+  }
+
+  assert(ctx->pctx == NULL || ctx->pctx_ops != NULL);
+
+  ctx->digest->init(ctx);
+  return 1;
+}
+
+int EVP_DigestInit(EVP_MD_CTX *ctx, const EVP_MD *type) {
+  EVP_MD_CTX_init(ctx);
+  return EVP_DigestInit_ex(ctx, type, NULL);
+}
+
+int EVP_DigestUpdate(EVP_MD_CTX *ctx, const void *data, size_t len) {
+  ctx->digest->update(ctx, data, len);
+  return 1;
+}
+
+int EVP_DigestFinal_ex(EVP_MD_CTX *ctx, uint8_t *md_out, unsigned int *size) {
+  assert(ctx->digest->md_size <= EVP_MAX_MD_SIZE);
+  ctx->digest->final(ctx, md_out);
+  if (size != NULL) {
+    *size = ctx->digest->md_size;
+  }
+  OPENSSL_cleanse(ctx->md_data, ctx->digest->ctx_size);
+  return 1;
+}
+
+int EVP_DigestFinal(EVP_MD_CTX *ctx, uint8_t *md, unsigned int *size) {
+  (void)EVP_DigestFinal_ex(ctx, md, size);
+  EVP_MD_CTX_cleanup(ctx);
+  return 1;
+}
+
+int EVP_Digest(const void *data, size_t count, uint8_t *out_md,
+               unsigned int *out_size, const EVP_MD *type, ENGINE *impl) {
+  EVP_MD_CTX ctx;
+  int ret;
+
+  EVP_MD_CTX_init(&ctx);
+  ret = EVP_DigestInit_ex(&ctx, type, impl) &&
+        EVP_DigestUpdate(&ctx, data, count) &&
+        EVP_DigestFinal_ex(&ctx, out_md, out_size);
+  EVP_MD_CTX_cleanup(&ctx);
+
+  return ret;
+}
+
+
+const EVP_MD *EVP_MD_CTX_md(const EVP_MD_CTX *ctx) {
+  if (ctx == NULL) {
+    return NULL;
+  }
+  return ctx->digest;
+}
+
+size_t EVP_MD_CTX_size(const EVP_MD_CTX *ctx) {
+  return EVP_MD_size(EVP_MD_CTX_md(ctx));
+}
+
+size_t EVP_MD_CTX_block_size(const EVP_MD_CTX *ctx) {
+  return EVP_MD_block_size(EVP_MD_CTX_md(ctx));
+}
+
+int EVP_MD_CTX_type(const EVP_MD_CTX *ctx) {
+  return EVP_MD_type(EVP_MD_CTX_md(ctx));
+}
+
+int EVP_add_digest(const EVP_MD *digest) {
+  return 1;
+}
diff --git a/src/crypto/fipsmodule/digest/digests.c b/src/crypto/fipsmodule/digest/digests.c
new file mode 100644
index 0000000..f2fa349
--- /dev/null
+++ b/src/crypto/fipsmodule/digest/digests.c
@@ -0,0 +1,280 @@
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *    "This product includes cryptographic software written by
+ *     Eric Young (eay@cryptsoft.com)"
+ *    The word 'cryptographic' can be left out if the rouines from the library
+ *    being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ *    the apps directory (application code) you must include an acknowledgement:
+ *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed.  i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.] */
+
+#include <openssl/digest.h>
+
+#include <assert.h>
+#include <string.h>
+
+#include <openssl/md4.h>
+#include <openssl/md5.h>
+#include <openssl/nid.h>
+#include <openssl/sha.h>
+
+#include "internal.h"
+#include "../delocate.h"
+#include "../../internal.h"
+
+#if defined(NDEBUG)
+#define CHECK(x) (void) (x)
+#else
+#define CHECK(x) assert(x)
+#endif
+
+
+static void md4_init(EVP_MD_CTX *ctx) {
+  CHECK(MD4_Init(ctx->md_data));
+}
+
+static void md4_update(EVP_MD_CTX *ctx, const void *data, size_t count) {
+  CHECK(MD4_Update(ctx->md_data, data, count));
+}
+
+static void md4_final(EVP_MD_CTX *ctx, uint8_t *out) {
+  CHECK(MD4_Final(out, ctx->md_data));
+}
+
+DEFINE_METHOD_FUNCTION(EVP_MD, EVP_md4) {
+  out->type = NID_md4;
+  out->md_size = MD4_DIGEST_LENGTH;
+  out->flags = 0;
+  out->init = md4_init;
+  out->update = md4_update;
+  out->final = md4_final;
+  out->block_size = 64;
+  out->ctx_size = sizeof(MD4_CTX);
+}
+
+
+static void md5_init(EVP_MD_CTX *ctx) {
+  CHECK(MD5_Init(ctx->md_data));
+}
+
+static void md5_update(EVP_MD_CTX *ctx, const void *data, size_t count) {
+  CHECK(MD5_Update(ctx->md_data, data, count));
+}
+
+static void md5_final(EVP_MD_CTX *ctx, uint8_t *out) {
+  CHECK(MD5_Final(out, ctx->md_data));
+}
+
+DEFINE_METHOD_FUNCTION(EVP_MD, EVP_md5) {
+  out->type = NID_md5;
+  out->md_size = MD5_DIGEST_LENGTH;
+  out->flags = 0;
+  out->init = md5_init;
+  out->update = md5_update;
+  out->final = md5_final;
+  out->block_size = 64;
+  out->ctx_size = sizeof(MD5_CTX);
+}
+
+
+static void sha1_init(EVP_MD_CTX *ctx) {
+  CHECK(SHA1_Init(ctx->md_data));
+}
+
+static void sha1_update(EVP_MD_CTX *ctx, const void *data, size_t count) {
+  CHECK(SHA1_Update(ctx->md_data, data, count));
+}
+
+static void sha1_final(EVP_MD_CTX *ctx, uint8_t *md) {
+  CHECK(SHA1_Final(md, ctx->md_data));
+}
+
+DEFINE_METHOD_FUNCTION(EVP_MD, EVP_sha1) {
+  out->type = NID_sha1;
+  out->md_size = SHA_DIGEST_LENGTH;
+  out->flags = 0;
+  out->init = sha1_init;
+  out->update = sha1_update;
+  out->final = sha1_final;
+  out->block_size = 64;
+  out->ctx_size = sizeof(SHA_CTX);
+}
+
+
+static void sha224_init(EVP_MD_CTX *ctx) {
+  CHECK(SHA224_Init(ctx->md_data));
+}
+
+static void sha224_update(EVP_MD_CTX *ctx, const void *data, size_t count) {
+  CHECK(SHA224_Update(ctx->md_data, data, count));
+}
+
+static void sha224_final(EVP_MD_CTX *ctx, uint8_t *md) {
+  CHECK(SHA224_Final(md, ctx->md_data));
+}
+
+DEFINE_METHOD_FUNCTION(EVP_MD, EVP_sha224) {
+  out->type = NID_sha224;
+  out->md_size = SHA224_DIGEST_LENGTH;
+  out->flags = 0;
+  out->init = sha224_init;
+  out->update = sha224_update;
+  out->final = sha224_final;
+  out->block_size = 64;
+  out->ctx_size = sizeof(SHA256_CTX);
+}
+
+
+static void sha256_init(EVP_MD_CTX *ctx) {
+  CHECK(SHA256_Init(ctx->md_data));
+}
+
+static void sha256_update(EVP_MD_CTX *ctx, const void *data, size_t count) {
+  CHECK(SHA256_Update(ctx->md_data, data, count));
+}
+
+static void sha256_final(EVP_MD_CTX *ctx, uint8_t *md) {
+  CHECK(SHA256_Final(md, ctx->md_data));
+}
+
+DEFINE_METHOD_FUNCTION(EVP_MD, EVP_sha256) {
+  out->type = NID_sha256;
+  out->md_size = SHA256_DIGEST_LENGTH;
+  out->flags = 0;
+  out->init = sha256_init;
+  out->update = sha256_update;
+  out->final = sha256_final;
+  out->block_size = 64;
+  out->ctx_size = sizeof(SHA256_CTX);
+}
+
+
+static void sha384_init(EVP_MD_CTX *ctx) {
+  CHECK(SHA384_Init(ctx->md_data));
+}
+
+static void sha384_update(EVP_MD_CTX *ctx, const void *data, size_t count) {
+  CHECK(SHA384_Update(ctx->md_data, data, count));
+}
+
+static void sha384_final(EVP_MD_CTX *ctx, uint8_t *md) {
+  CHECK(SHA384_Final(md, ctx->md_data));
+}
+
+DEFINE_METHOD_FUNCTION(EVP_MD, EVP_sha384) {
+  out->type = NID_sha384;
+  out->md_size = SHA384_DIGEST_LENGTH;
+  out->flags = 0;
+  out->init = sha384_init;
+  out->update = sha384_update;
+  out->final = sha384_final;
+  out->block_size = 128;
+  out->ctx_size = sizeof(SHA512_CTX);
+}
+
+
+static void sha512_init(EVP_MD_CTX *ctx) {
+  CHECK(SHA512_Init(ctx->md_data));
+}
+
+static void sha512_update(EVP_MD_CTX *ctx, const void *data, size_t count) {
+  CHECK(SHA512_Update(ctx->md_data, data, count));
+}
+
+static void sha512_final(EVP_MD_CTX *ctx, uint8_t *md) {
+  CHECK(SHA512_Final(md, ctx->md_data));
+}
+
+DEFINE_METHOD_FUNCTION(EVP_MD, EVP_sha512) {
+  out->type = NID_sha512;
+  out->md_size = SHA512_DIGEST_LENGTH;
+  out->flags = 0;
+  out->init = sha512_init;
+  out->update = sha512_update;
+  out->final = sha512_final;
+  out->block_size = 128;
+  out->ctx_size = sizeof(SHA512_CTX);
+}
+
+
+typedef struct {
+  MD5_CTX md5;
+  SHA_CTX sha1;
+} MD5_SHA1_CTX;
+
+static void md5_sha1_init(EVP_MD_CTX *md_ctx) {
+  MD5_SHA1_CTX *ctx = md_ctx->md_data;
+  CHECK(MD5_Init(&ctx->md5) && SHA1_Init(&ctx->sha1));
+}
+
+static void md5_sha1_update(EVP_MD_CTX *md_ctx, const void *data,
+                            size_t count) {
+  MD5_SHA1_CTX *ctx = md_ctx->md_data;
+  CHECK(MD5_Update(&ctx->md5, data, count) &&
+        SHA1_Update(&ctx->sha1, data, count));
+}
+
+static void md5_sha1_final(EVP_MD_CTX *md_ctx, uint8_t *out) {
+  MD5_SHA1_CTX *ctx = md_ctx->md_data;
+  CHECK(MD5_Final(out, &ctx->md5) &&
+        SHA1_Final(out + MD5_DIGEST_LENGTH, &ctx->sha1));
+}
+
+DEFINE_METHOD_FUNCTION(EVP_MD, EVP_md5_sha1) {
+  out->type = NID_md5_sha1;
+  out->md_size = MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH;
+  out->flags = 0;
+  out->init = md5_sha1_init;
+  out->update = md5_sha1_update;
+  out->final = md5_sha1_final;
+  out->block_size = 64;
+  out->ctx_size = sizeof(MD5_SHA1_CTX);
+}
+
+#undef CHECK
diff --git a/src/crypto/fipsmodule/digest/internal.h b/src/crypto/fipsmodule/digest/internal.h
new file mode 100644
index 0000000..e3d812a
--- /dev/null
+++ b/src/crypto/fipsmodule/digest/internal.h
@@ -0,0 +1,112 @@
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *    "This product includes cryptographic software written by
+ *     Eric Young (eay@cryptsoft.com)"
+ *    The word 'cryptographic' can be left out if the rouines from the library
+ *    being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ *    the apps directory (application code) you must include an acknowledgement:
+ *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed.  i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.] */
+
+#ifndef OPENSSL_HEADER_DIGEST_INTERNAL_H
+#define OPENSSL_HEADER_DIGEST_INTERNAL_H
+
+#include <openssl/base.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+
+struct env_md_st {
+  /* type contains a NID identifying the digest function. (For example,
+   * NID_md5.) */
+  int type;
+
+  /* md_size contains the size, in bytes, of the resulting digest. */
+  unsigned md_size;
+
+  /* flags contains the OR of |EVP_MD_FLAG_*| values. */
+  uint32_t flags;
+
+  /* init initialises the state in |ctx->md_data|. */
+  void (*init)(EVP_MD_CTX *ctx);
+
+  /* update hashes |len| bytes of |data| into the state in |ctx->md_data|. */
+  void (*update)(EVP_MD_CTX *ctx, const void *data, size_t count);
+
+  /* final completes the hash and writes |md_size| bytes of digest to |out|. */
+  void (*final)(EVP_MD_CTX *ctx, uint8_t *out);
+
+  /* block_size contains the hash's native block size. */
+  unsigned block_size;
+
+  /* ctx_size contains the size, in bytes, of the state of the hash function. */
+  unsigned ctx_size;
+};
+
+/* evp_md_pctx_ops contains function pointers to allow the |pctx| member of
+ * |EVP_MD_CTX| to be manipulated without breaking layering by calling EVP
+ * functions. */
+struct evp_md_pctx_ops {
+  /* free is called when an |EVP_MD_CTX| is being freed and the |pctx| also
+   * needs to be freed. */
+  void (*free) (EVP_PKEY_CTX *pctx);
+
+  /* dup is called when an |EVP_MD_CTX| is copied and so the |pctx| also needs
+   * to be copied. */
+  EVP_PKEY_CTX* (*dup) (EVP_PKEY_CTX *pctx);
+};
+
+
+#if defined(__cplusplus)
+}  /* extern C */
+#endif
+
+#endif  /* OPENSSL_HEADER_DIGEST_INTERNAL_H */
diff --git a/src/crypto/fipsmodule/digest/md32_common.h b/src/crypto/fipsmodule/digest/md32_common.h
new file mode 100644
index 0000000..7371629
--- /dev/null
+++ b/src/crypto/fipsmodule/digest/md32_common.h
@@ -0,0 +1,269 @@
+/* ====================================================================
+ * Copyright (c) 1999-2007 The OpenSSL Project.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ *    software must display the following acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ *    endorse or promote products derived from this software without
+ *    prior written permission. For written permission, please contact
+ *    licensing@OpenSSL.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ *    nor may "OpenSSL" appear in their names without prior written
+ *    permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ *    acknowledgment:
+ *    "This product includes software developed by the OpenSSL Project
+ *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ==================================================================== */
+
+#include <openssl/base.h>
+
+#include <assert.h>
+
+#include "../../internal.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+
+/* This is a generic 32-bit "collector" for message digest algorithms. It
+ * collects the input byte stream into chunks of 32-bit values and invokes the
+ * block function that performs the actual hash calculations. To make use of
+ * this mechanism, the following macros must be defined before including
+ * md32_common.h.
+ *
+ * One of |DATA_ORDER_IS_BIG_ENDIAN| or |DATA_ORDER_IS_LITTLE_ENDIAN| must be
+ * defined to specify the byte order of the input stream.
+ *
+ * |HASH_CBLOCK| must be defined as the integer block size, in bytes.
+ *
+ * |HASH_CTX| must be defined as the name of the context structure, which must
+ * have at least the following members:
+ *
+ *     typedef struct <name>_state_st {
+ *       uint32_t h[<chaining length> / sizeof(uint32_t)];
+ *       uint32_t Nl, Nh;
+ *       uint8_t data[HASH_CBLOCK];
+ *       unsigned num;
+ *       ...
+ *     } <NAME>_CTX;
+ *
+ * <chaining length> is the output length of the hash in bytes, before
+ * any truncation (e.g. 64 for SHA-224 and SHA-256, 128 for SHA-384 and
+ * SHA-512).
+ *
+ * |HASH_UPDATE| must be defined as the name of the "Update" function to
+ * generate.
+ *
+ * |HASH_TRANSFORM| must be defined as the name of the "Transform" function
+ * to generate.
+ *
+ * |HASH_FINAL| must be defined as the name of the "Final" function to
+ * generate.
+ *
+ * |HASH_BLOCK_DATA_ORDER| must be defined as the name of the "Block" function.
+ * That function must be implemented manually. It must be capable of operating
+ * on *unaligned* input data in its original (data) byte order. It must have
+ * this signature:
+ *
+ *     void HASH_BLOCK_DATA_ORDER(uint32_t *state, const uint8_t *data,
+ *                                size_t num);
+ *
+ * It must update the hash state |state| with |num| blocks of data from |data|,
+ * where each block is |HASH_CBLOCK| bytes; i.e. |data| points to an array of
+ * |HASH_CBLOCK * num| bytes. |state| points to the |h| member of a |HASH_CTX|,
+ * and so will have |<chaining length> / sizeof(uint32_t)| elements.
+ *
+ * |HASH_MAKE_STRING(c, s)| must be defined as a block statement that converts
+ * the hash state |c->h| into the output byte order, storing the result in |s|.
+ */
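+
+/* For example (an illustrative sketch only; the real definitions live in
+ * md5.c), MD5 would use this header roughly as follows:
+ *
+ *     #define DATA_ORDER_IS_LITTLE_ENDIAN
+ *     #define HASH_CTX MD5_CTX
+ *     #define HASH_CBLOCK 64
+ *     #define HASH_UPDATE MD5_Update
+ *     #define HASH_TRANSFORM MD5_Transform
+ *     #define HASH_FINAL MD5_Final
+ *     #define HASH_BLOCK_DATA_ORDER md5_block_data_order
+ *     #define HASH_MAKE_STRING(c, s) ...
+ *     #include "../digest/md32_common.h"
+ */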
+
+#if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
+#error "DATA_ORDER must be defined!"
+#endif
+
+#ifndef HASH_CBLOCK
+#error "HASH_CBLOCK must be defined!"
+#endif
+#ifndef HASH_CTX
+#error "HASH_CTX must be defined!"
+#endif
+
+#ifndef HASH_UPDATE
+#error "HASH_UPDATE must be defined!"
+#endif
+#ifndef HASH_TRANSFORM
+#error "HASH_TRANSFORM must be defined!"
+#endif
+#ifndef HASH_FINAL
+#error "HASH_FINAL must be defined!"
+#endif
+
+#ifndef HASH_BLOCK_DATA_ORDER
+#error "HASH_BLOCK_DATA_ORDER must be defined!"
+#endif
+
+#ifndef HASH_MAKE_STRING
+#error "HASH_MAKE_STRING must be defined!"
+#endif
+
+#if defined(DATA_ORDER_IS_BIG_ENDIAN)
+
+#define HOST_c2l(c, l)                     \
+  do {                                     \
+    (l) = (((uint32_t)(*((c)++))) << 24);  \
+    (l) |= (((uint32_t)(*((c)++))) << 16); \
+    (l) |= (((uint32_t)(*((c)++))) << 8);  \
+    (l) |= (((uint32_t)(*((c)++))));       \
+  } while (0)
+
+#define HOST_l2c(l, c)                        \
+  do {                                        \
+    *((c)++) = (uint8_t)(((l) >> 24) & 0xff); \
+    *((c)++) = (uint8_t)(((l) >> 16) & 0xff); \
+    *((c)++) = (uint8_t)(((l) >> 8) & 0xff);  \
+    *((c)++) = (uint8_t)(((l)) & 0xff);       \
+  } while (0)
+
+#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
+
+#define HOST_c2l(c, l)                     \
+  do {                                     \
+    (l) = (((uint32_t)(*((c)++))));        \
+    (l) |= (((uint32_t)(*((c)++))) << 8);  \
+    (l) |= (((uint32_t)(*((c)++))) << 16); \
+    (l) |= (((uint32_t)(*((c)++))) << 24); \
+  } while (0)
+
+#define HOST_l2c(l, c)                        \
+  do {                                        \
+    *((c)++) = (uint8_t)(((l)) & 0xff);       \
+    *((c)++) = (uint8_t)(((l) >> 8) & 0xff);  \
+    *((c)++) = (uint8_t)(((l) >> 16) & 0xff); \
+    *((c)++) = (uint8_t)(((l) >> 24) & 0xff); \
+  } while (0)
+
+#endif /* DATA_ORDER */
+
+int HASH_UPDATE(HASH_CTX *c, const void *data_, size_t len) {
+  const uint8_t *data = data_;
+
+  if (len == 0) {
+    return 1;
+  }
+
+  uint32_t l = c->Nl + (((uint32_t)len) << 3);
+  if (l < c->Nl) {
+    /* Handle carries. */
+    c->Nh++;
+  }
+  c->Nh += (uint32_t)(len >> 29);
+  c->Nl = l;
+
+  size_t n = c->num;
+  if (n != 0) {
+    if (len >= HASH_CBLOCK || len + n >= HASH_CBLOCK) {
+      OPENSSL_memcpy(c->data + n, data, HASH_CBLOCK - n);
+      HASH_BLOCK_DATA_ORDER(c->h, c->data, 1);
+      n = HASH_CBLOCK - n;
+      data += n;
+      len -= n;
+      c->num = 0;
+      /* Keep |c->data| zeroed when unused. */
+      OPENSSL_memset(c->data, 0, HASH_CBLOCK);
+    } else {
+      OPENSSL_memcpy(c->data + n, data, len);
+      c->num += (unsigned)len;
+      return 1;
+    }
+  }
+
+  n = len / HASH_CBLOCK;
+  if (n > 0) {
+    HASH_BLOCK_DATA_ORDER(c->h, data, n);
+    n *= HASH_CBLOCK;
+    data += n;
+    len -= n;
+  }
+
+  if (len != 0) {
+    c->num = (unsigned)len;
+    OPENSSL_memcpy(c->data, data, len);
+  }
+  return 1;
+}
+
+
+void HASH_TRANSFORM(HASH_CTX *c, const uint8_t *data) {
+  HASH_BLOCK_DATA_ORDER(c->h, data, 1);
+}
+
+
+int HASH_FINAL(uint8_t *md, HASH_CTX *c) {
+  /* |c->data| always has room for at least one byte. A full block would have
+   * been consumed. */
+  size_t n = c->num;
+  assert(n < HASH_CBLOCK);
+  c->data[n] = 0x80;
+  n++;
+
+  /* Fill the block with zeros if there isn't room for a 64-bit length. */
+  if (n > (HASH_CBLOCK - 8)) {
+    OPENSSL_memset(c->data + n, 0, HASH_CBLOCK - n);
+    n = 0;
+    HASH_BLOCK_DATA_ORDER(c->h, c->data, 1);
+  }
+  OPENSSL_memset(c->data + n, 0, HASH_CBLOCK - 8 - n);
+
+  /* Append a 64-bit length to the block and process it. */
+  uint8_t *p = c->data + HASH_CBLOCK - 8;
+#if defined(DATA_ORDER_IS_BIG_ENDIAN)
+  HOST_l2c(c->Nh, p);
+  HOST_l2c(c->Nl, p);
+#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
+  HOST_l2c(c->Nl, p);
+  HOST_l2c(c->Nh, p);
+#endif
+  assert(p == c->data + HASH_CBLOCK);
+  HASH_BLOCK_DATA_ORDER(c->h, c->data, 1);
+  c->num = 0;
+  OPENSSL_memset(c->data, 0, HASH_CBLOCK);
+
+  HASH_MAKE_STRING(c, md);
+  return 1;
+}
+
+
+#if defined(__cplusplus)
+} /* extern C */
+#endif
diff --git a/src/crypto/fipsmodule/hmac/hmac.c b/src/crypto/fipsmodule/hmac/hmac.c
new file mode 100644
index 0000000..3292350
--- /dev/null
+++ b/src/crypto/fipsmodule/hmac/hmac.c
@@ -0,0 +1,206 @@
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *    "This product includes cryptographic software written by
+ *     Eric Young (eay@cryptsoft.com)"
+ *    The word 'cryptographic' can be left out if the rouines from the library
+ *    being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ *    the apps directory (application code) you must include an acknowledgement:
+ *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed.  i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.] */
+
+#include <openssl/hmac.h>
+
+#include <assert.h>
+#include <string.h>
+
+#include <openssl/digest.h>
+#include <openssl/mem.h>
+
+#include "../../internal.h"
+
+
+uint8_t *HMAC(const EVP_MD *evp_md, const void *key, size_t key_len,
+              const uint8_t *data, size_t data_len, uint8_t *out,
+              unsigned int *out_len) {
+  HMAC_CTX ctx;
+  HMAC_CTX_init(&ctx);
+  if (!HMAC_Init_ex(&ctx, key, key_len, evp_md, NULL) ||
+      !HMAC_Update(&ctx, data, data_len) ||
+      !HMAC_Final(&ctx, out, out_len)) {
+    out = NULL;
+  }
+
+  HMAC_CTX_cleanup(&ctx);
+  return out;
+}
+
+void HMAC_CTX_init(HMAC_CTX *ctx) {
+  ctx->md = NULL;
+  EVP_MD_CTX_init(&ctx->i_ctx);
+  EVP_MD_CTX_init(&ctx->o_ctx);
+  EVP_MD_CTX_init(&ctx->md_ctx);
+}
+
+void HMAC_CTX_cleanup(HMAC_CTX *ctx) {
+  EVP_MD_CTX_cleanup(&ctx->i_ctx);
+  EVP_MD_CTX_cleanup(&ctx->o_ctx);
+  EVP_MD_CTX_cleanup(&ctx->md_ctx);
+  OPENSSL_cleanse(ctx, sizeof(HMAC_CTX));
+}
+
+int HMAC_Init_ex(HMAC_CTX *ctx, const void *key, size_t key_len,
+                 const EVP_MD *md, ENGINE *impl) {
+  if (md == NULL) {
+    md = ctx->md;
+  }
+
+  /* If either |key| is non-NULL or |md| has changed, initialize with a new key
+   * rather than rewinding the previous one.
+   *
+   * TODO(davidben,eroman): Passing the previous |md| with a NULL |key| is
+   * ambiguous between using the empty key and reusing the previous key. There
+   * exist callers which intend the latter, but the former is an awkward edge
+   * case. Fix the API to avoid this. */
+  if (md != ctx->md || key != NULL) {
+    uint8_t pad[EVP_MAX_MD_BLOCK_SIZE];
+    uint8_t key_block[EVP_MAX_MD_BLOCK_SIZE];
+    unsigned key_block_len;
+
+    size_t block_size = EVP_MD_block_size(md);
+    assert(block_size <= sizeof(key_block));
+    if (block_size < key_len) {
+      /* Long keys are hashed. */
+      if (!EVP_DigestInit_ex(&ctx->md_ctx, md, impl) ||
+          !EVP_DigestUpdate(&ctx->md_ctx, key, key_len) ||
+          !EVP_DigestFinal_ex(&ctx->md_ctx, key_block, &key_block_len)) {
+        return 0;
+      }
+    } else {
+      assert(key_len <= sizeof(key_block));
+      OPENSSL_memcpy(key_block, key, key_len);
+      key_block_len = (unsigned)key_len;
+    }
+    /* Keys are then padded with zeros. */
+    if (key_block_len != EVP_MAX_MD_BLOCK_SIZE) {
+      OPENSSL_memset(&key_block[key_block_len], 0,
+                     sizeof(key_block) - key_block_len);
+    }
+
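+    /* Per RFC 2104, form the inner pad by XORing the key block with 0x36 and
+     * the outer pad by XORing it with 0x5c. */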
+    for (size_t i = 0; i < EVP_MAX_MD_BLOCK_SIZE; i++) {
+      pad[i] = 0x36 ^ key_block[i];
+    }
+    if (!EVP_DigestInit_ex(&ctx->i_ctx, md, impl) ||
+        !EVP_DigestUpdate(&ctx->i_ctx, pad, EVP_MD_block_size(md))) {
+      return 0;
+    }
+
+    for (size_t i = 0; i < EVP_MAX_MD_BLOCK_SIZE; i++) {
+      pad[i] = 0x5c ^ key_block[i];
+    }
+    if (!EVP_DigestInit_ex(&ctx->o_ctx, md, impl) ||
+        !EVP_DigestUpdate(&ctx->o_ctx, pad, EVP_MD_block_size(md))) {
+      return 0;
+    }
+
+    ctx->md = md;
+  }
+
+  if (!EVP_MD_CTX_copy_ex(&ctx->md_ctx, &ctx->i_ctx)) {
+    return 0;
+  }
+
+  return 1;
+}
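+
+/* Editorial note (not part of the upstream file): the |i_ctx| and |o_ctx|
+ * contexts prepared above implement the HMAC construction from RFC 2104,
+ *
+ *   HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))
+ *
+ * where K' is the key, hashed if longer than the block size and then
+ * zero-padded to the block size, ipad is the byte 0x36 repeated and opad is
+ * the byte 0x5c repeated. |i_ctx| is H keyed with K' ^ ipad and |o_ctx| is H
+ * keyed with K' ^ opad, which is why a later |HMAC_Init_ex| with a NULL |key|
+ * and the same |md| can reuse them without reprocessing the key. */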
+
+int HMAC_Update(HMAC_CTX *ctx, const uint8_t *data, size_t data_len) {
+  return EVP_DigestUpdate(&ctx->md_ctx, data, data_len);
+}
+
+int HMAC_Final(HMAC_CTX *ctx, uint8_t *out, unsigned int *out_len) {
+  unsigned int i;
+  uint8_t buf[EVP_MAX_MD_SIZE];
+
+  /* TODO(davidben): The only thing that can officially fail here is
+   * |EVP_MD_CTX_copy_ex|, but even that should be impossible in this case. */
+  if (!EVP_DigestFinal_ex(&ctx->md_ctx, buf, &i) ||
+      !EVP_MD_CTX_copy_ex(&ctx->md_ctx, &ctx->o_ctx) ||
+      !EVP_DigestUpdate(&ctx->md_ctx, buf, i) ||
+      !EVP_DigestFinal_ex(&ctx->md_ctx, out, out_len)) {
+    *out_len = 0;
+    return 0;
+  }
+
+  return 1;
+}
+
+size_t HMAC_size(const HMAC_CTX *ctx) {
+  return EVP_MD_size(ctx->md);
+}
+
+int HMAC_CTX_copy_ex(HMAC_CTX *dest, const HMAC_CTX *src) {
+  if (!EVP_MD_CTX_copy_ex(&dest->i_ctx, &src->i_ctx) ||
+      !EVP_MD_CTX_copy_ex(&dest->o_ctx, &src->o_ctx) ||
+      !EVP_MD_CTX_copy_ex(&dest->md_ctx, &src->md_ctx)) {
+    return 0;
+  }
+
+  dest->md = src->md;
+  return 1;
+}
+
+int HMAC_Init(HMAC_CTX *ctx, const void *key, int key_len, const EVP_MD *md) {
+  if (key && md) {
+    HMAC_CTX_init(ctx);
+  }
+  return HMAC_Init_ex(ctx, key, key_len, md, NULL);
+}
+
+int HMAC_CTX_copy(HMAC_CTX *dest, const HMAC_CTX *src) {
+  HMAC_CTX_init(dest);
+  return HMAC_CTX_copy_ex(dest, src);
+}
diff --git a/src/crypto/fipsmodule/inject-hash.go b/src/crypto/fipsmodule/inject-hash.go
new file mode 100644
index 0000000..7f2c9c9
--- /dev/null
+++ b/src/crypto/fipsmodule/inject-hash.go
@@ -0,0 +1,159 @@
+// Copyright (c) 2017, Google Inc.
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// inject-hash parses an archive containing a single object file. It finds the
+// FIPS module inside that object, calculates its hash, and replaces the
+// default hash value in the object with the calculated value.
+package main
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/sha256"
+	"debug/elf"
+	"errors"
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+)
+
+func do(outPath, arInput string) error {
+	arFile, err := os.Open(arInput)
+	if err != nil {
+		return err
+	}
+	defer arFile.Close()
+
+	ar, err := ParseAR(arFile)
+	if err != nil {
+		return err
+	}
+
+	if len(ar) != 1 {
+		return fmt.Errorf("expected one file in archive, but found %d", len(ar))
+	}
+
+	var objectBytes []byte
+	for _, contents := range ar {
+		objectBytes = contents
+	}
+
+	object, err := elf.NewFile(bytes.NewReader(objectBytes))
+	if err != nil {
+		return errors.New("failed to parse object: " + err.Error())
+	}
+
+	// Find the .text section.
+
+	var textSection *elf.Section
+	var textSectionIndex elf.SectionIndex
+	for i, section := range object.Sections {
+		if section.Name == ".text" {
+			textSectionIndex = elf.SectionIndex(i)
+			textSection = section
+			break
+		}
+	}
+
+	if textSection == nil {
+		return errors.New("failed to find .text section in object")
+	}
+
+	// Find the starting and ending symbols for the module.
+
+	var startSeen, endSeen bool
+	var start, end uint64
+
+	symbols, err := object.Symbols()
+	if err != nil {
+		return errors.New("failed to parse symbols: " + err.Error())
+	}
+
+	for _, symbol := range symbols {
+		if symbol.Section != textSectionIndex {
+			continue
+		}
+
+		switch symbol.Name {
+		case "BORINGSSL_bcm_text_start":
+			if startSeen {
+				return errors.New("duplicate start symbol found")
+			}
+			startSeen = true
+			start = symbol.Value
+		case "BORINGSSL_bcm_text_end":
+			if endSeen {
+				return errors.New("duplicate end symbol found")
+			}
+			endSeen = true
+			end = symbol.Value
+		default:
+			continue
+		}
+	}
+
+	if !startSeen || !endSeen {
+		return errors.New("could not find module boundaries in object")
+	}
+
+	if max := textSection.Size; start > max || start > end || end > max {
+		return fmt.Errorf("invalid module boundaries: start: %x, end: %x, max: %x", start, end, max)
+	}
+
+	// Extract the module from the .text section and hash it.
+
+	text := textSection.Open()
+	if _, err := text.Seek(int64(start), 0); err != nil {
+		return errors.New("failed to seek to module start in .text: " + err.Error())
+	}
+	moduleText := make([]byte, end-start)
+	if _, err := io.ReadFull(text, moduleText); err != nil {
+		return errors.New("failed to read .text: " + err.Error())
+	}
+
+	var zeroKey [32]byte
+	mac := hmac.New(sha256.New, zeroKey[:])
+	mac.Write(moduleText)
+	calculated := mac.Sum(nil)
+
+	// Replace the default hash value in the object with the calculated
+	// value and write it out.
+
+	offset := bytes.Index(objectBytes, uninitHashValue[:])
+	if offset < 0 {
+		return errors.New("did not find uninitialised hash value in object file")
+	}
+
+	if bytes.Index(objectBytes[offset+1:], uninitHashValue[:]) >= 0 {
+		return errors.New("found two occurrences of uninitialised hash value in object file")
+	}
+
+	copy(objectBytes[offset:], calculated)
+
+	return ioutil.WriteFile(outPath, objectBytes, 0644)
+}
+
+func main() {
+	arInput := flag.String("in", "", "Path to a .a file")
+	outPath := flag.String("o", "", "Path to output object")
+
+	flag.Parse()
+
+	if err := do(*outPath, *arInput); err != nil {
+		fmt.Fprintf(os.Stderr, "%s\n", err)
+		os.Exit(1)
+	}
+}
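+
+// Illustrative invocation (editorial note; the binary and file names below are
+// hypothetical, but the -in and -o flags match the definitions above):
+//
+//     inject-hash -in bcm.a -o bcm-injected.o
+//
+// The single object inside the input archive is written to the output path
+// with its placeholder hash replaced by the HMAC-SHA256, under an all-zero
+// key, of the module text between BORINGSSL_bcm_text_start and
+// BORINGSSL_bcm_text_end.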
diff --git a/src/crypto/fipsmodule/intcheck1.png b/src/crypto/fipsmodule/intcheck1.png
new file mode 100644
index 0000000..2fec4ea
--- /dev/null
+++ b/src/crypto/fipsmodule/intcheck1.png
Binary files differ
diff --git a/src/crypto/fipsmodule/intcheck2.png b/src/crypto/fipsmodule/intcheck2.png
new file mode 100644
index 0000000..3e66267
--- /dev/null
+++ b/src/crypto/fipsmodule/intcheck2.png
Binary files differ
diff --git a/src/crypto/fipsmodule/intcheck3.png b/src/crypto/fipsmodule/intcheck3.png
new file mode 100644
index 0000000..9a22d33
--- /dev/null
+++ b/src/crypto/fipsmodule/intcheck3.png
Binary files differ
diff --git a/src/crypto/fipsmodule/is_fips.c b/src/crypto/fipsmodule/is_fips.c
new file mode 100644
index 0000000..c23e621
--- /dev/null
+++ b/src/crypto/fipsmodule/is_fips.c
@@ -0,0 +1,27 @@
+/* Copyright (c) 2017, Google Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
+
+#include <openssl/crypto.h>
+
+
+/* This file exists in order to give the fipsmodule target, in non-FIPS mode,
+ * something to compile. */
+
+int FIPS_mode(void) {
+#if defined(BORINGSSL_FIPS)
+  return 1;
+#else
+  return 0;
+#endif
+}
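+
+/* Illustrative usage (editorial note, not part of the upstream file): callers
+ * can branch on the build's FIPS status at run time,
+ *
+ *   if (FIPS_mode()) {
+ *     ... restrict the process to FIPS-approved algorithms ...
+ *   }
+ *
+ * the result is fixed at build time by whether BORINGSSL_FIPS is defined. */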
diff --git a/src/crypto/fipsmodule/md4/md4.c b/src/crypto/fipsmodule/md4/md4.c
new file mode 100644
index 0000000..3028c8b
--- /dev/null
+++ b/src/crypto/fipsmodule/md4/md4.c
@@ -0,0 +1,254 @@
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *    "This product includes cryptographic software written by
+ *     Eric Young (eay@cryptsoft.com)"
+ *    The word 'cryptographic' can be left out if the rouines from the library
+ *    being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ *    the apps directory (application code) you must include an acknowledgement:
+ *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed.  i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.] */
+
+#include <openssl/md4.h>
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "../../internal.h"
+
+
+uint8_t *MD4(const uint8_t *data, size_t len, uint8_t *out) {
+  MD4_CTX ctx;
+  MD4_Init(&ctx);
+  MD4_Update(&ctx, data, len);
+  MD4_Final(out, &ctx);
+
+  return out;
+}
+
+/* Implemented from RFC1186 The MD4 Message-Digest Algorithm. */
+
+int MD4_Init(MD4_CTX *md4) {
+  OPENSSL_memset(md4, 0, sizeof(MD4_CTX));
+  md4->h[0] = 0x67452301UL;
+  md4->h[1] = 0xefcdab89UL;
+  md4->h[2] = 0x98badcfeUL;
+  md4->h[3] = 0x10325476UL;
+  return 1;
+}
+
+void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num);
+
+#define DATA_ORDER_IS_LITTLE_ENDIAN
+
+#define HASH_CTX MD4_CTX
+#define HASH_CBLOCK 64
+#define HASH_UPDATE MD4_Update
+#define HASH_TRANSFORM MD4_Transform
+#define HASH_FINAL MD4_Final
+#define HASH_MAKE_STRING(c, s) \
+  do {                         \
+    uint32_t ll;               \
+    ll = (c)->h[0];            \
+    HOST_l2c(ll, (s));         \
+    ll = (c)->h[1];            \
+    HOST_l2c(ll, (s));         \
+    ll = (c)->h[2];            \
+    HOST_l2c(ll, (s));         \
+    ll = (c)->h[3];            \
+    HOST_l2c(ll, (s));         \
+  } while (0)
+#define HASH_BLOCK_DATA_ORDER md4_block_data_order
+
+#include "../digest/md32_common.h"
+
+/* As pointed out by Wei Dai <weidai@eskimo.com>, the above can be
+ * simplified to the code below.  Wei attributes these optimizations
+ * to Peter Gutmann's SHS code, and he attributes it to Rich Schroeppel. */
+#define F(b, c, d) ((((c) ^ (d)) & (b)) ^ (d))
+#define G(b, c, d) (((b) & (c)) | ((b) & (d)) | ((c) & (d)))
+#define H(b, c, d) ((b) ^ (c) ^ (d))
+
+#define ROTATE(a, n) (((a) << (n)) | ((a) >> (32 - (n))))
+
+#define R0(a, b, c, d, k, s, t)            \
+  do {                                     \
+    (a) += ((k) + (t) + F((b), (c), (d))); \
+    (a) = ROTATE(a, s);                    \
+  } while (0)
+
+#define R1(a, b, c, d, k, s, t)            \
+  do {                                     \
+    (a) += ((k) + (t) + G((b), (c), (d))); \
+    (a) = ROTATE(a, s);                    \
+  } while (0)
+
+#define R2(a, b, c, d, k, s, t)            \
+  do {                                     \
+    (a) += ((k) + (t) + H((b), (c), (d))); \
+    (a) = ROTATE(a, s);                    \
+  } while (0)
+
+void md4_block_data_order(uint32_t *state, const uint8_t *data, size_t num) {
+  uint32_t A, B, C, D, l;
+  uint32_t X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X12, X13, X14, X15;
+
+  A = state[0];
+  B = state[1];
+  C = state[2];
+  D = state[3];
+
+  for (; num--;) {
+    HOST_c2l(data, l);
+    X0 = l;
+    HOST_c2l(data, l);
+    X1 = l;
+    /* Round 0 */
+    R0(A, B, C, D, X0, 3, 0);
+    HOST_c2l(data, l);
+    X2 = l;
+    R0(D, A, B, C, X1, 7, 0);
+    HOST_c2l(data, l);
+    X3 = l;
+    R0(C, D, A, B, X2, 11, 0);
+    HOST_c2l(data, l);
+    X4 = l;
+    R0(B, C, D, A, X3, 19, 0);
+    HOST_c2l(data, l);
+    X5 = l;
+    R0(A, B, C, D, X4, 3, 0);
+    HOST_c2l(data, l);
+    X6 = l;
+    R0(D, A, B, C, X5, 7, 0);
+    HOST_c2l(data, l);
+    X7 = l;
+    R0(C, D, A, B, X6, 11, 0);
+    HOST_c2l(data, l);
+    X8 = l;
+    R0(B, C, D, A, X7, 19, 0);
+    HOST_c2l(data, l);
+    X9 = l;
+    R0(A, B, C, D, X8, 3, 0);
+    HOST_c2l(data, l);
+    X10 = l;
+    R0(D, A, B, C, X9, 7, 0);
+    HOST_c2l(data, l);
+    X11 = l;
+    R0(C, D, A, B, X10, 11, 0);
+    HOST_c2l(data, l);
+    X12 = l;
+    R0(B, C, D, A, X11, 19, 0);
+    HOST_c2l(data, l);
+    X13 = l;
+    R0(A, B, C, D, X12, 3, 0);
+    HOST_c2l(data, l);
+    X14 = l;
+    R0(D, A, B, C, X13, 7, 0);
+    HOST_c2l(data, l);
+    X15 = l;
+    R0(C, D, A, B, X14, 11, 0);
+    R0(B, C, D, A, X15, 19, 0);
+    /* Round 1 */
+    R1(A, B, C, D, X0, 3, 0x5A827999L);
+    R1(D, A, B, C, X4, 5, 0x5A827999L);
+    R1(C, D, A, B, X8, 9, 0x5A827999L);
+    R1(B, C, D, A, X12, 13, 0x5A827999L);
+    R1(A, B, C, D, X1, 3, 0x5A827999L);
+    R1(D, A, B, C, X5, 5, 0x5A827999L);
+    R1(C, D, A, B, X9, 9, 0x5A827999L);
+    R1(B, C, D, A, X13, 13, 0x5A827999L);
+    R1(A, B, C, D, X2, 3, 0x5A827999L);
+    R1(D, A, B, C, X6, 5, 0x5A827999L);
+    R1(C, D, A, B, X10, 9, 0x5A827999L);
+    R1(B, C, D, A, X14, 13, 0x5A827999L);
+    R1(A, B, C, D, X3, 3, 0x5A827999L);
+    R1(D, A, B, C, X7, 5, 0x5A827999L);
+    R1(C, D, A, B, X11, 9, 0x5A827999L);
+    R1(B, C, D, A, X15, 13, 0x5A827999L);
+    /* Round 2 */
+    R2(A, B, C, D, X0, 3, 0x6ED9EBA1L);
+    R2(D, A, B, C, X8, 9, 0x6ED9EBA1L);
+    R2(C, D, A, B, X4, 11, 0x6ED9EBA1L);
+    R2(B, C, D, A, X12, 15, 0x6ED9EBA1L);
+    R2(A, B, C, D, X2, 3, 0x6ED9EBA1L);
+    R2(D, A, B, C, X10, 9, 0x6ED9EBA1L);
+    R2(C, D, A, B, X6, 11, 0x6ED9EBA1L);
+    R2(B, C, D, A, X14, 15, 0x6ED9EBA1L);
+    R2(A, B, C, D, X1, 3, 0x6ED9EBA1L);
+    R2(D, A, B, C, X9, 9, 0x6ED9EBA1L);
+    R2(C, D, A, B, X5, 11, 0x6ED9EBA1L);
+    R2(B, C, D, A, X13, 15, 0x6ED9EBA1L);
+    R2(A, B, C, D, X3, 3, 0x6ED9EBA1L);
+    R2(D, A, B, C, X11, 9, 0x6ED9EBA1L);
+    R2(C, D, A, B, X7, 11, 0x6ED9EBA1L);
+    R2(B, C, D, A, X15, 15, 0x6ED9EBA1L);
+
+    A = state[0] += A;
+    B = state[1] += B;
+    C = state[2] += C;
+    D = state[3] += D;
+  }
+}
+
+#undef DATA_ORDER_IS_LITTLE_ENDIAN
+#undef HASH_CTX
+#undef HASH_CBLOCK
+#undef HASH_UPDATE
+#undef HASH_TRANSFORM
+#undef HASH_FINAL
+#undef HASH_MAKE_STRING
+#undef HASH_BLOCK_DATA_ORDER
+#undef F
+#undef G
+#undef H
+#undef ROTATE
+#undef R0
+#undef R1
+#undef R2
+#undef HOST_c2l
+#undef HOST_l2c
diff --git a/src/crypto/fipsmodule/md5/asm/md5-586.pl b/src/crypto/fipsmodule/md5/asm/md5-586.pl
new file mode 100644
index 0000000..4ba08ec
--- /dev/null
+++ b/src/crypto/fipsmodule/md5/asm/md5-586.pl
@@ -0,0 +1,312 @@
+#!/usr/local/bin/perl
+
+# Normal is the
+# md5_block_x86(MD5_CTX *c, ULONG *X);
+# version, non-normal is the
+# md5_block_x86(MD5_CTX *c, ULONG *X,int blocks);
+
+$normal=0;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../../perlasm");
+require "x86asm.pl";
+
+$output=pop;
+open STDOUT,">$output";
+
+&asm_init($ARGV[0],$0);
+
+$A="eax";
+$B="ebx";
+$C="ecx";
+$D="edx";
+$tmp1="edi";
+$tmp2="ebp";
+$X="esi";
+
+# What we need to load into $tmp for the next round
+%Ltmp1=("R0",&Np($C), "R1",&Np($C), "R2",&Np($C), "R3",&Np($D));
+@xo=(
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,	# R0
+ 1, 6, 11, 0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12,	# R1
+ 5, 8, 11, 14, 1, 4, 7, 10, 13, 0, 3, 6, 9, 12, 15, 2,	# R2
+ 0, 7, 14, 5, 12, 3, 10, 1, 8, 15, 6, 13, 4, 11, 2, 9,	# R3
+ );
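+
+# (Editorial note, not part of the upstream file.) The four rows of @xo are the
+# MD5 message-word schedules from RFC 1321: round R0 reads word i, R1 reads
+# word (1 + 5*i) mod 16, R2 reads (5 + 3*i) mod 16 and R3 reads (7*i) mod 16,
+# for step i = 0..15 within each round.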
+
+&md5_block("md5_block_asm_data_order");
+&asm_finish();
+
+close STDOUT;
+
+sub Np
+	{
+	local($p)=@_;
+	local(%n)=($A,$D,$B,$A,$C,$B,$D,$C);
+	return($n{$p});
+	}
+
+sub R0
+	{
+	local($pos,$a,$b,$c,$d,$K,$ki,$s,$t)=@_;
+
+	&mov($tmp1,$C)  if $pos < 0;
+	&mov($tmp2,&DWP($xo[$ki]*4,$K,"",0)) if $pos < 0; # very first one
+
+	# body proper
+
+	&comment("R0 $ki");
+	&xor($tmp1,$d); # F function - part 2
+
+	&and($tmp1,$b); # F function - part 3
+	&lea($a,&DWP($t,$a,$tmp2,1));
+
+	&xor($tmp1,$d); # F function - part 4
+
+	&add($a,$tmp1);
+	&mov($tmp1,&Np($c)) if $pos < 1;	# next tmp1 for R0
+	&mov($tmp1,&Np($c)) if $pos == 1;	# next tmp1 for R1
+
+	&rotl($a,$s);
+
+	&mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0)) if ($pos != 2);
+
+	&add($a,$b);
+	}
+
+sub R1
+	{
+	local($pos,$a,$b,$c,$d,$K,$ki,$s,$t)=@_;
+
+	&comment("R1 $ki");
+
+	&lea($a,&DWP($t,$a,$tmp2,1));
+
+	&xor($tmp1,$b); # G function - part 2
+	&and($tmp1,$d); # G function - part 3
+
+	&mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0)) if ($pos != 2);
+	&xor($tmp1,$c);			# G function - part 4
+
+	&add($a,$tmp1);
+	&mov($tmp1,&Np($c)) if $pos < 1;	# G function - part 1
+	&mov($tmp1,&Np($c)) if $pos == 1;	# G function - part 1
+
+	&rotl($a,$s);
+
+	&add($a,$b);
+	}
+
+sub R2
+	{
+	local($n,$pos,$a,$b,$c,$d,$K,$ki,$s,$t)=@_;
+	# This one is different, only 3 logical operations
+
+if (($n & 1) == 0)
+	{
+	&comment("R2 $ki");
+	# make sure to do 'D' first, not 'B', else we clash with
+	# the last add from the previous round.
+
+	&xor($tmp1,$d); # H function - part 2
+
+	&xor($tmp1,$b); # H function - part 3
+	&lea($a,&DWP($t,$a,$tmp2,1));
+
+	&add($a,$tmp1);
+
+	&rotl($a,$s);
+
+	&mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0));
+	&mov($tmp1,&Np($c));
+	}
+else
+	{
+	&comment("R2 $ki");
+	# make sure to do 'D' first, not 'B', else we clash with
+	# the last add from the previous round.
+
+	&lea($a,&DWP($t,$a,$tmp2,1));
+
+	&add($b,$c);			# MOVED FORWARD
+	&xor($tmp1,$d); # H function - part 2
+
+	&xor($tmp1,$b); # H function - part 3
+	&mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0)) if ($pos != 2);
+
+	&add($a,$tmp1);
+	&mov($tmp1,&Np($c)) if $pos < 1;	# H function - part 1
+	&mov($tmp1,-1) if $pos == 1;		# I function - part 1
+
+	&rotl($a,$s);
+
+	&add($a,$b);
+	}
+	}
+
+sub R3
+	{
+	local($pos,$a,$b,$c,$d,$K,$ki,$s,$t)=@_;
+
+	&comment("R3 $ki");
+
+	# &not($tmp1)
+	&xor($tmp1,$d) if $pos < 0; 	# I function - part 2
+
+	&or($tmp1,$b);				# I function - part 3
+	&lea($a,&DWP($t,$a,$tmp2,1));
+
+	&xor($tmp1,$c); 			# I function - part 4
+	&mov($tmp2,&DWP($xo[$ki+1]*4,$K,"",0))	if $pos != 2; # load X/k value
+	&mov($tmp2,&wparam(0)) if $pos == 2;
+
+	&add($a,$tmp1);
+	&mov($tmp1,-1) if $pos < 1;	# H function - part 1
+	&add($K,64) if $pos >=1 && !$normal;
+
+	&rotl($a,$s);
+
+	&xor($tmp1,&Np($d)) if $pos <= 0; 	# I function - part = first time
+	&mov($tmp1,&DWP( 0,$tmp2,"",0)) if $pos > 0;
+	&add($a,$b);
+	}
+
+
+sub md5_block
+	{
+	local($name)=@_;
+
+	&function_begin_B($name,"",3);
+
+	# parameter 1 is the MD5_CTX structure.
+	# A	0
+	# B	4
+	# C	8
+	# D 	12
+
+	&push("esi");
+	 &push("edi");
+	&mov($tmp1,	&wparam(0)); # edi
+	 &mov($X,	&wparam(1)); # esi
+	&mov($C,	&wparam(2));
+	 &push("ebp");
+	&shl($C,	6);
+	&push("ebx");
+	 &add($C,	$X); # offset we end at
+	&sub($C,	64);
+	 &mov($A,	&DWP( 0,$tmp1,"",0));
+	&push($C);	# Put on the TOS
+	 &mov($B,	&DWP( 4,$tmp1,"",0));
+	&mov($C,	&DWP( 8,$tmp1,"",0));
+	 &mov($D,	&DWP(12,$tmp1,"",0));
+
+	&set_label("start") unless $normal;
+	&comment("");
+	&comment("R0 section");
+
+	&R0(-2,$A,$B,$C,$D,$X, 0, 7,0xd76aa478);
+	&R0( 0,$D,$A,$B,$C,$X, 1,12,0xe8c7b756);
+	&R0( 0,$C,$D,$A,$B,$X, 2,17,0x242070db);
+	&R0( 0,$B,$C,$D,$A,$X, 3,22,0xc1bdceee);
+	&R0( 0,$A,$B,$C,$D,$X, 4, 7,0xf57c0faf);
+	&R0( 0,$D,$A,$B,$C,$X, 5,12,0x4787c62a);
+	&R0( 0,$C,$D,$A,$B,$X, 6,17,0xa8304613);
+	&R0( 0,$B,$C,$D,$A,$X, 7,22,0xfd469501);
+	&R0( 0,$A,$B,$C,$D,$X, 8, 7,0x698098d8);
+	&R0( 0,$D,$A,$B,$C,$X, 9,12,0x8b44f7af);
+	&R0( 0,$C,$D,$A,$B,$X,10,17,0xffff5bb1);
+	&R0( 0,$B,$C,$D,$A,$X,11,22,0x895cd7be);
+	&R0( 0,$A,$B,$C,$D,$X,12, 7,0x6b901122);
+	&R0( 0,$D,$A,$B,$C,$X,13,12,0xfd987193);
+	&R0( 0,$C,$D,$A,$B,$X,14,17,0xa679438e);
+	&R0( 1,$B,$C,$D,$A,$X,15,22,0x49b40821);
+
+	&comment("");
+	&comment("R1 section");
+	&R1(-1,$A,$B,$C,$D,$X,16, 5,0xf61e2562);
+	&R1( 0,$D,$A,$B,$C,$X,17, 9,0xc040b340);
+	&R1( 0,$C,$D,$A,$B,$X,18,14,0x265e5a51);
+	&R1( 0,$B,$C,$D,$A,$X,19,20,0xe9b6c7aa);
+	&R1( 0,$A,$B,$C,$D,$X,20, 5,0xd62f105d);
+	&R1( 0,$D,$A,$B,$C,$X,21, 9,0x02441453);
+	&R1( 0,$C,$D,$A,$B,$X,22,14,0xd8a1e681);
+	&R1( 0,$B,$C,$D,$A,$X,23,20,0xe7d3fbc8);
+	&R1( 0,$A,$B,$C,$D,$X,24, 5,0x21e1cde6);
+	&R1( 0,$D,$A,$B,$C,$X,25, 9,0xc33707d6);
+	&R1( 0,$C,$D,$A,$B,$X,26,14,0xf4d50d87);
+	&R1( 0,$B,$C,$D,$A,$X,27,20,0x455a14ed);
+	&R1( 0,$A,$B,$C,$D,$X,28, 5,0xa9e3e905);
+	&R1( 0,$D,$A,$B,$C,$X,29, 9,0xfcefa3f8);
+	&R1( 0,$C,$D,$A,$B,$X,30,14,0x676f02d9);
+	&R1( 1,$B,$C,$D,$A,$X,31,20,0x8d2a4c8a);
+
+	&comment("");
+	&comment("R2 section");
+	&R2( 0,-1,$A,$B,$C,$D,$X,32, 4,0xfffa3942);
+	&R2( 1, 0,$D,$A,$B,$C,$X,33,11,0x8771f681);
+	&R2( 2, 0,$C,$D,$A,$B,$X,34,16,0x6d9d6122);
+	&R2( 3, 0,$B,$C,$D,$A,$X,35,23,0xfde5380c);
+	&R2( 4, 0,$A,$B,$C,$D,$X,36, 4,0xa4beea44);
+	&R2( 5, 0,$D,$A,$B,$C,$X,37,11,0x4bdecfa9);
+	&R2( 6, 0,$C,$D,$A,$B,$X,38,16,0xf6bb4b60);
+	&R2( 7, 0,$B,$C,$D,$A,$X,39,23,0xbebfbc70);
+	&R2( 8, 0,$A,$B,$C,$D,$X,40, 4,0x289b7ec6);
+	&R2( 9, 0,$D,$A,$B,$C,$X,41,11,0xeaa127fa);
+	&R2(10, 0,$C,$D,$A,$B,$X,42,16,0xd4ef3085);
+	&R2(11, 0,$B,$C,$D,$A,$X,43,23,0x04881d05);
+	&R2(12, 0,$A,$B,$C,$D,$X,44, 4,0xd9d4d039);
+	&R2(13, 0,$D,$A,$B,$C,$X,45,11,0xe6db99e5);
+	&R2(14, 0,$C,$D,$A,$B,$X,46,16,0x1fa27cf8);
+	&R2(15, 1,$B,$C,$D,$A,$X,47,23,0xc4ac5665);
+
+	&comment("");
+	&comment("R3 section");
+	&R3(-1,$A,$B,$C,$D,$X,48, 6,0xf4292244);
+	&R3( 0,$D,$A,$B,$C,$X,49,10,0x432aff97);
+	&R3( 0,$C,$D,$A,$B,$X,50,15,0xab9423a7);
+	&R3( 0,$B,$C,$D,$A,$X,51,21,0xfc93a039);
+	&R3( 0,$A,$B,$C,$D,$X,52, 6,0x655b59c3);
+	&R3( 0,$D,$A,$B,$C,$X,53,10,0x8f0ccc92);
+	&R3( 0,$C,$D,$A,$B,$X,54,15,0xffeff47d);
+	&R3( 0,$B,$C,$D,$A,$X,55,21,0x85845dd1);
+	&R3( 0,$A,$B,$C,$D,$X,56, 6,0x6fa87e4f);
+	&R3( 0,$D,$A,$B,$C,$X,57,10,0xfe2ce6e0);
+	&R3( 0,$C,$D,$A,$B,$X,58,15,0xa3014314);
+	&R3( 0,$B,$C,$D,$A,$X,59,21,0x4e0811a1);
+	&R3( 0,$A,$B,$C,$D,$X,60, 6,0xf7537e82);
+	&R3( 0,$D,$A,$B,$C,$X,61,10,0xbd3af235);
+	&R3( 0,$C,$D,$A,$B,$X,62,15,0x2ad7d2bb);
+	&R3( 2,$B,$C,$D,$A,$X,63,21,0xeb86d391);
+
+	# &mov($tmp2,&wparam(0));	# done in the last R3
+	# &mov($tmp1,	&DWP( 0,$tmp2,"",0)); # done in the last R3
+
+	&add($A,$tmp1);
+	 &mov($tmp1,	&DWP( 4,$tmp2,"",0));
+
+	&add($B,$tmp1);
+	&mov($tmp1,	&DWP( 8,$tmp2,"",0));
+
+	&add($C,$tmp1);
+	&mov($tmp1,	&DWP(12,$tmp2,"",0));
+
+	&add($D,$tmp1);
+	&mov(&DWP( 0,$tmp2,"",0),$A);
+
+	&mov(&DWP( 4,$tmp2,"",0),$B);
+	&mov($tmp1,&swtmp(0)) unless $normal;
+
+	&mov(&DWP( 8,$tmp2,"",0),$C);
+	 &mov(&DWP(12,$tmp2,"",0),$D);
+
+	&cmp($tmp1,$X) unless $normal;			# check count
+	 &jae(&label("start")) unless $normal;
+
+	&pop("eax"); # pop the temp variable off the stack
+	 &pop("ebx");
+	&pop("ebp");
+	 &pop("edi");
+	&pop("esi");
+	 &ret();
+	&function_end_B($name);
+	}
+
diff --git a/src/crypto/fipsmodule/md5/asm/md5-x86_64.pl b/src/crypto/fipsmodule/md5/asm/md5-x86_64.pl
new file mode 100644
index 0000000..568f4f5
--- /dev/null
+++ b/src/crypto/fipsmodule/md5/asm/md5-x86_64.pl
@@ -0,0 +1,370 @@
+#!/usr/bin/perl -w
+#
+# MD5 optimized for AMD64.
+#
+# Author: Marc Bevand <bevand_m (at) epita.fr>
+# Licence: I hereby disclaim the copyright on this code and place it
+# in the public domain.
+#
+
+use strict;
+
+my $code;
+
+# round1_step() does:
+#   dst = x + ((dst + F(x,y,z) + X[k] + T_i) <<< s)
+#   %r10d = X[k_next]
+#   %r11d = z' (copy of z for the next step)
+# Each round1_step() takes about 5.3 clocks (9 instructions, 1.7 IPC)
+sub round1_step
+{
+    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
+    $code .= " mov	0*4(%rsi),	%r10d		/* (NEXT STEP) X[0] */\n" if ($pos == -1);
+    $code .= " mov	%edx,		%r11d		/* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
+    $code .= <<EOF;
+	xor	$y,		%r11d		/* y ^ ... */
+	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
+	and	$x,		%r11d		/* x & ... */
+	xor	$z,		%r11d		/* z ^ ... */
+	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
+	add	%r11d,		$dst		/* dst += ... */
+	rol	\$$s,		$dst		/* dst <<< s */
+	mov	$y,		%r11d		/* (NEXT STEP) z' = $y */
+	add	$x,		$dst		/* dst += x */
+EOF
+}
+
+# round2_step() does:
+#   dst = x + ((dst + G(x,y,z) + X[k] + T_i) <<< s)
+#   %r10d = X[k_next]
+#   %r11d = z' (copy of z for the next step)
+#   %r12d = z' (copy of z for the next step)
+# Each round2_step() takes about 5.4 clocks (11 instructions, 2.0 IPC)
+sub round2_step
+{
+    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
+    $code .= " mov	1*4(%rsi),	%r10d		/* (NEXT STEP) X[1] */\n" if ($pos == -1);
+    $code .= " mov	%edx,		%r11d		/* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
+    $code .= " mov	%edx,		%r12d		/* (NEXT STEP) z' = %edx */\n" if ($pos == -1);
+    $code .= <<EOF;
+	not	%r11d				/* not z */
+	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
+	and	$x,		%r12d		/* x & z */
+	and	$y,		%r11d		/* y & (not z) */
+	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
+	or	%r11d,		%r12d		/* (y & (not z)) | (x & z) */
+	mov	$y,		%r11d		/* (NEXT STEP) z' = $y */
+	add	%r12d,		$dst		/* dst += ... */
+	mov	$y,		%r12d		/* (NEXT STEP) z' = $y */
+	rol	\$$s,		$dst		/* dst <<< s */
+	add	$x,		$dst		/* dst += x */
+EOF
+}
+
+# round3_step() does:
+#   dst = x + ((dst + H(x,y,z) + X[k] + T_i) <<< s)
+#   %r10d = X[k_next]
+#   %r11d = y' (copy of y for the next step)
+# Each round3_step() takes about 4.2 clocks (8 instructions, 1.9 IPC)
+sub round3_step
+{
+    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
+    $code .= " mov	5*4(%rsi),	%r10d		/* (NEXT STEP) X[5] */\n" if ($pos == -1);
+    $code .= " mov	%ecx,		%r11d		/* (NEXT STEP) y' = %ecx */\n" if ($pos == -1);
+    $code .= <<EOF;
+	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
+	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
+	xor	$z,		%r11d		/* z ^ ... */
+	xor	$x,		%r11d		/* x ^ ... */
+	add	%r11d,		$dst		/* dst += ... */
+	rol	\$$s,		$dst		/* dst <<< s */
+	mov	$x,		%r11d		/* (NEXT STEP) y' = $x */
+	add	$x,		$dst		/* dst += x */
+EOF
+}
+
+# round4_step() does:
+#   dst = x + ((dst + I(x,y,z) + X[k] + T_i) <<< s)
+#   %r10d = X[k_next]
+#   %r11d = not z' (copy of not z for the next step)
+# Each round4_step() takes about 5.2 clocks (9 instructions, 1.7 IPC)
+sub round4_step
+{
+    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
+    $code .= " mov	0*4(%rsi),	%r10d		/* (NEXT STEP) X[0] */\n" if ($pos == -1);
+    $code .= " mov	\$0xffffffff,	%r11d\n" if ($pos == -1);
+    $code .= " xor	%edx,		%r11d		/* (NEXT STEP) not z' = not %edx*/\n"
+    if ($pos == -1);
+    $code .= <<EOF;
+	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
+	or	$x,		%r11d		/* x | ... */
+	xor	$y,		%r11d		/* y ^ ... */
+	add	%r11d,		$dst		/* dst += ... */
+	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
+	mov	\$0xffffffff,	%r11d
+	rol	\$$s,		$dst		/* dst <<< s */
+	xor	$y,		%r11d		/* (NEXT STEP) not z' = not $y */
+	add	$x,		$dst		/* dst += x */
+EOF
+}
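+
+# (Editorial note, not part of the upstream file.) For reference, the four
+# steps above implement the RFC 1321 auxiliary functions, applied to each
+# step's (x, y, z) arguments:
+#   round 1: F(x,y,z) = (x & y) | (~x & z)
+#   round 2: G(x,y,z) = (x & z) | (y & ~z)
+#   round 3: H(x,y,z) = x ^ y ^ z
+#   round 4: I(x,y,z) = y ^ (x | ~z)
+# The %r10d-%r12d copies carried between steps (the "(NEXT STEP)" comments) let
+# each step start on these formulas without reloading its inputs.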
+
+no warnings qw(uninitialized);
+my $flavour = shift;
+my $output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+my $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; my $dir=$1; my $xlate;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
+$code .= <<EOF;
+.text
+.align 16
+
+.globl md5_block_asm_data_order
+.type md5_block_asm_data_order,\@function,3
+md5_block_asm_data_order:
+	push	%rbp
+	push	%rbx
+	push	%r12
+	push	%r14
+	push	%r15
+.Lprologue:
+
+	# rdi = arg #1 (ctx, MD5_CTX pointer)
+	# rsi = arg #2 (ptr, data pointer)
+	# rdx = arg #3 (nbr, number of 16-word blocks to process)
+	mov	%rdi,		%rbp	# rbp = ctx
+	shl	\$6,		%rdx	# rdx = nbr in bytes
+	lea	(%rsi,%rdx),	%rdi	# rdi = end
+	mov	0*4(%rbp),	%eax	# eax = ctx->A
+	mov	1*4(%rbp),	%ebx	# ebx = ctx->B
+	mov	2*4(%rbp),	%ecx	# ecx = ctx->C
+	mov	3*4(%rbp),	%edx	# edx = ctx->D
+	# end is 'rdi'
+	# ptr is 'rsi'
+	# A is 'eax'
+	# B is 'ebx'
+	# C is 'ecx'
+	# D is 'edx'
+
+	cmp	%rdi,		%rsi		# cmp end with ptr
+	je	.Lend				# jmp if ptr == end
+
+	# BEGIN of loop over 16-word blocks
+.Lloop:	# save old values of A, B, C, D
+	mov	%eax,		%r8d
+	mov	%ebx,		%r9d
+	mov	%ecx,		%r14d
+	mov	%edx,		%r15d
+EOF
+round1_step(-1,'%eax','%ebx','%ecx','%edx', '1','0xd76aa478', '7');
+round1_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xe8c7b756','12');
+round1_step( 0,'%ecx','%edx','%eax','%ebx', '3','0x242070db','17');
+round1_step( 0,'%ebx','%ecx','%edx','%eax', '4','0xc1bdceee','22');
+round1_step( 0,'%eax','%ebx','%ecx','%edx', '5','0xf57c0faf', '7');
+round1_step( 0,'%edx','%eax','%ebx','%ecx', '6','0x4787c62a','12');
+round1_step( 0,'%ecx','%edx','%eax','%ebx', '7','0xa8304613','17');
+round1_step( 0,'%ebx','%ecx','%edx','%eax', '8','0xfd469501','22');
+round1_step( 0,'%eax','%ebx','%ecx','%edx', '9','0x698098d8', '7');
+round1_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8b44f7af','12');
+round1_step( 0,'%ecx','%edx','%eax','%ebx','11','0xffff5bb1','17');
+round1_step( 0,'%ebx','%ecx','%edx','%eax','12','0x895cd7be','22');
+round1_step( 0,'%eax','%ebx','%ecx','%edx','13','0x6b901122', '7');
+round1_step( 0,'%edx','%eax','%ebx','%ecx','14','0xfd987193','12');
+round1_step( 0,'%ecx','%edx','%eax','%ebx','15','0xa679438e','17');
+round1_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x49b40821','22');
+
+round2_step(-1,'%eax','%ebx','%ecx','%edx', '6','0xf61e2562', '5');
+round2_step( 0,'%edx','%eax','%ebx','%ecx','11','0xc040b340', '9');
+round2_step( 0,'%ecx','%edx','%eax','%ebx', '0','0x265e5a51','14');
+round2_step( 0,'%ebx','%ecx','%edx','%eax', '5','0xe9b6c7aa','20');
+round2_step( 0,'%eax','%ebx','%ecx','%edx','10','0xd62f105d', '5');
+round2_step( 0,'%edx','%eax','%ebx','%ecx','15', '0x2441453', '9');
+round2_step( 0,'%ecx','%edx','%eax','%ebx', '4','0xd8a1e681','14');
+round2_step( 0,'%ebx','%ecx','%edx','%eax', '9','0xe7d3fbc8','20');
+round2_step( 0,'%eax','%ebx','%ecx','%edx','14','0x21e1cde6', '5');
+round2_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xc33707d6', '9');
+round2_step( 0,'%ecx','%edx','%eax','%ebx', '8','0xf4d50d87','14');
+round2_step( 0,'%ebx','%ecx','%edx','%eax','13','0x455a14ed','20');
+round2_step( 0,'%eax','%ebx','%ecx','%edx', '2','0xa9e3e905', '5');
+round2_step( 0,'%edx','%eax','%ebx','%ecx', '7','0xfcefa3f8', '9');
+round2_step( 0,'%ecx','%edx','%eax','%ebx','12','0x676f02d9','14');
+round2_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x8d2a4c8a','20');
+
+round3_step(-1,'%eax','%ebx','%ecx','%edx', '8','0xfffa3942', '4');
+round3_step( 0,'%edx','%eax','%ebx','%ecx','11','0x8771f681','11');
+round3_step( 0,'%ecx','%edx','%eax','%ebx','14','0x6d9d6122','16');
+round3_step( 0,'%ebx','%ecx','%edx','%eax', '1','0xfde5380c','23');
+round3_step( 0,'%eax','%ebx','%ecx','%edx', '4','0xa4beea44', '4');
+round3_step( 0,'%edx','%eax','%ebx','%ecx', '7','0x4bdecfa9','11');
+round3_step( 0,'%ecx','%edx','%eax','%ebx','10','0xf6bb4b60','16');
+round3_step( 0,'%ebx','%ecx','%edx','%eax','13','0xbebfbc70','23');
+round3_step( 0,'%eax','%ebx','%ecx','%edx', '0','0x289b7ec6', '4');
+round3_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xeaa127fa','11');
+round3_step( 0,'%ecx','%edx','%eax','%ebx', '6','0xd4ef3085','16');
+round3_step( 0,'%ebx','%ecx','%edx','%eax', '9', '0x4881d05','23');
+round3_step( 0,'%eax','%ebx','%ecx','%edx','12','0xd9d4d039', '4');
+round3_step( 0,'%edx','%eax','%ebx','%ecx','15','0xe6db99e5','11');
+round3_step( 0,'%ecx','%edx','%eax','%ebx', '2','0x1fa27cf8','16');
+round3_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xc4ac5665','23');
+
+round4_step(-1,'%eax','%ebx','%ecx','%edx', '7','0xf4292244', '6');
+round4_step( 0,'%edx','%eax','%ebx','%ecx','14','0x432aff97','10');
+round4_step( 0,'%ecx','%edx','%eax','%ebx', '5','0xab9423a7','15');
+round4_step( 0,'%ebx','%ecx','%edx','%eax','12','0xfc93a039','21');
+round4_step( 0,'%eax','%ebx','%ecx','%edx', '3','0x655b59c3', '6');
+round4_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8f0ccc92','10');
+round4_step( 0,'%ecx','%edx','%eax','%ebx', '1','0xffeff47d','15');
+round4_step( 0,'%ebx','%ecx','%edx','%eax', '8','0x85845dd1','21');
+round4_step( 0,'%eax','%ebx','%ecx','%edx','15','0x6fa87e4f', '6');
+round4_step( 0,'%edx','%eax','%ebx','%ecx', '6','0xfe2ce6e0','10');
+round4_step( 0,'%ecx','%edx','%eax','%ebx','13','0xa3014314','15');
+round4_step( 0,'%ebx','%ecx','%edx','%eax', '4','0x4e0811a1','21');
+round4_step( 0,'%eax','%ebx','%ecx','%edx','11','0xf7537e82', '6');
+round4_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xbd3af235','10');
+round4_step( 0,'%ecx','%edx','%eax','%ebx', '9','0x2ad7d2bb','15');
+round4_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xeb86d391','21');
+$code .= <<EOF;
+	# add old values of A, B, C, D
+	add	%r8d,	%eax
+	add	%r9d,	%ebx
+	add	%r14d,	%ecx
+	add	%r15d,	%edx
+
+	# loop control
+	add	\$64,		%rsi		# ptr += 64
+	cmp	%rdi,		%rsi		# cmp end with ptr
+	jb	.Lloop				# jmp if ptr < end
+	# END of loop over 16-word blocks
+
+.Lend:
+	mov	%eax,		0*4(%rbp)	# ctx->A = A
+	mov	%ebx,		1*4(%rbp)	# ctx->B = B
+	mov	%ecx,		2*4(%rbp)	# ctx->C = C
+	mov	%edx,		3*4(%rbp)	# ctx->D = D
+
+	mov	(%rsp),%r15
+	mov	8(%rsp),%r14
+	mov	16(%rsp),%r12
+	mov	24(%rsp),%rbx
+	mov	32(%rsp),%rbp
+	add	\$40,%rsp
+.Lepilogue:
+	ret
+.size md5_block_asm_data_order,.-md5_block_asm_data_order
+EOF
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+my $rec="%rcx";
+my $frame="%rdx";
+my $context="%r8";
+my $disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+.type	se_handler,\@abi-omnipotent
+.align	16
+se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	lea	.Lprologue(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<.Lprologue
+	jb	.Lin_prologue
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	lea	.Lepilogue(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
+	jae	.Lin_prologue
+
+	lea	40(%rax),%rax
+
+	mov	-8(%rax),%rbp
+	mov	-16(%rax),%rbx
+	mov	-24(%rax),%r12
+	mov	-32(%rax),%r14
+	mov	-40(%rax),%r15
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+.Lin_prologue:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$154,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	se_handler,.-se_handler
+
+.section	.pdata
+.align	4
+	.rva	.LSEH_begin_md5_block_asm_data_order
+	.rva	.LSEH_end_md5_block_asm_data_order
+	.rva	.LSEH_info_md5_block_asm_data_order
+
+.section	.xdata
+.align	8
+.LSEH_info_md5_block_asm_data_order:
+	.byte	9,0,0,0
+	.rva	se_handler
+___
+}
+
+print $code;
+
+close STDOUT;
diff --git a/src/crypto/fipsmodule/md5/md5.c b/src/crypto/fipsmodule/md5/md5.c
new file mode 100644
index 0000000..15a0f53
--- /dev/null
+++ b/src/crypto/fipsmodule/md5/md5.c
@@ -0,0 +1,299 @@
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *    "This product includes cryptographic software written by
+ *     Eric Young (eay@cryptsoft.com)"
+ *    The word 'cryptographic' can be left out if the rouines from the library
+ *    being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ *    the apps directory (application code) you must include an acknowledgement:
+ *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed.  i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.] */
+
+#include <openssl/md5.h>
+
+#include <string.h>
+
+#include <openssl/mem.h>
+
+#include "../../internal.h"
+
+
+uint8_t *MD5(const uint8_t *data, size_t len, uint8_t *out) {
+  MD5_CTX ctx;
+  MD5_Init(&ctx);
+  MD5_Update(&ctx, data, len);
+  MD5_Final(out, &ctx);
+
+  return out;
+}
+
+int MD5_Init(MD5_CTX *md5) {
+  OPENSSL_memset(md5, 0, sizeof(MD5_CTX));
+  md5->h[0] = 0x67452301UL;
+  md5->h[1] = 0xefcdab89UL;
+  md5->h[2] = 0x98badcfeUL;
+  md5->h[3] = 0x10325476UL;
+  return 1;
+}
+
+#if !defined(OPENSSL_NO_ASM) && \
+    (defined(OPENSSL_X86_64) || defined(OPENSSL_X86))
+#define MD5_ASM
+#define md5_block_data_order md5_block_asm_data_order
+#endif
+
+
+void md5_block_data_order(uint32_t *state, const uint8_t *data, size_t num);
+
+#define DATA_ORDER_IS_LITTLE_ENDIAN
+
+#define HASH_CTX MD5_CTX
+#define HASH_CBLOCK 64
+#define HASH_UPDATE MD5_Update
+#define HASH_TRANSFORM MD5_Transform
+#define HASH_FINAL MD5_Final
+#define HASH_MAKE_STRING(c, s) \
+  do {                         \
+    uint32_t ll;               \
+    ll = (c)->h[0];            \
+    HOST_l2c(ll, (s));         \
+    ll = (c)->h[1];            \
+    HOST_l2c(ll, (s));         \
+    ll = (c)->h[2];            \
+    HOST_l2c(ll, (s));         \
+    ll = (c)->h[3];            \
+    HOST_l2c(ll, (s));         \
+  } while (0)
+#define HASH_BLOCK_DATA_ORDER md5_block_data_order
+
+#include "../digest/md32_common.h"
+
+/* As pointed out by Wei Dai <weidai@eskimo.com>, the above can be
+ * simplified to the code below.  Wei attributes these optimizations
+ * to Peter Gutmann's SHS code, and he attributes it to Rich Schroeppel.
+ */
+#define F(b, c, d) ((((c) ^ (d)) & (b)) ^ (d))
+#define G(b, c, d) ((((b) ^ (c)) & (d)) ^ (c))
+#define H(b, c, d) ((b) ^ (c) ^ (d))
+#define I(b, c, d) (((~(d)) | (b)) ^ (c))
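+
+/* Editorial note (not part of the upstream file): F and G above are "select"
+ * forms of the usual RFC 1321 definitions. For any bits b, c, d:
+ *
+ *   (b & c) | (~b & d) == ((c ^ d) & b) ^ d
+ *   (b & d) | (c & ~d) == ((b ^ c) & d) ^ c
+ *
+ * which needs one fewer logical operation per evaluation. */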
+
+#define ROTATE(a, n) (((a) << (n)) | ((a) >> (32 - (n))))
+
+#define R0(a, b, c, d, k, s, t)            \
+  do {                                     \
+    (a) += ((k) + (t) + F((b), (c), (d))); \
+    (a) = ROTATE(a, s);                    \
+    (a) += (b);                            \
+  } while (0)
+
+#define R1(a, b, c, d, k, s, t)            \
+  do {                                     \
+    (a) += ((k) + (t) + G((b), (c), (d))); \
+    (a) = ROTATE(a, s);                    \
+    (a) += (b);                            \
+  } while (0)
+
+#define R2(a, b, c, d, k, s, t)            \
+  do {                                     \
+    (a) += ((k) + (t) + H((b), (c), (d))); \
+    (a) = ROTATE(a, s);                    \
+    (a) += (b);                            \
+  } while (0)
+
+#define R3(a, b, c, d, k, s, t)            \
+  do {                                     \
+    (a) += ((k) + (t) + I((b), (c), (d))); \
+    (a) = ROTATE(a, s);                    \
+    (a) += (b);                            \
+  } while (0)
+
+#ifndef md5_block_data_order
+#ifdef X
+#undef X
+#endif
+void md5_block_data_order(uint32_t *state, const uint8_t *data, size_t num) {
+  uint32_t A, B, C, D, l;
+  uint32_t XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9, XX10, XX11, XX12,
+      XX13, XX14, XX15;
+#define X(i) XX##i
+
+  A = state[0];
+  B = state[1];
+  C = state[2];
+  D = state[3];
+
+  for (; num--;) {
+    HOST_c2l(data, l);
+    X(0) = l;
+    HOST_c2l(data, l);
+    X(1) = l;
+    /* Round 0 */
+    R0(A, B, C, D, X(0), 7, 0xd76aa478L);
+    HOST_c2l(data, l);
+    X(2) = l;
+    R0(D, A, B, C, X(1), 12, 0xe8c7b756L);
+    HOST_c2l(data, l);
+    X(3) = l;
+    R0(C, D, A, B, X(2), 17, 0x242070dbL);
+    HOST_c2l(data, l);
+    X(4) = l;
+    R0(B, C, D, A, X(3), 22, 0xc1bdceeeL);
+    HOST_c2l(data, l);
+    X(5) = l;
+    R0(A, B, C, D, X(4), 7, 0xf57c0fafL);
+    HOST_c2l(data, l);
+    X(6) = l;
+    R0(D, A, B, C, X(5), 12, 0x4787c62aL);
+    HOST_c2l(data, l);
+    X(7) = l;
+    R0(C, D, A, B, X(6), 17, 0xa8304613L);
+    HOST_c2l(data, l);
+    X(8) = l;
+    R0(B, C, D, A, X(7), 22, 0xfd469501L);
+    HOST_c2l(data, l);
+    X(9) = l;
+    R0(A, B, C, D, X(8), 7, 0x698098d8L);
+    HOST_c2l(data, l);
+    X(10) = l;
+    R0(D, A, B, C, X(9), 12, 0x8b44f7afL);
+    HOST_c2l(data, l);
+    X(11) = l;
+    R0(C, D, A, B, X(10), 17, 0xffff5bb1L);
+    HOST_c2l(data, l);
+    X(12) = l;
+    R0(B, C, D, A, X(11), 22, 0x895cd7beL);
+    HOST_c2l(data, l);
+    X(13) = l;
+    R0(A, B, C, D, X(12), 7, 0x6b901122L);
+    HOST_c2l(data, l);
+    X(14) = l;
+    R0(D, A, B, C, X(13), 12, 0xfd987193L);
+    HOST_c2l(data, l);
+    X(15) = l;
+    R0(C, D, A, B, X(14), 17, 0xa679438eL);
+    R0(B, C, D, A, X(15), 22, 0x49b40821L);
+    /* Round 1 */
+    R1(A, B, C, D, X(1), 5, 0xf61e2562L);
+    R1(D, A, B, C, X(6), 9, 0xc040b340L);
+    R1(C, D, A, B, X(11), 14, 0x265e5a51L);
+    R1(B, C, D, A, X(0), 20, 0xe9b6c7aaL);
+    R1(A, B, C, D, X(5), 5, 0xd62f105dL);
+    R1(D, A, B, C, X(10), 9, 0x02441453L);
+    R1(C, D, A, B, X(15), 14, 0xd8a1e681L);
+    R1(B, C, D, A, X(4), 20, 0xe7d3fbc8L);
+    R1(A, B, C, D, X(9), 5, 0x21e1cde6L);
+    R1(D, A, B, C, X(14), 9, 0xc33707d6L);
+    R1(C, D, A, B, X(3), 14, 0xf4d50d87L);
+    R1(B, C, D, A, X(8), 20, 0x455a14edL);
+    R1(A, B, C, D, X(13), 5, 0xa9e3e905L);
+    R1(D, A, B, C, X(2), 9, 0xfcefa3f8L);
+    R1(C, D, A, B, X(7), 14, 0x676f02d9L);
+    R1(B, C, D, A, X(12), 20, 0x8d2a4c8aL);
+    /* Round 2 */
+    R2(A, B, C, D, X(5), 4, 0xfffa3942L);
+    R2(D, A, B, C, X(8), 11, 0x8771f681L);
+    R2(C, D, A, B, X(11), 16, 0x6d9d6122L);
+    R2(B, C, D, A, X(14), 23, 0xfde5380cL);
+    R2(A, B, C, D, X(1), 4, 0xa4beea44L);
+    R2(D, A, B, C, X(4), 11, 0x4bdecfa9L);
+    R2(C, D, A, B, X(7), 16, 0xf6bb4b60L);
+    R2(B, C, D, A, X(10), 23, 0xbebfbc70L);
+    R2(A, B, C, D, X(13), 4, 0x289b7ec6L);
+    R2(D, A, B, C, X(0), 11, 0xeaa127faL);
+    R2(C, D, A, B, X(3), 16, 0xd4ef3085L);
+    R2(B, C, D, A, X(6), 23, 0x04881d05L);
+    R2(A, B, C, D, X(9), 4, 0xd9d4d039L);
+    R2(D, A, B, C, X(12), 11, 0xe6db99e5L);
+    R2(C, D, A, B, X(15), 16, 0x1fa27cf8L);
+    R2(B, C, D, A, X(2), 23, 0xc4ac5665L);
+    /* Round 3 */
+    R3(A, B, C, D, X(0), 6, 0xf4292244L);
+    R3(D, A, B, C, X(7), 10, 0x432aff97L);
+    R3(C, D, A, B, X(14), 15, 0xab9423a7L);
+    R3(B, C, D, A, X(5), 21, 0xfc93a039L);
+    R3(A, B, C, D, X(12), 6, 0x655b59c3L);
+    R3(D, A, B, C, X(3), 10, 0x8f0ccc92L);
+    R3(C, D, A, B, X(10), 15, 0xffeff47dL);
+    R3(B, C, D, A, X(1), 21, 0x85845dd1L);
+    R3(A, B, C, D, X(8), 6, 0x6fa87e4fL);
+    R3(D, A, B, C, X(15), 10, 0xfe2ce6e0L);
+    R3(C, D, A, B, X(6), 15, 0xa3014314L);
+    R3(B, C, D, A, X(13), 21, 0x4e0811a1L);
+    R3(A, B, C, D, X(4), 6, 0xf7537e82L);
+    R3(D, A, B, C, X(11), 10, 0xbd3af235L);
+    R3(C, D, A, B, X(2), 15, 0x2ad7d2bbL);
+    R3(B, C, D, A, X(9), 21, 0xeb86d391L);
+
+    A = state[0] += A;
+    B = state[1] += B;
+    C = state[2] += C;
+    D = state[3] += D;
+  }
+}
+#undef X
+#endif
+
+#undef DATA_ORDER_IS_LITTLE_ENDIAN
+#undef HASH_CTX
+#undef HASH_CBLOCK
+#undef HASH_UPDATE
+#undef HASH_TRANSFORM
+#undef HASH_FINAL
+#undef HASH_MAKE_STRING
+#undef HASH_BLOCK_DATA_ORDER
+#undef F
+#undef G
+#undef H
+#undef I
+#undef ROTATE
+#undef R0
+#undef R1
+#undef R2
+#undef R3
+#undef HOST_c2l
+#undef HOST_l2c
diff --git a/src/crypto/fipsmodule/sha/asm/sha1-586.pl b/src/crypto/fipsmodule/sha/asm/sha1-586.pl
new file mode 100644
index 0000000..4221e27
--- /dev/null
+++ b/src/crypto/fipsmodule/sha/asm/sha1-586.pl
@@ -0,0 +1,1480 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# [Re]written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# "[Re]written" was achieved in two major overhauls. In 2004 BODY_*
+# functions were re-implemented to address P4 performance issue [see
+# commentary below], and in 2006 the rest was rewritten in order to
+# gain freedom to liberate licensing terms.
+
+# January, September 2004.
+#
+# It was noted that Intel IA-32 C compiler generates code which
+# performs ~30% *faster* on P4 CPU than original *hand-coded*
+# SHA1 assembler implementation. To address this problem (and
+# prove that humans are still better than machines:-), the
+# original code was overhauled, which resulted in following
+# performance changes:
+#
+#		compared with original	compared with Intel cc
+#		assembler impl.		generated code
+# Pentium	-16%			+48%
+# PIII/AMD	+8%			+16%
+# P4		+85%(!)			+45%
+#
+# As you can see Pentium came out as the loser:-( Yet I reckoned that
+# the improvement on P4 outweighs the loss and incorporated this
+# re-tuned code into 0.9.7 and later.
+# ----------------------------------------------------------------
+#					<appro@fy.chalmers.se>
+
+# August 2009.
+#
+# George Spelvin has tipped that F_40_59(b,c,d) can be rewritten as
+# '(c&d) + (b&(c^d))', which allows to accumulate partial results
+# and lighten "pressure" on scratch registers. This resulted in
+# >12% performance improvement on contemporary AMD cores (with no
+# degradation on other CPUs:-). Also, the code was revised to maximize
+# "distance" between instructions producing input to 'lea' instruction
+# and the 'lea' instruction itself, which is essential for Intel Atom
+# core and resulted in ~15% improvement.
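+
+# (Editorial note, not part of the upstream file.) The rewrite relies on the
+# identity Maj(b,c,d) = (b&c)|(b&d)|(c&d) = (c&d) + (b&(c^d)): the two terms on
+# the right can never set the same bit, because c^d is zero wherever c&d is
+# one, so the OR may be computed as an ADD and the two halves can be added
+# into the running sum in separate steps.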
+
+# October 2010.
+#
+# Add SSSE3, Supplemental[!] SSE3, implementation. The idea behind it
+# is to offload message schedule denoted by Wt in NIST specification,
+# or Xupdate in OpenSSL source, to SIMD unit. The idea is not novel,
+# and in SSE2 context was first explored by Dean Gaudet in 2004, see
+# http://arctic.org/~dean/crypto/sha1.html. Since then several things
+# have changed that made it interesting again:
+#
+# a) XMM units became faster and wider;
+# b) instruction set became more versatile;
+# c) an important observation was made by Max Locktykhin, which made
+#    it possible to reduce amount of instructions required to perform
+#    the operation in question, for further details see
+#    http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1/.
+
+# April 2011.
+#
+# Add AVX code path, probably most controversial... The thing is that
+# switch to AVX alone improves performance by as little as 4% in
+# comparison to SSSE3 code path. But below result doesn't look like
+# 4% improvement... Trouble is that Sandy Bridge decodes 'ro[rl]' as
+# pair of µ-ops, and it's the additional µ-ops, two per round, that
+# make it run slower than Core2 and Westmere. But 'sh[rl]d' is decoded
+# as single µ-op by Sandy Bridge and it's replacing 'ro[rl]' with
+# equivalent 'sh[rl]d' that is responsible for the impressive 5.1
+# cycles per processed byte. But 'sh[rl]d' is not something that used
+# to be fast, nor does it appear to be fast in upcoming Bulldozer
+# [according to its optimization manual]. Which is why AVX code path
+# is guarded by *both* AVX and synthetic bit denoting Intel CPUs.
+# One can argue that it's unfair to AMD, but without 'sh[rl]d' it
+# makes no sense to keep the AVX code path. If somebody feels that
+# strongly, it's probably more appropriate to discuss possibility of
+# using vector rotate XOP on AMD...
+
+# March 2014.
+#
+# Add support for Intel SHA Extensions.
+
+######################################################################
+# Current performance is summarized in following table. Numbers are
+# CPU clock cycles spent to process single byte (less is better).
+#
+#		x86		SSSE3		AVX
+# Pentium	15.7		-
+# PIII		11.5		-
+# P4		10.6		-
+# AMD K8	7.1		-
+# Core2		7.3		6.0/+22%	-
+# Westmere	7.3		5.5/+33%	-
+# Sandy Bridge	8.8		6.2/+40%	5.1(**)/+73%
+# Ivy Bridge	7.2		4.8/+51%	4.7(**)/+53%
+# Haswell	6.5		4.3/+51%	4.1(**)/+58%
+# Skylake	6.4		4.1/+55%	4.1(**)/+55%
+# Bulldozer	11.6		6.0/+92%
+# VIA Nano	10.6		7.5/+41%
+# Atom		12.5		9.3(*)/+35%
+# Silvermont	14.5		9.9(*)/+46%
+# Goldmont	8.8		6.7/+30%	1.7(***)/+415%
+#
+# (*)	Loop is 1056 instructions long and expected result is ~8.25.
+#	The discrepancy is because of front-end limitations, so
+#	called MS-ROM penalties, and on Silvermont even rotate's
+#	limited parallelism.
+#
+# (**)	As per above comment, the result is for AVX *plus* sh[rl]d.
+#
+# (***)	SHAEXT result
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../../perlasm");
+require "x86asm.pl";
+
+$output=pop;
+open STDOUT,">$output";
+
+&asm_init($ARGV[0],"sha1-586.pl",$ARGV[$#ARGV] eq "386");
+
+$xmm=$ymm=0;
+for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }
+
+# In upstream, this is controlled by shelling out to the compiler to check
+# versions, but BoringSSL is intended to be used with pre-generated perlasm
+# output, so this isn't useful anyway.
+$ymm = 1;
+
+$ymm = 0 unless ($xmm);
+
+$shaext=$xmm;	### set to zero if compiling for 1.0.1
+
+# TODO(davidben): Consider enabling the Intel SHA Extensions code once it's
+# been tested.
+$shaext = 0;
+
+&external_label("OPENSSL_ia32cap_P") if ($xmm);
+
+
+$A="eax";
+$B="ebx";
+$C="ecx";
+$D="edx";
+$E="edi";
+$T="esi";
+$tmp1="ebp";
+
+@V=($A,$B,$C,$D,$E,$T);
+
+$alt=0;	# 1 denotes alternative IALU implementation, which performs
+	# 8% *worse* on P4, same on Westmere and Atom, 2% better on
+	# Sandy Bridge...
+
+sub BODY_00_15
+	{
+	local($n,$a,$b,$c,$d,$e,$f)=@_;
+
+	&comment("00_15 $n");
+
+	&mov($f,$c);			# f to hold F_00_19(b,c,d)
+	 if ($n==0)  { &mov($tmp1,$a); }
+	 else        { &mov($a,$tmp1); }
+	&rotl($tmp1,5);			# tmp1=ROTATE(a,5)
+	 &xor($f,$d);
+	&add($tmp1,$e);			# tmp1+=e;
+	 &mov($e,&swtmp($n%16));	# e becomes volatile and is loaded
+	 				# with xi, also note that e becomes
+					# f in next round...
+	&and($f,$b);
+	&rotr($b,2);			# b=ROTATE(b,30)
+	 &xor($f,$d);			# f holds F_00_19(b,c,d)
+	&lea($tmp1,&DWP(0x5a827999,$tmp1,$e));	# tmp1+=K_00_19+xi
+
+	if ($n==15) { &mov($e,&swtmp(($n+1)%16));# pre-fetch f for next round
+		      &add($f,$tmp1); }	# f+=tmp1
+	else        { &add($tmp1,$f); }	# f becomes a in next round
+	&mov($tmp1,$a)			if ($alt && $n==15);
+	}
+
+sub BODY_16_19
+	{
+	local($n,$a,$b,$c,$d,$e,$f)=@_;
+
+	&comment("16_19 $n");
+
+if ($alt) {
+	&xor($c,$d);
+	 &xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
+	&and($tmp1,$c);			# tmp1 to hold F_00_19(b,c,d), b&=c^d
+	 &xor($f,&swtmp(($n+8)%16));
+	&xor($tmp1,$d);			# tmp1=F_00_19(b,c,d)
+	 &xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
+	&rotl($f,1);			# f=ROTATE(f,1)
+	 &add($e,$tmp1);		# e+=F_00_19(b,c,d)
+	&xor($c,$d);			# restore $c
+	 &mov($tmp1,$a);		# b in next round
+	&rotr($b,$n==16?2:7);		# b=ROTATE(b,30)
+	 &mov(&swtmp($n%16),$f);	# xi=f
+	&rotl($a,5);			# ROTATE(a,5)
+	 &lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
+	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
+	 &add($f,$a);			# f+=ROTATE(a,5)
+} else {
+	&mov($tmp1,$c);			# tmp1 to hold F_00_19(b,c,d)
+	 &xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
+	&xor($tmp1,$d);
+	 &xor($f,&swtmp(($n+8)%16));
+	&and($tmp1,$b);
+	 &xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
+	&rotl($f,1);			# f=ROTATE(f,1)
+	 &xor($tmp1,$d);		# tmp1=F_00_19(b,c,d)
+	&add($e,$tmp1);			# e+=F_00_19(b,c,d)
+	 &mov($tmp1,$a);
+	&rotr($b,2);			# b=ROTATE(b,30)
+	 &mov(&swtmp($n%16),$f);	# xi=f
+	&rotl($tmp1,5);			# ROTATE(a,5)
+	 &lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
+	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
+	 &add($f,$tmp1);		# f+=ROTATE(a,5)
+}
+	}
+
+sub BODY_20_39
+	{
+	local($n,$a,$b,$c,$d,$e,$f)=@_;
+	local $K=($n<40)?0x6ed9eba1:0xca62c1d6;
+
+	&comment("20_39 $n");
+
+if ($alt) {
+	&xor($tmp1,$c);			# tmp1 to hold F_20_39(b,c,d), b^=c
+	 &xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
+	&xor($tmp1,$d);			# tmp1 holds F_20_39(b,c,d)
+	 &xor($f,&swtmp(($n+8)%16));
+	&add($e,$tmp1);			# e+=F_20_39(b,c,d)
+	 &xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
+	&rotl($f,1);			# f=ROTATE(f,1)
+	 &mov($tmp1,$a);		# b in next round
+	&rotr($b,7);			# b=ROTATE(b,30)
+	 &mov(&swtmp($n%16),$f)		if($n<77);# xi=f
+	&rotl($a,5);			# ROTATE(a,5)
+	 &xor($b,$c)			if($n==39);# warm up for BODY_40_59
+	&and($tmp1,$b)			if($n==39);
+	 &lea($f,&DWP($K,$f,$e));	# f+=e+K_XX_YY
+	&mov($e,&swtmp(($n+1)%16))	if($n<79);# pre-fetch f for next round
+	 &add($f,$a);			# f+=ROTATE(a,5)
+	&rotr($a,5)			if ($n==79);
+} else {
+	&mov($tmp1,$b);			# tmp1 to hold F_20_39(b,c,d)
+	 &xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
+	&xor($tmp1,$c);
+	 &xor($f,&swtmp(($n+8)%16));
+	&xor($tmp1,$d);			# tmp1 holds F_20_39(b,c,d)
+	 &xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
+	&rotl($f,1);			# f=ROTATE(f,1)
+	 &add($e,$tmp1);		# e+=F_20_39(b,c,d)
+	&rotr($b,2);			# b=ROTATE(b,30)
+	 &mov($tmp1,$a);
+	&rotl($tmp1,5);			# ROTATE(a,5)
+	 &mov(&swtmp($n%16),$f) if($n<77);# xi=f
+	&lea($f,&DWP($K,$f,$e));	# f+=e+K_XX_YY
+	 &mov($e,&swtmp(($n+1)%16)) if($n<79);# pre-fetch f for next round
+	&add($f,$tmp1);			# f+=ROTATE(a,5)
+}
+	}
+
+sub BODY_40_59
+	{
+	local($n,$a,$b,$c,$d,$e,$f)=@_;
+
+	&comment("40_59 $n");
+
+if ($alt) {
+	&add($e,$tmp1);			# e+=b&(c^d)
+	 &xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
+	&mov($tmp1,$d);
+	 &xor($f,&swtmp(($n+8)%16));
+	&xor($c,$d);			# restore $c
+	 &xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
+	&rotl($f,1);			# f=ROTATE(f,1)
+	 &and($tmp1,$c);
+	&rotr($b,7);			# b=ROTATE(b,30)
+	 &add($e,$tmp1);		# e+=c&d
+	&mov($tmp1,$a);			# b in next round
+	 &mov(&swtmp($n%16),$f);	# xi=f
+	&rotl($a,5);			# ROTATE(a,5)
+	 &xor($b,$c)			if ($n<59);
+	&and($tmp1,$b)			if ($n<59);# tmp1 to hold F_40_59(b,c,d)
+	 &lea($f,&DWP(0x8f1bbcdc,$f,$e));# f+=K_40_59+e+(b&(c^d))
+	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
+	 &add($f,$a);			# f+=ROTATE(a,5)
+} else {
+	&mov($tmp1,$c);			# tmp1 to hold F_40_59(b,c,d)
+	 &xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
+	&xor($tmp1,$d);
+	 &xor($f,&swtmp(($n+8)%16));
+	&and($tmp1,$b);
+	 &xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
+	&rotl($f,1);			# f=ROTATE(f,1)
+	 &add($tmp1,$e);		# b&(c^d)+=e
+	&rotr($b,2);			# b=ROTATE(b,30)
+	 &mov($e,$a);			# e becomes volatile
+	&rotl($e,5);			# ROTATE(a,5)
+	 &mov(&swtmp($n%16),$f);	# xi=f
+	&lea($f,&DWP(0x8f1bbcdc,$f,$tmp1));# f+=K_40_59+e+(b&(c^d))
+	 &mov($tmp1,$c);
+	&add($f,$e);			# f+=ROTATE(a,5)
+	 &and($tmp1,$d);
+	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
+	 &add($f,$tmp1);		# f+=c&d
+}
+	}
+
+&function_begin("sha1_block_data_order");
+if ($xmm) {
+  &static_label("shaext_shortcut")	if ($shaext);
+  &static_label("ssse3_shortcut");
+  &static_label("avx_shortcut")		if ($ymm);
+  &static_label("K_XX_XX");
+
+	&call	(&label("pic_point"));	# make it PIC!
+  &set_label("pic_point");
+	&blindpop($tmp1);
+	&picmeup($T,"OPENSSL_ia32cap_P",$tmp1,&label("pic_point"));
+	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
+
+	&mov	($A,&DWP(0,$T));
+	&mov	($D,&DWP(4,$T));
+	&test	($D,1<<9);		# check SSSE3 bit
+	&jz	(&label("x86"));
+	&mov	($C,&DWP(8,$T));
+	&test	($A,1<<24);		# check FXSR bit
+	&jz	(&label("x86"));
+	if ($shaext) {
+		&test	($C,1<<29);		# check SHA bit
+		&jnz	(&label("shaext_shortcut"));
+	}
+	if ($ymm) {
+		&and	($D,1<<28);		# mask AVX bit
+		&and	($A,1<<30);		# mask "Intel CPU" bit
+		&or	($A,$D);
+		&cmp	($A,1<<28|1<<30);
+		&je	(&label("avx_shortcut"));
+	}
+	&jmp	(&label("ssse3_shortcut"));
+  &set_label("x86",16);
+}
+	&mov($tmp1,&wparam(0));	# SHA_CTX *c
+	&mov($T,&wparam(1));	# const void *input
+	&mov($A,&wparam(2));	# size_t num
+	&stack_push(16+3);	# allocate X[16]
+	&shl($A,6);
+	&add($A,$T);
+	&mov(&wparam(2),$A);	# pointer beyond the end of input
+	&mov($E,&DWP(16,$tmp1));# pre-load E
+	&jmp(&label("loop"));
+
+&set_label("loop",16);
+
+	# copy input chunk to X, but reversing byte order!
+	for ($i=0; $i<16; $i+=4)
+		{
+		&mov($A,&DWP(4*($i+0),$T));
+		&mov($B,&DWP(4*($i+1),$T));
+		&mov($C,&DWP(4*($i+2),$T));
+		&mov($D,&DWP(4*($i+3),$T));
+		&bswap($A);
+		&bswap($B);
+		&bswap($C);
+		&bswap($D);
+		&mov(&swtmp($i+0),$A);
+		&mov(&swtmp($i+1),$B);
+		&mov(&swtmp($i+2),$C);
+		&mov(&swtmp($i+3),$D);
+		}
+	&mov(&wparam(1),$T);	# redundant in 1st spin
+
+	&mov($A,&DWP(0,$tmp1));	# load SHA_CTX
+	&mov($B,&DWP(4,$tmp1));
+	&mov($C,&DWP(8,$tmp1));
+	&mov($D,&DWP(12,$tmp1));
+	# E is pre-loaded
+
+	for($i=0;$i<16;$i++)	{ &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
+	for(;$i<20;$i++)	{ &BODY_16_19($i,@V); unshift(@V,pop(@V)); }
+	for(;$i<40;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
+	for(;$i<60;$i++)	{ &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
+	for(;$i<80;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
+
+	(($V[5] eq $D) and ($V[0] eq $E)) or die;	# double-check
+
+	&mov($tmp1,&wparam(0));	# re-load SHA_CTX*
+	&mov($D,&wparam(1));	# D is last "T" and is discarded
+
+	&add($E,&DWP(0,$tmp1));	# E is last "A"...
+	&add($T,&DWP(4,$tmp1));
+	&add($A,&DWP(8,$tmp1));
+	&add($B,&DWP(12,$tmp1));
+	&add($C,&DWP(16,$tmp1));
+
+	&mov(&DWP(0,$tmp1),$E);	# update SHA_CTX
+	 &add($D,64);		# advance input pointer
+	&mov(&DWP(4,$tmp1),$T);
+	 &cmp($D,&wparam(2));	# have we reached the end yet?
+	&mov(&DWP(8,$tmp1),$A);
+	 &mov($E,$C);		# C is last "E" which needs to be "pre-loaded"
+	&mov(&DWP(12,$tmp1),$B);
+	 &mov($T,$D);		# input pointer
+	&mov(&DWP(16,$tmp1),$C);
+	&jb(&label("loop"));
+
+	&stack_pop(16+3);
+&function_end("sha1_block_data_order");
+
+if ($xmm) {
+if ($shaext) {
+######################################################################
+# Intel SHA Extensions implementation of SHA1 update function.
+#
+my ($ctx,$inp,$num)=("edi","esi","ecx");
+my ($ABCD,$E,$E_,$BSWAP)=map("xmm$_",(0..3));
+my @MSG=map("xmm$_",(4..7));
+
+sub sha1rnds4 {
+ my ($dst,$src,$imm)=@_;
+    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
+    {	&data_byte(0x0f,0x3a,0xcc,0xc0|($1<<3)|$2,$imm);	}
+}
+sub sha1op38 {
+ my ($opcodelet,$dst,$src)=@_;
+    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
+    {	&data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);	}
+}
+sub sha1nexte	{ sha1op38(0xc8,@_); }
+sub sha1msg1	{ sha1op38(0xc9,@_); }
+sub sha1msg2	{ sha1op38(0xca,@_); }
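+
+# Note on the helpers above (an editorial reading aid, not upstream
+# commentary): sha1rnds4/sha1op38 hand-assemble the Intel SHA extension
+# instructions as raw bytes so the module does not depend on assembler
+# support for them. For example, &sha1rnds4("xmm0","xmm1",0) emits
+# 0x0f,0x3a,0xcc,0xc1,0x00, i.e. sha1rnds4 with xmm0 as destination,
+# xmm1 as source and immediate 0; the instruction performs four SHA-1
+# rounds, the immediate selecting the round function and constant for
+# the current 20-round group (hence the int($i/5) in the loop below).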
+
+&function_begin("_sha1_block_data_order_shaext");
+	&call	(&label("pic_point"));	# make it PIC!
+	&set_label("pic_point");
+	&blindpop($tmp1);
+	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
+&set_label("shaext_shortcut");
+	&mov	($ctx,&wparam(0));
+	&mov	("ebx","esp");
+	&mov	($inp,&wparam(1));
+	&mov	($num,&wparam(2));
+	&sub	("esp",32);
+
+	&movdqu	($ABCD,&QWP(0,$ctx));
+	&movd	($E,&DWP(16,$ctx));
+	&and	("esp",-32);
+	&movdqa	($BSWAP,&QWP(0x50,$tmp1));	# byte-n-word swap
+
+	&movdqu	(@MSG[0],&QWP(0,$inp));
+	&pshufd	($ABCD,$ABCD,0b00011011);	# flip word order
+	&movdqu	(@MSG[1],&QWP(0x10,$inp));
+	&pshufd	($E,$E,0b00011011);		# flip word order
+	&movdqu	(@MSG[2],&QWP(0x20,$inp));
+	&pshufb	(@MSG[0],$BSWAP);
+	&movdqu	(@MSG[3],&QWP(0x30,$inp));
+	&pshufb	(@MSG[1],$BSWAP);
+	&pshufb	(@MSG[2],$BSWAP);
+	&pshufb	(@MSG[3],$BSWAP);
+	&jmp	(&label("loop_shaext"));
+
+&set_label("loop_shaext",16);
+	&dec		($num);
+	&lea		("eax",&DWP(0x40,$inp));
+	&movdqa		(&QWP(0,"esp"),$E);	# offload $E
+	&paddd		($E,@MSG[0]);
+	&cmovne		($inp,"eax");
+	&movdqa		(&QWP(16,"esp"),$ABCD);	# offload $ABCD
+
+for($i=0;$i<20-4;$i+=2) {
+	&sha1msg1	(@MSG[0],@MSG[1]);
+	&movdqa		($E_,$ABCD);
+	&sha1rnds4	($ABCD,$E,int($i/5));	# 0-3...
+	&sha1nexte	($E_,@MSG[1]);
+	&pxor		(@MSG[0],@MSG[2]);
+	&sha1msg1	(@MSG[1],@MSG[2]);
+	&sha1msg2	(@MSG[0],@MSG[3]);
+
+	&movdqa		($E,$ABCD);
+	&sha1rnds4	($ABCD,$E_,int(($i+1)/5));
+	&sha1nexte	($E,@MSG[2]);
+	&pxor		(@MSG[1],@MSG[3]);
+	&sha1msg2	(@MSG[1],@MSG[0]);
+
+	push(@MSG,shift(@MSG));	push(@MSG,shift(@MSG));
+}
+	&movdqu		(@MSG[0],&QWP(0,$inp));
+	&movdqa		($E_,$ABCD);
+	&sha1rnds4	($ABCD,$E,3);		# 64-67
+	&sha1nexte	($E_,@MSG[1]);
+	&movdqu		(@MSG[1],&QWP(0x10,$inp));
+	&pshufb		(@MSG[0],$BSWAP);
+
+	&movdqa		($E,$ABCD);
+	&sha1rnds4	($ABCD,$E_,3);		# 68-71
+	&sha1nexte	($E,@MSG[2]);
+	&movdqu		(@MSG[2],&QWP(0x20,$inp));
+	&pshufb		(@MSG[1],$BSWAP);
+
+	&movdqa		($E_,$ABCD);
+	&sha1rnds4	($ABCD,$E,3);		# 72-75
+	&sha1nexte	($E_,@MSG[3]);
+	&movdqu		(@MSG[3],&QWP(0x30,$inp));
+	&pshufb		(@MSG[2],$BSWAP);
+
+	&movdqa		($E,$ABCD);
+	&sha1rnds4	($ABCD,$E_,3);		# 76-79
+	&movdqa		($E_,&QWP(0,"esp"));
+	&pshufb		(@MSG[3],$BSWAP);
+	&sha1nexte	($E,$E_);
+	&paddd		($ABCD,&QWP(16,"esp"));
+
+	&jnz		(&label("loop_shaext"));
+
+	&pshufd	($ABCD,$ABCD,0b00011011);
+	&pshufd	($E,$E,0b00011011);
+	&movdqu	(&QWP(0,$ctx),$ABCD);
+	&movd	(&DWP(16,$ctx),$E);
+	&mov	("esp","ebx");
+&function_end("_sha1_block_data_order_shaext");
+}
+######################################################################
+# The SSSE3 implementation.
+#
+# %xmm[0-7] are used as a ring @X[] buffer holding quadruples of the
+# last 32 elements of the message schedule or Xupdate outputs. The first
+# 4 quadruples are simply byte-swapped input; the next 4 are calculated
+# according to the method originally suggested by Dean Gaudet (modulo
+# being implemented in SSSE3). Once 8 quadruples, or 32 elements, are
+# collected, it switches to the routine proposed by Max Locktyukhin.
+#
+# Calculations inevitably require temporary registers, and there are
+# no %xmm registers left to spare. For this reason part of the ring
+# buffer, X[2..4] to be specific, is offloaded to a 3-quadruple ring
+# buffer on the stack. Keep in mind that X[2] is an alias for X[-6],
+# X[3] for X[-5], and X[4] for X[-4]...
+#
+# Another notable optimization is aggressive stack frame compression,
+# aiming to minimize the number of 9-byte instructions...
+#
+# Yet another notable optimization is the "jumping" $B variable: no
+# register is permanently allocated for the $B value. This made it
+# possible to eliminate one instruction from body_20_39...
+#
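+# A rough sketch of the register-renaming idiom used by the Xupdate_*
+# subs below (editorial note, based on the code comments): @X[0] names
+# the quadruple being composed during the current group of four rounds,
+# while @X[-1&7]..@X[-4&7] name the four most recently completed ones;
+# the trailing "$Xi++; push(@X,shift(@X));" in each sub rotates the
+# names so that the freshly composed quadruple becomes @X[-1&7] for the
+# next group.
+#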
+my $Xi=4;			# 4xSIMD Xupdate round, start pre-seeded
+my @X=map("xmm$_",(4..7,0..3));	# pre-seeded for $Xi=4
+my @V=($A,$B,$C,$D,$E);
+my $j=0;			# hash round
+my $rx=0;
+my @T=($T,$tmp1);
+my $inp;
+
+my $_rol=sub { &rol(@_) };
+my $_ror=sub { &ror(@_) };
+
+&function_begin("_sha1_block_data_order_ssse3");
+	&call	(&label("pic_point"));	# make it PIC!
+	&set_label("pic_point");
+	&blindpop($tmp1);
+	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
+&set_label("ssse3_shortcut");
+
+	&movdqa	(@X[3],&QWP(0,$tmp1));		# K_00_19
+	&movdqa	(@X[4],&QWP(16,$tmp1));		# K_20_39
+	&movdqa	(@X[5],&QWP(32,$tmp1));		# K_40_59
+	&movdqa	(@X[6],&QWP(48,$tmp1));		# K_60_79
+	&movdqa	(@X[2],&QWP(64,$tmp1));		# pbswap mask
+
+	&mov	($E,&wparam(0));		# load argument block
+	&mov	($inp=@T[1],&wparam(1));
+	&mov	($D,&wparam(2));
+	&mov	(@T[0],"esp");
+
+	# stack frame layout
+	#
+	# +0	X[0]+K	X[1]+K	X[2]+K	X[3]+K	# XMM->IALU xfer area
+	#	X[4]+K	X[5]+K	X[6]+K	X[7]+K
+	#	X[8]+K	X[9]+K	X[10]+K	X[11]+K
+	#	X[12]+K	X[13]+K	X[14]+K	X[15]+K
+	#
+	# +64	X[0]	X[1]	X[2]	X[3]	# XMM->XMM backtrace area
+	#	X[4]	X[5]	X[6]	X[7]
+	#	X[8]	X[9]	X[10]	X[11]	# even borrowed for K_00_19
+	#
+	# +112	K_20_39	K_20_39	K_20_39	K_20_39	# constants
+	#	K_40_59	K_40_59	K_40_59	K_40_59
+	#	K_60_79	K_60_79	K_60_79	K_60_79
+	#	K_00_19	K_00_19	K_00_19	K_00_19
+	#	pbswap mask
+	#
+	# +192	ctx				# argument block
+	# +196	inp
+	# +200	end
+	# +204	esp
+	&sub	("esp",208);
+	&and	("esp",-64);
+
+	&movdqa	(&QWP(112+0,"esp"),@X[4]);	# copy constants
+	&movdqa	(&QWP(112+16,"esp"),@X[5]);
+	&movdqa	(&QWP(112+32,"esp"),@X[6]);
+	&shl	($D,6);				# len*64
+	&movdqa	(&QWP(112+48,"esp"),@X[3]);
+	&add	($D,$inp);			# end of input
+	&movdqa	(&QWP(112+64,"esp"),@X[2]);
+	&add	($inp,64);
+	&mov	(&DWP(192+0,"esp"),$E);		# save argument block
+	&mov	(&DWP(192+4,"esp"),$inp);
+	&mov	(&DWP(192+8,"esp"),$D);
+	&mov	(&DWP(192+12,"esp"),@T[0]);	# save original %esp
+
+	&mov	($A,&DWP(0,$E));		# load context
+	&mov	($B,&DWP(4,$E));
+	&mov	($C,&DWP(8,$E));
+	&mov	($D,&DWP(12,$E));
+	&mov	($E,&DWP(16,$E));
+	&mov	(@T[0],$B);			# magic seed
+
+	&movdqu	(@X[-4&7],&QWP(-64,$inp));	# load input to %xmm[0-3]
+	&movdqu	(@X[-3&7],&QWP(-48,$inp));
+	&movdqu	(@X[-2&7],&QWP(-32,$inp));
+	&movdqu	(@X[-1&7],&QWP(-16,$inp));
+	&pshufb	(@X[-4&7],@X[2]);		# byte swap
+	&pshufb	(@X[-3&7],@X[2]);
+	&pshufb	(@X[-2&7],@X[2]);
+	&movdqa	(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
+	&pshufb	(@X[-1&7],@X[2]);
+	&paddd	(@X[-4&7],@X[3]);		# add K_00_19
+	&paddd	(@X[-3&7],@X[3]);
+	&paddd	(@X[-2&7],@X[3]);
+	&movdqa	(&QWP(0,"esp"),@X[-4&7]);	# X[]+K xfer to IALU
+	&psubd	(@X[-4&7],@X[3]);		# restore X[]
+	&movdqa	(&QWP(0+16,"esp"),@X[-3&7]);
+	&psubd	(@X[-3&7],@X[3]);
+	&movdqa	(&QWP(0+32,"esp"),@X[-2&7]);
+	&mov	(@T[1],$C);
+	&psubd	(@X[-2&7],@X[3]);
+	&xor	(@T[1],$D);
+	&pshufd	(@X[0],@X[-4&7],0xee);		# was &movdqa	(@X[0],@X[-3&7]);
+	&and	(@T[0],@T[1]);
+	&jmp	(&label("loop"));
+
+######################################################################
+# The SSE instruction sequence is first broken into groups of
+# independent instructions, independent with respect to their inputs and
+# the shifter (not all architectures have more than one). Then IALU
+# instructions are "knitted in" between the SSE groups. Distance is
+# maintained for an SSE latency of 2, in the hope that it suits the
+# upcoming AMD Bulldozer [which allegedly also implements SSSE3]...
+#
+# Temporary register usage: X[2] is volatile at the entry and at the
+# end is restored from the backtrace ring buffer. X[3] is expected to
+# contain the current K_XX_XX constant and is used to calculate X[-1]+K
+# from the previous round; it becomes volatile the moment the value is
+# saved to the stack for transfer to the IALU. X[4] becomes volatile
+# whenever X[-4] is accumulated and offloaded to the backtrace ring
+# buffer; at the end it is loaded with the next K_XX_XX [which becomes
+# X[3] in the next round]...
+#
+sub Xupdate_ssse3_16_31()		# recall that $Xi starts with 4
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&punpcklqdq(@X[0],@X[-3&7]);	# compose "X[-14]" in "X[0]", was &palignr(@X[0],@X[-4&7],8);
+	&movdqa	(@X[2],@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &paddd	(@X[3],@X[-1&7]);
+	  &movdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	&psrldq	(@X[2],4);		# "X[-3]", 3 dwords
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&pxor	(@X[0],@X[-4&7]);	# "X[0]"^="X[-16]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+
+	&pxor	(@X[2],@X[-2&7]);	# "X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pxor	(@X[0],@X[2]);		# "X[0]"^="X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	  &movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&movdqa	(@X[4],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	&movdqa (@X[2],@X[0]);
+	 eval(shift(@insns));
+
+	&pslldq	(@X[4],12);		# "X[0]"<<96, extract one dword
+	&paddd	(@X[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&psrld	(@X[2],31);
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	&movdqa	(@X[3],@X[4]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&psrld	(@X[4],30);
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	&por	(@X[0],@X[2]);		# "X[0]"<<<=1
+	 eval(shift(@insns));
+	  &movdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5);	# restore X[] from backtrace buffer
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pslld	(@X[3],2);
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	&pxor   (@X[0],@X[4]);
+	  &movdqa	(@X[4],&QWP(112-16+16*(($Xi)/5),"esp"));	# K_XX_XX
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pxor	(@X[0],@X[3]);		# "X[0]"^=("X[0]"<<96)<<<2
+	  &pshufd	(@X[1],@X[-3&7],0xee)	if ($Xi<7);	# was &movdqa	(@X[1],@X[-2&7])
+	  &pshufd	(@X[3],@X[-1&7],0xee)	if ($Xi==7);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	 foreach (@insns) { eval; }	# remaining instructions [if any]
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+}
+
+sub Xupdate_ssse3_32_79()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));		# body_20_39
+	&pxor	(@X[0],@X[-4&7]);	# "X[0]"="X[-32]"^"X[-16]"
+	&punpcklqdq(@X[2],@X[-1&7]);	# compose "X[-6]", was &palignr(@X[2],@X[-2&7],8)
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&pxor	(@X[0],@X[-7&7]);	# "X[0]"^="X[-28]"
+	  &movdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);	# save X[] to backtrace buffer
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns))		if (@insns[0] =~ /_rol/);
+	 if ($Xi%5) {
+	  &movdqa	(@X[4],@X[3]);	# "perpetuate" K_XX_XX...
+	 } else {			# ... or load next one
+	  &movdqa	(@X[4],&QWP(112-16+16*($Xi/5),"esp"));
+	 }
+	 eval(shift(@insns));		# ror
+	  &paddd	(@X[3],@X[-1&7]);
+	 eval(shift(@insns));
+
+	&pxor	(@X[0],@X[2]);		# "X[0]"^="X[-6]"
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&movdqa	(@X[2],@X[0]);
+	  &movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+	 eval(shift(@insns))		if (@insns[0] =~ /_rol/);
+
+	&pslld	(@X[0],2);
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	&psrld	(@X[2],30);
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+	 eval(shift(@insns))		if (@insns[1] =~ /_rol/);
+	 eval(shift(@insns))		if (@insns[0] =~ /_rol/);
+
+	&por	(@X[0],@X[2]);		# "X[0]"<<<=2
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	  &movdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19);	# restore X[] from backtrace buffer
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	  &pshufd	(@X[3],@X[-1],0xee)	if ($Xi<19);	# was &movdqa	(@X[3],@X[0])
+	 eval(shift(@insns));
+
+	 foreach (@insns) { eval; }	# remaining instructions
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+}
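+
+# (Editorial note.) The 32..79 update above uses the "doubled" recurrence
+# X[i] = (X[i-6]^X[i-16]^X[i-28]^X[i-32]) <<< 2, obtained by applying the
+# standard schedule step twice; unlike the 16..31 case its nearest input,
+# X[i-6], lies outside the quadruple being computed, so all four lanes
+# can be produced in parallel without the <<96 fix-up.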
+
+sub Xuplast_ssse3_80()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &paddd	(@X[3],@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer IALU
+
+	 foreach (@insns) { eval; }		# remaining instructions
+
+	&mov	($inp=@T[1],&DWP(192+4,"esp"));
+	&cmp	($inp,&DWP(192+8,"esp"));
+	&je	(&label("done"));
+
+	&movdqa	(@X[3],&QWP(112+48,"esp"));	# K_00_19
+	&movdqa	(@X[2],&QWP(112+64,"esp"));	# pbswap mask
+	&movdqu	(@X[-4&7],&QWP(0,$inp));	# load input
+	&movdqu	(@X[-3&7],&QWP(16,$inp));
+	&movdqu	(@X[-2&7],&QWP(32,$inp));
+	&movdqu	(@X[-1&7],&QWP(48,$inp));
+	&add	($inp,64);
+	&pshufb	(@X[-4&7],@X[2]);		# byte swap
+	&mov	(&DWP(192+4,"esp"),$inp);
+	&movdqa	(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
+
+  $Xi=0;
+}
+
+sub Xloop_ssse3()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&pshufb	(@X[($Xi-3)&7],@X[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&paddd	(@X[($Xi-4)&7],@X[3]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&movdqa	(&QWP(0+16*$Xi,"esp"),@X[($Xi-4)&7]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&psubd	(@X[($Xi-4)&7],@X[3]);
+
+	foreach (@insns) { eval; }
+  $Xi++;
+}
+
+sub Xtail_ssse3()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	foreach (@insns) { eval; }
+}
+
+sub body_00_19 () {	# ((c^d)&b)^d
+	# on start @T[0]=(c^d)&b
+	return &body_20_39()	if ($rx==19);	$rx++;
+	(
+	'($a,$b,$c,$d,$e)=@V;'.
+	'&$_ror	($b,$j?7:2);',	# $b>>>2
+	'&xor	(@T[0],$d);',
+	'&mov	(@T[1],$a);',	# $b in next round
+
+	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
+	'&xor	($b,$c);',	# $c^$d for next round
+
+	'&$_rol	($a,5);',
+	'&add	($e,@T[0]);',
+	'&and	(@T[1],$b);',	# ($b&($c^$d)) for next round
+
+	'&xor	($b,$c);',	# restore $b
+	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+	);
+}
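+# (Editorial note.) The round function above is the usual SHA-1 "choice"
+# function, computed as ((c^d)&b)^d, which equals (b&c)|(~b&d); the
+# xor/and steps marked "for next round" pre-compute the following
+# round's (c^d)&b term into @T[1] before $b is restored.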
+
+sub body_20_39 () {	# b^d^c
+	# on entry @T[0]=b^d
+	return &body_40_59()	if ($rx==39);	$rx++;
+	(
+	'($a,$b,$c,$d,$e)=@V;'.
+	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
+	'&xor	(@T[0],$d)	if($j==19);'.
+	'&xor	(@T[0],$c)	if($j> 19);',	# ($b^$d^$c)
+	'&mov	(@T[1],$a);',	# $b in next round
+
+	'&$_rol	($a,5);',
+	'&add	($e,@T[0]);',
+	'&xor	(@T[1],$c)	if ($j< 79);',	# $b^$d for next round
+
+	'&$_ror	($b,7);',	# $b>>>2
+	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+	);
+}
+
+sub body_40_59 () {	# ((b^c)&(c^d))^c
+	# on entry @T[0]=(b^c), (c^=d)
+	$rx++;
+	(
+	'($a,$b,$c,$d,$e)=@V;'.
+	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
+	'&and	(@T[0],$c)	if ($j>=40);',	# (b^c)&(c^d)
+	'&xor	($c,$d)		if ($j>=40);',	# restore $c
+
+	'&$_ror	($b,7);',	# $b>>>2
+	'&mov	(@T[1],$a);',	# $b for next round
+	'&xor	(@T[0],$c);',
+
+	'&$_rol	($a,5);',
+	'&add	($e,@T[0]);',
+	'&xor	(@T[1],$c)	if ($j==59);'.
+	'&xor	(@T[1],$b)	if ($j< 59);',	# b^c for next round
+
+	'&xor	($b,$c)		if ($j< 59);',	# c^d for next round
+	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+	);
+}
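+# (Editorial note.) ((b^c)&(c^d))^c is the majority function
+# Maj(b,c,d) = (b&c)|(b&d)|(c&d): when c==d the expression reduces to c,
+# and when c!=d it reduces to b, matching the majority in both cases.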
+######
+sub bodyx_00_19 () {	# ((c^d)&b)^d
+	# on start @T[0]=(b&c)^(~b&d), $e+=X[]+K
+	return &bodyx_20_39()	if ($rx==19);	$rx++;
+	(
+	'($a,$b,$c,$d,$e)=@V;'.
+
+	'&rorx	($b,$b,2)			if ($j==0);'.	# $b>>>2
+	'&rorx	($b,@T[1],7)			if ($j!=0);',	# $b>>>2
+	'&lea	($e,&DWP(0,$e,@T[0]));',
+	'&rorx	(@T[0],$a,5);',
+
+	'&andn	(@T[1],$a,$c);',
+	'&and	($a,$b)',
+	'&add	($d,&DWP(4*(($j+1)&15),"esp"));',	# X[]+K xfer
+
+	'&xor	(@T[1],$a)',
+	'&add	($e,@T[0]);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+	);
+}
+
+sub bodyx_20_39 () {	# b^d^c
+	# on start $b=b^c^d
+	return &bodyx_40_59()	if ($rx==39);	$rx++;
+	(
+	'($a,$b,$c,$d,$e)=@V;'.
+
+	'&add	($e,($j==19?@T[0]:$b))',
+	'&rorx	($b,@T[1],7);',	# $b>>>2
+	'&rorx	(@T[0],$a,5);',
+
+	'&xor	($a,$b)				if ($j<79);',
+	'&add	($d,&DWP(4*(($j+1)&15),"esp"))	if ($j<79);',	# X[]+K xfer
+	'&xor	($a,$c)				if ($j<79);',
+	'&add	($e,@T[0]);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+	);
+}
+
+sub bodyx_40_59 () {	# ((b^c)&(c^d))^c
+	# on start $b=((b^c)&(c^d))^c
+	return &bodyx_20_39()	if ($rx==59);	$rx++;
+	(
+	'($a,$b,$c,$d,$e)=@V;'.
+
+	'&rorx	(@T[0],$a,5)',
+	'&lea	($e,&DWP(0,$e,$b))',
+	'&rorx	($b,@T[1],7)',	# $b>>>2
+	'&add	($d,&DWP(4*(($j+1)&15),"esp"))',	# X[]+K xfer
+
+	'&mov	(@T[1],$c)',
+	'&xor	($a,$b)',	# b^c for next round
+	'&xor	(@T[1],$b)',	# c^d for next round
+
+	'&and	($a,@T[1])',
+	'&add	($e,@T[0])',
+	'&xor	($a,$b)'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+	);
+}
+
+&set_label("loop",16);
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_32_79(\&body_00_19);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xuplast_ssse3_80(\&body_20_39);	# can jump to "done"
+
+				$saved_j=$j; @saved_V=@V;
+
+	&Xloop_ssse3(\&body_20_39);
+	&Xloop_ssse3(\&body_20_39);
+	&Xloop_ssse3(\&body_20_39);
+
+	&mov	(@T[1],&DWP(192,"esp"));	# update context
+	&add	($A,&DWP(0,@T[1]));
+	&add	(@T[0],&DWP(4,@T[1]));		# $b
+	&add	($C,&DWP(8,@T[1]));
+	&mov	(&DWP(0,@T[1]),$A);
+	&add	($D,&DWP(12,@T[1]));
+	&mov	(&DWP(4,@T[1]),@T[0]);
+	&add	($E,&DWP(16,@T[1]));
+	&mov	(&DWP(8,@T[1]),$C);
+	&mov	($B,$C);
+	&mov	(&DWP(12,@T[1]),$D);
+	&xor	($B,$D);
+	&mov	(&DWP(16,@T[1]),$E);
+	&mov	(@T[1],@T[0]);
+	&pshufd	(@X[0],@X[-4&7],0xee);		# was &movdqa	(@X[0],@X[-3&7]);
+	&and	(@T[0],$B);
+	&mov	($B,$T[1]);
+
+	&jmp	(&label("loop"));
+
+&set_label("done",16);		$j=$saved_j; @V=@saved_V;
+
+	&Xtail_ssse3(\&body_20_39);
+	&Xtail_ssse3(\&body_20_39);
+	&Xtail_ssse3(\&body_20_39);
+
+	&mov	(@T[1],&DWP(192,"esp"));	# update context
+	&add	($A,&DWP(0,@T[1]));
+	&mov	("esp",&DWP(192+12,"esp"));	# restore %esp
+	&add	(@T[0],&DWP(4,@T[1]));		# $b
+	&add	($C,&DWP(8,@T[1]));
+	&mov	(&DWP(0,@T[1]),$A);
+	&add	($D,&DWP(12,@T[1]));
+	&mov	(&DWP(4,@T[1]),@T[0]);
+	&add	($E,&DWP(16,@T[1]));
+	&mov	(&DWP(8,@T[1]),$C);
+	&mov	(&DWP(12,@T[1]),$D);
+	&mov	(&DWP(16,@T[1]),$E);
+
+&function_end("_sha1_block_data_order_ssse3");
+
+$rx=0;	# reset
+
+if ($ymm) {
+my $Xi=4;			# 4xSIMD Xupdate round, start pre-seeded
+my @X=map("xmm$_",(4..7,0..3));	# pre-seeded for $Xi=4
+my @V=($A,$B,$C,$D,$E);
+my $j=0;			# hash round
+my @T=($T,$tmp1);
+my $inp;
+
+my $_rol=sub { &shld(@_[0],@_) };
+my $_ror=sub { &shrd(@_[0],@_) };
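+# (Editorial note.) In the AVX path the rotate closures are built from
+# shld/shrd (shifting a register against itself rotates it) rather than
+# rol/ror; this appears to be what the "AVX *plus* sh[rl]d" footnote in
+# the performance table at the top of this file refers to.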
+
+&function_begin("_sha1_block_data_order_avx");
+	&call	(&label("pic_point"));	# make it PIC!
+	&set_label("pic_point");
+	&blindpop($tmp1);
+	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
+&set_label("avx_shortcut");
+	&vzeroall();
+
+	&vmovdqa(@X[3],&QWP(0,$tmp1));		# K_00_19
+	&vmovdqa(@X[4],&QWP(16,$tmp1));		# K_20_39
+	&vmovdqa(@X[5],&QWP(32,$tmp1));		# K_40_59
+	&vmovdqa(@X[6],&QWP(48,$tmp1));		# K_60_79
+	&vmovdqa(@X[2],&QWP(64,$tmp1));		# pbswap mask
+
+	&mov	($E,&wparam(0));		# load argument block
+	&mov	($inp=@T[1],&wparam(1));
+	&mov	($D,&wparam(2));
+	&mov	(@T[0],"esp");
+
+	# stack frame layout
+	#
+	# +0	X[0]+K	X[1]+K	X[2]+K	X[3]+K	# XMM->IALU xfer area
+	#	X[4]+K	X[5]+K	X[6]+K	X[7]+K
+	#	X[8]+K	X[9]+K	X[10]+K	X[11]+K
+	#	X[12]+K	X[13]+K	X[14]+K	X[15]+K
+	#
+	# +64	X[0]	X[1]	X[2]	X[3]	# XMM->XMM backtrace area
+	#	X[4]	X[5]	X[6]	X[7]
+	#	X[8]	X[9]	X[10]	X[11]	# even borrowed for K_00_19
+	#
+	# +112	K_20_39	K_20_39	K_20_39	K_20_39	# constants
+	#	K_40_59	K_40_59	K_40_59	K_40_59
+	#	K_60_79	K_60_79	K_60_79	K_60_79
+	#	K_00_19	K_00_19	K_00_19	K_00_19
+	#	pbswap mask
+	#
+	# +192	ctx				# argument block
+	# +196	inp
+	# +200	end
+	# +204	esp
+	&sub	("esp",208);
+	&and	("esp",-64);
+
+	&vmovdqa(&QWP(112+0,"esp"),@X[4]);	# copy constants
+	&vmovdqa(&QWP(112+16,"esp"),@X[5]);
+	&vmovdqa(&QWP(112+32,"esp"),@X[6]);
+	&shl	($D,6);				# len*64
+	&vmovdqa(&QWP(112+48,"esp"),@X[3]);
+	&add	($D,$inp);			# end of input
+	&vmovdqa(&QWP(112+64,"esp"),@X[2]);
+	&add	($inp,64);
+	&mov	(&DWP(192+0,"esp"),$E);		# save argument block
+	&mov	(&DWP(192+4,"esp"),$inp);
+	&mov	(&DWP(192+8,"esp"),$D);
+	&mov	(&DWP(192+12,"esp"),@T[0]);	# save original %esp
+
+	&mov	($A,&DWP(0,$E));		# load context
+	&mov	($B,&DWP(4,$E));
+	&mov	($C,&DWP(8,$E));
+	&mov	($D,&DWP(12,$E));
+	&mov	($E,&DWP(16,$E));
+	&mov	(@T[0],$B);			# magic seed
+
+	&vmovdqu(@X[-4&7],&QWP(-64,$inp));	# load input to %xmm[0-3]
+	&vmovdqu(@X[-3&7],&QWP(-48,$inp));
+	&vmovdqu(@X[-2&7],&QWP(-32,$inp));
+	&vmovdqu(@X[-1&7],&QWP(-16,$inp));
+	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
+	&vpshufb(@X[-3&7],@X[-3&7],@X[2]);
+	&vpshufb(@X[-2&7],@X[-2&7],@X[2]);
+	&vmovdqa(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
+	&vpshufb(@X[-1&7],@X[-1&7],@X[2]);
+	&vpaddd	(@X[0],@X[-4&7],@X[3]);		# add K_00_19
+	&vpaddd	(@X[1],@X[-3&7],@X[3]);
+	&vpaddd	(@X[2],@X[-2&7],@X[3]);
+	&vmovdqa(&QWP(0,"esp"),@X[0]);		# X[]+K xfer to IALU
+	&mov	(@T[1],$C);
+	&vmovdqa(&QWP(0+16,"esp"),@X[1]);
+	&xor	(@T[1],$D);
+	&vmovdqa(&QWP(0+32,"esp"),@X[2]);
+	&and	(@T[0],@T[1]);
+	&jmp	(&label("loop"));
+
+sub Xupdate_avx_16_31()		# recall that $Xi starts with 4
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &vpaddd	(@X[3],@X[3],@X[-1&7]);
+	  &vmovdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpsrldq(@X[2],@X[-1&7],4);		# "X[-3]", 3 dwords
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[2],@X[2],@X[-2&7]);		# "X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vmovdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@X[2]);		# "X[0]"^="X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpsrld	(@X[2],@X[0],31);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpslldq(@X[4],@X[0],12);		# "X[0]"<<96, extract one dword
+	&vpaddd	(@X[0],@X[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpsrld	(@X[3],@X[4],30);
+	&vpor	(@X[0],@X[0],@X[2]);		# "X[0]"<<<=1
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpslld	(@X[4],@X[4],2);
+	  &vmovdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5);	# restore X[] from backtrace buffer
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpxor	(@X[0],@X[0],@X[3]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@X[4]);		# "X[0]"^=("X[0]"<<96)<<<2
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vmovdqa	(@X[4],&QWP(112-16+16*(($Xi)/5),"esp"));	# K_XX_XX
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	 foreach (@insns) { eval; }	# remaining instructions [if any]
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+}
+
+sub Xupdate_avx_32_79()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
+  my ($a,$b,$c,$d,$e);
+
+	&vpalignr(@X[2],@X[-1&7],@X[-2&7],8);	# compose "X[-6]"
+	&vpxor	(@X[0],@X[0],@X[-4&7]);	# "X[0]"="X[-32]"^"X[-16]"
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&vpxor	(@X[0],@X[0],@X[-7&7]);	# "X[0]"^="X[-28]"
+	  &vmovdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);	# save X[] to backtrace buffer
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 if ($Xi%5) {
+	  &vmovdqa	(@X[4],@X[3]);	# "perpetuate" K_XX_XX...
+	 } else {			# ... or load next one
+	  &vmovdqa	(@X[4],&QWP(112-16+16*($Xi/5),"esp"));
+	 }
+	  &vpaddd	(@X[3],@X[3],@X[-1&7]);
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@X[2]);		# "X[0]"^="X[-6]"
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&vpsrld	(@X[2],@X[0],30);
+	  &vmovdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&vpslld	(@X[0],@X[0],2);
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&vpor	(@X[0],@X[0],@X[2]);	# "X[0]"<<<=2
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	  &vmovdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19);	# restore X[] from backtrace buffer
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	 foreach (@insns) { eval; }	# remaining instructions
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+}
+
+sub Xuplast_avx_80()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	  &vpaddd	(@X[3],@X[3],@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &vmovdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer IALU
+
+	 foreach (@insns) { eval; }		# remaining instructions
+
+	&mov	($inp=@T[1],&DWP(192+4,"esp"));
+	&cmp	($inp,&DWP(192+8,"esp"));
+	&je	(&label("done"));
+
+	&vmovdqa(@X[3],&QWP(112+48,"esp"));	# K_00_19
+	&vmovdqa(@X[2],&QWP(112+64,"esp"));	# pbswap mask
+	&vmovdqu(@X[-4&7],&QWP(0,$inp));	# load input
+	&vmovdqu(@X[-3&7],&QWP(16,$inp));
+	&vmovdqu(@X[-2&7],&QWP(32,$inp));
+	&vmovdqu(@X[-1&7],&QWP(48,$inp));
+	&add	($inp,64);
+	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);		# byte swap
+	&mov	(&DWP(192+4,"esp"),$inp);
+	&vmovdqa(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
+
+  $Xi=0;
+}
+
+sub Xloop_avx()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpshufb	(@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpaddd	(@X[$Xi&7],@X[($Xi-4)&7],@X[3]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vmovdqa	(&QWP(0+16*$Xi,"esp"),@X[$Xi&7]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	foreach (@insns) { eval; }
+  $Xi++;
+}
+
+sub Xtail_avx()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	foreach (@insns) { eval; }
+}
+
+&set_label("loop",16);
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_32_79(\&body_00_19);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xuplast_avx_80(\&body_20_39);	# can jump to "done"
+
+				$saved_j=$j; @saved_V=@V;
+
+	&Xloop_avx(\&body_20_39);
+	&Xloop_avx(\&body_20_39);
+	&Xloop_avx(\&body_20_39);
+
+	&mov	(@T[1],&DWP(192,"esp"));	# update context
+	&add	($A,&DWP(0,@T[1]));
+	&add	(@T[0],&DWP(4,@T[1]));		# $b
+	&add	($C,&DWP(8,@T[1]));
+	&mov	(&DWP(0,@T[1]),$A);
+	&add	($D,&DWP(12,@T[1]));
+	&mov	(&DWP(4,@T[1]),@T[0]);
+	&add	($E,&DWP(16,@T[1]));
+	&mov	($B,$C);
+	&mov	(&DWP(8,@T[1]),$C);
+	&xor	($B,$D);
+	&mov	(&DWP(12,@T[1]),$D);
+	&mov	(&DWP(16,@T[1]),$E);
+	&mov	(@T[1],@T[0]);
+	&and	(@T[0],$B);
+	&mov	($B,@T[1]);
+
+	&jmp	(&label("loop"));
+
+&set_label("done",16);		$j=$saved_j; @V=@saved_V;
+
+	&Xtail_avx(\&body_20_39);
+	&Xtail_avx(\&body_20_39);
+	&Xtail_avx(\&body_20_39);
+
+	&vzeroall();
+
+	&mov	(@T[1],&DWP(192,"esp"));	# update context
+	&add	($A,&DWP(0,@T[1]));
+	&mov	("esp",&DWP(192+12,"esp"));	# restore %esp
+	&add	(@T[0],&DWP(4,@T[1]));		# $b
+	&add	($C,&DWP(8,@T[1]));
+	&mov	(&DWP(0,@T[1]),$A);
+	&add	($D,&DWP(12,@T[1]));
+	&mov	(&DWP(4,@T[1]),@T[0]);
+	&add	($E,&DWP(16,@T[1]));
+	&mov	(&DWP(8,@T[1]),$C);
+	&mov	(&DWP(12,@T[1]),$D);
+	&mov	(&DWP(16,@T[1]),$E);
+&function_end("_sha1_block_data_order_avx");
+}
+&set_label("K_XX_XX",64);
+&data_word(0x5a827999,0x5a827999,0x5a827999,0x5a827999);	# K_00_19
+&data_word(0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1);	# K_20_39
+&data_word(0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc);	# K_40_59
+&data_word(0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6);	# K_60_79
+&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f);	# pbswap mask
+&data_byte(0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0);
+}
+&asciz("SHA1 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");
+
+&asm_finish();
+
+close STDOUT;
diff --git a/src/crypto/fipsmodule/sha/asm/sha1-armv4-large.pl b/src/crypto/fipsmodule/sha/asm/sha1-armv4-large.pl
new file mode 100644
index 0000000..944248f
--- /dev/null
+++ b/src/crypto/fipsmodule/sha/asm/sha1-armv4-large.pl
@@ -0,0 +1,701 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# sha1_block procedure for ARMv4.
+#
+# January 2007.
+
+# Size/performance trade-off
+# ====================================================================
+# impl		size in bytes	comp cycles[*]	measured performance
+# ====================================================================
+# thumb		304		3212		4420
+# armv4-small	392/+29%	1958/+64%	2250/+96%
+# armv4-compact	740/+89%	1552/+26%	1840/+22%
+# armv4-large	1420/+92%	1307/+19%	1370/+34%[***]
+# full unroll	~5100/+260%	~1260/+4%	~1300/+5%
+# ====================================================================
+# thumb		= same as 'small' but in Thumb instructions[**] and
+#		  with recurring code in two private functions;
+# small		= detached Xload/update, loops are folded;
+# compact	= detached Xload/update, 5x unroll;
+# large		= interleaved Xload/update, 5x unroll;
+# full unroll	= interleaved Xload/update, full unroll, estimated[!];
+#
+# [*]	Manually counted instructions in "grand" loop body. Measured
+#	performance is affected by prologue and epilogue overhead,
+#	i-cache availability, branch penalties, etc.
+# [**]	While each Thumb instruction is half the size, they are not as
+#	diverse as ARM ones: e.g., there are only two arithmetic
+#	instructions with 3 arguments, no [fixed] rotate, and addressing
+#	modes are limited. As a result it takes more instructions to do
+#	the same job in Thumb, therefore the code is never half the size
+#	and is always slower.
+# [***]	This is also ~35% better than compiler-generated code. A dual-
+#	issue Cortex A8 core was measured to process an input block in
+#	~990 cycles.
+
+# August 2010.
+#
+# Rescheduling for dual-issue pipeline resulted in 13% improvement on
+# Cortex A8 core and in absolute terms ~870 cycles per input block
+# [or 13.6 cycles per byte].
+
+# February 2011.
+#
+# Profiler-assisted and platform-specific optimization resulted in 10%
+# improvement on Cortex A8 core and 12.2 cycles per byte.
+
+# September 2013.
+#
+# Add NEON implementation (see sha1-586.pl for background info). On
+# Cortex A8 it was measured to process one byte in 6.7 cycles or >80%
+# faster than integer-only code. Because [fully unrolled] NEON code
+# is ~2.5x larger and some redundant instructions are executed when
+# processing the last block, the improvement is not as big for the
+# smallest blocks, only ~30%. Snapdragon S4 is a tad faster at 6.4
+# cycles per byte, which is also >80% faster than integer-only code.
+# Cortex-A15 is even faster, spending 5.6 cycles per byte and
+# outperforming integer-only code by a factor of 2.
+
+# May 2014.
+#
+# Add ARMv8 code path performing at 2.35 cpb on Apple A7.
+
+$flavour = shift;
+if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
+else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }
+
+if ($flavour && $flavour ne "void") {
+    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+    ( $xlate="${dir}../../../perlasm/arm-xlate.pl" and -f $xlate) or
+    die "can't locate arm-xlate.pl";
+
+    open STDOUT,"| \"$^X\" $xlate $flavour $output";
+} else {
+    open STDOUT,">$output";
+}
+
+$ctx="r0";
+$inp="r1";
+$len="r2";
+$a="r3";
+$b="r4";
+$c="r5";
+$d="r6";
+$e="r7";
+$K="r8";
+$t0="r9";
+$t1="r10";
+$t2="r11";
+$t3="r12";
+$Xi="r14";
+@V=($a,$b,$c,$d,$e);
+
+sub Xupdate {
+my ($a,$b,$c,$d,$e,$opt1,$opt2)=@_;
+$code.=<<___;
+	ldr	$t0,[$Xi,#15*4]
+	ldr	$t1,[$Xi,#13*4]
+	ldr	$t2,[$Xi,#7*4]
+	add	$e,$K,$e,ror#2			@ E+=K_xx_xx
+	ldr	$t3,[$Xi,#2*4]
+	eor	$t0,$t0,$t1
+	eor	$t2,$t2,$t3			@ 1 cycle stall
+	eor	$t1,$c,$d			@ F_xx_xx
+	mov	$t0,$t0,ror#31
+	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
+	eor	$t0,$t0,$t2,ror#31
+	str	$t0,[$Xi,#-4]!
+	$opt1					@ F_xx_xx
+	$opt2					@ F_xx_xx
+	add	$e,$e,$t0			@ E+=X[i]
+___
+}
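+
+# (Editorial note.) Xupdate above emits one step of the standard SHA-1
+# message schedule, X[i] = ROL(X[i-3]^X[i-8]^X[i-14]^X[i-16],1) (the
+# "ror#31" is a rotate-left-by-one), over the downward-growing 16-word
+# ring addressed by $Xi, and interleaves the round's E+=K_xx_xx and
+# E+=ROL(A,5) (encoded as ror#27) additions with it; $opt1/$opt2 are
+# supplied by the BODY_* wrappers to compute the phase-specific F_xx_xx.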
+
+sub BODY_00_15 {
+my ($a,$b,$c,$d,$e)=@_;
+$code.=<<___;
+#if __ARM_ARCH__<7
+	ldrb	$t1,[$inp,#2]
+	ldrb	$t0,[$inp,#3]
+	ldrb	$t2,[$inp,#1]
+	add	$e,$K,$e,ror#2			@ E+=K_00_19
+	ldrb	$t3,[$inp],#4
+	orr	$t0,$t0,$t1,lsl#8
+	eor	$t1,$c,$d			@ F_xx_xx
+	orr	$t0,$t0,$t2,lsl#16
+	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
+	orr	$t0,$t0,$t3,lsl#24
+#else
+	ldr	$t0,[$inp],#4			@ handles unaligned
+	add	$e,$K,$e,ror#2			@ E+=K_00_19
+	eor	$t1,$c,$d			@ F_xx_xx
+	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
+#ifdef __ARMEL__
+	rev	$t0,$t0				@ byte swap
+#endif
+#endif
+	and	$t1,$b,$t1,ror#2
+	add	$e,$e,$t0			@ E+=X[i]
+	eor	$t1,$t1,$d,ror#2		@ F_00_19(B,C,D)
+	str	$t0,[$Xi,#-4]!
+	add	$e,$e,$t1			@ E+=F_00_19(B,C,D)
+___
+}
+
+sub BODY_16_19 {
+my ($a,$b,$c,$d,$e)=@_;
+	&Xupdate(@_,"and $t1,$b,$t1,ror#2");
+$code.=<<___;
+	eor	$t1,$t1,$d,ror#2		@ F_00_19(B,C,D)
+	add	$e,$e,$t1			@ E+=F_00_19(B,C,D)
+___
+}
+
+sub BODY_20_39 {
+my ($a,$b,$c,$d,$e)=@_;
+	&Xupdate(@_,"eor $t1,$b,$t1,ror#2");
+$code.=<<___;
+	add	$e,$e,$t1			@ E+=F_20_39(B,C,D)
+___
+}
+
+sub BODY_40_59 {
+my ($a,$b,$c,$d,$e)=@_;
+	&Xupdate(@_,"and $t1,$b,$t1,ror#2","and $t2,$c,$d");
+$code.=<<___;
+	add	$e,$e,$t1			@ E+=F_40_59(B,C,D)
+	add	$e,$e,$t2,ror#2
+___
+}
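+# (Editorial note.) F_40_59 is the majority function Maj(b,c,d),
+# computed above as (b&(c^d)) + (c&d): in any bit position at most one
+# of the two terms is set, so the two separate adds into $e contribute
+# exactly Maj(b,c,d) with no interaction between them.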
+
+$code=<<___;
+#include <openssl/arm_arch.h>
+
+.text
+.code	32
+
+.global	sha1_block_data_order
+.type	sha1_block_data_order,%function
+
+.align	5
+sha1_block_data_order:
+#if __ARM_MAX_ARCH__>=7
+	sub	r3,pc,#8		@ sha1_block_data_order
+	ldr	r12,.LOPENSSL_armcap
+	ldr	r12,[r3,r12]		@ OPENSSL_armcap_P
+#ifdef	__APPLE__
+	ldr	r12,[r12]
+#endif
+	tst	r12,#ARMV8_SHA1
+	bne	.LARMv8
+	tst	r12,#ARMV7_NEON
+	bne	.LNEON
+#endif
+	stmdb	sp!,{r4-r12,lr}
+	add	$len,$inp,$len,lsl#6	@ $len to point at the end of $inp
+	ldmia	$ctx,{$a,$b,$c,$d,$e}
+.Lloop:
+	ldr	$K,.LK_00_19
+	mov	$Xi,sp
+	sub	sp,sp,#15*4
+	mov	$c,$c,ror#30
+	mov	$d,$d,ror#30
+	mov	$e,$e,ror#30		@ [6]
+.L_00_15:
+___
+for($i=0;$i<5;$i++) {
+	&BODY_00_15(@V);	unshift(@V,pop(@V));
+}
+$code.=<<___;
+	teq	$Xi,sp
+	bne	.L_00_15		@ [((11+4)*5+2)*3]
+	sub	sp,sp,#25*4
+___
+	&BODY_00_15(@V);	unshift(@V,pop(@V));
+	&BODY_16_19(@V);	unshift(@V,pop(@V));
+	&BODY_16_19(@V);	unshift(@V,pop(@V));
+	&BODY_16_19(@V);	unshift(@V,pop(@V));
+	&BODY_16_19(@V);	unshift(@V,pop(@V));
+$code.=<<___;
+
+	ldr	$K,.LK_20_39		@ [+15+16*4]
+	cmn	sp,#0			@ [+3], clear carry to denote 20_39
+.L_20_39_or_60_79:
+___
+for($i=0;$i<5;$i++) {
+	&BODY_20_39(@V);	unshift(@V,pop(@V));
+}
+$code.=<<___;
+	teq	$Xi,sp			@ preserve carry
+	bne	.L_20_39_or_60_79	@ [+((12+3)*5+2)*4]
+	bcs	.L_done			@ [+((12+3)*5+2)*4], spare 300 bytes
+
+	ldr	$K,.LK_40_59
+	sub	sp,sp,#20*4		@ [+2]
+.L_40_59:
+___
+for($i=0;$i<5;$i++) {
+	&BODY_40_59(@V);	unshift(@V,pop(@V));
+}
+$code.=<<___;
+	teq	$Xi,sp
+	bne	.L_40_59		@ [+((12+5)*5+2)*4]
+
+	ldr	$K,.LK_60_79
+	sub	sp,sp,#20*4
+	cmp	sp,#0			@ set carry to denote 60_79
+	b	.L_20_39_or_60_79	@ [+4], spare 300 bytes
+.L_done:
+	add	sp,sp,#80*4		@ "deallocate" stack frame
+	ldmia	$ctx,{$K,$t0,$t1,$t2,$t3}
+	add	$a,$K,$a
+	add	$b,$t0,$b
+	add	$c,$t1,$c,ror#2
+	add	$d,$t2,$d,ror#2
+	add	$e,$t3,$e,ror#2
+	stmia	$ctx,{$a,$b,$c,$d,$e}
+	teq	$inp,$len
+	bne	.Lloop			@ [+18], total 1307
+
+#if __ARM_ARCH__>=5
+	ldmia	sp!,{r4-r12,pc}
+#else
+	ldmia	sp!,{r4-r12,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	bx	lr			@ interoperable with Thumb ISA:-)
+#endif
+.size	sha1_block_data_order,.-sha1_block_data_order
+
+.align	5
+.LK_00_19:	.word	0x5a827999
+.LK_20_39:	.word	0x6ed9eba1
+.LK_40_59:	.word	0x8f1bbcdc
+.LK_60_79:	.word	0xca62c1d6
+#if __ARM_MAX_ARCH__>=7
+.LOPENSSL_armcap:
+.word	OPENSSL_armcap_P-sha1_block_data_order
+#endif
+.asciz	"SHA1 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
+.align	5
+___
+#####################################################################
+# NEON stuff
+#
+{{{
+my @V=($a,$b,$c,$d,$e);
+my ($K_XX_XX,$Ki,$t0,$t1,$Xfer,$saved_sp)=map("r$_",(8..12,14));
+my $Xi=4;
+my @X=map("q$_",(8..11,0..3));
+my @Tx=("q12","q13");
+my ($K,$zero)=("q14","q15");
+my $j=0;
+
+sub AUTOLOAD()          # thunk [simplified] x86-style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
+  my $arg = pop;
+    $arg = "#$arg" if ($arg*1 eq $arg);
+    $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
+}
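+# (Editorial note.) The AUTOLOAD thunk above lets the NEON code below be
+# written in x86-perlasm style: any otherwise-undefined &mnemonic_suffix
+# call lands here, the underscore becomes a dot and a purely numeric
+# final argument gains a "#" prefix, so e.g. &vshr_u32(@Tx[0],@Tx[1],30)
+# appends "vshr.u32 q12,q13,#30" to $code.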
+
+sub body_00_19 () {
+	(
+	'($a,$b,$c,$d,$e)=@V;'.		# '$code.="@ $j\n";'.
+	'&bic	($t0,$d,$b)',
+	'&add	($e,$e,$Ki)',		# e+=X[i]+K
+	'&and	($t1,$c,$b)',
+	'&ldr	($Ki,sprintf "[sp,#%d]",4*(($j+1)&15))',
+	'&add	($e,$e,$a,"ror#27")',	# e+=ROR(A,27)
+	'&eor	($t1,$t1,$t0)',		# F_00_19
+	'&mov	($b,$b,"ror#2")',	# b=ROR(b,2)
+	'&add	($e,$e,$t1);'.		# e+=F_00_19
+	'$j++;	unshift(@V,pop(@V));'
+	)
+}
+sub body_20_39 () {
+	(
+	'($a,$b,$c,$d,$e)=@V;'.		# '$code.="@ $j\n";'.
+	'&eor	($t0,$b,$d)',
+	'&add	($e,$e,$Ki)',		# e+=X[i]+K
+	'&ldr	($Ki,sprintf "[sp,#%d]",4*(($j+1)&15)) if ($j<79)',
+	'&eor	($t1,$t0,$c)',		# F_20_39
+	'&add	($e,$e,$a,"ror#27")',	# e+=ROR(A,27)
+	'&mov	($b,$b,"ror#2")',	# b=ROR(b,2)
+	'&add	($e,$e,$t1);'.		# e+=F_20_39
+	'$j++;	unshift(@V,pop(@V));'
+	)
+}
+sub body_40_59 () {
+	(
+	'($a,$b,$c,$d,$e)=@V;'.		# '$code.="@ $j\n";'.
+	'&add	($e,$e,$Ki)',		# e+=X[i]+K
+	'&and	($t0,$c,$d)',
+	'&ldr	($Ki,sprintf "[sp,#%d]",4*(($j+1)&15))',
+	'&add	($e,$e,$a,"ror#27")',	# e+=ROR(A,27)
+	'&eor	($t1,$c,$d)',
+	'&add	($e,$e,$t0)',
+	'&and	($t1,$t1,$b)',
+	'&mov	($b,$b,"ror#2")',	# b=ROR(b,2)
+	'&add	($e,$e,$t1);'.		# e+=F_40_59
+	'$j++;	unshift(@V,pop(@V));'
+	)
+}
+
+sub Xupdate_16_31 ()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);
+  my ($a,$b,$c,$d,$e);
+
+	&vext_8		(@X[0],@X[-4&7],@X[-3&7],8);	# compose "X[-14]" in "X[0]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vadd_i32	(@Tx[1],@X[-1&7],$K);
+	 eval(shift(@insns));
+	  &vld1_32	("{$K\[]}","[$K_XX_XX,:32]!")	if ($Xi%5==0);
+	 eval(shift(@insns));
+	&vext_8		(@Tx[0],@X[-1&7],$zero,4);	# "X[-3]", 3 words
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&veor		(@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&veor		(@Tx[0],@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&veor		(@Tx[0],@Tx[0],@X[0]);		# "X[0]"^="X[-3]"^"X[-8]
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vst1_32	("{@Tx[1]}","[$Xfer,:128]!");	# X[]+K xfer
+	  &sub		($Xfer,$Xfer,64)		if ($Xi%4==0);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vext_8		(@Tx[1],$zero,@Tx[0],4);	# "X[0]"<<96, extract one dword
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vadd_i32	(@X[0],@Tx[0],@Tx[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vsri_32	(@X[0],@Tx[0],31);		# "X[0]"<<<=1
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vshr_u32	(@Tx[0],@Tx[1],30);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vshl_u32	(@Tx[1],@Tx[1],2);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&veor		(@X[0],@X[0],@Tx[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&veor		(@X[0],@X[0],@Tx[1]);		# "X[0]"^=("X[0]">>96)<<<2
+
+	foreach (@insns) { eval; }	# remaining instructions [if any]
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+}
+
+sub Xupdate_32_79 ()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);
+  my ($a,$b,$c,$d,$e);
+
+	&vext_8		(@Tx[0],@X[-2&7],@X[-1&7],8);	# compose "X[-6]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&veor		(@X[0],@X[0],@X[-4&7]);		# "X[0]"="X[-32]"^"X[-16]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&veor		(@X[0],@X[0],@X[-7&7]);		# "X[0]"^="X[-28]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vadd_i32	(@Tx[1],@X[-1&7],$K);
+	 eval(shift(@insns));
+	  &vld1_32	("{$K\[]}","[$K_XX_XX,:32]!")	if ($Xi%5==0);
+	 eval(shift(@insns));
+	&veor		(@Tx[0],@Tx[0],@X[0]);		# "X[-6]"^="X[0]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vshr_u32	(@X[0],@Tx[0],30);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vst1_32	("{@Tx[1]}","[$Xfer,:128]!");	# X[]+K xfer
+	  &sub		($Xfer,$Xfer,64)		if ($Xi%4==0);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vsli_32	(@X[0],@Tx[0],2);		# "X[0]"="X[-6]"<<<2
+
+	foreach (@insns) { eval; }	# remaining instructions [if any]
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+}
+
+sub Xuplast_80 ()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);
+  my ($a,$b,$c,$d,$e);
+
+	&vadd_i32	(@Tx[1],@X[-1&7],$K);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vst1_32	("{@Tx[1]}","[$Xfer,:128]!");
+	&sub		($Xfer,$Xfer,64);
+
+	&teq		($inp,$len);
+	&sub		($K_XX_XX,$K_XX_XX,16);	# rewind $K_XX_XX
+	&subeq		($inp,$inp,64);		# reload last block to avoid SEGV
+	&vld1_8		("{@X[-4&7]-@X[-3&7]}","[$inp]!");
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vld1_8		("{@X[-2&7]-@X[-1&7]}","[$inp]!");
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vld1_32	("{$K\[]}","[$K_XX_XX,:32]!");	# load K_00_19
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vrev32_8	(@X[-4&7],@X[-4&7]);
+
+	foreach (@insns) { eval; }		# remaining instructions
+
+   $Xi=0;
+}
+
+sub Xloop()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);
+  my ($a,$b,$c,$d,$e);
+
+	&vrev32_8	(@X[($Xi-3)&7],@X[($Xi-3)&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vadd_i32	(@X[$Xi&7],@X[($Xi-4)&7],$K);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vst1_32	("{@X[$Xi&7]}","[$Xfer,:128]!");# X[]+K xfer to IALU
+
+	foreach (@insns) { eval; }
+
+  $Xi++;
+}
+
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7
+.arch	armv7-a
+.fpu	neon
+
+.type	sha1_block_data_order_neon,%function
+.align	4
+sha1_block_data_order_neon:
+.LNEON:
+	stmdb	sp!,{r4-r12,lr}
+	add	$len,$inp,$len,lsl#6	@ $len to point at the end of $inp
+	@ dmb				@ errata #451034 on early Cortex A8
+	@ vstmdb	sp!,{d8-d15}	@ ABI specification says so
+	mov	$saved_sp,sp
+	sub	sp,sp,#64		@ alloca
+	adr	$K_XX_XX,.LK_00_19
+	bic	sp,sp,#15		@ align for 128-bit stores
+
+	ldmia	$ctx,{$a,$b,$c,$d,$e}	@ load context
+	mov	$Xfer,sp
+
+	vld1.8		{@X[-4&7]-@X[-3&7]},[$inp]!	@ handles unaligned
+	veor		$zero,$zero,$zero
+	vld1.8		{@X[-2&7]-@X[-1&7]},[$inp]!
+	vld1.32		{${K}\[]},[$K_XX_XX,:32]!	@ load K_00_19
+	vrev32.8	@X[-4&7],@X[-4&7]		@ yes, even on
+	vrev32.8	@X[-3&7],@X[-3&7]		@ big-endian...
+	vrev32.8	@X[-2&7],@X[-2&7]
+	vadd.i32	@X[0],@X[-4&7],$K
+	vrev32.8	@X[-1&7],@X[-1&7]
+	vadd.i32	@X[1],@X[-3&7],$K
+	vst1.32		{@X[0]},[$Xfer,:128]!
+	vadd.i32	@X[2],@X[-2&7],$K
+	vst1.32		{@X[1]},[$Xfer,:128]!
+	vst1.32		{@X[2]},[$Xfer,:128]!
+	ldr		$Ki,[sp]			@ big RAW stall
+
+.Loop_neon:
+___
+	&Xupdate_16_31(\&body_00_19);
+	&Xupdate_16_31(\&body_00_19);
+	&Xupdate_16_31(\&body_00_19);
+	&Xupdate_16_31(\&body_00_19);
+	&Xupdate_32_79(\&body_00_19);
+	&Xupdate_32_79(\&body_20_39);
+	&Xupdate_32_79(\&body_20_39);
+	&Xupdate_32_79(\&body_20_39);
+	&Xupdate_32_79(\&body_20_39);
+	&Xupdate_32_79(\&body_20_39);
+	&Xupdate_32_79(\&body_40_59);
+	&Xupdate_32_79(\&body_40_59);
+	&Xupdate_32_79(\&body_40_59);
+	&Xupdate_32_79(\&body_40_59);
+	&Xupdate_32_79(\&body_40_59);
+	&Xupdate_32_79(\&body_20_39);
+	&Xuplast_80(\&body_20_39);
+	&Xloop(\&body_20_39);
+	&Xloop(\&body_20_39);
+	&Xloop(\&body_20_39);
+$code.=<<___;
+	ldmia	$ctx,{$Ki,$t0,$t1,$Xfer}	@ accumulate context
+	add	$a,$a,$Ki
+	ldr	$Ki,[$ctx,#16]
+	add	$b,$b,$t0
+	add	$c,$c,$t1
+	add	$d,$d,$Xfer
+	moveq	sp,$saved_sp
+	add	$e,$e,$Ki
+	ldrne	$Ki,[sp]
+	stmia	$ctx,{$a,$b,$c,$d,$e}
+	addne	$Xfer,sp,#3*16
+	bne	.Loop_neon
+
+	@ vldmia	sp!,{d8-d15}
+	ldmia	sp!,{r4-r12,pc}
+.size	sha1_block_data_order_neon,.-sha1_block_data_order_neon
+#endif
+___
+}}}
+#####################################################################
+# ARMv8 stuff
+#
+{{{
+my ($ABCD,$E,$E0,$E1)=map("q$_",(0..3));
+my @MSG=map("q$_",(4..7));
+my @Kxx=map("q$_",(8..11));
+my ($W0,$W1,$ABCD_SAVE)=map("q$_",(12..14));
+
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7
+.type	sha1_block_data_order_armv8,%function
+.align	5
+sha1_block_data_order_armv8:
+.LARMv8:
+	vstmdb	sp!,{d8-d15}		@ ABI specification says so
+
+	veor	$E,$E,$E
+	adr	r3,.LK_00_19
+	vld1.32	{$ABCD},[$ctx]!
+	vld1.32	{$E\[0]},[$ctx]
+	sub	$ctx,$ctx,#16
+	vld1.32	{@Kxx[0]\[]},[r3,:32]!
+	vld1.32	{@Kxx[1]\[]},[r3,:32]!
+	vld1.32	{@Kxx[2]\[]},[r3,:32]!
+	vld1.32	{@Kxx[3]\[]},[r3,:32]
+
+.Loop_v8:
+	vld1.8		{@MSG[0]-@MSG[1]},[$inp]!
+	vld1.8		{@MSG[2]-@MSG[3]},[$inp]!
+	vrev32.8	@MSG[0],@MSG[0]
+	vrev32.8	@MSG[1],@MSG[1]
+
+	vadd.i32	$W0,@Kxx[0],@MSG[0]
+	vrev32.8	@MSG[2],@MSG[2]
+	vmov		$ABCD_SAVE,$ABCD	@ offload
+	subs		$len,$len,#1
+
+	vadd.i32	$W1,@Kxx[0],@MSG[1]
+	vrev32.8	@MSG[3],@MSG[3]
+	sha1h		$E1,$ABCD		@ 0
+	sha1c		$ABCD,$E,$W0
+	vadd.i32	$W0,@Kxx[$j],@MSG[2]
+	sha1su0		@MSG[0],@MSG[1],@MSG[2]
+___
+for ($j=0,$i=1;$i<20-3;$i++) {
+my $f=("c","p","m","p")[$i/5];
+$code.=<<___;
+	sha1h		$E0,$ABCD		@ $i
+	sha1$f		$ABCD,$E1,$W1
+	vadd.i32	$W1,@Kxx[$j],@MSG[3]
+	sha1su1		@MSG[0],@MSG[3]
+___
+$code.=<<___ if ($i<20-4);
+	sha1su0		@MSG[1],@MSG[2],@MSG[3]
+___
+	($E0,$E1)=($E1,$E0);	($W0,$W1)=($W1,$W0);
+	push(@MSG,shift(@MSG));	$j++ if ((($i+3)%5)==0);
+}
+$code.=<<___;
+	sha1h		$E0,$ABCD		@ $i
+	sha1p		$ABCD,$E1,$W1
+	vadd.i32	$W1,@Kxx[$j],@MSG[3]
+
+	sha1h		$E1,$ABCD		@ 18
+	sha1p		$ABCD,$E0,$W0
+
+	sha1h		$E0,$ABCD		@ 19
+	sha1p		$ABCD,$E1,$W1
+
+	vadd.i32	$E,$E,$E0
+	vadd.i32	$ABCD,$ABCD,$ABCD_SAVE
+	bne		.Loop_v8
+
+	vst1.32		{$ABCD},[$ctx]!
+	vst1.32		{$E\[0]},[$ctx]
+
+	vldmia	sp!,{d8-d15}
+	ret					@ bx lr
+.size	sha1_block_data_order_armv8,.-sha1_block_data_order_armv8
+#endif
+___
+}}}
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7
+.comm	OPENSSL_armcap_P,4,4
+.hidden	OPENSSL_armcap_P
+#endif
+___
+
+{   my  %opcode = (
+	"sha1c"		=> 0xf2000c40,	"sha1p"		=> 0xf2100c40,
+	"sha1m"		=> 0xf2200c40,	"sha1su0"	=> 0xf2300c40,
+	"sha1h"		=> 0xf3b902c0,	"sha1su1"	=> 0xf3ba0380	);
+
+    sub unsha1 {
+	my ($mnemonic,$arg)=@_;
+
+	if ($arg =~ m/q([0-9]+)(?:,\s*q([0-9]+))?,\s*q([0-9]+)/o) {
+	    my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
+					 |(($2&7)<<17)|(($2&8)<<4)
+					 |(($3&7)<<1) |(($3&8)<<2);
+	    # ARMv7 instructions are always encoded little-endian, hence
+	    # the bytes are emitted LSB first. The correct solution would
+	    # be to use the .inst directive, but older assemblers don't
+	    # implement it:-(
+	    sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
+			$word&0xff,($word>>8)&0xff,
+			($word>>16)&0xff,($word>>24)&0xff,
+			$mnemonic,$arg;
+	}
+    }
+}
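+# (Editorial note.) The post-processing loop below rewrites $code before
+# printing: {qN[]} lane syntax is expanded into d-register pairs (e.g.
+# {q8[]} -> {d16[],d17[]}), sha1* mnemonics on q registers are routed
+# through unsha1() above and replaced by equivalent .byte sequences (so
+# the file assembles even without crypto-extension support), "ret"
+# becomes "bx lr", and a pre-existing "bx lr" is emitted as a raw .word
+# so the result can still be built with -march=armv4.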
+
+foreach (split($/,$code)) {
+	s/{q([0-9]+)\[\]}/sprintf "{d%d[],d%d[]}",2*$1,2*$1+1/eo	or
+	s/{q([0-9]+)\[0\]}/sprintf "{d%d[0]}",2*$1/eo;
+
+	s/\b(sha1\w+)\s+(q.*)/unsha1($1,$2)/geo;
+
+	s/\bret\b/bx	lr/o		or
+	s/\bbx\s+lr\b/.word\t0xe12fff1e/o;	# make it possible to compile with -march=armv4
+
+	print $_,$/;
+}
+
+close STDOUT; # enforce flush
diff --git a/src/crypto/fipsmodule/sha/asm/sha1-armv8.pl b/src/crypto/fipsmodule/sha/asm/sha1-armv8.pl
new file mode 100644
index 0000000..dbbb54b
--- /dev/null
+++ b/src/crypto/fipsmodule/sha/asm/sha1-armv8.pl
@@ -0,0 +1,347 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# SHA1 for ARMv8.
+#
+# Performance in cycles per processed byte and improvement coefficient
+# over code generated with "default" compiler:
+#
+#		hardware-assisted	software(*)
+# Apple A7	2.31			4.13 (+14%)
+# Cortex-A53	2.24			8.03 (+97%)
+# Cortex-A57	2.35			7.88 (+74%)
+# Denver	2.13			3.97 (+0%)(**)
+# X-Gene				8.80 (+200%)
+#
+# (*)	Software results are presented mostly for reference purposes.
+# (**)	Keep in mind that Denver relies on binary translation, which
+#	optimizes compiler output at run-time.
+
+$flavour = shift;
+$output  = shift;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../../perlasm/arm-xlate.pl" and -f $xlate) or
+die "can't locate arm-xlate.pl";
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+($ctx,$inp,$num)=("x0","x1","x2");
+@Xw=map("w$_",(3..17,19));
+@Xx=map("x$_",(3..17,19));
+@V=($A,$B,$C,$D,$E)=map("w$_",(20..24));
+($t0,$t1,$t2,$K)=map("w$_",(25..28));
+
+
+sub BODY_00_19 {
+my ($i,$a,$b,$c,$d,$e)=@_;
+my $j=($i+2)&15;
+
+$code.=<<___ if ($i<15 && !($i&1));
+	lsr	@Xx[$i+1],@Xx[$i],#32
+___
+$code.=<<___ if ($i<14 && !($i&1));
+	ldr	@Xx[$i+2],[$inp,#`($i+2)*4-64`]
+___
+$code.=<<___ if ($i<14 && ($i&1));
+#ifdef	__ARMEB__
+	ror	@Xx[$i+1],@Xx[$i+1],#32
+#else
+	rev32	@Xx[$i+1],@Xx[$i+1]
+#endif
+___
+$code.=<<___ if ($i<14);
+	bic	$t0,$d,$b
+	and	$t1,$c,$b
+	ror	$t2,$a,#27
+	add	$d,$d,$K		// future e+=K
+	orr	$t0,$t0,$t1
+	add	$e,$e,$t2		// e+=rot(a,5)
+	ror	$b,$b,#2
+	add	$d,$d,@Xw[($i+1)&15]	// future e+=X[i]
+	add	$e,$e,$t0		// e+=F(b,c,d)
+___
+$code.=<<___ if ($i==19);
+	movz	$K,#0xeba1
+	movk	$K,#0x6ed9,lsl#16
+___
+$code.=<<___ if ($i>=14);
+	 eor	@Xw[$j],@Xw[$j],@Xw[($j+2)&15]
+	bic	$t0,$d,$b
+	and	$t1,$c,$b
+	ror	$t2,$a,#27
+	 eor	@Xw[$j],@Xw[$j],@Xw[($j+8)&15]
+	add	$d,$d,$K		// future e+=K
+	orr	$t0,$t0,$t1
+	add	$e,$e,$t2		// e+=rot(a,5)
+	 eor	@Xw[$j],@Xw[$j],@Xw[($j+13)&15]
+	ror	$b,$b,#2
+	add	$d,$d,@Xw[($i+1)&15]	// future e+=X[i]
+	add	$e,$e,$t0		// e+=F(b,c,d)
+	 ror	@Xw[$j],@Xw[$j],#31
+___
+}
+
+sub BODY_40_59 {
+my ($i,$a,$b,$c,$d,$e)=@_;
+my $j=($i+2)&15;
+
+$code.=<<___ if ($i==59);
+	movz	$K,#0xc1d6
+	movk	$K,#0xca62,lsl#16
+___
+$code.=<<___;
+	orr	$t0,$b,$c
+	and	$t1,$b,$c
+	 eor	@Xw[$j],@Xw[$j],@Xw[($j+2)&15]
+	ror	$t2,$a,#27
+	and	$t0,$t0,$d
+	add	$d,$d,$K		// future e+=K
+	 eor	@Xw[$j],@Xw[$j],@Xw[($j+8)&15]
+	add	$e,$e,$t2		// e+=rot(a,5)
+	orr	$t0,$t0,$t1
+	ror	$b,$b,#2
+	 eor	@Xw[$j],@Xw[$j],@Xw[($j+13)&15]
+	add	$d,$d,@Xw[($i+1)&15]	// future e+=X[i]
+	add	$e,$e,$t0		// e+=F(b,c,d)
+	 ror	@Xw[$j],@Xw[$j],#31
+___
+}
+
+sub BODY_20_39 {
+my ($i,$a,$b,$c,$d,$e)=@_;
+my $j=($i+2)&15;
+
+$code.=<<___ if ($i==39);
+	movz	$K,#0xbcdc
+	movk	$K,#0x8f1b,lsl#16
+___
+$code.=<<___ if ($i<78);
+	 eor	@Xw[$j],@Xw[$j],@Xw[($j+2)&15]
+	eor	$t0,$d,$b
+	ror	$t2,$a,#27
+	add	$d,$d,$K		// future e+=K
+	 eor	@Xw[$j],@Xw[$j],@Xw[($j+8)&15]
+	eor	$t0,$t0,$c
+	add	$e,$e,$t2		// e+=rot(a,5)
+	ror	$b,$b,#2
+	 eor	@Xw[$j],@Xw[$j],@Xw[($j+13)&15]
+	add	$d,$d,@Xw[($i+1)&15]	// future e+=X[i]
+	add	$e,$e,$t0		// e+=F(b,c,d)
+	 ror	@Xw[$j],@Xw[$j],#31
+___
+$code.=<<___ if ($i==78);
+	ldp	@Xw[1],@Xw[2],[$ctx]
+	eor	$t0,$d,$b
+	ror	$t2,$a,#27
+	add	$d,$d,$K		// future e+=K
+	eor	$t0,$t0,$c
+	add	$e,$e,$t2		// e+=rot(a,5)
+	ror	$b,$b,#2
+	add	$d,$d,@Xw[($i+1)&15]	// future e+=X[i]
+	add	$e,$e,$t0		// e+=F(b,c,d)
+___
+$code.=<<___ if ($i==79);
+	ldp	@Xw[3],@Xw[4],[$ctx,#8]
+	eor	$t0,$d,$b
+	ror	$t2,$a,#27
+	eor	$t0,$t0,$c
+	add	$e,$e,$t2		// e+=rot(a,5)
+	ror	$b,$b,#2
+	ldr	@Xw[5],[$ctx,#16]
+	add	$e,$e,$t0		// e+=F(b,c,d)
+___
+}
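The BODY_00_19/BODY_20_39/BODY_40_59 generators above interleave loads, the message schedule and the round update for instruction-scheduling reasons, but each emitted round computes the standard SHA-1 recurrence noted in the "e+=..." comments. A plain scalar reference model — an illustrative sketch, not the module's code:

#!/usr/bin/env perl
# One SHA-1 round as a plain-Perl model:
#   e += rotl(a,5) + F(b,c,d) + K + W[i];  b = rotl(b,30);  then rotate (a..e).
sub rotl32 { my ($x, $n) = @_; return (($x << $n) | ($x >> (32 - $n))) & 0xffffffff; }

my %F = (
    ch     => sub { my ($b,$c,$d) = @_; return (($b & $c) | (~$b & $d)) & 0xffffffff; }, # rounds 0..19
    parity => sub { my ($b,$c,$d) = @_; return $b ^ $c ^ $d; },                          # rounds 20..39, 60..79
    maj    => sub { my ($b,$c,$d) = @_; return ($b & $c) | ($b & $d) | ($c & $d); },     # rounds 40..59
);

sub sha1_round {
    my ($a, $b, $c, $d, $e, $f, $k, $w) = @_;
    $e = ($e + rotl32($a, 5) + $F{$f}->($b, $c, $d) + $k + $w) & 0xffffffff;
    $b = rotl32($b, 30);
    return ($e, $a, $b, $c, $d);        # same rotation as unshift(@V,pop(@V)) below
}

# Example: one round-0 step from the standard initial state with W[0]=0.
my @v = (0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0);
printf "%08x %08x %08x %08x %08x\n", sha1_round(@v, "ch", 0x5a827999, 0);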
+
+$code.=<<___;
+#include <openssl/arm_arch.h>
+
+.text
+
+.extern	OPENSSL_armcap_P
+.globl	sha1_block_data_order
+.type	sha1_block_data_order,%function
+.align	6
+sha1_block_data_order:
+	ldr	x16,.LOPENSSL_armcap_P
+	adr	x17,.LOPENSSL_armcap_P
+	add	x16,x16,x17
+	ldr	w16,[x16]
+	tst	w16,#ARMV8_SHA1
+	b.ne	.Lv8_entry
+
+	stp	x29,x30,[sp,#-96]!
+	add	x29,sp,#0
+	stp	x19,x20,[sp,#16]
+	stp	x21,x22,[sp,#32]
+	stp	x23,x24,[sp,#48]
+	stp	x25,x26,[sp,#64]
+	stp	x27,x28,[sp,#80]
+
+	ldp	$A,$B,[$ctx]
+	ldp	$C,$D,[$ctx,#8]
+	ldr	$E,[$ctx,#16]
+
+.Loop:
+	ldr	@Xx[0],[$inp],#64
+	movz	$K,#0x7999
+	sub	$num,$num,#1
+	movk	$K,#0x5a82,lsl#16
+#ifdef	__ARMEB__
+	ror	@Xx[0],@Xx[0],#32
+#else
+	rev32	@Xx[0],@Xx[0]
+#endif
+	add	$E,$E,$K		// warm it up
+	add	$E,$E,@Xw[0]
+___
+for($i=0;$i<20;$i++)	{ &BODY_00_19($i,@V); unshift(@V,pop(@V)); }
+for(;$i<40;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
+for(;$i<60;$i++)	{ &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
+for(;$i<80;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+	add	$B,$B,@Xw[2]
+	add	$C,$C,@Xw[3]
+	add	$A,$A,@Xw[1]
+	add	$D,$D,@Xw[4]
+	add	$E,$E,@Xw[5]
+	stp	$A,$B,[$ctx]
+	stp	$C,$D,[$ctx,#8]
+	str	$E,[$ctx,#16]
+	cbnz	$num,.Loop
+
+	ldp	x19,x20,[sp,#16]
+	ldp	x21,x22,[sp,#32]
+	ldp	x23,x24,[sp,#48]
+	ldp	x25,x26,[sp,#64]
+	ldp	x27,x28,[sp,#80]
+	ldr	x29,[sp],#96
+	ret
+.size	sha1_block_data_order,.-sha1_block_data_order
+___
+{{{
+my ($ABCD,$E,$E0,$E1)=map("v$_.16b",(0..3));
+my @MSG=map("v$_.16b",(4..7));
+my @Kxx=map("v$_.4s",(16..19));
+my ($W0,$W1)=("v20.4s","v21.4s");
+my $ABCD_SAVE="v22.16b";
+
+$code.=<<___;
+.type	sha1_block_armv8,%function
+.align	6
+sha1_block_armv8:
+.Lv8_entry:
+	stp	x29,x30,[sp,#-16]!
+	add	x29,sp,#0
+
+	adr	x4,.Lconst
+	eor	$E,$E,$E
+	ld1.32	{$ABCD},[$ctx],#16
+	ld1.32	{$E}[0],[$ctx]
+	sub	$ctx,$ctx,#16
+	ld1.32	{@Kxx[0]-@Kxx[3]},[x4]
+
+.Loop_hw:
+	ld1	{@MSG[0]-@MSG[3]},[$inp],#64
+	sub	$num,$num,#1
+	rev32	@MSG[0],@MSG[0]
+	rev32	@MSG[1],@MSG[1]
+
+	add.i32	$W0,@Kxx[0],@MSG[0]
+	rev32	@MSG[2],@MSG[2]
+	orr	$ABCD_SAVE,$ABCD,$ABCD	// offload
+
+	add.i32	$W1,@Kxx[0],@MSG[1]
+	rev32	@MSG[3],@MSG[3]
+	sha1h	$E1,$ABCD
+	sha1c	$ABCD,$E,$W0		// 0
+	add.i32	$W0,@Kxx[$j],@MSG[2]
+	sha1su0	@MSG[0],@MSG[1],@MSG[2]
+___
+for ($j=0,$i=1;$i<20-3;$i++) {
+my $f=("c","p","m","p")[$i/5];
+$code.=<<___;
+	sha1h	$E0,$ABCD		// $i
+	sha1$f	$ABCD,$E1,$W1
+	add.i32	$W1,@Kxx[$j],@MSG[3]
+	sha1su1	@MSG[0],@MSG[3]
+___
+$code.=<<___ if ($i<20-4);
+	sha1su0	@MSG[1],@MSG[2],@MSG[3]
+___
+	($E0,$E1)=($E1,$E0);		($W0,$W1)=($W1,$W0);
+	push(@MSG,shift(@MSG));		$j++ if ((($i+3)%5)==0);
+}
+$code.=<<___;
+	sha1h	$E0,$ABCD		// $i
+	sha1p	$ABCD,$E1,$W1
+	add.i32	$W1,@Kxx[$j],@MSG[3]
+
+	sha1h	$E1,$ABCD		// 18
+	sha1p	$ABCD,$E0,$W0
+
+	sha1h	$E0,$ABCD		// 19
+	sha1p	$ABCD,$E1,$W1
+
+	add.i32	$E,$E,$E0
+	add.i32	$ABCD,$ABCD,$ABCD_SAVE
+
+	cbnz	$num,.Loop_hw
+
+	st1.32	{$ABCD},[$ctx],#16
+	st1.32	{$E}[0],[$ctx]
+
+	ldr	x29,[sp],#16
+	ret
+.size	sha1_block_armv8,.-sha1_block_armv8
+.align	6
+.Lconst:
+.long	0x5a827999,0x5a827999,0x5a827999,0x5a827999	//K_00_19
+.long	0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1	//K_20_39
+.long	0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc	//K_40_59
+.long	0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6	//K_60_79
+.LOPENSSL_armcap_P:
+.quad	OPENSSL_armcap_P-.
+.asciz	"SHA1 block transform for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
+.align	2
+.comm	OPENSSL_armcap_P,4,4
+___
+}}}
+
+{   my	%opcode = (
+	"sha1c"		=> 0x5e000000,	"sha1p"		=> 0x5e001000,
+	"sha1m"		=> 0x5e002000,	"sha1su0"	=> 0x5e003000,
+	"sha1h"		=> 0x5e280800,	"sha1su1"	=> 0x5e281800	);
+
+    sub unsha1 {
+	my ($mnemonic,$arg)=@_;
+
+	$arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o
+	&&
+	sprintf ".inst\t0x%08x\t//%s %s",
+			$opcode{$mnemonic}|$1|($2<<5)|($3<<16),
+			$mnemonic,$arg;
+    }
+}
+
+foreach(split("\n",$code)) {
+
+	s/\`([^\`]*)\`/eval($1)/geo;
+
+	s/\b(sha1\w+)\s+([qv].*)/unsha1($1,$2)/geo;
+
+	s/\.\w?32\b//o		and s/\.16b/\.4s/go;
+	m/(ld|st)1[^\[]+\[0\]/o	and s/\.4s/\.s/go;
+
+	print $_,"\n";
+}
+
+close STDOUT;
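The post-processing loop above adapts the generated text for this flavour: backtick expressions are evaluated, the sha1* intrinsics are turned into .inst words by unsha1(), and the ".32"/".16b" typing is normalized, with single-lane ld1/st1 operands switched to ".s". An illustrative standalone sketch (not part of the synced file) applying the same type rewrites to two sample lines:

#!/usr/bin/env perl
# Applies the register-type rewrites from the loop above to sample lines.
for my $line ("ld1.32\t{v0.16b},[x0],#16", "ld1.32\t{v1.16b}[0],[x0]") {
    my $out = $line;
    $out =~ s/\.\w?32\b//o and $out =~ s/\.16b/\.4s/go;
    $out =~ m/(ld|st)1[^\[]+\[0\]/o and $out =~ s/\.4s/\.s/go;
    print "$line  =>  $out\n";
}
# First line becomes  "ld1\t{v0.4s},[x0],#16"
# Second line becomes "ld1\t{v1.s}[0],[x0]"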
diff --git a/src/crypto/fipsmodule/sha/asm/sha1-x86_64.pl b/src/crypto/fipsmodule/sha/asm/sha1-x86_64.pl
new file mode 100755
index 0000000..25a1cad
--- /dev/null
+++ b/src/crypto/fipsmodule/sha/asm/sha1-x86_64.pl
@@ -0,0 +1,2057 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# sha1_block procedure for x86_64.
+#
+# It was brought to my attention that on EM64T compiler-generated code
+# was far behind 32-bit assembler implementation. This is unlike on
+# Opteron where compiler-generated code was only 15% behind 32-bit
+# assembler, which originally made it hard to motivate the effort.
+# There was suggestion to mechanically translate 32-bit code, but I
+# dismissed it, reasoning that x86_64 offers enough register bank
+# capacity to fully utilize SHA-1 parallelism. Therefore this fresh
+# implementation:-) However! While 64-bit code does perform better
+# on Opteron, I failed to beat 32-bit assembler on EM64T core. Well,
+# x86_64 does offer larger *addressable* bank, but out-of-order core
+# reaches for even more registers through dynamic aliasing, and EM64T
+# core must have managed to run-time optimize even 32-bit code just as
+# well as the 64-bit one. Performance improvement is summarized in the
+# following table:
+#
+#		gcc 3.4		32-bit asm	cycles/byte
+# Opteron	+45%		+20%		6.8
+# Xeon P4	+65%		+0%		9.9
+# Core2		+60%		+10%		7.0
+
+# August 2009.
+#
+# The code was revised to minimize code size and to maximize
+# "distance" between instructions producing input to 'lea'
+# instruction and the 'lea' instruction itself, which is essential
+# for Intel Atom core.
+
+# October 2010.
+#
+# Add SSSE3, Supplemental[!] SSE3, implementation. The idea behind it
+# is to offload message schedule denoted by Wt in NIST specification,
+# or Xupdate in OpenSSL source, to SIMD unit. See sha1-586.pl module
+# for background and implementation details. The only difference from
+# 32-bit code is that 64-bit code doesn't have to spill @X[] elements
+# to free temporary registers.
+
+# April 2011.
+#
+# Add AVX code path. See sha1-586.pl for further information.
+
+# May 2013.
+#
+# Add AVX2+BMI code path. Initial attempt (utilizing BMI instructions
+# and loading pair of consecutive blocks to 256-bit %ymm registers)
+# did not provide impressive performance improvement till a crucial
+# hint regarding the number of Xupdate iterations to pre-compute in
+# advance was provided by Ilya Albrekht of Intel Corp.
+
+# March 2014.
+#
+# Add support for Intel SHA Extensions.
+
+######################################################################
+# Current performance is summarized in following table. Numbers are
+# CPU clock cycles spent to process single byte (less is better).
+#
+#		x86_64		SSSE3		AVX[2]
+# P4		9.05		-
+# Opteron	6.26		-
+# Core2		6.55		6.05/+8%	-
+# Westmere	6.73		5.30/+27%	-
+# Sandy Bridge	7.70		6.10/+26%	4.99/+54%
+# Ivy Bridge	6.06		4.67/+30%	4.60/+32%
+# Haswell	5.45		4.15/+31%	3.57/+53%
+# Skylake	5.18		4.06/+28%	3.54/+46%
+# Bulldozer	9.11		5.95/+53%
+# VIA Nano	9.32		7.15/+30%
+# Atom		10.3		9.17/+12%
+# Silvermont	13.1(*)		9.37/+40%
+# Goldmont	8.13		6.42/+27%	1.70/+380%(**)
+#
+# (*)	obviously suboptimal result, nothing was done about it,
+#	because SSSE3 code is compiled unconditionally;
+# (**)	SHAEXT result
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+# In upstream, this is controlled by shelling out to the compiler to check
+# versions, but BoringSSL is intended to be used with pre-generated perlasm
+# output, so this isn't useful anyway.
+#
+# TODO(davidben): Enable AVX2 code after testing by setting $avx to 2. Is it
+# necessary to disable AVX2 code when SHA Extensions code is disabled? Upstream
+# did not tie them together until after $shaext was added.
+$avx = 1;
+
+# TODO(davidben): Consider enabling the Intel SHA Extensions code once it's
+# been tested.
+$shaext=0;	### set to zero if compiling for 1.0.1
+$avx=1		if (!$shaext && $avx);
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
+$ctx="%rdi";	# 1st arg
+$inp="%rsi";	# 2nd arg
+$num="%rdx";	# 3rd arg
+
+# reassign arguments in order to produce more compact code
+$ctx="%r8";
+$inp="%r9";
+$num="%r10";
+
+$t0="%eax";
+$t1="%ebx";
+$t2="%ecx";
+@xi=("%edx","%ebp","%r14d");
+$A="%esi";
+$B="%edi";
+$C="%r11d";
+$D="%r12d";
+$E="%r13d";
+
+@V=($A,$B,$C,$D,$E);
+
+sub BODY_00_19 {
+my ($i,$a,$b,$c,$d,$e)=@_;
+my $j=$i+1;
+$code.=<<___ if ($i==0);
+	mov	`4*$i`($inp),$xi[0]
+	bswap	$xi[0]
+___
+$code.=<<___ if ($i<15);
+	mov	`4*$j`($inp),$xi[1]
+	mov	$d,$t0
+	mov	$xi[0],`4*$i`(%rsp)
+	mov	$a,$t2
+	bswap	$xi[1]
+	xor	$c,$t0
+	rol	\$5,$t2
+	and	$b,$t0
+	lea	0x5a827999($xi[0],$e),$e
+	add	$t2,$e
+	xor	$d,$t0
+	rol	\$30,$b
+	add	$t0,$e
+___
+$code.=<<___ if ($i>=15);
+	xor	`4*($j%16)`(%rsp),$xi[1]
+	mov	$d,$t0
+	mov	$xi[0],`4*($i%16)`(%rsp)
+	mov	$a,$t2
+	xor	`4*(($j+2)%16)`(%rsp),$xi[1]
+	xor	$c,$t0
+	rol	\$5,$t2
+	xor	`4*(($j+8)%16)`(%rsp),$xi[1]
+	and	$b,$t0
+	lea	0x5a827999($xi[0],$e),$e
+	rol	\$30,$b
+	xor	$d,$t0
+	add	$t2,$e
+	rol	\$1,$xi[1]
+	add	$t0,$e
+___
+push(@xi,shift(@xi));
+}
+
+sub BODY_20_39 {
+my ($i,$a,$b,$c,$d,$e)=@_;
+my $j=$i+1;
+my $K=($i<40)?0x6ed9eba1:0xca62c1d6;
+$code.=<<___ if ($i<79);
+	xor	`4*($j%16)`(%rsp),$xi[1]
+	mov	$b,$t0
+	`"mov	$xi[0],".4*($i%16)."(%rsp)"	if ($i<72)`
+	mov	$a,$t2
+	xor	`4*(($j+2)%16)`(%rsp),$xi[1]
+	xor	$d,$t0
+	rol	\$5,$t2
+	xor	`4*(($j+8)%16)`(%rsp),$xi[1]
+	lea	$K($xi[0],$e),$e
+	xor	$c,$t0
+	add	$t2,$e
+	rol	\$30,$b
+	add	$t0,$e
+	rol	\$1,$xi[1]
+___
+$code.=<<___ if ($i==79);
+	mov	$b,$t0
+	mov	$a,$t2
+	xor	$d,$t0
+	lea	$K($xi[0],$e),$e
+	rol	\$5,$t2
+	xor	$c,$t0
+	add	$t2,$e
+	rol	\$30,$b
+	add	$t0,$e
+___
+push(@xi,shift(@xi));
+}
+
+sub BODY_40_59 {
+my ($i,$a,$b,$c,$d,$e)=@_;
+my $j=$i+1;
+$code.=<<___;
+	xor	`4*($j%16)`(%rsp),$xi[1]
+	mov	$d,$t0
+	mov	$xi[0],`4*($i%16)`(%rsp)
+	mov	$d,$t1
+	xor	`4*(($j+2)%16)`(%rsp),$xi[1]
+	and	$c,$t0
+	mov	$a,$t2
+	xor	`4*(($j+8)%16)`(%rsp),$xi[1]
+	lea	0x8f1bbcdc($xi[0],$e),$e
+	xor	$c,$t1
+	rol	\$5,$t2
+	add	$t0,$e
+	rol	\$1,$xi[1]
+	and	$b,$t1
+	add	$t2,$e
+	rol	\$30,$b
+	add	$t1,$e
+___
+push(@xi,shift(@xi));
+}
+
+$code.=<<___;
+.text
+.extern	OPENSSL_ia32cap_addr
+
+.globl	sha1_block_data_order
+.type	sha1_block_data_order,\@function,3
+.align	16
+sha1_block_data_order:
+	lea	OPENSSL_ia32cap_addr(%rip),%r10
+	mov	(%r10),%r10
+	mov	0(%r10),%r9d
+	mov	4(%r10),%r8d
+	mov	8(%r10),%r10d
+	test	\$`1<<9`,%r8d		# check SSSE3 bit
+	jz	.Lialu
+___
+$code.=<<___ if ($shaext);
+	test	\$`1<<29`,%r10d		# check SHA bit
+	jnz	_shaext_shortcut
+___
+$code.=<<___ if ($avx>1);
+	and	\$`1<<3|1<<5|1<<8`,%r10d	# check AVX2+BMI1+BMI2
+	cmp	\$`1<<3|1<<5|1<<8`,%r10d
+	je	_avx2_shortcut
+___
+$code.=<<___ if ($avx);
+	and	\$`1<<28`,%r8d		# mask AVX bit
+	and	\$`1<<30`,%r9d		# mask "Intel CPU" bit
+	or	%r9d,%r8d
+	cmp	\$`1<<28|1<<30`,%r8d
+	je	_avx_shortcut
+___
+$code.=<<___;
+	jmp	_ssse3_shortcut
+
+.align	16
+.Lialu:
+	mov	%rsp,%rax
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	mov	%rdi,$ctx	# reassigned argument
+	sub	\$`8+16*4`,%rsp
+	mov	%rsi,$inp	# reassigned argument
+	and	\$-64,%rsp
+	mov	%rdx,$num	# reassigned argument
+	mov	%rax,`16*4`(%rsp)
+.Lprologue:
+
+	mov	0($ctx),$A
+	mov	4($ctx),$B
+	mov	8($ctx),$C
+	mov	12($ctx),$D
+	mov	16($ctx),$E
+	jmp	.Lloop
+
+.align	16
+.Lloop:
+___
+for($i=0;$i<20;$i++)	{ &BODY_00_19($i,@V); unshift(@V,pop(@V)); }
+for(;$i<40;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
+for(;$i<60;$i++)	{ &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
+for(;$i<80;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+	add	0($ctx),$A
+	add	4($ctx),$B
+	add	8($ctx),$C
+	add	12($ctx),$D
+	add	16($ctx),$E
+	mov	$A,0($ctx)
+	mov	$B,4($ctx)
+	mov	$C,8($ctx)
+	mov	$D,12($ctx)
+	mov	$E,16($ctx)
+
+	sub	\$1,$num
+	lea	`16*4`($inp),$inp
+	jnz	.Lloop
+
+	mov	`16*4`(%rsp),%rsi
+	mov	-40(%rsi),%r14
+	mov	-32(%rsi),%r13
+	mov	-24(%rsi),%r12
+	mov	-16(%rsi),%rbp
+	mov	-8(%rsi),%rbx
+	lea	(%rsi),%rsp
+.Lepilogue:
+	ret
+.size	sha1_block_data_order,.-sha1_block_data_order
+___
+if ($shaext) {{{
+######################################################################
+# Intel SHA Extensions implementation of SHA1 update function.
+#
+my ($ctx,$inp,$num)=("%rdi","%rsi","%rdx");
+my ($ABCD,$E,$E_,$BSWAP,$ABCD_SAVE,$E_SAVE)=map("%xmm$_",(0..3,8,9));
+my @MSG=map("%xmm$_",(4..7));
+
+$code.=<<___;
+.type	sha1_block_data_order_shaext,\@function,3
+.align	32
+sha1_block_data_order_shaext:
+_shaext_shortcut:
+___
+$code.=<<___ if ($win64);
+	lea	`-8-4*16`(%rsp),%rsp
+	movaps	%xmm6,-8-4*16(%rax)
+	movaps	%xmm7,-8-3*16(%rax)
+	movaps	%xmm8,-8-2*16(%rax)
+	movaps	%xmm9,-8-1*16(%rax)
+.Lprologue_shaext:
+___
+$code.=<<___;
+	movdqu	($ctx),$ABCD
+	movd	16($ctx),$E
+	movdqa	K_XX_XX+0xa0(%rip),$BSWAP	# byte-n-word swap
+
+	movdqu	($inp),@MSG[0]
+	pshufd	\$0b00011011,$ABCD,$ABCD	# flip word order
+	movdqu	0x10($inp),@MSG[1]
+	pshufd	\$0b00011011,$E,$E		# flip word order
+	movdqu	0x20($inp),@MSG[2]
+	pshufb	$BSWAP,@MSG[0]
+	movdqu	0x30($inp),@MSG[3]
+	pshufb	$BSWAP,@MSG[1]
+	pshufb	$BSWAP,@MSG[2]
+	movdqa	$E,$E_SAVE			# offload $E
+	pshufb	$BSWAP,@MSG[3]
+	jmp	.Loop_shaext
+
+.align	16
+.Loop_shaext:
+	dec		$num
+	lea		0x40($inp),%r8		# next input block
+	paddd		@MSG[0],$E
+	cmovne		%r8,$inp
+	movdqa		$ABCD,$ABCD_SAVE	# offload $ABCD
+___
+for($i=0;$i<20-4;$i+=2) {
+$code.=<<___;
+	sha1msg1	@MSG[1],@MSG[0]
+	movdqa		$ABCD,$E_
+	sha1rnds4	\$`int($i/5)`,$E,$ABCD	# 0-3...
+	sha1nexte	@MSG[1],$E_
+	pxor		@MSG[2],@MSG[0]
+	sha1msg1	@MSG[2],@MSG[1]
+	sha1msg2	@MSG[3],@MSG[0]
+
+	movdqa		$ABCD,$E
+	sha1rnds4	\$`int(($i+1)/5)`,$E_,$ABCD
+	sha1nexte	@MSG[2],$E
+	pxor		@MSG[3],@MSG[1]
+	sha1msg2	@MSG[0],@MSG[1]
+___
+	push(@MSG,shift(@MSG));	push(@MSG,shift(@MSG));
+}
+$code.=<<___;
+	movdqu		($inp),@MSG[0]
+	movdqa		$ABCD,$E_
+	sha1rnds4	\$3,$E,$ABCD		# 64-67
+	sha1nexte	@MSG[1],$E_
+	movdqu		0x10($inp),@MSG[1]
+	pshufb		$BSWAP,@MSG[0]
+
+	movdqa		$ABCD,$E
+	sha1rnds4	\$3,$E_,$ABCD		# 68-71
+	sha1nexte	@MSG[2],$E
+	movdqu		0x20($inp),@MSG[2]
+	pshufb		$BSWAP,@MSG[1]
+
+	movdqa		$ABCD,$E_
+	sha1rnds4	\$3,$E,$ABCD		# 72-75
+	sha1nexte	@MSG[3],$E_
+	movdqu		0x30($inp),@MSG[3]
+	pshufb		$BSWAP,@MSG[2]
+
+	movdqa		$ABCD,$E
+	sha1rnds4	\$3,$E_,$ABCD		# 76-79
+	sha1nexte	$E_SAVE,$E
+	pshufb		$BSWAP,@MSG[3]
+
+	paddd		$ABCD_SAVE,$ABCD
+	movdqa		$E,$E_SAVE		# offload $E
+
+	jnz		.Loop_shaext
+
+	pshufd	\$0b00011011,$ABCD,$ABCD
+	pshufd	\$0b00011011,$E,$E
+	movdqu	$ABCD,($ctx)
+	movd	$E,16($ctx)
+___
+$code.=<<___ if ($win64);
+	movaps	-8-4*16(%rax),%xmm6
+	movaps	-8-3*16(%rax),%xmm7
+	movaps	-8-2*16(%rax),%xmm8
+	movaps	-8-1*16(%rax),%xmm9
+	mov	%rax,%rsp
+.Lepilogue_shaext:
+___
+$code.=<<___;
+	ret
+.size	sha1_block_data_order_shaext,.-sha1_block_data_order_shaext
+___
+}}}
+{{{
+my $Xi=4;
+my @X=map("%xmm$_",(4..7,0..3));
+my @Tx=map("%xmm$_",(8..10));
+my $Kx="%xmm11";
+my @V=($A,$B,$C,$D,$E)=("%eax","%ebx","%ecx","%edx","%ebp");	# size optimization
+my @T=("%esi","%edi");
+my $j=0;
+my $rx=0;
+my $K_XX_XX="%r14";
+my $fp="%r11";
+
+my $_rol=sub { &rol(@_) };
+my $_ror=sub { &ror(@_) };
+
+{ my $sn;
+sub align32() {
+  ++$sn;
+$code.=<<___;
+	jmp	.Lalign32_$sn	# see "Decoded ICache" in manual
+.align	32
+.Lalign32_$sn:
+___
+}
+}
+
+$code.=<<___;
+.type	sha1_block_data_order_ssse3,\@function,3
+.align	16
+sha1_block_data_order_ssse3:
+_ssse3_shortcut:
+	mov	%rsp,$fp	# frame pointer
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13		# redundant, done to share Win64 SE handler
+	push	%r14
+	lea	`-64-($win64?6*16:0)`(%rsp),%rsp
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,-40-6*16($fp)
+	movaps	%xmm7,-40-5*16($fp)
+	movaps	%xmm8,-40-4*16($fp)
+	movaps	%xmm9,-40-3*16($fp)
+	movaps	%xmm10,-40-2*16($fp)
+	movaps	%xmm11,-40-1*16($fp)
+.Lprologue_ssse3:
+___
+$code.=<<___;
+	and	\$-64,%rsp
+	mov	%rdi,$ctx	# reassigned argument
+	mov	%rsi,$inp	# reassigned argument
+	mov	%rdx,$num	# reassigned argument
+
+	shl	\$6,$num
+	add	$inp,$num
+	lea	K_XX_XX+64(%rip),$K_XX_XX
+
+	mov	0($ctx),$A		# load context
+	mov	4($ctx),$B
+	mov	8($ctx),$C
+	mov	12($ctx),$D
+	mov	$B,@T[0]		# magic seed
+	mov	16($ctx),$E
+	mov	$C,@T[1]
+	xor	$D,@T[1]
+	and	@T[1],@T[0]
+
+	movdqa	64($K_XX_XX),@X[2]	# pbswap mask
+	movdqa	-64($K_XX_XX),@Tx[1]	# K_00_19
+	movdqu	0($inp),@X[-4&7]	# load input to %xmm[0-3]
+	movdqu	16($inp),@X[-3&7]
+	movdqu	32($inp),@X[-2&7]
+	movdqu	48($inp),@X[-1&7]
+	pshufb	@X[2],@X[-4&7]		# byte swap
+	pshufb	@X[2],@X[-3&7]
+	pshufb	@X[2],@X[-2&7]
+	add	\$64,$inp
+	paddd	@Tx[1],@X[-4&7]		# add K_00_19
+	pshufb	@X[2],@X[-1&7]
+	paddd	@Tx[1],@X[-3&7]
+	paddd	@Tx[1],@X[-2&7]
+	movdqa	@X[-4&7],0(%rsp)	# X[]+K xfer to IALU
+	psubd	@Tx[1],@X[-4&7]		# restore X[]
+	movdqa	@X[-3&7],16(%rsp)
+	psubd	@Tx[1],@X[-3&7]
+	movdqa	@X[-2&7],32(%rsp)
+	psubd	@Tx[1],@X[-2&7]
+	jmp	.Loop_ssse3
+___
+
+sub AUTOLOAD()		# thunk [simplified] 32-bit style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
+  my $arg = pop;
+    $arg = "\$$arg" if ($arg*1 eq $arg);
+    $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
+}
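The AUTOLOAD thunk above is what lets the Xupdate_* helpers below write SIMD instructions as bare Perl calls: the undefined sub name becomes the mnemonic, a numeric final argument becomes an immediate, and the operands are emitted in reversed (AT&T) order. A standalone usage sketch (illustrative only), duplicating the thunk so it runs on its own:

#!/usr/bin/env perl
# Self-contained copy of the thunk plus two example calls.
my $code = "";
sub AUTOLOAD {
    my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
    my $arg = pop;
    $arg = "\$$arg" if ($arg*1 eq $arg);            # numbers become immediates
    $code .= "\t$opcode\t" . join(',', $arg, reverse @_) . "\n";
}
&psrld("%xmm8", 31);                                # appends "\tpsrld\t\$31,%xmm8\n"
&movdqa("%xmm9", "%xmm4");                          # appends "\tmovdqa\t%xmm4,%xmm9\n"
print $code;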
+
+sub Xupdate_ssse3_16_31()		# recall that $Xi starts with 4
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));		# ror
+	&pshufd	(@X[0],@X[-4&7],0xee);	# was &movdqa	(@X[0],@X[-3&7]);
+	 eval(shift(@insns));
+	&movdqa	(@Tx[0],@X[-1&7]);
+	  &paddd	(@Tx[1],@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&punpcklqdq(@X[0],@X[-3&7]);	# compose "X[-14]" in "X[0]", was &palignr(@X[0],@X[-4&7],8);
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	&psrldq	(@Tx[0],4);		# "X[-3]", 3 dwords
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pxor	(@X[0],@X[-4&7]);	# "X[0]"^="X[-16]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	&pxor	(@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pxor	(@X[0],@Tx[0]);		# "X[0]"^="X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	  &movdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&movdqa	(@Tx[2],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	&movdqa	(@Tx[0],@X[0]);
+	 eval(shift(@insns));
+
+	&pslldq	(@Tx[2],12);		# "X[0]"<<96, extract one dword
+	&paddd	(@X[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&psrld	(@Tx[0],31);
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	&movdqa	(@Tx[1],@Tx[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&psrld	(@Tx[2],30);
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	&por	(@X[0],@Tx[0]);		# "X[0]"<<<=1
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pslld	(@Tx[1],2);
+	&pxor	(@X[0],@Tx[2]);
+	 eval(shift(@insns));
+	  &movdqa	(@Tx[2],eval(2*16*(($Xi)/5)-64)."($K_XX_XX)");	# K_XX_XX
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&pxor	(@X[0],@Tx[1]);		# "X[0]"^=("X[0]">>96)<<<2
+	&pshufd (@Tx[1],@X[-1&7],0xee)	if ($Xi==7);	# was &movdqa	(@Tx[0],@X[-1&7]) in Xupdate_ssse3_32_79
+
+	 foreach (@insns) { eval; }	# remaining instructions [if any]
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+		push(@Tx,shift(@Tx));
+}
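Xupdate_ssse3_16_31 above (and Xupdate_ssse3_32_79 below) produce four message-schedule words per call; the scalar recurrence being vectorized is the standard SHA-1 schedule, the same update the integer path performs with its xor/xor/xor/rol 1 sequence. A plain scalar sketch, illustration only, over an arbitrary example block:

#!/usr/bin/env perl
# Scalar form of the schedule the SIMD code computes four lanes at a time:
#   W[t] = rotl1( W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16] ),  t = 16..79,
# kept in a 16-entry ring buffer much like the %rsp-based X[] slots above.
sub rotl32 { my ($x, $n) = @_; return (($x << $n) | ($x >> (32 - $n))) & 0xffffffff; }

my @X = (1) x 16;                       # arbitrary example block, 16 words
for (my $t = 16; $t < 80; $t++) {
    my $w = $X[($t-3) & 15] ^ $X[($t-8) & 15] ^ $X[($t-14) & 15] ^ $X[($t-16) & 15];
    $X[$t & 15] = rotl32($w, 1);
}
printf "W[79] for this example block: %08x\n", $X[79 & 15];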
+
+sub Xupdate_ssse3_32_79()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns))		if ($Xi==8);
+	&pxor	(@X[0],@X[-4&7]);	# "X[0]"="X[-32]"^"X[-16]"
+	 eval(shift(@insns))		if ($Xi==8);
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns))		if (@insns[1] =~ /_ror/);
+	 eval(shift(@insns))		if (@insns[0] =~ /_ror/);
+	&punpcklqdq(@Tx[0],@X[-1&7]);	# compose "X[-6]", was &palignr(@Tx[0],@X[-2&7],8);
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&pxor	(@X[0],@X[-7&7]);	# "X[0]"^="X[-28]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	if ($Xi%5) {
+	  &movdqa	(@Tx[2],@Tx[1]);# "perpetuate" K_XX_XX...
+	} else {			# ... or load next one
+	  &movdqa	(@Tx[2],eval(2*16*($Xi/5)-64)."($K_XX_XX)");
+	}
+	 eval(shift(@insns));		# ror
+	  &paddd	(@Tx[1],@X[-1&7]);
+	 eval(shift(@insns));
+
+	&pxor	(@X[0],@Tx[0]);		# "X[0]"^="X[-6]"
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns))		if (@insns[0] =~ /_ror/);
+
+	&movdqa	(@Tx[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &movdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# body_20_39
+
+	&pslld	(@X[0],2);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&psrld	(@Tx[0],30);
+	 eval(shift(@insns))		if (@insns[0] =~ /_rol/);# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+
+	&por	(@X[0],@Tx[0]);		# "X[0]"<<<=2
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns))		if (@insns[1] =~ /_rol/);
+	 eval(shift(@insns))		if (@insns[0] =~ /_rol/);
+	  &pshufd(@Tx[1],@X[-1&7],0xee)	if ($Xi<19);	# was &movdqa	(@Tx[1],@X[0])
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+
+	 foreach (@insns) { eval; }	# remaining instructions
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+		push(@Tx,shift(@Tx));
+}
+
+sub Xuplast_ssse3_80()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &paddd	(@Tx[1],@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &movdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer IALU
+
+	 foreach (@insns) { eval; }		# remaining instructions
+
+	&cmp	($inp,$num);
+	&je	(".Ldone_ssse3");
+
+	unshift(@Tx,pop(@Tx));
+
+	&movdqa	(@X[2],"64($K_XX_XX)");		# pbswap mask
+	&movdqa	(@Tx[1],"-64($K_XX_XX)");	# K_00_19
+	&movdqu	(@X[-4&7],"0($inp)");		# load input
+	&movdqu	(@X[-3&7],"16($inp)");
+	&movdqu	(@X[-2&7],"32($inp)");
+	&movdqu	(@X[-1&7],"48($inp)");
+	&pshufb	(@X[-4&7],@X[2]);		# byte swap
+	&add	($inp,64);
+
+  $Xi=0;
+}
+
+sub Xloop_ssse3()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&pshufb	(@X[($Xi-3)&7],@X[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&paddd	(@X[($Xi-4)&7],@Tx[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&movdqa	(eval(16*$Xi)."(%rsp)",@X[($Xi-4)&7]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&psubd	(@X[($Xi-4)&7],@Tx[1]);
+
+	foreach (@insns) { eval; }
+  $Xi++;
+}
+
+sub Xtail_ssse3()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	foreach (@insns) { eval; }
+}
+
+sub body_00_19 () {	# ((c^d)&b)^d
+	# on start @T[0]=(c^d)&b
+	return &body_20_39() if ($rx==19); $rx++;
+	(
+	'($a,$b,$c,$d,$e)=@V;'.
+	'&$_ror	($b,$j?7:2)',	# $b>>>2
+	'&xor	(@T[0],$d)',
+	'&mov	(@T[1],$a)',	# $b for next round
+
+	'&add	($e,eval(4*($j&15))."(%rsp)")',	# X[]+K xfer
+	'&xor	($b,$c)',	# $c^$d for next round
+
+	'&$_rol	($a,5)',
+	'&add	($e,@T[0])',
+	'&and	(@T[1],$b)',	# ($b&($c^$d)) for next round
+
+	'&xor	($b,$c)',	# restore $b
+	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+	);
+}
+
+sub body_20_39 () {	# b^d^c
+	# on entry @T[0]=b^d
+	return &body_40_59() if ($rx==39); $rx++;
+	(
+	'($a,$b,$c,$d,$e)=@V;'.
+	'&add	($e,eval(4*($j&15))."(%rsp)")',	# X[]+K xfer
+	'&xor	(@T[0],$d)	if($j==19);'.
+	'&xor	(@T[0],$c)	if($j> 19)',	# ($b^$d^$c)
+	'&mov	(@T[1],$a)',	# $b for next round
+
+	'&$_rol	($a,5)',
+	'&add	($e,@T[0])',
+	'&xor	(@T[1],$c)	if ($j< 79)',	# $b^$d for next round
+
+	'&$_ror	($b,7)',	# $b>>>2
+	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+	);
+}
+
+sub body_40_59 () {	# ((b^c)&(c^d))^c
+	# on entry @T[0]=(b^c), (c^=d)
+	$rx++;
+	(
+	'($a,$b,$c,$d,$e)=@V;'.
+	'&add	($e,eval(4*($j&15))."(%rsp)")',	# X[]+K xfer
+	'&and	(@T[0],$c)	if ($j>=40)',	# (b^c)&(c^d)
+	'&xor	($c,$d)		if ($j>=40)',	# restore $c
+
+	'&$_ror	($b,7)',	# $b>>>2
+	'&mov	(@T[1],$a)',	# $b for next round
+	'&xor	(@T[0],$c)',
+
+	'&$_rol	($a,5)',
+	'&add	($e,@T[0])',
+	'&xor	(@T[1],$c)	if ($j==59);'.
+	'&xor	(@T[1],$b)	if ($j< 59)',	# b^c for next round
+
+	'&xor	($b,$c)		if ($j< 59)',	# c^d for next round
+	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
+	);
+}
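The body_00_19/20_39/40_59 instruction strings above use rewritten Boolean forms of the round functions (noted in their comments) so that one temporary can be carried into the next round; body_20_39's b^d^c is already the parity function, and the other two forms can be brute-forced against the textbook Ch and Maj definitions. A throwaway verification sketch (illustrative only):

#!/usr/bin/env perl
# Checks the rewritten forms against reference Ch/Maj over all (b,c,d) bits.
for my $b (0, 1) { for my $c (0, 1) { for my $d (0, 1) {
    my $ch_ref  = ($b & $c) | ((1 - $b) & $d);            # Ch(b,c,d)
    my $ch_alt  = (($c ^ $d) & $b) ^ $d;                  # body_00_19 form
    my $maj_ref = ($b & $c) | ($b & $d) | ($c & $d);      # Maj(b,c,d)
    my $maj_alt = (($b ^ $c) & ($c ^ $d)) ^ $c;           # body_40_59 form
    die "mismatch at ($b,$c,$d)\n"
        if $ch_ref != $ch_alt || $maj_ref != $maj_alt;
}}}
print "Ch and Maj rewrites agree on all inputs\n";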
+$code.=<<___;
+.align	16
+.Loop_ssse3:
+___
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_16_31(\&body_00_19);
+	&Xupdate_ssse3_32_79(\&body_00_19);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_40_59);
+	&Xupdate_ssse3_32_79(\&body_20_39);
+	&Xuplast_ssse3_80(\&body_20_39);	# can jump to "done"
+
+				$saved_j=$j; @saved_V=@V;
+
+	&Xloop_ssse3(\&body_20_39);
+	&Xloop_ssse3(\&body_20_39);
+	&Xloop_ssse3(\&body_20_39);
+
+$code.=<<___;
+	add	0($ctx),$A			# update context
+	add	4($ctx),@T[0]
+	add	8($ctx),$C
+	add	12($ctx),$D
+	mov	$A,0($ctx)
+	add	16($ctx),$E
+	mov	@T[0],4($ctx)
+	mov	@T[0],$B			# magic seed
+	mov	$C,8($ctx)
+	mov	$C,@T[1]
+	mov	$D,12($ctx)
+	xor	$D,@T[1]
+	mov	$E,16($ctx)
+	and	@T[1],@T[0]
+	jmp	.Loop_ssse3
+
+.align	16
+.Ldone_ssse3:
+___
+				$j=$saved_j; @V=@saved_V;
+
+	&Xtail_ssse3(\&body_20_39);
+	&Xtail_ssse3(\&body_20_39);
+	&Xtail_ssse3(\&body_20_39);
+
+$code.=<<___;
+	add	0($ctx),$A			# update context
+	add	4($ctx),@T[0]
+	add	8($ctx),$C
+	mov	$A,0($ctx)
+	add	12($ctx),$D
+	mov	@T[0],4($ctx)
+	add	16($ctx),$E
+	mov	$C,8($ctx)
+	mov	$D,12($ctx)
+	mov	$E,16($ctx)
+___
+$code.=<<___ if ($win64);
+	movaps	-40-6*16($fp),%xmm6
+	movaps	-40-5*16($fp),%xmm7
+	movaps	-40-4*16($fp),%xmm8
+	movaps	-40-3*16($fp),%xmm9
+	movaps	-40-2*16($fp),%xmm10
+	movaps	-40-1*16($fp),%xmm11
+___
+$code.=<<___;
+	mov	-40($fp),%r14
+	mov	-32($fp),%r13
+	mov	-24($fp),%r12
+	mov	-16($fp),%rbp
+	mov	-8($fp),%rbx
+	lea	($fp),%rsp
+.Lepilogue_ssse3:
+	ret
+.size	sha1_block_data_order_ssse3,.-sha1_block_data_order_ssse3
+___
+
+if ($avx) {
+$Xi=4;				# reset variables
+@X=map("%xmm$_",(4..7,0..3));
+@Tx=map("%xmm$_",(8..10));
+$j=0;
+$rx=0;
+
+my $done_avx_label=".Ldone_avx";
+
+my $_rol=sub { &shld(@_[0],@_) };
+my $_ror=sub { &shrd(@_[0],@_) };
+
+$code.=<<___;
+.type	sha1_block_data_order_avx,\@function,3
+.align	16
+sha1_block_data_order_avx:
+_avx_shortcut:
+	mov	%rsp,$fp
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13		# redundant, done to share Win64 SE handler
+	push	%r14
+	lea	`-64-($win64?6*16:0)`(%rsp),%rsp
+	vzeroupper
+___
+$code.=<<___ if ($win64);
+	vmovaps	%xmm6,-40-6*16($fp)
+	vmovaps	%xmm7,-40-5*16($fp)
+	vmovaps	%xmm8,-40-4*16($fp)
+	vmovaps	%xmm9,-40-3*16($fp)
+	vmovaps	%xmm10,-40-2*16($fp)
+	vmovaps	%xmm11,-40-1*16($fp)
+.Lprologue_avx:
+___
+$code.=<<___;
+	and	\$-64,%rsp
+	mov	%rdi,$ctx	# reassigned argument
+	mov	%rsi,$inp	# reassigned argument
+	mov	%rdx,$num	# reassigned argument
+
+	shl	\$6,$num
+	add	$inp,$num
+	lea	K_XX_XX+64(%rip),$K_XX_XX
+
+	mov	0($ctx),$A		# load context
+	mov	4($ctx),$B
+	mov	8($ctx),$C
+	mov	12($ctx),$D
+	mov	$B,@T[0]		# magic seed
+	mov	16($ctx),$E
+	mov	$C,@T[1]
+	xor	$D,@T[1]
+	and	@T[1],@T[0]
+
+	vmovdqa	64($K_XX_XX),@X[2]	# pbswap mask
+	vmovdqa	-64($K_XX_XX),$Kx	# K_00_19
+	vmovdqu	0($inp),@X[-4&7]	# load input to %xmm[0-3]
+	vmovdqu	16($inp),@X[-3&7]
+	vmovdqu	32($inp),@X[-2&7]
+	vmovdqu	48($inp),@X[-1&7]
+	vpshufb	@X[2],@X[-4&7],@X[-4&7]	# byte swap
+	add	\$64,$inp
+	vpshufb	@X[2],@X[-3&7],@X[-3&7]
+	vpshufb	@X[2],@X[-2&7],@X[-2&7]
+	vpshufb	@X[2],@X[-1&7],@X[-1&7]
+	vpaddd	$Kx,@X[-4&7],@X[0]	# add K_00_19
+	vpaddd	$Kx,@X[-3&7],@X[1]
+	vpaddd	$Kx,@X[-2&7],@X[2]
+	vmovdqa	@X[0],0(%rsp)		# X[]+K xfer to IALU
+	vmovdqa	@X[1],16(%rsp)
+	vmovdqa	@X[2],32(%rsp)
+	jmp	.Loop_avx
+___
+
+sub Xupdate_avx_16_31()		# recall that $Xi starts with 4
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &vpaddd	(@Tx[1],$Kx,@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpsrldq(@Tx[0],@X[-1&7],4);		# "X[-3]", 3 dwords
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@Tx[0],@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vmovdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpsrld	(@Tx[0],@X[0],31);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpslldq(@Tx[2],@X[0],12);		# "X[0]"<<96, extract one dword
+	&vpaddd	(@X[0],@X[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpsrld	(@Tx[1],@Tx[2],30);
+	&vpor	(@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=1
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpslld	(@Tx[2],@Tx[2],2);
+	&vpxor	(@X[0],@X[0],@Tx[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@Tx[2]);		# "X[0]"^=("X[0]">>96)<<<2
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vmovdqa	($Kx,eval(2*16*(($Xi)/5)-64)."($K_XX_XX)")	if ($Xi%5==0);	# K_XX_XX
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+
+	 foreach (@insns) { eval; }	# remaining instructions [if any]
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+}
+
+sub Xupdate_avx_32_79()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
+  my ($a,$b,$c,$d,$e);
+
+	&vpalignr(@Tx[0],@X[-1&7],@X[-2&7],8);	# compose "X[-6]"
+	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"="X[-32]"^"X[-16]"
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&vpxor	(@X[0],@X[0],@X[-7&7]);		# "X[0]"^="X[-28]"
+	 eval(shift(@insns));
+	 eval(shift(@insns))	if (@insns[0] !~ /&ro[rl]/);
+	  &vpaddd	(@Tx[1],$Kx,@X[-1&7]);
+	  &vmovdqa	($Kx,eval(2*16*($Xi/5)-64)."($K_XX_XX)")	if ($Xi%5==0);
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-6]"
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+
+	&vpsrld	(@Tx[0],@X[0],30);
+	  &vmovdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&vpslld	(@X[0],@X[0],2);
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# ror
+	 eval(shift(@insns));
+
+	&vpor	(@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=2
+	 eval(shift(@insns));		# body_20_39
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));		# rol
+	 eval(shift(@insns));
+
+	 foreach (@insns) { eval; }	# remaining instructions
+
+  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
+}
+
+sub Xuplast_avx_80()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	  &vpaddd	(@Tx[1],$Kx,@X[-1&7]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	  &vmovdqa	(eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]);	# X[]+K xfer IALU
+
+	 foreach (@insns) { eval; }		# remaining instructions
+
+	&cmp	($inp,$num);
+	&je	($done_avx_label);
+
+	&vmovdqa(@X[2],"64($K_XX_XX)");		# pbswap mask
+	&vmovdqa($Kx,"-64($K_XX_XX)");		# K_00_19
+	&vmovdqu(@X[-4&7],"0($inp)");		# load input
+	&vmovdqu(@X[-3&7],"16($inp)");
+	&vmovdqu(@X[-2&7],"32($inp)");
+	&vmovdqu(@X[-1&7],"48($inp)");
+	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
+	&add	($inp,64);
+
+  $Xi=0;
+}
+
+sub Xloop_avx()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpshufb(@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vpaddd	(@X[$Xi&7],@X[($Xi-4)&7],$Kx);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vmovdqa(eval(16*$Xi)."(%rsp)",@X[$Xi&7]);	# X[]+K xfer to IALU
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	foreach (@insns) { eval; }
+  $Xi++;
+}
+
+sub Xtail_avx()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	foreach (@insns) { eval; }
+}
+
+$code.=<<___;
+.align	16
+.Loop_avx:
+___
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_16_31(\&body_00_19);
+	&Xupdate_avx_32_79(\&body_00_19);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_40_59);
+	&Xupdate_avx_32_79(\&body_20_39);
+	&Xuplast_avx_80(\&body_20_39);	# can jump to "done"
+
+				$saved_j=$j; @saved_V=@V;
+
+	&Xloop_avx(\&body_20_39);
+	&Xloop_avx(\&body_20_39);
+	&Xloop_avx(\&body_20_39);
+
+$code.=<<___;
+	add	0($ctx),$A			# update context
+	add	4($ctx),@T[0]
+	add	8($ctx),$C
+	add	12($ctx),$D
+	mov	$A,0($ctx)
+	add	16($ctx),$E
+	mov	@T[0],4($ctx)
+	mov	@T[0],$B			# magic seed
+	mov	$C,8($ctx)
+	mov	$C,@T[1]
+	mov	$D,12($ctx)
+	xor	$D,@T[1]
+	mov	$E,16($ctx)
+	and	@T[1],@T[0]
+	jmp	.Loop_avx
+
+.align	16
+$done_avx_label:
+___
+				$j=$saved_j; @V=@saved_V;
+
+	&Xtail_avx(\&body_20_39);
+	&Xtail_avx(\&body_20_39);
+	&Xtail_avx(\&body_20_39);
+
+$code.=<<___;
+	vzeroupper
+
+	add	0($ctx),$A			# update context
+	add	4($ctx),@T[0]
+	add	8($ctx),$C
+	mov	$A,0($ctx)
+	add	12($ctx),$D
+	mov	@T[0],4($ctx)
+	add	16($ctx),$E
+	mov	$C,8($ctx)
+	mov	$D,12($ctx)
+	mov	$E,16($ctx)
+___
+$code.=<<___ if ($win64);
+	movaps	-40-6*16($fp),%xmm6
+	movaps	-40-5*16($fp),%xmm7
+	movaps	-40-4*16($fp),%xmm8
+	movaps	-40-3*16($fp),%xmm9
+	movaps	-40-2*16($fp),%xmm10
+	movaps	-40-1*16($fp),%xmm11
+___
+$code.=<<___;
+	mov	-40($fp),%r14
+	mov	-32($fp),%r13
+	mov	-24($fp),%r12
+	mov	-16($fp),%rbp
+	mov	-8($fp),%rbx
+	lea	($fp),%rsp
+.Lepilogue_avx:
+	ret
+.size	sha1_block_data_order_avx,.-sha1_block_data_order_avx
+___
+
+if ($avx>1) {
+use integer;
+$Xi=4;					# reset variables
+@X=map("%ymm$_",(4..7,0..3));
+@Tx=map("%ymm$_",(8..10));
+$Kx="%ymm11";
+$j=0;
+
+my @ROTX=("%eax","%ebp","%ebx","%ecx","%edx","%esi");
+my ($a5,$t0)=("%r12d","%edi");
+
+my ($A,$F,$B,$C,$D,$E)=@ROTX;
+my $rx=0;
+my $frame="%r13";
+
+$code.=<<___;
+.type	sha1_block_data_order_avx2,\@function,3
+.align	16
+sha1_block_data_order_avx2:
+_avx2_shortcut:
+	mov	%rsp,$fp
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	vzeroupper
+___
+$code.=<<___ if ($win64);
+	lea	-6*16(%rsp),%rsp
+	vmovaps	%xmm6,-40-6*16($fp)
+	vmovaps	%xmm7,-40-5*16($fp)
+	vmovaps	%xmm8,-40-4*16($fp)
+	vmovaps	%xmm9,-40-3*16($fp)
+	vmovaps	%xmm10,-40-2*16($fp)
+	vmovaps	%xmm11,-40-1*16($fp)
+.Lprologue_avx2:
+___
+$code.=<<___;
+	mov	%rdi,$ctx		# reassigned argument
+	mov	%rsi,$inp		# reassigned argument
+	mov	%rdx,$num		# reassigned argument
+
+	lea	-640(%rsp),%rsp
+	shl	\$6,$num
+	 lea	64($inp),$frame
+	and	\$-128,%rsp
+	add	$inp,$num
+	lea	K_XX_XX+64(%rip),$K_XX_XX
+
+	mov	0($ctx),$A		# load context
+	 cmp	$num,$frame
+	 cmovae	$inp,$frame		# next or same block
+	mov	4($ctx),$F
+	mov	8($ctx),$C
+	mov	12($ctx),$D
+	mov	16($ctx),$E
+	vmovdqu	64($K_XX_XX),@X[2]	# pbswap mask
+
+	vmovdqu		($inp),%xmm0
+	vmovdqu		16($inp),%xmm1
+	vmovdqu		32($inp),%xmm2
+	vmovdqu		48($inp),%xmm3
+	lea		64($inp),$inp
+	vinserti128	\$1,($frame),@X[-4&7],@X[-4&7]
+	vinserti128	\$1,16($frame),@X[-3&7],@X[-3&7]
+	vpshufb		@X[2],@X[-4&7],@X[-4&7]
+	vinserti128	\$1,32($frame),@X[-2&7],@X[-2&7]
+	vpshufb		@X[2],@X[-3&7],@X[-3&7]
+	vinserti128	\$1,48($frame),@X[-1&7],@X[-1&7]
+	vpshufb		@X[2],@X[-2&7],@X[-2&7]
+	vmovdqu		-64($K_XX_XX),$Kx	# K_00_19
+	vpshufb		@X[2],@X[-1&7],@X[-1&7]
+
+	vpaddd	$Kx,@X[-4&7],@X[0]	# add K_00_19
+	vpaddd	$Kx,@X[-3&7],@X[1]
+	vmovdqu	@X[0],0(%rsp)		# X[]+K xfer to IALU
+	vpaddd	$Kx,@X[-2&7],@X[2]
+	vmovdqu	@X[1],32(%rsp)
+	vpaddd	$Kx,@X[-1&7],@X[3]
+	vmovdqu	@X[2],64(%rsp)
+	vmovdqu	@X[3],96(%rsp)
+___
+for (;$Xi<8;$Xi++) {	# Xupdate_avx2_16_31
+    use integer;
+
+	&vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
+	&vpsrldq(@Tx[0],@X[-1&7],4);		# "X[-3]", 3 dwords
+	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
+	&vpxor	(@Tx[0],@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
+	&vpxor	(@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-3]"^"X[-8]"
+	&vpsrld	(@Tx[0],@X[0],31);
+	&vmovdqu($Kx,eval(2*16*(($Xi)/5)-64)."($K_XX_XX)")	if ($Xi%5==0);	# K_XX_XX
+	&vpslldq(@Tx[2],@X[0],12);		# "X[0]"<<96, extract one dword
+	&vpaddd	(@X[0],@X[0],@X[0]);
+	&vpsrld	(@Tx[1],@Tx[2],30);
+	&vpor	(@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=1
+	&vpslld	(@Tx[2],@Tx[2],2);
+	&vpxor	(@X[0],@X[0],@Tx[1]);
+	&vpxor	(@X[0],@X[0],@Tx[2]);		# "X[0]"^=("X[0]">>96)<<<2
+	&vpaddd	(@Tx[1],@X[0],$Kx);
+	&vmovdqu("32*$Xi(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+
+	push(@X,shift(@X));	# "rotate" X[]
+}
+$code.=<<___;
+	lea	128(%rsp),$frame
+	jmp	.Loop_avx2
+.align	32
+.Loop_avx2:
+	rorx	\$2,$F,$B
+	andn	$D,$F,$t0
+	and	$C,$F
+	xor	$t0,$F
+___
+sub bodyx_00_19 () {	# 8 instructions, 3 cycles critical path
+	# at start $f=(b&c)^(~b&d), $b>>>=2
+	return &bodyx_20_39() if ($rx==19); $rx++;
+	(
+	'($a,$f,$b,$c,$d,$e)=@ROTX;'.
+
+	'&add	($e,((32*($j/4)+4*($j%4))%256-128)."($frame)");'.	# e+=X[i]+K
+	 '&lea	($frame,"256($frame)")	if ($j%32==31);',
+	'&andn	($t0,$a,$c)',			# ~b&d for next round
+
+	'&add	($e,$f)',			# e+=(b&c)^(~b&d)
+	'&rorx	($a5,$a,27)',			# a<<<5
+	'&rorx	($f,$a,2)',			# b>>>2 for next round
+	'&and	($a,$b)',			# b&c for next round
+
+	'&add	($e,$a5)',			# e+=a<<<5
+	'&xor	($a,$t0);'.			# f=(b&c)^(~b&d) for next round
+
+	'unshift(@ROTX,pop(@ROTX)); $j++;'
+	)
+}
+
+sub bodyx_20_39 () {	# 7 instructions, 2 cycles critical path
+	# on entry $f=b^c^d, $b>>>=2
+	return &bodyx_40_59() if ($rx==39); $rx++;
+	(
+	'($a,$f,$b,$c,$d,$e)=@ROTX;'.
+
+	'&add	($e,((32*($j/4)+4*($j%4))%256-128)."($frame)");'.	# e+=X[i]+K
+	 '&lea	($frame,"256($frame)")	if ($j%32==31);',
+
+	'&lea	($e,"($e,$f)")',		# e+=b^c^d
+	'&rorx	($a5,$a,27)',			# a<<<5
+	'&rorx	($f,$a,2)	if ($j<79)',	# b>>>2 in next round
+	'&xor	($a,$b)		if ($j<79)',	# b^c for next round
+
+	'&add	($e,$a5)',			# e+=a<<<5
+	'&xor	($a,$c)		if ($j<79);'.	# f=b^c^d for next round
+
+	'unshift(@ROTX,pop(@ROTX)); $j++;'
+	)
+}
+
+sub bodyx_40_59 () {	# 10 instructions, 3 cycles critical path
+	# on entry $f=((b^c)&(c^d)), $b>>>=2
+	$rx++;
+	(
+	'($a,$f,$b,$c,$d,$e)=@ROTX;'.
+
+	'&add	($e,((32*($j/4)+4*($j%4))%256-128)."($frame)");'.	# e+=X[i]+K
+	 '&lea	($frame,"256($frame)")	if ($j%32==31);',
+	'&xor	($f,$c)		if ($j>39)',	# (b^c)&(c^d)^c
+	'&mov	($t0,$b)	if ($j<59)',	# count on zero latency
+	'&xor	($t0,$c)	if ($j<59)',	# c^d for next round
+
+	'&lea	($e,"($e,$f)")',		# e+=(b^c)&(c^d)^c
+	'&rorx	($a5,$a,27)',			# a<<<5
+	'&rorx	($f,$a,2)',			# b>>>2 in next round
+	'&xor	($a,$b)',			# b^c for next round
+
+	'&add	($e,$a5)',			# e+=a<<<5
+	'&and	($a,$t0)	if ($j< 59);'.	# f=(b^c)&(c^d) for next round
+	'&xor	($a,$c)		if ($j==59);'.	# f=b^c^d for next round
+
+	'unshift(@ROTX,pop(@ROTX)); $j++;'
+	)
+}
+
+sub Xupdate_avx2_16_31()		# recall that $Xi starts with 4
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body,&$body);	# 35 instructions
+  my ($a,$b,$c,$d,$e);
+
+	&vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpsrldq(@Tx[0],@X[-1&7],4);		# "X[-3]", 3 dwords
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
+	&vpxor	(@Tx[0],@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-3]"^"X[-8]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpsrld	(@Tx[0],@X[0],31);
+	&vmovdqu($Kx,eval(2*16*(($Xi)/5)-64)."($K_XX_XX)")	if ($Xi%5==0);	# K_XX_XX
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpslldq(@Tx[2],@X[0],12);		# "X[0]"<<96, extract one dword
+	&vpaddd	(@X[0],@X[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpsrld	(@Tx[1],@Tx[2],30);
+	&vpor	(@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=1
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpslld	(@Tx[2],@Tx[2],2);
+	&vpxor	(@X[0],@X[0],@Tx[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@Tx[2]);		# "X[0]"^=("X[0]">>96)<<<2
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpaddd	(@Tx[1],@X[0],$Kx);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vmovdqu(eval(32*($Xi))."(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+
+	 foreach (@insns) { eval; }	# remaining instructions [if any]
+
+	$Xi++;
+	push(@X,shift(@X));	# "rotate" X[]
+}
+
+sub Xupdate_avx2_32_79()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body,&$body);	# 35 to 50 instructions
+  my ($a,$b,$c,$d,$e);
+
+	&vpalignr(@Tx[0],@X[-1&7],@X[-2&7],8);	# compose "X[-6]"
+	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"="X[-32]"^"X[-16]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@X[-7&7]);		# "X[0]"^="X[-28]"
+	&vmovdqu($Kx,eval(2*16*($Xi/5)-64)."($K_XX_XX)")	if ($Xi%5==0);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpxor	(@X[0],@X[0],@Tx[0]);		# "X[0]"^="X[-6]"
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpsrld	(@Tx[0],@X[0],30);
+	&vpslld	(@X[0],@X[0],2);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	#&vpslld	(@X[0],@X[0],2);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpor	(@X[0],@X[0],@Tx[0]);		# "X[0]"<<<=2
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vpaddd	(@Tx[1],@X[0],$Kx);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	&vmovdqu("32*$Xi(%rsp)",@Tx[1]);	# X[]+K xfer to IALU
+
+	 foreach (@insns) { eval; }	# remaining instructions
+
+	$Xi++;
+	push(@X,shift(@X));	# "rotate" X[]
+}
+
+sub Xloop_avx2()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body,&$body);	# 32 instructions
+  my ($a,$b,$c,$d,$e);
+
+	 foreach (@insns) { eval; }
+}
+
+	&align32();
+	&Xupdate_avx2_32_79(\&bodyx_00_19);
+	&Xupdate_avx2_32_79(\&bodyx_00_19);
+	&Xupdate_avx2_32_79(\&bodyx_00_19);
+	&Xupdate_avx2_32_79(\&bodyx_00_19);
+
+	&Xupdate_avx2_32_79(\&bodyx_20_39);
+	&Xupdate_avx2_32_79(\&bodyx_20_39);
+	&Xupdate_avx2_32_79(\&bodyx_20_39);
+	&Xupdate_avx2_32_79(\&bodyx_20_39);
+
+	&align32();
+	&Xupdate_avx2_32_79(\&bodyx_40_59);
+	&Xupdate_avx2_32_79(\&bodyx_40_59);
+	&Xupdate_avx2_32_79(\&bodyx_40_59);
+	&Xupdate_avx2_32_79(\&bodyx_40_59);
+
+	&Xloop_avx2(\&bodyx_20_39);
+	&Xloop_avx2(\&bodyx_20_39);
+	&Xloop_avx2(\&bodyx_20_39);
+	&Xloop_avx2(\&bodyx_20_39);
+
+$code.=<<___;
+	lea	128($inp),$frame
+	lea	128($inp),%rdi			# borrow $t0
+	cmp	$num,$frame
+	cmovae	$inp,$frame			# next or previous block
+
+	# output is d-e-[a]-f-b-c => A=d,F=e,C=f,D=b,E=c
+	add	0($ctx),@ROTX[0]		# update context
+	add	4($ctx),@ROTX[1]
+	add	8($ctx),@ROTX[3]
+	mov	@ROTX[0],0($ctx)
+	add	12($ctx),@ROTX[4]
+	mov	@ROTX[1],4($ctx)
+	 mov	@ROTX[0],$A			# A=d
+	add	16($ctx),@ROTX[5]
+	 mov	@ROTX[3],$a5
+	mov	@ROTX[3],8($ctx)
+	 mov	@ROTX[4],$D			# D=b
+	 #xchg	@ROTX[5],$F			# F=c, C=f
+	mov	@ROTX[4],12($ctx)
+	 mov	@ROTX[1],$F			# F=e
+	mov	@ROTX[5],16($ctx)
+	#mov	$F,16($ctx)
+	 mov	@ROTX[5],$E			# E=c
+	 mov	$a5,$C				# C=f
+	 #xchg	$F,$E				# E=c, F=e
+
+	cmp	$num,$inp
+	je	.Ldone_avx2
+___
+
+$Xi=4;				# reset variables
+@X=map("%ymm$_",(4..7,0..3));
+
+$code.=<<___;
+	vmovdqu	64($K_XX_XX),@X[2]		# pbswap mask
+	cmp	$num,%rdi			# borrowed $t0
+	ja	.Last_avx2
+
+	vmovdqu		-64(%rdi),%xmm0		# low part of @X[-4&7]
+	vmovdqu		-48(%rdi),%xmm1
+	vmovdqu		-32(%rdi),%xmm2
+	vmovdqu		-16(%rdi),%xmm3
+	vinserti128	\$1,0($frame),@X[-4&7],@X[-4&7]
+	vinserti128	\$1,16($frame),@X[-3&7],@X[-3&7]
+	vinserti128	\$1,32($frame),@X[-2&7],@X[-2&7]
+	vinserti128	\$1,48($frame),@X[-1&7],@X[-1&7]
+	jmp	.Last_avx2
+
+.align	32
+.Last_avx2:
+	lea	128+16(%rsp),$frame
+	rorx	\$2,$F,$B
+	andn	$D,$F,$t0
+	and	$C,$F
+	xor	$t0,$F
+	sub	\$-128,$inp
+___
+	$rx=$j=0;	@ROTX=($A,$F,$B,$C,$D,$E);
+
+	&Xloop_avx2	(\&bodyx_00_19);
+	&Xloop_avx2	(\&bodyx_00_19);
+	&Xloop_avx2	(\&bodyx_00_19);
+	&Xloop_avx2	(\&bodyx_00_19);
+
+	&Xloop_avx2	(\&bodyx_20_39);
+	  &vmovdqu	($Kx,"-64($K_XX_XX)");		# K_00_19
+	  &vpshufb	(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
+	&Xloop_avx2	(\&bodyx_20_39);
+	  &vpshufb	(@X[-3&7],@X[-3&7],@X[2]);
+	  &vpaddd	(@Tx[0],@X[-4&7],$Kx);		# add K_00_19
+	&Xloop_avx2	(\&bodyx_20_39);
+	  &vmovdqu	("0(%rsp)",@Tx[0]);
+	  &vpshufb	(@X[-2&7],@X[-2&7],@X[2]);
+	  &vpaddd	(@Tx[1],@X[-3&7],$Kx);
+	&Xloop_avx2	(\&bodyx_20_39);
+	  &vmovdqu	("32(%rsp)",@Tx[1]);
+	  &vpshufb	(@X[-1&7],@X[-1&7],@X[2]);
+	  &vpaddd	(@X[2],@X[-2&7],$Kx);
+
+	&Xloop_avx2	(\&bodyx_40_59);
+	&align32	();
+	  &vmovdqu	("64(%rsp)",@X[2]);
+	  &vpaddd	(@X[3],@X[-1&7],$Kx);
+	&Xloop_avx2	(\&bodyx_40_59);
+	  &vmovdqu	("96(%rsp)",@X[3]);
+	&Xloop_avx2	(\&bodyx_40_59);
+	&Xupdate_avx2_16_31(\&bodyx_40_59);
+
+	&Xupdate_avx2_16_31(\&bodyx_20_39);
+	&Xupdate_avx2_16_31(\&bodyx_20_39);
+	&Xupdate_avx2_16_31(\&bodyx_20_39);
+	&Xloop_avx2	(\&bodyx_20_39);
+
+$code.=<<___;
+	lea	128(%rsp),$frame
+
+	# output is d-e-[a]-f-b-c => A=d,F=e,C=f,D=b,E=c
+	add	0($ctx),@ROTX[0]		# update context
+	add	4($ctx),@ROTX[1]
+	add	8($ctx),@ROTX[3]
+	mov	@ROTX[0],0($ctx)
+	add	12($ctx),@ROTX[4]
+	mov	@ROTX[1],4($ctx)
+	 mov	@ROTX[0],$A			# A=d
+	add	16($ctx),@ROTX[5]
+	 mov	@ROTX[3],$a5
+	mov	@ROTX[3],8($ctx)
+	 mov	@ROTX[4],$D			# D=b
+	 #xchg	@ROTX[5],$F			# F=c, C=f
+	mov	@ROTX[4],12($ctx)
+	 mov	@ROTX[1],$F			# F=e
+	mov	@ROTX[5],16($ctx)
+	#mov	$F,16($ctx)
+	 mov	@ROTX[5],$E			# E=c
+	 mov	$a5,$C				# C=f
+	 #xchg	$F,$E				# E=c, F=e
+
+	cmp	$num,$inp
+	jbe	.Loop_avx2
+
+.Ldone_avx2:
+	vzeroupper
+___
+$code.=<<___ if ($win64);
+	movaps	-40-6*16($fp),%xmm6
+	movaps	-40-5*16($fp),%xmm7
+	movaps	-40-4*16($fp),%xmm8
+	movaps	-40-3*16($fp),%xmm9
+	movaps	-40-2*16($fp),%xmm10
+	movaps	-40-1*16($fp),%xmm11
+___
+$code.=<<___;
+	mov	-40($fp),%r14
+	mov	-32($fp),%r13
+	mov	-24($fp),%r12
+	mov	-16($fp),%rbp
+	mov	-8($fp),%rbx
+	lea	($fp),%rsp
+.Lepilogue_avx2:
+	ret
+.size	sha1_block_data_order_avx2,.-sha1_block_data_order_avx2
+___
+}
+}
+$code.=<<___;
+.align	64
+K_XX_XX:
+.long	0x5a827999,0x5a827999,0x5a827999,0x5a827999	# K_00_19
+.long	0x5a827999,0x5a827999,0x5a827999,0x5a827999	# K_00_19
+.long	0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1	# K_20_39
+.long	0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1	# K_20_39
+.long	0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc	# K_40_59
+.long	0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc	# K_40_59
+.long	0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6	# K_60_79
+.long	0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6	# K_60_79
+.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f	# pbswap mask
+.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f	# pbswap mask
+.byte	0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0
+___
+}}}
+$code.=<<___;
+.asciz	"SHA1 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align	64
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+.type	se_handler,\@abi-omnipotent
+.align	16
+se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	lea	.Lprologue(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<.Lprologue
+	jb	.Lcommon_seh_tail
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	lea	.Lepilogue(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
+	jae	.Lcommon_seh_tail
+
+	mov	`16*4`(%rax),%rax	# pull saved stack pointer
+
+	mov	-8(%rax),%rbx
+	mov	-16(%rax),%rbp
+	mov	-24(%rax),%r12
+	mov	-32(%rax),%r13
+	mov	-40(%rax),%r14
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+
+	jmp	.Lcommon_seh_tail
+.size	se_handler,.-se_handler
+___
+
+$code.=<<___ if ($shaext);
+.type	shaext_handler,\@abi-omnipotent
+.align	16
+shaext_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	lea	.Lprologue_shaext(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<.Lprologue
+	jb	.Lcommon_seh_tail
+
+	lea	.Lepilogue_shaext(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
+	jae	.Lcommon_seh_tail
+
+	lea	-8-4*16(%rax),%rsi
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$8,%ecx
+	.long	0xa548f3fc		# cld; rep movsq
+
+	jmp	.Lcommon_seh_tail
+.size	shaext_handler,.-shaext_handler
+___
+
+$code.=<<___;
+.type	ssse3_handler,\@abi-omnipotent
+.align	16
+ssse3_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lcommon_seh_tail
+
+	mov	208($context),%rax	# pull context->R11
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lcommon_seh_tail
+
+	lea	-40-6*16(%rax),%rsi
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$12,%ecx
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	-8(%rax),%rbx
+	mov	-16(%rax),%rbp
+	mov	-24(%rax),%r12
+	mov	-32(%rax),%r13
+	mov	-40(%rax),%r14
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+
+.Lcommon_seh_tail:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$154,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	ssse3_handler,.-ssse3_handler
+
+.section	.pdata
+.align	4
+	.rva	.LSEH_begin_sha1_block_data_order
+	.rva	.LSEH_end_sha1_block_data_order
+	.rva	.LSEH_info_sha1_block_data_order
+___
+$code.=<<___ if ($shaext);
+	.rva	.LSEH_begin_sha1_block_data_order_shaext
+	.rva	.LSEH_end_sha1_block_data_order_shaext
+	.rva	.LSEH_info_sha1_block_data_order_shaext
+___
+$code.=<<___;
+	.rva	.LSEH_begin_sha1_block_data_order_ssse3
+	.rva	.LSEH_end_sha1_block_data_order_ssse3
+	.rva	.LSEH_info_sha1_block_data_order_ssse3
+___
+$code.=<<___ if ($avx);
+	.rva	.LSEH_begin_sha1_block_data_order_avx
+	.rva	.LSEH_end_sha1_block_data_order_avx
+	.rva	.LSEH_info_sha1_block_data_order_avx
+___
+$code.=<<___ if ($avx>1);
+	.rva	.LSEH_begin_sha1_block_data_order_avx2
+	.rva	.LSEH_end_sha1_block_data_order_avx2
+	.rva	.LSEH_info_sha1_block_data_order_avx2
+___
+$code.=<<___;
+.section	.xdata
+.align	8
+.LSEH_info_sha1_block_data_order:
+	.byte	9,0,0,0
+	.rva	se_handler
+___
+$code.=<<___ if ($shaext);
+.LSEH_info_sha1_block_data_order_shaext:
+	.byte	9,0,0,0
+	.rva	shaext_handler
+___
+$code.=<<___;
+.LSEH_info_sha1_block_data_order_ssse3:
+	.byte	9,0,0,0
+	.rva	ssse3_handler
+	.rva	.Lprologue_ssse3,.Lepilogue_ssse3	# HandlerData[]
+___
+$code.=<<___ if ($avx);
+.LSEH_info_sha1_block_data_order_avx:
+	.byte	9,0,0,0
+	.rva	ssse3_handler
+	.rva	.Lprologue_avx,.Lepilogue_avx		# HandlerData[]
+___
+$code.=<<___ if ($avx>1);
+.LSEH_info_sha1_block_data_order_avx2:
+	.byte	9,0,0,0
+	.rva	ssse3_handler
+	.rva	.Lprologue_avx2,.Lepilogue_avx2		# HandlerData[]
+___
+}
+
+####################################################################
+
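+# sha1rnds4() and sha1op38() below hand-assemble the SHA extension
+# instructions into raw .byte sequences (0x0f,0x3a,0xcc plus ModR/M and
+# imm8 for sha1rnds4; 0x0f,0x38,<opcodelet> plus ModR/M for the others),
+# so the generated file still assembles on toolchains whose assemblers
+# predate SHA-NI.  Operands that don't match the register-only patterns
+# are passed through as plain mnemonics.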
+sub sha1rnds4 {
+    if (@_[0] =~ /\$([x0-9a-f]+),\s*%xmm([0-7]),\s*%xmm([0-7])/) {
+      my @opcode=(0x0f,0x3a,0xcc);
+	push @opcode,0xc0|($2&7)|(($3&7)<<3);		# ModR/M
+	my $c=$1;
+	push @opcode,$c=~/^0/?oct($c):$c;
+	return ".byte\t".join(',',@opcode);
+    } else {
+	return "sha1rnds4\t".@_[0];
+    }
+}
+
+sub sha1op38 {
+    my $instr = shift;
+    my %opcodelet = (
+		"sha1nexte" => 0xc8,
+  		"sha1msg1"  => 0xc9,
+		"sha1msg2"  => 0xca	);
+
+    if (defined($opcodelet{$instr}) && @_[0] =~ /%xmm([0-9]+),\s*%xmm([0-9]+)/) {
+      my @opcode=(0x0f,0x38);
+      my $rex=0;
+	$rex|=0x04			if ($2>=8);
+	$rex|=0x01			if ($1>=8);
+	unshift @opcode,0x40|$rex	if ($rex);
+	push @opcode,$opcodelet{$instr};
+	push @opcode,0xc0|($1&7)|(($2&7)<<3);		# ModR/M
+	return ".byte\t".join(',',@opcode);
+    } else {
+	return $instr."\t".@_[0];
+    }
+}
+
+foreach (split("\n",$code)) {
+	s/\`([^\`]*)\`/eval $1/geo;
+
+	s/\b(sha1rnds4)\s+(.*)/sha1rnds4($2)/geo	or
+	s/\b(sha1[^\s]*)\s+(.*)/sha1op38($1,$2)/geo;
+
+	print $_,"\n";
+}
+close STDOUT;
diff --git a/src/crypto/fipsmodule/sha/asm/sha256-586.pl b/src/crypto/fipsmodule/sha/asm/sha256-586.pl
new file mode 100644
index 0000000..834d00f
--- /dev/null
+++ b/src/crypto/fipsmodule/sha/asm/sha256-586.pl
@@ -0,0 +1,1283 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# SHA256 block transform for x86. September 2007.
+#
+# Performance improvement over compiler generated code varies from
+# 10% to 40% [see below]. Not very impressive on some µ-archs, but
+# it's 5 times smaller and optimizes the number of writes.
+#
+# May 2012.
+#
+# Optimization including two of Pavel Semjanov's ideas, alternative
+# Maj and full unroll, resulted in ~20-25% improvement on most CPUs,
+# ~7% on Pentium, ~40% on Atom. As the fully unrolled loop body is almost
+# 15x larger, 8KB vs. 560B, it's used only for longer inputs. But not
+# on P4, where it kills performance, nor Sandy Bridge, where folded
+# loop is approximately as fast...
+#
+# June 2012.
+#
+# Add AMD XOP-specific code path, >30% improvement on Bulldozer over
+# May version, >60% over original. Add AVX+shrd code path, >25%
+# improvement on Sandy Bridge over May version, 60% over original.
+#
+# May 2013.
+#
+# Replace AMD XOP code path with SSSE3 to cover more processors.
+# (Biggest improvement coefficient is on upcoming Atom Silvermont,
+# not shown.) Add AVX+BMI code path.
+#
+# March 2014.
+#
+# Add support for Intel SHA Extensions.
+#
+# Performance in clock cycles per processed byte (less is better):
+#
+#		gcc	icc	x86 asm(*)	SIMD	x86_64 asm(**)
+# Pentium	46	57	40/38		-	-
+# PIII		36	33	27/24		-	-
+# P4		41	38	28		-	17.3
+# AMD K8	27	25	19/15.5		-	14.9
+# Core2		26	23	18/15.6		14.3	13.8
+# Westmere	27	-	19/15.7		13.4	12.3
+# Sandy Bridge	25	-	15.9		12.4	11.6
+# Ivy Bridge	24	-	15.0		11.4	10.3
+# Haswell	22	-	13.9		9.46	7.80
+# Skylake	20	-	14.9		9.50	7.70
+# Bulldozer	36	-	27/22		17.0	13.6
+# VIA Nano	36	-	25/22		16.8	16.5
+# Atom		50	-	30/25		21.9	18.9
+# Silvermont	40	-	34/31		22.9	20.6
+# Goldmont	29	-	20		16.3(***)
+#
+# (*)	numbers after slash are for unrolled loop, where applicable;
+# (**)	x86_64 assembly performance is presented for reference
+#	purposes, results are best-available;
+# (***)	SHAEXT result is 4.1, strangely enough better than 64-bit one;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../../perlasm");
+require "x86asm.pl";
+
+$output=pop;
+open STDOUT,">$output";
+
+&asm_init($ARGV[0],"sha256-586.pl",$ARGV[$#ARGV] eq "386");
+
+$xmm=$avx=0;
+for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }
+
+# In upstream, this is controlled by shelling out to the compiler to check
+# versions, but BoringSSL is intended to be used with pre-generated perlasm
+# output, so this isn't useful anyway.
+#
+# TODO(davidben): Enable AVX2 code after testing by setting $avx to 2.
+$avx = 1;
+
+$avx = 0 unless ($xmm);
+
+$shaext=$xmm;	### set to zero if compiling for 1.0.1
+
+# TODO(davidben): Consider enabling the Intel SHA Extensions code once it's
+# been tested.
+$shaext = 0;
+
+$unroll_after = 64*4;	# If pre-evicted from L1P cache first spin of
+			# fully unrolled loop was measured to run about
+			# 3-4x slower. If slowdown coefficient is N and
+			# unrolled loop is m times faster, then you break
+			# even at (N-1)/(m-1) blocks. Then it needs to be
+			# adjusted for probability of code being evicted,
+			# code size/cache size=1/4. Typical m is 1.15...
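+			# As a rough, hedged reading of that arithmetic:
+			# with N~4 and m~1.15, (N-1)/(m-1) is ~20 blocks;
+			# scaling the penalty by the ~1/4 eviction
+			# probability gives an effective N of ~1.75 and a
+			# break-even around 5 blocks, which is the order of
+			# magnitude of the 64*4-byte threshold chosen here.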
+
+$A="eax";
+$E="edx";
+$T="ebx";
+$Aoff=&DWP(4,"esp");
+$Boff=&DWP(8,"esp");
+$Coff=&DWP(12,"esp");
+$Doff=&DWP(16,"esp");
+$Eoff=&DWP(20,"esp");
+$Foff=&DWP(24,"esp");
+$Goff=&DWP(28,"esp");
+$Hoff=&DWP(32,"esp");
+$Xoff=&DWP(36,"esp");
+$K256="ebp";
+
+sub BODY_16_63() {
+	&mov	($T,"ecx");			# "ecx" is preloaded
+	 &mov	("esi",&DWP(4*(9+15+16-14),"esp"));
+	&ror	("ecx",18-7);
+	 &mov	("edi","esi");
+	&ror	("esi",19-17);
+	 &xor	("ecx",$T);
+	 &shr	($T,3);
+	&ror	("ecx",7);
+	 &xor	("esi","edi");
+	 &xor	($T,"ecx");			# T = sigma0(X[-15])
+	&ror	("esi",17);
+	 &add	($T,&DWP(4*(9+15+16),"esp"));	# T += X[-16]
+	&shr	("edi",10);
+	 &add	($T,&DWP(4*(9+15+16-9),"esp"));	# T += X[-7]
+	#&xor	("edi","esi")			# sigma1(X[-2])
+	# &add	($T,"edi");			# T += sigma1(X[-2])
+	# &mov	(&DWP(4*(9+15),"esp"),$T);	# save X[0]
+
+	&BODY_00_15(1);
+}
+sub BODY_00_15() {
+    my $in_16_63=shift;
+
+	&mov	("ecx",$E);
+	 &xor	("edi","esi")			if ($in_16_63);	# sigma1(X[-2])
+	 &mov	("esi",$Foff);
+	&ror	("ecx",25-11);
+	 &add	($T,"edi")			if ($in_16_63);	# T += sigma1(X[-2])
+	 &mov	("edi",$Goff);
+	&xor	("ecx",$E);
+	 &xor	("esi","edi");
+	 &mov	($T,&DWP(4*(9+15),"esp"))	if (!$in_16_63);
+	 &mov	(&DWP(4*(9+15),"esp"),$T)	if ($in_16_63);	# save X[0]
+	&ror	("ecx",11-6);
+	 &and	("esi",$E);
+	 &mov	($Eoff,$E);		# modulo-scheduled
+	&xor	($E,"ecx");
+	 &add	($T,$Hoff);		# T += h
+	 &xor	("esi","edi");		# Ch(e,f,g)
+	&ror	($E,6);			# Sigma1(e)
+	 &mov	("ecx",$A);
+	 &add	($T,"esi");		# T += Ch(e,f,g)
+
+	&ror	("ecx",22-13);
+	 &add	($T,$E);		# T += Sigma1(e)
+	 &mov	("edi",$Boff);
+	&xor	("ecx",$A);
+	 &mov	($Aoff,$A);		# modulo-scheduled
+	 &lea	("esp",&DWP(-4,"esp"));
+	&ror	("ecx",13-2);
+	 &mov	("esi",&DWP(0,$K256));
+	&xor	("ecx",$A);
+	 &mov	($E,$Eoff);		# e in next iteration, d in this one
+	 &xor	($A,"edi");		# a ^= b
+	&ror	("ecx",2);		# Sigma0(a)
+
+	 &add	($T,"esi");		# T+= K[i]
+	 &mov	(&DWP(0,"esp"),$A);	# (b^c) in next round
+	&add	($E,$T);		# d += T
+	 &and	($A,&DWP(4,"esp"));	# a &= (b^c)
+	&add	($T,"ecx");		# T += Sigma0(a)
+	 &xor	($A,"edi");		# h = Maj(a,b,c) = Ch(a^b,c,b)
+	 &mov	("ecx",&DWP(4*(9+15+16-1),"esp"))	if ($in_16_63);	# preload T
+	&add	($K256,4);
+	 &add	($A,$T);		# h += T
+}
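+
+# Note on the "magic" b^c value kept on the stack: Maj(a,b,c) equals
+# Ch(a^b,c,b), i.e. b^((a^b)&(b^c)), so by carrying b^c over from the
+# previous round each round can form Maj with just the "a ^= b",
+# "a &= (b^c)" and final xor seen above instead of recomputing it.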
+
+&external_label("OPENSSL_ia32cap_P")		if (!$i386);
+
+&function_begin("sha256_block_data_order");
+	&mov	("esi",wparam(0));	# ctx
+	&mov	("edi",wparam(1));	# inp
+	&mov	("eax",wparam(2));	# num
+	&mov	("ebx","esp");		# saved sp
+
+	&call	(&label("pic_point"));	# make it PIC!
+&set_label("pic_point");
+	&blindpop($K256);
+	&lea	($K256,&DWP(&label("K256")."-".&label("pic_point"),$K256));
+
+	&sub	("esp",16);
+	&and	("esp",-64);
+
+	&shl	("eax",6);
+	&add	("eax","edi");
+	&mov	(&DWP(0,"esp"),"esi");	# ctx
+	&mov	(&DWP(4,"esp"),"edi");	# inp
+	&mov	(&DWP(8,"esp"),"eax");	# inp+num*128
+	&mov	(&DWP(12,"esp"),"ebx");	# saved sp
+						if (!$i386 && $xmm) {
+	&picmeup("edx","OPENSSL_ia32cap_P",$K256,&label("K256"));
+	&mov	("ecx",&DWP(0,"edx"));
+	&mov	("ebx",&DWP(4,"edx"));
+	&test	("ecx",1<<20);		# check for P4
+	&jnz	(&label("loop"));
+	&mov	("edx",&DWP(8,"edx"))	if ($xmm);
+	&test	("ecx",1<<24);		# check for FXSR
+	&jz	($unroll_after?&label("no_xmm"):&label("loop"));
+	&and	("ecx",1<<30);		# mask "Intel CPU" bit
+	&and	("ebx",1<<28|1<<9);	# mask AVX and SSSE3 bits
+	&test	("edx",1<<29)		if ($shaext);	# check for SHA
+	&jnz	(&label("shaext"))	if ($shaext);
+	&or	("ecx","ebx");
+	&and	("ecx",1<<28|1<<30);
+	&cmp	("ecx",1<<28|1<<30);
+					if ($xmm) {
+	&je	(&label("AVX"))		if ($avx);
+	&test	("ebx",1<<9);		# check for SSSE3
+	&jnz	(&label("SSSE3"));
+					} else {
+	&je	(&label("loop_shrd"));
+					}
+						if ($unroll_after) {
+&set_label("no_xmm");
+	&sub	("eax","edi");
+	&cmp	("eax",$unroll_after);
+	&jae	(&label("unrolled"));
+						} }
+	&jmp	(&label("loop"));
+
+sub COMPACT_LOOP() {
+my $suffix=shift;
+
+&set_label("loop$suffix",$suffix?32:16);
+    # copy input block to stack reversing byte and dword order
+    for($i=0;$i<4;$i++) {
+	&mov	("eax",&DWP($i*16+0,"edi"));
+	&mov	("ebx",&DWP($i*16+4,"edi"));
+	&mov	("ecx",&DWP($i*16+8,"edi"));
+	&bswap	("eax");
+	&mov	("edx",&DWP($i*16+12,"edi"));
+	&bswap	("ebx");
+	&push	("eax");
+	&bswap	("ecx");
+	&push	("ebx");
+	&bswap	("edx");
+	&push	("ecx");
+	&push	("edx");
+    }
+	&add	("edi",64);
+	&lea	("esp",&DWP(-4*9,"esp"));# place for A,B,C,D,E,F,G,H
+	&mov	(&DWP(4*(9+16)+4,"esp"),"edi");
+
+	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
+	&mov	($A,&DWP(0,"esi"));
+	&mov	("ebx",&DWP(4,"esi"));
+	&mov	("ecx",&DWP(8,"esi"));
+	&mov	("edi",&DWP(12,"esi"));
+	# &mov	($Aoff,$A);
+	&mov	($Boff,"ebx");
+	&xor	("ebx","ecx");
+	&mov	($Coff,"ecx");
+	&mov	($Doff,"edi");
+	&mov	(&DWP(0,"esp"),"ebx");	# magic
+	&mov	($E,&DWP(16,"esi"));
+	&mov	("ebx",&DWP(20,"esi"));
+	&mov	("ecx",&DWP(24,"esi"));
+	&mov	("edi",&DWP(28,"esi"));
+	# &mov	($Eoff,$E);
+	&mov	($Foff,"ebx");
+	&mov	($Goff,"ecx");
+	&mov	($Hoff,"edi");
+
+&set_label("00_15$suffix",16);
+
+	&BODY_00_15();
+
+	&cmp	("esi",0xc19bf174);
+	&jne	(&label("00_15$suffix"));
+
+	&mov	("ecx",&DWP(4*(9+15+16-1),"esp"));	# preloaded in BODY_00_15(1)
+	&jmp	(&label("16_63$suffix"));
+
+&set_label("16_63$suffix",16);
+
+	&BODY_16_63();
+
+	&cmp	("esi",0xc67178f2);
+	&jne	(&label("16_63$suffix"));
+
+	&mov	("esi",&DWP(4*(9+16+64)+0,"esp"));#ctx
+	# &mov	($A,$Aoff);
+	&mov	("ebx",$Boff);
+	# &mov	("edi",$Coff);
+	&mov	("ecx",$Doff);
+	&add	($A,&DWP(0,"esi"));
+	&add	("ebx",&DWP(4,"esi"));
+	&add	("edi",&DWP(8,"esi"));
+	&add	("ecx",&DWP(12,"esi"));
+	&mov	(&DWP(0,"esi"),$A);
+	&mov	(&DWP(4,"esi"),"ebx");
+	&mov	(&DWP(8,"esi"),"edi");
+	&mov	(&DWP(12,"esi"),"ecx");
+	# &mov	($E,$Eoff);
+	&mov	("eax",$Foff);
+	&mov	("ebx",$Goff);
+	&mov	("ecx",$Hoff);
+	&mov	("edi",&DWP(4*(9+16+64)+4,"esp"));#inp
+	&add	($E,&DWP(16,"esi"));
+	&add	("eax",&DWP(20,"esi"));
+	&add	("ebx",&DWP(24,"esi"));
+	&add	("ecx",&DWP(28,"esi"));
+	&mov	(&DWP(16,"esi"),$E);
+	&mov	(&DWP(20,"esi"),"eax");
+	&mov	(&DWP(24,"esi"),"ebx");
+	&mov	(&DWP(28,"esi"),"ecx");
+
+	&lea	("esp",&DWP(4*(9+16+64),"esp"));# destroy frame
+	&sub	($K256,4*64);			# rewind K
+
+	&cmp	("edi",&DWP(8,"esp"));		# are we done yet?
+	&jb	(&label("loop$suffix"));
+}
+	&COMPACT_LOOP();
+	&mov	("esp",&DWP(12,"esp"));		# restore sp
+&function_end_A();
+						if (!$i386 && !$xmm) {
+	# ~20% improvement on Sandy Bridge
+	local *ror = sub { &shrd(@_[0],@_) };
+	&COMPACT_LOOP("_shrd");
+	&mov	("esp",&DWP(12,"esp"));		# restore sp
+&function_end_A();
+						}
+
+&set_label("K256",64);	# Yes! I keep it in the code segment!
+@K256=(	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,
+	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
+	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,
+	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
+	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,
+	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
+	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,
+	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
+	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,
+	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
+	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,
+	0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
+	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,
+	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
+	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,
+	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2	);
+&data_word(@K256);
+&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f);	# byte swap mask
+&asciz("SHA256 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");
+
+($a,$b,$c,$d,$e,$f,$g,$h)=(0..7);	# offsets
+sub off { &DWP(4*(((shift)-$i)&7),"esp"); }
+
+if (!$i386 && $unroll_after) {
+my @AH=($A,$K256);
+
+&set_label("unrolled",16);
+	&lea	("esp",&DWP(-96,"esp"));
+	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
+	&mov	($AH[0],&DWP(0,"esi"));
+	&mov	($AH[1],&DWP(4,"esi"));
+	&mov	("ecx",&DWP(8,"esi"));
+	&mov	("ebx",&DWP(12,"esi"));
+	#&mov	(&DWP(0,"esp"),$AH[0]);
+	&mov	(&DWP(4,"esp"),$AH[1]);
+	&xor	($AH[1],"ecx");		# magic
+	&mov	(&DWP(8,"esp"),"ecx");
+	&mov	(&DWP(12,"esp"),"ebx");
+	&mov	($E,&DWP(16,"esi"));
+	&mov	("ebx",&DWP(20,"esi"));
+	&mov	("ecx",&DWP(24,"esi"));
+	&mov	("esi",&DWP(28,"esi"));
+	#&mov	(&DWP(16,"esp"),$E);
+	&mov	(&DWP(20,"esp"),"ebx");
+	&mov	(&DWP(24,"esp"),"ecx");
+	&mov	(&DWP(28,"esp"),"esi");
+	&jmp	(&label("grand_loop"));
+
+&set_label("grand_loop",16);
+    # copy input block to stack reversing byte order
+    for($i=0;$i<5;$i++) {
+	&mov	("ebx",&DWP(12*$i+0,"edi"));
+	&mov	("ecx",&DWP(12*$i+4,"edi"));
+	&bswap	("ebx");
+	&mov	("esi",&DWP(12*$i+8,"edi"));
+	&bswap	("ecx");
+	&mov	(&DWP(32+12*$i+0,"esp"),"ebx");
+	&bswap	("esi");
+	&mov	(&DWP(32+12*$i+4,"esp"),"ecx");
+	&mov	(&DWP(32+12*$i+8,"esp"),"esi");
+    }
+	&mov	("ebx",&DWP($i*12,"edi"));
+	&add	("edi",64);
+	&bswap	("ebx");
+	&mov	(&DWP(96+4,"esp"),"edi");
+	&mov	(&DWP(32+12*$i,"esp"),"ebx");
+
+    my ($t1,$t2) = ("ecx","esi");
+
+    for ($i=0;$i<64;$i++) {
+
+      if ($i>=16) {
+	&mov	($T,$t1);			# $t1 is preloaded
+	# &mov	($t2,&DWP(32+4*(($i+14)&15),"esp"));
+	&ror	($t1,18-7);
+	 &mov	("edi",$t2);
+	&ror	($t2,19-17);
+	 &xor	($t1,$T);
+	 &shr	($T,3);
+	&ror	($t1,7);
+	 &xor	($t2,"edi");
+	 &xor	($T,$t1);			# T = sigma0(X[-15])
+	&ror	($t2,17);
+	 &add	($T,&DWP(32+4*($i&15),"esp"));	# T += X[-16]
+	&shr	("edi",10);
+	 &add	($T,&DWP(32+4*(($i+9)&15),"esp"));	# T += X[-7]
+	#&xor	("edi",$t2)			# sigma1(X[-2])
+	# &add	($T,"edi");			# T += sigma1(X[-2])
+	# &mov	(&DWP(4*(9+15),"esp"),$T);	# save X[0]
+      }
+	&mov	($t1,$E);
+	 &xor	("edi",$t2)			if ($i>=16);	# sigma1(X[-2])
+	 &mov	($t2,&off($f));
+	&ror	($E,25-11);
+	 &add	($T,"edi")			if ($i>=16);	# T += sigma1(X[-2])
+	 &mov	("edi",&off($g));
+	&xor	($E,$t1);
+	 &mov	($T,&DWP(32+4*($i&15),"esp"))	if ($i<16);	# X[i]
+	 &mov	(&DWP(32+4*($i&15),"esp"),$T)	if ($i>=16 && $i<62);	# save X[0]
+	 &xor	($t2,"edi");
+	&ror	($E,11-6);
+	 &and	($t2,$t1);
+	 &mov	(&off($e),$t1);		# save $E, modulo-scheduled
+	&xor	($E,$t1);
+	 &add	($T,&off($h));		# T += h
+	 &xor	("edi",$t2);		# Ch(e,f,g)
+	&ror	($E,6);			# Sigma1(e)
+	 &mov	($t1,$AH[0]);
+	 &add	($T,"edi");		# T += Ch(e,f,g)
+
+	&ror	($t1,22-13);
+	 &mov	($t2,$AH[0]);
+	 &mov	("edi",&off($b));
+	&xor	($t1,$AH[0]);
+	 &mov	(&off($a),$AH[0]);	# save $A, modulo-scheduled
+	 &xor	($AH[0],"edi");		# a ^= b, (b^c) in next round
+	&ror	($t1,13-2);
+	 &and	($AH[1],$AH[0]);	# (b^c) &= (a^b)
+	 &lea	($E,&DWP(@K256[$i],$T,$E));	# T += Sigma1(e)+K[i]
+	&xor	($t1,$t2);
+	 &xor	($AH[1],"edi");		# h = Maj(a,b,c) = Ch(a^b,c,b)
+	 &mov	($t2,&DWP(32+4*(($i+2)&15),"esp"))	if ($i>=15 && $i<63);
+	&ror	($t1,2);		# Sigma0(a)
+
+	 &add	($AH[1],$E);		# h += T
+	 &add	($E,&off($d));		# d += T
+	&add	($AH[1],$t1);		# h += Sigma0(a)
+	 &mov	($t1,&DWP(32+4*(($i+15)&15),"esp"))	if ($i>=15 && $i<63);
+
+	@AH = reverse(@AH);		# rotate(a,h)
+	($t1,$t2) = ($t2,$t1);		# rotate(t1,t2)
+    }
+	&mov	("esi",&DWP(96,"esp"));	#ctx
+					#&mov	($AH[0],&DWP(0,"esp"));
+	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
+					#&mov	("edi", &DWP(8,"esp"));
+	&mov	("ecx",&DWP(12,"esp"));
+	&add	($AH[0],&DWP(0,"esi"));
+	&add	($AH[1],&DWP(4,"esi"));
+	&add	("edi",&DWP(8,"esi"));
+	&add	("ecx",&DWP(12,"esi"));
+	&mov	(&DWP(0,"esi"),$AH[0]);
+	&mov	(&DWP(4,"esi"),$AH[1]);
+	&mov	(&DWP(8,"esi"),"edi");
+	&mov	(&DWP(12,"esi"),"ecx");
+	 #&mov	(&DWP(0,"esp"),$AH[0]);
+	 &mov	(&DWP(4,"esp"),$AH[1]);
+	 &xor	($AH[1],"edi");		# magic
+	 &mov	(&DWP(8,"esp"),"edi");
+	 &mov	(&DWP(12,"esp"),"ecx");
+	#&mov	($E,&DWP(16,"esp"));
+	&mov	("edi",&DWP(20,"esp"));
+	&mov	("ebx",&DWP(24,"esp"));
+	&mov	("ecx",&DWP(28,"esp"));
+	&add	($E,&DWP(16,"esi"));
+	&add	("edi",&DWP(20,"esi"));
+	&add	("ebx",&DWP(24,"esi"));
+	&add	("ecx",&DWP(28,"esi"));
+	&mov	(&DWP(16,"esi"),$E);
+	&mov	(&DWP(20,"esi"),"edi");
+	&mov	(&DWP(24,"esi"),"ebx");
+	&mov	(&DWP(28,"esi"),"ecx");
+	 #&mov	(&DWP(16,"esp"),$E);
+	 &mov	(&DWP(20,"esp"),"edi");
+	&mov	("edi",&DWP(96+4,"esp"));	# inp
+	 &mov	(&DWP(24,"esp"),"ebx");
+	 &mov	(&DWP(28,"esp"),"ecx");
+
+	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
+	&jb	(&label("grand_loop"));
+
+	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
+&function_end_A();
+}
+						if (!$i386 && $xmm) {{{
+if ($shaext) {
+######################################################################
+# Intel SHA Extensions implementation of SHA256 update function.
+#
+my ($ctx,$inp,$end)=("esi","edi","eax");
+my ($Wi,$ABEF,$CDGH,$TMP)=map("xmm$_",(0..2,7));
+my @MSG=map("xmm$_",(3..6));
+
+sub sha256op38 {
+ my ($opcodelet,$dst,$src)=@_;
+    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
+    {	&data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);	}
+}
+sub sha256rnds2	{ sha256op38(0xcb,@_); }
+sub sha256msg1	{ sha256op38(0xcc,@_); }
+sub sha256msg2	{ sha256op38(0xcd,@_); }
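+# As in the SHA1 module, these wrappers emit raw opcode bytes
+# (0x0f,0x38,<opcodelet>,ModR/M) so assemblers without SHA extension
+# support can still build the output; e.g. sha256rnds2 with xmm1 as
+# destination and xmm2 as source encodes opcodelet 0xcb with ModR/M 0xca.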
+
+&set_label("shaext",32);
+	&sub		("esp",32);
+
+	&movdqu		($ABEF,&QWP(0,$ctx));		# DCBA
+	&lea		($K256,&DWP(0x80,$K256));
+	&movdqu		($CDGH,&QWP(16,$ctx));		# HGFE
+	&movdqa		($TMP,&QWP(0x100-0x80,$K256));	# byte swap mask
+
+	&pshufd		($Wi,$ABEF,0x1b);		# ABCD
+	&pshufd		($ABEF,$ABEF,0xb1);		# CDAB
+	&pshufd		($CDGH,$CDGH,0x1b);		# EFGH
+	&palignr	($ABEF,$CDGH,8);		# ABEF
+	&punpcklqdq	($CDGH,$Wi);			# CDGH
+	&jmp		(&label("loop_shaext"));
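+	# (sha256rnds2 works on the state packed as ABEF and CDGH rather
+	# than the ABCD/EFGH order stored in the context, hence the shuffle
+	# sequence above and its inverse before the state is written back.)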
+
+&set_label("loop_shaext",16);
+	&movdqu		(@MSG[0],&QWP(0,$inp));
+	&movdqu		(@MSG[1],&QWP(0x10,$inp));
+	&movdqu		(@MSG[2],&QWP(0x20,$inp));
+	&pshufb		(@MSG[0],$TMP);
+	&movdqu		(@MSG[3],&QWP(0x30,$inp));
+	&movdqa		(&QWP(16,"esp"),$CDGH);		# offload
+
+	&movdqa		($Wi,&QWP(0*16-0x80,$K256));
+	&paddd		($Wi,@MSG[0]);
+	&pshufb		(@MSG[1],$TMP);
+	&sha256rnds2	($CDGH,$ABEF);			# 0-3
+	&pshufd		($Wi,$Wi,0x0e);
+	&nop		();
+	&movdqa		(&QWP(0,"esp"),$ABEF);		# offload
+	&sha256rnds2	($ABEF,$CDGH);
+
+	&movdqa		($Wi,&QWP(1*16-0x80,$K256));
+	&paddd		($Wi,@MSG[1]);
+	&pshufb		(@MSG[2],$TMP);
+	&sha256rnds2	($CDGH,$ABEF);			# 4-7
+	&pshufd		($Wi,$Wi,0x0e);
+	&lea		($inp,&DWP(0x40,$inp));
+	&sha256msg1	(@MSG[0],@MSG[1]);
+	&sha256rnds2	($ABEF,$CDGH);
+
+	&movdqa		($Wi,&QWP(2*16-0x80,$K256));
+	&paddd		($Wi,@MSG[2]);
+	&pshufb		(@MSG[3],$TMP);
+	&sha256rnds2	($CDGH,$ABEF);			# 8-11
+	&pshufd		($Wi,$Wi,0x0e);
+	&movdqa		($TMP,@MSG[3]);
+	&palignr	($TMP,@MSG[2],4);
+	&nop		();
+	&paddd		(@MSG[0],$TMP);
+	&sha256msg1	(@MSG[1],@MSG[2]);
+	&sha256rnds2	($ABEF,$CDGH);
+
+	&movdqa		($Wi,&QWP(3*16-0x80,$K256));
+	&paddd		($Wi,@MSG[3]);
+	&sha256msg2	(@MSG[0],@MSG[3]);
+	&sha256rnds2	($CDGH,$ABEF);			# 12-15
+	&pshufd		($Wi,$Wi,0x0e);
+	&movdqa		($TMP,@MSG[0]);
+	&palignr	($TMP,@MSG[3],4);
+	&nop		();
+	&paddd		(@MSG[1],$TMP);
+	&sha256msg1	(@MSG[2],@MSG[3]);
+	&sha256rnds2	($ABEF,$CDGH);
+
+for($i=4;$i<16-3;$i++) {
+	&movdqa		($Wi,&QWP($i*16-0x80,$K256));
+	&paddd		($Wi,@MSG[0]);
+	&sha256msg2	(@MSG[1],@MSG[0]);
+	&sha256rnds2	($CDGH,$ABEF);			# 16-19...
+	&pshufd		($Wi,$Wi,0x0e);
+	&movdqa		($TMP,@MSG[1]);
+	&palignr	($TMP,@MSG[0],4);
+	&nop		();
+	&paddd		(@MSG[2],$TMP);
+	&sha256msg1	(@MSG[3],@MSG[0]);
+	&sha256rnds2	($ABEF,$CDGH);
+
+	push(@MSG,shift(@MSG));
+}
+	&movdqa		($Wi,&QWP(13*16-0x80,$K256));
+	&paddd		($Wi,@MSG[0]);
+	&sha256msg2	(@MSG[1],@MSG[0]);
+	&sha256rnds2	($CDGH,$ABEF);			# 52-55
+	&pshufd		($Wi,$Wi,0x0e);
+	&movdqa		($TMP,@MSG[1]);
+	&palignr	($TMP,@MSG[0],4);
+	&sha256rnds2	($ABEF,$CDGH);
+	&paddd		(@MSG[2],$TMP);
+
+	&movdqa		($Wi,&QWP(14*16-0x80,$K256));
+	&paddd		($Wi,@MSG[1]);
+	&sha256rnds2	($CDGH,$ABEF);			# 56-59
+	&pshufd		($Wi,$Wi,0x0e);
+	&sha256msg2	(@MSG[2],@MSG[1]);
+	&movdqa		($TMP,&QWP(0x100-0x80,$K256));	# byte swap mask
+	&sha256rnds2	($ABEF,$CDGH);
+
+	&movdqa		($Wi,&QWP(15*16-0x80,$K256));
+	&paddd		($Wi,@MSG[2]);
+	&nop		();
+	&sha256rnds2	($CDGH,$ABEF);			# 60-63
+	&pshufd		($Wi,$Wi,0x0e);
+	&cmp		($end,$inp);
+	&nop		();
+	&sha256rnds2	($ABEF,$CDGH);
+
+	&paddd		($CDGH,&QWP(16,"esp"));
+	&paddd		($ABEF,&QWP(0,"esp"));
+	&jnz		(&label("loop_shaext"));
+
+	&pshufd		($CDGH,$CDGH,0xb1);		# DCHG
+	&pshufd		($TMP,$ABEF,0x1b);		# FEBA
+	&pshufd		($ABEF,$ABEF,0xb1);		# BAFE
+	&punpckhqdq	($ABEF,$CDGH);			# DCBA
+	&palignr	($CDGH,$TMP,8);			# HGFE
+
+	&mov		("esp",&DWP(32+12,"esp"));
+	&movdqu		(&QWP(0,$ctx),$ABEF);
+	&movdqu		(&QWP(16,$ctx),$CDGH);
+&function_end_A();
+}
+
+my @X = map("xmm$_",(0..3));
+my ($t0,$t1,$t2,$t3) = map("xmm$_",(4..7));
+my @AH = ($A,$T);
+
+&set_label("SSSE3",32);
+	&lea	("esp",&DWP(-96,"esp"));
+	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
+	&mov	($AH[0],&DWP(0,"esi"));
+	&mov	($AH[1],&DWP(4,"esi"));
+	&mov	("ecx",&DWP(8,"esi"));
+	&mov	("edi",&DWP(12,"esi"));
+	#&mov	(&DWP(0,"esp"),$AH[0]);
+	&mov	(&DWP(4,"esp"),$AH[1]);
+	&xor	($AH[1],"ecx");			# magic
+	&mov	(&DWP(8,"esp"),"ecx");
+	&mov	(&DWP(12,"esp"),"edi");
+	&mov	($E,&DWP(16,"esi"));
+	&mov	("edi",&DWP(20,"esi"));
+	&mov	("ecx",&DWP(24,"esi"));
+	&mov	("esi",&DWP(28,"esi"));
+	#&mov	(&DWP(16,"esp"),$E);
+	&mov	(&DWP(20,"esp"),"edi");
+	&mov	("edi",&DWP(96+4,"esp"));	# inp
+	&mov	(&DWP(24,"esp"),"ecx");
+	&mov	(&DWP(28,"esp"),"esi");
+	&movdqa	($t3,&QWP(256,$K256));
+	&jmp	(&label("grand_ssse3"));
+
+&set_label("grand_ssse3",16);
+	# load input, reverse byte order, add K256[0..15], save to stack
+	&movdqu	(@X[0],&QWP(0,"edi"));
+	&movdqu	(@X[1],&QWP(16,"edi"));
+	&movdqu	(@X[2],&QWP(32,"edi"));
+	&movdqu	(@X[3],&QWP(48,"edi"));
+	&add	("edi",64);
+	&pshufb	(@X[0],$t3);
+	&mov	(&DWP(96+4,"esp"),"edi");
+	&pshufb	(@X[1],$t3);
+	&movdqa	($t0,&QWP(0,$K256));
+	&pshufb	(@X[2],$t3);
+	&movdqa	($t1,&QWP(16,$K256));
+	&paddd	($t0,@X[0]);
+	&pshufb	(@X[3],$t3);
+	&movdqa	($t2,&QWP(32,$K256));
+	&paddd	($t1,@X[1]);
+	&movdqa	($t3,&QWP(48,$K256));
+	&movdqa	(&QWP(32+0,"esp"),$t0);
+	&paddd	($t2,@X[2]);
+	&movdqa	(&QWP(32+16,"esp"),$t1);
+	&paddd	($t3,@X[3]);
+	&movdqa	(&QWP(32+32,"esp"),$t2);
+	&movdqa	(&QWP(32+48,"esp"),$t3);
+	&jmp	(&label("ssse3_00_47"));
+
+&set_label("ssse3_00_47",16);
+	&add		($K256,64);
+
+sub SSSE3_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body);	# 120 instructions
+
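+	# Four scalar round bodies are generated up front as a flat list of
+	# instruction strings and drained one at a time via
+	# eval(shift(@insns)), interleaving them with the SSSE3 message
+	# schedule below so the ALU and SIMD work overlap.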
+	  eval(shift(@insns));
+	&movdqa		($t0,@X[1]);
+	  eval(shift(@insns));			# @
+	  eval(shift(@insns));
+	&movdqa		($t3,@X[3]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&palignr	($t0,@X[0],4);		# X[1..4]
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	  eval(shift(@insns));
+	 &palignr	($t3,@X[2],4);		# X[9..12]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&movdqa		($t1,$t0);
+	  eval(shift(@insns));			# @
+	  eval(shift(@insns));
+	&movdqa		($t2,$t0);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&psrld		($t0,3);
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	 &paddd		(@X[0],$t3);		# X[0..3] += X[9..12]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&psrld		($t2,7);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	  eval(shift(@insns));
+	 &pshufd	($t3,@X[3],0b11111010);	# X[14..15]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&pslld		($t1,32-18);
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	&pxor		($t0,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&psrld		($t2,18-7);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	&pxor		($t0,$t1);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&pslld		($t1,18-7);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	&pxor		($t0,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &movdqa	($t2,$t3);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	&pxor		($t0,$t1);		# sigma0(X[1..4])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &psrld		($t3,10);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	&paddd		(@X[0],$t0);		# X[0..3] += sigma0(X[1..4])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &psrlq		($t2,17);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	 &pxor		($t3,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &psrlq		($t2,19-17);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	 &pxor		($t3,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &pshufd	($t3,$t3,0b10000000);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	  eval(shift(@insns));
+	 &psrldq	($t3,8);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&paddd		(@X[0],$t3);		# X[0..1] += sigma1(X[14..15])
+	  eval(shift(@insns));			# @
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	  eval(shift(@insns));
+	 &pshufd	($t3,@X[0],0b01010000);	# X[16..17]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &movdqa	($t2,$t3);
+	  eval(shift(@insns));			# @
+	 &psrld		($t3,10);
+	  eval(shift(@insns));
+	 &psrlq		($t2,17);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	 &pxor		($t3,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &psrlq		($t2,19-17);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	 &pxor		($t3,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &pshufd	($t3,$t3,0b00001000);
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	&movdqa		($t2,&QWP(16*$j,$K256));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &pslldq	($t3,8);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));			# @
+	&paddd		(@X[0],$t3);		# X[2..3] += sigma1(X[16..17])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&paddd		($t2,@X[0]);
+	  eval(shift(@insns));			# @
+
+	foreach (@insns) { eval; }		# remaining instructions
+
+	&movdqa		(&QWP(32+16*$j,"esp"),$t2);
+}
+
+sub body_00_15 () {
+	(
+	'&mov	("ecx",$E);',
+	'&ror	($E,25-11);',
+	 '&mov	("esi",&off($f));',
+	'&xor	($E,"ecx");',
+	 '&mov	("edi",&off($g));',
+	 '&xor	("esi","edi");',
+	'&ror	($E,11-6);',
+	 '&and	("esi","ecx");',
+	 '&mov	(&off($e),"ecx");',	# save $E, modulo-scheduled
+	'&xor	($E,"ecx");',
+	 '&xor	("edi","esi");',	# Ch(e,f,g)
+	'&ror	($E,6);',		# T = Sigma1(e)
+	 '&mov	("ecx",$AH[0]);',
+	 '&add	($E,"edi");',		# T += Ch(e,f,g)
+	 '&mov	("edi",&off($b));',
+	'&mov	("esi",$AH[0]);',
+
+	'&ror	("ecx",22-13);',
+	 '&mov	(&off($a),$AH[0]);',	# save $A, modulo-scheduled
+	'&xor	("ecx",$AH[0]);',
+	 '&xor	($AH[0],"edi");',	# a ^= b, (b^c) in next round
+	 '&add	($E,&off($h));',	# T += h
+	'&ror	("ecx",13-2);',
+	 '&and	($AH[1],$AH[0]);',	# (b^c) &= (a^b)
+	'&xor	("ecx","esi");',
+	 '&add	($E,&DWP(32+4*($i&15),"esp"));',	# T += K[i]+X[i]
+	 '&xor	($AH[1],"edi");',	# h = Maj(a,b,c) = Ch(a^b,c,b)
+	'&ror	("ecx",2);',		# Sigma0(a)
+
+	 '&add	($AH[1],$E);',		# h += T
+	 '&add	($E,&off($d));',	# d += T
+	'&add	($AH[1],"ecx");'.	# h += Sigma0(a)
+
+	'@AH = reverse(@AH); $i++;'	# rotate(a,h)
+	);
+}
+
+    for ($i=0,$j=0; $j<4; $j++) {
+	&SSSE3_00_47($j,\&body_00_15,@X);
+	push(@X,shift(@X));		# rotate(@X)
+    }
+	&cmp	(&DWP(16*$j,$K256),0x00010203);
+	&jne	(&label("ssse3_00_47"));
+
+    for ($i=0; $i<16; ) {
+	foreach(body_00_15()) { eval; }
+    }
+
+	&mov	("esi",&DWP(96,"esp"));	#ctx
+					#&mov	($AH[0],&DWP(0,"esp"));
+	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
+					#&mov	("edi", &DWP(8,"esp"));
+	&mov	("ecx",&DWP(12,"esp"));
+	&add	($AH[0],&DWP(0,"esi"));
+	&add	($AH[1],&DWP(4,"esi"));
+	&add	("edi",&DWP(8,"esi"));
+	&add	("ecx",&DWP(12,"esi"));
+	&mov	(&DWP(0,"esi"),$AH[0]);
+	&mov	(&DWP(4,"esi"),$AH[1]);
+	&mov	(&DWP(8,"esi"),"edi");
+	&mov	(&DWP(12,"esi"),"ecx");
+	 #&mov	(&DWP(0,"esp"),$AH[0]);
+	 &mov	(&DWP(4,"esp"),$AH[1]);
+	 &xor	($AH[1],"edi");			# magic
+	 &mov	(&DWP(8,"esp"),"edi");
+	 &mov	(&DWP(12,"esp"),"ecx");
+	#&mov	($E,&DWP(16,"esp"));
+	&mov	("edi",&DWP(20,"esp"));
+	&mov	("ecx",&DWP(24,"esp"));
+	&add	($E,&DWP(16,"esi"));
+	&add	("edi",&DWP(20,"esi"));
+	&add	("ecx",&DWP(24,"esi"));
+	&mov	(&DWP(16,"esi"),$E);
+	&mov	(&DWP(20,"esi"),"edi");
+	 &mov	(&DWP(20,"esp"),"edi");
+	&mov	("edi",&DWP(28,"esp"));
+	&mov	(&DWP(24,"esi"),"ecx");
+	 #&mov	(&DWP(16,"esp"),$E);
+	&add	("edi",&DWP(28,"esi"));
+	 &mov	(&DWP(24,"esp"),"ecx");
+	&mov	(&DWP(28,"esi"),"edi");
+	 &mov	(&DWP(28,"esp"),"edi");
+	&mov	("edi",&DWP(96+4,"esp"));	# inp
+
+	&movdqa	($t3,&QWP(64,$K256));
+	&sub	($K256,3*64);			# rewind K
+	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
+	&jb	(&label("grand_ssse3"));
+
+	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
+&function_end_A();
+						if ($avx) {
+&set_label("AVX",32);
+						if ($avx>1) {
+	&and	("edx",1<<8|1<<3);		# check for BMI2+BMI1
+	&cmp	("edx",1<<8|1<<3);
+	&je	(&label("AVX_BMI"));
+						}
+	&lea	("esp",&DWP(-96,"esp"));
+	&vzeroall	();
+	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
+	&mov	($AH[0],&DWP(0,"esi"));
+	&mov	($AH[1],&DWP(4,"esi"));
+	&mov	("ecx",&DWP(8,"esi"));
+	&mov	("edi",&DWP(12,"esi"));
+	#&mov	(&DWP(0,"esp"),$AH[0]);
+	&mov	(&DWP(4,"esp"),$AH[1]);
+	&xor	($AH[1],"ecx");			# magic
+	&mov	(&DWP(8,"esp"),"ecx");
+	&mov	(&DWP(12,"esp"),"edi");
+	&mov	($E,&DWP(16,"esi"));
+	&mov	("edi",&DWP(20,"esi"));
+	&mov	("ecx",&DWP(24,"esi"));
+	&mov	("esi",&DWP(28,"esi"));
+	#&mov	(&DWP(16,"esp"),$E);
+	&mov	(&DWP(20,"esp"),"edi");
+	&mov	("edi",&DWP(96+4,"esp"));	# inp
+	&mov	(&DWP(24,"esp"),"ecx");
+	&mov	(&DWP(28,"esp"),"esi");
+	&vmovdqa	($t3,&QWP(256,$K256));
+	&jmp	(&label("grand_avx"));
+
+&set_label("grand_avx",32);
+	# load input, reverse byte order, add K256[0..15], save to stack
+	&vmovdqu	(@X[0],&QWP(0,"edi"));
+	&vmovdqu	(@X[1],&QWP(16,"edi"));
+	&vmovdqu	(@X[2],&QWP(32,"edi"));
+	&vmovdqu	(@X[3],&QWP(48,"edi"));
+	&add		("edi",64);
+	&vpshufb	(@X[0],@X[0],$t3);
+	&mov		(&DWP(96+4,"esp"),"edi");
+	&vpshufb	(@X[1],@X[1],$t3);
+	&vpshufb	(@X[2],@X[2],$t3);
+	&vpaddd		($t0,@X[0],&QWP(0,$K256));
+	&vpshufb	(@X[3],@X[3],$t3);
+	&vpaddd		($t1,@X[1],&QWP(16,$K256));
+	&vpaddd		($t2,@X[2],&QWP(32,$K256));
+	&vpaddd		($t3,@X[3],&QWP(48,$K256));
+	&vmovdqa	(&QWP(32+0,"esp"),$t0);
+	&vmovdqa	(&QWP(32+16,"esp"),$t1);
+	&vmovdqa	(&QWP(32+32,"esp"),$t2);
+	&vmovdqa	(&QWP(32+48,"esp"),$t3);
+	&jmp		(&label("avx_00_47"));
+
+&set_label("avx_00_47",16);
+	&add		($K256,64);
+
+sub Xupdate_AVX () {
+	(
+	'&vpalignr	($t0,@X[1],@X[0],4);',	# X[1..4]
+	 '&vpalignr	($t3,@X[3],@X[2],4);',	# X[9..12]
+	'&vpsrld	($t2,$t0,7);',
+	 '&vpaddd	(@X[0],@X[0],$t3);',	# X[0..3] += X[9..12]
+	'&vpsrld	($t3,$t0,3);',
+	'&vpslld	($t1,$t0,14);',
+	'&vpxor		($t0,$t3,$t2);',
+	 '&vpshufd	($t3,@X[3],0b11111010)',# X[14..15]
+	'&vpsrld	($t2,$t2,18-7);',
+	'&vpxor		($t0,$t0,$t1);',
+	'&vpslld	($t1,$t1,25-14);',
+	'&vpxor		($t0,$t0,$t2);',
+	 '&vpsrld	($t2,$t3,10);',
+	'&vpxor		($t0,$t0,$t1);',	# sigma0(X[1..4])
+	 '&vpsrlq	($t1,$t3,17);',
+	'&vpaddd	(@X[0],@X[0],$t0);',	# X[0..3] += sigma0(X[1..4])
+	 '&vpxor	($t2,$t2,$t1);',
+	 '&vpsrlq	($t3,$t3,19);',
+	 '&vpxor	($t2,$t2,$t3);',	# sigma1(X[14..15])
+	 '&vpshufd	($t3,$t2,0b10000100);',
+	'&vpsrldq	($t3,$t3,8);',
+	'&vpaddd	(@X[0],@X[0],$t3);',	# X[0..1] += sigma1(X[14..15])
+	 '&vpshufd	($t3,@X[0],0b01010000)',# X[16..17]
+	 '&vpsrld	($t2,$t3,10);',
+	 '&vpsrlq	($t1,$t3,17);',
+	 '&vpxor	($t2,$t2,$t1);',
+	 '&vpsrlq	($t3,$t3,19);',
+	 '&vpxor	($t2,$t2,$t3);',	# sigma1(X[16..17])
+	 '&vpshufd	($t3,$t2,0b11101000);',
+	'&vpslldq	($t3,$t3,8);',
+	'&vpaddd	(@X[0],@X[0],$t3);'	# X[2..3] += sigma1(X[16..17])
+	);
+}
+
+local *ror = sub { &shrd(@_[0],@_) };
+sub AVX_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body);	# 120 instructions
+my $insn;
+
+	foreach (Xupdate_AVX()) {		# 31 instructions
+	    eval;
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	    eval($insn = shift(@insns));
+	    eval(shift(@insns)) if ($insn =~ /rorx/ && @insns[0] =~ /rorx/);
+	}
+	&vpaddd		($t2,@X[0],&QWP(16*$j,$K256));
+	foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	(&QWP(32+16*$j,"esp"),$t2);
+}
+
+    for ($i=0,$j=0; $j<4; $j++) {
+	&AVX_00_47($j,\&body_00_15,@X);
+	push(@X,shift(@X));		# rotate(@X)
+    }
+	&cmp	(&DWP(16*$j,$K256),0x00010203);
+	&jne	(&label("avx_00_47"));
+
+    for ($i=0; $i<16; ) {
+	foreach(body_00_15()) { eval; }
+    }
+
+	&mov	("esi",&DWP(96,"esp"));	#ctx
+					#&mov	($AH[0],&DWP(0,"esp"));
+	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
+					#&mov	("edi", &DWP(8,"esp"));
+	&mov	("ecx",&DWP(12,"esp"));
+	&add	($AH[0],&DWP(0,"esi"));
+	&add	($AH[1],&DWP(4,"esi"));
+	&add	("edi",&DWP(8,"esi"));
+	&add	("ecx",&DWP(12,"esi"));
+	&mov	(&DWP(0,"esi"),$AH[0]);
+	&mov	(&DWP(4,"esi"),$AH[1]);
+	&mov	(&DWP(8,"esi"),"edi");
+	&mov	(&DWP(12,"esi"),"ecx");
+	 #&mov	(&DWP(0,"esp"),$AH[0]);
+	 &mov	(&DWP(4,"esp"),$AH[1]);
+	 &xor	($AH[1],"edi");			# magic
+	 &mov	(&DWP(8,"esp"),"edi");
+	 &mov	(&DWP(12,"esp"),"ecx");
+	#&mov	($E,&DWP(16,"esp"));
+	&mov	("edi",&DWP(20,"esp"));
+	&mov	("ecx",&DWP(24,"esp"));
+	&add	($E,&DWP(16,"esi"));
+	&add	("edi",&DWP(20,"esi"));
+	&add	("ecx",&DWP(24,"esi"));
+	&mov	(&DWP(16,"esi"),$E);
+	&mov	(&DWP(20,"esi"),"edi");
+	 &mov	(&DWP(20,"esp"),"edi");
+	&mov	("edi",&DWP(28,"esp"));
+	&mov	(&DWP(24,"esi"),"ecx");
+	 #&mov	(&DWP(16,"esp"),$E);
+	&add	("edi",&DWP(28,"esi"));
+	 &mov	(&DWP(24,"esp"),"ecx");
+	&mov	(&DWP(28,"esi"),"edi");
+	 &mov	(&DWP(28,"esp"),"edi");
+	&mov	("edi",&DWP(96+4,"esp"));	# inp
+
+	&vmovdqa	($t3,&QWP(64,$K256));
+	&sub	($K256,3*64);			# rewind K
+	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
+	&jb	(&label("grand_avx"));
+
+	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
+	&vzeroall	();
+&function_end_A();
+						if ($avx>1) {
+sub bodyx_00_15 () {			# +10%
+	(
+	'&rorx	("ecx",$E,6)',
+	'&rorx	("esi",$E,11)',
+	 '&mov	(&off($e),$E)',		# save $E, modulo-scheduled
+	'&rorx	("edi",$E,25)',
+	'&xor	("ecx","esi")',
+	 '&andn	("esi",$E,&off($g))',
+	'&xor	("ecx","edi")',		# Sigma1(e)
+	 '&and	($E,&off($f))',
+	 '&mov	(&off($a),$AH[0]);',	# save $A, modulo-scheduled
+	 '&or	($E,"esi")',		# T = Ch(e,f,g)
+
+	'&rorx	("edi",$AH[0],2)',
+	'&rorx	("esi",$AH[0],13)',
+	 '&lea	($E,&DWP(0,$E,"ecx"))',	# T += Sigma1(e)
+	'&rorx	("ecx",$AH[0],22)',
+	'&xor	("esi","edi")',
+	 '&mov	("edi",&off($b))',
+	'&xor	("ecx","esi")',		# Sigma0(a)
+
+	 '&xor	($AH[0],"edi")',	# a ^= b, (b^c) in next round
+	 '&add	($E,&off($h))',		# T += h
+	 '&and	($AH[1],$AH[0])',	# (b^c) &= (a^b)
+	 '&add	($E,&DWP(32+4*($i&15),"esp"))',	# T += K[i]+X[i]
+	 '&xor	($AH[1],"edi")',	# h = Maj(a,b,c) = Ch(a^b,c,b)
+
+	 '&add	("ecx",$E)',		# h += T
+	 '&add	($E,&off($d))',		# d += T
+	'&lea	($AH[1],&DWP(0,$AH[1],"ecx"));'.	# h += Sigma0(a)
+
+	'@AH = reverse(@AH); $i++;'	# rotate(a,h)
+	);
+}
+
+&set_label("AVX_BMI",32);
+	&lea	("esp",&DWP(-96,"esp"));
+	&vzeroall	();
+	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
+	&mov	($AH[0],&DWP(0,"esi"));
+	&mov	($AH[1],&DWP(4,"esi"));
+	&mov	("ecx",&DWP(8,"esi"));
+	&mov	("edi",&DWP(12,"esi"));
+	#&mov	(&DWP(0,"esp"),$AH[0]);
+	&mov	(&DWP(4,"esp"),$AH[1]);
+	&xor	($AH[1],"ecx");			# magic
+	&mov	(&DWP(8,"esp"),"ecx");
+	&mov	(&DWP(12,"esp"),"edi");
+	&mov	($E,&DWP(16,"esi"));
+	&mov	("edi",&DWP(20,"esi"));
+	&mov	("ecx",&DWP(24,"esi"));
+	&mov	("esi",&DWP(28,"esi"));
+	#&mov	(&DWP(16,"esp"),$E);
+	&mov	(&DWP(20,"esp"),"edi");
+	&mov	("edi",&DWP(96+4,"esp"));	# inp
+	&mov	(&DWP(24,"esp"),"ecx");
+	&mov	(&DWP(28,"esp"),"esi");
+	&vmovdqa	($t3,&QWP(256,$K256));
+	&jmp	(&label("grand_avx_bmi"));
+
+&set_label("grand_avx_bmi",32);
+	# load input, reverse byte order, add K256[0..15], save to stack
+	&vmovdqu	(@X[0],&QWP(0,"edi"));
+	&vmovdqu	(@X[1],&QWP(16,"edi"));
+	&vmovdqu	(@X[2],&QWP(32,"edi"));
+	&vmovdqu	(@X[3],&QWP(48,"edi"));
+	&add		("edi",64);
+	&vpshufb	(@X[0],@X[0],$t3);
+	&mov		(&DWP(96+4,"esp"),"edi");
+	&vpshufb	(@X[1],@X[1],$t3);
+	&vpshufb	(@X[2],@X[2],$t3);
+	&vpaddd		($t0,@X[0],&QWP(0,$K256));
+	&vpshufb	(@X[3],@X[3],$t3);
+	&vpaddd		($t1,@X[1],&QWP(16,$K256));
+	&vpaddd		($t2,@X[2],&QWP(32,$K256));
+	&vpaddd		($t3,@X[3],&QWP(48,$K256));
+	&vmovdqa	(&QWP(32+0,"esp"),$t0);
+	&vmovdqa	(&QWP(32+16,"esp"),$t1);
+	&vmovdqa	(&QWP(32+32,"esp"),$t2);
+	&vmovdqa	(&QWP(32+48,"esp"),$t3);
+	&jmp		(&label("avx_bmi_00_47"));
+
+&set_label("avx_bmi_00_47",16);
+	&add		($K256,64);
+
+    for ($i=0,$j=0; $j<4; $j++) {
+	&AVX_00_47($j,\&bodyx_00_15,@X);
+	push(@X,shift(@X));		# rotate(@X)
+    }
+	&cmp	(&DWP(16*$j,$K256),0x00010203);
+	&jne	(&label("avx_bmi_00_47"));
+
+    for ($i=0; $i<16; ) {
+	foreach(bodyx_00_15()) { eval; }
+    }
+
+	&mov	("esi",&DWP(96,"esp"));	#ctx
+					#&mov	($AH[0],&DWP(0,"esp"));
+	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
+					#&mov	("edi", &DWP(8,"esp"));
+	&mov	("ecx",&DWP(12,"esp"));
+	&add	($AH[0],&DWP(0,"esi"));
+	&add	($AH[1],&DWP(4,"esi"));
+	&add	("edi",&DWP(8,"esi"));
+	&add	("ecx",&DWP(12,"esi"));
+	&mov	(&DWP(0,"esi"),$AH[0]);
+	&mov	(&DWP(4,"esi"),$AH[1]);
+	&mov	(&DWP(8,"esi"),"edi");
+	&mov	(&DWP(12,"esi"),"ecx");
+	 #&mov	(&DWP(0,"esp"),$AH[0]);
+	 &mov	(&DWP(4,"esp"),$AH[1]);
+	 &xor	($AH[1],"edi");			# magic
+	 &mov	(&DWP(8,"esp"),"edi");
+	 &mov	(&DWP(12,"esp"),"ecx");
+	#&mov	($E,&DWP(16,"esp"));
+	&mov	("edi",&DWP(20,"esp"));
+	&mov	("ecx",&DWP(24,"esp"));
+	&add	($E,&DWP(16,"esi"));
+	&add	("edi",&DWP(20,"esi"));
+	&add	("ecx",&DWP(24,"esi"));
+	&mov	(&DWP(16,"esi"),$E);
+	&mov	(&DWP(20,"esi"),"edi");
+	 &mov	(&DWP(20,"esp"),"edi");
+	&mov	("edi",&DWP(28,"esp"));
+	&mov	(&DWP(24,"esi"),"ecx");
+	 #&mov	(&DWP(16,"esp"),$E);
+	&add	("edi",&DWP(28,"esi"));
+	 &mov	(&DWP(24,"esp"),"ecx");
+	&mov	(&DWP(28,"esi"),"edi");
+	 &mov	(&DWP(28,"esp"),"edi");
+	&mov	("edi",&DWP(96+4,"esp"));	# inp
+
+	&vmovdqa	($t3,&QWP(64,$K256));
+	&sub	($K256,3*64);			# rewind K
+	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
+	&jb	(&label("grand_avx_bmi"));
+
+	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
+	&vzeroall	();
+&function_end_A();
+						}
+						}
+						}}}
+&function_end_B("sha256_block_data_order");
+
+&asm_finish();
+
+close STDOUT;
diff --git a/src/crypto/fipsmodule/sha/asm/sha256-armv4.pl b/src/crypto/fipsmodule/sha/asm/sha256-armv4.pl
new file mode 100644
index 0000000..3eea080
--- /dev/null
+++ b/src/crypto/fipsmodule/sha/asm/sha256-armv4.pl
@@ -0,0 +1,733 @@
+#! /usr/bin/env perl
+# Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License").  You may not use
+# this file except in compliance with the License.  You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+#
+# Permission to use under GPL terms is granted.
+# ====================================================================
+
+# SHA256 block procedure for ARMv4. May 2007.
+
+# Performance is ~2x better than gcc 3.4 generated code and in "abso-
+# lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per
+# byte [on single-issue Xscale PXA250 core].
+
+# July 2010.
+#
+# Rescheduling for dual-issue pipeline resulted in 22% improvement on
+# Cortex A8 core and ~20 cycles per processed byte.
+
+# February 2011.
+#
+# Profiler-assisted and platform-specific optimization resulted in 16%
+# improvement on Cortex A8 core and ~15.4 cycles per processed byte.
+
+# September 2013.
+#
+# Add NEON implementation. On Cortex A8 it was measured to process one
+# byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon
+# S4 does it in 12.5 cycles too, but it's 50% faster than integer-only
+# code (meaning that the latter performs sub-optimally; nothing was done
+# about it).
+
+# May 2014.
+#
+# Add ARMv8 code path performing at 2.0 cpb on Apple A7.
+
+$flavour = shift;
+if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
+else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }
+
+if ($flavour && $flavour ne "void") {
+    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+    ( $xlate="${dir}../../../perlasm/arm-xlate.pl" and -f $xlate) or
+    die "can't locate arm-xlate.pl";
+
+    open STDOUT,"| \"$^X\" $xlate $flavour $output";
+} else {
+    open STDOUT,">$output";
+}
+
+$ctx="r0";	$t0="r0";
+$inp="r1";	$t4="r1";
+$len="r2";	$t1="r2";
+$T1="r3";	$t3="r3";
+$A="r4";
+$B="r5";
+$C="r6";
+$D="r7";
+$E="r8";
+$F="r9";
+$G="r10";
+$H="r11";
+@V=($A,$B,$C,$D,$E,$F,$G,$H);
+$t2="r12";
+$Ktbl="r14";
+
+@Sigma0=( 2,13,22);
+@Sigma1=( 6,11,25);
+@sigma0=( 7,18, 3);
+@sigma1=(17,19,10);
+
+sub BODY_00_15 {
+my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
+
+$code.=<<___ if ($i<16);
+#if __ARM_ARCH__>=7
+	@ ldr	$t1,[$inp],#4			@ $i
+# if $i==15
+	str	$inp,[sp,#17*4]			@ make room for $t4
+# endif
+	eor	$t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`
+	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
+	eor	$t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]`	@ Sigma1(e)
+# ifndef __ARMEB__
+	rev	$t1,$t1
+# endif
+#else
+	@ ldrb	$t1,[$inp,#3]			@ $i
+	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
+	ldrb	$t2,[$inp,#2]
+	ldrb	$t0,[$inp,#1]
+	orr	$t1,$t1,$t2,lsl#8
+	ldrb	$t2,[$inp],#4
+	orr	$t1,$t1,$t0,lsl#16
+# if $i==15
+	str	$inp,[sp,#17*4]			@ make room for $t4
+# endif
+	eor	$t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`
+	orr	$t1,$t1,$t2,lsl#24
+	eor	$t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]`	@ Sigma1(e)
+#endif
+___
+$code.=<<___;
+	ldr	$t2,[$Ktbl],#4			@ *K256++
+	add	$h,$h,$t1			@ h+=X[i]
+	str	$t1,[sp,#`$i%16`*4]
+	eor	$t1,$f,$g
+	add	$h,$h,$t0,ror#$Sigma1[0]	@ h+=Sigma1(e)
+	and	$t1,$t1,$e
+	add	$h,$h,$t2			@ h+=K256[i]
+	eor	$t1,$t1,$g			@ Ch(e,f,g)
+	eor	$t0,$a,$a,ror#`$Sigma0[1]-$Sigma0[0]`
+	add	$h,$h,$t1			@ h+=Ch(e,f,g)
+#if $i==31
+	and	$t2,$t2,#0xff
+	cmp	$t2,#0xf2			@ done?
+#endif
+#if $i<15
+# if __ARM_ARCH__>=7
+	ldr	$t1,[$inp],#4			@ prefetch
+# else
+	ldrb	$t1,[$inp,#3]
+# endif
+	eor	$t2,$a,$b			@ a^b, b^c in next round
+#else
+	ldr	$t1,[sp,#`($i+2)%16`*4]		@ from future BODY_16_xx
+	eor	$t2,$a,$b			@ a^b, b^c in next round
+	ldr	$t4,[sp,#`($i+15)%16`*4]	@ from future BODY_16_xx
+#endif
+	eor	$t0,$t0,$a,ror#`$Sigma0[2]-$Sigma0[0]`	@ Sigma0(a)
+	and	$t3,$t3,$t2			@ (b^c)&=(a^b)
+	add	$d,$d,$h			@ d+=h
+	eor	$t3,$t3,$b			@ Maj(a,b,c)
+	add	$h,$h,$t0,ror#$Sigma0[0]	@ h+=Sigma0(a)
+	@ add	$h,$h,$t3			@ h+=Maj(a,b,c)
+___
+	($t2,$t3)=($t3,$t2);
+}
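+# Note the deferred Maj: each round leaves its Maj(a,b,c) term in $t2/$t3
+# and the next round (or the loop tail) folds it in via the
+# "h+=Maj(a,b,c) from the past" add, shortening the per-round critical path.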
+
+sub BODY_16_XX {
+my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
+
+$code.=<<___;
+	@ ldr	$t1,[sp,#`($i+1)%16`*4]		@ $i
+	@ ldr	$t4,[sp,#`($i+14)%16`*4]
+	mov	$t0,$t1,ror#$sigma0[0]
+	add	$a,$a,$t2			@ h+=Maj(a,b,c) from the past
+	mov	$t2,$t4,ror#$sigma1[0]
+	eor	$t0,$t0,$t1,ror#$sigma0[1]
+	eor	$t2,$t2,$t4,ror#$sigma1[1]
+	eor	$t0,$t0,$t1,lsr#$sigma0[2]	@ sigma0(X[i+1])
+	ldr	$t1,[sp,#`($i+0)%16`*4]
+	eor	$t2,$t2,$t4,lsr#$sigma1[2]	@ sigma1(X[i+14])
+	ldr	$t4,[sp,#`($i+9)%16`*4]
+
+	add	$t2,$t2,$t0
+	eor	$t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`	@ from BODY_00_15
+	add	$t1,$t1,$t2
+	eor	$t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]`	@ Sigma1(e)
+	add	$t1,$t1,$t4			@ X[i]
+___
+	&BODY_00_15(@_);
+}
+
+$code=<<___;
+#ifndef __KERNEL__
+# include <openssl/arm_arch.h>
+#else
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
+#endif
+
+.text
+#if defined(__thumb2__)
+.syntax unified
+.thumb
+#else
+.code   32
+#endif
+
+.type	K256,%object
+.align	5
+K256:
+.word	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+.word	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+.word	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+.word	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+.word	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+.word	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+.word	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+.word	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+.word	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+.word	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+.word	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+.word	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+.word	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+.word	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+.word	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+.word	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+.size	K256,.-K256
+.word	0				@ terminator
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+.LOPENSSL_armcap:
+.word	OPENSSL_armcap_P-.Lsha256_block_data_order
+#endif
+.align	5
+
+.global	sha256_block_data_order
+.type	sha256_block_data_order,%function
+sha256_block_data_order:
+.Lsha256_block_data_order:
+#if __ARM_ARCH__<7 && !defined(__thumb2__)
+	sub	r3,pc,#8		@ sha256_block_data_order
+#else
+	adr	r3,.Lsha256_block_data_order
+#endif
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+	ldr	r12,.LOPENSSL_armcap
+	ldr	r12,[r3,r12]		@ OPENSSL_armcap_P
+#ifdef	__APPLE__
+	ldr	r12,[r12]
+#endif
+	tst	r12,#ARMV8_SHA256
+	bne	.LARMv8
+	tst	r12,#ARMV7_NEON
+	bne	.LNEON
+#endif
+	add	$len,$inp,$len,lsl#6	@ len to point at the end of inp
+	stmdb	sp!,{$ctx,$inp,$len,r4-r11,lr}
+	ldmia	$ctx,{$A,$B,$C,$D,$E,$F,$G,$H}
+	sub	$Ktbl,r3,#256+32	@ K256
+	sub	sp,sp,#16*4		@ alloca(X[16])
+.Loop:
+# if __ARM_ARCH__>=7
+	ldr	$t1,[$inp],#4
+# else
+	ldrb	$t1,[$inp,#3]
+# endif
+	eor	$t3,$B,$C		@ magic
+	eor	$t2,$t2,$t2
+___
+for($i=0;$i<16;$i++)	{ &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
+$code.=".Lrounds_16_xx:\n";
+for (;$i<32;$i++)	{ &BODY_16_XX($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+#if __ARM_ARCH__>=7
+	ite	eq			@ Thumb2 thing, sanity check in ARM
+#endif
+	ldreq	$t3,[sp,#16*4]		@ pull ctx
+	bne	.Lrounds_16_xx
+
+	add	$A,$A,$t2		@ h+=Maj(a,b,c) from the past
+	ldr	$t0,[$t3,#0]
+	ldr	$t1,[$t3,#4]
+	ldr	$t2,[$t3,#8]
+	add	$A,$A,$t0
+	ldr	$t0,[$t3,#12]
+	add	$B,$B,$t1
+	ldr	$t1,[$t3,#16]
+	add	$C,$C,$t2
+	ldr	$t2,[$t3,#20]
+	add	$D,$D,$t0
+	ldr	$t0,[$t3,#24]
+	add	$E,$E,$t1
+	ldr	$t1,[$t3,#28]
+	add	$F,$F,$t2
+	ldr	$inp,[sp,#17*4]		@ pull inp
+	ldr	$t2,[sp,#18*4]		@ pull inp+len
+	add	$G,$G,$t0
+	add	$H,$H,$t1
+	stmia	$t3,{$A,$B,$C,$D,$E,$F,$G,$H}
+	cmp	$inp,$t2
+	sub	$Ktbl,$Ktbl,#256	@ rewind Ktbl
+	bne	.Loop
+
+	add	sp,sp,#`16+3`*4	@ destroy frame
+#if __ARM_ARCH__>=5
+	ldmia	sp!,{r4-r11,pc}
+#else
+	ldmia	sp!,{r4-r11,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	bx	lr			@ interoperable with Thumb ISA:-)
+#endif
+.size	sha256_block_data_order,.-sha256_block_data_order
+___
+######################################################################
+# NEON stuff
+#
+{{{
+my @X=map("q$_",(0..3));
+my ($T0,$T1,$T2,$T3,$T4,$T5)=("q8","q9","q10","q11","d24","d25");
+my $Xfer=$t4;
+my $j=0;
+
+sub Dlo()   { shift=~m|q([1]?[0-9])|?"d".($1*2):"";     }
+sub Dhi()   { shift=~m|q([1]?[0-9])|?"d".($1*2+1):"";   }
+
+sub AUTOLOAD()          # thunk [simplified] x86-style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
+  my $arg = pop;
+    $arg = "#$arg" if ($arg*1 eq $arg);
+    $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
+}
+
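+# Any otherwise-undefined "&vfoo_bar(...)" call below lands in AUTOLOAD
+# above, which turns the underscore back into a dot (&vext_8 -> "vext.8")
+# and appends the NEON instruction to $code.  Xupdate then interleaves one
+# quad of message-schedule work with four scalar rounds from body_00_15,
+# much like the interleaving in the x86 SIMD paths.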
+sub Xupdate()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);
+  my ($a,$b,$c,$d,$e,$f,$g,$h);
+
+	&vext_8		($T0,@X[0],@X[1],4);	# X[1..4]
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vext_8		($T1,@X[2],@X[3],4);	# X[9..12]
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vshr_u32	($T2,$T0,$sigma0[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vadd_i32	(@X[0],@X[0],$T1);	# X[0..3] += X[9..12]
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vshr_u32	($T1,$T0,$sigma0[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vsli_32	($T2,$T0,32-$sigma0[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vshr_u32	($T3,$T0,$sigma0[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&veor		($T1,$T1,$T2);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vsli_32	($T3,$T0,32-$sigma0[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vshr_u32	($T4,&Dhi(@X[3]),$sigma1[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&veor		($T1,$T1,$T3);		# sigma0(X[1..4])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vsli_32	($T4,&Dhi(@X[3]),32-$sigma1[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vshr_u32	($T5,&Dhi(@X[3]),$sigma1[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vadd_i32	(@X[0],@X[0],$T1);	# X[0..3] += sigma0(X[1..4])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &veor		($T5,$T5,$T4);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vshr_u32	($T4,&Dhi(@X[3]),$sigma1[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vsli_32	($T4,&Dhi(@X[3]),32-$sigma1[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &veor		($T5,$T5,$T4);		# sigma1(X[14..15])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vadd_i32	(&Dlo(@X[0]),&Dlo(@X[0]),$T5);# X[0..1] += sigma1(X[14..15])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vshr_u32	($T4,&Dlo(@X[0]),$sigma1[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vsli_32	($T4,&Dlo(@X[0]),32-$sigma1[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vshr_u32	($T5,&Dlo(@X[0]),$sigma1[2]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &veor		($T5,$T5,$T4);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vshr_u32	($T4,&Dlo(@X[0]),$sigma1[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vld1_32	("{$T0}","[$Ktbl,:128]!");
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &vsli_32	($T4,&Dlo(@X[0]),32-$sigma1[1]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	  &veor		($T5,$T5,$T4);		# sigma1(X[16..17])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vadd_i32	(&Dhi(@X[0]),&Dhi(@X[0]),$T5);# X[2..3] += sigma1(X[16..17])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vadd_i32	($T0,$T0,@X[0]);
+	 while($#insns>=2) { eval(shift(@insns)); }
+	&vst1_32	("{$T0}","[$Xfer,:128]!");
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+
+	push(@X,shift(@X));		# "rotate" X[]
+}
+
+sub Xpreload()
+{ use integer;
+  my $body = shift;
+  my @insns = (&$body,&$body,&$body,&$body);
+  my ($a,$b,$c,$d,$e,$f,$g,$h);
+
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vld1_32	("{$T0}","[$Ktbl,:128]!");
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vrev32_8	(@X[0],@X[0]);
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&vadd_i32	($T0,$T0,@X[0]);
+	 foreach (@insns) { eval; }	# remaining instructions
+	&vst1_32	("{$T0}","[$Xfer,:128]!");
+
+	push(@X,shift(@X));		# "rotate" X[]
+}
+
+sub body_00_15 () {
+	(
+	'($a,$b,$c,$d,$e,$f,$g,$h)=@V;'.
+	'&add	($h,$h,$t1)',			# h+=X[i]+K[i]
+	'&eor	($t1,$f,$g)',
+	'&eor	($t0,$e,$e,"ror#".($Sigma1[1]-$Sigma1[0]))',
+	'&add	($a,$a,$t2)',			# h+=Maj(a,b,c) from the past
+	'&and	($t1,$t1,$e)',
+	'&eor	($t2,$t0,$e,"ror#".($Sigma1[2]-$Sigma1[0]))',	# Sigma1(e)
+	'&eor	($t0,$a,$a,"ror#".($Sigma0[1]-$Sigma0[0]))',
+	'&eor	($t1,$t1,$g)',			# Ch(e,f,g)
+	'&add	($h,$h,$t2,"ror#$Sigma1[0]")',	# h+=Sigma1(e)
+	'&eor	($t2,$a,$b)',			# a^b, b^c in next round
+	'&eor	($t0,$t0,$a,"ror#".($Sigma0[2]-$Sigma0[0]))',	# Sigma0(a)
+	'&add	($h,$h,$t1)',			# h+=Ch(e,f,g)
+	'&ldr	($t1,sprintf "[sp,#%d]",4*(($j+1)&15))	if (($j&15)!=15);'.
+	'&ldr	($t1,"[$Ktbl]")				if ($j==15);'.
+	'&ldr	($t1,"[sp,#64]")			if ($j==31)',
+	'&and	($t3,$t3,$t2)',			# (b^c)&=(a^b)
+	'&add	($d,$d,$h)',			# d+=h
+	'&add	($h,$h,$t0,"ror#$Sigma0[0]");'.	# h+=Sigma0(a)
+	'&eor	($t3,$t3,$b)',			# Maj(a,b,c)
+	'$j++;	unshift(@V,pop(@V)); ($t2,$t3)=($t3,$t2);'
+	)
+}
+
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7
+.arch	armv7-a
+.fpu	neon
+
+.global	sha256_block_data_order_neon
+.type	sha256_block_data_order_neon,%function
+.align	5
+.skip	16
+sha256_block_data_order_neon:
+.LNEON:
+	stmdb	sp!,{r4-r12,lr}
+
+	sub	$H,sp,#16*4+16
+	adr	$Ktbl,K256
+	bic	$H,$H,#15		@ align for 128-bit stores
+	mov	$t2,sp
+	mov	sp,$H			@ alloca
+	add	$len,$inp,$len,lsl#6	@ len to point at the end of inp
+
+	vld1.8		{@X[0]},[$inp]!
+	vld1.8		{@X[1]},[$inp]!
+	vld1.8		{@X[2]},[$inp]!
+	vld1.8		{@X[3]},[$inp]!
+	vld1.32		{$T0},[$Ktbl,:128]!
+	vld1.32		{$T1},[$Ktbl,:128]!
+	vld1.32		{$T2},[$Ktbl,:128]!
+	vld1.32		{$T3},[$Ktbl,:128]!
+	vrev32.8	@X[0],@X[0]		@ yes, even on
+	str		$ctx,[sp,#64]
+	vrev32.8	@X[1],@X[1]		@ big-endian
+	str		$inp,[sp,#68]
+	mov		$Xfer,sp
+	vrev32.8	@X[2],@X[2]
+	str		$len,[sp,#72]
+	vrev32.8	@X[3],@X[3]
+	str		$t2,[sp,#76]		@ save original sp
+	vadd.i32	$T0,$T0,@X[0]
+	vadd.i32	$T1,$T1,@X[1]
+	vst1.32		{$T0},[$Xfer,:128]!
+	vadd.i32	$T2,$T2,@X[2]
+	vst1.32		{$T1},[$Xfer,:128]!
+	vadd.i32	$T3,$T3,@X[3]
+	vst1.32		{$T2},[$Xfer,:128]!
+	vst1.32		{$T3},[$Xfer,:128]!
+
+	ldmia		$ctx,{$A-$H}
+	sub		$Xfer,$Xfer,#64
+	ldr		$t1,[sp,#0]
+	eor		$t2,$t2,$t2
+	eor		$t3,$B,$C
+	b		.L_00_48
+
+.align	4
+.L_00_48:
+___
+	&Xupdate(\&body_00_15);
+	&Xupdate(\&body_00_15);
+	&Xupdate(\&body_00_15);
+	&Xupdate(\&body_00_15);
+$code.=<<___;
+	teq	$t1,#0				@ check for K256 terminator
+	ldr	$t1,[sp,#0]
+	sub	$Xfer,$Xfer,#64
+	bne	.L_00_48
+
+	ldr		$inp,[sp,#68]
+	ldr		$t0,[sp,#72]
+	sub		$Ktbl,$Ktbl,#256	@ rewind $Ktbl
+	teq		$inp,$t0
+	it		eq
+	subeq		$inp,$inp,#64		@ avoid SEGV
+	vld1.8		{@X[0]},[$inp]!		@ load next input block
+	vld1.8		{@X[1]},[$inp]!
+	vld1.8		{@X[2]},[$inp]!
+	vld1.8		{@X[3]},[$inp]!
+	it		ne
+	strne		$inp,[sp,#68]
+	mov		$Xfer,sp
+___
+	&Xpreload(\&body_00_15);
+	&Xpreload(\&body_00_15);
+	&Xpreload(\&body_00_15);
+	&Xpreload(\&body_00_15);
+$code.=<<___;
+	ldr	$t0,[$t1,#0]
+	add	$A,$A,$t2			@ h+=Maj(a,b,c) from the past
+	ldr	$t2,[$t1,#4]
+	ldr	$t3,[$t1,#8]
+	ldr	$t4,[$t1,#12]
+	add	$A,$A,$t0			@ accumulate
+	ldr	$t0,[$t1,#16]
+	add	$B,$B,$t2
+	ldr	$t2,[$t1,#20]
+	add	$C,$C,$t3
+	ldr	$t3,[$t1,#24]
+	add	$D,$D,$t4
+	ldr	$t4,[$t1,#28]
+	add	$E,$E,$t0
+	str	$A,[$t1],#4
+	add	$F,$F,$t2
+	str	$B,[$t1],#4
+	add	$G,$G,$t3
+	str	$C,[$t1],#4
+	add	$H,$H,$t4
+	str	$D,[$t1],#4
+	stmia	$t1,{$E-$H}
+
+	ittte	ne
+	movne	$Xfer,sp
+	ldrne	$t1,[sp,#0]
+	eorne	$t2,$t2,$t2
+	ldreq	sp,[sp,#76]			@ restore original sp
+	itt	ne
+	eorne	$t3,$B,$C
+	bne	.L_00_48
+
+	ldmia	sp!,{r4-r12,pc}
+.size	sha256_block_data_order_neon,.-sha256_block_data_order_neon
+#endif
+___
+}}}
+######################################################################
+# ARMv8 stuff
+#
+{{{
+my ($ABCD,$EFGH,$abcd)=map("q$_",(0..2));
+my @MSG=map("q$_",(8..11));
+my ($W0,$W1,$ABCD_SAVE,$EFGH_SAVE)=map("q$_",(12..15));
+my $Ktbl="r3";
+
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+
+# if defined(__thumb2__)
+#  define INST(a,b,c,d)	.byte	c,d|0xc,a,b
+# else
+#  define INST(a,b,c,d)	.byte	a,b,c,d
+# endif
+
+.type	sha256_block_data_order_armv8,%function
+.align	5
+sha256_block_data_order_armv8:
+.LARMv8:
+	vld1.32	{$ABCD,$EFGH},[$ctx]
+	sub	$Ktbl,$Ktbl,#256+32
+	add	$len,$inp,$len,lsl#6	@ len to point at the end of inp
+	b	.Loop_v8
+
+.align	4
+.Loop_v8:
+	vld1.8		{@MSG[0]-@MSG[1]},[$inp]!
+	vld1.8		{@MSG[2]-@MSG[3]},[$inp]!
+	vld1.32		{$W0},[$Ktbl]!
+	vrev32.8	@MSG[0],@MSG[0]
+	vrev32.8	@MSG[1],@MSG[1]
+	vrev32.8	@MSG[2],@MSG[2]
+	vrev32.8	@MSG[3],@MSG[3]
+	vmov		$ABCD_SAVE,$ABCD	@ offload
+	vmov		$EFGH_SAVE,$EFGH
+	teq		$inp,$len
+___
+for($i=0;$i<12;$i++) {
+$code.=<<___;
+	vld1.32		{$W1},[$Ktbl]!
+	vadd.i32	$W0,$W0,@MSG[0]
+	sha256su0	@MSG[0],@MSG[1]
+	vmov		$abcd,$ABCD
+	sha256h		$ABCD,$EFGH,$W0
+	sha256h2	$EFGH,$abcd,$W0
+	sha256su1	@MSG[0],@MSG[2],@MSG[3]
+___
+	($W0,$W1)=($W1,$W0);	push(@MSG,shift(@MSG));
+}
+$code.=<<___;
+	vld1.32		{$W1},[$Ktbl]!
+	vadd.i32	$W0,$W0,@MSG[0]
+	vmov		$abcd,$ABCD
+	sha256h		$ABCD,$EFGH,$W0
+	sha256h2	$EFGH,$abcd,$W0
+
+	vld1.32		{$W0},[$Ktbl]!
+	vadd.i32	$W1,$W1,@MSG[1]
+	vmov		$abcd,$ABCD
+	sha256h		$ABCD,$EFGH,$W1
+	sha256h2	$EFGH,$abcd,$W1
+
+	vld1.32		{$W1},[$Ktbl]
+	vadd.i32	$W0,$W0,@MSG[2]
+	sub		$Ktbl,$Ktbl,#256-16	@ rewind
+	vmov		$abcd,$ABCD
+	sha256h		$ABCD,$EFGH,$W0
+	sha256h2	$EFGH,$abcd,$W0
+
+	vadd.i32	$W1,$W1,@MSG[3]
+	vmov		$abcd,$ABCD
+	sha256h		$ABCD,$EFGH,$W1
+	sha256h2	$EFGH,$abcd,$W1
+
+	vadd.i32	$ABCD,$ABCD,$ABCD_SAVE
+	vadd.i32	$EFGH,$EFGH,$EFGH_SAVE
+	it		ne
+	bne		.Loop_v8
+
+	vst1.32		{$ABCD,$EFGH},[$ctx]
+
+	ret		@ bx lr
+.size	sha256_block_data_order_armv8,.-sha256_block_data_order_armv8
+#endif
+___
+}}}
+$code.=<<___;
+.asciz  "SHA256 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
+.align	2
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+.comm   OPENSSL_armcap_P,4,4
+.hidden OPENSSL_armcap_P
+#endif
+___
+
+open SELF,$0;
+while(<SELF>) {
+	next if (/^#!/);
+	last if (!s/^#/@/ and !/^$/);
+	print;
+}
+close SELF;
+
+{   my  %opcode = (
+	"sha256h"	=> 0xf3000c40,	"sha256h2"	=> 0xf3100c40,
+	"sha256su0"	=> 0xf3ba03c0,	"sha256su1"	=> 0xf3200c40	);
+
+    sub unsha256 {
+	my ($mnemonic,$arg)=@_;
+
+	if ($arg =~ m/q([0-9]+)(?:,\s*q([0-9]+))?,\s*q([0-9]+)/o) {
+	    my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
+					 |(($2&7)<<17)|(($2&8)<<4)
+					 |(($3&7)<<1) |(($3&8)<<2);
+	    # Bytes are emitted in this order since ARMv7 instructions
+	    # are always encoded little-endian. The correct solution is
+	    # to use the .inst directive, but older assemblers don't
+	    # implement it:-(
+	    sprintf "INST(0x%02x,0x%02x,0x%02x,0x%02x)\t@ %s %s",
+			$word&0xff,($word>>8)&0xff,
+			($word>>16)&0xff,($word>>24)&0xff,
+			$mnemonic,$arg;
+	}
+    }
+}
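+
+# A worked example (editorial, not from the upstream source): for
+# "sha256h q0,q1,q2" the capture groups are 0, 1 and 2, so
+#	word = 0xf3000c40 | (1<<17) | (2<<1) = 0xf3020c44
+# and the line emitted is INST(0x44,0x0c,0x02,0xf3). In ARM mode the
+# macro lays the bytes out as-is (the little-endian word 0xf3020c44);
+# in Thumb-2 mode it swaps the halfwords and ORs 0x0c into the top
+# byte, turning the 0xf3 ARM prefix into the 0xff Thumb-2 one.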
+
+foreach (split($/,$code)) {
+
+	s/\`([^\`]*)\`/eval $1/geo;
+
+	s/\b(sha256\w+)\s+(q.*)/unsha256($1,$2)/geo;
+
+	s/\bret\b/bx	lr/go		or
+	s/\bbx\s+lr\b/.word\t0xe12fff1e/go;	# make it possible to compile with -march=armv4
+
+	print $_,"\n";
+}
+
+close STDOUT; # enforce flush
diff --git a/src/crypto/fipsmodule/sha/asm/sha512-586.pl b/src/crypto/fipsmodule/sha/asm/sha512-586.pl
new file mode 100644
index 0000000..a757ee7
--- /dev/null
+++ b/src/crypto/fipsmodule/sha/asm/sha512-586.pl
@@ -0,0 +1,918 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# SHA512 block transform for x86. September 2007.
+#
+# May 2013.
+#
+# Add SSSE3 code path, 20-25% improvement [over original SSE2 code].
+#
+# Performance in clock cycles per processed byte (less is better):
+#
+#		gcc	icc	x86 asm	SIMD(*)	x86_64(**)
+# Pentium	100	97	61	-	-
+# PIII		75	77	56	-	-
+# P4		116	95	82	34.6	30.8
+# AMD K8	54	55	36	20.7	9.57
+# Core2		66	57	40	15.9	9.97
+# Westmere	70	-	38	12.2	9.58
+# Sandy Bridge	58	-	35	11.9	11.2
+# Ivy Bridge	50	-	33	11.5	8.17
+# Haswell	46	-	29	11.3	7.66
+# Skylake	40	-	26	13.3	7.25
+# Bulldozer	121	-	50	14.0	13.5
+# VIA Nano	91	-	52	33	14.7
+# Atom		126	-	68	48(***)	14.7
+# Silvermont	97	-	58	42(***)	17.5
+# Goldmont	80	-	48	19.5	12.0
+#
+# (*)	whichever is best applicable.
+# (**)	x86_64 assembler performance is presented for reference
+#	purposes; the results are for integer-only code.
+# (***)	paddq is incredibly slow on Atom.
+#
+# The IALU code-path is optimized for older Pentiums. On vanilla Pentium
+# the performance improvement over compiler-generated code reaches ~60%,
+# while on PIII it is ~35%. On newer µ-archs the improvement varies from
+# 15% to 50%, but it matters less, as they are expected to execute the
+# SSE2 code-path, which is commonly ~2-3x faster [than compiler-generated
+# code]. The SSE2 code-path is as fast as the original sha512-sse2.pl,
+# even though it does not use 128-bit operations. The latter means that
+# an SSE2-aware kernel is no longer required to execute the code. Another
+# difference is that the new code reduces the number of writes, but at
+# the cost of a data cache "footprint" increased by 1/2KB.
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../../perlasm");
+require "x86asm.pl";
+
+$output=pop;
+open STDOUT,">$output";
+
+&asm_init($ARGV[0],"sha512-586.pl",$ARGV[$#ARGV] eq "386");
+
+$sse2=0;
+for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
+
+&external_label("OPENSSL_ia32cap_P") if ($sse2);
+
+$Tlo=&DWP(0,"esp");	$Thi=&DWP(4,"esp");
+$Alo=&DWP(8,"esp");	$Ahi=&DWP(8+4,"esp");
+$Blo=&DWP(16,"esp");	$Bhi=&DWP(16+4,"esp");
+$Clo=&DWP(24,"esp");	$Chi=&DWP(24+4,"esp");
+$Dlo=&DWP(32,"esp");	$Dhi=&DWP(32+4,"esp");
+$Elo=&DWP(40,"esp");	$Ehi=&DWP(40+4,"esp");
+$Flo=&DWP(48,"esp");	$Fhi=&DWP(48+4,"esp");
+$Glo=&DWP(56,"esp");	$Ghi=&DWP(56+4,"esp");
+$Hlo=&DWP(64,"esp");	$Hhi=&DWP(64+4,"esp");
+$K512="ebp";
+
+$Asse2=&QWP(0,"esp");
+$Bsse2=&QWP(8,"esp");
+$Csse2=&QWP(16,"esp");
+$Dsse2=&QWP(24,"esp");
+$Esse2=&QWP(32,"esp");
+$Fsse2=&QWP(40,"esp");
+$Gsse2=&QWP(48,"esp");
+$Hsse2=&QWP(56,"esp");
+
+$A="mm0";	# B-D and
+$E="mm4";	# F-H are commonly loaded to respectively mm1-mm3 and
+		# mm5-mm7, but it's done on on-demand basis...
+$BxC="mm2";	# ... except for B^C
+
+sub BODY_00_15_sse2 {
+    my $phase=shift;
+
+	#&movq	("mm5",$Fsse2);			# load f
+	#&movq	("mm6",$Gsse2);			# load g
+
+	&movq	("mm1",$E);			# %mm1 is sliding right
+	 &pxor	("mm5","mm6");			# f^=g
+	&psrlq	("mm1",14);
+	 &movq	($Esse2,$E);			# modulo-scheduled save e
+	 &pand	("mm5",$E);			# f&=e
+	&psllq	($E,23);			# $E is sliding left
+	 &movq	($A,"mm3")			if ($phase<2);
+	 &movq	(&QWP(8*9,"esp"),"mm7")		# save X[i]
+	&movq	("mm3","mm1");			# %mm3 is T1
+	 &psrlq	("mm1",4);
+	 &pxor	("mm5","mm6");			# Ch(e,f,g)
+	&pxor	("mm3",$E);
+	 &psllq	($E,23);
+	&pxor	("mm3","mm1");
+	 &movq	($Asse2,$A);			# modulo-scheduled save a
+	 &paddq	("mm7","mm5");			# X[i]+=Ch(e,f,g)
+	&pxor	("mm3",$E);
+	 &psrlq	("mm1",23);
+	 &paddq	("mm7",$Hsse2);			# X[i]+=h
+	&pxor	("mm3","mm1");
+	 &psllq	($E,4);
+	 &paddq	("mm7",QWP(0,$K512));		# X[i]+=K512[i]
+	&pxor	("mm3",$E);			# T1=Sigma1_512(e)
+
+	 &movq	($E,$Dsse2);			# e = load d, e in next round
+	&paddq	("mm3","mm7");			# T1+=X[i]
+	 &movq	("mm5",$A);			# %mm5 is sliding right
+	 &psrlq	("mm5",28);
+	&paddq	($E,"mm3");			# d += T1
+	 &movq	("mm6",$A);			# %mm6 is sliding left
+	 &movq	("mm7","mm5");
+	 &psllq	("mm6",25);
+	&movq	("mm1",$Bsse2);			# load b
+	 &psrlq	("mm5",6);
+	 &pxor	("mm7","mm6");
+	&sub	("esp",8);
+	 &psllq	("mm6",5);
+	 &pxor	("mm7","mm5");
+	&pxor	($A,"mm1");			# a^b, b^c in next round
+	 &psrlq	("mm5",5);
+	 &pxor	("mm7","mm6");
+	&pand	($BxC,$A);			# (b^c)&(a^b)
+	 &psllq	("mm6",6);
+	 &pxor	("mm7","mm5");
+	&pxor	($BxC,"mm1");			# [h=]Maj(a,b,c)
+	 &pxor	("mm6","mm7");			# Sigma0_512(a)
+	 &movq	("mm7",&QWP(8*(9+16-1),"esp"))	if ($phase!=0);	# pre-fetch
+	 &movq	("mm5",$Fsse2)			if ($phase==0);	# load f
+
+    if ($phase>1) {
+	&paddq	($BxC,"mm6");			# h+=Sigma0(a)
+	 &add	($K512,8);
+	#&paddq	($BxC,"mm3");			# h+=T1
+
+	($A,$BxC) = ($BxC,$A);			# rotate registers
+    } else {
+	&paddq	("mm3",$BxC);			# T1+=Maj(a,b,c)
+	 &movq	($BxC,$A);
+	 &add	($K512,8);
+	&paddq	("mm3","mm6");			# T1+=Sigma0(a)
+	 &movq	("mm6",$Gsse2)			if ($phase==0);	# load g
+	#&movq	($A,"mm3");			# h=T1
+    }
+}
+
+sub BODY_00_15_x86 {
+	#define Sigma1(x)	(ROTR((x),14) ^ ROTR((x),18)  ^ ROTR((x),41))
+	#	LO		lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
+	#	HI		hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
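+	# (Editorial sketch of the decomposition, for clarity: a 64-bit
+	# rotate is split across the 32-bit halves, so for n<32
+	#	ROTR64(x,n).lo = lo>>n ^ hi<<(32-n)
+	#	ROTR64(x,n).hi = hi>>n ^ lo<<(32-n)
+	# and for n>=32 the halves swap roles, e.g. ROTR((x),41) above
+	# contributes hi>>9^lo<<23 to LO, matching the comment.)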
+	&mov	("ecx",$Elo);
+	&mov	("edx",$Ehi);
+	&mov	("esi","ecx");
+
+	&shr	("ecx",9);	# lo>>9
+	&mov	("edi","edx");
+	&shr	("edx",9);	# hi>>9
+	&mov	("ebx","ecx");
+	&shl	("esi",14);	# lo<<14
+	&mov	("eax","edx");
+	&shl	("edi",14);	# hi<<14
+	&xor	("ebx","esi");
+
+	&shr	("ecx",14-9);	# lo>>14
+	&xor	("eax","edi");
+	&shr	("edx",14-9);	# hi>>14
+	&xor	("eax","ecx");
+	&shl	("esi",18-14);	# lo<<18
+	&xor	("ebx","edx");
+	&shl	("edi",18-14);	# hi<<18
+	&xor	("ebx","esi");
+
+	&shr	("ecx",18-14);	# lo>>18
+	&xor	("eax","edi");
+	&shr	("edx",18-14);	# hi>>18
+	&xor	("eax","ecx");
+	&shl	("esi",23-18);	# lo<<23
+	&xor	("ebx","edx");
+	&shl	("edi",23-18);	# hi<<23
+	&xor	("eax","esi");
+	&xor	("ebx","edi");			# T1 = Sigma1(e)
+
+	&mov	("ecx",$Flo);
+	&mov	("edx",$Fhi);
+	&mov	("esi",$Glo);
+	&mov	("edi",$Ghi);
+	 &add	("eax",$Hlo);
+	 &adc	("ebx",$Hhi);			# T1 += h
+	&xor	("ecx","esi");
+	&xor	("edx","edi");
+	&and	("ecx",$Elo);
+	&and	("edx",$Ehi);
+	 &add	("eax",&DWP(8*(9+15)+0,"esp"));
+	 &adc	("ebx",&DWP(8*(9+15)+4,"esp"));	# T1 += X[0]
+	&xor	("ecx","esi");
+	&xor	("edx","edi");			# Ch(e,f,g) = (f^g)&e)^g
+
+	&mov	("esi",&DWP(0,$K512));
+	&mov	("edi",&DWP(4,$K512));		# K[i]
+	&add	("eax","ecx");
+	&adc	("ebx","edx");			# T1 += Ch(e,f,g)
+	&mov	("ecx",$Dlo);
+	&mov	("edx",$Dhi);
+	&add	("eax","esi");
+	&adc	("ebx","edi");			# T1 += K[i]
+	&mov	($Tlo,"eax");
+	&mov	($Thi,"ebx");			# put T1 away
+	&add	("eax","ecx");
+	&adc	("ebx","edx");			# d += T1
+
+	#define Sigma0(x)	(ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
+	#	LO		lo>>28^hi<<4  ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
+	#	HI		hi>>28^lo<<4  ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
+	&mov	("ecx",$Alo);
+	&mov	("edx",$Ahi);
+	&mov	($Dlo,"eax");
+	&mov	($Dhi,"ebx");
+	&mov	("esi","ecx");
+
+	&shr	("ecx",2);	# lo>>2
+	&mov	("edi","edx");
+	&shr	("edx",2);	# hi>>2
+	&mov	("ebx","ecx");
+	&shl	("esi",4);	# lo<<4
+	&mov	("eax","edx");
+	&shl	("edi",4);	# hi<<4
+	&xor	("ebx","esi");
+
+	&shr	("ecx",7-2);	# lo>>7
+	&xor	("eax","edi");
+	&shr	("edx",7-2);	# hi>>7
+	&xor	("ebx","ecx");
+	&shl	("esi",25-4);	# lo<<25
+	&xor	("eax","edx");
+	&shl	("edi",25-4);	# hi<<25
+	&xor	("eax","esi");
+
+	&shr	("ecx",28-7);	# lo>>28
+	&xor	("ebx","edi");
+	&shr	("edx",28-7);	# hi>>28
+	&xor	("eax","ecx");
+	&shl	("esi",30-25);	# lo<<30
+	&xor	("ebx","edx");
+	&shl	("edi",30-25);	# hi<<30
+	&xor	("eax","esi");
+	&xor	("ebx","edi");			# Sigma0(a)
+
+	&mov	("ecx",$Alo);
+	&mov	("edx",$Ahi);
+	&mov	("esi",$Blo);
+	&mov	("edi",$Bhi);
+	&add	("eax",$Tlo);
+	&adc	("ebx",$Thi);			# T1 = Sigma0(a)+T1
+	&or	("ecx","esi");
+	&or	("edx","edi");
+	&and	("ecx",$Clo);
+	&and	("edx",$Chi);
+	&and	("esi",$Alo);
+	&and	("edi",$Ahi);
+	&or	("ecx","esi");
+	&or	("edx","edi");			# Maj(a,b,c) = ((a|b)&c)|(a&b)
+
+	&add	("eax","ecx");
+	&adc	("ebx","edx");			# T1 += Maj(a,b,c)
+	&mov	($Tlo,"eax");
+	&mov	($Thi,"ebx");
+
+	&mov	(&LB("edx"),&BP(0,$K512));	# pre-fetch LSB of *K
+	&sub	("esp",8);
+	&lea	($K512,&DWP(8,$K512));		# K++
+}
+
+
+&function_begin("sha512_block_data_order");
+	&mov	("esi",wparam(0));	# ctx
+	&mov	("edi",wparam(1));	# inp
+	&mov	("eax",wparam(2));	# num
+	&mov	("ebx","esp");		# saved sp
+
+	&call	(&label("pic_point"));	# make it PIC!
+&set_label("pic_point");
+	&blindpop($K512);
+	&lea	($K512,&DWP(&label("K512")."-".&label("pic_point"),$K512));
+
+	&sub	("esp",16);
+	&and	("esp",-64);
+
+	&shl	("eax",7);
+	&add	("eax","edi");
+	&mov	(&DWP(0,"esp"),"esi");	# ctx
+	&mov	(&DWP(4,"esp"),"edi");	# inp
+	&mov	(&DWP(8,"esp"),"eax");	# inp+num*128
+	&mov	(&DWP(12,"esp"),"ebx");	# saved sp
+
+if ($sse2) {
+	&picmeup("edx","OPENSSL_ia32cap_P",$K512,&label("K512"));
+	&mov	("ecx",&DWP(0,"edx"));
+	&test	("ecx",1<<26);
+	&jz	(&label("loop_x86"));
+
+	&mov	("edx",&DWP(4,"edx"));
+
+	# load ctx->h[0-7]
+	&movq	($A,&QWP(0,"esi"));
+	 &and	("ecx",1<<24);		# XMM registers availability
+	&movq	("mm1",&QWP(8,"esi"));
+	 &and	("edx",1<<9);		# SSSE3 bit
+	&movq	($BxC,&QWP(16,"esi"));
+	 &or	("ecx","edx");
+	&movq	("mm3",&QWP(24,"esi"));
+	&movq	($E,&QWP(32,"esi"));
+	&movq	("mm5",&QWP(40,"esi"));
+	&movq	("mm6",&QWP(48,"esi"));
+	&movq	("mm7",&QWP(56,"esi"));
+	&cmp	("ecx",1<<24|1<<9);
+	&je	(&label("SSSE3"));
+	&sub	("esp",8*10);
+	&jmp	(&label("loop_sse2"));
+
+&set_label("loop_sse2",16);
+	#&movq	($Asse2,$A);
+	&movq	($Bsse2,"mm1");
+	&movq	($Csse2,$BxC);
+	&movq	($Dsse2,"mm3");
+	#&movq	($Esse2,$E);
+	&movq	($Fsse2,"mm5");
+	&movq	($Gsse2,"mm6");
+	&pxor	($BxC,"mm1");			# magic
+	&movq	($Hsse2,"mm7");
+	&movq	("mm3",$A);			# magic
+
+	&mov	("eax",&DWP(0,"edi"));
+	&mov	("ebx",&DWP(4,"edi"));
+	&add	("edi",8);
+	&mov	("edx",15);			# counter
+	&bswap	("eax");
+	&bswap	("ebx");
+	&jmp	(&label("00_14_sse2"));
+
+&set_label("00_14_sse2",16);
+	&movd	("mm1","eax");
+	&mov	("eax",&DWP(0,"edi"));
+	&movd	("mm7","ebx");
+	&mov	("ebx",&DWP(4,"edi"));
+	&add	("edi",8);
+	&bswap	("eax");
+	&bswap	("ebx");
+	&punpckldq("mm7","mm1");
+
+	&BODY_00_15_sse2();
+
+	&dec	("edx");
+	&jnz	(&label("00_14_sse2"));
+
+	&movd	("mm1","eax");
+	&movd	("mm7","ebx");
+	&punpckldq("mm7","mm1");
+
+	&BODY_00_15_sse2(1);
+
+	&pxor	($A,$A);			# A is in %mm3
+	&mov	("edx",32);			# counter
+	&jmp	(&label("16_79_sse2"));
+
+&set_label("16_79_sse2",16);
+    for ($j=0;$j<2;$j++) {			# 2x unroll
+	#&movq	("mm7",&QWP(8*(9+16-1),"esp"));	# prefetched in BODY_00_15
+	&movq	("mm5",&QWP(8*(9+16-14),"esp"));
+	&movq	("mm1","mm7");
+	&psrlq	("mm7",1);
+	 &movq	("mm6","mm5");
+	 &psrlq	("mm5",6);
+	&psllq	("mm1",56);
+	 &paddq	($A,"mm3");			# from BODY_00_15
+	 &movq	("mm3","mm7");
+	&psrlq	("mm7",7-1);
+	 &pxor	("mm3","mm1");
+	 &psllq	("mm1",63-56);
+	&pxor	("mm3","mm7");
+	 &psrlq	("mm7",8-7);
+	&pxor	("mm3","mm1");
+	 &movq	("mm1","mm5");
+	 &psrlq	("mm5",19-6);
+	&pxor	("mm7","mm3");			# sigma0
+
+	 &psllq	("mm6",3);
+	 &pxor	("mm1","mm5");
+	&paddq	("mm7",&QWP(8*(9+16),"esp"));
+	 &pxor	("mm1","mm6");
+	 &psrlq	("mm5",61-19);
+	&paddq	("mm7",&QWP(8*(9+16-9),"esp"));
+	 &pxor	("mm1","mm5");
+	 &psllq	("mm6",45-3);
+	&movq	("mm5",$Fsse2);			# load f
+	 &pxor	("mm1","mm6");			# sigma1
+	&movq	("mm6",$Gsse2);			# load g
+
+	&paddq	("mm7","mm1");			# X[i]
+	#&movq	(&QWP(8*9,"esp"),"mm7");	# moved to BODY_00_15
+
+	&BODY_00_15_sse2(2);
+    }
+	&dec	("edx");
+	&jnz	(&label("16_79_sse2"));
+
+	#&movq	($A,$Asse2);
+	&paddq	($A,"mm3");			# from BODY_00_15
+	&movq	("mm1",$Bsse2);
+	#&movq	($BxC,$Csse2);
+	&movq	("mm3",$Dsse2);
+	#&movq	($E,$Esse2);
+	&movq	("mm5",$Fsse2);
+	&movq	("mm6",$Gsse2);
+	&movq	("mm7",$Hsse2);
+
+	&pxor	($BxC,"mm1");			# de-magic
+	&paddq	($A,&QWP(0,"esi"));
+	&paddq	("mm1",&QWP(8,"esi"));
+	&paddq	($BxC,&QWP(16,"esi"));
+	&paddq	("mm3",&QWP(24,"esi"));
+	&paddq	($E,&QWP(32,"esi"));
+	&paddq	("mm5",&QWP(40,"esi"));
+	&paddq	("mm6",&QWP(48,"esi"));
+	&paddq	("mm7",&QWP(56,"esi"));
+
+	&mov	("eax",8*80);
+	&movq	(&QWP(0,"esi"),$A);
+	&movq	(&QWP(8,"esi"),"mm1");
+	&movq	(&QWP(16,"esi"),$BxC);
+	&movq	(&QWP(24,"esi"),"mm3");
+	&movq	(&QWP(32,"esi"),$E);
+	&movq	(&QWP(40,"esi"),"mm5");
+	&movq	(&QWP(48,"esi"),"mm6");
+	&movq	(&QWP(56,"esi"),"mm7");
+
+	&lea	("esp",&DWP(0,"esp","eax"));	# destroy frame
+	&sub	($K512,"eax");			# rewind K
+
+	&cmp	("edi",&DWP(8*10+8,"esp"));	# are we done yet?
+	&jb	(&label("loop_sse2"));
+
+	&mov	("esp",&DWP(8*10+12,"esp"));	# restore sp
+	&emms	();
+&function_end_A();
+
+&set_label("SSSE3",32);
+{ my ($cnt,$frame)=("ecx","edx");
+  my @X=map("xmm$_",(0..7));
+  my $j;
+  my $i=0;
+
+	&lea	($frame,&DWP(-64,"esp"));
+	&sub	("esp",256);
+
+	# fixed stack frame layout
+	#
+	# +0	A B C D E F G H		# backing store
+	# +64	X[0]+K[i] .. X[15]+K[i]	# XMM->MM xfer area
+	# +192				# XMM off-load ring buffer
+	# +256				# saved parameters
+
+	&movdqa		(@X[1],&QWP(80*8,$K512));		# byte swap mask
+	&movdqu		(@X[0],&QWP(0,"edi"));
+	&pshufb		(@X[0],@X[1]);
+    for ($j=0;$j<8;$j++) {
+	&movdqa		(&QWP(16*(($j-1)%4),$frame),@X[3])	if ($j>4); # off-load
+	&movdqa		(@X[3],&QWP(16*($j%8),$K512));
+	&movdqa		(@X[2],@X[1])				if ($j<7); # perpetuate byte swap mask
+	&movdqu		(@X[1],&QWP(16*($j+1),"edi"))		if ($j<7); # next input
+	&movdqa		(@X[1],&QWP(16*(($j+1)%4),$frame))	if ($j==7);# restore @X[0]
+	&paddq		(@X[3],@X[0]);
+	&pshufb		(@X[1],@X[2])				if ($j<7);
+	&movdqa		(&QWP(16*($j%8)-128,$frame),@X[3]);	# xfer X[i]+K[i]
+
+	push(@X,shift(@X));					# rotate(@X)
+    }
+	#&jmp		(&label("loop_ssse3"));
+	&nop		();
+
+&set_label("loop_ssse3",32);
+	&movdqa		(@X[2],&QWP(16*(($j+1)%4),$frame));	# pre-restore @X[1]
+	&movdqa		(&QWP(16*(($j-1)%4),$frame),@X[3]);	# off-load @X[3]
+	&lea		($K512,&DWP(16*8,$K512));
+
+	#&movq	($Asse2,$A);			# off-load A-H
+	&movq	($Bsse2,"mm1");
+	 &mov	("ebx","edi");
+	&movq	($Csse2,$BxC);
+	 &lea	("edi",&DWP(128,"edi"));	# advance input
+	&movq	($Dsse2,"mm3");
+	 &cmp	("edi","eax");
+	#&movq	($Esse2,$E);
+	&movq	($Fsse2,"mm5");
+	 &cmovb	("ebx","edi");
+	&movq	($Gsse2,"mm6");
+	 &mov	("ecx",4);			# loop counter
+	&pxor	($BxC,"mm1");			# magic
+	&movq	($Hsse2,"mm7");
+	&pxor	("mm3","mm3");			# magic
+
+	&jmp		(&label("00_47_ssse3"));
+
+sub BODY_00_15_ssse3 {		# "phase-less" copy of BODY_00_15_sse2
+	(
+	'&movq	("mm1",$E)',				# %mm1 is sliding right
+	'&movq	("mm7",&QWP(((-8*$i)%128)-128,$frame))',# X[i]+K[i]
+	 '&pxor	("mm5","mm6")',				# f^=g
+	'&psrlq	("mm1",14)',
+	 '&movq	(&QWP(8*($i+4)%64,"esp"),$E)',		# modulo-scheduled save e
+	 '&pand	("mm5",$E)',				# f&=e
+	'&psllq	($E,23)',				# $E is sliding left
+	'&paddq	($A,"mm3")',				# [h+=Maj(a,b,c)]
+	'&movq	("mm3","mm1")',				# %mm3 is T1
+	 '&psrlq("mm1",4)',
+	 '&pxor	("mm5","mm6")',				# Ch(e,f,g)
+	'&pxor	("mm3",$E)',
+	 '&psllq($E,23)',
+	'&pxor	("mm3","mm1")',
+	 '&movq	(&QWP(8*$i%64,"esp"),$A)',		# modulo-scheduled save a
+	 '&paddq("mm7","mm5")',				# X[i]+=Ch(e,f,g)
+	'&pxor	("mm3",$E)',
+	 '&psrlq("mm1",23)',
+	 '&paddq("mm7",&QWP(8*($i+7)%64,"esp"))',	# X[i]+=h
+	'&pxor	("mm3","mm1")',
+	 '&psllq($E,4)',
+	'&pxor	("mm3",$E)',				# T1=Sigma1_512(e)
+
+	 '&movq	($E,&QWP(8*($i+3)%64,"esp"))',		# e = load d, e in next round
+	'&paddq	("mm3","mm7")',				# T1+=X[i]
+	 '&movq	("mm5",$A)',				# %mm5 is sliding right
+	 '&psrlq("mm5",28)',
+	'&paddq	($E,"mm3")',				# d += T1
+	 '&movq	("mm6",$A)',				# %mm6 is sliding left
+	 '&movq	("mm7","mm5")',
+	 '&psllq("mm6",25)',
+	'&movq	("mm1",&QWP(8*($i+1)%64,"esp"))',	# load b
+	 '&psrlq("mm5",6)',
+	 '&pxor	("mm7","mm6")',
+	 '&psllq("mm6",5)',
+	 '&pxor	("mm7","mm5")',
+	'&pxor	($A,"mm1")',				# a^b, b^c in next round
+	 '&psrlq("mm5",5)',
+	 '&pxor	("mm7","mm6")',
+	'&pand	($BxC,$A)',				# (b^c)&(a^b)
+	 '&psllq("mm6",6)',
+	 '&pxor	("mm7","mm5")',
+	'&pxor	($BxC,"mm1")',				# [h=]Maj(a,b,c)
+	 '&pxor	("mm6","mm7")',				# Sigma0_512(a)
+	 '&movq	("mm5",&QWP(8*($i+5-1)%64,"esp"))',	# pre-load f
+	'&paddq	($BxC,"mm6")',				# h+=Sigma0(a)
+	 '&movq	("mm6",&QWP(8*($i+6-1)%64,"esp"))',	# pre-load g
+
+	'($A,$BxC) = ($BxC,$A); $i--;'
+	);
+}
+
+&set_label("00_47_ssse3",32);
+
+    for(;$j<16;$j++) {
+	my ($t0,$t2,$t1)=@X[2..4];
+	my @insns = (&BODY_00_15_ssse3(),&BODY_00_15_ssse3());
+
+	&movdqa		($t2,@X[5]);
+	&movdqa		(@X[1],$t0);			# restore @X[1]
+	&palignr	($t0,@X[0],8);			# X[1..2]
+	&movdqa		(&QWP(16*($j%4),$frame),@X[4]);	# off-load @X[4]
+	 &palignr	($t2,@X[4],8);			# X[9..10]
+
+	&movdqa		($t1,$t0);
+	&psrlq		($t0,7);
+	 &paddq		(@X[0],$t2);			# X[0..1] += X[9..10]
+	&movdqa		($t2,$t1);
+	&psrlq		($t1,1);
+	&psllq		($t2,64-8);
+	&pxor		($t0,$t1);
+	&psrlq		($t1,8-1);
+	&pxor		($t0,$t2);
+	&psllq		($t2,8-1);
+	&pxor		($t0,$t1);
+	 &movdqa	($t1,@X[7]);
+	&pxor		($t0,$t2);			# sigma0(X[1..2])
+	 &movdqa	($t2,@X[7]);
+	 &psrlq		($t1,6);
+	&paddq		(@X[0],$t0);			# X[0..1] += sigma0(X[1..2])
+
+	&movdqa		($t0,@X[7]);
+	&psrlq		($t2,19);
+	&psllq		($t0,64-61);
+	&pxor		($t1,$t2);
+	&psrlq		($t2,61-19);
+	&pxor		($t1,$t0);
+	&psllq		($t0,61-19);
+	&pxor		($t1,$t2);
+	&movdqa		($t2,&QWP(16*(($j+2)%4),$frame));# pre-restore @X[1]
+	&pxor		($t1,$t0);			# sigma0(X[1..2])
+	&movdqa		($t0,&QWP(16*($j%8),$K512));
+	 eval(shift(@insns));
+	&paddq		(@X[0],$t1);			# X[0..1] += sigma0(X[14..15])
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	 eval(shift(@insns));
+	&paddq		($t0,@X[0]);
+	 foreach(@insns) { eval; }
+	&movdqa		(&QWP(16*($j%8)-128,$frame),$t0);# xfer X[i]+K[i]
+
+	push(@X,shift(@X));				# rotate(@X)
+    }
+	&lea		($K512,&DWP(16*8,$K512));
+	&dec		("ecx");
+	&jnz		(&label("00_47_ssse3"));
+
+	&movdqa		(@X[1],&QWP(0,$K512));		# byte swap mask
+	&lea		($K512,&DWP(-80*8,$K512));	# rewind
+	&movdqu		(@X[0],&QWP(0,"ebx"));
+	&pshufb		(@X[0],@X[1]);
+
+    for ($j=0;$j<8;$j++) {	# load next or same block
+	my @insns = (&BODY_00_15_ssse3(),&BODY_00_15_ssse3());
+
+	&movdqa		(&QWP(16*(($j-1)%4),$frame),@X[3])	if ($j>4); # off-load
+	&movdqa		(@X[3],&QWP(16*($j%8),$K512));
+	&movdqa		(@X[2],@X[1])				if ($j<7); # perpetuate byte swap mask
+	&movdqu		(@X[1],&QWP(16*($j+1),"ebx"))		if ($j<7); # next input
+	&movdqa		(@X[1],&QWP(16*(($j+1)%4),$frame))	if ($j==7);# restore @X[0]
+	&paddq		(@X[3],@X[0]);
+	&pshufb		(@X[1],@X[2])				if ($j<7);
+	 foreach(@insns) { eval; }
+	&movdqa		(&QWP(16*($j%8)-128,$frame),@X[3]);# xfer X[i]+K[i]
+
+	push(@X,shift(@X));				# rotate(@X)
+    }
+
+	#&movq	($A,$Asse2);			# load A-H
+	&movq	("mm1",$Bsse2);
+	&paddq	($A,"mm3");			# from BODY_00_15
+	#&movq	($BxC,$Csse2);
+	&movq	("mm3",$Dsse2);
+	#&movq	($E,$Esse2);
+	#&movq	("mm5",$Fsse2);
+	#&movq	("mm6",$Gsse2);
+	&movq	("mm7",$Hsse2);
+
+	&pxor	($BxC,"mm1");			# de-magic
+	&paddq	($A,&QWP(0,"esi"));
+	&paddq	("mm1",&QWP(8,"esi"));
+	&paddq	($BxC,&QWP(16,"esi"));
+	&paddq	("mm3",&QWP(24,"esi"));
+	&paddq	($E,&QWP(32,"esi"));
+	&paddq	("mm5",&QWP(40,"esi"));
+	&paddq	("mm6",&QWP(48,"esi"));
+	&paddq	("mm7",&QWP(56,"esi"));
+
+	&movq	(&QWP(0,"esi"),$A);
+	&movq	(&QWP(8,"esi"),"mm1");
+	&movq	(&QWP(16,"esi"),$BxC);
+	&movq	(&QWP(24,"esi"),"mm3");
+	&movq	(&QWP(32,"esi"),$E);
+	&movq	(&QWP(40,"esi"),"mm5");
+	&movq	(&QWP(48,"esi"),"mm6");
+	&movq	(&QWP(56,"esi"),"mm7");
+
+    	&cmp	("edi","eax")			# are we done yet?
+	&jb	(&label("loop_ssse3"));
+
+	&mov	("esp",&DWP(64+12,$frame));	# restore sp
+	&emms	();
+}
+&function_end_A();
+}
+&set_label("loop_x86",16);
+    # copy input block to stack reversing byte and qword order
+    for ($i=0;$i<8;$i++) {
+	&mov	("eax",&DWP($i*16+0,"edi"));
+	&mov	("ebx",&DWP($i*16+4,"edi"));
+	&mov	("ecx",&DWP($i*16+8,"edi"));
+	&mov	("edx",&DWP($i*16+12,"edi"));
+	&bswap	("eax");
+	&bswap	("ebx");
+	&bswap	("ecx");
+	&bswap	("edx");
+	&push	("eax");
+	&push	("ebx");
+	&push	("ecx");
+	&push	("edx");
+    }
+	&add	("edi",128);
+	&sub	("esp",9*8);		# place for T,A,B,C,D,E,F,G,H
+	&mov	(&DWP(8*(9+16)+4,"esp"),"edi");
+
+	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
+	&lea	("edi",&DWP(8,"esp"));
+	&mov	("ecx",16);
+	&data_word(0xA5F3F689);		# rep movsd
+
+&set_label("00_15_x86",16);
+	&BODY_00_15_x86();
+
+	&cmp	(&LB("edx"),0x94);
+	&jne	(&label("00_15_x86"));
+
+&set_label("16_79_x86",16);
+	#define sigma0(x)	(ROTR((x),1)  ^ ROTR((x),8)  ^ ((x)>>7))
+	#	LO		lo>>1^hi<<31  ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
+	#	HI		hi>>1^lo<<31  ^ hi>>8^lo<<24 ^ hi>>7
+	&mov	("ecx",&DWP(8*(9+15+16-1)+0,"esp"));
+	&mov	("edx",&DWP(8*(9+15+16-1)+4,"esp"));
+	&mov	("esi","ecx");
+
+	&shr	("ecx",1);	# lo>>1
+	&mov	("edi","edx");
+	&shr	("edx",1);	# hi>>1
+	&mov	("eax","ecx");
+	&shl	("esi",24);	# lo<<24
+	&mov	("ebx","edx");
+	&shl	("edi",24);	# hi<<24
+	&xor	("ebx","esi");
+
+	&shr	("ecx",7-1);	# lo>>7
+	&xor	("eax","edi");
+	&shr	("edx",7-1);	# hi>>7
+	&xor	("eax","ecx");
+	&shl	("esi",31-24);	# lo<<31
+	&xor	("ebx","edx");
+	&shl	("edi",25-24);	# hi<<25
+	&xor	("ebx","esi");
+
+	&shr	("ecx",8-7);	# lo>>8
+	&xor	("eax","edi");
+	&shr	("edx",8-7);	# hi>>8
+	&xor	("eax","ecx");
+	&shl	("edi",31-25);	# hi<<31
+	&xor	("ebx","edx");
+	&xor	("eax","edi");			# T1 = sigma0(X[-15])
+
+	&mov	(&DWP(0,"esp"),"eax");
+	&mov	(&DWP(4,"esp"),"ebx");		# put T1 away
+
+	#define sigma1(x)	(ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
+	#	LO		lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
+	#	HI		hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
+	&mov	("ecx",&DWP(8*(9+15+16-14)+0,"esp"));
+	&mov	("edx",&DWP(8*(9+15+16-14)+4,"esp"));
+	&mov	("esi","ecx");
+
+	&shr	("ecx",6);	# lo>>6
+	&mov	("edi","edx");
+	&shr	("edx",6);	# hi>>6
+	&mov	("eax","ecx");
+	&shl	("esi",3);	# lo<<3
+	&mov	("ebx","edx");
+	&shl	("edi",3);	# hi<<3
+	&xor	("eax","esi");
+
+	&shr	("ecx",19-6);	# lo>>19
+	&xor	("ebx","edi");
+	&shr	("edx",19-6);	# hi>>19
+	&xor	("eax","ecx");
+	&shl	("esi",13-3);	# lo<<13
+	&xor	("ebx","edx");
+	&shl	("edi",13-3);	# hi<<13
+	&xor	("ebx","esi");
+
+	&shr	("ecx",29-19);	# lo>>29
+	&xor	("eax","edi");
+	&shr	("edx",29-19);	# hi>>29
+	&xor	("ebx","ecx");
+	&shl	("edi",26-13);	# hi<<26
+	&xor	("eax","edx");
+	&xor	("eax","edi");			# sigma1(X[-2])
+
+	&mov	("ecx",&DWP(8*(9+15+16)+0,"esp"));
+	&mov	("edx",&DWP(8*(9+15+16)+4,"esp"));
+	&add	("eax",&DWP(0,"esp"));
+	&adc	("ebx",&DWP(4,"esp"));		# T1 = sigma1(X[-2])+T1
+	&mov	("esi",&DWP(8*(9+15+16-9)+0,"esp"));
+	&mov	("edi",&DWP(8*(9+15+16-9)+4,"esp"));
+	&add	("eax","ecx");
+	&adc	("ebx","edx");			# T1 += X[-16]
+	&add	("eax","esi");
+	&adc	("ebx","edi");			# T1 += X[-7]
+	&mov	(&DWP(8*(9+15)+0,"esp"),"eax");
+	&mov	(&DWP(8*(9+15)+4,"esp"),"ebx");	# save X[0]
+
+	&BODY_00_15_x86();
+
+	&cmp	(&LB("edx"),0x17);
+	&jne	(&label("16_79_x86"));
+
+	&mov	("esi",&DWP(8*(9+16+80)+0,"esp"));# ctx
+	&mov	("edi",&DWP(8*(9+16+80)+4,"esp"));# inp
+    for($i=0;$i<4;$i++) {
+	&mov	("eax",&DWP($i*16+0,"esi"));
+	&mov	("ebx",&DWP($i*16+4,"esi"));
+	&mov	("ecx",&DWP($i*16+8,"esi"));
+	&mov	("edx",&DWP($i*16+12,"esi"));
+	&add	("eax",&DWP(8+($i*16)+0,"esp"));
+	&adc	("ebx",&DWP(8+($i*16)+4,"esp"));
+	&mov	(&DWP($i*16+0,"esi"),"eax");
+	&mov	(&DWP($i*16+4,"esi"),"ebx");
+	&add	("ecx",&DWP(8+($i*16)+8,"esp"));
+	&adc	("edx",&DWP(8+($i*16)+12,"esp"));
+	&mov	(&DWP($i*16+8,"esi"),"ecx");
+	&mov	(&DWP($i*16+12,"esi"),"edx");
+    }
+	&add	("esp",8*(9+16+80));		# destroy frame
+	&sub	($K512,8*80);			# rewind K
+
+	&cmp	("edi",&DWP(8,"esp"));		# are we done yet?
+	&jb	(&label("loop_x86"));
+
+	&mov	("esp",&DWP(12,"esp"));		# restore sp
+&function_end_A();
+
+&set_label("K512",64);	# Yes! I keep it in the code segment!
+	&data_word(0xd728ae22,0x428a2f98);	# u64
+	&data_word(0x23ef65cd,0x71374491);	# u64
+	&data_word(0xec4d3b2f,0xb5c0fbcf);	# u64
+	&data_word(0x8189dbbc,0xe9b5dba5);	# u64
+	&data_word(0xf348b538,0x3956c25b);	# u64
+	&data_word(0xb605d019,0x59f111f1);	# u64
+	&data_word(0xaf194f9b,0x923f82a4);	# u64
+	&data_word(0xda6d8118,0xab1c5ed5);	# u64
+	&data_word(0xa3030242,0xd807aa98);	# u64
+	&data_word(0x45706fbe,0x12835b01);	# u64
+	&data_word(0x4ee4b28c,0x243185be);	# u64
+	&data_word(0xd5ffb4e2,0x550c7dc3);	# u64
+	&data_word(0xf27b896f,0x72be5d74);	# u64
+	&data_word(0x3b1696b1,0x80deb1fe);	# u64
+	&data_word(0x25c71235,0x9bdc06a7);	# u64
+	&data_word(0xcf692694,0xc19bf174);	# u64
+	&data_word(0x9ef14ad2,0xe49b69c1);	# u64
+	&data_word(0x384f25e3,0xefbe4786);	# u64
+	&data_word(0x8b8cd5b5,0x0fc19dc6);	# u64
+	&data_word(0x77ac9c65,0x240ca1cc);	# u64
+	&data_word(0x592b0275,0x2de92c6f);	# u64
+	&data_word(0x6ea6e483,0x4a7484aa);	# u64
+	&data_word(0xbd41fbd4,0x5cb0a9dc);	# u64
+	&data_word(0x831153b5,0x76f988da);	# u64
+	&data_word(0xee66dfab,0x983e5152);	# u64
+	&data_word(0x2db43210,0xa831c66d);	# u64
+	&data_word(0x98fb213f,0xb00327c8);	# u64
+	&data_word(0xbeef0ee4,0xbf597fc7);	# u64
+	&data_word(0x3da88fc2,0xc6e00bf3);	# u64
+	&data_word(0x930aa725,0xd5a79147);	# u64
+	&data_word(0xe003826f,0x06ca6351);	# u64
+	&data_word(0x0a0e6e70,0x14292967);	# u64
+	&data_word(0x46d22ffc,0x27b70a85);	# u64
+	&data_word(0x5c26c926,0x2e1b2138);	# u64
+	&data_word(0x5ac42aed,0x4d2c6dfc);	# u64
+	&data_word(0x9d95b3df,0x53380d13);	# u64
+	&data_word(0x8baf63de,0x650a7354);	# u64
+	&data_word(0x3c77b2a8,0x766a0abb);	# u64
+	&data_word(0x47edaee6,0x81c2c92e);	# u64
+	&data_word(0x1482353b,0x92722c85);	# u64
+	&data_word(0x4cf10364,0xa2bfe8a1);	# u64
+	&data_word(0xbc423001,0xa81a664b);	# u64
+	&data_word(0xd0f89791,0xc24b8b70);	# u64
+	&data_word(0x0654be30,0xc76c51a3);	# u64
+	&data_word(0xd6ef5218,0xd192e819);	# u64
+	&data_word(0x5565a910,0xd6990624);	# u64
+	&data_word(0x5771202a,0xf40e3585);	# u64
+	&data_word(0x32bbd1b8,0x106aa070);	# u64
+	&data_word(0xb8d2d0c8,0x19a4c116);	# u64
+	&data_word(0x5141ab53,0x1e376c08);	# u64
+	&data_word(0xdf8eeb99,0x2748774c);	# u64
+	&data_word(0xe19b48a8,0x34b0bcb5);	# u64
+	&data_word(0xc5c95a63,0x391c0cb3);	# u64
+	&data_word(0xe3418acb,0x4ed8aa4a);	# u64
+	&data_word(0x7763e373,0x5b9cca4f);	# u64
+	&data_word(0xd6b2b8a3,0x682e6ff3);	# u64
+	&data_word(0x5defb2fc,0x748f82ee);	# u64
+	&data_word(0x43172f60,0x78a5636f);	# u64
+	&data_word(0xa1f0ab72,0x84c87814);	# u64
+	&data_word(0x1a6439ec,0x8cc70208);	# u64
+	&data_word(0x23631e28,0x90befffa);	# u64
+	&data_word(0xde82bde9,0xa4506ceb);	# u64
+	&data_word(0xb2c67915,0xbef9a3f7);	# u64
+	&data_word(0xe372532b,0xc67178f2);	# u64
+	&data_word(0xea26619c,0xca273ece);	# u64
+	&data_word(0x21c0c207,0xd186b8c7);	# u64
+	&data_word(0xcde0eb1e,0xeada7dd6);	# u64
+	&data_word(0xee6ed178,0xf57d4f7f);	# u64
+	&data_word(0x72176fba,0x06f067aa);	# u64
+	&data_word(0xa2c898a6,0x0a637dc5);	# u64
+	&data_word(0xbef90dae,0x113f9804);	# u64
+	&data_word(0x131c471b,0x1b710b35);	# u64
+	&data_word(0x23047d84,0x28db77f5);	# u64
+	&data_word(0x40c72493,0x32caab7b);	# u64
+	&data_word(0x15c9bebc,0x3c9ebe0a);	# u64
+	&data_word(0x9c100d4c,0x431d67c4);	# u64
+	&data_word(0xcb3e42b6,0x4cc5d4be);	# u64
+	&data_word(0xfc657e2a,0x597f299c);	# u64
+	&data_word(0x3ad6faec,0x5fcb6fab);	# u64
+	&data_word(0x4a475817,0x6c44198c);	# u64
+
+	&data_word(0x04050607,0x00010203);	# byte swap
+	&data_word(0x0c0d0e0f,0x08090a0b);	# mask
+&function_end_B("sha512_block_data_order");
+&asciz("SHA512 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");
+
+&asm_finish();
+
+close STDOUT;
diff --git a/src/crypto/fipsmodule/sha/asm/sha512-armv4.pl b/src/crypto/fipsmodule/sha/asm/sha512-armv4.pl
new file mode 100644
index 0000000..fd92f7a
--- /dev/null
+++ b/src/crypto/fipsmodule/sha/asm/sha512-armv4.pl
@@ -0,0 +1,666 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+#
+# Permission to use under GPL terms is granted.
+# ====================================================================
+
+# SHA512 block procedure for ARMv4. September 2007.
+
+# This code is ~4.5 (four and a half) times faster than code generated
+# by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue
+# Xscale PXA250 core].
+#
+# July 2010.
+#
+# Rescheduling for dual-issue pipeline resulted in 6% improvement on
+# Cortex A8 core and ~40 cycles per processed byte.
+
+# February 2011.
+#
+# Profiler-assisted and platform-specific optimization resulted in 7%
+# improvement on Cortex-A8 core and ~38 cycles per byte.
+
+# March 2011.
+#
+# Add NEON implementation. On Cortex A8 it was measured to process
+# one byte in 23.3 cycles or ~60% faster than integer-only code.
+
+# August 2012.
+#
+# Improve NEON performance by 12% on Snapdragon S4. In absolute
+# terms it's 22.6 cycles per byte, which is a disappointing result.
+# Technical writers asserted that the 3-way S4 pipeline can sustain
+# multiple NEON instructions per cycle, but dual NEON issue could
+# not be observed; see http://www.openssl.org/~appro/Snapdragon-S4.html
+# for further details. As a side note, Cortex-A15 processes one byte
+# in 16 cycles.
+
+# Byte order [in]dependence. =========================================
+#
+# Originally the caller was expected to maintain a specific *dword*
+# order in h[0-7], namely with the most significant dword at the
+# *lower* address, which was reflected in the two parameters below
+# as 0 and 4. Now the caller is expected to maintain native byte
+# order for whole 64-bit values.
+$hi="HI";
+$lo="LO";
+# ====================================================================
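+#
+# As an illustration (editorial note): with these definitions, on a
+# little-endian build LO resolves to 0 and HI to 4, so the low dword
+# of each h[] element sits at the lower address; on big-endian the
+# two #defines emitted below swap, and the WORD64() macro lays the K
+# table out in the matching dword order.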
+
+$flavour = shift;
+if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
+else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }
+
+if ($flavour && $flavour ne "void") {
+    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+    ( $xlate="${dir}../../../perlasm/arm-xlate.pl" and -f $xlate) or
+    die "can't locate arm-xlate.pl";
+
+    open STDOUT,"| \"$^X\" $xlate $flavour $output";
+} else {
+    open STDOUT,">$output";
+}
+
+$ctx="r0";	# parameter block
+$inp="r1";
+$len="r2";
+
+$Tlo="r3";
+$Thi="r4";
+$Alo="r5";
+$Ahi="r6";
+$Elo="r7";
+$Ehi="r8";
+$t0="r9";
+$t1="r10";
+$t2="r11";
+$t3="r12";
+############	r13 is stack pointer
+$Ktbl="r14";
+############	r15 is program counter
+
+$Aoff=8*0;
+$Boff=8*1;
+$Coff=8*2;
+$Doff=8*3;
+$Eoff=8*4;
+$Foff=8*5;
+$Goff=8*6;
+$Hoff=8*7;
+$Xoff=8*8;
+
+sub BODY_00_15() {
+my $magic = shift;
+$code.=<<___;
+	@ Sigma1(x)	(ROTR((x),14) ^ ROTR((x),18)  ^ ROTR((x),41))
+	@ LO		lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
+	@ HI		hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
+	mov	$t0,$Elo,lsr#14
+	str	$Tlo,[sp,#$Xoff+0]
+	mov	$t1,$Ehi,lsr#14
+	str	$Thi,[sp,#$Xoff+4]
+	eor	$t0,$t0,$Ehi,lsl#18
+	ldr	$t2,[sp,#$Hoff+0]	@ h.lo
+	eor	$t1,$t1,$Elo,lsl#18
+	ldr	$t3,[sp,#$Hoff+4]	@ h.hi
+	eor	$t0,$t0,$Elo,lsr#18
+	eor	$t1,$t1,$Ehi,lsr#18
+	eor	$t0,$t0,$Ehi,lsl#14
+	eor	$t1,$t1,$Elo,lsl#14
+	eor	$t0,$t0,$Ehi,lsr#9
+	eor	$t1,$t1,$Elo,lsr#9
+	eor	$t0,$t0,$Elo,lsl#23
+	eor	$t1,$t1,$Ehi,lsl#23	@ Sigma1(e)
+	adds	$Tlo,$Tlo,$t0
+	ldr	$t0,[sp,#$Foff+0]	@ f.lo
+	adc	$Thi,$Thi,$t1		@ T += Sigma1(e)
+	ldr	$t1,[sp,#$Foff+4]	@ f.hi
+	adds	$Tlo,$Tlo,$t2
+	ldr	$t2,[sp,#$Goff+0]	@ g.lo
+	adc	$Thi,$Thi,$t3		@ T += h
+	ldr	$t3,[sp,#$Goff+4]	@ g.hi
+
+	eor	$t0,$t0,$t2
+	str	$Elo,[sp,#$Eoff+0]
+	eor	$t1,$t1,$t3
+	str	$Ehi,[sp,#$Eoff+4]
+	and	$t0,$t0,$Elo
+	str	$Alo,[sp,#$Aoff+0]
+	and	$t1,$t1,$Ehi
+	str	$Ahi,[sp,#$Aoff+4]
+	eor	$t0,$t0,$t2
+	ldr	$t2,[$Ktbl,#$lo]	@ K[i].lo
+	eor	$t1,$t1,$t3		@ Ch(e,f,g)
+	ldr	$t3,[$Ktbl,#$hi]	@ K[i].hi
+
+	adds	$Tlo,$Tlo,$t0
+	ldr	$Elo,[sp,#$Doff+0]	@ d.lo
+	adc	$Thi,$Thi,$t1		@ T += Ch(e,f,g)
+	ldr	$Ehi,[sp,#$Doff+4]	@ d.hi
+	adds	$Tlo,$Tlo,$t2
+	and	$t0,$t2,#0xff
+	adc	$Thi,$Thi,$t3		@ T += K[i]
+	adds	$Elo,$Elo,$Tlo
+	ldr	$t2,[sp,#$Boff+0]	@ b.lo
+	adc	$Ehi,$Ehi,$Thi		@ d += T
+	teq	$t0,#$magic
+
+	ldr	$t3,[sp,#$Coff+0]	@ c.lo
+#if __ARM_ARCH__>=7
+	it	eq			@ Thumb2 thing, sanity check in ARM
+#endif
+	orreq	$Ktbl,$Ktbl,#1
+	@ Sigma0(x)	(ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
+	@ LO		lo>>28^hi<<4  ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
+	@ HI		hi>>28^lo<<4  ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
+	mov	$t0,$Alo,lsr#28
+	mov	$t1,$Ahi,lsr#28
+	eor	$t0,$t0,$Ahi,lsl#4
+	eor	$t1,$t1,$Alo,lsl#4
+	eor	$t0,$t0,$Ahi,lsr#2
+	eor	$t1,$t1,$Alo,lsr#2
+	eor	$t0,$t0,$Alo,lsl#30
+	eor	$t1,$t1,$Ahi,lsl#30
+	eor	$t0,$t0,$Ahi,lsr#7
+	eor	$t1,$t1,$Alo,lsr#7
+	eor	$t0,$t0,$Alo,lsl#25
+	eor	$t1,$t1,$Ahi,lsl#25	@ Sigma0(a)
+	adds	$Tlo,$Tlo,$t0
+	and	$t0,$Alo,$t2
+	adc	$Thi,$Thi,$t1		@ T += Sigma0(a)
+
+	ldr	$t1,[sp,#$Boff+4]	@ b.hi
+	orr	$Alo,$Alo,$t2
+	ldr	$t2,[sp,#$Coff+4]	@ c.hi
+	and	$Alo,$Alo,$t3
+	and	$t3,$Ahi,$t1
+	orr	$Ahi,$Ahi,$t1
+	orr	$Alo,$Alo,$t0		@ Maj(a,b,c).lo
+	and	$Ahi,$Ahi,$t2
+	adds	$Alo,$Alo,$Tlo
+	orr	$Ahi,$Ahi,$t3		@ Maj(a,b,c).hi
+	sub	sp,sp,#8
+	adc	$Ahi,$Ahi,$Thi		@ h += T
+	tst	$Ktbl,#1
+	add	$Ktbl,$Ktbl,#8
+___
+}
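+
+# (Editorial note on $magic: the loops detect their final iteration
+# without a counter by comparing the low byte of K[i].lo, masked
+# above into $t0, against a constant unique to the last round of the
+# phase: K512[15] = 0xc19bf174cf692694 ends in 0x94 for .L00_15 and
+# K512[79] = 0x6c44198c4a475817 ends in 0x17 for .L16_79.)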
+$code=<<___;
+#ifndef __KERNEL__
+# include <openssl/arm_arch.h>
+# define VFP_ABI_PUSH	vstmdb	sp!,{d8-d15}
+# define VFP_ABI_POP	vldmia	sp!,{d8-d15}
+#else
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_MAX_ARCH__ 7
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+#endif
+
+#ifdef __ARMEL__
+# define LO 0
+# define HI 4
+# define WORD64(hi0,lo0,hi1,lo1)	.word	lo0,hi0, lo1,hi1
+#else
+# define HI 0
+# define LO 4
+# define WORD64(hi0,lo0,hi1,lo1)	.word	hi0,lo0, hi1,lo1
+#endif
+
+.text
+#if __ARM_ARCH__<7 || defined(__APPLE__)
+.code	32
+#else
+.syntax unified
+# ifdef __thumb2__
+#  define adrl adr
+.thumb
+# else
+.code   32
+# endif
+#endif
+
+.type	K512,%object
+.align	5
+K512:
+WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
+WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
+WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
+WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
+WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
+WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
+WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
+WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
+WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
+WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
+WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
+WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
+WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
+WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
+WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
+WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
+WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
+WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
+WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
+WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
+WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
+WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
+WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
+WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
+WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
+WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
+WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
+WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
+WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
+WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
+WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
+WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
+WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
+WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
+WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
+WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
+WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
+WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
+WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
+WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
+.size	K512,.-K512
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+.LOPENSSL_armcap:
+.word	OPENSSL_armcap_P-.Lsha512_block_data_order
+.skip	32-4
+#else
+.skip	32
+#endif
+
+.global	sha512_block_data_order
+.type	sha512_block_data_order,%function
+sha512_block_data_order:
+.Lsha512_block_data_order:
+#if __ARM_ARCH__<7
+	sub	r3,pc,#8		@ sha512_block_data_order
+#else
+	adr	r3,sha512_block_data_order
+#endif
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+	ldr	r12,.LOPENSSL_armcap
+	ldr	r12,[r3,r12]		@ OPENSSL_armcap_P
+#ifdef	__APPLE__
+	ldr	r12,[r12]
+#endif
+	tst	r12,#ARMV7_NEON
+	bne	.LNEON
+#endif
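+	@ (editorial note: r12 first holds the link-time offset of
+	@ OPENSSL_armcap_P from .Lsha512_block_data_order, so adding
+	@ r3, that label's runtime address, and loading through the
+	@ sum fetches the capability word without a dynamic relocation)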
+	add	$len,$inp,$len,lsl#7	@ len to point at the end of inp
+	stmdb	sp!,{r4-r12,lr}
+	sub	$Ktbl,r3,#672		@ K512
+	sub	sp,sp,#9*8
+
+	ldr	$Elo,[$ctx,#$Eoff+$lo]
+	ldr	$Ehi,[$ctx,#$Eoff+$hi]
+	ldr	$t0, [$ctx,#$Goff+$lo]
+	ldr	$t1, [$ctx,#$Goff+$hi]
+	ldr	$t2, [$ctx,#$Hoff+$lo]
+	ldr	$t3, [$ctx,#$Hoff+$hi]
+.Loop:
+	str	$t0, [sp,#$Goff+0]
+	str	$t1, [sp,#$Goff+4]
+	str	$t2, [sp,#$Hoff+0]
+	str	$t3, [sp,#$Hoff+4]
+	ldr	$Alo,[$ctx,#$Aoff+$lo]
+	ldr	$Ahi,[$ctx,#$Aoff+$hi]
+	ldr	$Tlo,[$ctx,#$Boff+$lo]
+	ldr	$Thi,[$ctx,#$Boff+$hi]
+	ldr	$t0, [$ctx,#$Coff+$lo]
+	ldr	$t1, [$ctx,#$Coff+$hi]
+	ldr	$t2, [$ctx,#$Doff+$lo]
+	ldr	$t3, [$ctx,#$Doff+$hi]
+	str	$Tlo,[sp,#$Boff+0]
+	str	$Thi,[sp,#$Boff+4]
+	str	$t0, [sp,#$Coff+0]
+	str	$t1, [sp,#$Coff+4]
+	str	$t2, [sp,#$Doff+0]
+	str	$t3, [sp,#$Doff+4]
+	ldr	$Tlo,[$ctx,#$Foff+$lo]
+	ldr	$Thi,[$ctx,#$Foff+$hi]
+	str	$Tlo,[sp,#$Foff+0]
+	str	$Thi,[sp,#$Foff+4]
+
+.L00_15:
+#if __ARM_ARCH__<7
+	ldrb	$Tlo,[$inp,#7]
+	ldrb	$t0, [$inp,#6]
+	ldrb	$t1, [$inp,#5]
+	ldrb	$t2, [$inp,#4]
+	ldrb	$Thi,[$inp,#3]
+	ldrb	$t3, [$inp,#2]
+	orr	$Tlo,$Tlo,$t0,lsl#8
+	ldrb	$t0, [$inp,#1]
+	orr	$Tlo,$Tlo,$t1,lsl#16
+	ldrb	$t1, [$inp],#8
+	orr	$Tlo,$Tlo,$t2,lsl#24
+	orr	$Thi,$Thi,$t3,lsl#8
+	orr	$Thi,$Thi,$t0,lsl#16
+	orr	$Thi,$Thi,$t1,lsl#24
+#else
+	ldr	$Tlo,[$inp,#4]
+	ldr	$Thi,[$inp],#8
+#ifdef __ARMEL__
+	rev	$Tlo,$Tlo
+	rev	$Thi,$Thi
+#endif
+#endif
+___
+	&BODY_00_15(0x94);
+$code.=<<___;
+	tst	$Ktbl,#1
+	beq	.L00_15
+	ldr	$t0,[sp,#`$Xoff+8*(16-1)`+0]
+	ldr	$t1,[sp,#`$Xoff+8*(16-1)`+4]
+	bic	$Ktbl,$Ktbl,#1
+.L16_79:
+	@ sigma0(x)	(ROTR((x),1)  ^ ROTR((x),8)  ^ ((x)>>7))
+	@ LO		lo>>1^hi<<31  ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
+	@ HI		hi>>1^lo<<31  ^ hi>>8^lo<<24 ^ hi>>7
+	mov	$Tlo,$t0,lsr#1
+	ldr	$t2,[sp,#`$Xoff+8*(16-14)`+0]
+	mov	$Thi,$t1,lsr#1
+	ldr	$t3,[sp,#`$Xoff+8*(16-14)`+4]
+	eor	$Tlo,$Tlo,$t1,lsl#31
+	eor	$Thi,$Thi,$t0,lsl#31
+	eor	$Tlo,$Tlo,$t0,lsr#8
+	eor	$Thi,$Thi,$t1,lsr#8
+	eor	$Tlo,$Tlo,$t1,lsl#24
+	eor	$Thi,$Thi,$t0,lsl#24
+	eor	$Tlo,$Tlo,$t0,lsr#7
+	eor	$Thi,$Thi,$t1,lsr#7
+	eor	$Tlo,$Tlo,$t1,lsl#25
+
+	@ sigma1(x)	(ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
+	@ LO		lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
+	@ HI		hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
+	mov	$t0,$t2,lsr#19
+	mov	$t1,$t3,lsr#19
+	eor	$t0,$t0,$t3,lsl#13
+	eor	$t1,$t1,$t2,lsl#13
+	eor	$t0,$t0,$t3,lsr#29
+	eor	$t1,$t1,$t2,lsr#29
+	eor	$t0,$t0,$t2,lsl#3
+	eor	$t1,$t1,$t3,lsl#3
+	eor	$t0,$t0,$t2,lsr#6
+	eor	$t1,$t1,$t3,lsr#6
+	ldr	$t2,[sp,#`$Xoff+8*(16-9)`+0]
+	eor	$t0,$t0,$t3,lsl#26
+
+	ldr	$t3,[sp,#`$Xoff+8*(16-9)`+4]
+	adds	$Tlo,$Tlo,$t0
+	ldr	$t0,[sp,#`$Xoff+8*16`+0]
+	adc	$Thi,$Thi,$t1
+
+	ldr	$t1,[sp,#`$Xoff+8*16`+4]
+	adds	$Tlo,$Tlo,$t2
+	adc	$Thi,$Thi,$t3
+	adds	$Tlo,$Tlo,$t0
+	adc	$Thi,$Thi,$t1
+___
+	&BODY_00_15(0x17);
+$code.=<<___;
+#if __ARM_ARCH__>=7
+	ittt	eq			@ Thumb2 thing, sanity check in ARM
+#endif
+	ldreq	$t0,[sp,#`$Xoff+8*(16-1)`+0]
+	ldreq	$t1,[sp,#`$Xoff+8*(16-1)`+4]
+	beq	.L16_79
+	bic	$Ktbl,$Ktbl,#1
+
+	ldr	$Tlo,[sp,#$Boff+0]
+	ldr	$Thi,[sp,#$Boff+4]
+	ldr	$t0, [$ctx,#$Aoff+$lo]
+	ldr	$t1, [$ctx,#$Aoff+$hi]
+	ldr	$t2, [$ctx,#$Boff+$lo]
+	ldr	$t3, [$ctx,#$Boff+$hi]
+	adds	$t0,$Alo,$t0
+	str	$t0, [$ctx,#$Aoff+$lo]
+	adc	$t1,$Ahi,$t1
+	str	$t1, [$ctx,#$Aoff+$hi]
+	adds	$t2,$Tlo,$t2
+	str	$t2, [$ctx,#$Boff+$lo]
+	adc	$t3,$Thi,$t3
+	str	$t3, [$ctx,#$Boff+$hi]
+
+	ldr	$Alo,[sp,#$Coff+0]
+	ldr	$Ahi,[sp,#$Coff+4]
+	ldr	$Tlo,[sp,#$Doff+0]
+	ldr	$Thi,[sp,#$Doff+4]
+	ldr	$t0, [$ctx,#$Coff+$lo]
+	ldr	$t1, [$ctx,#$Coff+$hi]
+	ldr	$t2, [$ctx,#$Doff+$lo]
+	ldr	$t3, [$ctx,#$Doff+$hi]
+	adds	$t0,$Alo,$t0
+	str	$t0, [$ctx,#$Coff+$lo]
+	adc	$t1,$Ahi,$t1
+	str	$t1, [$ctx,#$Coff+$hi]
+	adds	$t2,$Tlo,$t2
+	str	$t2, [$ctx,#$Doff+$lo]
+	adc	$t3,$Thi,$t3
+	str	$t3, [$ctx,#$Doff+$hi]
+
+	ldr	$Tlo,[sp,#$Foff+0]
+	ldr	$Thi,[sp,#$Foff+4]
+	ldr	$t0, [$ctx,#$Eoff+$lo]
+	ldr	$t1, [$ctx,#$Eoff+$hi]
+	ldr	$t2, [$ctx,#$Foff+$lo]
+	ldr	$t3, [$ctx,#$Foff+$hi]
+	adds	$Elo,$Elo,$t0
+	str	$Elo,[$ctx,#$Eoff+$lo]
+	adc	$Ehi,$Ehi,$t1
+	str	$Ehi,[$ctx,#$Eoff+$hi]
+	adds	$t2,$Tlo,$t2
+	str	$t2, [$ctx,#$Foff+$lo]
+	adc	$t3,$Thi,$t3
+	str	$t3, [$ctx,#$Foff+$hi]
+
+	ldr	$Alo,[sp,#$Goff+0]
+	ldr	$Ahi,[sp,#$Goff+4]
+	ldr	$Tlo,[sp,#$Hoff+0]
+	ldr	$Thi,[sp,#$Hoff+4]
+	ldr	$t0, [$ctx,#$Goff+$lo]
+	ldr	$t1, [$ctx,#$Goff+$hi]
+	ldr	$t2, [$ctx,#$Hoff+$lo]
+	ldr	$t3, [$ctx,#$Hoff+$hi]
+	adds	$t0,$Alo,$t0
+	str	$t0, [$ctx,#$Goff+$lo]
+	adc	$t1,$Ahi,$t1
+	str	$t1, [$ctx,#$Goff+$hi]
+	adds	$t2,$Tlo,$t2
+	str	$t2, [$ctx,#$Hoff+$lo]
+	adc	$t3,$Thi,$t3
+	str	$t3, [$ctx,#$Hoff+$hi]
+
+	add	sp,sp,#640
+	sub	$Ktbl,$Ktbl,#640
+
+	teq	$inp,$len
+	bne	.Loop
+
+	add	sp,sp,#8*9		@ destroy frame
+#if __ARM_ARCH__>=5
+	ldmia	sp!,{r4-r12,pc}
+#else
+	ldmia	sp!,{r4-r12,lr}
+	tst	lr,#1
+	moveq	pc,lr			@ be binary compatible with V4, yet
+	bx	lr			@ interoperable with Thumb ISA:-)
+#endif
+.size	sha512_block_data_order,.-sha512_block_data_order
+___
+
+{
+my @Sigma0=(28,34,39);
+my @Sigma1=(14,18,41);
+my @sigma0=(1, 8, 7);
+my @sigma1=(19,61,6);
+
+my $Ktbl="r3";
+my $cnt="r12";	# volatile register known as ip, intra-procedure-call scratch
+
+my @X=map("d$_",(0..15));
+my @V=($A,$B,$C,$D,$E,$F,$G,$H)=map("d$_",(16..23));
+
+sub NEON_00_15() {
+my $i=shift;
+my ($a,$b,$c,$d,$e,$f,$g,$h)=@_;
+my ($t0,$t1,$t2,$T1,$K,$Ch,$Maj)=map("d$_",(24..31));	# temps
+
+$code.=<<___ if ($i<16 || $i&1);
+	vshr.u64	$t0,$e,#@Sigma1[0]	@ $i
+#if $i<16
+	vld1.64		{@X[$i%16]},[$inp]!	@ handles unaligned
+#endif
+	vshr.u64	$t1,$e,#@Sigma1[1]
+#if $i>0
+	 vadd.i64	$a,$Maj			@ h+=Maj from the past
+#endif
+	vshr.u64	$t2,$e,#@Sigma1[2]
+___
+$code.=<<___;
+	vld1.64		{$K},[$Ktbl,:64]!	@ K[i++]
+	vsli.64		$t0,$e,#`64-@Sigma1[0]`
+	vsli.64		$t1,$e,#`64-@Sigma1[1]`
+	vmov		$Ch,$e
+	vsli.64		$t2,$e,#`64-@Sigma1[2]`
+#if $i<16 && defined(__ARMEL__)
+	vrev64.8	@X[$i],@X[$i]
+#endif
+	veor		$t1,$t0
+	vbsl		$Ch,$f,$g		@ Ch(e,f,g)
+	vshr.u64	$t0,$a,#@Sigma0[0]
+	veor		$t2,$t1			@ Sigma1(e)
+	vadd.i64	$T1,$Ch,$h
+	vshr.u64	$t1,$a,#@Sigma0[1]
+	vsli.64		$t0,$a,#`64-@Sigma0[0]`
+	vadd.i64	$T1,$t2
+	vshr.u64	$t2,$a,#@Sigma0[2]
+	vadd.i64	$K,@X[$i%16]
+	vsli.64		$t1,$a,#`64-@Sigma0[1]`
+	veor		$Maj,$a,$b
+	vsli.64		$t2,$a,#`64-@Sigma0[2]`
+	veor		$h,$t0,$t1
+	vadd.i64	$T1,$K
+	vbsl		$Maj,$c,$b		@ Maj(a,b,c)
+	veor		$h,$t2			@ Sigma0(a)
+	vadd.i64	$d,$T1
+	vadd.i64	$Maj,$T1
+	@ vadd.i64	$h,$Maj
+___
+}
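+
+# (Editorial note: NEON vbsl computes dst = (dst & src1) | (~dst & src2),
+# so with Ch preloaded with e, "vbsl Ch,f,g" gives (e&f)|(~e&g), i.e.
+# Ch(e,f,g); likewise Maj preloaded with a^b followed by "vbsl Maj,c,b"
+# gives ((a^b)&c)|(a&b), i.e. Maj(a,b,c), avoiding an explicit and/or
+# chain.)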
+
+sub NEON_16_79() {
+my $i=shift;
+
+if ($i&1)	{ &NEON_00_15($i,@_); return; }
+
+# 2x-vectorized, therefore runs every 2nd round
+my @X=map("q$_",(0..7));			# view @X as 128-bit vector
+my ($t0,$t1,$s0,$s1) = map("q$_",(12..15));	# temps
+my ($d0,$d1,$d2) = map("d$_",(24..26));		# temps from NEON_00_15
+my $e=@_[4];					# $e from NEON_00_15
+$i /= 2;
+$code.=<<___;
+	vshr.u64	$t0,@X[($i+7)%8],#@sigma1[0]
+	vshr.u64	$t1,@X[($i+7)%8],#@sigma1[1]
+	 vadd.i64	@_[0],d30			@ h+=Maj from the past
+	vshr.u64	$s1,@X[($i+7)%8],#@sigma1[2]
+	vsli.64		$t0,@X[($i+7)%8],#`64-@sigma1[0]`
+	vext.8		$s0,@X[$i%8],@X[($i+1)%8],#8	@ X[i+1]
+	vsli.64		$t1,@X[($i+7)%8],#`64-@sigma1[1]`
+	veor		$s1,$t0
+	vshr.u64	$t0,$s0,#@sigma0[0]
+	veor		$s1,$t1				@ sigma1(X[i+14])
+	vshr.u64	$t1,$s0,#@sigma0[1]
+	vadd.i64	@X[$i%8],$s1
+	vshr.u64	$s1,$s0,#@sigma0[2]
+	vsli.64		$t0,$s0,#`64-@sigma0[0]`
+	vsli.64		$t1,$s0,#`64-@sigma0[1]`
+	vext.8		$s0,@X[($i+4)%8],@X[($i+5)%8],#8	@ X[i+9]
+	veor		$s1,$t0
+	vshr.u64	$d0,$e,#@Sigma1[0]		@ from NEON_00_15
+	vadd.i64	@X[$i%8],$s0
+	vshr.u64	$d1,$e,#@Sigma1[1]		@ from NEON_00_15
+	veor		$s1,$t1				@ sigma0(X[i+1])
+	vshr.u64	$d2,$e,#@Sigma1[2]		@ from NEON_00_15
+	vadd.i64	@X[$i%8],$s1
+___
+	&NEON_00_15(2*$i,@_);
+}
+
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7
+.arch	armv7-a
+.fpu	neon
+
+.global	sha512_block_data_order_neon
+.type	sha512_block_data_order_neon,%function
+.align	4
+sha512_block_data_order_neon:
+.LNEON:
+	dmb				@ errata #451034 on early Cortex A8
+	add	$len,$inp,$len,lsl#7	@ len to point at the end of inp
+	adr	$Ktbl,K512
+	VFP_ABI_PUSH
+	vldmia	$ctx,{$A-$H}		@ load context
+.Loop_neon:
+___
+for($i=0;$i<16;$i++)	{ &NEON_00_15($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+	mov		$cnt,#4
+.L16_79_neon:
+	subs		$cnt,#1
+___
+for(;$i<32;$i++)	{ &NEON_16_79($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+	bne		.L16_79_neon
+
+	 vadd.i64	$A,d30		@ h+=Maj from the past
+	vldmia		$ctx,{d24-d31}	@ load context to temp
+	vadd.i64	q8,q12		@ vectorized accumulate
+	vadd.i64	q9,q13
+	vadd.i64	q10,q14
+	vadd.i64	q11,q15
+	vstmia		$ctx,{$A-$H}	@ save context
+	teq		$inp,$len
+	sub		$Ktbl,#640	@ rewind K512
+	bne		.Loop_neon
+
+	VFP_ABI_POP
+	ret				@ bx lr
+.size	sha512_block_data_order_neon,.-sha512_block_data_order_neon
+#endif
+___
+}
+$code.=<<___;
+.asciz	"SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
+.align	2
+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
+.comm	OPENSSL_armcap_P,4,4
+.hidden	OPENSSL_armcap_P
+#endif
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
+$code =~ s/\bret\b/bx	lr/gm;
+
+open SELF,$0;
+while(<SELF>) {
+	next if (/^#!/);
+	last if (!s/^#/@/ and !/^$/);
+	print;
+}
+close SELF;
+
+print $code;
+close STDOUT; # enforce flush
diff --git a/src/crypto/fipsmodule/sha/asm/sha512-armv8.pl b/src/crypto/fipsmodule/sha/asm/sha512-armv8.pl
new file mode 100644
index 0000000..b4e558a
--- /dev/null
+++ b/src/crypto/fipsmodule/sha/asm/sha512-armv8.pl
@@ -0,0 +1,430 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# SHA256/512 for ARMv8.
+#
+# Performance in cycles per processed byte and improvement coefficient
+# over code generated with "default" compiler:
+#
+#		SHA256-hw	SHA256(*)	SHA512
+# Apple A7	1.97		10.5 (+33%)	6.73 (-1%(**))
+# Cortex-A53	2.38		15.5 (+115%)	10.0 (+150%(***))
+# Cortex-A57	2.31		11.6 (+86%)	7.51 (+260%(***))
+# Denver	2.01		10.5 (+26%)	6.70 (+8%)
+# X-Gene			20.0 (+100%)	12.8 (+300%(***))
+#
+# (*)	Software SHA256 results are of lesser relevance, presented
+#	mostly for informational purposes.
+# (**)	The result is a trade-off: it's possible to improve it by
+#	10% (or by 1 cycle per round), but at the cost of 20% loss
+#	on Cortex-A53 (or by 4 cycles per round).
+# (***)	Super-impressive coefficients over gcc-generated code are
+#	an indication of some compiler "pathology"; most notably,
+#	code generated with -mgeneral-regs-only is significantly
+#	faster and the gap is only 40-90%.
+
+$flavour=shift;
+$output=shift;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../../perlasm/arm-xlate.pl" and -f $xlate) or
+die "can't locate arm-xlate.pl";
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+if ($output =~ /512/) {
+	$BITS=512;
+	$SZ=8;
+	@Sigma0=(28,34,39);
+	@Sigma1=(14,18,41);
+	@sigma0=(1,  8, 7);
+	@sigma1=(19,61, 6);
+	$rounds=80;
+	$reg_t="x";
+} else {
+	$BITS=256;
+	$SZ=4;
+	@Sigma0=( 2,13,22);
+	@Sigma1=( 6,11,25);
+	@sigma0=( 7,18, 3);
+	@sigma1=(17,19,10);
+	$rounds=64;
+	$reg_t="w";
+}
+
+$func="sha${BITS}_block_data_order";
+
+($ctx,$inp,$num,$Ktbl)=map("x$_",(0..2,30));
+
+@X=map("$reg_t$_",(3..15,0..2));
+@V=($A,$B,$C,$D,$E,$F,$G,$H)=map("$reg_t$_",(20..27));
+($t0,$t1,$t2,$t3)=map("$reg_t$_",(16,17,19,28));
+
+sub BODY_00_xx {
+my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
+my $j=($i+1)&15;
+my ($T0,$T1,$T2)=(@X[($i-8)&15],@X[($i-9)&15],@X[($i-10)&15]);
+   $T0=@X[$i+3] if ($i<11);
+
+$code.=<<___	if ($i<16);
+#ifndef	__ARMEB__
+	rev	@X[$i],@X[$i]			// $i
+#endif
+___
+$code.=<<___	if ($i<13 && ($i&1));
+	ldp	@X[$i+1],@X[$i+2],[$inp],#2*$SZ
+___
+$code.=<<___	if ($i==13);
+	ldp	@X[14],@X[15],[$inp]
+___
+$code.=<<___	if ($i>=14);
+	ldr	@X[($i-11)&15],[sp,#`$SZ*(($i-11)%4)`]
+___
+$code.=<<___	if ($i>0 && $i<16);
+	add	$a,$a,$t1			// h+=Sigma0(a)
+___
+$code.=<<___	if ($i>=11);
+	str	@X[($i-8)&15],[sp,#`$SZ*(($i-8)%4)`]
+___
+# While ARMv8 specifies merged rotate-n-logical operations such as
+# 'eor x,y,z,ror#n', they were found to negatively affect performance
+# on Apple A7. The reason seems to be that they require even 'y' to
+# be available earlier. This means that such a merged instruction is
+# not necessarily the best choice on the critical path... On the other
+# hand, Cortex-A5x handles merged instructions much better than
+# disjoint rotate and logical... See (**) footnote above.
+$code.=<<___	if ($i<15);
+	ror	$t0,$e,#$Sigma1[0]
+	add	$h,$h,$t2			// h+=K[i]
+	eor	$T0,$e,$e,ror#`$Sigma1[2]-$Sigma1[1]`
+	and	$t1,$f,$e
+	bic	$t2,$g,$e
+	add	$h,$h,@X[$i&15]			// h+=X[i]
+	orr	$t1,$t1,$t2			// Ch(e,f,g)
+	eor	$t2,$a,$b			// a^b, b^c in next round
+	eor	$t0,$t0,$T0,ror#$Sigma1[1]	// Sigma1(e)
+	ror	$T0,$a,#$Sigma0[0]
+	add	$h,$h,$t1			// h+=Ch(e,f,g)
+	eor	$t1,$a,$a,ror#`$Sigma0[2]-$Sigma0[1]`
+	add	$h,$h,$t0			// h+=Sigma1(e)
+	and	$t3,$t3,$t2			// (b^c)&=(a^b)
+	add	$d,$d,$h			// d+=h
+	eor	$t3,$t3,$b			// Maj(a,b,c)
+	eor	$t1,$T0,$t1,ror#$Sigma0[1]	// Sigma0(a)
+	add	$h,$h,$t3			// h+=Maj(a,b,c)
+	ldr	$t3,[$Ktbl],#$SZ		// *K++, $t2 in next round
+	//add	$h,$h,$t1			// h+=Sigma0(a)
+___
+$code.=<<___	if ($i>=15);
+	ror	$t0,$e,#$Sigma1[0]
+	add	$h,$h,$t2			// h+=K[i]
+	ror	$T1,@X[($j+1)&15],#$sigma0[0]
+	and	$t1,$f,$e
+	ror	$T2,@X[($j+14)&15],#$sigma1[0]
+	bic	$t2,$g,$e
+	ror	$T0,$a,#$Sigma0[0]
+	add	$h,$h,@X[$i&15]			// h+=X[i]
+	eor	$t0,$t0,$e,ror#$Sigma1[1]
+	eor	$T1,$T1,@X[($j+1)&15],ror#$sigma0[1]
+	orr	$t1,$t1,$t2			// Ch(e,f,g)
+	eor	$t2,$a,$b			// a^b, b^c in next round
+	eor	$t0,$t0,$e,ror#$Sigma1[2]	// Sigma1(e)
+	eor	$T0,$T0,$a,ror#$Sigma0[1]
+	add	$h,$h,$t1			// h+=Ch(e,f,g)
+	and	$t3,$t3,$t2			// (b^c)&=(a^b)
+	eor	$T2,$T2,@X[($j+14)&15],ror#$sigma1[1]
+	eor	$T1,$T1,@X[($j+1)&15],lsr#$sigma0[2]	// sigma0(X[i+1])
+	add	$h,$h,$t0			// h+=Sigma1(e)
+	eor	$t3,$t3,$b			// Maj(a,b,c)
+	eor	$t1,$T0,$a,ror#$Sigma0[2]	// Sigma0(a)
+	eor	$T2,$T2,@X[($j+14)&15],lsr#$sigma1[2]	// sigma1(X[i+14])
+	add	@X[$j],@X[$j],@X[($j+9)&15]
+	add	$d,$d,$h			// d+=h
+	add	$h,$h,$t3			// h+=Maj(a,b,c)
+	ldr	$t3,[$Ktbl],#$SZ		// *K++, $t2 in next round
+	add	@X[$j],@X[$j],$T1
+	add	$h,$h,$t1			// h+=Sigma0(a)
+	add	@X[$j],@X[$j],$T2
+___
+	($t2,$t3)=($t3,$t2);
+}
+
+$code.=<<___;
+#include <openssl/arm_arch.h>
+
+.text
+
+.extern	OPENSSL_armcap_P
+.globl	$func
+.type	$func,%function
+.align	6
+$func:
+___
+$code.=<<___	if ($SZ==4);
+	ldr	x16,.LOPENSSL_armcap_P
+	adr	x17,.LOPENSSL_armcap_P
+	add	x16,x16,x17
+	ldr	w16,[x16]
+	tst	w16,#ARMV8_SHA256
+	b.ne	.Lv8_entry
+___
+$code.=<<___;
+	stp	x29,x30,[sp,#-128]!
+	add	x29,sp,#0
+
+	stp	x19,x20,[sp,#16]
+	stp	x21,x22,[sp,#32]
+	stp	x23,x24,[sp,#48]
+	stp	x25,x26,[sp,#64]
+	stp	x27,x28,[sp,#80]
+	sub	sp,sp,#4*$SZ
+
+	ldp	$A,$B,[$ctx]				// load context
+	ldp	$C,$D,[$ctx,#2*$SZ]
+	ldp	$E,$F,[$ctx,#4*$SZ]
+	add	$num,$inp,$num,lsl#`log(16*$SZ)/log(2)`	// end of input
+	ldp	$G,$H,[$ctx,#6*$SZ]
+	adr	$Ktbl,.LK$BITS
+	stp	$ctx,$num,[x29,#96]
+
+.Loop:
+	ldp	@X[0],@X[1],[$inp],#2*$SZ
+	ldr	$t2,[$Ktbl],#$SZ			// *K++
+	eor	$t3,$B,$C				// magic seed
+	str	$inp,[x29,#112]
+___
+for ($i=0;$i<16;$i++)	{ &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
+$code.=".Loop_16_xx:\n";
+for (;$i<32;$i++)	{ &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
+$code.=<<___;
+	cbnz	$t2,.Loop_16_xx
+
+	ldp	$ctx,$num,[x29,#96]
+	ldr	$inp,[x29,#112]
+	sub	$Ktbl,$Ktbl,#`$SZ*($rounds+1)`		// rewind
+
+	ldp	@X[0],@X[1],[$ctx]
+	ldp	@X[2],@X[3],[$ctx,#2*$SZ]
+	add	$inp,$inp,#14*$SZ			// advance input pointer
+	ldp	@X[4],@X[5],[$ctx,#4*$SZ]
+	add	$A,$A,@X[0]
+	ldp	@X[6],@X[7],[$ctx,#6*$SZ]
+	add	$B,$B,@X[1]
+	add	$C,$C,@X[2]
+	add	$D,$D,@X[3]
+	stp	$A,$B,[$ctx]
+	add	$E,$E,@X[4]
+	add	$F,$F,@X[5]
+	stp	$C,$D,[$ctx,#2*$SZ]
+	add	$G,$G,@X[6]
+	add	$H,$H,@X[7]
+	cmp	$inp,$num
+	stp	$E,$F,[$ctx,#4*$SZ]
+	stp	$G,$H,[$ctx,#6*$SZ]
+	b.ne	.Loop
+
+	ldp	x19,x20,[x29,#16]
+	add	sp,sp,#4*$SZ
+	ldp	x21,x22,[x29,#32]
+	ldp	x23,x24,[x29,#48]
+	ldp	x25,x26,[x29,#64]
+	ldp	x27,x28,[x29,#80]
+	ldp	x29,x30,[sp],#128
+	ret
+.size	$func,.-$func
+
+.align	6
+.type	.LK$BITS,%object
+.LK$BITS:
+___
+$code.=<<___ if ($SZ==8);
+	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
+	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+	.quad	0x3956c25bf348b538,0x59f111f1b605d019
+	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
+	.quad	0xd807aa98a3030242,0x12835b0145706fbe
+	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
+	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
+	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
+	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
+	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
+	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
+	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
+	.quad	0x06ca6351e003826f,0x142929670a0e6e70
+	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
+	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
+	.quad	0x81c2c92e47edaee6,0x92722c851482353b
+	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
+	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
+	.quad	0xd192e819d6ef5218,0xd69906245565a910
+	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
+	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
+	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
+	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
+	.quad	0x90befffa23631e28,0xa4506cebde82bde9
+	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
+	.quad	0xca273eceea26619c,0xd186b8c721c0c207
+	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
+	.quad	0x113f9804bef90dae,0x1b710b35131c471b
+	.quad	0x28db77f523047d84,0x32caab7b40c72493
+	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
+	.quad	0	// terminator
+___
+$code.=<<___ if ($SZ==4);
+	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+	.long	0	// terminator
+___
+$code.=<<___;
+.size	.LK$BITS,.-.LK$BITS
+.align	3
+.LOPENSSL_armcap_P:
+	.quad	OPENSSL_armcap_P-.
+.asciz	"SHA$BITS block transform for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
+.align	2
+___
+
+if ($SZ==4) {
+my $Ktbl="x3";
+
+my ($ABCD,$EFGH,$abcd)=map("v$_.16b",(0..2));
+my @MSG=map("v$_.16b",(4..7));
+my ($W0,$W1)=("v16.4s","v17.4s");
+my ($ABCD_SAVE,$EFGH_SAVE)=("v18.16b","v19.16b");
+
+$code.=<<___;
+.type	sha256_block_armv8,%function
+.align	6
+sha256_block_armv8:
+.Lv8_entry:
+	stp		x29,x30,[sp,#-16]!
+	add		x29,sp,#0
+
+	ld1.32		{$ABCD,$EFGH},[$ctx]
+	adr		$Ktbl,.LK256
+
+.Loop_hw:
+	ld1		{@MSG[0]-@MSG[3]},[$inp],#64
+	sub		$num,$num,#1
+	ld1.32		{$W0},[$Ktbl],#16
+	rev32		@MSG[0],@MSG[0]
+	rev32		@MSG[1],@MSG[1]
+	rev32		@MSG[2],@MSG[2]
+	rev32		@MSG[3],@MSG[3]
+	orr		$ABCD_SAVE,$ABCD,$ABCD		// offload
+	orr		$EFGH_SAVE,$EFGH,$EFGH
+___
+for($i=0;$i<12;$i++) {
+$code.=<<___;
+	ld1.32		{$W1},[$Ktbl],#16
+	add.i32		$W0,$W0,@MSG[0]
+	sha256su0	@MSG[0],@MSG[1]
+	orr		$abcd,$ABCD,$ABCD
+	sha256h		$ABCD,$EFGH,$W0
+	sha256h2	$EFGH,$abcd,$W0
+	sha256su1	@MSG[0],@MSG[2],@MSG[3]
+___
+	($W0,$W1)=($W1,$W0);	push(@MSG,shift(@MSG));
+}
+$code.=<<___;
+	ld1.32		{$W1},[$Ktbl],#16
+	add.i32		$W0,$W0,@MSG[0]
+	orr		$abcd,$ABCD,$ABCD
+	sha256h		$ABCD,$EFGH,$W0
+	sha256h2	$EFGH,$abcd,$W0
+
+	ld1.32		{$W0},[$Ktbl],#16
+	add.i32		$W1,$W1,@MSG[1]
+	orr		$abcd,$ABCD,$ABCD
+	sha256h		$ABCD,$EFGH,$W1
+	sha256h2	$EFGH,$abcd,$W1
+
+	ld1.32		{$W1},[$Ktbl]
+	add.i32		$W0,$W0,@MSG[2]
+	sub		$Ktbl,$Ktbl,#$rounds*$SZ-16	// rewind
+	orr		$abcd,$ABCD,$ABCD
+	sha256h		$ABCD,$EFGH,$W0
+	sha256h2	$EFGH,$abcd,$W0
+
+	add.i32		$W1,$W1,@MSG[3]
+	orr		$abcd,$ABCD,$ABCD
+	sha256h		$ABCD,$EFGH,$W1
+	sha256h2	$EFGH,$abcd,$W1
+
+	add.i32		$ABCD,$ABCD,$ABCD_SAVE
+	add.i32		$EFGH,$EFGH,$EFGH_SAVE
+
+	cbnz		$num,.Loop_hw
+
+	st1.32		{$ABCD,$EFGH},[$ctx]
+
+	ldr		x29,[sp],#16
+	ret
+.size	sha256_block_armv8,.-sha256_block_armv8
+___
+}
+
+$code.=<<___;
+.comm	OPENSSL_armcap_P,4,4
+___
+
+{   my  %opcode = (
+	"sha256h"	=> 0x5e004000,	"sha256h2"	=> 0x5e005000,
+	"sha256su0"	=> 0x5e282800,	"sha256su1"	=> 0x5e006000	);
+
+    sub unsha256 {
+	my ($mnemonic,$arg)=@_;
+
+	$arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o
+	&&
+	sprintf ".inst\t0x%08x\t//%s %s",
+			$opcode{$mnemonic}|$1|($2<<5)|($3<<16),
+			$mnemonic,$arg;
+    }
+}
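+# unsha256 lets the module assemble even with toolchains that predate
+# the ARMv8 Cryptography Extensions: the regex pulls the three register
+# numbers out of the operand string and packs them into the fixed
+# .inst opcode templates above (Rd, Rn<<5, Rm<<16).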
+
+foreach(split("\n",$code)) {
+
+	s/\`([^\`]*)\`/eval($1)/geo;
+
+	s/\b(sha256\w+)\s+([qv].*)/unsha256($1,$2)/geo;
+
+	s/\.\w?32\b//o		and s/\.16b/\.4s/go;
+	m/(ld|st)1[^\[]+\[0\]/o	and s/\.4s/\.s/go;
+
+	print $_,"\n";
+}
+
+close STDOUT;
diff --git a/src/crypto/fipsmodule/sha/asm/sha512-x86_64.pl b/src/crypto/fipsmodule/sha/asm/sha512-x86_64.pl
new file mode 100755
index 0000000..c638595
--- /dev/null
+++ b/src/crypto/fipsmodule/sha/asm/sha512-x86_64.pl
@@ -0,0 +1,2393 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. Rights for redistribution and usage in source and binary
+# forms are granted according to the OpenSSL license.
+# ====================================================================
+#
+# sha256/512_block procedure for x86_64.
+#
+# 40% improvement over compiler-generated code on Opteron. On EM64T
+# sha256 was observed to run >80% faster and sha512 >40% faster. No
+# magical tricks, just a straight implementation... I really wonder
+# why gcc [being armed with inline assembler] fails to generate code
+# as fast. The only thing that is cool about this module is that the
+# very same instruction sequence is used for both SHA-256 and SHA-512.
+# In the former case the instructions operate on 32-bit operands, in
+# the latter on 64-bit ones. All I had to do was get one flavor right;
+# the other one passed the tests right away:-)
+#
+# sha256_block runs in ~1005 cycles on Opteron, which gives you an
+# asymptotic performance of 64*1000/1005=63.7MBps times the CPU clock
+# frequency in GHz. sha512_block runs in ~1275 cycles, which results
+# in 128*1000/1275=100MBps per GHz. Is there room for improvement?
+# Well, compared to the IA-64 implementation, which maintains X[16]
+# in the register bank[!], sustains close to 4 instructions per CPU
+# clock cycle and runs in 1003 cycles, 1275 is a very good result for
+# the 3-way issue Opteron pipeline with X[16] maintained in memory.
+# So *if* there is a way to improve it, *then* the only way would be
+# to offload the X[16] updates to the SSE unit, but that would require
+# a "deeper" loop unroll, which in turn would naturally cause size
+# blow-up, not to mention increased complexity! And once again, only
+# *if* it's actually possible to noticeably improve the overall ILP
+# (instruction-level parallelism) on a given CPU implementation.
+#
+# Special note on Intel EM64T. While the Opteron CPU exhibits a
+# perfect performance ratio of 1.5 between the 64- and 32-bit flavors
+# [see above], [currently available] EM64T CPUs are apparently far
+# from it. On the contrary, the 64-bit version, sha512_block, is ~30%
+# *slower* than the 32-bit sha256_block:-( This is presumably because
+# 64-bit shifts/rotates are apparently not atomic instructions, but
+# are implemented in microcode.
+#
+# May 2012.
+#
+# An optimization incorporating one of Pavel Semjanov's ideas, an
+# alternative Maj, resulted in a >=5% improvement on most CPUs: +20%
+# for SHA256, and unfortunately -2% for SHA512 on P4 [which nobody
+# should care about that much].
+#
+# June 2012.
+#
+# Add SIMD code paths; see below for improvement coefficients. An
+# SSSE3 code path was not attempted for SHA512, because the improvement
+# was not estimated to be high enough, noticeably less than 9%, to
+# justify the effort, at least not on pre-AVX processors. [The obvious
+# exception is VIA Nano, but it has a SHA512 instruction that is faster
+# and should be used instead.] For reference, the corresponding
+# estimated upper limit for the SSSE3 SHA256 improvement is 28%. The
+# fact that higher coefficients are observed on VIA Nano and Bulldozer
+# has more to do with the specifics of their architecture [which is a
+# topic for separate discussion].
+#
+# November 2012.
+#
+# Add AVX2 code path. Two consecutive input blocks are loaded into
+# 256-bit %ymm registers, with data from the first block in the least
+# significant 128-bit halves and data from the second in the most
+# significant ones. The data is then processed with the same SIMD
+# instruction sequence as for AVX, but with %ymm as operands. The side
+# effect is an increased stack frame, 448 additional bytes in SHA256
+# and 1152 in SHA512, plus a 1.2KB code size increase.
+#
+# March 2014.
+#
+# Add support for Intel SHA Extensions.
+
+######################################################################
+# Current performance in cycles per processed byte (less is better):
+#
+#		SHA256	SSSE3       AVX/XOP(*)	    SHA512  AVX/XOP(*)
+#
+# AMD K8	14.9	-	    -		    9.57    -
+# P4		17.3	-	    -		    30.8    -
+# Core 2	15.6	13.8(+13%)  -		    9.97    -
+# Westmere	14.8	12.3(+19%)  -		    9.58    -
+# Sandy Bridge	17.4	14.2(+23%)  11.6(+50%(**))  11.2    8.10(+38%(**))
+# Ivy Bridge	12.6	10.5(+20%)  10.3(+22%)	    8.17    7.22(+13%)
+# Haswell	12.2	9.28(+31%)  7.80(+56%)	    7.66    5.40(+42%)
+# Skylake	11.4	9.03(+26%)  7.70(+48%)      7.25    5.20(+40%)
+# Bulldozer	21.1	13.6(+54%)  13.6(+54%(***)) 13.5    8.58(+57%)
+# VIA Nano	23.0	16.5(+39%)  -		    14.7    -
+# Atom		23.0	18.9(+22%)  -		    14.7    -
+# Silvermont	27.4	20.6(+33%)  -               17.5    -
+# Goldmont	18.9	14.3(+32%)  4.16(+350%)     12.0    -
+#
+# (*)	whichever is best applicable, including SHAEXT;
+# (**)	the switch from ror to shrd accounts for a fair share of the
+#	improvement;
+# (***)	execution time is fully determined by the remaining integer-only
+#	part, body_00_15; reducing the number of SIMD instructions
+#	below a certain limit makes no difference/sense; to conserve
+#	space the SHA256 XOP code path is therefore omitted;
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+# In upstream, this is controlled by shelling out to the compiler to check
+# versions, but BoringSSL is intended to be used with pre-generated perlasm
+# output, so this isn't useful anyway.
+#
+# TODO(davidben): Enable AVX2 code after testing by setting $avx to 2. Is it
+# necessary to disable AVX2 code when SHA Extensions code is disabled? Upstream
+# did not tie them together until after $shaext was added.
+$avx = 1;
+
+# TODO(davidben): Consider enabling the Intel SHA Extensions code once it's
+# been tested.
+$shaext=0;	# disabled until tested; see the TODO above
+$avx=1		if (!$shaext && $avx);
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
+if ($output =~ /512/) {
+	$func="sha512_block_data_order";
+	$TABLE="K512";
+	$SZ=8;
+	@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%rax","%rbx","%rcx","%rdx",
+					"%r8", "%r9", "%r10","%r11");
+	($T1,$a0,$a1,$a2,$a3)=("%r12","%r13","%r14","%r15","%rdi");
+	@Sigma0=(28,34,39);
+	@Sigma1=(14,18,41);
+	@sigma0=(1,  8, 7);
+	@sigma1=(19,61, 6);
+	$rounds=80;
+} else {
+	$func="sha256_block_data_order";
+	$TABLE="K256";
+	$SZ=4;
+	@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%eax","%ebx","%ecx","%edx",
+					"%r8d","%r9d","%r10d","%r11d");
+	($T1,$a0,$a1,$a2,$a3)=("%r12d","%r13d","%r14d","%r15d","%edi");
+	@Sigma0=( 2,13,22);
+	@Sigma1=( 6,11,25);
+	@sigma0=( 7,18, 3);
+	@sigma1=(17,19,10);
+	$rounds=64;
+}
+
+$ctx="%rdi";	# 1st arg, zapped by $a3
+$inp="%rsi";	# 2nd arg
+$Tbl="%rbp";
+
+$_ctx="16*$SZ+0*8(%rsp)";
+$_inp="16*$SZ+1*8(%rsp)";
+$_end="16*$SZ+2*8(%rsp)";
+$_rsp="16*$SZ+3*8(%rsp)";
+$framesz="16*$SZ+4*8";
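+# Resulting stack frame layout (a sketch; offsets follow the
+# definitions above):
+#	0 .. 16*$SZ-1		X[0..15] message schedule
+#	16*$SZ+0*8		saved ctx (1st arg)
+#	16*$SZ+1*8		saved inp (2nd arg)
+#	16*$SZ+2*8		end-of-input pointer
+#	16*$SZ+3*8		caller's %rsp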
+
+
+sub ROUND_00_15()
+{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
+  my $STRIDE=$SZ;
+     $STRIDE += 16 if ($i%(16/$SZ)==(16/$SZ-1));
+
+$code.=<<___;
+	ror	\$`$Sigma1[2]-$Sigma1[1]`,$a0
+	mov	$f,$a2
+
+	xor	$e,$a0
+	ror	\$`$Sigma0[2]-$Sigma0[1]`,$a1
+	xor	$g,$a2			# f^g
+
+	mov	$T1,`$SZ*($i&0xf)`(%rsp)
+	xor	$a,$a1
+	and	$e,$a2			# (f^g)&e
+
+	ror	\$`$Sigma1[1]-$Sigma1[0]`,$a0
+	add	$h,$T1			# T1+=h
+	xor	$g,$a2			# Ch(e,f,g)=((f^g)&e)^g
+
+	ror	\$`$Sigma0[1]-$Sigma0[0]`,$a1
+	xor	$e,$a0
+	add	$a2,$T1			# T1+=Ch(e,f,g)
+
+	mov	$a,$a2
+	add	($Tbl),$T1		# T1+=K[round]
+	xor	$a,$a1
+
+	xor	$b,$a2			# a^b, b^c in next round
+	ror	\$$Sigma1[0],$a0	# Sigma1(e)
+	mov	$b,$h
+
+	and	$a2,$a3
+	ror	\$$Sigma0[0],$a1	# Sigma0(a)
+	add	$a0,$T1			# T1+=Sigma1(e)
+
+	xor	$a3,$h			# h=Maj(a,b,c)=Ch(a^b,c,b)
+	add	$T1,$d			# d+=T1
+	add	$T1,$h			# h+=T1
+
+	lea	$STRIDE($Tbl),$Tbl	# round++
+___
+$code.=<<___ if ($i<15);
+	add	$a1,$h			# h+=Sigma0(a)
+___
+	($a2,$a3) = ($a3,$a2);
+}
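+# A note on the Maj "magic" above: it relies on the identity
+#	Maj(a,b,c) = Ch(a^b,c,b) = ((a^b)&(b^c))^b
+# $a3 enters each round already holding b^c (it was computed as
+# "a^b, b^c in next round" one round earlier), so Maj costs a single
+# and+xor pair per round instead of three ands and two xors.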
+
+sub ROUND_16_XX()
+{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
+
+$code.=<<___;
+	mov	`$SZ*(($i+1)&0xf)`(%rsp),$a0
+	mov	`$SZ*(($i+14)&0xf)`(%rsp),$a2
+
+	mov	$a0,$T1
+	ror	\$`$sigma0[1]-$sigma0[0]`,$a0
+	add	$a1,$a			# modulo-scheduled h+=Sigma0(a)
+	mov	$a2,$a1
+	ror	\$`$sigma1[1]-$sigma1[0]`,$a2
+
+	xor	$T1,$a0
+	shr	\$$sigma0[2],$T1
+	ror	\$$sigma0[0],$a0
+	xor	$a1,$a2
+	shr	\$$sigma1[2],$a1
+
+	ror	\$$sigma1[0],$a2
+	xor	$a0,$T1			# sigma0(X[(i+1)&0xf])
+	xor	$a1,$a2			# sigma1(X[(i+14)&0xf])
+	add	`$SZ*(($i+9)&0xf)`(%rsp),$T1
+
+	add	`$SZ*($i&0xf)`(%rsp),$T1
+	mov	$e,$a0
+	add	$a2,$T1
+	mov	$a,$a1
+___
+	&ROUND_00_15(@_);
+}
+
+$code=<<___;
+.text
+
+.extern	OPENSSL_ia32cap_addr
+.globl	$func
+.type	$func,\@function,3
+.align	16
+$func:
+___
+$code.=<<___ if ($SZ==4 || $avx);
+	lea	OPENSSL_ia32cap_addr(%rip),%r11
+	mov	(%r11),%r11
+	mov	0(%r11),%r9d
+	mov	4(%r11),%r10d
+	mov	8(%r11),%r11d
+___
+$code.=<<___ if ($SZ==4 && $shaext);
+	test	\$`1<<29`,%r11d		# check for SHA
+	jnz	_shaext_shortcut
+___
+$code.=<<___ if ($avx && $SZ==8);
+	test	\$`1<<11`,%r10d		# check for XOP
+	jnz	.Lxop_shortcut
+___
+$code.=<<___ if ($avx>1);
+	and	\$`1<<8|1<<5|1<<3`,%r11d	# check for BMI2+AVX2+BMI1
+	cmp	\$`1<<8|1<<5|1<<3`,%r11d
+	je	.Lavx2_shortcut
+___
+$code.=<<___ if ($avx);
+	and	\$`1<<30`,%r9d		# mask "Intel CPU" bit
+	and	\$`1<<28|1<<9`,%r10d	# mask AVX and SSSE3 bits
+	or	%r9d,%r10d
+	cmp	\$`1<<28|1<<9|1<<30`,%r10d
+	je	.Lavx_shortcut
+___
+$code.=<<___ if ($SZ==4);
+	test	\$`1<<9`,%r10d
+	jnz	.Lssse3_shortcut
+___
+$code.=<<___;
+	mov	%rsp,%rax		# copy %rsp
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	shl	\$4,%rdx		# num*16
+	sub	\$$framesz,%rsp
+	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
+	and	\$-64,%rsp		# align stack frame
+	mov	$ctx,$_ctx		# save ctx, 1st arg
+	mov	$inp,$_inp		# save inp, 2nd arg
+	mov	%rdx,$_end		# save end pointer, "3rd" arg
+	mov	%rax,$_rsp		# save copy of %rsp
+.Lprologue:
+
+	mov	$SZ*0($ctx),$A
+	mov	$SZ*1($ctx),$B
+	mov	$SZ*2($ctx),$C
+	mov	$SZ*3($ctx),$D
+	mov	$SZ*4($ctx),$E
+	mov	$SZ*5($ctx),$F
+	mov	$SZ*6($ctx),$G
+	mov	$SZ*7($ctx),$H
+
+	jmp	.Lloop
+
+.align	16
+.Lloop:
+	mov	$B,$a3
+	lea	$TABLE(%rip),$Tbl
+	xor	$C,$a3			# magic
+___
+	for($i=0;$i<16;$i++) {
+		$code.="	mov	$SZ*$i($inp),$T1\n";
+		$code.="	mov	@ROT[4],$a0\n";
+		$code.="	mov	@ROT[0],$a1\n";
+		$code.="	bswap	$T1\n";
+		&ROUND_00_15($i,@ROT);
+		unshift(@ROT,pop(@ROT));
+	}
+$code.=<<___;
+	jmp	.Lrounds_16_xx
+.align	16
+.Lrounds_16_xx:
+___
+	for(;$i<32;$i++) {
+		&ROUND_16_XX($i,@ROT);
+		unshift(@ROT,pop(@ROT));
+	}
+
+$code.=<<___;
+	cmpb	\$0,`$SZ-1`($Tbl)
+	jnz	.Lrounds_16_xx
+
+	mov	$_ctx,$ctx
+	add	$a1,$A			# modulo-scheduled h+=Sigma0(a)
+	lea	16*$SZ($inp),$inp
+
+	add	$SZ*0($ctx),$A
+	add	$SZ*1($ctx),$B
+	add	$SZ*2($ctx),$C
+	add	$SZ*3($ctx),$D
+	add	$SZ*4($ctx),$E
+	add	$SZ*5($ctx),$F
+	add	$SZ*6($ctx),$G
+	add	$SZ*7($ctx),$H
+
+	cmp	$_end,$inp
+
+	mov	$A,$SZ*0($ctx)
+	mov	$B,$SZ*1($ctx)
+	mov	$C,$SZ*2($ctx)
+	mov	$D,$SZ*3($ctx)
+	mov	$E,$SZ*4($ctx)
+	mov	$F,$SZ*5($ctx)
+	mov	$G,$SZ*6($ctx)
+	mov	$H,$SZ*7($ctx)
+	jb	.Lloop
+
+	mov	$_rsp,%rsi
+	mov	-48(%rsi),%r15
+	mov	-40(%rsi),%r14
+	mov	-32(%rsi),%r13
+	mov	-24(%rsi),%r12
+	mov	-16(%rsi),%rbp
+	mov	-8(%rsi),%rbx
+	lea	(%rsi),%rsp
+.Lepilogue:
+	ret
+.size	$func,.-$func
+___
+
+if ($SZ==4) {
+$code.=<<___;
+.align	64
+.type	$TABLE,\@object
+$TABLE:
+	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+
+	.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+	.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+	.long	0x03020100,0x0b0a0908,0xffffffff,0xffffffff
+	.long	0x03020100,0x0b0a0908,0xffffffff,0xffffffff
+	.long	0xffffffff,0xffffffff,0x03020100,0x0b0a0908
+	.long	0xffffffff,0xffffffff,0x03020100,0x0b0a0908
+	.asciz	"SHA256 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+} else {
+$code.=<<___;
+.align	64
+.type	$TABLE,\@object
+$TABLE:
+	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
+	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
+	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+	.quad	0x3956c25bf348b538,0x59f111f1b605d019
+	.quad	0x3956c25bf348b538,0x59f111f1b605d019
+	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
+	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
+	.quad	0xd807aa98a3030242,0x12835b0145706fbe
+	.quad	0xd807aa98a3030242,0x12835b0145706fbe
+	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
+	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
+	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
+	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
+	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
+	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
+	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
+	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
+	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
+	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
+	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
+	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
+	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
+	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
+	.quad	0x06ca6351e003826f,0x142929670a0e6e70
+	.quad	0x06ca6351e003826f,0x142929670a0e6e70
+	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
+	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
+	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
+	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
+	.quad	0x81c2c92e47edaee6,0x92722c851482353b
+	.quad	0x81c2c92e47edaee6,0x92722c851482353b
+	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
+	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
+	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
+	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
+	.quad	0xd192e819d6ef5218,0xd69906245565a910
+	.quad	0xd192e819d6ef5218,0xd69906245565a910
+	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
+	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
+	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
+	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
+	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
+	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
+	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
+	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
+	.quad	0x90befffa23631e28,0xa4506cebde82bde9
+	.quad	0x90befffa23631e28,0xa4506cebde82bde9
+	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
+	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
+	.quad	0xca273eceea26619c,0xd186b8c721c0c207
+	.quad	0xca273eceea26619c,0xd186b8c721c0c207
+	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
+	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
+	.quad	0x113f9804bef90dae,0x1b710b35131c471b
+	.quad	0x113f9804bef90dae,0x1b710b35131c471b
+	.quad	0x28db77f523047d84,0x32caab7b40c72493
+	.quad	0x28db77f523047d84,0x32caab7b40c72493
+	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
+	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
+
+	.quad	0x0001020304050607,0x08090a0b0c0d0e0f
+	.quad	0x0001020304050607,0x08090a0b0c0d0e0f
+	.asciz	"SHA512 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+}
+
+######################################################################
+# SIMD code paths
+#
+if ($SZ==4 && $shaext) {{{
+######################################################################
+# Intel SHA Extensions implementation of SHA256 update function.
+#
+my ($ctx,$inp,$num,$Tbl)=("%rdi","%rsi","%rdx","%rcx");
+
+my ($Wi,$ABEF,$CDGH,$TMP,$BSWAP,$ABEF_SAVE,$CDGH_SAVE)=map("%xmm$_",(0..2,7..10));
+my @MSG=map("%xmm$_",(3..6));
+
+$code.=<<___;
+.type	sha256_block_data_order_shaext,\@function,3
+.align	64
+sha256_block_data_order_shaext:
+_shaext_shortcut:
+___
+$code.=<<___ if ($win64);
+	lea	`-8-5*16`(%rsp),%rsp
+	movaps	%xmm6,-8-5*16(%rax)
+	movaps	%xmm7,-8-4*16(%rax)
+	movaps	%xmm8,-8-3*16(%rax)
+	movaps	%xmm9,-8-2*16(%rax)
+	movaps	%xmm10,-8-1*16(%rax)
+.Lprologue_shaext:
+___
+$code.=<<___;
+	lea		K256+0x80(%rip),$Tbl
+	movdqu		($ctx),$ABEF		# DCBA
+	movdqu		16($ctx),$CDGH		# HGFE
+	movdqa		0x200-0x80($Tbl),$TMP	# byte swap mask
+
+	pshufd		\$0x1b,$ABEF,$Wi	# ABCD
+	pshufd		\$0xb1,$ABEF,$ABEF	# CDAB
+	pshufd		\$0x1b,$CDGH,$CDGH	# EFGH
+	movdqa		$TMP,$BSWAP		# offload
+	palignr		\$8,$CDGH,$ABEF		# ABEF
+	punpcklqdq	$Wi,$CDGH		# CDGH
+	jmp		.Loop_shaext
+
+.align	16
+.Loop_shaext:
+	movdqu		($inp),@MSG[0]
+	movdqu		0x10($inp),@MSG[1]
+	movdqu		0x20($inp),@MSG[2]
+	pshufb		$TMP,@MSG[0]
+	movdqu		0x30($inp),@MSG[3]
+
+	movdqa		0*32-0x80($Tbl),$Wi
+	paddd		@MSG[0],$Wi
+	pshufb		$TMP,@MSG[1]
+	movdqa		$CDGH,$CDGH_SAVE	# offload
+	sha256rnds2	$ABEF,$CDGH		# 0-3
+	pshufd		\$0x0e,$Wi,$Wi
+	nop
+	movdqa		$ABEF,$ABEF_SAVE	# offload
+	sha256rnds2	$CDGH,$ABEF
+
+	movdqa		1*32-0x80($Tbl),$Wi
+	paddd		@MSG[1],$Wi
+	pshufb		$TMP,@MSG[2]
+	sha256rnds2	$ABEF,$CDGH		# 4-7
+	pshufd		\$0x0e,$Wi,$Wi
+	lea		0x40($inp),$inp
+	sha256msg1	@MSG[1],@MSG[0]
+	sha256rnds2	$CDGH,$ABEF
+
+	movdqa		2*32-0x80($Tbl),$Wi
+	paddd		@MSG[2],$Wi
+	pshufb		$TMP,@MSG[3]
+	sha256rnds2	$ABEF,$CDGH		# 8-11
+	pshufd		\$0x0e,$Wi,$Wi
+	movdqa		@MSG[3],$TMP
+	palignr		\$4,@MSG[2],$TMP
+	nop
+	paddd		$TMP,@MSG[0]
+	sha256msg1	@MSG[2],@MSG[1]
+	sha256rnds2	$CDGH,$ABEF
+
+	movdqa		3*32-0x80($Tbl),$Wi
+	paddd		@MSG[3],$Wi
+	sha256msg2	@MSG[3],@MSG[0]
+	sha256rnds2	$ABEF,$CDGH		# 12-15
+	pshufd		\$0x0e,$Wi,$Wi
+	movdqa		@MSG[0],$TMP
+	palignr		\$4,@MSG[3],$TMP
+	nop
+	paddd		$TMP,@MSG[1]
+	sha256msg1	@MSG[3],@MSG[2]
+	sha256rnds2	$CDGH,$ABEF
+___
+for($i=4;$i<16-3;$i++) {
+$code.=<<___;
+	movdqa		$i*32-0x80($Tbl),$Wi
+	paddd		@MSG[0],$Wi
+	sha256msg2	@MSG[0],@MSG[1]
+	sha256rnds2	$ABEF,$CDGH		# 16-19...
+	pshufd		\$0x0e,$Wi,$Wi
+	movdqa		@MSG[1],$TMP
+	palignr		\$4,@MSG[0],$TMP
+	nop
+	paddd		$TMP,@MSG[2]
+	sha256msg1	@MSG[0],@MSG[3]
+	sha256rnds2	$CDGH,$ABEF
+___
+	push(@MSG,shift(@MSG));
+}
+$code.=<<___;
+	movdqa		13*32-0x80($Tbl),$Wi
+	paddd		@MSG[0],$Wi
+	sha256msg2	@MSG[0],@MSG[1]
+	sha256rnds2	$ABEF,$CDGH		# 52-55
+	pshufd		\$0x0e,$Wi,$Wi
+	movdqa		@MSG[1],$TMP
+	palignr		\$4,@MSG[0],$TMP
+	sha256rnds2	$CDGH,$ABEF
+	paddd		$TMP,@MSG[2]
+
+	movdqa		14*32-0x80($Tbl),$Wi
+	paddd		@MSG[1],$Wi
+	sha256rnds2	$ABEF,$CDGH		# 56-59
+	pshufd		\$0x0e,$Wi,$Wi
+	sha256msg2	@MSG[1],@MSG[2]
+	movdqa		$BSWAP,$TMP
+	sha256rnds2	$CDGH,$ABEF
+
+	movdqa		15*32-0x80($Tbl),$Wi
+	paddd		@MSG[2],$Wi
+	nop
+	sha256rnds2	$ABEF,$CDGH		# 60-63
+	pshufd		\$0x0e,$Wi,$Wi
+	dec		$num
+	nop
+	sha256rnds2	$CDGH,$ABEF
+
+	paddd		$CDGH_SAVE,$CDGH
+	paddd		$ABEF_SAVE,$ABEF
+	jnz		.Loop_shaext
+
+	pshufd		\$0xb1,$CDGH,$CDGH	# DCHG
+	pshufd		\$0x1b,$ABEF,$TMP	# FEBA
+	pshufd		\$0xb1,$ABEF,$ABEF	# BAFE
+	punpckhqdq	$CDGH,$ABEF		# DCBA
+	palignr		\$8,$TMP,$CDGH		# HGFE
+
+	movdqu	$ABEF,($ctx)
+	movdqu	$CDGH,16($ctx)
+___
+$code.=<<___ if ($win64);
+	movaps	-8-5*16(%rax),%xmm6
+	movaps	-8-4*16(%rax),%xmm7
+	movaps	-8-3*16(%rax),%xmm8
+	movaps	-8-2*16(%rax),%xmm9
+	movaps	-8-1*16(%rax),%xmm10
+	mov	%rax,%rsp
+.Lepilogue_shaext:
+___
+$code.=<<___;
+	ret
+.size	sha256_block_data_order_shaext,.-sha256_block_data_order_shaext
+___
+}}}
+{{{
+
+my $a4=$T1;
+my ($a,$b,$c,$d,$e,$f,$g,$h);
+
+sub AUTOLOAD()		# thunk [simplified] 32-bit style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
+  my $arg = pop;
+    $arg = "\$$arg" if ($arg*1 eq $arg);
+    $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
+}
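+# So a call such as &ror($a0,5) appends "\tror\t\$5,%r13d\n" to $code
+# (for the SHA256 register assignment): numeric arguments get a '$'
+# immediate prefix and operands come out in AT&T (reversed) order.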
+
+sub body_00_15 () {
+	(
+	'($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'.
+
+	'&ror	($a0,$Sigma1[2]-$Sigma1[1])',
+	'&mov	($a,$a1)',
+	'&mov	($a4,$f)',
+
+	'&ror	($a1,$Sigma0[2]-$Sigma0[1])',
+	'&xor	($a0,$e)',
+	'&xor	($a4,$g)',			# f^g
+
+	'&ror	($a0,$Sigma1[1]-$Sigma1[0])',
+	'&xor	($a1,$a)',
+	'&and	($a4,$e)',			# (f^g)&e
+
+	'&xor	($a0,$e)',
+	'&add	($h,$SZ*($i&15)."(%rsp)")',	# h+=X[i]+K[i]
+	'&mov	($a2,$a)',
+
+	'&xor	($a4,$g)',			# Ch(e,f,g)=((f^g)&e)^g
+	'&ror	($a1,$Sigma0[1]-$Sigma0[0])',
+	'&xor	($a2,$b)',			# a^b, b^c in next round
+
+	'&add	($h,$a4)',			# h+=Ch(e,f,g)
+	'&ror	($a0,$Sigma1[0])',		# Sigma1(e)
+	'&and	($a3,$a2)',			# (b^c)&(a^b)
+
+	'&xor	($a1,$a)',
+	'&add	($h,$a0)',			# h+=Sigma1(e)
+	'&xor	($a3,$b)',			# Maj(a,b,c)=Ch(a^b,c,b)
+
+	'&ror	($a1,$Sigma0[0])',		# Sigma0(a)
+	'&add	($d,$h)',			# d+=h
+	'&add	($h,$a3)',			# h+=Maj(a,b,c)
+
+	'&mov	($a0,$d)',
+	'&add	($a1,$h);'.			# h+=Sigma0(a)
+	'($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;'
+	);
+}
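+# body_00_15 returns the round as a list of instruction strings rather
+# than emitting them directly: the SIMD code paths below eval() the
+# strings a few at a time between vector instructions, interleaving
+# the integer rounds with the message-schedule update to hide latency.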
+
+######################################################################
+# SSSE3 code path
+#
+if ($SZ==4) {	# SHA256 only
+my @X = map("%xmm$_",(0..3));
+my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%xmm$_",(4..9));
+
+$code.=<<___;
+.type	${func}_ssse3,\@function,3
+.align	64
+${func}_ssse3:
+.Lssse3_shortcut:
+	mov	%rsp,%rax		# copy %rsp
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	shl	\$4,%rdx		# num*16
+	sub	\$`$framesz+$win64*16*4`,%rsp
+	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
+	and	\$-64,%rsp		# align stack frame
+	mov	$ctx,$_ctx		# save ctx, 1st arg
+	mov	$inp,$_inp		# save inp, 2nd arg
+	mov	%rdx,$_end		# save end pointer, "3rd" arg
+	mov	%rax,$_rsp		# save copy of %rsp
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,16*$SZ+32(%rsp)
+	movaps	%xmm7,16*$SZ+48(%rsp)
+	movaps	%xmm8,16*$SZ+64(%rsp)
+	movaps	%xmm9,16*$SZ+80(%rsp)
+___
+$code.=<<___;
+.Lprologue_ssse3:
+
+	mov	$SZ*0($ctx),$A
+	mov	$SZ*1($ctx),$B
+	mov	$SZ*2($ctx),$C
+	mov	$SZ*3($ctx),$D
+	mov	$SZ*4($ctx),$E
+	mov	$SZ*5($ctx),$F
+	mov	$SZ*6($ctx),$G
+	mov	$SZ*7($ctx),$H
+___
+
+$code.=<<___;
+	#movdqa	$TABLE+`$SZ*2*$rounds`+32(%rip),$t4
+	#movdqa	$TABLE+`$SZ*2*$rounds`+64(%rip),$t5
+	jmp	.Lloop_ssse3
+.align	16
+.Lloop_ssse3:
+	movdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
+	movdqu	0x00($inp),@X[0]
+	movdqu	0x10($inp),@X[1]
+	movdqu	0x20($inp),@X[2]
+	pshufb	$t3,@X[0]
+	movdqu	0x30($inp),@X[3]
+	lea	$TABLE(%rip),$Tbl
+	pshufb	$t3,@X[1]
+	movdqa	0x00($Tbl),$t0
+	movdqa	0x20($Tbl),$t1
+	pshufb	$t3,@X[2]
+	paddd	@X[0],$t0
+	movdqa	0x40($Tbl),$t2
+	pshufb	$t3,@X[3]
+	movdqa	0x60($Tbl),$t3
+	paddd	@X[1],$t1
+	paddd	@X[2],$t2
+	paddd	@X[3],$t3
+	movdqa	$t0,0x00(%rsp)
+	mov	$A,$a1
+	movdqa	$t1,0x10(%rsp)
+	mov	$B,$a3
+	movdqa	$t2,0x20(%rsp)
+	xor	$C,$a3			# magic
+	movdqa	$t3,0x30(%rsp)
+	mov	$E,$a0
+	jmp	.Lssse3_00_47
+
+.align	16
+.Lssse3_00_47:
+	sub	\$`-16*2*$SZ`,$Tbl	# size optimization
+___
+sub Xupdate_256_SSSE3 () {
+	(
+	'&movdqa	($t0,@X[1]);',
+	'&movdqa	($t3,@X[3])',
+	'&palignr	($t0,@X[0],$SZ)',	# X[1..4]
+	 '&palignr	($t3,@X[2],$SZ);',	# X[9..12]
+	'&movdqa	($t1,$t0)',
+	'&movdqa	($t2,$t0);',
+	'&psrld		($t0,$sigma0[2])',
+	 '&paddd	(@X[0],$t3);',		# X[0..3] += X[9..12]
+	'&psrld		($t2,$sigma0[0])',
+	 '&pshufd	($t3,@X[3],0b11111010)',# X[14..15]
+	'&pslld		($t1,8*$SZ-$sigma0[1]);'.
+	'&pxor		($t0,$t2)',
+	'&psrld		($t2,$sigma0[1]-$sigma0[0]);'.
+	'&pxor		($t0,$t1)',
+	'&pslld		($t1,$sigma0[1]-$sigma0[0]);'.
+	'&pxor		($t0,$t2);',
+	 '&movdqa	($t2,$t3)',
+	'&pxor		($t0,$t1);',		# sigma0(X[1..4])
+	 '&psrld	($t3,$sigma1[2])',
+	'&paddd		(@X[0],$t0);',		# X[0..3] += sigma0(X[1..4])
+	 '&psrlq	($t2,$sigma1[0])',
+	 '&pxor		($t3,$t2);',
+	 '&psrlq	($t2,$sigma1[1]-$sigma1[0])',
+	 '&pxor		($t3,$t2)',
+	 '&pshufb	($t3,$t4)',		# sigma1(X[14..15])
+	'&paddd		(@X[0],$t3)',		# X[0..1] += sigma1(X[14..15])
+	 '&pshufd	($t3,@X[0],0b01010000)',# X[16..17]
+	 '&movdqa	($t2,$t3);',
+	 '&psrld	($t3,$sigma1[2])',
+	 '&psrlq	($t2,$sigma1[0])',
+	 '&pxor		($t3,$t2);',
+	 '&psrlq	($t2,$sigma1[1]-$sigma1[0])',
+	 '&pxor		($t3,$t2);',
+	'&movdqa	($t2,16*2*$j."($Tbl)")',
+	 '&pshufb	($t3,$t5)',
+	'&paddd		(@X[0],$t3)'		# X[2..3] += sigma1(X[16..17])
+	);
+}
+
+sub SSSE3_256_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body);	# 104 instructions
+
+    if (0) {
+	foreach (Xupdate_256_SSSE3()) {		# 36 instructions
+	    eval;
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	}
+    } else {			# squeeze extra 4% on Westmere and 19% on Atom
+	  eval(shift(@insns));	#@
+	&movdqa		($t0,@X[1]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&movdqa		($t3,@X[3]);
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	&palignr	($t0,@X[0],$SZ);	# X[1..4]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &palignr	($t3,@X[2],$SZ);	# X[9..12]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	&movdqa		($t1,$t0);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&movdqa		($t2,$t0);
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	&psrld		($t0,$sigma0[2]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &paddd		(@X[0],$t3);		# X[0..3] += X[9..12]
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	&psrld		($t2,$sigma0[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &pshufd	($t3,@X[3],0b11111010);	# X[14..15]
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	&pslld		($t1,8*$SZ-$sigma0[1]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&pxor		($t0,$t2);
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	&psrld		($t2,$sigma0[1]-$sigma0[0]);
+	  eval(shift(@insns));
+	&pxor		($t0,$t1);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&pslld		($t1,$sigma0[1]-$sigma0[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&pxor		($t0,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	 &movdqa	($t2,$t3);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&pxor		($t0,$t1);		# sigma0(X[1..4])
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &psrld		($t3,$sigma1[2]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&paddd		(@X[0],$t0);		# X[0..3] += sigma0(X[1..4])
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	 &psrlq		($t2,$sigma1[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &pxor		($t3,$t2);
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	 &psrlq		($t2,$sigma1[1]-$sigma1[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &pxor		($t3,$t2);
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 #&pshufb	($t3,$t4);		# sigma1(X[14..15])
+	 &pshufd	($t3,$t3,0b10000000);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &psrldq	($t3,8);
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	&paddd		(@X[0],$t3);		# X[0..1] += sigma1(X[14..15])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &pshufd	($t3,@X[0],0b01010000);	# X[16..17]
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	 &movdqa	($t2,$t3);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &psrld		($t3,$sigma1[2]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	 &psrlq		($t2,$sigma1[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &pxor		($t3,$t2);
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	 &psrlq		($t2,$sigma1[1]-$sigma1[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &pxor		($t3,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));	#@
+	 #&pshufb	($t3,$t5);
+	 &pshufd	($t3,$t3,0b00001000);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&movdqa		($t2,16*2*$j."($Tbl)");
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	 &pslldq	($t3,8);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&paddd		(@X[0],$t3);		# X[2..3] += sigma1(X[16..17])
+	  eval(shift(@insns));	#@
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+    }
+	&paddd		($t2,@X[0]);
+	  foreach (@insns) { eval; }		# remaining instructions
+	&movdqa		(16*$j."(%rsp)",$t2);
+}
+
+    for ($i=0,$j=0; $j<4; $j++) {
+	&SSSE3_256_00_47($j,\&body_00_15,@X);
+	push(@X,shift(@X));			# rotate(@X)
+    }
+	&cmpb	($SZ-1+16*2*$SZ."($Tbl)",0);
+	&jne	(".Lssse3_00_47");
+
+    for ($i=0; $i<16; ) {
+	foreach(body_00_15()) { eval; }
+    }
+$code.=<<___;
+	mov	$_ctx,$ctx
+	mov	$a1,$A
+
+	add	$SZ*0($ctx),$A
+	lea	16*$SZ($inp),$inp
+	add	$SZ*1($ctx),$B
+	add	$SZ*2($ctx),$C
+	add	$SZ*3($ctx),$D
+	add	$SZ*4($ctx),$E
+	add	$SZ*5($ctx),$F
+	add	$SZ*6($ctx),$G
+	add	$SZ*7($ctx),$H
+
+	cmp	$_end,$inp
+
+	mov	$A,$SZ*0($ctx)
+	mov	$B,$SZ*1($ctx)
+	mov	$C,$SZ*2($ctx)
+	mov	$D,$SZ*3($ctx)
+	mov	$E,$SZ*4($ctx)
+	mov	$F,$SZ*5($ctx)
+	mov	$G,$SZ*6($ctx)
+	mov	$H,$SZ*7($ctx)
+	jb	.Lloop_ssse3
+
+	mov	$_rsp,%rsi
+___
+$code.=<<___ if ($win64);
+	movaps	16*$SZ+32(%rsp),%xmm6
+	movaps	16*$SZ+48(%rsp),%xmm7
+	movaps	16*$SZ+64(%rsp),%xmm8
+	movaps	16*$SZ+80(%rsp),%xmm9
+___
+$code.=<<___;
+	mov	-48(%rsi),%r15
+	mov	-40(%rsi),%r14
+	mov	-32(%rsi),%r13
+	mov	-24(%rsi),%r12
+	mov	-16(%rsi),%rbp
+	mov	-8(%rsi),%rbx
+	lea	(%rsi),%rsp
+.Lepilogue_ssse3:
+	ret
+.size	${func}_ssse3,.-${func}_ssse3
+___
+}
+
+if ($avx) {{
+######################################################################
+# XOP code path
+#
+if ($SZ==8) {	# SHA512 only
+$code.=<<___;
+.type	${func}_xop,\@function,3
+.align	64
+${func}_xop:
+.Lxop_shortcut:
+	mov	%rsp,%rax		# copy %rsp
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	shl	\$4,%rdx		# num*16
+	sub	\$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
+	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
+	and	\$-64,%rsp		# align stack frame
+	mov	$ctx,$_ctx		# save ctx, 1st arg
+	mov	$inp,$_inp		# save inp, 2nd arg
+	mov	%rdx,$_end		# save end pointer, "3rd" arg
+	mov	%rax,$_rsp		# save copy of %rsp
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,16*$SZ+32(%rsp)
+	movaps	%xmm7,16*$SZ+48(%rsp)
+	movaps	%xmm8,16*$SZ+64(%rsp)
+	movaps	%xmm9,16*$SZ+80(%rsp)
+___
+$code.=<<___ if ($win64 && $SZ>4);
+	movaps	%xmm10,16*$SZ+96(%rsp)
+	movaps	%xmm11,16*$SZ+112(%rsp)
+___
+$code.=<<___;
+.Lprologue_xop:
+
+	vzeroupper
+	mov	$SZ*0($ctx),$A
+	mov	$SZ*1($ctx),$B
+	mov	$SZ*2($ctx),$C
+	mov	$SZ*3($ctx),$D
+	mov	$SZ*4($ctx),$E
+	mov	$SZ*5($ctx),$F
+	mov	$SZ*6($ctx),$G
+	mov	$SZ*7($ctx),$H
+	jmp	.Lloop_xop
+___
+					if ($SZ==4) {	# SHA256
+    my @X = map("%xmm$_",(0..3));
+    my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
+
+$code.=<<___;
+.align	16
+.Lloop_xop:
+	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
+	vmovdqu	0x00($inp),@X[0]
+	vmovdqu	0x10($inp),@X[1]
+	vmovdqu	0x20($inp),@X[2]
+	vmovdqu	0x30($inp),@X[3]
+	vpshufb	$t3,@X[0],@X[0]
+	lea	$TABLE(%rip),$Tbl
+	vpshufb	$t3,@X[1],@X[1]
+	vpshufb	$t3,@X[2],@X[2]
+	vpaddd	0x00($Tbl),@X[0],$t0
+	vpshufb	$t3,@X[3],@X[3]
+	vpaddd	0x20($Tbl),@X[1],$t1
+	vpaddd	0x40($Tbl),@X[2],$t2
+	vpaddd	0x60($Tbl),@X[3],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	mov	$A,$a1
+	vmovdqa	$t1,0x10(%rsp)
+	mov	$B,$a3
+	vmovdqa	$t2,0x20(%rsp)
+	xor	$C,$a3			# magic
+	vmovdqa	$t3,0x30(%rsp)
+	mov	$E,$a0
+	jmp	.Lxop_00_47
+
+.align	16
+.Lxop_00_47:
+	sub	\$`-16*2*$SZ`,$Tbl	# size optimization
+___
+sub XOP_256_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body);	# 104 instructions
+
+	&vpalignr	($t0,@X[1],@X[0],$SZ);	# X[1..4]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpalignr	($t3,@X[3],@X[2],$SZ);	# X[9..12]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vprotd		($t1,$t0,8*$SZ-$sigma0[1]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpsrld		($t0,$t0,$sigma0[2]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpaddd	(@X[0],@X[0],$t3);	# X[0..3] += X[9..12]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vprotd		($t2,$t1,$sigma0[1]-$sigma0[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpxor		($t0,$t0,$t1);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vprotd	($t3,@X[3],8*$SZ-$sigma1[1]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpxor		($t0,$t0,$t2);		# sigma0(X[1..4])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpsrld	($t2,@X[3],$sigma1[2]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpaddd		(@X[0],@X[0],$t0);	# X[0..3] += sigma0(X[1..4])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vprotd	($t1,$t3,$sigma1[1]-$sigma1[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpxor		($t3,$t3,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpxor		($t3,$t3,$t1);		# sigma1(X[14..15])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpsrldq	($t3,$t3,8);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpaddd		(@X[0],@X[0],$t3);	# X[0..1] += sigma1(X[14..15])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vprotd	($t3,@X[0],8*$SZ-$sigma1[1]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpsrld	($t2,@X[0],$sigma1[2]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vprotd	($t1,$t3,$sigma1[1]-$sigma1[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpxor		($t3,$t3,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpxor		($t3,$t3,$t1);		# sigma1(X[16..17])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpslldq	($t3,$t3,8);		# 22 instructions
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpaddd		(@X[0],@X[0],$t3);	# X[2..3] += sigma1(X[16..17])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpaddd		($t2,@X[0],16*2*$j."($Tbl)");
+	  foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	(16*$j."(%rsp)",$t2);
+}
+
+    for ($i=0,$j=0; $j<4; $j++) {
+	&XOP_256_00_47($j,\&body_00_15,@X);
+	push(@X,shift(@X));			# rotate(@X)
+    }
+	&cmpb	($SZ-1+16*2*$SZ."($Tbl)",0);
+	&jne	(".Lxop_00_47");
+
+    for ($i=0; $i<16; ) {
+	foreach(body_00_15()) { eval; }
+    }
+
+					} else {	# SHA512
+    my @X = map("%xmm$_",(0..7));
+    my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11));
+
+$code.=<<___;
+.align	16
+.Lloop_xop:
+	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
+	vmovdqu	0x00($inp),@X[0]
+	lea	$TABLE+0x80(%rip),$Tbl	# size optimization
+	vmovdqu	0x10($inp),@X[1]
+	vmovdqu	0x20($inp),@X[2]
+	vpshufb	$t3,@X[0],@X[0]
+	vmovdqu	0x30($inp),@X[3]
+	vpshufb	$t3,@X[1],@X[1]
+	vmovdqu	0x40($inp),@X[4]
+	vpshufb	$t3,@X[2],@X[2]
+	vmovdqu	0x50($inp),@X[5]
+	vpshufb	$t3,@X[3],@X[3]
+	vmovdqu	0x60($inp),@X[6]
+	vpshufb	$t3,@X[4],@X[4]
+	vmovdqu	0x70($inp),@X[7]
+	vpshufb	$t3,@X[5],@X[5]
+	vpaddq	-0x80($Tbl),@X[0],$t0
+	vpshufb	$t3,@X[6],@X[6]
+	vpaddq	-0x60($Tbl),@X[1],$t1
+	vpshufb	$t3,@X[7],@X[7]
+	vpaddq	-0x40($Tbl),@X[2],$t2
+	vpaddq	-0x20($Tbl),@X[3],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	vpaddq	0x00($Tbl),@X[4],$t0
+	vmovdqa	$t1,0x10(%rsp)
+	vpaddq	0x20($Tbl),@X[5],$t1
+	vmovdqa	$t2,0x20(%rsp)
+	vpaddq	0x40($Tbl),@X[6],$t2
+	vmovdqa	$t3,0x30(%rsp)
+	vpaddq	0x60($Tbl),@X[7],$t3
+	vmovdqa	$t0,0x40(%rsp)
+	mov	$A,$a1
+	vmovdqa	$t1,0x50(%rsp)
+	mov	$B,$a3
+	vmovdqa	$t2,0x60(%rsp)
+	xor	$C,$a3			# magic
+	vmovdqa	$t3,0x70(%rsp)
+	mov	$E,$a0
+	jmp	.Lxop_00_47
+
+.align	16
+.Lxop_00_47:
+	add	\$`16*2*$SZ`,$Tbl
+___
+sub XOP_512_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body);			# 52 instructions
+
+	&vpalignr	($t0,@X[1],@X[0],$SZ);	# X[1..2]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpalignr	($t3,@X[5],@X[4],$SZ);	# X[9..10]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vprotq		($t1,$t0,8*$SZ-$sigma0[1]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpsrlq		($t0,$t0,$sigma0[2]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpaddq	(@X[0],@X[0],$t3);	# X[0..1] += X[9..10]
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vprotq		($t2,$t1,$sigma0[1]-$sigma0[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpxor		($t0,$t0,$t1);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vprotq	($t3,@X[7],8*$SZ-$sigma1[1]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpxor		($t0,$t0,$t2);		# sigma0(X[1..2])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpsrlq	($t2,@X[7],$sigma1[2]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpaddq		(@X[0],@X[0],$t0);	# X[0..1] += sigma0(X[1..2])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vprotq	($t1,$t3,$sigma1[1]-$sigma1[0]);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpxor		($t3,$t3,$t2);
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	 &vpxor		($t3,$t3,$t1);		# sigma1(X[14..15])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpaddq		(@X[0],@X[0],$t3);	# X[0..1] += sigma1(X[14..15])
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	  eval(shift(@insns));
+	&vpaddq		($t2,@X[0],16*2*$j-0x80."($Tbl)");
+	  foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	(16*$j."(%rsp)",$t2);
+}
+
+    for ($i=0,$j=0; $j<8; $j++) {
+	&XOP_512_00_47($j,\&body_00_15,@X);
+	push(@X,shift(@X));			# rotate(@X)
+    }
+	&cmpb	($SZ-1+16*2*$SZ-0x80."($Tbl)",0);
+	&jne	(".Lxop_00_47");
+
+    for ($i=0; $i<16; ) {
+	foreach(body_00_15()) { eval; }
+    }
+}
+$code.=<<___;
+	mov	$_ctx,$ctx
+	mov	$a1,$A
+
+	add	$SZ*0($ctx),$A
+	lea	16*$SZ($inp),$inp
+	add	$SZ*1($ctx),$B
+	add	$SZ*2($ctx),$C
+	add	$SZ*3($ctx),$D
+	add	$SZ*4($ctx),$E
+	add	$SZ*5($ctx),$F
+	add	$SZ*6($ctx),$G
+	add	$SZ*7($ctx),$H
+
+	cmp	$_end,$inp
+
+	mov	$A,$SZ*0($ctx)
+	mov	$B,$SZ*1($ctx)
+	mov	$C,$SZ*2($ctx)
+	mov	$D,$SZ*3($ctx)
+	mov	$E,$SZ*4($ctx)
+	mov	$F,$SZ*5($ctx)
+	mov	$G,$SZ*6($ctx)
+	mov	$H,$SZ*7($ctx)
+	jb	.Lloop_xop
+
+	mov	$_rsp,%rsi
+	vzeroupper
+___
+$code.=<<___ if ($win64);
+	movaps	16*$SZ+32(%rsp),%xmm6
+	movaps	16*$SZ+48(%rsp),%xmm7
+	movaps	16*$SZ+64(%rsp),%xmm8
+	movaps	16*$SZ+80(%rsp),%xmm9
+___
+$code.=<<___ if ($win64 && $SZ>4);
+	movaps	16*$SZ+96(%rsp),%xmm10
+	movaps	16*$SZ+112(%rsp),%xmm11
+___
+$code.=<<___;
+	mov	-48(%rsi),%r15
+	mov	-40(%rsi),%r14
+	mov	-32(%rsi),%r13
+	mov	-24(%rsi),%r12
+	mov	-16(%rsi),%rbp
+	mov	-8(%rsi),%rbx
+	lea	(%rsi),%rsp
+.Lepilogue_xop:
+	ret
+.size	${func}_xop,.-${func}_xop
+___
+}
+######################################################################
+# AVX+shrd code path
+#
+local *ror = sub { &shrd(@_[0],@_) };
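+# With identical source and destination operands, shrd is a rotate
+# right, so &ror($a0,$n) now emits "shrd $n,$a0,$a0"; per footnote (**)
+# above, this substitution accounts for a fair share of the AVX
+# improvement on Sandy Bridge.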
+
+$code.=<<___;
+.type	${func}_avx,\@function,3
+.align	64
+${func}_avx:
+.Lavx_shortcut:
+	mov	%rsp,%rax		# copy %rsp
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	shl	\$4,%rdx		# num*16
+	sub	\$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
+	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
+	and	\$-64,%rsp		# align stack frame
+	mov	$ctx,$_ctx		# save ctx, 1st arg
+	mov	$inp,$_inp		# save inp, 2nd arg
+	mov	%rdx,$_end		# save end pointer, "3rd" arg
+	mov	%rax,$_rsp		# save copy of %rsp
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,16*$SZ+32(%rsp)
+	movaps	%xmm7,16*$SZ+48(%rsp)
+	movaps	%xmm8,16*$SZ+64(%rsp)
+	movaps	%xmm9,16*$SZ+80(%rsp)
+___
+$code.=<<___ if ($win64 && $SZ>4);
+	movaps	%xmm10,16*$SZ+96(%rsp)
+	movaps	%xmm11,16*$SZ+112(%rsp)
+___
+$code.=<<___;
+.Lprologue_avx:
+
+	vzeroupper
+	mov	$SZ*0($ctx),$A
+	mov	$SZ*1($ctx),$B
+	mov	$SZ*2($ctx),$C
+	mov	$SZ*3($ctx),$D
+	mov	$SZ*4($ctx),$E
+	mov	$SZ*5($ctx),$F
+	mov	$SZ*6($ctx),$G
+	mov	$SZ*7($ctx),$H
+___
+					if ($SZ==4) {	# SHA256
+    my @X = map("%xmm$_",(0..3));
+    my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%xmm$_",(4..9));
+
+$code.=<<___;
+	vmovdqa	$TABLE+`$SZ*2*$rounds`+32(%rip),$t4
+	vmovdqa	$TABLE+`$SZ*2*$rounds`+64(%rip),$t5
+	jmp	.Lloop_avx
+.align	16
+.Lloop_avx:
+	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
+	vmovdqu	0x00($inp),@X[0]
+	vmovdqu	0x10($inp),@X[1]
+	vmovdqu	0x20($inp),@X[2]
+	vmovdqu	0x30($inp),@X[3]
+	vpshufb	$t3,@X[0],@X[0]
+	lea	$TABLE(%rip),$Tbl
+	vpshufb	$t3,@X[1],@X[1]
+	vpshufb	$t3,@X[2],@X[2]
+	vpaddd	0x00($Tbl),@X[0],$t0
+	vpshufb	$t3,@X[3],@X[3]
+	vpaddd	0x20($Tbl),@X[1],$t1
+	vpaddd	0x40($Tbl),@X[2],$t2
+	vpaddd	0x60($Tbl),@X[3],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	mov	$A,$a1
+	vmovdqa	$t1,0x10(%rsp)
+	mov	$B,$a3
+	vmovdqa	$t2,0x20(%rsp)
+	xor	$C,$a3			# magic
+	vmovdqa	$t3,0x30(%rsp)
+	mov	$E,$a0
+	jmp	.Lavx_00_47
+
+.align	16
+.Lavx_00_47:
+	sub	\$`-16*2*$SZ`,$Tbl	# size optimization
+___
+sub Xupdate_256_AVX () {
+	(
+	'&vpalignr	($t0,@X[1],@X[0],$SZ)',	# X[1..4]
+	 '&vpalignr	($t3,@X[3],@X[2],$SZ)',	# X[9..12]
+	'&vpsrld	($t2,$t0,$sigma0[0]);',
+	 '&vpaddd	(@X[0],@X[0],$t3)',	# X[0..3] += X[9..12]
+	'&vpsrld	($t3,$t0,$sigma0[2])',
+	'&vpslld	($t1,$t0,8*$SZ-$sigma0[1]);',
+	'&vpxor		($t0,$t3,$t2)',
+	 '&vpshufd	($t3,@X[3],0b11111010)',# X[14..15]
+	'&vpsrld	($t2,$t2,$sigma0[1]-$sigma0[0]);',
+	'&vpxor		($t0,$t0,$t1)',
+	'&vpslld	($t1,$t1,$sigma0[1]-$sigma0[0]);',
+	'&vpxor		($t0,$t0,$t2)',
+	 '&vpsrld	($t2,$t3,$sigma1[2]);',
+	'&vpxor		($t0,$t0,$t1)',		# sigma0(X[1..4])
+	 '&vpsrlq	($t3,$t3,$sigma1[0]);',
+	'&vpaddd	(@X[0],@X[0],$t0)',	# X[0..3] += sigma0(X[1..4])
+	 '&vpxor	($t2,$t2,$t3);',
+	 '&vpsrlq	($t3,$t3,$sigma1[1]-$sigma1[0])',
+	 '&vpxor	($t2,$t2,$t3)',
+	 '&vpshufb	($t2,$t2,$t4)',		# sigma1(X[14..15])
+	'&vpaddd	(@X[0],@X[0],$t2)',	# X[0..1] += sigma1(X[14..15])
+	 '&vpshufd	($t3,@X[0],0b01010000)',# X[16..17]
+	 '&vpsrld	($t2,$t3,$sigma1[2])',
+	 '&vpsrlq	($t3,$t3,$sigma1[0])',
+	 '&vpxor	($t2,$t2,$t3);',
+	 '&vpsrlq	($t3,$t3,$sigma1[1]-$sigma1[0])',
+	 '&vpxor	($t2,$t2,$t3)',
+	 '&vpshufb	($t2,$t2,$t5)',
+	'&vpaddd	(@X[0],@X[0],$t2)'	# X[2..3] += sigma1(X[16..17])
+	);
+}
+
+sub AVX_256_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body);	# 104 instructions
+
+	foreach (Xupdate_256_AVX()) {		# 29 instructions
+	    eval;
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	}
+	&vpaddd		($t2,@X[0],16*2*$j."($Tbl)");
+	  foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	(16*$j."(%rsp)",$t2);
+}
+
+    for ($i=0,$j=0; $j<4; $j++) {
+	&AVX_256_00_47($j,\&body_00_15,@X);
+	push(@X,shift(@X));			# rotate(@X)
+    }
+	&cmpb	($SZ-1+16*2*$SZ."($Tbl)",0);
+	&jne	(".Lavx_00_47");
+
+    for ($i=0; $i<16; ) {
+	foreach(body_00_15()) { eval; }
+    }
+
+					} else {	# SHA512
+    my @X = map("%xmm$_",(0..7));
+    my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11));
+
+$code.=<<___;
+	jmp	.Lloop_avx
+.align	16
+.Lloop_avx:
+	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
+	vmovdqu	0x00($inp),@X[0]
+	lea	$TABLE+0x80(%rip),$Tbl	# size optimization
+	vmovdqu	0x10($inp),@X[1]
+	vmovdqu	0x20($inp),@X[2]
+	vpshufb	$t3,@X[0],@X[0]
+	vmovdqu	0x30($inp),@X[3]
+	vpshufb	$t3,@X[1],@X[1]
+	vmovdqu	0x40($inp),@X[4]
+	vpshufb	$t3,@X[2],@X[2]
+	vmovdqu	0x50($inp),@X[5]
+	vpshufb	$t3,@X[3],@X[3]
+	vmovdqu	0x60($inp),@X[6]
+	vpshufb	$t3,@X[4],@X[4]
+	vmovdqu	0x70($inp),@X[7]
+	vpshufb	$t3,@X[5],@X[5]
+	vpaddq	-0x80($Tbl),@X[0],$t0
+	vpshufb	$t3,@X[6],@X[6]
+	vpaddq	-0x60($Tbl),@X[1],$t1
+	vpshufb	$t3,@X[7],@X[7]
+	vpaddq	-0x40($Tbl),@X[2],$t2
+	vpaddq	-0x20($Tbl),@X[3],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	vpaddq	0x00($Tbl),@X[4],$t0
+	vmovdqa	$t1,0x10(%rsp)
+	vpaddq	0x20($Tbl),@X[5],$t1
+	vmovdqa	$t2,0x20(%rsp)
+	vpaddq	0x40($Tbl),@X[6],$t2
+	vmovdqa	$t3,0x30(%rsp)
+	vpaddq	0x60($Tbl),@X[7],$t3
+	vmovdqa	$t0,0x40(%rsp)
+	mov	$A,$a1
+	vmovdqa	$t1,0x50(%rsp)
+	mov	$B,$a3
+	vmovdqa	$t2,0x60(%rsp)
+	xor	$C,$a3			# magic
+	vmovdqa	$t3,0x70(%rsp)
+	mov	$E,$a0
+	jmp	.Lavx_00_47
+
+.align	16
+.Lavx_00_47:
+	add	\$`16*2*$SZ`,$Tbl
+___
+sub Xupdate_512_AVX () {
+	(
+	'&vpalignr	($t0,@X[1],@X[0],$SZ)',	# X[1..2]
+	 '&vpalignr	($t3,@X[5],@X[4],$SZ)',	# X[9..10]
+	'&vpsrlq	($t2,$t0,$sigma0[0])',
+	 '&vpaddq	(@X[0],@X[0],$t3);',	# X[0..1] += X[9..10]
+	'&vpsrlq	($t3,$t0,$sigma0[2])',
+	'&vpsllq	($t1,$t0,8*$SZ-$sigma0[1]);',
+	 '&vpxor	($t0,$t3,$t2)',
+	'&vpsrlq	($t2,$t2,$sigma0[1]-$sigma0[0]);',
+	 '&vpxor	($t0,$t0,$t1)',
+	'&vpsllq	($t1,$t1,$sigma0[1]-$sigma0[0]);',
+	 '&vpxor	($t0,$t0,$t2)',
+	 '&vpsrlq	($t3,@X[7],$sigma1[2]);',
+	'&vpxor		($t0,$t0,$t1)',		# sigma0(X[1..2])
+	 '&vpsllq	($t2,@X[7],8*$SZ-$sigma1[1]);',
+	'&vpaddq	(@X[0],@X[0],$t0)',	# X[0..1] += sigma0(X[1..2])
+	 '&vpsrlq	($t1,@X[7],$sigma1[0]);',
+	 '&vpxor	($t3,$t3,$t2)',
+	 '&vpsllq	($t2,$t2,$sigma1[1]-$sigma1[0]);',
+	 '&vpxor	($t3,$t3,$t1)',
+	 '&vpsrlq	($t1,$t1,$sigma1[1]-$sigma1[0]);',
+	 '&vpxor	($t3,$t3,$t2)',
+	 '&vpxor	($t3,$t3,$t1)',		# sigma1(X[14..15])
+	'&vpaddq	(@X[0],@X[0],$t3)',	# X[0..1] += sigma1(X[14..15])
+	);
+}
+
+sub AVX_512_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body);			# 52 instructions
+
+	foreach (Xupdate_512_AVX()) {		# 23 instructions
+	    eval;
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	}
+	&vpaddq		($t2,@X[0],16*2*$j-0x80."($Tbl)");
+	  foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	(16*$j."(%rsp)",$t2);
+}
+
+    for ($i=0,$j=0; $j<8; $j++) {
+	&AVX_512_00_47($j,\&body_00_15,@X);
+	push(@X,shift(@X));			# rotate(@X)
+    }
+	&cmpb	($SZ-1+16*2*$SZ-0x80."($Tbl)",0);
+	&jne	(".Lavx_00_47");
+
+    for ($i=0; $i<16; ) {
+	foreach(body_00_15()) { eval; }
+    }
+}
+$code.=<<___;
+	mov	$_ctx,$ctx
+	mov	$a1,$A
+
+	add	$SZ*0($ctx),$A
+	lea	16*$SZ($inp),$inp
+	add	$SZ*1($ctx),$B
+	add	$SZ*2($ctx),$C
+	add	$SZ*3($ctx),$D
+	add	$SZ*4($ctx),$E
+	add	$SZ*5($ctx),$F
+	add	$SZ*6($ctx),$G
+	add	$SZ*7($ctx),$H
+
+	cmp	$_end,$inp
+
+	mov	$A,$SZ*0($ctx)
+	mov	$B,$SZ*1($ctx)
+	mov	$C,$SZ*2($ctx)
+	mov	$D,$SZ*3($ctx)
+	mov	$E,$SZ*4($ctx)
+	mov	$F,$SZ*5($ctx)
+	mov	$G,$SZ*6($ctx)
+	mov	$H,$SZ*7($ctx)
+	jb	.Lloop_avx
+
+	mov	$_rsp,%rsi
+	vzeroupper
+___
+$code.=<<___ if ($win64);
+	movaps	16*$SZ+32(%rsp),%xmm6
+	movaps	16*$SZ+48(%rsp),%xmm7
+	movaps	16*$SZ+64(%rsp),%xmm8
+	movaps	16*$SZ+80(%rsp),%xmm9
+___
+$code.=<<___ if ($win64 && $SZ>4);
+	movaps	16*$SZ+96(%rsp),%xmm10
+	movaps	16*$SZ+112(%rsp),%xmm11
+___
+$code.=<<___;
+	mov	-48(%rsi),%r15
+	mov	-40(%rsi),%r14
+	mov	-32(%rsi),%r13
+	mov	-24(%rsi),%r12
+	mov	-16(%rsi),%rbp
+	mov	-8(%rsi),%rbx
+	lea	(%rsi),%rsp
+.Lepilogue_avx:
+	ret
+.size	${func}_avx,.-${func}_avx
+___
+
+if ($avx>1) {{
+######################################################################
+# AVX2+BMI code path
+#
+my $a5=$SZ==4?"%esi":"%rsi";	# zap $inp
+my $PUSH8=8*2*$SZ;
+use integer;
+
+sub bodyx_00_15 () {
+	# at start $a1 should be zero, $a3 should hold $b^$c, and $a4 a copy of $f
+	(
+	'($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'.
+
+	'&add	($h,(32*($i/(16/$SZ))+$SZ*($i%(16/$SZ)))%$PUSH8.$base)',    # h+=X[i]+K[i]
+	'&and	($a4,$e)',		# f&e
+	'&rorx	($a0,$e,$Sigma1[2])',
+	'&rorx	($a2,$e,$Sigma1[1])',
+
+	'&lea	($a,"($a,$a1)")',	# h+=Sigma0(a) from the past
+	'&lea	($h,"($h,$a4)")',
+	'&andn	($a4,$e,$g)',		# ~e&g
+	'&xor	($a0,$a2)',
+
+	'&rorx	($a1,$e,$Sigma1[0])',
+	'&lea	($h,"($h,$a4)")',	# h+=Ch(e,f,g)=(e&f)+(~e&g)
+	'&xor	($a0,$a1)',		# Sigma1(e)
+	'&mov	($a2,$a)',
+
+	'&rorx	($a4,$a,$Sigma0[2])',
+	'&lea	($h,"($h,$a0)")',	# h+=Sigma1(e)
+	'&xor	($a2,$b)',		# a^b, b^c in next round
+	'&rorx	($a1,$a,$Sigma0[1])',
+
+	'&rorx	($a0,$a,$Sigma0[0])',
+	'&lea	($d,"($d,$h)")',	# d+=h
+	'&and	($a3,$a2)',		# (b^c)&(a^b)
+	'&xor	($a1,$a4)',
+
+	'&xor	($a3,$b)',		# Maj(a,b,c)=Ch(a^b,c,b)
+	'&xor	($a1,$a0)',		# Sigma0(a)
+	'&lea	($h,"($h,$a3)");'.	# h+=Maj(a,b,c)
+	'&mov	($a4,$e)',		# copy of f in future
+
+	'($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;'
+	);
+	# and at the finish one still has to do $a+=$a1
+}
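+# Note on bodyx_00_15: Ch(e,f,g) is assembled as (e&f)+(~e&g). The two
+# terms are bitwise disjoint (per bit, at most one of them can be set),
+# so the addition equals OR and both terms can be folded into h with
+# three-operand lea instructions instead of separate or+add steps.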
+
+$code.=<<___;
+.type	${func}_avx2,\@function,3
+.align	64
+${func}_avx2:
+.Lavx2_shortcut:
+	mov	%rsp,%rax		# copy %rsp
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	sub	\$`2*$SZ*$rounds+4*8+$win64*16*($SZ==4?4:6)`,%rsp
+	shl	\$4,%rdx		# num*16
+	and	\$-256*$SZ,%rsp		# align stack frame
+	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
+	add	\$`2*$SZ*($rounds-8)`,%rsp
+	mov	$ctx,$_ctx		# save ctx, 1st arg
+	mov	$inp,$_inp		# save inp, 2nd arg
+	mov	%rdx,$_end		# save end pointer, "3rd" arg
+	mov	%rax,$_rsp		# save copy of %rsp
+___
+$code.=<<___ if ($win64);
+	movaps	%xmm6,16*$SZ+32(%rsp)
+	movaps	%xmm7,16*$SZ+48(%rsp)
+	movaps	%xmm8,16*$SZ+64(%rsp)
+	movaps	%xmm9,16*$SZ+80(%rsp)
+___
+$code.=<<___ if ($win64 && $SZ>4);
+	movaps	%xmm10,16*$SZ+96(%rsp)
+	movaps	%xmm11,16*$SZ+112(%rsp)
+___
+$code.=<<___;
+.Lprologue_avx2:
+
+	vzeroupper
+	sub	\$-16*$SZ,$inp		# inp++, size optimization
+	mov	$SZ*0($ctx),$A
+	mov	$inp,%r12		# borrow $T1
+	mov	$SZ*1($ctx),$B
+	cmp	%rdx,$inp		# $_end
+	mov	$SZ*2($ctx),$C
+	cmove	%rsp,%r12		# next block or random data
+	mov	$SZ*3($ctx),$D
+	mov	$SZ*4($ctx),$E
+	mov	$SZ*5($ctx),$F
+	mov	$SZ*6($ctx),$G
+	mov	$SZ*7($ctx),$H
+___
+					if ($SZ==4) {	# SHA256
+    my @X = map("%ymm$_",(0..3));
+    my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%ymm$_",(4..9));
+
+$code.=<<___;
+	vmovdqa	$TABLE+`$SZ*2*$rounds`+32(%rip),$t4
+	vmovdqa	$TABLE+`$SZ*2*$rounds`+64(%rip),$t5
+	jmp	.Loop_avx2
+.align	16
+.Loop_avx2:
+	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
+	vmovdqu	-16*$SZ+0($inp),%xmm0
+	vmovdqu	-16*$SZ+16($inp),%xmm1
+	vmovdqu	-16*$SZ+32($inp),%xmm2
+	vmovdqu	-16*$SZ+48($inp),%xmm3
+	#mov		$inp,$_inp	# offload $inp
+	vinserti128	\$1,(%r12),@X[0],@X[0]
+	vinserti128	\$1,16(%r12),@X[1],@X[1]
+	vpshufb		$t3,@X[0],@X[0]
+	vinserti128	\$1,32(%r12),@X[2],@X[2]
+	vpshufb		$t3,@X[1],@X[1]
+	vinserti128	\$1,48(%r12),@X[3],@X[3]
+
+	lea	$TABLE(%rip),$Tbl
+	vpshufb	$t3,@X[2],@X[2]
+	vpaddd	0x00($Tbl),@X[0],$t0
+	vpshufb	$t3,@X[3],@X[3]
+	vpaddd	0x20($Tbl),@X[1],$t1
+	vpaddd	0x40($Tbl),@X[2],$t2
+	vpaddd	0x60($Tbl),@X[3],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	xor	$a1,$a1
+	vmovdqa	$t1,0x20(%rsp)
+	lea	-$PUSH8(%rsp),%rsp
+	mov	$B,$a3
+	vmovdqa	$t2,0x00(%rsp)
+	xor	$C,$a3			# magic
+	vmovdqa	$t3,0x20(%rsp)
+	mov	$F,$a4
+	sub	\$-16*2*$SZ,$Tbl	# size optimization
+	jmp	.Lavx2_00_47
+
+.align	16
+.Lavx2_00_47:
+___
+
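+# Interleave the vector message-schedule update for four rounds with the
+# scalar round bodies: each instruction from Xupdate_256_AVX is followed by up
+# to three queued body instructions, and any body instructions left over are
+# emitted after the vpaddd.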
+sub AVX2_256_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body,&$body,&$body);	# 96 instructions
+my $base = "+2*$PUSH8(%rsp)";
+
+	&lea	("%rsp","-$PUSH8(%rsp)")	if (($j%2)==0);
+	foreach (Xupdate_256_AVX()) {		# 29 instructions
+	    eval;
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	    eval(shift(@insns));
+	}
+	&vpaddd		($t2,@X[0],16*2*$j."($Tbl)");
+	  foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	((32*$j)%$PUSH8."(%rsp)",$t2);
+}
+
+    for ($i=0,$j=0; $j<4; $j++) {
+	&AVX2_256_00_47($j,\&bodyx_00_15,@X);
+	push(@X,shift(@X));			# rotate(@X)
+    }
+	&lea	($Tbl,16*2*$SZ."($Tbl)");
+	&cmpb	(($SZ-1)."($Tbl)",0);
+	&jne	(".Lavx2_00_47");
+
+    for ($i=0; $i<16; ) {
+	my $base=$i<8?"+$PUSH8(%rsp)":"(%rsp)";
+	foreach(bodyx_00_15()) { eval; }
+    }
+					} else {	# SHA512
+    my @X = map("%ymm$_",(0..7));
+    my ($t0,$t1,$t2,$t3) = map("%ymm$_",(8..11));
+
+$code.=<<___;
+	jmp	.Loop_avx2
+.align	16
+.Loop_avx2:
+	vmovdqu	-16*$SZ($inp),%xmm0
+	vmovdqu	-16*$SZ+16($inp),%xmm1
+	vmovdqu	-16*$SZ+32($inp),%xmm2
+	lea	$TABLE+0x80(%rip),$Tbl	# size optimization
+	vmovdqu	-16*$SZ+48($inp),%xmm3
+	vmovdqu	-16*$SZ+64($inp),%xmm4
+	vmovdqu	-16*$SZ+80($inp),%xmm5
+	vmovdqu	-16*$SZ+96($inp),%xmm6
+	vmovdqu	-16*$SZ+112($inp),%xmm7
+	#mov	$inp,$_inp	# offload $inp
+	vmovdqa	`$SZ*2*$rounds-0x80`($Tbl),$t2
+	vinserti128	\$1,(%r12),@X[0],@X[0]
+	vinserti128	\$1,16(%r12),@X[1],@X[1]
+	 vpshufb	$t2,@X[0],@X[0]
+	vinserti128	\$1,32(%r12),@X[2],@X[2]
+	 vpshufb	$t2,@X[1],@X[1]
+	vinserti128	\$1,48(%r12),@X[3],@X[3]
+	 vpshufb	$t2,@X[2],@X[2]
+	vinserti128	\$1,64(%r12),@X[4],@X[4]
+	 vpshufb	$t2,@X[3],@X[3]
+	vinserti128	\$1,80(%r12),@X[5],@X[5]
+	 vpshufb	$t2,@X[4],@X[4]
+	vinserti128	\$1,96(%r12),@X[6],@X[6]
+	 vpshufb	$t2,@X[5],@X[5]
+	vinserti128	\$1,112(%r12),@X[7],@X[7]
+
+	vpaddq	-0x80($Tbl),@X[0],$t0
+	vpshufb	$t2,@X[6],@X[6]
+	vpaddq	-0x60($Tbl),@X[1],$t1
+	vpshufb	$t2,@X[7],@X[7]
+	vpaddq	-0x40($Tbl),@X[2],$t2
+	vpaddq	-0x20($Tbl),@X[3],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	vpaddq	0x00($Tbl),@X[4],$t0
+	vmovdqa	$t1,0x20(%rsp)
+	vpaddq	0x20($Tbl),@X[5],$t1
+	vmovdqa	$t2,0x40(%rsp)
+	vpaddq	0x40($Tbl),@X[6],$t2
+	vmovdqa	$t3,0x60(%rsp)
+	lea	-$PUSH8(%rsp),%rsp
+	vpaddq	0x60($Tbl),@X[7],$t3
+	vmovdqa	$t0,0x00(%rsp)
+	xor	$a1,$a1
+	vmovdqa	$t1,0x20(%rsp)
+	mov	$B,$a3
+	vmovdqa	$t2,0x40(%rsp)
+	xor	$C,$a3			# magic
+	vmovdqa	$t3,0x60(%rsp)
+	mov	$F,$a4
+	add	\$16*2*$SZ,$Tbl
+	jmp	.Lavx2_00_47
+
+.align	16
+.Lavx2_00_47:
+___
+
+sub AVX2_512_00_47 () {
+my $j = shift;
+my $body = shift;
+my @X = @_;
+my @insns = (&$body,&$body);			# 48 instructions
+my $base = "+2*$PUSH8(%rsp)";
+
+	&lea	("%rsp","-$PUSH8(%rsp)")	if (($j%4)==0);
+	foreach (Xupdate_512_AVX()) {		# 23 instructions
+	    eval;
+	    if ($_ !~ /\;$/) {
+		eval(shift(@insns));
+		eval(shift(@insns));
+		eval(shift(@insns));
+	    }
+	}
+	&vpaddq		($t2,@X[0],16*2*$j-0x80."($Tbl)");
+	  foreach (@insns) { eval; }		# remaining instructions
+	&vmovdqa	((32*$j)%$PUSH8."(%rsp)",$t2);
+}
+
+    for ($i=0,$j=0; $j<8; $j++) {
+	&AVX2_512_00_47($j,\&bodyx_00_15,@X);
+	push(@X,shift(@X));			# rotate(@X)
+    }
+	&lea	($Tbl,16*2*$SZ."($Tbl)");
+	&cmpb	(($SZ-1-0x80)."($Tbl)",0);
+	&jne	(".Lavx2_00_47");
+
+    for ($i=0; $i<16; ) {
+	my $base=$i<8?"+$PUSH8(%rsp)":"(%rsp)";
+	foreach(bodyx_00_15()) { eval; }
+    }
+}
+$code.=<<___;
+	mov	`2*$SZ*$rounds`(%rsp),$ctx	# $_ctx
+	add	$a1,$A
+	#mov	`2*$SZ*$rounds+8`(%rsp),$inp	# $_inp
+	lea	`2*$SZ*($rounds-8)`(%rsp),$Tbl
+
+	add	$SZ*0($ctx),$A
+	add	$SZ*1($ctx),$B
+	add	$SZ*2($ctx),$C
+	add	$SZ*3($ctx),$D
+	add	$SZ*4($ctx),$E
+	add	$SZ*5($ctx),$F
+	add	$SZ*6($ctx),$G
+	add	$SZ*7($ctx),$H
+
+	mov	$A,$SZ*0($ctx)
+	mov	$B,$SZ*1($ctx)
+	mov	$C,$SZ*2($ctx)
+	mov	$D,$SZ*3($ctx)
+	mov	$E,$SZ*4($ctx)
+	mov	$F,$SZ*5($ctx)
+	mov	$G,$SZ*6($ctx)
+	mov	$H,$SZ*7($ctx)
+
+	cmp	`$PUSH8+2*8`($Tbl),$inp	# $_end
+	je	.Ldone_avx2
+
+	xor	$a1,$a1
+	mov	$B,$a3
+	xor	$C,$a3			# magic
+	mov	$F,$a4
+	jmp	.Lower_avx2
+.align	16
+.Lower_avx2:
+___
+    for ($i=0; $i<8; ) {
+	my $base="+16($Tbl)";
+	foreach(bodyx_00_15()) { eval; }
+    }
+$code.=<<___;
+	lea	-$PUSH8($Tbl),$Tbl
+	cmp	%rsp,$Tbl
+	jae	.Lower_avx2
+
+	mov	`2*$SZ*$rounds`(%rsp),$ctx	# $_ctx
+	add	$a1,$A
+	#mov	`2*$SZ*$rounds+8`(%rsp),$inp	# $_inp
+	lea	`2*$SZ*($rounds-8)`(%rsp),%rsp
+
+	add	$SZ*0($ctx),$A
+	add	$SZ*1($ctx),$B
+	add	$SZ*2($ctx),$C
+	add	$SZ*3($ctx),$D
+	add	$SZ*4($ctx),$E
+	add	$SZ*5($ctx),$F
+	lea	`2*16*$SZ`($inp),$inp	# inp+=2
+	add	$SZ*6($ctx),$G
+	mov	$inp,%r12
+	add	$SZ*7($ctx),$H
+	cmp	$_end,$inp
+
+	mov	$A,$SZ*0($ctx)
+	cmove	%rsp,%r12		# next block or stale data
+	mov	$B,$SZ*1($ctx)
+	mov	$C,$SZ*2($ctx)
+	mov	$D,$SZ*3($ctx)
+	mov	$E,$SZ*4($ctx)
+	mov	$F,$SZ*5($ctx)
+	mov	$G,$SZ*6($ctx)
+	mov	$H,$SZ*7($ctx)
+
+	jbe	.Loop_avx2
+	lea	(%rsp),$Tbl
+
+.Ldone_avx2:
+	lea	($Tbl),%rsp
+	mov	$_rsp,%rsi
+	vzeroupper
+___
+$code.=<<___ if ($win64);
+	movaps	16*$SZ+32(%rsp),%xmm6
+	movaps	16*$SZ+48(%rsp),%xmm7
+	movaps	16*$SZ+64(%rsp),%xmm8
+	movaps	16*$SZ+80(%rsp),%xmm9
+___
+$code.=<<___ if ($win64 && $SZ>4);
+	movaps	16*$SZ+96(%rsp),%xmm10
+	movaps	16*$SZ+112(%rsp),%xmm11
+___
+$code.=<<___;
+	mov	-48(%rsi),%r15
+	mov	-40(%rsi),%r14
+	mov	-32(%rsi),%r13
+	mov	-24(%rsi),%r12
+	mov	-16(%rsi),%rbp
+	mov	-8(%rsi),%rbx
+	lea	(%rsi),%rsp
+.Lepilogue_avx2:
+	ret
+.size	${func}_avx2,.-${func}_avx2
+___
+}}
+}}}}}
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
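+# se_handler() implements Win64 structured exception handling unwinding: when
+# the fault address lies between the recorded prologue and epilogue labels it
+# restores the saved general-purpose (and, for the SIMD paths, XMM) registers
+# into the CONTEXT record before deferring to RtlVirtualUnwind.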
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+.type	se_handler,\@abi-omnipotent
+.align	16
+se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lin_prologue
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lin_prologue
+___
+$code.=<<___ if ($avx>1);
+	lea	.Lavx2_shortcut(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<avx2_shortcut
+	jb	.Lnot_in_avx2
+
+	and	\$-256*$SZ,%rax
+	add	\$`2*$SZ*($rounds-8)`,%rax
+.Lnot_in_avx2:
+___
+$code.=<<___;
+	mov	%rax,%rsi		# put aside Rsp
+	mov	16*$SZ+3*8(%rax),%rax	# pull $_rsp
+
+	mov	-8(%rax),%rbx
+	mov	-16(%rax),%rbp
+	mov	-24(%rax),%r12
+	mov	-32(%rax),%r13
+	mov	-40(%rax),%r14
+	mov	-48(%rax),%r15
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+	lea	.Lepilogue(%rip),%r10
+	cmp	%r10,%rbx
+	jb	.Lin_prologue		# non-AVX code
+
+	lea	16*$SZ+4*8(%rsi),%rsi	# Xmm6- save area
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$`$SZ==4?8:12`,%ecx
+	.long	0xa548f3fc		# cld; rep movsq
+
+.Lin_prologue:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$154,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	se_handler,.-se_handler
+___
+
+$code.=<<___ if ($SZ==4 && $shaext);
+.type	shaext_handler,\@abi-omnipotent
+.align	16
+shaext_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	lea	.Lprologue_shaext(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip<.Lprologue
+	jb	.Lin_prologue
+
+	lea	.Lepilogue_shaext(%rip),%r10
+	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
+	jae	.Lin_prologue
+
+	lea	-8-5*16(%rax),%rsi
+	lea	512($context),%rdi	# &context.Xmm6
+	mov	\$10,%ecx
+	.long	0xa548f3fc		# cld; rep movsq
+
+	jmp	.Lin_prologue
+.size	shaext_handler,.-shaext_handler
+___
+
+$code.=<<___;
+.section	.pdata
+.align	4
+	.rva	.LSEH_begin_$func
+	.rva	.LSEH_end_$func
+	.rva	.LSEH_info_$func
+___
+$code.=<<___ if ($SZ==4 && $shaext);
+	.rva	.LSEH_begin_${func}_shaext
+	.rva	.LSEH_end_${func}_shaext
+	.rva	.LSEH_info_${func}_shaext
+___
+$code.=<<___ if ($SZ==4);
+	.rva	.LSEH_begin_${func}_ssse3
+	.rva	.LSEH_end_${func}_ssse3
+	.rva	.LSEH_info_${func}_ssse3
+___
+$code.=<<___ if ($avx && $SZ==8);
+	.rva	.LSEH_begin_${func}_xop
+	.rva	.LSEH_end_${func}_xop
+	.rva	.LSEH_info_${func}_xop
+___
+$code.=<<___ if ($avx);
+	.rva	.LSEH_begin_${func}_avx
+	.rva	.LSEH_end_${func}_avx
+	.rva	.LSEH_info_${func}_avx
+___
+$code.=<<___ if ($avx>1);
+	.rva	.LSEH_begin_${func}_avx2
+	.rva	.LSEH_end_${func}_avx2
+	.rva	.LSEH_info_${func}_avx2
+___
+$code.=<<___;
+.section	.xdata
+.align	8
+.LSEH_info_$func:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lprologue,.Lepilogue			# HandlerData[]
+___
+$code.=<<___ if ($SZ==4 && $shaext);
+.LSEH_info_${func}_shaext:
+	.byte	9,0,0,0
+	.rva	shaext_handler
+___
+$code.=<<___ if ($SZ==4);
+.LSEH_info_${func}_ssse3:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lprologue_ssse3,.Lepilogue_ssse3	# HandlerData[]
+___
+$code.=<<___ if ($avx && $SZ==8);
+.LSEH_info_${func}_xop:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lprologue_xop,.Lepilogue_xop		# HandlerData[]
+___
+$code.=<<___ if ($avx);
+.LSEH_info_${func}_avx:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lprologue_avx,.Lepilogue_avx		# HandlerData[]
+___
+$code.=<<___ if ($avx>1);
+.LSEH_info_${func}_avx2:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lprologue_avx2,.Lepilogue_avx2		# HandlerData[]
+___
+}
+
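+# Encode the SHA-NI instructions by hand (as 0x0f, 0x38, opcode, ModR/M bytes)
+# so that the output assembles even with assemblers that do not yet know the
+# sha256rnds2/sha256msg1/sha256msg2 mnemonics; anything else is passed through
+# unchanged.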
+sub sha256op38 {
+    my $instr = shift;
+    my %opcodelet = (
+		"sha256rnds2" => 0xcb,
+  		"sha256msg1"  => 0xcc,
+		"sha256msg2"  => 0xcd	);
+
+    if (defined($opcodelet{$instr}) && @_[0] =~ /%xmm([0-7]),\s*%xmm([0-7])/) {
+      my @opcode=(0x0f,0x38);
+	push @opcode,$opcodelet{$instr};
+	push @opcode,0xc0|($1&7)|(($2&7)<<3);		# ModR/M
+	return ".byte\t".join(',',@opcode);
+    } else {
+	return $instr."\t".@_[0];
+    }
+}
+
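+# Post-process the generated code: evaluate the backtick-quoted Perl
+# expressions and rewrite sha256* mnemonics through sha256op38() before
+# printing the final assembly.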
+foreach (split("\n",$code)) {
+	s/\`([^\`]*)\`/eval $1/geo;
+
+	s/\b(sha256[^\s]*)\s+(.*)/sha256op38($1,$2)/geo;
+
+	print $_,"\n";
+}
+close STDOUT;
diff --git a/src/crypto/fipsmodule/sha/sha1-altivec.c b/src/crypto/fipsmodule/sha/sha1-altivec.c
new file mode 100644
index 0000000..14e2bae
--- /dev/null
+++ b/src/crypto/fipsmodule/sha/sha1-altivec.c
@@ -0,0 +1,361 @@
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *    "This product includes cryptographic software written by
+ *     Eric Young (eay@cryptsoft.com)"
+ *    The word 'cryptographic' can be left out if the rouines from the library
+ *    being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ *    the apps directory (application code) you must include an acknowledgement:
+ *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed.  i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.] */
+
+/* Altivec-optimized SHA1 in C. This is tested on ppc64le only.
+ *
+ * References:
+ * https://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1
+ * http://arctic.org/~dean/crypto/sha1.html
+ *
+ * This code uses the generic SHA-1 implementation from OpenSSL as a basis,
+ * with AltiVec optimisations added on top. */
+
+#include <openssl/sha.h>
+
+#if defined(OPENSSL_PPC64LE)
+
+#include <altivec.h>
+
+void sha1_block_data_order(uint32_t *state, const uint8_t *data, size_t num);
+
+static uint32_t rotate(uint32_t a, int n) { return (a << n) | (a >> (32 - n)); }
+
+typedef vector unsigned int vec_uint32_t;
+typedef vector unsigned char vec_uint8_t;
+
+/* Vector constants */
+static const vec_uint8_t k_swap_endianness = {3,  2,  1, 0, 7,  6,  5,  4,
+                                              11, 10, 9, 8, 15, 14, 13, 12};
+
+/* Shift amounts for byte and bit shifts and rotations */
+static const vec_uint8_t k_4_bytes = {32, 32, 32, 32, 32, 32, 32, 32,
+                                      32, 32, 32, 32, 32, 32, 32, 32};
+static const vec_uint8_t k_12_bytes = {96, 96, 96, 96, 96, 96, 96, 96,
+                                       96, 96, 96, 96, 96, 96, 96, 96};
+
+#define K_00_19 0x5a827999UL
+#define K_20_39 0x6ed9eba1UL
+#define K_40_59 0x8f1bbcdcUL
+#define K_60_79 0xca62c1d6UL
+
+/* Vector versions of the above. */
+static const vec_uint32_t K_00_19_x_4 = {K_00_19, K_00_19, K_00_19, K_00_19};
+static const vec_uint32_t K_20_39_x_4 = {K_20_39, K_20_39, K_20_39, K_20_39};
+static const vec_uint32_t K_40_59_x_4 = {K_40_59, K_40_59, K_40_59, K_40_59};
+static const vec_uint32_t K_60_79_x_4 = {K_60_79, K_60_79, K_60_79, K_60_79};
+
+/* Vector message scheduling: compute the message schedule for rounds i..i+3,
+ * where i is divisible by 4. We return the schedule w[i..i+3] as a vector. In
+ * addition, we precompute the sum of w[i..i+3] and the additive constant K,
+ * which offloads part of each round's work from the integer execution units.
+ *
+ * The byte shifting code below may not be correct for big-endian systems. */
+static vec_uint32_t sched_00_15(vec_uint32_t *pre_added, const void *data,
+                                vec_uint32_t k) {
+  const vector unsigned char unaligned_data =
+    vec_vsx_ld(0, (const unsigned char*) data);
+  const vec_uint32_t v = (vec_uint32_t) unaligned_data;
+  const vec_uint32_t w = vec_perm(v, v, k_swap_endianness);
+  vec_st(w + k, 0, pre_added);
+  return w;
+}
+
+/* Compute w[i..i+3] using these steps for i in [16, 20, 24, 28]
+ *
+ * w'[i  ]  = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) <<< 1
+ * w'[i+1]  = (w[i-2] ^ w[i-7] ^ w[i-13] ^ w[i-15]) <<< 1
+ * w'[i+2]  = (w[i-1] ^ w[i-6] ^ w[i-12] ^ w[i-14]) <<< 1
+ * w'[i+3]  = (     0 ^ w[i-5] ^ w[i-11] ^ w[i-13]) <<< 1
+ *
+ * w[  i] = w'[  i]
+ * w[i+1] = w'[i+1]
+ * w[i+2] = w'[i+2]
+ * w[i+3] = w'[i+3] ^ (w'[i] <<< 1) */
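+/* The fourth lane of w' uses 0 in place of w[i], which is not yet available
+ * when the vector is formed; the final XOR with (w'[i] <<< 1) accounts for
+ * it, since w[i] == w'[i]. */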
+static vec_uint32_t sched_16_31(vec_uint32_t *pre_added, vec_uint32_t minus_4,
+                                vec_uint32_t minus_8, vec_uint32_t minus_12,
+                                vec_uint32_t minus_16, vec_uint32_t k) {
+  const vec_uint32_t minus_3 = vec_sro(minus_4, k_4_bytes);
+  const vec_uint32_t minus_14 = vec_sld((minus_12), (minus_16), 8);
+  const vec_uint32_t k_1_bit = vec_splat_u32(1);
+  const vec_uint32_t w_prime =
+      vec_rl(minus_3 ^ minus_8 ^ minus_14 ^ minus_16, k_1_bit);
+  const vec_uint32_t w =
+      w_prime ^ vec_rl(vec_slo(w_prime, k_12_bytes), k_1_bit);
+  vec_st(w + k, 0, pre_added);
+  return w;
+}
+
+/* Compute w[i..i+3] using this relation for i in [32, 36, 40, ..., 76]:
+ * w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]) <<< 2 */
+static vec_uint32_t sched_32_79(vec_uint32_t *pre_added, vec_uint32_t minus_4,
+                                vec_uint32_t minus_8, vec_uint32_t minus_16,
+                                vec_uint32_t minus_28, vec_uint32_t minus_32,
+                                vec_uint32_t k) {
+  const vec_uint32_t minus_6 = vec_sld(minus_4, minus_8, 8);
+  const vec_uint32_t k_2_bits = vec_splat_u32(2);
+  const vec_uint32_t w =
+      vec_rl(minus_6 ^ minus_16 ^ minus_28 ^ minus_32, k_2_bits);
+  vec_st(w + k, 0, pre_added);
+  return w;
+}
+
+/* As pointed out by Wei Dai <weidai@eskimo.com>, F() below can be simplified
+ * to the code in F_00_19. Wei attributes these optimisations to Peter
+ * Gutmann's SHS code, and he attributes it to Rich Schroeppel.
+ *     #define F(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
+ * I've just become aware of another tweak to be made, again from Wei Dai, in
+ * F_40_59: (x&a)|(y&a) -> (x|y)&a */
+#define F_00_19(b, c, d) ((((c) ^ (d)) & (b)) ^ (d))
+#define F_20_39(b, c, d) ((b) ^ (c) ^ (d))
+#define F_40_59(b, c, d) (((b) & (c)) | (((b) | (c)) & (d)))
+#define F_60_79(b, c, d) F_20_39(b, c, d)
+
+/* We pre-added the K constants during message scheduling. */
+#define BODY_00_19(i, a, b, c, d, e, f)                         \
+  do {                                                          \
+    (f) = w[i] + (e) + rotate((a), 5) + F_00_19((b), (c), (d)); \
+    (b) = rotate((b), 30);                                      \
+  } while (0)
+
+#define BODY_20_39(i, a, b, c, d, e, f)                         \
+  do {                                                          \
+    (f) = w[i] + (e) + rotate((a), 5) + F_20_39((b), (c), (d)); \
+    (b) = rotate((b), 30);                                      \
+  } while (0)
+
+#define BODY_40_59(i, a, b, c, d, e, f)                         \
+  do {                                                          \
+    (f) = w[i] + (e) + rotate((a), 5) + F_40_59((b), (c), (d)); \
+    (b) = rotate((b), 30);                                      \
+  } while (0)
+
+#define BODY_60_79(i, a, b, c, d, e, f)                         \
+  do {                                                          \
+    (f) = w[i] + (e) + rotate((a), 5) + F_60_79((b), (c), (d)); \
+    (b) = rotate((b), 30);                                      \
+  } while (0)
+
+void sha1_block_data_order(uint32_t *state, const uint8_t *data, size_t num) {
+  uint32_t A, B, C, D, E, T;
+
+  A = state[0];
+  B = state[1];
+  C = state[2];
+  D = state[3];
+  E = state[4];
+
+  for (;;) {
+    vec_uint32_t vw[20];
+    const uint32_t *w = (const uint32_t *)&vw;
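+    /* vw[] holds the 20 vectors (80 32-bit words) of the pre-added message
+     * schedule; w aliases the same storage so the scalar round bodies can
+     * index individual words. */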
+
+    vec_uint32_t k = K_00_19_x_4;
+    const vec_uint32_t w0 = sched_00_15(vw + 0, data + 0, k);
+    BODY_00_19(0, A, B, C, D, E, T);
+    BODY_00_19(1, T, A, B, C, D, E);
+    BODY_00_19(2, E, T, A, B, C, D);
+    BODY_00_19(3, D, E, T, A, B, C);
+
+    const vec_uint32_t w4 = sched_00_15(vw + 1, data + 16, k);
+    BODY_00_19(4, C, D, E, T, A, B);
+    BODY_00_19(5, B, C, D, E, T, A);
+    BODY_00_19(6, A, B, C, D, E, T);
+    BODY_00_19(7, T, A, B, C, D, E);
+
+    const vec_uint32_t w8 = sched_00_15(vw + 2, data + 32, k);
+    BODY_00_19(8, E, T, A, B, C, D);
+    BODY_00_19(9, D, E, T, A, B, C);
+    BODY_00_19(10, C, D, E, T, A, B);
+    BODY_00_19(11, B, C, D, E, T, A);
+
+    const vec_uint32_t w12 = sched_00_15(vw + 3, data + 48, k);
+    BODY_00_19(12, A, B, C, D, E, T);
+    BODY_00_19(13, T, A, B, C, D, E);
+    BODY_00_19(14, E, T, A, B, C, D);
+    BODY_00_19(15, D, E, T, A, B, C);
+
+    const vec_uint32_t w16 = sched_16_31(vw + 4, w12, w8, w4, w0, k);
+    BODY_00_19(16, C, D, E, T, A, B);
+    BODY_00_19(17, B, C, D, E, T, A);
+    BODY_00_19(18, A, B, C, D, E, T);
+    BODY_00_19(19, T, A, B, C, D, E);
+
+    k = K_20_39_x_4;
+    const vec_uint32_t w20 = sched_16_31(vw + 5, w16, w12, w8, w4, k);
+    BODY_20_39(20, E, T, A, B, C, D);
+    BODY_20_39(21, D, E, T, A, B, C);
+    BODY_20_39(22, C, D, E, T, A, B);
+    BODY_20_39(23, B, C, D, E, T, A);
+
+    const vec_uint32_t w24 = sched_16_31(vw + 6, w20, w16, w12, w8, k);
+    BODY_20_39(24, A, B, C, D, E, T);
+    BODY_20_39(25, T, A, B, C, D, E);
+    BODY_20_39(26, E, T, A, B, C, D);
+    BODY_20_39(27, D, E, T, A, B, C);
+
+    const vec_uint32_t w28 = sched_16_31(vw + 7, w24, w20, w16, w12, k);
+    BODY_20_39(28, C, D, E, T, A, B);
+    BODY_20_39(29, B, C, D, E, T, A);
+    BODY_20_39(30, A, B, C, D, E, T);
+    BODY_20_39(31, T, A, B, C, D, E);
+
+    const vec_uint32_t w32 = sched_32_79(vw + 8, w28, w24, w16, w4, w0, k);
+    BODY_20_39(32, E, T, A, B, C, D);
+    BODY_20_39(33, D, E, T, A, B, C);
+    BODY_20_39(34, C, D, E, T, A, B);
+    BODY_20_39(35, B, C, D, E, T, A);
+
+    const vec_uint32_t w36 = sched_32_79(vw + 9, w32, w28, w20, w8, w4, k);
+    BODY_20_39(36, A, B, C, D, E, T);
+    BODY_20_39(37, T, A, B, C, D, E);
+    BODY_20_39(38, E, T, A, B, C, D);
+    BODY_20_39(39, D, E, T, A, B, C);
+
+    k = K_40_59_x_4;
+    const vec_uint32_t w40 = sched_32_79(vw + 10, w36, w32, w24, w12, w8, k);
+    BODY_40_59(40, C, D, E, T, A, B);
+    BODY_40_59(41, B, C, D, E, T, A);
+    BODY_40_59(42, A, B, C, D, E, T);
+    BODY_40_59(43, T, A, B, C, D, E);
+
+    const vec_uint32_t w44 = sched_32_79(vw + 11, w40, w36, w28, w16, w12, k);
+    BODY_40_59(44, E, T, A, B, C, D);
+    BODY_40_59(45, D, E, T, A, B, C);
+    BODY_40_59(46, C, D, E, T, A, B);
+    BODY_40_59(47, B, C, D, E, T, A);
+
+    const vec_uint32_t w48 = sched_32_79(vw + 12, w44, w40, w32, w20, w16, k);
+    BODY_40_59(48, A, B, C, D, E, T);
+    BODY_40_59(49, T, A, B, C, D, E);
+    BODY_40_59(50, E, T, A, B, C, D);
+    BODY_40_59(51, D, E, T, A, B, C);
+
+    const vec_uint32_t w52 = sched_32_79(vw + 13, w48, w44, w36, w24, w20, k);
+    BODY_40_59(52, C, D, E, T, A, B);
+    BODY_40_59(53, B, C, D, E, T, A);
+    BODY_40_59(54, A, B, C, D, E, T);
+    BODY_40_59(55, T, A, B, C, D, E);
+
+    const vec_uint32_t w56 = sched_32_79(vw + 14, w52, w48, w40, w28, w24, k);
+    BODY_40_59(56, E, T, A, B, C, D);
+    BODY_40_59(57, D, E, T, A, B, C);
+    BODY_40_59(58, C, D, E, T, A, B);
+    BODY_40_59(59, B, C, D, E, T, A);
+
+    k = K_60_79_x_4;
+    const vec_uint32_t w60 = sched_32_79(vw + 15, w56, w52, w44, w32, w28, k);
+    BODY_60_79(60, A, B, C, D, E, T);
+    BODY_60_79(61, T, A, B, C, D, E);
+    BODY_60_79(62, E, T, A, B, C, D);
+    BODY_60_79(63, D, E, T, A, B, C);
+
+    const vec_uint32_t w64 = sched_32_79(vw + 16, w60, w56, w48, w36, w32, k);
+    BODY_60_79(64, C, D, E, T, A, B);
+    BODY_60_79(65, B, C, D, E, T, A);
+    BODY_60_79(66, A, B, C, D, E, T);
+    BODY_60_79(67, T, A, B, C, D, E);
+
+    const vec_uint32_t w68 = sched_32_79(vw + 17, w64, w60, w52, w40, w36, k);
+    BODY_60_79(68, E, T, A, B, C, D);
+    BODY_60_79(69, D, E, T, A, B, C);
+    BODY_60_79(70, C, D, E, T, A, B);
+    BODY_60_79(71, B, C, D, E, T, A);
+
+    const vec_uint32_t w72 = sched_32_79(vw + 18, w68, w64, w56, w44, w40, k);
+    BODY_60_79(72, A, B, C, D, E, T);
+    BODY_60_79(73, T, A, B, C, D, E);
+    BODY_60_79(74, E, T, A, B, C, D);
+    BODY_60_79(75, D, E, T, A, B, C);
+
+    /* We don't use the last value */
+    (void)sched_32_79(vw + 19, w72, w68, w60, w48, w44, k);
+    BODY_60_79(76, C, D, E, T, A, B);
+    BODY_60_79(77, B, C, D, E, T, A);
+    BODY_60_79(78, A, B, C, D, E, T);
+    BODY_60_79(79, T, A, B, C, D, E);
+
+    const uint32_t mask = 0xffffffffUL;
+    state[0] = (state[0] + E) & mask;
+    state[1] = (state[1] + T) & mask;
+    state[2] = (state[2] + A) & mask;
+    state[3] = (state[3] + B) & mask;
+    state[4] = (state[4] + C) & mask;
+
+    data += 64;
+    if (--num == 0) {
+      break;
+    }
+
+    A = state[0];
+    B = state[1];
+    C = state[2];
+    D = state[3];
+    E = state[4];
+  }
+}
+
+#endif  /* OPENSSL_PPC64LE */
+
+#undef K_00_19
+#undef K_20_39
+#undef K_40_59
+#undef K_60_79
+#undef F_00_19
+#undef F_20_39
+#undef F_40_59
+#undef F_60_79
+#undef BODY_00_19
+#undef BODY_20_39
+#undef BODY_40_59
+#undef BODY_60_79
diff --git a/src/crypto/fipsmodule/sha/sha1.c b/src/crypto/fipsmodule/sha/sha1.c
new file mode 100644
index 0000000..7b44563
--- /dev/null
+++ b/src/crypto/fipsmodule/sha/sha1.c
@@ -0,0 +1,375 @@
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *    "This product includes cryptographic software written by
+ *     Eric Young (eay@cryptsoft.com)"
+ *    The word 'cryptographic' can be left out if the rouines from the library
+ *    being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ *    the apps directory (application code) you must include an acknowledgement:
+ *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed.  i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.] */
+
+#include <openssl/sha.h>
+
+#include <string.h>
+
+#include <openssl/mem.h>
+
+#include "../../internal.h"
+
+
+#if !defined(OPENSSL_NO_ASM) &&                         \
+    (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \
+     defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64) || \
+     defined(OPENSSL_PPC64LE))
+#define SHA1_ASM
+#endif
+
+int SHA1_Init(SHA_CTX *sha) {
+  OPENSSL_memset(sha, 0, sizeof(SHA_CTX));
+  sha->h[0] = 0x67452301UL;
+  sha->h[1] = 0xefcdab89UL;
+  sha->h[2] = 0x98badcfeUL;
+  sha->h[3] = 0x10325476UL;
+  sha->h[4] = 0xc3d2e1f0UL;
+  return 1;
+}
+
+uint8_t *SHA1(const uint8_t *data, size_t len, uint8_t *out) {
+  SHA_CTX ctx;
+  SHA1_Init(&ctx);
+  SHA1_Update(&ctx, data, len);
+  SHA1_Final(out, &ctx);
+  OPENSSL_cleanse(&ctx, sizeof(ctx));
+  return out;
+}
+
+#define DATA_ORDER_IS_BIG_ENDIAN
+
+#define HASH_CTX                SHA_CTX
+#define HASH_CBLOCK             64
+#define HASH_MAKE_STRING(c, s) \
+  do {                         \
+    uint32_t ll;               \
+    ll = (c)->h[0];            \
+    HOST_l2c(ll, (s));         \
+    ll = (c)->h[1];            \
+    HOST_l2c(ll, (s));         \
+    ll = (c)->h[2];            \
+    HOST_l2c(ll, (s));         \
+    ll = (c)->h[3];            \
+    HOST_l2c(ll, (s));         \
+    ll = (c)->h[4];            \
+    HOST_l2c(ll, (s));         \
+  } while (0)
+
+#define HASH_UPDATE SHA1_Update
+#define HASH_TRANSFORM SHA1_Transform
+#define HASH_FINAL SHA1_Final
+#define HASH_BLOCK_DATA_ORDER sha1_block_data_order
+#define ROTATE(a, n) (((a) << (n)) | ((a) >> (32 - (n))))
+#define Xupdate(a, ix, ia, ib, ic, id) \
+  do {                                 \
+    (a) = ((ia) ^ (ib) ^ (ic) ^ (id)); \
+    (ix) = (a) = ROTATE((a), 1);       \
+  } while (0)
+
+#ifndef SHA1_ASM
+static
+#endif
+void sha1_block_data_order(uint32_t *state, const uint8_t *data, size_t num);
+
+#include "../digest/md32_common.h"
+
+#define K_00_19 0x5a827999UL
+#define K_20_39 0x6ed9eba1UL
+#define K_40_59 0x8f1bbcdcUL
+#define K_60_79 0xca62c1d6UL
+
+/* As pointed out by Wei Dai <weidai@eskimo.com>, F() below can be simplified
+ * to the code in F_00_19. Wei attributes these optimisations to Peter
+ * Gutmann's SHS code, and he attributes it to Rich Schroeppel.
+ *     #define F(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
+ * I've just become aware of another tweak to be made, again from Wei Dai, in
+ * F_40_59: (x&a)|(y&a) -> (x|y)&a */
+#define F_00_19(b, c, d) ((((c) ^ (d)) & (b)) ^ (d))
+#define F_20_39(b, c, d) ((b) ^ (c) ^ (d))
+#define F_40_59(b, c, d) (((b) & (c)) | (((b) | (c)) & (d)))
+#define F_60_79(b, c, d) F_20_39(b, c, d)
+
+#define BODY_00_15(i, a, b, c, d, e, f, xi)                               \
+  do {                                                                    \
+    (f) = (xi) + (e) + K_00_19 + ROTATE((a), 5) + F_00_19((b), (c), (d)); \
+    (b) = ROTATE((b), 30);                                                \
+  } while (0)
+
+#define BODY_16_19(i, a, b, c, d, e, f, xi, xa, xb, xc, xd)         \
+  do {                                                              \
+    Xupdate(f, xi, xa, xb, xc, xd);                                 \
+    (f) += (e) + K_00_19 + ROTATE((a), 5) + F_00_19((b), (c), (d)); \
+    (b) = ROTATE((b), 30);                                          \
+  } while (0)
+
+#define BODY_20_31(i, a, b, c, d, e, f, xi, xa, xb, xc, xd)         \
+  do {                                                              \
+    Xupdate(f, xi, xa, xb, xc, xd);                                 \
+    (f) += (e) + K_20_39 + ROTATE((a), 5) + F_20_39((b), (c), (d)); \
+    (b) = ROTATE((b), 30);                                          \
+  } while (0)
+
+#define BODY_32_39(i, a, b, c, d, e, f, xa, xb, xc, xd)             \
+  do {                                                              \
+    Xupdate(f, xa, xa, xb, xc, xd);                                 \
+    (f) += (e) + K_20_39 + ROTATE((a), 5) + F_20_39((b), (c), (d)); \
+    (b) = ROTATE((b), 30);                                          \
+  } while (0)
+
+#define BODY_40_59(i, a, b, c, d, e, f, xa, xb, xc, xd)             \
+  do {                                                              \
+    Xupdate(f, xa, xa, xb, xc, xd);                                 \
+    (f) += (e) + K_40_59 + ROTATE((a), 5) + F_40_59((b), (c), (d)); \
+    (b) = ROTATE((b), 30);                                          \
+  } while (0)
+
+#define BODY_60_79(i, a, b, c, d, e, f, xa, xb, xc, xd)                   \
+  do {                                                                    \
+    Xupdate(f, xa, xa, xb, xc, xd);                                       \
+    (f) = (xa) + (e) + K_60_79 + ROTATE((a), 5) + F_60_79((b), (c), (d)); \
+    (b) = ROTATE((b), 30);                                                \
+  } while (0)
+
+#ifdef X
+#undef X
+#endif
+
+/* Originally X was an array. As it's automatic it's natural to expect a RISC
+ * compiler to accommodate at least part of it in the register bank, isn't it?
+ * Unfortunately not all compilers "find" this expectation reasonable:-( In
+ * order to make such compilers generate better code I replace X[] with a
+ * bunch of X0, X1, etc. See the function body below...
+ *					<appro@fy.chalmers.se> */
+#define X(i)	XX##i
+
+#if !defined(SHA1_ASM)
+static void sha1_block_data_order(uint32_t *state, const uint8_t *data,
+                                  size_t num) {
+  register uint32_t A, B, C, D, E, T, l;
+  uint32_t XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9, XX10,
+      XX11, XX12, XX13, XX14, XX15;
+
+  A = state[0];
+  B = state[1];
+  C = state[2];
+  D = state[3];
+  E = state[4];
+
+  for (;;) {
+    HOST_c2l(data, l);
+    X(0) = l;
+    HOST_c2l(data, l);
+    X(1) = l;
+    BODY_00_15(0, A, B, C, D, E, T, X(0));
+    HOST_c2l(data, l);
+    X(2) = l;
+    BODY_00_15(1, T, A, B, C, D, E, X(1));
+    HOST_c2l(data, l);
+    X(3) = l;
+    BODY_00_15(2, E, T, A, B, C, D, X(2));
+    HOST_c2l(data, l);
+    X(4) = l;
+    BODY_00_15(3, D, E, T, A, B, C, X(3));
+    HOST_c2l(data, l);
+    X(5) = l;
+    BODY_00_15(4, C, D, E, T, A, B, X(4));
+    HOST_c2l(data, l);
+    X(6) = l;
+    BODY_00_15(5, B, C, D, E, T, A, X(5));
+    HOST_c2l(data, l);
+    X(7) = l;
+    BODY_00_15(6, A, B, C, D, E, T, X(6));
+    HOST_c2l(data, l);
+    X(8) = l;
+    BODY_00_15(7, T, A, B, C, D, E, X(7));
+    HOST_c2l(data, l);
+    X(9) = l;
+    BODY_00_15(8, E, T, A, B, C, D, X(8));
+    HOST_c2l(data, l);
+    X(10) = l;
+    BODY_00_15(9, D, E, T, A, B, C, X(9));
+    HOST_c2l(data, l);
+    X(11) = l;
+    BODY_00_15(10, C, D, E, T, A, B, X(10));
+    HOST_c2l(data, l);
+    X(12) = l;
+    BODY_00_15(11, B, C, D, E, T, A, X(11));
+    HOST_c2l(data, l);
+    X(13) = l;
+    BODY_00_15(12, A, B, C, D, E, T, X(12));
+    HOST_c2l(data, l);
+    X(14) = l;
+    BODY_00_15(13, T, A, B, C, D, E, X(13));
+    HOST_c2l(data, l);
+    X(15) = l;
+    BODY_00_15(14, E, T, A, B, C, D, X(14));
+    BODY_00_15(15, D, E, T, A, B, C, X(15));
+
+    BODY_16_19(16, C, D, E, T, A, B, X(0), X(0), X(2), X(8), X(13));
+    BODY_16_19(17, B, C, D, E, T, A, X(1), X(1), X(3), X(9), X(14));
+    BODY_16_19(18, A, B, C, D, E, T, X(2), X(2), X(4), X(10), X(15));
+    BODY_16_19(19, T, A, B, C, D, E, X(3), X(3), X(5), X(11), X(0));
+
+    BODY_20_31(20, E, T, A, B, C, D, X(4), X(4), X(6), X(12), X(1));
+    BODY_20_31(21, D, E, T, A, B, C, X(5), X(5), X(7), X(13), X(2));
+    BODY_20_31(22, C, D, E, T, A, B, X(6), X(6), X(8), X(14), X(3));
+    BODY_20_31(23, B, C, D, E, T, A, X(7), X(7), X(9), X(15), X(4));
+    BODY_20_31(24, A, B, C, D, E, T, X(8), X(8), X(10), X(0), X(5));
+    BODY_20_31(25, T, A, B, C, D, E, X(9), X(9), X(11), X(1), X(6));
+    BODY_20_31(26, E, T, A, B, C, D, X(10), X(10), X(12), X(2), X(7));
+    BODY_20_31(27, D, E, T, A, B, C, X(11), X(11), X(13), X(3), X(8));
+    BODY_20_31(28, C, D, E, T, A, B, X(12), X(12), X(14), X(4), X(9));
+    BODY_20_31(29, B, C, D, E, T, A, X(13), X(13), X(15), X(5), X(10));
+    BODY_20_31(30, A, B, C, D, E, T, X(14), X(14), X(0), X(6), X(11));
+    BODY_20_31(31, T, A, B, C, D, E, X(15), X(15), X(1), X(7), X(12));
+
+    BODY_32_39(32, E, T, A, B, C, D, X(0), X(2), X(8), X(13));
+    BODY_32_39(33, D, E, T, A, B, C, X(1), X(3), X(9), X(14));
+    BODY_32_39(34, C, D, E, T, A, B, X(2), X(4), X(10), X(15));
+    BODY_32_39(35, B, C, D, E, T, A, X(3), X(5), X(11), X(0));
+    BODY_32_39(36, A, B, C, D, E, T, X(4), X(6), X(12), X(1));
+    BODY_32_39(37, T, A, B, C, D, E, X(5), X(7), X(13), X(2));
+    BODY_32_39(38, E, T, A, B, C, D, X(6), X(8), X(14), X(3));
+    BODY_32_39(39, D, E, T, A, B, C, X(7), X(9), X(15), X(4));
+
+    BODY_40_59(40, C, D, E, T, A, B, X(8), X(10), X(0), X(5));
+    BODY_40_59(41, B, C, D, E, T, A, X(9), X(11), X(1), X(6));
+    BODY_40_59(42, A, B, C, D, E, T, X(10), X(12), X(2), X(7));
+    BODY_40_59(43, T, A, B, C, D, E, X(11), X(13), X(3), X(8));
+    BODY_40_59(44, E, T, A, B, C, D, X(12), X(14), X(4), X(9));
+    BODY_40_59(45, D, E, T, A, B, C, X(13), X(15), X(5), X(10));
+    BODY_40_59(46, C, D, E, T, A, B, X(14), X(0), X(6), X(11));
+    BODY_40_59(47, B, C, D, E, T, A, X(15), X(1), X(7), X(12));
+    BODY_40_59(48, A, B, C, D, E, T, X(0), X(2), X(8), X(13));
+    BODY_40_59(49, T, A, B, C, D, E, X(1), X(3), X(9), X(14));
+    BODY_40_59(50, E, T, A, B, C, D, X(2), X(4), X(10), X(15));
+    BODY_40_59(51, D, E, T, A, B, C, X(3), X(5), X(11), X(0));
+    BODY_40_59(52, C, D, E, T, A, B, X(4), X(6), X(12), X(1));
+    BODY_40_59(53, B, C, D, E, T, A, X(5), X(7), X(13), X(2));
+    BODY_40_59(54, A, B, C, D, E, T, X(6), X(8), X(14), X(3));
+    BODY_40_59(55, T, A, B, C, D, E, X(7), X(9), X(15), X(4));
+    BODY_40_59(56, E, T, A, B, C, D, X(8), X(10), X(0), X(5));
+    BODY_40_59(57, D, E, T, A, B, C, X(9), X(11), X(1), X(6));
+    BODY_40_59(58, C, D, E, T, A, B, X(10), X(12), X(2), X(7));
+    BODY_40_59(59, B, C, D, E, T, A, X(11), X(13), X(3), X(8));
+
+    BODY_60_79(60, A, B, C, D, E, T, X(12), X(14), X(4), X(9));
+    BODY_60_79(61, T, A, B, C, D, E, X(13), X(15), X(5), X(10));
+    BODY_60_79(62, E, T, A, B, C, D, X(14), X(0), X(6), X(11));
+    BODY_60_79(63, D, E, T, A, B, C, X(15), X(1), X(7), X(12));
+    BODY_60_79(64, C, D, E, T, A, B, X(0), X(2), X(8), X(13));
+    BODY_60_79(65, B, C, D, E, T, A, X(1), X(3), X(9), X(14));
+    BODY_60_79(66, A, B, C, D, E, T, X(2), X(4), X(10), X(15));
+    BODY_60_79(67, T, A, B, C, D, E, X(3), X(5), X(11), X(0));
+    BODY_60_79(68, E, T, A, B, C, D, X(4), X(6), X(12), X(1));
+    BODY_60_79(69, D, E, T, A, B, C, X(5), X(7), X(13), X(2));
+    BODY_60_79(70, C, D, E, T, A, B, X(6), X(8), X(14), X(3));
+    BODY_60_79(71, B, C, D, E, T, A, X(7), X(9), X(15), X(4));
+    BODY_60_79(72, A, B, C, D, E, T, X(8), X(10), X(0), X(5));
+    BODY_60_79(73, T, A, B, C, D, E, X(9), X(11), X(1), X(6));
+    BODY_60_79(74, E, T, A, B, C, D, X(10), X(12), X(2), X(7));
+    BODY_60_79(75, D, E, T, A, B, C, X(11), X(13), X(3), X(8));
+    BODY_60_79(76, C, D, E, T, A, B, X(12), X(14), X(4), X(9));
+    BODY_60_79(77, B, C, D, E, T, A, X(13), X(15), X(5), X(10));
+    BODY_60_79(78, A, B, C, D, E, T, X(14), X(0), X(6), X(11));
+    BODY_60_79(79, T, A, B, C, D, E, X(15), X(1), X(7), X(12));
+
+    state[0] = (state[0] + E) & 0xffffffffL;
+    state[1] = (state[1] + T) & 0xffffffffL;
+    state[2] = (state[2] + A) & 0xffffffffL;
+    state[3] = (state[3] + B) & 0xffffffffL;
+    state[4] = (state[4] + C) & 0xffffffffL;
+
+    if (--num == 0) {
+      break;
+    }
+
+    A = state[0];
+    B = state[1];
+    C = state[2];
+    D = state[3];
+    E = state[4];
+  }
+}
+#endif
+
+#undef DATA_ORDER_IS_BIG_ENDIAN
+#undef HASH_CTX
+#undef HASH_CBLOCK
+#undef HASH_MAKE_STRING
+#undef HASH_UPDATE
+#undef HASH_TRANSFORM
+#undef HASH_FINAL
+#undef HASH_BLOCK_DATA_ORDER
+#undef ROTATE
+#undef Xupdate
+#undef K_00_19
+#undef K_20_39
+#undef K_40_59
+#undef K_60_79
+#undef F_00_19
+#undef F_20_39
+#undef F_40_59
+#undef F_60_79
+#undef BODY_00_15
+#undef BODY_16_19
+#undef BODY_20_31
+#undef BODY_32_39
+#undef BODY_40_59
+#undef BODY_60_79
+#undef X
+#undef HOST_c2l
+#undef HOST_l2c
diff --git a/src/crypto/fipsmodule/sha/sha256.c b/src/crypto/fipsmodule/sha/sha256.c
new file mode 100644
index 0000000..cd6becb
--- /dev/null
+++ b/src/crypto/fipsmodule/sha/sha256.c
@@ -0,0 +1,337 @@
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *    "This product includes cryptographic software written by
+ *     Eric Young (eay@cryptsoft.com)"
+ *    The word 'cryptographic' can be left out if the rouines from the library
+ *    being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ *    the apps directory (application code) you must include an acknowledgement:
+ *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed.  i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.] */
+
+#include <openssl/sha.h>
+
+#include <string.h>
+
+#include <openssl/mem.h>
+
+#include "../../internal.h"
+
+
+#if !defined(OPENSSL_NO_ASM) &&                         \
+    (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \
+     defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64))
+#define SHA256_ASM
+#endif
+
+int SHA224_Init(SHA256_CTX *sha) {
+  OPENSSL_memset(sha, 0, sizeof(SHA256_CTX));
+  sha->h[0] = 0xc1059ed8UL;
+  sha->h[1] = 0x367cd507UL;
+  sha->h[2] = 0x3070dd17UL;
+  sha->h[3] = 0xf70e5939UL;
+  sha->h[4] = 0xffc00b31UL;
+  sha->h[5] = 0x68581511UL;
+  sha->h[6] = 0x64f98fa7UL;
+  sha->h[7] = 0xbefa4fa4UL;
+  sha->md_len = SHA224_DIGEST_LENGTH;
+  return 1;
+}
+
+int SHA256_Init(SHA256_CTX *sha) {
+  OPENSSL_memset(sha, 0, sizeof(SHA256_CTX));
+  sha->h[0] = 0x6a09e667UL;
+  sha->h[1] = 0xbb67ae85UL;
+  sha->h[2] = 0x3c6ef372UL;
+  sha->h[3] = 0xa54ff53aUL;
+  sha->h[4] = 0x510e527fUL;
+  sha->h[5] = 0x9b05688cUL;
+  sha->h[6] = 0x1f83d9abUL;
+  sha->h[7] = 0x5be0cd19UL;
+  sha->md_len = SHA256_DIGEST_LENGTH;
+  return 1;
+}
+
+uint8_t *SHA224(const uint8_t *data, size_t len, uint8_t *out) {
+  SHA256_CTX ctx;
+  SHA224_Init(&ctx);
+  SHA224_Update(&ctx, data, len);
+  SHA224_Final(out, &ctx);
+  OPENSSL_cleanse(&ctx, sizeof(ctx));
+  return out;
+}
+
+uint8_t *SHA256(const uint8_t *data, size_t len, uint8_t *out) {
+  SHA256_CTX ctx;
+  SHA256_Init(&ctx);
+  SHA256_Update(&ctx, data, len);
+  SHA256_Final(out, &ctx);
+  OPENSSL_cleanse(&ctx, sizeof(ctx));
+  return out;
+}
+
+int SHA224_Update(SHA256_CTX *ctx, const void *data, size_t len) {
+  return SHA256_Update(ctx, data, len);
+}
+
+int SHA224_Final(uint8_t *md, SHA256_CTX *ctx) {
+  return SHA256_Final(md, ctx);
+}
+
+#define DATA_ORDER_IS_BIG_ENDIAN
+
+#define HASH_CTX SHA256_CTX
+#define HASH_CBLOCK 64
+
+/* Note that FIPS 180-2 discusses "Truncation of the Hash Function Output";
+ * the default: case below covers it. It is not clear, however, whether it is
+ * permitted to truncate to a number of bytes not divisible by 4. I bet not,
+ * but if it is, then the default: case will have to be extended. For
+ * reference, the idea behind the separate cases for pre-defined lengths is to
+ * let the compiler decide whether it is appropriate to unroll small loops.
+ *
+ * TODO(davidben): The small |md_len| case is one of the few places a low-level
+ * hash 'final' function can fail. This should never happen. */
+#define HASH_MAKE_STRING(c, s)                              \
+  do {                                                      \
+    uint32_t ll;                                            \
+    unsigned int nn;                                        \
+    switch ((c)->md_len) {                                  \
+      case SHA224_DIGEST_LENGTH:                            \
+        for (nn = 0; nn < SHA224_DIGEST_LENGTH / 4; nn++) { \
+          ll = (c)->h[nn];                                  \
+          HOST_l2c(ll, (s));                                \
+        }                                                   \
+        break;                                              \
+      case SHA256_DIGEST_LENGTH:                            \
+        for (nn = 0; nn < SHA256_DIGEST_LENGTH / 4; nn++) { \
+          ll = (c)->h[nn];                                  \
+          HOST_l2c(ll, (s));                                \
+        }                                                   \
+        break;                                              \
+      default:                                              \
+        if ((c)->md_len > SHA256_DIGEST_LENGTH) {           \
+          return 0;                                         \
+        }                                                   \
+        for (nn = 0; nn < (c)->md_len / 4; nn++) {          \
+          ll = (c)->h[nn];                                  \
+          HOST_l2c(ll, (s));                                \
+        }                                                   \
+        break;                                              \
+    }                                                       \
+  } while (0)
+
+
+#define HASH_UPDATE SHA256_Update
+#define HASH_TRANSFORM SHA256_Transform
+#define HASH_FINAL SHA256_Final
+#define HASH_BLOCK_DATA_ORDER sha256_block_data_order
+#ifndef SHA256_ASM
+static
+#endif
+void sha256_block_data_order(uint32_t *state, const uint8_t *in, size_t num);
+
+#include "../digest/md32_common.h"
+
+#ifndef SHA256_ASM
+static const uint32_t K256[64] = {
+    0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL, 0x3956c25bUL,
+    0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL, 0xd807aa98UL, 0x12835b01UL,
+    0x243185beUL, 0x550c7dc3UL, 0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL,
+    0xc19bf174UL, 0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL,
+    0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL, 0x983e5152UL,
+    0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL, 0xc6e00bf3UL, 0xd5a79147UL,
+    0x06ca6351UL, 0x14292967UL, 0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL,
+    0x53380d13UL, 0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL,
+    0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL, 0xd192e819UL,
+    0xd6990624UL, 0xf40e3585UL, 0x106aa070UL, 0x19a4c116UL, 0x1e376c08UL,
+    0x2748774cUL, 0x34b0bcb5UL, 0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL,
+    0x682e6ff3UL, 0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
+    0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL};
+
+#define ROTATE(a, n) (((a) << (n)) | ((a) >> (32 - (n))))
+
+/* The FIPS specification refers to right rotations, while our ROTATE macro is
+ * a left rotation. This is why the rotation coefficients here differ from
+ * those in the FIPS document by 32-N: a right rotation by N is the same as a
+ * left rotation by 32-N. */
+#define Sigma0(x) (ROTATE((x), 30) ^ ROTATE((x), 19) ^ ROTATE((x), 10))
+#define Sigma1(x) (ROTATE((x), 26) ^ ROTATE((x), 21) ^ ROTATE((x), 7))
+#define sigma0(x) (ROTATE((x), 25) ^ ROTATE((x), 14) ^ ((x) >> 3))
+#define sigma1(x) (ROTATE((x), 15) ^ ROTATE((x), 13) ^ ((x) >> 10))
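+
+/* For example, Sigma0's right rotations by 2, 13 and 22 in FIPS 180-2 appear
+ * above as left rotations by 30, 19 and 10. */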
+
+#define Ch(x, y, z) (((x) & (y)) ^ ((~(x)) & (z)))
+#define Maj(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
+
+#define ROUND_00_15(i, a, b, c, d, e, f, g, h)   \
+  do {                                           \
+    T1 += h + Sigma1(e) + Ch(e, f, g) + K256[i]; \
+    h = Sigma0(a) + Maj(a, b, c);                \
+    d += T1;                                     \
+    h += T1;                                     \
+  } while (0)
+
+#define ROUND_16_63(i, a, b, c, d, e, f, g, h, X)      \
+  do {                                                 \
+    s0 = X[(i + 1) & 0x0f];                            \
+    s0 = sigma0(s0);                                   \
+    s1 = X[(i + 14) & 0x0f];                           \
+    s1 = sigma1(s1);                                   \
+    T1 = X[(i) & 0x0f] += s0 + s1 + X[(i + 9) & 0x0f]; \
+    ROUND_00_15(i, a, b, c, d, e, f, g, h);            \
+  } while (0)
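+
+/* X[] is used as a 16-word circular buffer for the message schedule;
+ * ROUND_16_63 expands the word for round i in place instead of keeping all 64
+ * schedule words around. */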
+
+static void sha256_block_data_order(uint32_t *state, const uint8_t *data,
+                                    size_t num) {
+  uint32_t a, b, c, d, e, f, g, h, s0, s1, T1;
+  uint32_t X[16];
+  int i;
+
+  while (num--) {
+    a = state[0];
+    b = state[1];
+    c = state[2];
+    d = state[3];
+    e = state[4];
+    f = state[5];
+    g = state[6];
+    h = state[7];
+
+    uint32_t l;
+
+    HOST_c2l(data, l);
+    T1 = X[0] = l;
+    ROUND_00_15(0, a, b, c, d, e, f, g, h);
+    HOST_c2l(data, l);
+    T1 = X[1] = l;
+    ROUND_00_15(1, h, a, b, c, d, e, f, g);
+    HOST_c2l(data, l);
+    T1 = X[2] = l;
+    ROUND_00_15(2, g, h, a, b, c, d, e, f);
+    HOST_c2l(data, l);
+    T1 = X[3] = l;
+    ROUND_00_15(3, f, g, h, a, b, c, d, e);
+    HOST_c2l(data, l);
+    T1 = X[4] = l;
+    ROUND_00_15(4, e, f, g, h, a, b, c, d);
+    HOST_c2l(data, l);
+    T1 = X[5] = l;
+    ROUND_00_15(5, d, e, f, g, h, a, b, c);
+    HOST_c2l(data, l);
+    T1 = X[6] = l;
+    ROUND_00_15(6, c, d, e, f, g, h, a, b);
+    HOST_c2l(data, l);
+    T1 = X[7] = l;
+    ROUND_00_15(7, b, c, d, e, f, g, h, a);
+    HOST_c2l(data, l);
+    T1 = X[8] = l;
+    ROUND_00_15(8, a, b, c, d, e, f, g, h);
+    HOST_c2l(data, l);
+    T1 = X[9] = l;
+    ROUND_00_15(9, h, a, b, c, d, e, f, g);
+    HOST_c2l(data, l);
+    T1 = X[10] = l;
+    ROUND_00_15(10, g, h, a, b, c, d, e, f);
+    HOST_c2l(data, l);
+    T1 = X[11] = l;
+    ROUND_00_15(11, f, g, h, a, b, c, d, e);
+    HOST_c2l(data, l);
+    T1 = X[12] = l;
+    ROUND_00_15(12, e, f, g, h, a, b, c, d);
+    HOST_c2l(data, l);
+    T1 = X[13] = l;
+    ROUND_00_15(13, d, e, f, g, h, a, b, c);
+    HOST_c2l(data, l);
+    T1 = X[14] = l;
+    ROUND_00_15(14, c, d, e, f, g, h, a, b);
+    HOST_c2l(data, l);
+    T1 = X[15] = l;
+    ROUND_00_15(15, b, c, d, e, f, g, h, a);
+
+    for (i = 16; i < 64; i += 8) {
+      ROUND_16_63(i + 0, a, b, c, d, e, f, g, h, X);
+      ROUND_16_63(i + 1, h, a, b, c, d, e, f, g, X);
+      ROUND_16_63(i + 2, g, h, a, b, c, d, e, f, X);
+      ROUND_16_63(i + 3, f, g, h, a, b, c, d, e, X);
+      ROUND_16_63(i + 4, e, f, g, h, a, b, c, d, X);
+      ROUND_16_63(i + 5, d, e, f, g, h, a, b, c, X);
+      ROUND_16_63(i + 6, c, d, e, f, g, h, a, b, X);
+      ROUND_16_63(i + 7, b, c, d, e, f, g, h, a, X);
+    }
+
+    state[0] += a;
+    state[1] += b;
+    state[2] += c;
+    state[3] += d;
+    state[4] += e;
+    state[5] += f;
+    state[6] += g;
+    state[7] += h;
+  }
+}
+
+#endif /* !SHA256_ASM */
+
+#undef DATA_ORDER_IS_BIG_ENDIAN
+#undef HASH_CTX
+#undef HASH_CBLOCK
+#undef HASH_MAKE_STRING
+#undef HASH_UPDATE
+#undef HASH_TRANSFORM
+#undef HASH_FINAL
+#undef HASH_BLOCK_DATA_ORDER
+#undef ROTATE
+#undef Sigma0
+#undef Sigma1
+#undef sigma0
+#undef sigma1
+#undef Ch
+#undef Maj
+#undef ROUND_00_15
+#undef ROUND_16_63
+#undef HOST_c2l
+#undef HOST_l2c
diff --git a/src/crypto/fipsmodule/sha/sha512.c b/src/crypto/fipsmodule/sha/sha512.c
new file mode 100644
index 0000000..6e1f79b
--- /dev/null
+++ b/src/crypto/fipsmodule/sha/sha512.c
@@ -0,0 +1,610 @@
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to.  The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *    "This product includes cryptographic software written by
+ *     Eric Young (eay@cryptsoft.com)"
+ *    The word 'cryptographic' can be left out if the rouines from the library
+ *    being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ *    the apps directory (application code) you must include an acknowledgement:
+ *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed.  i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.] */
+
+#include <openssl/sha.h>
+
+#include <string.h>
+
+#include <openssl/mem.h>
+
+#include "../../internal.h"
+
+
+/* IMPLEMENTATION NOTES.
+ *
+ * The 32-bit hash algorithms share a common byte-order-neutral collector and
+ * padding implementation that operates on unaligned data,
+ * ../md32_common.h. This SHA-512 implementation does not. The reasons
+ * [in reverse order] are:
+ *
+ * - It is the only 64-bit hash algorithm at the time of this writing, so
+ *   there is no need for a common collector/padding implementation [yet];
+ * - By supporting only a transform function that operates on *aligned* data,
+ *   the collector/padding function is simpler and easier to optimize. */
+
+#if !defined(OPENSSL_NO_ASM) &&                         \
+    (defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \
+     defined(OPENSSL_ARM) || defined(OPENSSL_AARCH64))
+#define SHA512_ASM
+#endif
+
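+/* On these targets |sha512_block_data_order| may be fed unaligned input
+ * directly; elsewhere, unaligned data is first copied into the aligned
+ * |SHA512_CTX.u| buffer. */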
+#if defined(OPENSSL_X86) || defined(OPENSSL_X86_64) || \
+    defined(__ARM_FEATURE_UNALIGNED)
+#define SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
+#endif
+
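+/* SHA384_Init and SHA512_Init load the initial hash values specified for
+ * SHA-384 and SHA-512 in FIPS 180-4. */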
+int SHA384_Init(SHA512_CTX *sha) {
+  sha->h[0] = UINT64_C(0xcbbb9d5dc1059ed8);
+  sha->h[1] = UINT64_C(0x629a292a367cd507);
+  sha->h[2] = UINT64_C(0x9159015a3070dd17);
+  sha->h[3] = UINT64_C(0x152fecd8f70e5939);
+  sha->h[4] = UINT64_C(0x67332667ffc00b31);
+  sha->h[5] = UINT64_C(0x8eb44a8768581511);
+  sha->h[6] = UINT64_C(0xdb0c2e0d64f98fa7);
+  sha->h[7] = UINT64_C(0x47b5481dbefa4fa4);
+
+  sha->Nl = 0;
+  sha->Nh = 0;
+  sha->num = 0;
+  sha->md_len = SHA384_DIGEST_LENGTH;
+  return 1;
+}
+
+
+int SHA512_Init(SHA512_CTX *sha) {
+  sha->h[0] = UINT64_C(0x6a09e667f3bcc908);
+  sha->h[1] = UINT64_C(0xbb67ae8584caa73b);
+  sha->h[2] = UINT64_C(0x3c6ef372fe94f82b);
+  sha->h[3] = UINT64_C(0xa54ff53a5f1d36f1);
+  sha->h[4] = UINT64_C(0x510e527fade682d1);
+  sha->h[5] = UINT64_C(0x9b05688c2b3e6c1f);
+  sha->h[6] = UINT64_C(0x1f83d9abfb41bd6b);
+  sha->h[7] = UINT64_C(0x5be0cd19137e2179);
+
+  sha->Nl = 0;
+  sha->Nh = 0;
+  sha->num = 0;
+  sha->md_len = SHA512_DIGEST_LENGTH;
+  return 1;
+}
+
+uint8_t *SHA384(const uint8_t *data, size_t len, uint8_t *out) {
+  SHA512_CTX ctx;
+  SHA384_Init(&ctx);
+  SHA384_Update(&ctx, data, len);
+  SHA384_Final(out, &ctx);
+  OPENSSL_cleanse(&ctx, sizeof(ctx));
+  return out;
+}
+
+uint8_t *SHA512(const uint8_t *data, size_t len, uint8_t *out) {
+  SHA512_CTX ctx;
+  SHA512_Init(&ctx);
+  SHA512_Update(&ctx, data, len);
+  SHA512_Final(out, &ctx);
+  OPENSSL_cleanse(&ctx, sizeof(ctx));
+  return out;
+}
+
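+/* sha512_block_data_order processes |num| 128-byte blocks. With SHA512_ASM it
+ * is provided by the perlasm-generated assembly; otherwise the C
+ * implementation below is used and the symbol is kept static. */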
+#if !defined(SHA512_ASM)
+static
+#endif
+void sha512_block_data_order(uint64_t *state, const uint64_t *W, size_t num);
+
+
+int SHA384_Final(uint8_t *md, SHA512_CTX *sha) {
+  return SHA512_Final(md, sha);
+}
+
+int SHA384_Update(SHA512_CTX *sha, const void *data, size_t len) {
+  return SHA512_Update(sha, data, len);
+}
+
+void SHA512_Transform(SHA512_CTX *c, const uint8_t *block) {
+#ifndef SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
+  if ((size_t)block % sizeof(c->u.d[0]) != 0) {
+    OPENSSL_memcpy(c->u.p, block, sizeof(c->u.p));
+    block = c->u.p;
+  }
+#endif
+  sha512_block_data_order(c->h, (uint64_t *)block, 1);
+}
+
+int SHA512_Update(SHA512_CTX *c, const void *in_data, size_t len) {
+  uint64_t l;
+  uint8_t *p = c->u.p;
+  const uint8_t *data = (const uint8_t *)in_data;
+
+  if (len == 0) {
+    return 1;
+  }
+
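+  /* Update the 128-bit message bit count held in |Nh|:|Nl|, carrying into
+   * |Nh| when the low word overflows. */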
+  l = (c->Nl + (((uint64_t)len) << 3)) & UINT64_C(0xffffffffffffffff);
+  if (l < c->Nl) {
+    c->Nh++;
+  }
+  if (sizeof(len) >= 8) {
+    c->Nh += (((uint64_t)len) >> 61);
+  }
+  c->Nl = l;
+
+  if (c->num != 0) {
+    size_t n = sizeof(c->u) - c->num;
+
+    if (len < n) {
+      OPENSSL_memcpy(p + c->num, data, len);
+      c->num += (unsigned int)len;
+      return 1;
+    } else {
+      OPENSSL_memcpy(p + c->num, data, n);
+      c->num = 0;
+      len -= n;
+      data += n;
+      sha512_block_data_order(c->h, (uint64_t *)p, 1);
+    }
+  }
+
+  if (len >= sizeof(c->u)) {
+#ifndef SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
+    if ((size_t)data % sizeof(c->u.d[0]) != 0) {
+      while (len >= sizeof(c->u)) {
+        OPENSSL_memcpy(p, data, sizeof(c->u));
+        sha512_block_data_order(c->h, (uint64_t *)p, 1);
+        len -= sizeof(c->u);
+        data += sizeof(c->u);
+      }
+    } else
+#endif
+    {
+      sha512_block_data_order(c->h, (uint64_t *)data, len / sizeof(c->u));
+      data += len;
+      len %= sizeof(c->u);
+      data -= len;
+    }
+  }
+
+  if (len != 0) {
+    OPENSSL_memcpy(p, data, len);
+    c->num = (int)len;
+  }
+
+  return 1;
+}
+
+int SHA512_Final(uint8_t *md, SHA512_CTX *sha) {
+  uint8_t *p = (uint8_t *)sha->u.p;
+  size_t n = sha->num;
+
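+  /* Pad as in FIPS 180-4: a 0x80 byte, then zeros, then the 128-bit message
+   * length in bits stored big-endian in the last 16 bytes of the block. */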
+  p[n] = 0x80; /* There is always room for one. */
+  n++;
+  if (n > (sizeof(sha->u) - 16)) {
+    OPENSSL_memset(p + n, 0, sizeof(sha->u) - n);
+    n = 0;
+    sha512_block_data_order(sha->h, (uint64_t *)p, 1);
+  }
+
+  OPENSSL_memset(p + n, 0, sizeof(sha->u) - 16 - n);
+  p[sizeof(sha->u) - 1] = (uint8_t)(sha->Nl);
+  p[sizeof(sha->u) - 2] = (uint8_t)(sha->Nl >> 8);
+  p[sizeof(sha->u) - 3] = (uint8_t)(sha->Nl >> 16);
+  p[sizeof(sha->u) - 4] = (uint8_t)(sha->Nl >> 24);
+  p[sizeof(sha->u) - 5] = (uint8_t)(sha->Nl >> 32);
+  p[sizeof(sha->u) - 6] = (uint8_t)(sha->Nl >> 40);
+  p[sizeof(sha->u) - 7] = (uint8_t)(sha->Nl >> 48);
+  p[sizeof(sha->u) - 8] = (uint8_t)(sha->Nl >> 56);
+  p[sizeof(sha->u) - 9] = (uint8_t)(sha->Nh);
+  p[sizeof(sha->u) - 10] = (uint8_t)(sha->Nh >> 8);
+  p[sizeof(sha->u) - 11] = (uint8_t)(sha->Nh >> 16);
+  p[sizeof(sha->u) - 12] = (uint8_t)(sha->Nh >> 24);
+  p[sizeof(sha->u) - 13] = (uint8_t)(sha->Nh >> 32);
+  p[sizeof(sha->u) - 14] = (uint8_t)(sha->Nh >> 40);
+  p[sizeof(sha->u) - 15] = (uint8_t)(sha->Nh >> 48);
+  p[sizeof(sha->u) - 16] = (uint8_t)(sha->Nh >> 56);
+
+  sha512_block_data_order(sha->h, (uint64_t *)p, 1);
+
+  if (md == NULL) {
+    /* TODO(davidben): This NULL check is absent in other low-level hash 'final'
+     * functions and is one of the few places such a function can fail. */
+    return 0;
+  }
+
+  switch (sha->md_len) {
+    /* Let the compiler decide whether it's appropriate to unroll... */
+    case SHA384_DIGEST_LENGTH:
+      for (n = 0; n < SHA384_DIGEST_LENGTH / 8; n++) {
+        uint64_t t = sha->h[n];
+
+        *(md++) = (uint8_t)(t >> 56);
+        *(md++) = (uint8_t)(t >> 48);
+        *(md++) = (uint8_t)(t >> 40);
+        *(md++) = (uint8_t)(t >> 32);
+        *(md++) = (uint8_t)(t >> 24);
+        *(md++) = (uint8_t)(t >> 16);
+        *(md++) = (uint8_t)(t >> 8);
+        *(md++) = (uint8_t)(t);
+      }
+      break;
+    case SHA512_DIGEST_LENGTH:
+      for (n = 0; n < SHA512_DIGEST_LENGTH / 8; n++) {
+        uint64_t t = sha->h[n];
+
+        *(md++) = (uint8_t)(t >> 56);
+        *(md++) = (uint8_t)(t >> 48);
+        *(md++) = (uint8_t)(t >> 40);
+        *(md++) = (uint8_t)(t >> 32);
+        *(md++) = (uint8_t)(t >> 24);
+        *(md++) = (uint8_t)(t >> 16);
+        *(md++) = (uint8_t)(t >> 8);
+        *(md++) = (uint8_t)(t);
+      }
+      break;
+    /* ... as well as make sure md_len is not abused. */
+    default:
+      /* TODO(davidben): This bad |md_len| case is one of the few places a
+       * low-level hash 'final' function can fail. This should never happen. */
+      return 0;
+  }
+
+  return 1;
+}
+
+#ifndef SHA512_ASM
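+/* K512 holds the 80 SHA-512 round constants from FIPS 180-4 (the first 64 bits
+ * of the fractional parts of the cube roots of the first 80 primes). */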
+static const uint64_t K512[80] = {
+    UINT64_C(0x428a2f98d728ae22), UINT64_C(0x7137449123ef65cd),
+    UINT64_C(0xb5c0fbcfec4d3b2f), UINT64_C(0xe9b5dba58189dbbc),
+    UINT64_C(0x3956c25bf348b538), UINT64_C(0x59f111f1b605d019),
+    UINT64_C(0x923f82a4af194f9b), UINT64_C(0xab1c5ed5da6d8118),
+    UINT64_C(0xd807aa98a3030242), UINT64_C(0x12835b0145706fbe),
+    UINT64_C(0x243185be4ee4b28c), UINT64_C(0x550c7dc3d5ffb4e2),
+    UINT64_C(0x72be5d74f27b896f), UINT64_C(0x80deb1fe3b1696b1),
+    UINT64_C(0x9bdc06a725c71235), UINT64_C(0xc19bf174cf692694),
+    UINT64_C(0xe49b69c19ef14ad2), UINT64_C(0xefbe4786384f25e3),
+    UINT64_C(0x0fc19dc68b8cd5b5), UINT64_C(0x240ca1cc77ac9c65),
+    UINT64_C(0x2de92c6f592b0275), UINT64_C(0x4a7484aa6ea6e483),
+    UINT64_C(0x5cb0a9dcbd41fbd4), UINT64_C(0x76f988da831153b5),
+    UINT64_C(0x983e5152ee66dfab), UINT64_C(0xa831c66d2db43210),
+    UINT64_C(0xb00327c898fb213f), UINT64_C(0xbf597fc7beef0ee4),
+    UINT64_C(0xc6e00bf33da88fc2), UINT64_C(0xd5a79147930aa725),
+    UINT64_C(0x06ca6351e003826f), UINT64_C(0x142929670a0e6e70),
+    UINT64_C(0x27b70a8546d22ffc), UINT64_C(0x2e1b21385c26c926),
+    UINT64_C(0x4d2c6dfc5ac42aed), UINT64_C(0x53380d139d95b3df),
+    UINT64_C(0x650a73548baf63de), UINT64_C(0x766a0abb3c77b2a8),
+    UINT64_C(0x81c2c92e47edaee6), UINT64_C(0x92722c851482353b),
+    UINT64_C(0xa2bfe8a14cf10364), UINT64_C(0xa81a664bbc423001),
+    UINT64_C(0xc24b8b70d0f89791), UINT64_C(0xc76c51a30654be30),
+    UINT64_C(0xd192e819d6ef5218), UINT64_C(0xd69906245565a910),
+    UINT64_C(0xf40e35855771202a), UINT64_C(0x106aa07032bbd1b8),
+    UINT64_C(0x19a4c116b8d2d0c8), UINT64_C(0x1e376c085141ab53),
+    UINT64_C(0x2748774cdf8eeb99), UINT64_C(0x34b0bcb5e19b48a8),
+    UINT64_C(0x391c0cb3c5c95a63), UINT64_C(0x4ed8aa4ae3418acb),
+    UINT64_C(0x5b9cca4f7763e373), UINT64_C(0x682e6ff3d6b2b8a3),
+    UINT64_C(0x748f82ee5defb2fc), UINT64_C(0x78a5636f43172f60),
+    UINT64_C(0x84c87814a1f0ab72), UINT64_C(0x8cc702081a6439ec),
+    UINT64_C(0x90befffa23631e28), UINT64_C(0xa4506cebde82bde9),
+    UINT64_C(0xbef9a3f7b2c67915), UINT64_C(0xc67178f2e372532b),
+    UINT64_C(0xca273eceea26619c), UINT64_C(0xd186b8c721c0c207),
+    UINT64_C(0xeada7dd6cde0eb1e), UINT64_C(0xf57d4f7fee6ed178),
+    UINT64_C(0x06f067aa72176fba), UINT64_C(0x0a637dc5a2c898a6),
+    UINT64_C(0x113f9804bef90dae), UINT64_C(0x1b710b35131c471b),
+    UINT64_C(0x28db77f523047d84), UINT64_C(0x32caab7b40c72493),
+    UINT64_C(0x3c9ebe0a15c9bebc), UINT64_C(0x431d67c49c100d4c),
+    UINT64_C(0x4cc5d4becb3e42b6), UINT64_C(0x597f299cfc657e2a),
+    UINT64_C(0x5fcb6fab3ad6faec), UINT64_C(0x6c44198c4a475817),
+};
+
+#if defined(__GNUC__) && __GNUC__ >= 2 && !defined(OPENSSL_NO_ASM)
+#if defined(__x86_64) || defined(__x86_64__)
+#define ROTR(a, n)                                              \
+  ({                                                            \
+    uint64_t ret;                                               \
+    __asm__("rorq %1, %0" : "=r"(ret) : "J"(n), "0"(a) : "cc"); \
+    ret;                                                        \
+  })
+#define PULL64(x)                                \
+  ({                                             \
+    uint64_t ret = *((const uint64_t *)(&(x)));  \
+    __asm__("bswapq %0" : "=r"(ret) : "0"(ret)); \
+    ret;                                         \
+  })
+#elif (defined(__i386) || defined(__i386__))
+#define PULL64(x)                                                             \
+  ({                                                                          \
+    const unsigned int *p = (const unsigned int *)(&(x));                     \
+    unsigned int hi = p[0], lo = p[1];                                        \
+    __asm__("bswapl %0; bswapl %1;" : "=r"(lo), "=r"(hi) : "0"(lo), "1"(hi)); \
+    ((uint64_t)hi) << 32 | lo;                                                \
+  })
+#elif (defined(_ARCH_PPC) && defined(__64BIT__)) || defined(_ARCH_PPC64)
+#define ROTR(a, n)                                             \
+  ({                                                           \
+    uint64_t ret;                                              \
+    __asm__("rotrdi %0, %1, %2" : "=r"(ret) : "r"(a), "K"(n)); \
+    ret;                                                       \
+  })
+#elif defined(__aarch64__)
+#define ROTR(a, n)                                          \
+  ({                                                        \
+    uint64_t ret;                                           \
+    __asm__("ror %0, %1, %2" : "=r"(ret) : "r"(a), "I"(n)); \
+    ret;                                                    \
+  })
+#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
+    __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define PULL64(x)                                                         \
+  ({                                                                      \
+    uint64_t ret;                                                         \
+    __asm__("rev %0, %1" : "=r"(ret) : "r"(*((const uint64_t *)(&(x))))); \
+    ret;                                                                  \
+  })
+#endif
+#endif
+#elif defined(_MSC_VER)
+#if defined(_WIN64) /* applies to both IA-64 and AMD64 */
+#pragma intrinsic(_rotr64)
+#define ROTR(a, n) _rotr64((a), n)
+#endif
+#if defined(_M_IX86) && !defined(OPENSSL_NO_ASM)
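+/* __fastcall passes |x| in ecx. The byte-swapped result is left in edx:eax,
+ * which is where MSVC expects a 64-bit return value on x86, so no C return
+ * statement is needed. */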
+static uint64_t __fastcall __pull64be(const void *x) {
+  _asm mov edx, [ecx + 0]
+  _asm mov eax, [ecx + 4]
+  _asm bswap edx
+  _asm bswap eax
+}
+#define PULL64(x) __pull64be(&(x))
+#if _MSC_VER <= 1200
+#pragma inline_depth(0)
+#endif
+#endif
+#endif
+
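+/* ROTR is a 64-bit rotate right and PULL64 loads a 64-bit big-endian word from
+ * |x|. The compiler- and architecture-specific definitions above are
+ * supplemented by the portable fallbacks below. */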
+#ifndef PULL64
+#define B(x, j) \
+  (((uint64_t)(*(((const uint8_t *)(&x)) + j))) << ((7 - j) * 8))
+#define PULL64(x)                                                        \
+  (B(x, 0) | B(x, 1) | B(x, 2) | B(x, 3) | B(x, 4) | B(x, 5) | B(x, 6) | \
+   B(x, 7))
+#endif
+
+#ifndef ROTR
+#define ROTR(x, s) (((x) >> s) | (x) << (64 - s))
+#endif
+
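+/* Sigma0 and Sigma1 are the "big sigma" functions and sigma0 and sigma1 the
+ * "small sigma" functions from FIPS 180-4; Ch and Maj are the choice and
+ * majority functions used in each round. */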
+#define Sigma0(x) (ROTR((x), 28) ^ ROTR((x), 34) ^ ROTR((x), 39))
+#define Sigma1(x) (ROTR((x), 14) ^ ROTR((x), 18) ^ ROTR((x), 41))
+#define sigma0(x) (ROTR((x), 1) ^ ROTR((x), 8) ^ ((x) >> 7))
+#define sigma1(x) (ROTR((x), 19) ^ ROTR((x), 61) ^ ((x) >> 6))
+
+#define Ch(x, y, z) (((x) & (y)) ^ ((~(x)) & (z)))
+#define Maj(x, y, z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
+
+
+#if defined(__i386) || defined(__i386__) || defined(_M_IX86)
+/*
+ * This code should give better results, both size- and performance-wise, on a
+ * 32-bit CPU with fewer than ~24 registers.
+ */
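+/* Rather than shuffling eight named working variables every round, this
+ * variant keeps the state in a sliding window of |X|: |F| is decremented each
+ * round, so the values stored at F[0] (a), F[4] (e) and F[8] (the current
+ * schedule word) are read back as F[1..8] and F[8+k] in later rounds. */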
+static void sha512_block_data_order(uint64_t *state, const uint64_t *W,
+                                    size_t num) {
+  uint64_t A, E, T;
+  uint64_t X[9 + 80], *F;
+  int i;
+
+  while (num--) {
+    F = X + 80;
+    A = state[0];
+    F[1] = state[1];
+    F[2] = state[2];
+    F[3] = state[3];
+    E = state[4];
+    F[5] = state[5];
+    F[6] = state[6];
+    F[7] = state[7];
+
+    for (i = 0; i < 16; i++, F--) {
+      T = PULL64(W[i]);
+      F[0] = A;
+      F[4] = E;
+      F[8] = T;
+      T += F[7] + Sigma1(E) + Ch(E, F[5], F[6]) + K512[i];
+      E = F[3] + T;
+      A = T + Sigma0(A) + Maj(A, F[1], F[2]);
+    }
+
+    for (; i < 80; i++, F--) {
+      T = sigma0(F[8 + 16 - 1]);
+      T += sigma1(F[8 + 16 - 14]);
+      T += F[8 + 16] + F[8 + 16 - 9];
+
+      F[0] = A;
+      F[4] = E;
+      F[8] = T;
+      T += F[7] + Sigma1(E) + Ch(E, F[5], F[6]) + K512[i];
+      E = F[3] + T;
+      A = T + Sigma0(A) + Maj(A, F[1], F[2]);
+    }
+
+    state[0] += A;
+    state[1] += F[1];
+    state[2] += F[2];
+    state[3] += F[3];
+    state[4] += E;
+    state[5] += F[5];
+    state[6] += F[6];
+    state[7] += F[7];
+
+    W += 16;
+  }
+}
+
+#else
+
+#define ROUND_00_15(i, a, b, c, d, e, f, g, h)   \
+  do {                                           \
+    T1 += h + Sigma1(e) + Ch(e, f, g) + K512[i]; \
+    h = Sigma0(a) + Maj(a, b, c);                \
+    d += T1;                                     \
+    h += T1;                                     \
+  } while (0)
+
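+/* ROUND_16_80 expands the message schedule in place: the 16-entry circular
+ * buffer |X| holds the previous 16 schedule words, and each new word is
+ * W[i+j] = sigma1(W[i+j-2]) + W[i+j-7] + sigma0(W[i+j-15]) + W[i+j-16]. */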
+#define ROUND_16_80(i, j, a, b, c, d, e, f, g, h, X)   \
+  do {                                                 \
+    s0 = X[(j + 1) & 0x0f];                            \
+    s0 = sigma0(s0);                                   \
+    s1 = X[(j + 14) & 0x0f];                           \
+    s1 = sigma1(s1);                                   \
+    T1 = X[(j) & 0x0f] += s0 + s1 + X[(j + 9) & 0x0f]; \
+    ROUND_00_15(i + j, a, b, c, d, e, f, g, h);        \
+  } while (0)
+
+static void sha512_block_data_order(uint64_t *state, const uint64_t *W,
+                                    size_t num) {
+  uint64_t a, b, c, d, e, f, g, h, s0, s1, T1;
+  uint64_t X[16];
+  int i;
+
+  while (num--) {
+
+    a = state[0];
+    b = state[1];
+    c = state[2];
+    d = state[3];
+    e = state[4];
+    f = state[5];
+    g = state[6];
+    h = state[7];
+
+    T1 = X[0] = PULL64(W[0]);
+    ROUND_00_15(0, a, b, c, d, e, f, g, h);
+    T1 = X[1] = PULL64(W[1]);
+    ROUND_00_15(1, h, a, b, c, d, e, f, g);
+    T1 = X[2] = PULL64(W[2]);
+    ROUND_00_15(2, g, h, a, b, c, d, e, f);
+    T1 = X[3] = PULL64(W[3]);
+    ROUND_00_15(3, f, g, h, a, b, c, d, e);
+    T1 = X[4] = PULL64(W[4]);
+    ROUND_00_15(4, e, f, g, h, a, b, c, d);
+    T1 = X[5] = PULL64(W[5]);
+    ROUND_00_15(5, d, e, f, g, h, a, b, c);
+    T1 = X[6] = PULL64(W[6]);
+    ROUND_00_15(6, c, d, e, f, g, h, a, b);
+    T1 = X[7] = PULL64(W[7]);
+    ROUND_00_15(7, b, c, d, e, f, g, h, a);
+    T1 = X[8] = PULL64(W[8]);
+    ROUND_00_15(8, a, b, c, d, e, f, g, h);
+    T1 = X[9] = PULL64(W[9]);
+    ROUND_00_15(9, h, a, b, c, d, e, f, g);
+    T1 = X[10] = PULL64(W[10]);
+    ROUND_00_15(10, g, h, a, b, c, d, e, f);
+    T1 = X[11] = PULL64(W[11]);
+    ROUND_00_15(11, f, g, h, a, b, c, d, e);
+    T1 = X[12] = PULL64(W[12]);
+    ROUND_00_15(12, e, f, g, h, a, b, c, d);
+    T1 = X[13] = PULL64(W[13]);
+    ROUND_00_15(13, d, e, f, g, h, a, b, c);
+    T1 = X[14] = PULL64(W[14]);
+    ROUND_00_15(14, c, d, e, f, g, h, a, b);
+    T1 = X[15] = PULL64(W[15]);
+    ROUND_00_15(15, b, c, d, e, f, g, h, a);
+
+    for (i = 16; i < 80; i += 16) {
+      ROUND_16_80(i, 0, a, b, c, d, e, f, g, h, X);
+      ROUND_16_80(i, 1, h, a, b, c, d, e, f, g, X);
+      ROUND_16_80(i, 2, g, h, a, b, c, d, e, f, X);
+      ROUND_16_80(i, 3, f, g, h, a, b, c, d, e, X);
+      ROUND_16_80(i, 4, e, f, g, h, a, b, c, d, X);
+      ROUND_16_80(i, 5, d, e, f, g, h, a, b, c, X);
+      ROUND_16_80(i, 6, c, d, e, f, g, h, a, b, X);
+      ROUND_16_80(i, 7, b, c, d, e, f, g, h, a, X);
+      ROUND_16_80(i, 8, a, b, c, d, e, f, g, h, X);
+      ROUND_16_80(i, 9, h, a, b, c, d, e, f, g, X);
+      ROUND_16_80(i, 10, g, h, a, b, c, d, e, f, X);
+      ROUND_16_80(i, 11, f, g, h, a, b, c, d, e, X);
+      ROUND_16_80(i, 12, e, f, g, h, a, b, c, d, X);
+      ROUND_16_80(i, 13, d, e, f, g, h, a, b, c, X);
+      ROUND_16_80(i, 14, c, d, e, f, g, h, a, b, X);
+      ROUND_16_80(i, 15, b, c, d, e, f, g, h, a, X);
+    }
+
+    state[0] += a;
+    state[1] += b;
+    state[2] += c;
+    state[3] += d;
+    state[4] += e;
+    state[5] += f;
+    state[6] += g;
+    state[7] += h;
+
+    W += 16;
+  }
+}
+
+#endif
+
+#endif /* !SHA512_ASM */
+
+#undef ROTR
+#undef PULL64
+#undef B
+#undef Sigma0
+#undef Sigma1
+#undef sigma0
+#undef sigma1
+#undef Ch
+#undef Maj
+#undef ROUND_00_15
+#undef ROUND_16_80
+#undef HOST_c2l
+#undef HOST_l2c