external/boringssl: Sync to 3a3552247ecb0bfb260a36d9da7a3bce7fdc3f8a.
This includes the following changes:
https://boringssl.googlesource.com/boringssl/+log/e60b080dda138e1dd02d99fb34641ac22e46c85d..3a3552247ecb0bfb260a36d9da7a3bce7fdc3f8a
Also changes Android.bp to compile with execute-only memory again, as
disabling it should no longer be necessary after the mprotect changes in
https://boringssl.googlesource.com/boringssl/+/09a9ec036030ac84896f5143548d05f3951d1817
Bug: 134580074
Test: atest CtsLibcoreTestCases CtsLibcoreOkHttpTestCases
Change-Id: I0ec54998afd2e0b40ec930716397e20aa3c21bf2
diff --git a/src/crypto/base64/base64_test.cc b/src/crypto/base64/base64_test.cc
index 6905659..6484dc6 100644
--- a/src/crypto/base64/base64_test.cc
+++ b/src/crypto/base64/base64_test.cc
@@ -105,7 +105,7 @@
class Base64Test : public testing::TestWithParam<Base64TestVector> {};
-INSTANTIATE_TEST_SUITE_P(, Base64Test, testing::ValuesIn(kTestVectors));
+INSTANTIATE_TEST_SUITE_P(All, Base64Test, testing::ValuesIn(kTestVectors));
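+// (Note: googletest is removing support for empty instantiation names, hence
+// the "All" prefix.)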
// RemoveNewlines returns a copy of |in| with all '\n' characters removed.
static std::string RemoveNewlines(const char *in) {
diff --git a/src/crypto/bio/bio_test.cc b/src/crypto/bio/bio_test.cc
index 797a733..765e962 100644
--- a/src/crypto/bio/bio_test.cc
+++ b/src/crypto/bio/bio_test.cc
@@ -220,7 +220,7 @@
}
}
-INSTANTIATE_TEST_SUITE_P(, BIOASN1Test, testing::ValuesIn(kASN1TestParams));
+INSTANTIATE_TEST_SUITE_P(All, BIOASN1Test, testing::ValuesIn(kASN1TestParams));
// Run through the tests twice, swapping |bio1| and |bio2|, for symmetry.
class BIOPairTest : public testing::TestWithParam<bool> {};
@@ -322,4 +322,4 @@
EXPECT_EQ(Bytes("12345"), Bytes(buf, 5));
}
-INSTANTIATE_TEST_SUITE_P(, BIOPairTest, testing::Values(false, true));
+INSTANTIATE_TEST_SUITE_P(All, BIOPairTest, testing::Values(false, true));
diff --git a/src/crypto/cipher_extra/aead_test.cc b/src/crypto/cipher_extra/aead_test.cc
index 25924bd..caabb77 100644
--- a/src/crypto/cipher_extra/aead_test.cc
+++ b/src/crypto/cipher_extra/aead_test.cc
@@ -53,6 +53,8 @@
true, 0},
{"AES_128_GCM_NIST", EVP_aead_aes_128_gcm, "nist_cavp/aes_128_gcm.txt",
false, true, true, 0},
+ {"AES_192_GCM", EVP_aead_aes_192_gcm, "aes_192_gcm_tests.txt", false, true,
+ true, 0},
{"AES_256_GCM", EVP_aead_aes_256_gcm, "aes_256_gcm_tests.txt", false, true,
true, 0},
{"AES_256_GCM_NIST", EVP_aead_aes_256_gcm, "nist_cavp/aes_256_gcm.txt",
@@ -101,7 +103,7 @@
const EVP_AEAD *aead() { return GetParam().func(); }
};
-INSTANTIATE_TEST_SUITE_P(, PerAEADTest, testing::ValuesIn(kAEADs),
+INSTANTIATE_TEST_SUITE_P(All, PerAEADTest, testing::ValuesIn(kAEADs),
                         [](const testing::TestParamInfo<KnownAEAD> &params)
-> std::string { return params.param.name; });
@@ -801,9 +803,8 @@
aead = EVP_aead_aes_128_gcm();
break;
case 192:
- // Skip AES-192-GCM tests.
- t->SkipCurrent();
- return;
+ aead = EVP_aead_aes_192_gcm();
+ break;
case 256:
aead = EVP_aead_aes_256_gcm();
break;
diff --git a/src/crypto/cipher_extra/test/aes_192_gcm_tests.txt b/src/crypto/cipher_extra/test/aes_192_gcm_tests.txt
new file mode 100644
index 0000000..154e3c3
--- /dev/null
+++ b/src/crypto/cipher_extra/test/aes_192_gcm_tests.txt
@@ -0,0 +1,43 @@
+# Test vectors from NIST: http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-spec.pdf
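+#
+# Each test case gives the hex-encoded KEY, NONCE, additional data (AD),
+# plaintext (IN), ciphertext (CT), and authentication TAG.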
+
+KEY: 000000000000000000000000000000000000000000000000
+NONCE: 000000000000000000000000
+AD:
+TAG: cd33b28ac773f74ba00ed1f312572435
+IN:
+CT:
+
+KEY: 000000000000000000000000000000000000000000000000
+NONCE: 000000000000000000000000
+AD:
+TAG: 2ff58d80033927ab8ef4d4587514f0fb
+IN: 00000000000000000000000000000000
+CT: 98e7247c07f0fe411c267e4384b0f600
+
+KEY: feffe9928665731c6d6a8f9467308308feffe9928665731c
+NONCE: cafebabefacedbaddecaf888
+AD:
+TAG: 9924a7c8587336bfb118024db8674a14
+IN: d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b391aafd255
+CT: 3980ca0b3c00e841eb06fac4872a2757859e1ceaa6efd984628593b40ca1e19c7d773d00c144c525ac619d18c84a3f4718e2448b2fe324d9ccda2710acade256
+
+KEY: feffe9928665731c6d6a8f9467308308feffe9928665731c
+NONCE: cafebabefacedbaddecaf888
+AD: feedfacedeadbeeffeedfacedeadbeefabaddad2
+TAG: 2519498e80f1478f37ba55bd6d27618c
+IN: d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39
+CT: 3980ca0b3c00e841eb06fac4872a2757859e1ceaa6efd984628593b40ca1e19c7d773d00c144c525ac619d18c84a3f4718e2448b2fe324d9ccda2710
+
+KEY: feffe9928665731c6d6a8f9467308308feffe9928665731c
+NONCE: cafebabefacedbad
+AD: feedfacedeadbeeffeedfacedeadbeefabaddad2
+TAG: 65dcc57fcf623a24094fcca40d3533f8
+IN: d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39
+CT: 0f10f599ae14a154ed24b36e25324db8c566632ef2bbb34f8347280fc4507057fddc29df9a471f75c66541d4d4dad1c9e93a19a58e8b473fa0f062f7
+
+KEY: feffe9928665731c6d6a8f9467308308feffe9928665731c
+NONCE: 9313225df88406e555909c5aff5269aa6a7a9538534f7da1e4c303d2a318a728c3c0c95156809539fcf0e2429a6b525416aedbf5a0de6a57a637b39b
+AD: feedfacedeadbeeffeedfacedeadbeefabaddad2
+TAG: dcf566ff291c25bbb8568fc3d376a6d9
+IN: d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39
+CT: d27e88681ce3243c4830165a8fdcf9ff1de9a1d8e6b447ef6ef7b79828666e4581e79012af34ddd9e2f037589b292db3e67c036745fa22e7e9b7373b
diff --git a/src/crypto/cpu-intel.c b/src/crypto/cpu-intel.c
index 1621ef6..832e9d6 100644
--- a/src/crypto/cpu-intel.c
+++ b/src/crypto/cpu-intel.c
@@ -164,17 +164,23 @@
if (is_amd) {
// See https://www.amd.com/system/files/TechDocs/25481.pdf, page 10.
const uint32_t base_family = (eax >> 8) & 15;
+ const uint32_t base_model = (eax >> 4) & 15;
uint32_t family = base_family;
+ uint32_t model = base_model;
if (base_family == 0xf) {
const uint32_t ext_family = (eax >> 20) & 255;
family += ext_family;
+ const uint32_t ext_model = (eax >> 16) & 15;
+ model |= ext_model << 4;
}
- if (family < 0x17) {
+ if (family < 0x17 || (family == 0x17 && 0x70 <= model && model <= 0x7f)) {
// Disable RDRAND on AMD families before 0x17 (Zen) due to reported
// failures after suspend.
// https://bugzilla.redhat.com/show_bug.cgi?id=1150286
+ // Also disable for family 0x17, models 0x70-0x7f, due to possible RDRAND
+ // failures there too.
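+ // (For example, an eax of 0x00870f10, reported by some Zen 2 parts, decodes
+ // to family 0xf + 0x8 = 0x17 and model 0x1 | (0x7 << 4) = 0x71.)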
ecx &= ~(1u << 30);
}
}
diff --git a/src/crypto/ec_extra/ec_asn1.c b/src/crypto/ec_extra/ec_asn1.c
index 31988f3..9769d01 100644
--- a/src/crypto/ec_extra/ec_asn1.c
+++ b/src/crypto/ec_extra/ec_asn1.c
@@ -264,7 +264,8 @@
CBS *out_base_y, CBS *out_order) {
// See RFC 3279, section 2.3.5. Note that RFC 3279 calls this structure an
// ECParameters while RFC 5480 calls it a SpecifiedECDomain.
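+  // The structure, paraphrased from RFC 3279, is:
+  //
+  //   ECParameters ::= SEQUENCE {
+  //     version   ECPVer,           -- always ecpVer1(1)
+  //     fieldID   FieldID,          -- field type and parameters
+  //     curve     Curve,            -- coefficients a and b, optional seed
+  //     base      ECPoint,          -- base point, as an OCTET STRING
+  //     order     INTEGER,
+  //     cofactor  INTEGER OPTIONAL }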
- CBS params, field_id, field_type, curve, base;
+ CBS params, field_id, field_type, curve, base, cofactor;
+ int has_cofactor;
uint64_t version;
  if (!CBS_get_asn1(in, &params, CBS_ASN1_SEQUENCE) ||
      !CBS_get_asn1_uint64(&params, &version) ||
@@ -272,7 +273,8 @@
      !CBS_get_asn1(&params, &field_id, CBS_ASN1_SEQUENCE) ||
!CBS_get_asn1(&field_id, &field_type, CBS_ASN1_OBJECT) ||
CBS_len(&field_type) != sizeof(kPrimeField) ||
- OPENSSL_memcmp(CBS_data(&field_type), kPrimeField, sizeof(kPrimeField)) != 0 ||
+ OPENSSL_memcmp(CBS_data(&field_type), kPrimeField, sizeof(kPrimeField)) !=
+ 0 ||
!CBS_get_asn1(&field_id, out_prime, CBS_ASN1_INTEGER) ||
!is_unsigned_integer(out_prime) ||
CBS_len(&field_id) != 0 ||
@@ -280,16 +282,26 @@
!CBS_get_asn1(&curve, out_a, CBS_ASN1_OCTETSTRING) ||
!CBS_get_asn1(&curve, out_b, CBS_ASN1_OCTETSTRING) ||
// |curve| has an optional BIT STRING seed which we ignore.
+ !CBS_get_optional_asn1(&curve, NULL, NULL, CBS_ASN1_BITSTRING) ||
+ CBS_len(&curve) != 0 ||
      !CBS_get_asn1(&params, &base, CBS_ASN1_OCTETSTRING) ||
      !CBS_get_asn1(&params, out_order, CBS_ASN1_INTEGER) ||
-      !is_unsigned_integer(out_order)) {
+      !is_unsigned_integer(out_order) ||
+      !CBS_get_optional_asn1(&params, &cofactor, &has_cofactor,
+                             CBS_ASN1_INTEGER) ||
+      CBS_len(&params) != 0) {
OPENSSL_PUT_ERROR(EC, EC_R_DECODE_ERROR);
return 0;
}
- // |params| has an optional cofactor which we ignore. With the optional seed
- // in |curve|, a group already has arbitrarily many encodings. Parse enough to
- // uniquely determine the curve.
+ if (has_cofactor) {
+ // We only support prime-order curves so the cofactor must be one.
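+    // (In DER, an INTEGER of one has a single contents octet, 0x01.)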
+    if (CBS_len(&cofactor) != 1 || CBS_data(&cofactor)[0] != 1) {
+ OPENSSL_PUT_ERROR(EC, EC_R_UNKNOWN_GROUP);
+ return 0;
+ }
+ }
// Require that the base point use uncompressed form.
uint8_t form;
diff --git a/src/crypto/err/evp.errordata b/src/crypto/err/evp.errordata
index 771cd6a..390dec0 100644
--- a/src/crypto/err/evp.errordata
+++ b/src/crypto/err/evp.errordata
@@ -22,6 +22,7 @@
EVP,132,MEMORY_LIMIT_EXCEEDED
EVP,118,MISSING_PARAMETERS
EVP,130,NOT_A_PRIVATE_KEY
+EVP,135,NOT_XOF_OR_INVALID_LENGTH
EVP,119,NO_DEFAULT_DIGEST
EVP,120,NO_KEY_SET
EVP,121,NO_MDC2_SUPPORT
diff --git a/src/crypto/evp/evp.c b/src/crypto/evp/evp.c
index 0e90b6f..60fdf64 100644
--- a/src/crypto/evp/evp.c
+++ b/src/crypto/evp/evp.c
@@ -71,6 +71,11 @@
#include "../internal.h"
+// Node depends on |EVP_R_NOT_XOF_OR_INVALID_LENGTH|.
+//
+// TODO(davidben): Fix Node to not touch the error queue itself and remove this.
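+// (|OPENSSL_DECLARE_ERROR_REASON| presumably just keeps the otherwise-unused
+// reason value and its string in the generated error tables.)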
+OPENSSL_DECLARE_ERROR_REASON(EVP, NOT_XOF_OR_INVALID_LENGTH)
+
EVP_PKEY *EVP_PKEY_new(void) {
EVP_PKEY *ret;
diff --git a/src/crypto/evp/evp_tests.txt b/src/crypto/evp/evp_tests.txt
index 2ac51d3..0c890fd 100644
--- a/src/crypto/evp/evp_tests.txt
+++ b/src/crypto/evp/evp_tests.txt
@@ -73,6 +73,32 @@
Input = 308190020100301306072a8648ce3d020106082a8648ce3d0301070476307402010104208a872fb62893c4d1ffc5b9f0f91758069f8352e08fa05a49f8db926cb5728725a00706052b81040022a144034200042c150f429ce70f216c252cf5e062ce1f639cd5d165c7f89424072c27197d78b33b920e95cdb664e990dcf0cfea0d94e2a8e6af9d0e58056e653104925b9fe6c9
Error = GROUP_MISMATCH
+# The same key, but with the curve spelled explicitly.
+PrivateKey = P-256-ExplicitParameters
+Type = EC
+Input = 308201610201003081ec06072a8648ce3d02013081e0020101302c06072a8648ce3d0101022100ffffffff00000001000000000000000000000000ffffffffffffffffffffffff30440420ffffffff00000001000000000000000000000000fffffffffffffffffffffffc04205ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b0441046b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c2964fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5022100ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551020101046d306b02010104208a872fb62893c4d1ffc5b9f0f91758069f8352e08fa05a49f8db926cb5728725a144034200042c150f429ce70f216c252cf5e062ce1f639cd5d165c7f89424072c27197d78b33b920e95cdb664e990dcf0cfea0d94e2a8e6af9d0e58056e653104925b9fe6c9
+Output = 308187020100301306072a8648ce3d020106082a8648ce3d030107046d306b02010104208a872fb62893c4d1ffc5b9f0f91758069f8352e08fa05a49f8db926cb5728725a144034200042c150f429ce70f216c252cf5e062ce1f639cd5d165c7f89424072c27197d78b33b920e95cdb664e990dcf0cfea0d94e2a8e6af9d0e58056e653104925b9fe6c9
+ExpectNoRawPrivate
+ExpectNoRawPublic
+
+# The same as above, but with the optional cofactor omitted.
+PrivateKey = P-256-ExplicitParameters-NoCofactor
+Type = EC
+Input = 3082015e0201003081e906072a8648ce3d02013081dd020101302c06072a8648ce3d0101022100ffffffff00000001000000000000000000000000ffffffffffffffffffffffff30440420ffffffff00000001000000000000000000000000fffffffffffffffffffffffc04205ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b0441046b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c2964fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5022100ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551046d306b02010104208a872fb62893c4d1ffc5b9f0f91758069f8352e08fa05a49f8db926cb5728725a144034200042c150f429ce70f216c252cf5e062ce1f639cd5d165c7f89424072c27197d78b33b920e95cdb664e990dcf0cfea0d94e2a8e6af9d0e58056e653104925b9fe6c9
+Output = 308187020100301306072a8648ce3d020106082a8648ce3d030107046d306b02010104208a872fb62893c4d1ffc5b9f0f91758069f8352e08fa05a49f8db926cb5728725a144034200042c150f429ce70f216c252cf5e062ce1f639cd5d165c7f89424072c27197d78b33b920e95cdb664e990dcf0cfea0d94e2a8e6af9d0e58056e653104925b9fe6c9
+ExpectNoRawPrivate
+ExpectNoRawPublic
+
+# The same as above, but the cofactor is zero instead of one.
+PrivateKey = P-256-ExplicitParameters-CofactorZero
+Input = 308201610201003081ec06072a8648ce3d02013081e0020101302c06072a8648ce3d0101022100ffffffff00000001000000000000000000000000ffffffffffffffffffffffff30440420ffffffff00000001000000000000000000000000fffffffffffffffffffffffc04205ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b0441046b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c2964fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5022100ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551020100046d306b02010104208a872fb62893c4d1ffc5b9f0f91758069f8352e08fa05a49f8db926cb5728725a144034200042c150f429ce70f216c252cf5e062ce1f639cd5d165c7f89424072c27197d78b33b920e95cdb664e990dcf0cfea0d94e2a8e6af9d0e58056e653104925b9fe6c9
+Error = UNKNOWN_GROUP
+
+# The same as above, but the cofactor is two instead of one.
+PrivateKey = P-256-ExplicitParameters-CofactorTwo
+Input = 308201610201003081ec06072a8648ce3d02013081e0020101302c06072a8648ce3d0101022100ffffffff00000001000000000000000000000000ffffffffffffffffffffffff30440420ffffffff00000001000000000000000000000000fffffffffffffffffffffffc04205ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b0441046b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c2964fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5022100ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551020102046d306b02010104208a872fb62893c4d1ffc5b9f0f91758069f8352e08fa05a49f8db926cb5728725a144034200042c150f429ce70f216c252cf5e062ce1f639cd5d165c7f89424072c27197d78b33b920e95cdb664e990dcf0cfea0d94e2a8e6af9d0e58056e653104925b9fe6c9
+Error = UNKNOWN_GROUP
+
# The public half of the same key encoded as a PublicKey.
PublicKey = P-256-SPKI
Type = EC
diff --git a/src/crypto/fipsmodule/CMakeLists.txt b/src/crypto/fipsmodule/CMakeLists.txt
index e978820..dc8f1b1 100644
--- a/src/crypto/fipsmodule/CMakeLists.txt
+++ b/src/crypto/fipsmodule/CMakeLists.txt
@@ -55,6 +55,7 @@
sha1-armv4-large.${ASM_EXT}
sha256-armv4.${ASM_EXT}
sha512-armv4.${ASM_EXT}
+ vpaes-armv7.${ASM_EXT}
)
endif()
@@ -121,6 +122,7 @@
perlasm(sha512-armv4.${ASM_EXT} sha/asm/sha512-armv4.pl)
perlasm(sha512-armv8.${ASM_EXT} sha/asm/sha512-armv8.pl)
perlasm(sha512-x86_64.${ASM_EXT} sha/asm/sha512-x86_64.pl)
+perlasm(vpaes-armv7.${ASM_EXT} aes/asm/vpaes-armv7.pl)
perlasm(vpaes-armv8.${ASM_EXT} aes/asm/vpaes-armv8.pl)
perlasm(vpaes-x86_64.${ASM_EXT} aes/asm/vpaes-x86_64.pl)
perlasm(vpaes-x86.${ASM_EXT} aes/asm/vpaes-x86.pl)
diff --git a/src/crypto/fipsmodule/aes/aes_test.cc b/src/crypto/fipsmodule/aes/aes_test.cc
index 7fadb35..5061e01 100644
--- a/src/crypto/fipsmodule/aes/aes_test.cc
+++ b/src/crypto/fipsmodule/aes/aes_test.cc
@@ -22,6 +22,7 @@
#include <gtest/gtest.h>
#include <openssl/aes.h>
+#include <openssl/rand.h>
#include "internal.h"
#include "../../internal.h"
@@ -304,7 +305,8 @@
#endif
if (bsaes_capable()) {
- aes_nohw_set_encrypt_key(kKey, bits, &key);
+ vpaes_set_encrypt_key(kKey, bits, &key);
+ CHECK_ABI(vpaes_encrypt_key_to_bsaes, &key, &key);
for (size_t blocks : block_counts) {
SCOPED_TRACE(blocks);
if (blocks != 0) {
@@ -312,7 +314,8 @@
}
}
- aes_nohw_set_decrypt_key(kKey, bits, &key);
+ vpaes_set_decrypt_key(kKey, bits, &key);
+ CHECK_ABI(vpaes_decrypt_key_to_bsaes, &key, &key);
for (size_t blocks : block_counts) {
SCOPED_TRACE(blocks);
CHECK_ABI(bsaes_cbc_encrypt, buf, buf, AES_BLOCK_SIZE * blocks, &key,
@@ -325,8 +328,10 @@
CHECK_ABI(vpaes_encrypt, block, block, &key);
for (size_t blocks : block_counts) {
SCOPED_TRACE(blocks);
+#if defined(VPAES_CBC)
CHECK_ABI(vpaes_cbc_encrypt, buf, buf, AES_BLOCK_SIZE * blocks, &key,
block, AES_ENCRYPT);
+#endif
#if defined(VPAES_CTR32)
CHECK_ABI(vpaes_ctr32_encrypt_blocks, buf, buf, blocks, &key, block);
#endif
@@ -334,11 +339,13 @@
CHECK_ABI(vpaes_set_decrypt_key, kKey, bits, &key);
CHECK_ABI(vpaes_decrypt, block, block, &key);
+#if defined(VPAES_CBC)
for (size_t blocks : block_counts) {
SCOPED_TRACE(blocks);
CHECK_ABI(vpaes_cbc_encrypt, buf, buf, AES_BLOCK_SIZE * blocks, &key,
block, AES_DECRYPT);
}
+#endif // VPAES_CBC
}
if (hwaes_capable()) {
@@ -370,3 +377,52 @@
}
}
#endif // SUPPORTS_ABI_TEST
+
+#if defined(BSAES) && !defined(BORINGSSL_SHARED_LIBRARY)
+static Bytes AESKeyToBytes(const AES_KEY *key) {
+ return Bytes(reinterpret_cast<const uint8_t *>(key), sizeof(*key));
+}
+
+TEST(AESTest, VPAESToBSAESConvert) {
+ const int kNumIterations = 1000;
+ for (int i = 0; i < kNumIterations; i++) {
+ uint8_t key[256 / 8];
+ RAND_bytes(key, sizeof(key));
+ SCOPED_TRACE(Bytes(key));
+ for (unsigned bits : {128u, 192u, 256u}) {
+ SCOPED_TRACE(bits);
+ for (bool enc : {false, true}) {
+ SCOPED_TRACE(enc);
+ AES_KEY nohw, vpaes, bsaes;
+ OPENSSL_memset(&nohw, 0xaa, sizeof(nohw));
+ OPENSSL_memset(&vpaes, 0xaa, sizeof(vpaes));
+ OPENSSL_memset(&bsaes, 0xaa, sizeof(bsaes));
+
+ if (enc) {
+ aes_nohw_set_encrypt_key(key, bits, &nohw);
+ vpaes_set_encrypt_key(key, bits, &vpaes);
+ vpaes_encrypt_key_to_bsaes(&bsaes, &vpaes);
+ } else {
+ aes_nohw_set_decrypt_key(key, bits, &nohw);
+ vpaes_set_decrypt_key(key, bits, &vpaes);
+ vpaes_decrypt_key_to_bsaes(&bsaes, &vpaes);
+ }
+
+ // Although not fatal, stop running if this fails, otherwise we'll spam
+ // the user's console.
+ ASSERT_EQ(AESKeyToBytes(&nohw), AESKeyToBytes(&bsaes));
+
+ // Repeat the test in-place.
+ OPENSSL_memcpy(&bsaes, &vpaes, sizeof(AES_KEY));
+ if (enc) {
+ vpaes_encrypt_key_to_bsaes(&bsaes, &vpaes);
+ } else {
+ vpaes_decrypt_key_to_bsaes(&bsaes, &vpaes);
+ }
+
+ ASSERT_EQ(AESKeyToBytes(&nohw), AESKeyToBytes(&bsaes));
+ }
+ }
+ }
+}
+#endif // BSAES && !BORINGSSL_SHARED_LIBRARY
diff --git a/src/crypto/fipsmodule/aes/asm/vpaes-armv7.pl b/src/crypto/fipsmodule/aes/asm/vpaes-armv7.pl
new file mode 100644
index 0000000..deb9a2a
--- /dev/null
+++ b/src/crypto/fipsmodule/aes/asm/vpaes-armv7.pl
@@ -0,0 +1,1375 @@
+#! /usr/bin/env perl
+# Copyright 2015-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+######################################################################
+## Constant-time SSSE3 AES core implementation.
+## version 0.1
+##
+## By Mike Hamburg (Stanford University), 2009
+## Public domain.
+##
+## For details see http://shiftleft.org/papers/vector_aes/ and
+## http://crypto.stanford.edu/vpaes/.
+##
+######################################################################
+# Adapted from the original x86_64 version and <appro@openssl.org>'s ARMv8
+# version.
+#
+# armv7, aarch64, and x86_64 differ in several ways:
+#
+# * x86_64 SSSE3 instructions are two-address (destination operand is also a
+# source), while NEON is three-address (destination operand is separate from
+# two sources).
+#
+# * aarch64 has 32 SIMD registers available, while x86_64 and armv7 have 16.
+#
+# * x86_64 instructions can take memory references, while ARM is a load/store
+# architecture. This means we sometimes need a spare register.
+#
+# * aarch64 and x86_64 have 128-bit byte shuffle instructions (tbl and pshufb),
+# while armv7 only has a 64-bit byte shuffle (vtbl).
+#
+# This means this armv7 version must be a mix of both aarch64 and x86_64
+# implementations. armv7 and aarch64 have analogous SIMD instructions, so we
+# base the instructions on aarch64. However, we cannot use aarch64's register
+# allocation. x86_64's register count matches, but x86_64 is two-address.
+# vpaes-armv8.pl already accounts for this in the comments, which use
+# three-address AVX instructions instead of the original SSSE3 ones. We base
+# register usage on these comments, which are preserved in this file.
+#
+# This means we do not use separate input and output registers as in aarch64 and
+# cannot pin as many constants in the preheat functions. However, the load/store
+# architecture means we must still deviate from x86_64 in places.
+#
+# Next, we account for the byte shuffle instructions. vtbl takes 64-bit source
+# and destination and 128-bit table. Fortunately, armv7 also allows addressing
+# upper and lower halves of each 128-bit register. The lower half of q{N} is
+# d{2*N}. The upper half is d{2*N+1}. Instead of the following non-existent
+# instruction,
+#
+# vtbl.8 q0, q1, q2 @ Index each of q2's 16 bytes into q1. Store in q0.
+#
+# we write:
+#
+# vtbl.8 d0, q1, d4 @ Index each of d4's 8 bytes into q1. Store in d0.
+# vtbl.8 d1, q1, d5 @ Index each of d5's 8 bytes into q1. Store in d1.
+#
+# For readability, we write d0 and d1 as q0#lo and q0#hi, respectively, and
+# post-process before outputting. (This is adapted from ghash-armv4.pl.) Note,
+# however, that destination (q0) and table (q1) registers may no longer match.
+# We adjust the register usage from x86_64 to avoid this. (Unfortunately, the
+# two-address pshufb always matched these operands, so this is common.)
+#
+# This file also runs against the limit of ARMv7's ADR pseudo-instruction. ADR
+# expands to an ADD or SUB of the pc register to find an address. That immediate
+# must fit in ARM's encoding scheme: 8 bits of constant and 4 bits of rotation.
+# This means larger values must be more aligned.
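+# (For example, 0x3FC, which is 0xFF rotated right by 30 bits, is encodable,
+# while 0x3FD is not.)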
+#
+# ARM additionally has two encodings, ARM and Thumb mode. Our assembly files may
+# use either encoding (do we actually need to support this?). In ARM mode, the
+# distances get large enough to require 16-byte alignment. Moving constants
+# closer to their use resolves most of this, but common constants in
+# _vpaes_consts are used by the whole file. Affected ADR instructions must be
+# placed at 8 mod 16 (the pc register is 8 ahead). Instructions with this
+# constraint have been commented.
+#
+# For details on ARM's immediate value encoding scheme, see
+# https://alisdair.mcdiarmid.org/arm-immediate-value-encoding/
+#
+# Finally, a summary of armv7 and aarch64 SIMD syntax differences:
+#
+# * armv7 prefixes SIMD instructions with 'v', while aarch64 does not.
+#
+# * armv7 SIMD registers are named like q0 (and d0 for the half-width ones).
+# aarch64 names registers like v0, and denotes half-width operations in an
+# instruction suffix (see below).
+#
+# * aarch64 embeds size and lane information in register suffixes. v0.16b is
+# 16 bytes, v0.8h is eight u16s, v0.4s is four u32s, and v0.2d is two u64s.
+# armv7 embeds the total size in the register name (see above) and the size of
+# each element in an instruction suffix, which may look like vmov.i8,
+# vshr.u8, or vtbl.8, depending on instruction.
+
+use strict;
+
+my $flavour = shift;
+my $output;
+while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/;
+my $dir=$1;
+my $xlate;
+( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../../perlasm/arm-xlate.pl" and -f $xlate) or
+die "can't locate arm-xlate.pl";
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+my $code = "";
+
+$code.=<<___;
+.syntax unified
+
+.arch armv7-a
+.fpu neon
+
+#if defined(__thumb2__)
+.thumb
+#else
+.code 32
+#endif
+
+.text
+
+.type _vpaes_consts,%object
+.align 7 @ totally strategic alignment
+_vpaes_consts:
+.Lk_mc_forward: @ mc_forward
+ .quad 0x0407060500030201, 0x0C0F0E0D080B0A09
+ .quad 0x080B0A0904070605, 0x000302010C0F0E0D
+ .quad 0x0C0F0E0D080B0A09, 0x0407060500030201
+ .quad 0x000302010C0F0E0D, 0x080B0A0904070605
+.Lk_mc_backward:@ mc_backward
+ .quad 0x0605040702010003, 0x0E0D0C0F0A09080B
+ .quad 0x020100030E0D0C0F, 0x0A09080B06050407
+ .quad 0x0E0D0C0F0A09080B, 0x0605040702010003
+ .quad 0x0A09080B06050407, 0x020100030E0D0C0F
+.Lk_sr: @ sr
+ .quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
+ .quad 0x030E09040F0A0500, 0x0B06010C07020D08
+ .quad 0x0F060D040B020900, 0x070E050C030A0108
+ .quad 0x0B0E0104070A0D00, 0x0306090C0F020508
+
+@
+@ "Hot" constants
+@
+.Lk_inv: @ inv, inva
+ .quad 0x0E05060F0D080180, 0x040703090A0B0C02
+ .quad 0x01040A060F0B0780, 0x030D0E0C02050809
+.Lk_ipt: @ input transform (lo, hi)
+ .quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
+ .quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
+.Lk_sbo: @ sbou, sbot
+ .quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
+ .quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
+.Lk_sb1: @ sb1u, sb1t
+ .quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
+ .quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
+.Lk_sb2: @ sb2u, sb2t
+ .quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
+ .quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
+
+.asciz "Vector Permutation AES for ARMv7 NEON, Mike Hamburg (Stanford University)"
+.size _vpaes_consts,.-_vpaes_consts
+.align 6
+___
+
+{
+my ($inp,$out,$key) = map("r$_", (0..2));
+
+my ($invlo,$invhi) = map("q$_", (10..11));
+my ($sb1u,$sb1t,$sb2u,$sb2t) = map("q$_", (12..15));
+
+$code.=<<___;
+@@
+@@ _aes_preheat
+@@
+@@ Fills q9-q15 as specified below.
+@@
+.type _vpaes_preheat,%function
+.align 4
+_vpaes_preheat:
+ adr r10, .Lk_inv
+ vmov.i8 q9, #0x0f @ .Lk_s0F
+ vld1.64 {q10,q11}, [r10]! @ .Lk_inv
+ add r10, r10, #64 @ Skip .Lk_ipt, .Lk_sbo
+ vld1.64 {q12,q13}, [r10]! @ .Lk_sb1
+ vld1.64 {q14,q15}, [r10] @ .Lk_sb2
+ bx lr
+
+@@
+@@ _aes_encrypt_core
+@@
+@@ AES-encrypt q0.
+@@
+@@ Inputs:
+@@ q0 = input
+@@ q9-q15 as in _vpaes_preheat
+@@ [$key] = scheduled keys
+@@
+@@ Output in q0
+@@ Clobbers q1-q5, r8-r11
+@@ Preserves q6-q8 so you get some local vectors
+@@
+@@
+.type _vpaes_encrypt_core,%function
+.align 4
+_vpaes_encrypt_core:
+ mov r9, $key
+ ldr r8, [$key,#240] @ pull rounds
+ adr r11, .Lk_ipt
+ @ vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
+ @ vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
+ vld1.64 {q2, q3}, [r11]
+ adr r11, .Lk_mc_forward+16
+ vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5 # round0 key
+ vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1
+ vshr.u8 q0, q0, #4 @ vpsrlb \$4, %xmm0, %xmm0
+ vtbl.8 q1#lo, {q2}, q1#lo @ vpshufb %xmm1, %xmm2, %xmm1
+ vtbl.8 q1#hi, {q2}, q1#hi
+ vtbl.8 q2#lo, {q3}, q0#lo @ vpshufb %xmm0, %xmm3, %xmm2
+ vtbl.8 q2#hi, {q3}, q0#hi
+ veor q0, q1, q5 @ vpxor %xmm5, %xmm1, %xmm0
+ veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0
+
+ @ .Lenc_entry ends with a bnz instruction which is normally paired with
+ @ subs in .Lenc_loop.
+ tst r8, r8
+ b .Lenc_entry
+
+.align 4
+.Lenc_loop:
+ @ middle of middle round
+ add r10, r11, #0x40
+ vtbl.8 q4#lo, {$sb1t}, q2#lo @ vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
+ vtbl.8 q4#hi, {$sb1t}, q2#hi
+ vld1.64 {q1}, [r11]! @ vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
+ vtbl.8 q0#lo, {$sb1u}, q3#lo @ vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
+ vtbl.8 q0#hi, {$sb1u}, q3#hi
+ veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
+ vtbl.8 q5#lo, {$sb2t}, q2#lo @ vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
+ vtbl.8 q5#hi, {$sb2t}, q2#hi
+ veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A
+ vtbl.8 q2#lo, {$sb2u}, q3#lo @ vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
+ vtbl.8 q2#hi, {$sb2u}, q3#hi
+ vld1.64 {q4}, [r10] @ vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
+ vtbl.8 q3#lo, {q0}, q1#lo @ vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
+ vtbl.8 q3#hi, {q0}, q1#hi
+ veor q2, q2, q5 @ vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
+ @ Write to q5 instead of q0, so the table and destination registers do
+ @ not overlap.
+ vtbl.8 q5#lo, {q0}, q4#lo @ vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
+ vtbl.8 q5#hi, {q0}, q4#hi
+ veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
+ vtbl.8 q4#lo, {q3}, q1#lo @ vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
+ vtbl.8 q4#hi, {q3}, q1#hi
+ @ Here we restore the original q0/q5 usage.
+ veor q0, q5, q3 @ vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
+ and r11, r11, #~(1<<6) @ and \$0x30, %r11 # ... mod 4
+ veor q0, q0, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
+ subs r8, r8, #1 @ nr--
+
+.Lenc_entry:
+ @ top of round
+ vand q1, q0, q9 @ vpand %xmm0, %xmm9, %xmm1 # 0 = k
+ vshr.u8 q0, q0, #4 @ vpsrlb \$4, %xmm0, %xmm0 # 1 = i
+ vtbl.8 q5#lo, {$invhi}, q1#lo @ vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
+ vtbl.8 q5#hi, {$invhi}, q1#hi
+ veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j
+ vtbl.8 q3#lo, {$invlo}, q0#lo @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
+ vtbl.8 q3#hi, {$invlo}, q0#hi
+ vtbl.8 q4#lo, {$invlo}, q1#lo @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
+ vtbl.8 q4#hi, {$invlo}, q1#hi
+ veor q3, q3, q5 @ vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
+ veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
+ vtbl.8 q2#lo, {$invlo}, q3#lo @ vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
+ vtbl.8 q2#hi, {$invlo}, q3#hi
+ vtbl.8 q3#lo, {$invlo}, q4#lo @ vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
+ vtbl.8 q3#hi, {$invlo}, q4#hi
+ veor q2, q2, q1 @ vpxor %xmm1, %xmm2, %xmm2 # 2 = io
+ veor q3, q3, q0 @ vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
+ vld1.64 {q5}, [r9]! @ vmovdqu (%r9), %xmm5
+ bne .Lenc_loop
+
+ @ middle of last round
+ add r10, r11, #0x80
+
+ adr r11, .Lk_sbo
+ @ Read to q1 instead of q4, so the vtbl.8 instruction below does not
+ @ overlap table and destination registers.
+ vld1.64 {q1}, [r11]! @ vmovdqa -0x60(%r10), %xmm4 # 3 : sbou
+ vld1.64 {q0}, [r11] @ vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
+ vtbl.8 q4#lo, {q1}, q2#lo @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
+ vtbl.8 q4#hi, {q1}, q2#hi
+ vld1.64 {q1}, [r10] @ vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
+ @ Write to q2 instead of q0 below, to avoid overlapping table and
+ @ destination registers.
+ vtbl.8 q2#lo, {q0}, q3#lo @ vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
+ vtbl.8 q2#hi, {q0}, q3#hi
+ veor q4, q4, q5 @ vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
+ veor q2, q2, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 0 = A
+ @ Here we restore the original q0/q2 usage.
+ vtbl.8 q0#lo, {q2}, q1#lo @ vpshufb %xmm1, %xmm0, %xmm0
+ vtbl.8 q0#hi, {q2}, q1#hi
+ bx lr
+.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
+
+.globl vpaes_encrypt
+.type vpaes_encrypt,%function
+.align 4
+vpaes_encrypt:
+ @ _vpaes_encrypt_core uses r8-r11. Round up to r7-r11 to maintain stack
+ @ alignment.
+ stmdb sp!, {r7-r11,lr}
+ @ _vpaes_encrypt_core uses q4-q5 (d8-d11), which are callee-saved.
+ vstmdb sp!, {d8-d11}
+
+ vld1.64 {q0}, [$inp]
+ bl _vpaes_preheat
+ bl _vpaes_encrypt_core
+ vst1.64 {q0}, [$out]
+
+ vldmia sp!, {d8-d11}
+ ldmia sp!, {r7-r11, pc} @ return
+.size vpaes_encrypt,.-vpaes_encrypt
+
+@
+@ Decryption stuff
+@
+.type _vpaes_decrypt_consts,%object
+.align 4
+.Lk_dipt: @ decryption input transform
+ .quad 0x0F505B040B545F00, 0x154A411E114E451A
+ .quad 0x86E383E660056500, 0x12771772F491F194
+.Lk_dsbo: @ decryption sbox final output
+ .quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
+ .quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C
+.Lk_dsb9: @ decryption sbox output *9*u, *9*t
+ .quad 0x851C03539A86D600, 0xCAD51F504F994CC9
+ .quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565
+.Lk_dsbd: @ decryption sbox output *D*u, *D*t
+ .quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
+ .quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
+.Lk_dsbb: @ decryption sbox output *B*u, *B*t
+ .quad 0xD022649296B44200, 0x602646F6B0F2D404
+ .quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
+.Lk_dsbe: @ decryption sbox output *E*u, *E*t
+ .quad 0x46F2929626D4D000, 0x2242600464B4F6B0
+ .quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32
+.size _vpaes_decrypt_consts,.-_vpaes_decrypt_consts
+
+@@
+@@ Decryption core
+@@
+@@ Same API as encryption core, except it clobbers q12-q15 rather than using
+@@ the values from _vpaes_preheat. q9-q11 must still be set from
+@@ _vpaes_preheat.
+@@
+.type _vpaes_decrypt_core,%function
+.align 4
+_vpaes_decrypt_core:
+ mov r9, $key
+ ldr r8, [$key,#240] @ pull rounds
+
+ @ This function performs shuffles with various constants. The x86_64
+ @ version loads them on-demand into %xmm0-%xmm5. This does not work well
+ @ for ARMv7 because those registers are shuffle destinations. The ARMv8
+ @ version preloads those constants into registers, but ARMv7 has half
+ @ the registers to work with. Instead, we load them on-demand into
+	@ q12-q15, registers normally used for preloaded constants. This is fine
+ @ because decryption doesn't use those constants. The values are
+ @ constant, so this does not interfere with potential 2x optimizations.
+ adr r7, .Lk_dipt
+
+ vld1.64 {q12,q13}, [r7] @ vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo
+ lsl r11, r8, #4 @ mov %rax, %r11; shl \$4, %r11
+ eor r11, r11, #0x30 @ xor \$0x30, %r11
+ adr r10, .Lk_sr
+ and r11, r11, #0x30 @ and \$0x30, %r11
+ add r11, r11, r10
+ adr r10, .Lk_mc_forward+48
+
+ vld1.64 {q4}, [r9]! @ vmovdqu (%r9), %xmm4 # round0 key
+ vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1
+ vshr.u8 q0, q0, #4 @ vpsrlb \$4, %xmm0, %xmm0
+ vtbl.8 q2#lo, {q12}, q1#lo @ vpshufb %xmm1, %xmm2, %xmm2
+ vtbl.8 q2#hi, {q12}, q1#hi
+ vld1.64 {q5}, [r10] @ vmovdqa .Lk_mc_forward+48(%rip), %xmm5
+ @ vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi
+ vtbl.8 q0#lo, {q13}, q0#lo @ vpshufb %xmm0, %xmm1, %xmm0
+ vtbl.8 q0#hi, {q13}, q0#hi
+ veor q2, q2, q4 @ vpxor %xmm4, %xmm2, %xmm2
+ veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0
+
+ @ .Ldec_entry ends with a bnz instruction which is normally paired with
+ @ subs in .Ldec_loop.
+ tst r8, r8
+ b .Ldec_entry
+
+.align 4
+.Ldec_loop:
+@
+@ Inverse mix columns
+@
+
+ @ We load .Lk_dsb* into q12-q15 on-demand. See the comment at the top of
+ @ the function.
+ adr r10, .Lk_dsb9
+ vld1.64 {q12,q13}, [r10]! @ vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u
+ @ vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t
+ @ Load sbd* ahead of time.
+ vld1.64 {q14,q15}, [r10]! @ vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu
+ @ vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt
+ vtbl.8 q4#lo, {q12}, q2#lo @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u
+ vtbl.8 q4#hi, {q12}, q2#hi
+ vtbl.8 q1#lo, {q13}, q3#lo @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t
+ vtbl.8 q1#hi, {q13}, q3#hi
+ veor q0, q4, q0 @ vpxor %xmm4, %xmm0, %xmm0
+
+ veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
+
+ @ Load sbb* ahead of time.
+ vld1.64 {q12,q13}, [r10]! @ vmovdqa 0x20(%r10),%xmm4 # 4 : sbbu
+ @ vmovdqa 0x30(%r10),%xmm1 # 0 : sbbt
+
+ vtbl.8 q4#lo, {q14}, q2#lo @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu
+ vtbl.8 q4#hi, {q14}, q2#hi
+ @ Write to q1 instead of q0, so the table and destination registers do
+ @ not overlap.
+ vtbl.8 q1#lo, {q0}, q5#lo @ vpshufb %xmm5, %xmm0, %xmm0 # MC ch
+ vtbl.8 q1#hi, {q0}, q5#hi
+ @ Here we restore the original q0/q1 usage. This instruction is
+ @ reordered from the ARMv8 version so we do not clobber the vtbl.8
+ @ below.
+ veor q0, q1, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
+ vtbl.8 q1#lo, {q15}, q3#lo @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt
+ vtbl.8 q1#hi, {q15}, q3#hi
+ @ vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu
+ veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
+ @ vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt
+
+	@ Load sbe* ahead of time.
+ vld1.64 {q14,q15}, [r10]! @ vmovdqa 0x40(%r10),%xmm4 # 4 : sbeu
+ @ vmovdqa 0x50(%r10),%xmm1 # 0 : sbet
+
+ vtbl.8 q4#lo, {q12}, q2#lo @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu
+ vtbl.8 q4#hi, {q12}, q2#hi
+ @ Write to q1 instead of q0, so the table and destination registers do
+ @ not overlap.
+ vtbl.8 q1#lo, {q0}, q5#lo @ vpshufb %xmm5, %xmm0, %xmm0 # MC ch
+ vtbl.8 q1#hi, {q0}, q5#hi
+ @ Here we restore the original q0/q1 usage. This instruction is
+ @ reordered from the ARMv8 version so we do not clobber the vtbl.8
+ @ below.
+ veor q0, q1, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
+ vtbl.8 q1#lo, {q13}, q3#lo @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt
+ vtbl.8 q1#hi, {q13}, q3#hi
+ veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
+
+ vtbl.8 q4#lo, {q14}, q2#lo @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu
+ vtbl.8 q4#hi, {q14}, q2#hi
+ @ Write to q1 instead of q0, so the table and destination registers do
+ @ not overlap.
+ vtbl.8 q1#lo, {q0}, q5#lo @ vpshufb %xmm5, %xmm0, %xmm0 # MC ch
+ vtbl.8 q1#hi, {q0}, q5#hi
+ @ Here we restore the original q0/q1 usage. This instruction is
+ @ reordered from the ARMv8 version so we do not clobber the vtbl.8
+ @ below.
+ veor q0, q1, q4 @ vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
+ vtbl.8 q1#lo, {q15}, q3#lo @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet
+ vtbl.8 q1#hi, {q15}, q3#hi
+ vext.8 q5, q5, q5, #12 @ vpalignr \$12, %xmm5, %xmm5, %xmm5
+ veor q0, q0, q1 @ vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
+ subs r8, r8, #1 @ sub \$1,%rax # nr--
+
+.Ldec_entry:
+ @ top of round
+ vand q1, q0, q9 @ vpand %xmm9, %xmm0, %xmm1 # 0 = k
+ vshr.u8 q0, q0, #4 @ vpsrlb \$4, %xmm0, %xmm0 # 1 = i
+ vtbl.8 q2#lo, {$invhi}, q1#lo @ vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
+ vtbl.8 q2#hi, {$invhi}, q1#hi
+ veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j
+ vtbl.8 q3#lo, {$invlo}, q0#lo @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
+ vtbl.8 q3#hi, {$invlo}, q0#hi
+ vtbl.8 q4#lo, {$invlo}, q1#lo @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
+ vtbl.8 q4#hi, {$invlo}, q1#hi
+ veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
+ veor q4, q4, q2 @ vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
+ vtbl.8 q2#lo, {$invlo}, q3#lo @ vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
+ vtbl.8 q2#hi, {$invlo}, q3#hi
+ vtbl.8 q3#lo, {$invlo}, q4#lo @ vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
+ vtbl.8 q3#hi, {$invlo}, q4#hi
+ veor q2, q2, q1 @ vpxor %xmm1, %xmm2, %xmm2 # 2 = io
+ veor q3, q3, q0 @ vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
+ vld1.64 {q0}, [r9]! @ vmovdqu (%r9), %xmm0
+ bne .Ldec_loop
+
+ @ middle of last round
+
+ adr r10, .Lk_dsbo
+
+ @ Write to q1 rather than q4 to avoid overlapping table and destination.
+ vld1.64 {q1}, [r10]! @ vmovdqa 0x60(%r10), %xmm4 # 3 : sbou
+ vtbl.8 q4#lo, {q1}, q2#lo @ vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
+ vtbl.8 q4#hi, {q1}, q2#hi
+ @ Write to q2 rather than q1 to avoid overlapping table and destination.
+ vld1.64 {q2}, [r10] @ vmovdqa 0x70(%r10), %xmm1 # 0 : sbot
+ vtbl.8 q1#lo, {q2}, q3#lo @ vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t
+ vtbl.8 q1#hi, {q2}, q3#hi
+ vld1.64 {q2}, [r11] @ vmovdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160
+ veor q4, q4, q0 @ vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k
+ @ Write to q1 rather than q0 so the table and destination registers
+ @ below do not overlap.
+ veor q1, q1, q4 @ vpxor %xmm4, %xmm1, %xmm0 # 0 = A
+ vtbl.8 q0#lo, {q1}, q2#lo @ vpshufb %xmm2, %xmm0, %xmm0
+ vtbl.8 q0#hi, {q1}, q2#hi
+ bx lr
+.size _vpaes_decrypt_core,.-_vpaes_decrypt_core
+
+.globl vpaes_decrypt
+.type vpaes_decrypt,%function
+.align 4
+vpaes_decrypt:
+ @ _vpaes_decrypt_core uses r7-r11.
+ stmdb sp!, {r7-r11,lr}
+ @ _vpaes_decrypt_core uses q4-q5 (d8-d11), which are callee-saved.
+ vstmdb sp!, {d8-d11}
+
+ vld1.64 {q0}, [$inp]
+ bl _vpaes_preheat
+ bl _vpaes_decrypt_core
+ vst1.64 {q0}, [$out]
+
+ vldmia sp!, {d8-d11}
+ ldmia sp!, {r7-r11, pc} @ return
+.size vpaes_decrypt,.-vpaes_decrypt
+___
+}
+{
+my ($inp,$bits,$out,$dir)=("r0","r1","r2","r3");
+my ($rcon,$s0F,$invlo,$invhi,$s63) = map("q$_",(8..12));
+
+$code.=<<___;
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+@@ @@
+@@ AES key schedule @@
+@@ @@
+@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
+
+@ This function diverges from both x86_64 and aarch64 in which constants are
+@ pinned. x86_64 has a common preheat function for all operations. aarch64
+@ separates them because it has enough registers to pin nearly all constants.
+@ armv7 does not have enough registers, but needing explicit loads and stores
+@ also complicates using x86_64's register allocation directly.
+@
+@ We pin some constants for convenience and leave q14 and q15 free to load
+@ others on demand.
+
+@
+@ Key schedule constants
+@
+.type _vpaes_key_consts,%object
+.align 4
+_vpaes_key_consts:
+.Lk_dksd: @ decryption key schedule: invskew x*D
+ .quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
+ .quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
+.Lk_dksb: @ decryption key schedule: invskew x*B
+ .quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
+ .quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
+.Lk_dkse: @ decryption key schedule: invskew x*E + 0x63
+ .quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
+ .quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
+.Lk_dks9: @ decryption key schedule: invskew x*9
+ .quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
+ .quad 0x4576516227143300, 0x8BB89FACE9DAFDCE
+
+.Lk_rcon: @ rcon
+ .quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
+
+.Lk_opt: @ output transform
+ .quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
+ .quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
+.Lk_deskew: @ deskew tables: inverts the sbox's "skew"
+ .quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
+ .quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
+.size _vpaes_key_consts,.-_vpaes_key_consts
+
+.type _vpaes_key_preheat,%function
+.align 4
+_vpaes_key_preheat:
+ adr r11, .Lk_rcon
+ vmov.i8 $s63, #0x5b @ .Lk_s63
+ adr r10, .Lk_inv @ Must be aligned to 8 mod 16.
+ vmov.i8 $s0F, #0x0f @ .Lk_s0F
+ vld1.64 {$invlo,$invhi}, [r10] @ .Lk_inv
+ vld1.64 {$rcon}, [r11] @ .Lk_rcon
+ bx lr
+.size _vpaes_key_preheat,.-_vpaes_key_preheat
+
+.type _vpaes_schedule_core,%function
+.align 4
+_vpaes_schedule_core:
+ @ We only need to save lr, but ARM requires an 8-byte stack alignment,
+ @ so save an extra register.
+ stmdb sp!, {r3,lr}
+
+ bl _vpaes_key_preheat @ load the tables
+
+ adr r11, .Lk_ipt @ Must be aligned to 8 mod 16.
+ vld1.64 {q0}, [$inp]! @ vmovdqu (%rdi), %xmm0 # load key (unaligned)
+
+ @ input transform
+ @ Use q4 here rather than q3 so .Lschedule_am_decrypting does not
+ @ overlap table and destination.
+ vmov q4, q0 @ vmovdqa %xmm0, %xmm3
+ bl _vpaes_schedule_transform
+ adr r10, .Lk_sr @ Must be aligned to 8 mod 16.
+ vmov q7, q0 @ vmovdqa %xmm0, %xmm7
+
+ add r8, r8, r10
+ tst $dir, $dir
+ bne .Lschedule_am_decrypting
+
+ @ encrypting, output zeroth round key after transform
+ vst1.64 {q0}, [$out] @ vmovdqu %xmm0, (%rdx)
+ b .Lschedule_go
+
+.Lschedule_am_decrypting:
+ @ decrypting, output zeroth round key after shiftrows
+ vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1
+ vtbl.8 q3#lo, {q4}, q1#lo @ vpshufb %xmm1, %xmm3, %xmm3
+ vtbl.8 q3#hi, {q4}, q1#hi
+ vst1.64 {q3}, [$out] @ vmovdqu %xmm3, (%rdx)
+ eor r8, r8, #0x30 @ xor \$0x30, %r8
+
+.Lschedule_go:
+ cmp $bits, #192 @ cmp \$192, %esi
+ bhi .Lschedule_256
+ beq .Lschedule_192
+	@ 128: fall through
+
+@@
+@@ .schedule_128
+@@
+@@ 128-bit specific part of key schedule.
+@@
+@@ This schedule is really simple, because all its parts
+@@ are accomplished by the subroutines.
+@@
+.Lschedule_128:
+ mov $inp, #10 @ mov \$10, %esi
+
+.Loop_schedule_128:
+ bl _vpaes_schedule_round
+ subs $inp, $inp, #1 @ dec %esi
+ beq .Lschedule_mangle_last
+ bl _vpaes_schedule_mangle @ write output
+ b .Loop_schedule_128
+
+@@
+@@ .aes_schedule_192
+@@
+@@ 192-bit specific part of key schedule.
+@@
+@@ The main body of this schedule is the same as the 128-bit
+@@ schedule, but with more smearing. The long, high side is
+@@ stored in q7 as before, and the short, low side is in
+@@ the high bits of q6.
+@@
+@@ This schedule is somewhat nastier, however, because each
+@@ round produces 192 bits of key material, or 1.5 round keys.
+@@ Therefore, on each cycle we do 2 rounds and produce 3 round
+@@ keys.
+@@
+.align 4
+.Lschedule_192:
+ sub $inp, $inp, #8
+ vld1.64 {q0}, [$inp] @ vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
+ bl _vpaes_schedule_transform @ input transform
+ vmov q6, q0 @ vmovdqa %xmm0, %xmm6 # save short part
+ vmov.i8 q6#lo, #0 @ vpxor %xmm4, %xmm4, %xmm4 # clear 4
+ @ vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros
+ mov $inp, #4 @ mov \$4, %esi
+
+.Loop_schedule_192:
+ bl _vpaes_schedule_round
+ vext.8 q0, q6, q0, #8 @ vpalignr \$8,%xmm6,%xmm0,%xmm0
+ bl _vpaes_schedule_mangle @ save key n
+ bl _vpaes_schedule_192_smear
+ bl _vpaes_schedule_mangle @ save key n+1
+ bl _vpaes_schedule_round
+ subs $inp, $inp, #1 @ dec %esi
+ beq .Lschedule_mangle_last
+ bl _vpaes_schedule_mangle @ save key n+2
+ bl _vpaes_schedule_192_smear
+ b .Loop_schedule_192
+
+@@
+@@ .aes_schedule_256
+@@
+@@ 256-bit specific part of key schedule.
+@@
+@@ The structure here is very similar to the 128-bit
+@@ schedule, but with an additional "low side" in
+@@ q6. The low side's rounds are the same as the
+@@ high side's, except no rcon and no rotation.
+@@
+.align 4
+.Lschedule_256:
+ vld1.64 {q0}, [$inp] @ vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
+ bl _vpaes_schedule_transform @ input transform
+ mov $inp, #7 @ mov \$7, %esi
+
+.Loop_schedule_256:
+ bl _vpaes_schedule_mangle @ output low result
+ vmov q6, q0 @ vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6
+
+ @ high round
+ bl _vpaes_schedule_round
+ subs $inp, $inp, #1 @ dec %esi
+ beq .Lschedule_mangle_last
+ bl _vpaes_schedule_mangle
+
+ @ low round. swap xmm7 and xmm6
+ vdup.32 q0, q0#hi[1] @ vpshufd \$0xFF, %xmm0, %xmm0
+ vmov.i8 q4, #0
+ vmov q5, q7 @ vmovdqa %xmm7, %xmm5
+ vmov q7, q6 @ vmovdqa %xmm6, %xmm7
+ bl _vpaes_schedule_low_round
+ vmov q7, q5 @ vmovdqa %xmm5, %xmm7
+
+ b .Loop_schedule_256
+
+@@
+@@ .aes_schedule_mangle_last
+@@
+@@ Mangler for last round of key schedule
+@@ Mangles q0
+@@ when encrypting, outputs out(q0) ^ 63
+@@ when decrypting, outputs unskew(q0)
+@@
+@@ Always called right before return... jumps to cleanup and exits
+@@
+.align 4
+.Lschedule_mangle_last:
+ @ schedule last round key from xmm0
+ adr r11, .Lk_deskew @ lea .Lk_deskew(%rip),%r11 # prepare to deskew
+ tst $dir, $dir
+ bne .Lschedule_mangle_last_dec
+
+ @ encrypting
+ vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10),%xmm1
+ adr r11, .Lk_opt @ lea .Lk_opt(%rip), %r11 # prepare to output transform
+ add $out, $out, #32 @ add \$32, %rdx
+ vmov q2, q0
+ vtbl.8 q0#lo, {q2}, q1#lo @ vpshufb %xmm1, %xmm0, %xmm0 # output permute
+ vtbl.8 q0#hi, {q2}, q1#hi
+
+.Lschedule_mangle_last_dec:
+ sub $out, $out, #16 @ add \$-16, %rdx
+ veor q0, q0, $s63 @ vpxor .Lk_s63(%rip), %xmm0, %xmm0
+ bl _vpaes_schedule_transform @ output transform
+ vst1.64 {q0}, [$out] @ vmovdqu %xmm0, (%rdx) # save last key
+
+ @ cleanup
+ veor q0, q0, q0 @ vpxor %xmm0, %xmm0, %xmm0
+ veor q1, q1, q1 @ vpxor %xmm1, %xmm1, %xmm1
+ veor q2, q2, q2 @ vpxor %xmm2, %xmm2, %xmm2
+ veor q3, q3, q3 @ vpxor %xmm3, %xmm3, %xmm3
+ veor q4, q4, q4 @ vpxor %xmm4, %xmm4, %xmm4
+ veor q5, q5, q5 @ vpxor %xmm5, %xmm5, %xmm5
+ veor q6, q6, q6 @ vpxor %xmm6, %xmm6, %xmm6
+ veor q7, q7, q7 @ vpxor %xmm7, %xmm7, %xmm7
+ ldmia sp!, {r3,pc} @ return
+.size _vpaes_schedule_core,.-_vpaes_schedule_core
+
+@@
+@@ .aes_schedule_192_smear
+@@
+@@ Smear the short, low side in the 192-bit key schedule.
+@@
+@@ Inputs:
+@@ q7: high side, b a x y
+@@ q6: low side, d c 0 0
+@@
+@@ Outputs:
+@@ q6: b+c+d b+c 0 0
+@@ q0: b+c+d b+c b a
+@@
+.type _vpaes_schedule_192_smear,%function
+.align 4
+_vpaes_schedule_192_smear:
+ vmov.i8 q1, #0
+ vdup.32 q0, q7#hi[1]
+ vshl.i64 q1, q6, #32 @ vpshufd \$0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
+ vmov q0#lo, q7#hi @ vpshufd \$0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
+ veor q6, q6, q1 @ vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0
+ veor q1, q1, q1 @ vpxor %xmm1, %xmm1, %xmm1
+ veor q6, q6, q0 @ vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a
+ vmov q0, q6 @ vmovdqa %xmm6, %xmm0
+ vmov q6#lo, q1#lo @ vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros
+ bx lr
+.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
+
+@@
+@@ .aes_schedule_round
+@@
+@@ Runs one main round of the key schedule on q0, q7
+@@
+@@ Specifically, runs subbytes on the high dword of q0
+@@ then rotates it by one byte and xors into the low dword of
+@@ q7.
+@@
+@@ Adds rcon from low byte of q8, then rotates q8 for
+@@ next rcon.
+@@
+@@ Smears the dwords of q7 by xoring the low into the
+@@ second low, result into third, result into highest.
+@@
+@@ Returns results in q7 = q0.
+@@ Clobbers q1-q4, r11.
+@@
+.type _vpaes_schedule_round,%function
+.align 4
+_vpaes_schedule_round:
+ @ extract rcon from xmm8
+ vmov.i8 q4, #0 @ vpxor %xmm4, %xmm4, %xmm4
+ vext.8 q1, $rcon, q4, #15 @ vpalignr \$15, %xmm8, %xmm4, %xmm1
+ vext.8 $rcon, $rcon, $rcon, #15 @ vpalignr \$15, %xmm8, %xmm8, %xmm8
+ veor q7, q7, q1 @ vpxor %xmm1, %xmm7, %xmm7
+
+ @ rotate
+ vdup.32 q0, q0#hi[1] @ vpshufd \$0xFF, %xmm0, %xmm0
+ vext.8 q0, q0, q0, #1 @ vpalignr \$1, %xmm0, %xmm0, %xmm0
+
+ @ fall through...
+
+ @ low round: same as high round, but no rotation and no rcon.
+_vpaes_schedule_low_round:
+ @ The x86_64 version pins .Lk_sb1 in %xmm13 and .Lk_sb1+16 in %xmm12.
+ @ We pin other values in _vpaes_key_preheat, so load them now.
+ adr r11, .Lk_sb1
+ vld1.64 {q14,q15}, [r11]
+
+ @ smear xmm7
+ vext.8 q1, q4, q7, #12 @ vpslldq \$4, %xmm7, %xmm1
+ veor q7, q7, q1 @ vpxor %xmm1, %xmm7, %xmm7
+ vext.8 q4, q4, q7, #8 @ vpslldq \$8, %xmm7, %xmm4
+
+ @ subbytes
+ vand q1, q0, $s0F @ vpand %xmm9, %xmm0, %xmm1 # 0 = k
+ vshr.u8 q0, q0, #4 @ vpsrlb \$4, %xmm0, %xmm0 # 1 = i
+ veor q7, q7, q4 @ vpxor %xmm4, %xmm7, %xmm7
+ vtbl.8 q2#lo, {$invhi}, q1#lo @ vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
+ vtbl.8 q2#hi, {$invhi}, q1#hi
+ veor q1, q1, q0 @ vpxor %xmm0, %xmm1, %xmm1 # 0 = j
+ vtbl.8 q3#lo, {$invlo}, q0#lo @ vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
+ vtbl.8 q3#hi, {$invlo}, q0#hi
+ veor q3, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
+ vtbl.8 q4#lo, {$invlo}, q1#lo @ vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
+ vtbl.8 q4#hi, {$invlo}, q1#hi
+ veor q7, q7, $s63 @ vpxor .Lk_s63(%rip), %xmm7, %xmm7
+ vtbl.8 q3#lo, {$invlo}, q3#lo @ vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak
+ vtbl.8 q3#hi, {$invlo}, q3#hi
+ veor q4, q4, q2 @ vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
+ vtbl.8 q2#lo, {$invlo}, q4#lo @ vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak
+ vtbl.8 q2#hi, {$invlo}, q4#hi
+ veor q3, q3, q1 @ vpxor %xmm1, %xmm3, %xmm3 # 2 = io
+ veor q2, q2, q0 @ vpxor %xmm0, %xmm2, %xmm2 # 3 = jo
+ vtbl.8 q4#lo, {q15}, q3#lo @ vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou
+ vtbl.8 q4#hi, {q15}, q3#hi
+ vtbl.8 q1#lo, {q14}, q2#lo @ vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t
+ vtbl.8 q1#hi, {q14}, q2#hi
+ veor q1, q1, q4 @ vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output
+
+ @ add in smeared stuff
+ veor q0, q1, q7 @ vpxor %xmm7, %xmm1, %xmm0
+ veor q7, q1, q7 @ vmovdqa %xmm0, %xmm7
+ bx lr
+.size _vpaes_schedule_round,.-_vpaes_schedule_round
+
+@@
+@@ .aes_schedule_transform
+@@
+@@ Linear-transform q0 according to tables at [r11]
+@@
+@@ Requires that q9 = 0x0F0F... as in preheat
+@@ Output in q0
+@@ Clobbers q1, q2, q14, q15
+@@
+.type _vpaes_schedule_transform,%function
+.align 4
+_vpaes_schedule_transform:
+ vld1.64 {q14,q15}, [r11] @ vmovdqa (%r11), %xmm2 # lo
+ @ vmovdqa 16(%r11), %xmm1 # hi
+ vand q1, q0, $s0F @ vpand %xmm9, %xmm0, %xmm1
+ vshr.u8 q0, q0, #4 @ vpsrlb \$4, %xmm0, %xmm0
+ vtbl.8 q2#lo, {q14}, q1#lo @ vpshufb %xmm1, %xmm2, %xmm2
+ vtbl.8 q2#hi, {q14}, q1#hi
+ vtbl.8 q0#lo, {q15}, q0#lo @ vpshufb %xmm0, %xmm1, %xmm0
+ vtbl.8 q0#hi, {q15}, q0#hi
+ veor q0, q0, q2 @ vpxor %xmm2, %xmm0, %xmm0
+ bx lr
+.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
+
+@@
+@@ .aes_schedule_mangle
+@@
+@@ Mangles q0 from (basis-transformed) standard version
+@@ to our version.
+@@
+@@ On encrypt,
+@@ xor with 0x63
+@@ multiply by circulant 0,1,1,1
+@@ apply shiftrows transform
+@@
+@@ On decrypt,
+@@ xor with 0x63
+@@ multiply by "inverse mixcolumns" circulant E,B,D,9
+@@ deskew
+@@ apply shiftrows transform
+@@
+@@
+@@ Writes out to [r2], and increments or decrements it
+@@ Keeps track of round number mod 4 in r8
+@@ Preserves q0
+@@ Clobbers q1-q5
+@@
+.type _vpaes_schedule_mangle,%function
+.align 4
+_vpaes_schedule_mangle:
+ tst $dir, $dir
+ vmov q4, q0 @ vmovdqa %xmm0, %xmm4 # save xmm0 for later
+ adr r11, .Lk_mc_forward @ Must be aligned to 8 mod 16.
+ vld1.64 {q5}, [r11] @ vmovdqa .Lk_mc_forward(%rip),%xmm5
+ bne .Lschedule_mangle_dec
+
+ @ encrypting
+ @ Write to q2 so we do not overlap table and destination below.
+ veor q2, q0, $s63 @ vpxor .Lk_s63(%rip), %xmm0, %xmm4
+ add $out, $out, #16 @ add \$16, %rdx
+ vtbl.8 q4#lo, {q2}, q5#lo @ vpshufb %xmm5, %xmm4, %xmm4
+ vtbl.8 q4#hi, {q2}, q5#hi
+ vtbl.8 q1#lo, {q4}, q5#lo @ vpshufb %xmm5, %xmm4, %xmm1
+ vtbl.8 q1#hi, {q4}, q5#hi
+ vtbl.8 q3#lo, {q1}, q5#lo @ vpshufb %xmm5, %xmm1, %xmm3
+ vtbl.8 q3#hi, {q1}, q5#hi
+ veor q4, q4, q1 @ vpxor %xmm1, %xmm4, %xmm4
+ vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1
+ veor q3, q3, q4 @ vpxor %xmm4, %xmm3, %xmm3
+
+ b .Lschedule_mangle_both
+.align 4
+.Lschedule_mangle_dec:
+ @ inverse mix columns
+ adr r11, .Lk_dksd @ lea .Lk_dksd(%rip),%r11
+ vshr.u8 q1, q4, #4 @ vpsrlb \$4, %xmm4, %xmm1 # 1 = hi
+ vand q4, q4, $s0F @ vpand %xmm9, %xmm4, %xmm4 # 4 = lo
+
+ vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x00(%r11), %xmm2
+ @ vmovdqa 0x10(%r11), %xmm3
+ vtbl.8 q2#lo, {q14}, q4#lo @ vpshufb %xmm4, %xmm2, %xmm2
+ vtbl.8 q2#hi, {q14}, q4#hi
+ vtbl.8 q3#lo, {q15}, q1#lo @ vpshufb %xmm1, %xmm3, %xmm3
+ vtbl.8 q3#hi, {q15}, q1#hi
+ @ Load .Lk_dksb ahead of time.
+ vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x20(%r11), %xmm2
+ @ vmovdqa 0x30(%r11), %xmm3
+ @ Write to q13 so we do not overlap table and destination.
+ veor q13, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3
+ vtbl.8 q3#lo, {q13}, q5#lo @ vpshufb %xmm5, %xmm3, %xmm3
+ vtbl.8 q3#hi, {q13}, q5#hi
+
+ vtbl.8 q2#lo, {q14}, q4#lo @ vpshufb %xmm4, %xmm2, %xmm2
+ vtbl.8 q2#hi, {q14}, q4#hi
+ veor q2, q2, q3 @ vpxor %xmm3, %xmm2, %xmm2
+ vtbl.8 q3#lo, {q15}, q1#lo @ vpshufb %xmm1, %xmm3, %xmm3
+ vtbl.8 q3#hi, {q15}, q1#hi
+ @ Load .Lk_dkse ahead of time.
+ vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x40(%r11), %xmm2
+ @ vmovdqa 0x50(%r11), %xmm3
+ @ Write to q13 so we do not overlap table and destination.
+ veor q13, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3
+ vtbl.8 q3#lo, {q13}, q5#lo @ vpshufb %xmm5, %xmm3, %xmm3
+ vtbl.8 q3#hi, {q13}, q5#hi
+
+ vtbl.8 q2#lo, {q14}, q4#lo @ vpshufb %xmm4, %xmm2, %xmm2
+ vtbl.8 q2#hi, {q14}, q4#hi
+ veor q2, q2, q3 @ vpxor %xmm3, %xmm2, %xmm2
+ vtbl.8 q3#lo, {q15}, q1#lo @ vpshufb %xmm1, %xmm3, %xmm3
+ vtbl.8 q3#hi, {q15}, q1#hi
+	@ Load .Lk_dks9 ahead of time.
+ vld1.64 {q14,q15}, [r11]! @ vmovdqa 0x60(%r11), %xmm2
+ @ vmovdqa 0x70(%r11), %xmm4
+ @ Write to q13 so we do not overlap table and destination.
+ veor q13, q3, q2 @ vpxor %xmm2, %xmm3, %xmm3
+
+ vtbl.8 q2#lo, {q14}, q4#lo @ vpshufb %xmm4, %xmm2, %xmm2
+ vtbl.8 q2#hi, {q14}, q4#hi
+ vtbl.8 q3#lo, {q13}, q5#lo @ vpshufb %xmm5, %xmm3, %xmm3
+ vtbl.8 q3#hi, {q13}, q5#hi
+ vtbl.8 q4#lo, {q15}, q1#lo @ vpshufb %xmm1, %xmm4, %xmm4
+ vtbl.8 q4#hi, {q15}, q1#hi
+ vld1.64 {q1}, [r8] @ vmovdqa (%r8,%r10), %xmm1
+ veor q2, q2, q3 @ vpxor %xmm3, %xmm2, %xmm2
+ veor q3, q4, q2 @ vpxor %xmm2, %xmm4, %xmm3
+
+ sub $out, $out, #16 @ add \$-16, %rdx
+
+.Lschedule_mangle_both:
+ @ Write to q2 so table and destination do not overlap.
+ vtbl.8 q2#lo, {q3}, q1#lo @ vpshufb %xmm1, %xmm3, %xmm3
+ vtbl.8 q2#hi, {q3}, q1#hi
+ add r8, r8, #64-16 @ add \$-16, %r8
+ and r8, r8, #~(1<<6) @ and \$0x30, %r8
+ vst1.64 {q2}, [$out] @ vmovdqu %xmm3, (%rdx)
+ bx lr
+.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
+
+.globl vpaes_set_encrypt_key
+.type vpaes_set_encrypt_key,%function
+.align 4
+vpaes_set_encrypt_key:
+ stmdb sp!, {r7-r11, lr}
+ vstmdb sp!, {d8-d15}
+
+ lsr r9, $bits, #5 @ shr \$5,%eax
+ add r9, r9, #5 @ \$5,%eax
+ str r9, [$out,#240] @ mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
+
+ mov $dir, #0 @ mov \$0,%ecx
+ mov r8, #0x30 @ mov \$0x30,%r8d
+ bl _vpaes_schedule_core
+ eor r0, r0, r0
+
+ vldmia sp!, {d8-d15}
+ ldmia sp!, {r7-r11, pc} @ return
+.size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key
+
+.globl vpaes_set_decrypt_key
+.type vpaes_set_decrypt_key,%function
+.align 4
+vpaes_set_decrypt_key:
+ stmdb sp!, {r7-r11, lr}
+ vstmdb sp!, {d8-d15}
+
+ lsr r9, $bits, #5 @ shr \$5,%eax
+ add r9, r9, #5 @ \$5,%eax
+ str r9, [$out,#240] @ mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
+ lsl r9, r9, #4 @ shl \$4,%eax
+ add $out, $out, #16 @ lea 16(%rdx,%rax),%rdx
+ add $out, $out, r9
+
+ mov $dir, #1 @ mov \$1,%ecx
+ lsr r8, $bits, #1 @ shr \$1,%r8d
+ and r8, r8, #32 @ and \$32,%r8d
+ eor r8, r8, #32 @ xor \$32,%r8d # nbits==192?0:32
+ bl _vpaes_schedule_core
+
+ vldmia sp!, {d8-d15}
+ ldmia sp!, {r7-r11, pc} @ return
+.size vpaes_set_decrypt_key,.-vpaes_set_decrypt_key
+___
+}
+
+{
+my ($out, $inp) = map("r$_", (0..1));
+my ($s0F, $s63, $s63_raw, $mc_forward) = map("q$_", (9..12));
+
+$code .= <<___;
+
+@ Additional constants for converting to bsaes.
+.type _vpaes_convert_consts,%object
+.align 4
+_vpaes_convert_consts:
+@ .Lk_opt_then_skew applies skew(opt(x)) XOR 0x63, where skew is the linear
+@ transform in the AES S-box. 0x63 is incorporated into the low half of the
+@ table. This was computed with the following script:
+@
+@ def u64s_to_u128(x, y):
+@ return x | (y << 64)
+@ def u128_to_u64s(w):
+@ return w & ((1<<64)-1), w >> 64
+@ def get_byte(w, i):
+@ return (w >> (i*8)) & 0xff
+@ def apply_table(table, b):
+@ lo = b & 0xf
+@ hi = b >> 4
+@ return get_byte(table[0], lo) ^ get_byte(table[1], hi)
+@ def opt(b):
+@ table = [
+@ u64s_to_u128(0xFF9F4929D6B66000, 0xF7974121DEBE6808),
+@ u64s_to_u128(0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0),
+@ ]
+@ return apply_table(table, b)
+@ def rot_byte(b, n):
+@ return 0xff & ((b << n) | (b >> (8-n)))
+@ def skew(x):
+@ return (x ^ rot_byte(x, 1) ^ rot_byte(x, 2) ^ rot_byte(x, 3) ^
+@ rot_byte(x, 4))
+@ table = [0, 0]
+@ for i in range(16):
+@ table[0] |= (skew(opt(i)) ^ 0x63) << (i*8)
+@ table[1] |= skew(opt(i<<4)) << (i*8)
+@ print("\t.quad\t0x%016x, 0x%016x" % u128_to_u64s(table[0]))
+@ print("\t.quad\t0x%016x, 0x%016x" % u128_to_u64s(table[1]))
+.Lk_opt_then_skew:
+ .quad 0x9cb8436798bc4763, 0x6440bb9f6044bf9b
+ .quad 0x1f30062936192f00, 0xb49bad829db284ab
+
+@ .Lk_decrypt_transform is a permutation which performs an 8-bit left-rotation
+@ followed by a byte-swap on each 32-bit word of a vector. E.g., 0x11223344
+@ becomes 0x22334411 and then 0x11443322.
+.Lk_decrypt_transform:
+ .quad 0x0704050603000102, 0x0f0c0d0e0b08090a
+.size _vpaes_convert_consts,.-_vpaes_convert_consts
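+
+@ As a quick check of .Lk_decrypt_transform, the same operation on one 32-bit
+@ word, written as a C sketch (illustrative only; the real code applies the
+@ permutation to a whole vector with vtbl.8 in vpaes_decrypt_key_to_bsaes
+@ below):
+@
+@ uint32_t decrypt_transform_word(uint32_t w) {
+@   uint32_t rotated = (w << 8) | (w >> 24);  // 8-bit left-rotation
+@   return __builtin_bswap32(rotated);        // byte-swap the word
+@ }
+@
+@ decrypt_transform_word(0x11223344) == 0x11443322, as in the example above.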
+
+@ void vpaes_encrypt_key_to_bsaes(AES_KEY *bsaes, const AES_KEY *vpaes);
+.globl vpaes_encrypt_key_to_bsaes
+.type vpaes_encrypt_key_to_bsaes,%function
+.align 4
+vpaes_encrypt_key_to_bsaes:
+ stmdb sp!, {r11, lr}
+
+ @ See _vpaes_schedule_core for the key schedule logic. In particular,
+ @ _vpaes_schedule_transform(.Lk_ipt) (section 2.2 of the paper),
+ @ _vpaes_schedule_mangle (section 4.3), and .Lschedule_mangle_last
+ @ contain the transformations not in the bsaes representation. This
+ @ function inverts those transforms.
+ @
+ @ Note also that bsaes-armv7.pl expects aes-armv4.pl's key
+ @ representation, which does not match the other aes_nohw_*
+ @ implementations. The ARM aes_nohw_* stores each 32-bit word
+ @ byteswapped, as a convenience for (unsupported) big-endian ARM, at the
+ @ cost of extra REV and VREV32 operations in little-endian ARM.
+
+ vmov.i8 $s0F, #0x0f @ Required by _vpaes_schedule_transform
+ adr r2, .Lk_mc_forward @ Must be aligned to 8 mod 16.
+ add r3, r2, 0x90 @ .Lk_sr+0x10-.Lk_mc_forward = 0x90 (Apple's toolchain doesn't support the expression)
+
+ vld1.64 {$mc_forward}, [r2]
+ vmov.i8 $s63, #0x5b @ .Lk_s63 from vpaes-x86_64
+ adr r11, .Lk_opt @ Must be aligned to 8 mod 16.
+ vmov.i8 $s63_raw, #0x63 @ .Lk_s63 without .Lk_ipt applied
+
+ @ vpaes stores a round count one less than bsaes's, but the number of
+ @ round keys is the same.
+ ldr r2, [$inp,#240]
+ add r2, r2, #1
+ str r2, [$out,#240]
+
+ @ The first key is transformed with _vpaes_schedule_transform(.Lk_ipt).
+ @ Invert this with .Lk_opt.
+ vld1.64 {q0}, [$inp]!
+ bl _vpaes_schedule_transform
+ vrev32.8 q0, q0
+ vst1.64 {q0}, [$out]!
+
+ @ The middle keys have _vpaes_schedule_transform(.Lk_ipt) applied,
+ @ followed by _vpaes_schedule_mangle. _vpaes_schedule_mangle XORs 0x63,
+ @ multiplies by the circulant 0,1,1,1, then applies ShiftRows.
+.Loop_enc_key_to_bsaes:
+ vld1.64 {q0}, [$inp]!
+
+ @ Invert the ShiftRows step (see .Lschedule_mangle_both). Note we cycle
+ @ r3 in the opposite direction and start at .Lk_sr+0x10 instead of 0x30.
+ @ We use r3 rather than r8 to avoid a callee-saved register.
+ vld1.64 {q1}, [r3]
+ vtbl.8 q2#lo, {q0}, q1#lo
+ vtbl.8 q2#hi, {q0}, q1#hi
+ add r3, r3, #16
+ and r3, r3, #~(1<<6)
+ vmov q0, q2
+
+ @ Handle the last key differently.
+ subs r2, r2, #1
+ beq .Loop_enc_key_to_bsaes_last
+
+ @ Multiply by the circulant. This is its own inverse.
+ vtbl.8 q1#lo, {q0}, $mc_forward#lo
+ vtbl.8 q1#hi, {q0}, $mc_forward#hi
+ vmov q0, q1
+ vtbl.8 q2#lo, {q1}, $mc_forward#lo
+ vtbl.8 q2#hi, {q1}, $mc_forward#hi
+ veor q0, q0, q2
+ vtbl.8 q1#lo, {q2}, $mc_forward#lo
+ vtbl.8 q1#hi, {q2}, $mc_forward#hi
+ veor q0, q0, q1
+
+ @ XOR and finish.
+ veor q0, q0, $s63
+ bl _vpaes_schedule_transform
+ vrev32.8 q0, q0
+ vst1.64 {q0}, [$out]!
+ b .Loop_enc_key_to_bsaes
+
+.Loop_enc_key_to_bsaes_last:
+ @ The final key does not have a basis transform (note
+ @ .Lschedule_mangle_last inverts the original transform). It only XORs
+ @ 0x63 and applies ShiftRows. The latter was already inverted in the
+ @ loop. Note that, because we act on the original representation, we use
+ @ $s63_raw, not $s63.
+ veor q0, q0, $s63_raw
+ vrev32.8 q0, q0
+ vst1.64 {q0}, [$out]
+
+ @ Wipe registers which contained key material.
+ veor q0, q0, q0
+ veor q1, q1, q1
+ veor q2, q2, q2
+
+ ldmia sp!, {r11, pc} @ return
+.size vpaes_encrypt_key_to_bsaes,.-vpaes_encrypt_key_to_bsaes
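+
+@ The circulant multiplication in .Loop_enc_key_to_bsaes above acts
+@ independently on each 32-bit column. As a C sketch (illustrative only; NEON
+@ lane ordering ignored):
+@
+@ uint32_t mul_circulant_0111(uint32_t col) {
+@   uint32_t r1 = (col << 8) | (col >> 24);
+@   uint32_t r2 = (col << 16) | (col >> 16);
+@   uint32_t r3 = (col << 24) | (col >> 8);
+@   return r1 ^ r2 ^ r3;
+@ }
+@
+@ Applying it twice returns |col|: expanding gives the identity rotation three
+@ times and each proper rotation twice, so everything but |col| cancels. That
+@ is why the same vtbl/veor sequence both applies and undoes the multiply.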
+
+@ void vpaes_decrypt_key_to_bsaes(AES_KEY *vpaes, const AES_KEY *bsaes);
+.globl vpaes_decrypt_key_to_bsaes
+.type vpaes_decrypt_key_to_bsaes,%function
+.align 4
+vpaes_decrypt_key_to_bsaes:
+ stmdb sp!, {r11, lr}
+
+ @ See _vpaes_schedule_core for the key schedule logic. Note vpaes
+ @ computes the decryption key schedule in reverse. Additionally,
+ @ aes-x86_64.pl shares some transformations, so we must only partially
+ @ invert vpaes's transformations. In general, vpaes computes in a
+ @ different basis (.Lk_ipt and .Lk_opt) and applies the inverses of
+ @ MixColumns, ShiftRows, and the affine part of the AES S-box (which is
+ @ split into a linear skew and XOR of 0x63). We undo all but MixColumns.
+ @
+ @ Note also that bsaes-armv7.pl expects aes-armv4.pl's key
+ @ representation, which does not match the other aes_nohw_*
+ @ implementations. The ARM aes_nohw_* stores each 32-bit word
+ @ byteswapped, as a convenience for (unsupported) big-endian ARM, at the
+ @ cost of extra REV and VREV32 operations in little-endian ARM.
+
+ adr r2, .Lk_decrypt_transform
+ adr r3, .Lk_sr+0x30
+ adr r11, .Lk_opt_then_skew @ Input to _vpaes_schedule_transform.
+ vld1.64 {$mc_forward}, [r2] @ Reuse $mc_forward from encryption.
+ vmov.i8 $s0F, #0x0f @ Required by _vpaes_schedule_transform
+
+ @ vpaes stores a round count one less than bsaes's, but the number of
+ @ round keys is the same.
+ ldr r2, [$inp,#240]
+ add r2, r2, #1
+ str r2, [$out,#240]
+
+ @ Undo the basis change and reapply the S-box affine transform. See
+ @ .Lschedule_mangle_last.
+ vld1.64 {q0}, [$inp]!
+ bl _vpaes_schedule_transform
+ vrev32.8 q0, q0
+ vst1.64 {q0}, [$out]!
+
+ @ See _vpaes_schedule_mangle for the transform on the middle keys. Note
+ @ it simultaneously inverts MixColumns and the S-box affine transform.
+ @ See .Lk_dksd through .Lk_dks9.
+.Loop_dec_key_to_bsaes:
+ vld1.64 {q0}, [$inp]!
+
+ @ Invert the ShiftRows step (see .Lschedule_mangle_both). Because vpaes
+ @ computes the decryption schedule in reverse, reading the keys forwards
+ @ cancels the inversion, so r3 cycles in the same direction as in
+ @ _vpaes_schedule_mangle. We use r3 rather than r8 to avoid a callee-saved
+ @ register.
+ vld1.64 {q1}, [r3]
+ vtbl.8 q2#lo, {q0}, q1#lo
+ vtbl.8 q2#hi, {q0}, q1#hi
+ add r3, r3, #64-16
+ and r3, r3, #~(1<<6)
+ vmov q0, q2
+
+ @ Handle the last key differently.
+ subs r2, r2, #1
+ beq .Loop_dec_key_to_bsaes_last
+
+ @ Undo the basis change and reapply the S-box affine transform.
+ bl _vpaes_schedule_transform
+
+ @ Rotate each word by 8 bytes (cycle the rows) and then byte-swap. We
+ @ combine the two operations in .Lk_decrypt_transform.
+ @
+ @ TODO(davidben): Where does the rotation come from?
+ vtbl.8 q1#lo, {q0}, $mc_forward#lo
+ vtbl.8 q1#hi, {q0}, $mc_forward#hi
+
+ vst1.64 {q1}, [$out]!
+ b .Loop_dec_key_to_bsaes
+
+.Loop_dec_key_to_bsaes_last:
+ @ The final key only inverts ShiftRows (already done in the loop). See
+ @ .Lschedule_am_decrypting. Its basis is not transformed.
+ vrev32.8 q0, q0
+ vst1.64 {q0}, [$out]!
+
+ @ Wipe registers which contained key material.
+ veor q0, q0, q0
+ veor q1, q1, q1
+ veor q2, q2, q2
+
+ ldmia sp!, {r11, pc} @ return
+.size vpaes_decrypt_key_to_bsaes,.-vpaes_decrypt_key_to_bsaes
+___
+}
+
+{
+# Register-passed parameters.
+my ($inp, $out, $len, $key) = map("r$_", 0..3);
+# Temporaries. _vpaes_encrypt_core already uses r8..r11, so overlap $ivec and
+# $tmp. $ctr is r7 because it must be preserved across calls.
+my ($ctr, $ivec, $tmp) = map("r$_", 7..9);
+
+# void vpaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t len,
+# const AES_KEY *key, const uint8_t ivec[16]);
+$code .= <<___;
+.globl vpaes_ctr32_encrypt_blocks
+.type vpaes_ctr32_encrypt_blocks,%function
+.align 4
+vpaes_ctr32_encrypt_blocks:
+ mov ip, sp
+ stmdb sp!, {r7-r11, lr}
+ @ This function uses q4-q7 (d8-d15), which are callee-saved.
+ vstmdb sp!, {d8-d15}
+
+ cmp $len, #0
+ @ $ivec is passed on the stack.
+ ldr $ivec, [ip]
+ beq .Lctr32_done
+
+ @ _vpaes_encrypt_core expects the key in r2, so swap $len and $key.
+ mov $tmp, $key
+ mov $key, $len
+ mov $len, $tmp
+___
+my ($len, $key) = ($key, $len);
+$code .= <<___;
+
+ @ Load the IV and counter portion.
+ ldr $ctr, [$ivec, #12]
+ vld1.8 {q7}, [$ivec]
+
+ bl _vpaes_preheat
+ rev $ctr, $ctr @ The counter is big-endian.
+
+.Lctr32_loop:
+ vmov q0, q7
+ vld1.8 {q6}, [$inp]! @ Load input ahead of time
+ bl _vpaes_encrypt_core
+ veor q0, q0, q6 @ XOR input and result
+ vst1.8 {q0}, [$out]!
+ subs $len, $len, #1
+ @ Update the counter.
+ add $ctr, $ctr, #1
+ rev $tmp, $ctr
+ vmov.32 q7#hi[1], $tmp
+ bne .Lctr32_loop
+
+.Lctr32_done:
+ vldmia sp!, {d8-d15}
+ ldmia sp!, {r7-r11, pc} @ return
+.size vpaes_ctr32_encrypt_blocks,.-vpaes_ctr32_encrypt_blocks
+___
+}
+
+foreach (split("\n",$code)) {
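+ # NEON aliasing: q<n> is the register pair (d<2n>, d<2n+1>), so e.g.
+ # "q5#lo" becomes "d10" and "q5#hi" becomes "d11".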
+ s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo;
+ print $_,"\n";
+}
+
+close STDOUT;
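
For reference, the CTR loop in vpaes_ctr32_encrypt_blocks above is the standard
32-bit big-endian CTR construction. A portable C model of the computation (a
sketch only; |ctr32_model| and |block| are ours for illustration, with |block|
standing in for a single-block encryption such as what _vpaes_encrypt_core
computes):

#include <stdint.h>
#include <string.h>

static void ctr32_model(const uint8_t *in, uint8_t *out, size_t blocks,
                        const void *key, const uint8_t ivec[16],
                        void (*block)(const uint8_t in[16], uint8_t out[16],
                                      const void *key)) {
  uint8_t counter[16], keystream[16];
  memcpy(counter, ivec, 16);
  // Only the last word of |ivec| is a counter; read it as big-endian.
  uint32_t ctr = ((uint32_t)counter[12] << 24) | ((uint32_t)counter[13] << 16) |
                 ((uint32_t)counter[14] << 8) | (uint32_t)counter[15];
  for (size_t i = 0; i < blocks; i++) {
    block(counter, keystream, key);
    for (size_t j = 0; j < 16; j++) {
      out[16 * i + j] = in[16 * i + j] ^ keystream[j];
    }
    // Increment and write back the counter word; the upper 96 bits are fixed.
    ctr++;
    counter[12] = (uint8_t)(ctr >> 24);
    counter[13] = (uint8_t)(ctr >> 16);
    counter[14] = (uint8_t)(ctr >> 8);
    counter[15] = (uint8_t)ctr;
  }
}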
diff --git a/src/crypto/fipsmodule/aes/asm/vpaes-x86_64.pl b/src/crypto/fipsmodule/aes/asm/vpaes-x86_64.pl
index 45463e8..8cc6a1a 100644
--- a/src/crypto/fipsmodule/aes/asm/vpaes-x86_64.pl
+++ b/src/crypto/fipsmodule/aes/asm/vpaes-x86_64.pl
@@ -182,11 +182,11 @@
##
## Inputs:
## %xmm0 and %xmm6 = input
-## %xmm12-%xmm15 as in _vpaes_preheat
+## %xmm9 and %xmm10 as in _vpaes_preheat
## (%rdx) = scheduled keys
##
## Output in %xmm0 and %xmm6
-## Clobbers %xmm1-%xmm5, %xmm7-%xmm11, %r9, %r10, %r11, %rax
+## Clobbers %xmm1-%xmm5, %xmm7, %xmm8, %xmm11-%xmm13, %r9, %r10, %r11, %rax
## Preserves %xmm14 and %xmm15
##
## This function stitches two parallel instances of _vpaes_encrypt_core. x86_64
diff --git a/src/crypto/fipsmodule/aes/internal.h b/src/crypto/fipsmodule/aes/internal.h
index 0cebb04..99d509a 100644
--- a/src/crypto/fipsmodule/aes/internal.h
+++ b/src/crypto/fipsmodule/aes/internal.h
@@ -38,6 +38,7 @@
#if defined(OPENSSL_X86_64)
#define VPAES_CTR32
#endif
+#define VPAES_CBC
OPENSSL_INLINE int vpaes_capable(void) {
return (OPENSSL_ia32cap_get()[1] & (1 << (41 - 32))) != 0;
}
@@ -49,11 +50,15 @@
#if defined(OPENSSL_ARM)
#define BSAES
+#define VPAES
+#define VPAES_CTR32
OPENSSL_INLINE int bsaes_capable(void) { return CRYPTO_is_NEON_capable(); }
+OPENSSL_INLINE int vpaes_capable(void) { return CRYPTO_is_NEON_capable(); }
#endif
#if defined(OPENSSL_AARCH64)
#define VPAES
+#define VPAES_CBC
#define VPAES_CTR32
OPENSSL_INLINE int vpaes_capable(void) { return CRYPTO_is_NEON_capable(); }
#endif
@@ -130,12 +135,14 @@
#if defined(BSAES)
-// On platforms where BSAES gets defined (just above), then these functions are
-// provided by asm. Note |bsaes_cbc_encrypt| requires |enc| to be zero.
+// Note |bsaes_cbc_encrypt| requires |enc| to be zero.
void bsaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
const AES_KEY *key, uint8_t ivec[16], int enc);
void bsaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t len,
const AES_KEY *key, const uint8_t ivec[16]);
+// VPAES to BSAES conversions are available on all BSAES platforms.
+void vpaes_encrypt_key_to_bsaes(AES_KEY *out_bsaes, const AES_KEY *vpaes);
+void vpaes_decrypt_key_to_bsaes(AES_KEY *out_bsaes, const AES_KEY *vpaes);
#else
OPENSSL_INLINE char bsaes_capable(void) { return 0; }
@@ -152,6 +159,16 @@
const uint8_t ivec[16]) {
abort();
}
+
+OPENSSL_INLINE void vpaes_encrypt_key_to_bsaes(AES_KEY *out_bsaes,
+ const AES_KEY *vpaes) {
+ abort();
+}
+
+OPENSSL_INLINE void vpaes_decrypt_key_to_bsaes(AES_KEY *out_bsaes,
+ const AES_KEY *vpaes) {
+ abort();
+}
#endif // !BSAES
@@ -164,8 +181,10 @@
void vpaes_encrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
void vpaes_decrypt(const uint8_t *in, uint8_t *out, const AES_KEY *key);
+#if defined(VPAES_CBC)
void vpaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
const AES_KEY *key, uint8_t *ivec, int enc);
+#endif
#if defined(VPAES_CTR32)
void vpaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out, size_t len,
const AES_KEY *key, const uint8_t ivec[16]);
diff --git a/src/crypto/fipsmodule/bcm.c b/src/crypto/fipsmodule/bcm.c
index 559ade3..2706722 100644
--- a/src/crypto/fipsmodule/bcm.c
+++ b/src/crypto/fipsmodule/bcm.c
@@ -19,6 +19,10 @@
#include <openssl/crypto.h>
#include <stdlib.h>
+#if defined(BORINGSSL_FIPS)
+#include <sys/mman.h>
+#include <unistd.h>
+#endif
#include <openssl/digest.h>
#include <openssl/hmac.h>
@@ -99,6 +103,7 @@
#if defined(BORINGSSL_FIPS)
#if !defined(OPENSSL_ASAN)
+
// These symbols are filled in by delocate.go (in static builds) or a linker
// script (in shared builds). They point to the start and end of the module, and
// the location of the integrity hash, respectively.
@@ -109,9 +114,31 @@
extern const uint8_t BORINGSSL_bcm_rodata_start[];
extern const uint8_t BORINGSSL_bcm_rodata_end[];
#endif
+
+#if defined(OPENSSL_ANDROID) && defined(OPENSSL_AARCH64)
+static void BORINGSSL_maybe_set_module_text_permissions(int permission) {
+ // Android may be compiled in execute-only-memory mode, in which case the
+ // .text segment cannot be read. That conflicts with the need for a FIPS
+ // module to hash its own contents, therefore |mprotect| is used to make
+ // the module's .text readable for the duration of the hashing process. In
+ // other build configurations this is a no-op.
+ const uintptr_t page_size = getpagesize();
+ const uintptr_t page_start =
+ ((uintptr_t)BORINGSSL_bcm_text_start) & ~(page_size - 1);
+
+ if (mprotect((void *)page_start,
+ ((uintptr_t)BORINGSSL_bcm_text_end) - page_start,
+ permission) != 0) {
+ perror("BoringSSL: mprotect");
+ }
+}
+#else
+static void BORINGSSL_maybe_set_module_text_permissions(int permission) {}
+#endif // OPENSSL_ANDROID && OPENSSL_AARCH64
+
#else
static const uint8_t BORINGSSL_bcm_text_hash[SHA512_DIGEST_LENGTH] = {0};
-#endif
+#endif // !ASAN
static void __attribute__((constructor))
BORINGSSL_bcm_power_on_self_test(void) {
@@ -138,6 +165,8 @@
fprintf(stderr, "HMAC_Init_ex failed.\n");
goto err;
}
+
+ BORINGSSL_maybe_set_module_text_permissions(PROT_READ | PROT_EXEC);
#if defined(BORINGSSL_SHARED_LIBRARY)
uint64_t length = end - start;
HMAC_Update(&hmac_ctx, (const uint8_t *) &length, sizeof(length));
@@ -149,6 +178,8 @@
#else
HMAC_Update(&hmac_ctx, start, end - start);
#endif
+ BORINGSSL_maybe_set_module_text_permissions(PROT_EXEC);
+
if (!HMAC_Final(&hmac_ctx, result, &result_len) ||
result_len != sizeof(result)) {
fprintf(stderr, "HMAC failed.\n");
diff --git a/src/crypto/fipsmodule/cipher/e_aes.c b/src/crypto/fipsmodule/cipher/e_aes.c
index 1ea012d..2b30fb0 100644
--- a/src/crypto/fipsmodule/cipher/e_aes.c
+++ b/src/crypto/fipsmodule/cipher/e_aes.c
@@ -68,6 +68,48 @@
OPENSSL_MSVC_PRAGMA(warning(push))
OPENSSL_MSVC_PRAGMA(warning(disable: 4702)) // Unreachable code.
+#if defined(BSAES)
+static void vpaes_ctr32_encrypt_blocks_with_bsaes(const uint8_t *in,
+ uint8_t *out, size_t blocks,
+ const AES_KEY *key,
+ const uint8_t ivec[16]) {
+ // |bsaes_ctr32_encrypt_blocks| is faster than |vpaes_ctr32_encrypt_blocks|,
+ // but it takes at least one full 8-block batch to amortize the conversion.
+ if (blocks < 8) {
+ vpaes_ctr32_encrypt_blocks(in, out, blocks, key, ivec);
+ return;
+ }
+
+ size_t bsaes_blocks = blocks;
+ if (bsaes_blocks % 8 < 6) {
+ // |bsaes_ctr32_encrypt_blocks| internally works in 8-block batches. If the
+ // final batch is too small (under six blocks), it is faster to loop over
+ // |vpaes_encrypt|. Round |bsaes_blocks| down to a multiple of 8.
+ bsaes_blocks -= bsaes_blocks % 8;
+ }
+
+ AES_KEY bsaes;
+ vpaes_encrypt_key_to_bsaes(&bsaes, key);
+ bsaes_ctr32_encrypt_blocks(in, out, bsaes_blocks, &bsaes, ivec);
+ OPENSSL_cleanse(&bsaes, sizeof(bsaes));
+
+ in += 16 * bsaes_blocks;
+ out += 16 * bsaes_blocks;
+ blocks -= bsaes_blocks;
+
+ union {
+ uint32_t u32[4];
+ uint8_t u8[16];
+ } new_ivec;
+ memcpy(new_ivec.u8, ivec, 16);
+ uint32_t ctr = CRYPTO_bswap4(new_ivec.u32[3]) + bsaes_blocks;
+ new_ivec.u32[3] = CRYPTO_bswap4(ctr);
+
+ // Finish any remaining blocks with |vpaes_ctr32_encrypt_blocks|.
+ vpaes_ctr32_encrypt_blocks(in, out, blocks, key, new_ivec.u8);
+}
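+
+// Worked example (illustrative): with |blocks| = 21, the final batch would be
+// 21 % 8 = 5 blocks, which is under six, so |bsaes_blocks| rounds down to 16.
+// bsaes encrypts blocks 0..15, the counter word advances by 16, and vpaes
+// finishes the remaining five blocks.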
+#endif // BSAES
+
typedef struct {
union {
double align;
@@ -110,14 +152,23 @@
dat->stream.cbc = aes_hw_cbc_encrypt;
}
} else if (bsaes_capable() && mode == EVP_CIPH_CBC_MODE) {
- ret = aes_nohw_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
+ assert(vpaes_capable());
+ ret = vpaes_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
+ if (ret == 0) {
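+      // In-place conversion is fine: the converters read and write the
+      // schedule in lockstep, one round key at a time.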
+ vpaes_decrypt_key_to_bsaes(&dat->ks.ks, &dat->ks.ks);
+ }
// If |dat->stream.cbc| is provided, |dat->block| is never used.
dat->block = NULL;
dat->stream.cbc = bsaes_cbc_encrypt;
} else if (vpaes_capable()) {
ret = vpaes_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
dat->block = vpaes_decrypt;
- dat->stream.cbc = mode == EVP_CIPH_CBC_MODE ? vpaes_cbc_encrypt : NULL;
+ dat->stream.cbc = NULL;
+#if defined(VPAES_CBC)
+ if (mode == EVP_CIPH_CBC_MODE) {
+ dat->stream.cbc = vpaes_cbc_encrypt;
+ }
+#endif
} else {
ret = aes_nohw_set_decrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
dat->block = aes_nohw_decrypt;
@@ -137,23 +188,23 @@
} else if (mode == EVP_CIPH_CTR_MODE) {
dat->stream.ctr = aes_hw_ctr32_encrypt_blocks;
}
- } else if (bsaes_capable() && mode == EVP_CIPH_CTR_MODE) {
- ret = aes_nohw_set_encrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
- // If |dat->stream.ctr| is provided, |dat->block| is never used.
- dat->block = NULL;
- dat->stream.ctr = bsaes_ctr32_encrypt_blocks;
} else if (vpaes_capable()) {
ret = vpaes_set_encrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
dat->block = vpaes_encrypt;
dat->stream.cbc = NULL;
+#if defined(VPAES_CBC)
if (mode == EVP_CIPH_CBC_MODE) {
dat->stream.cbc = vpaes_cbc_encrypt;
}
-#if defined(VPAES_CTR32)
- if (mode == EVP_CIPH_CTR_MODE) {
- dat->stream.ctr = vpaes_ctr32_encrypt_blocks;
- }
#endif
+ if (mode == EVP_CIPH_CTR_MODE) {
+#if defined(BSAES)
+ assert(bsaes_capable());
+ dat->stream.ctr = vpaes_ctr32_encrypt_blocks_with_bsaes;
+#elif defined(VPAES_CTR32)
+ dat->stream.ctr = vpaes_ctr32_encrypt_blocks;
+#endif
+ }
} else {
ret = aes_nohw_set_encrypt_key(key, ctx->key_len * 8, &dat->ks.ks);
dat->block = aes_nohw_encrypt;
@@ -242,17 +293,6 @@
return aes_hw_ctr32_encrypt_blocks;
}
- if (bsaes_capable()) {
- aes_nohw_set_encrypt_key(key, key_bytes * 8, aes_key);
- if (gcm_key != NULL) {
- CRYPTO_gcm128_init_key(gcm_key, aes_key, aes_nohw_encrypt, 0);
- }
- if (out_block) {
- *out_block = aes_nohw_encrypt;
- }
- return bsaes_ctr32_encrypt_blocks;
- }
-
if (vpaes_capable()) {
vpaes_set_encrypt_key(key, key_bytes * 8, aes_key);
if (out_block) {
@@ -261,7 +301,10 @@
if (gcm_key != NULL) {
CRYPTO_gcm128_init_key(gcm_key, aes_key, vpaes_encrypt, 0);
}
-#if defined(VPAES_CTR32)
+#if defined(BSAES)
+ assert(bsaes_capable());
+ return vpaes_ctr32_encrypt_blocks_with_bsaes;
+#elif defined(VPAES_CTR32)
return vpaes_ctr32_encrypt_blocks;
#else
return NULL;
@@ -847,7 +890,7 @@
size_t key_len, size_t tag_len) {
const size_t key_bits = key_len * 8;
- if (key_bits != 128 && key_bits != 256) {
+ if (key_bits != 128 && key_bits != 192 && key_bits != 256) {
OPENSSL_PUT_ERROR(CIPHER, CIPHER_R_BAD_KEY_LENGTH);
return 0; // EVP_AEAD_CTX_init should catch this.
}
@@ -1020,6 +1063,21 @@
out->open_gather = aead_aes_gcm_open_gather;
}
+DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_192_gcm) {
+ memset(out, 0, sizeof(EVP_AEAD));
+
+ out->key_len = 24;
+ out->nonce_len = 12;
+ out->overhead = EVP_AEAD_AES_GCM_TAG_LEN;
+ out->max_tag_len = EVP_AEAD_AES_GCM_TAG_LEN;
+ out->seal_scatter_supports_extra_in = 1;
+
+ out->init = aead_aes_gcm_init;
+ out->cleanup = aead_aes_gcm_cleanup;
+ out->seal_scatter = aead_aes_gcm_seal_scatter;
+ out->open_gather = aead_aes_gcm_open_gather;
+}
+
DEFINE_METHOD_FUNCTION(EVP_AEAD, EVP_aead_aes_256_gcm) {
memset(out, 0, sizeof(EVP_AEAD));
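
The new AEAD plugs into the standard |EVP_AEAD_CTX| API exactly like its 128-
and 256-bit siblings. A minimal sealing sketch (|seal_aes_192_gcm| is a
hypothetical helper; error handling is reduced to a return code):

#include <openssl/aead.h>

static int seal_aes_192_gcm(uint8_t *out, size_t *out_len, size_t max_out,
                            const uint8_t key[24], const uint8_t nonce[12],
                            const uint8_t *in, size_t in_len) {
  EVP_AEAD_CTX ctx;
  if (!EVP_AEAD_CTX_init(&ctx, EVP_aead_aes_192_gcm(), key, 24,
                         EVP_AEAD_DEFAULT_TAG_LENGTH, NULL)) {
    return 0;
  }
  // Seal with a 12-byte nonce and no additional data.
  int ok = EVP_AEAD_CTX_seal(&ctx, out, out_len, max_out, nonce, 12, in,
                             in_len, NULL, 0);
  EVP_AEAD_CTX_cleanup(&ctx);
  return ok;
}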
diff --git a/src/crypto/fipsmodule/digest/digest.c b/src/crypto/fipsmodule/digest/digest.c
index 68e81c4..a0b3bf5 100644
--- a/src/crypto/fipsmodule/digest/digest.c
+++ b/src/crypto/fipsmodule/digest/digest.c
@@ -120,6 +120,8 @@
return 0;
}
+uint32_t EVP_MD_meth_get_flags(const EVP_MD *md) { return EVP_MD_flags(md); }
+
int EVP_MD_CTX_copy_ex(EVP_MD_CTX *out, const EVP_MD_CTX *in) {
// |in->digest| may be NULL if this is a signing |EVP_MD_CTX| for, e.g.,
// Ed25519 which does not hash with |EVP_MD_CTX|.
diff --git a/src/crypto/fipsmodule/ec/ec_test.cc b/src/crypto/fipsmodule/ec/ec_test.cc
index c0ad61f..9737d31 100644
--- a/src/crypto/fipsmodule/ec/ec_test.cc
+++ b/src/crypto/fipsmodule/ec/ec_test.cc
@@ -800,7 +800,7 @@
return OBJ_nid2sn(params.param.nid);
}
-INSTANTIATE_TEST_SUITE_P(, ECCurveTest, testing::ValuesIn(AllCurves()),
+INSTANTIATE_TEST_SUITE_P(All, ECCurveTest, testing::ValuesIn(AllCurves()),
CurveToString);
static bssl::UniquePtr<EC_GROUP> GetCurve(FileTest *t, const char *key) {
diff --git a/src/crypto/fipsmodule/rand/internal.h b/src/crypto/fipsmodule/rand/internal.h
index ad75823..c7ed74d 100644
--- a/src/crypto/fipsmodule/rand/internal.h
+++ b/src/crypto/fipsmodule/rand/internal.h
@@ -26,6 +26,11 @@
#endif
+#if !defined(OPENSSL_WINDOWS) && !defined(OPENSSL_FUCHSIA) && \
+ !defined(BORINGSSL_UNSAFE_DETERMINISTIC_MODE) && !defined(OPENSSL_TRUSTY)
+#define OPENSSL_URANDOM
+#endif
+
// RAND_bytes_with_additional_data samples from the RNG after mixing 32 bytes
// from |user_additional_data| in.
void RAND_bytes_with_additional_data(uint8_t *out, size_t out_len,
@@ -35,6 +40,14 @@
// system.
void CRYPTO_sysrand(uint8_t *buf, size_t len);
+#if defined(OPENSSL_URANDOM) && defined(BORINGSSL_FIPS)
+// CRYPTO_sysrand_if_available fills |len| bytes at |buf| with entropy from the
+// operating system, if the entropy pool is initialized. If it is uninitialized,
+// it will not block and will instead fill |buf| with all zeros or early
+// /dev/urandom output.
+void CRYPTO_sysrand_if_available(uint8_t *buf, size_t len);
+#endif
+
// rand_fork_unsafe_buffering_enabled returns whether fork-unsafe buffering has
// been enabled via |RAND_enable_fork_unsafe_buffering|.
int rand_fork_unsafe_buffering_enabled(void);
diff --git a/src/crypto/fipsmodule/rand/rand.c b/src/crypto/fipsmodule/rand/rand.c
index a8ef458..60e92c5 100644
--- a/src/crypto/fipsmodule/rand/rand.c
+++ b/src/crypto/fipsmodule/rand/rand.c
@@ -179,7 +179,8 @@
#define FIPS_OVERREAD 10
uint8_t entropy[CTR_DRBG_ENTROPY_LEN * FIPS_OVERREAD];
- if (!hwrand(entropy, sizeof(entropy))) {
+ int used_hwrand = hwrand(entropy, sizeof(entropy));
+ if (!used_hwrand) {
CRYPTO_sysrand(entropy, sizeof(entropy));
}
@@ -210,6 +211,17 @@
seed[j] ^= entropy[CTR_DRBG_ENTROPY_LEN * i + j];
}
}
+
+#if defined(OPENSSL_URANDOM)
+ // If we used RDRAND, also opportunistically read from the system. This avoids
+ // solely relying on the hardware once the entropy pool has been initialized.
+ if (used_hwrand) {
+ CRYPTO_sysrand_if_available(entropy, CTR_DRBG_ENTROPY_LEN);
+ for (size_t i = 0; i < CTR_DRBG_ENTROPY_LEN; i++) {
+ seed[i] ^= entropy[i];
+ }
+ }
+#endif
}
#else
diff --git a/src/crypto/fipsmodule/rand/urandom.c b/src/crypto/fipsmodule/rand/urandom.c
index f63857f..23413ff 100644
--- a/src/crypto/fipsmodule/rand/urandom.c
+++ b/src/crypto/fipsmodule/rand/urandom.c
@@ -18,8 +18,7 @@
#include <openssl/rand.h>
-#if !defined(OPENSSL_WINDOWS) && !defined(OPENSSL_FUCHSIA) && \
- !defined(BORINGSSL_UNSAFE_DETERMINISTIC_MODE) && !defined(OPENSSL_TRUSTY)
+#if defined(OPENSSL_URANDOM)
#include <assert.h>
#include <errno.h>
@@ -133,6 +132,12 @@
// urandom_fd is a file descriptor to /dev/urandom. It's protected by |once|.
DEFINE_BSS_GET(int, urandom_fd)
+#if defined(USE_NR_getrandom)
+// getrandom_ready is one if the entropy pool was already initialized (per
+// |getrandom| with GRND_NONBLOCK) by the time |init_once| was called, and
+// zero otherwise.
+DEFINE_BSS_GET(int, getrandom_ready)
+#endif
+
DEFINE_STATIC_ONCE(rand_once)
// init_once initializes the state of this module to values previously
@@ -145,42 +150,29 @@
CRYPTO_STATIC_MUTEX_unlock_read(rand_lock_bss_get());
#if defined(USE_NR_getrandom)
+ int have_getrandom;
uint8_t dummy;
ssize_t getrandom_ret =
boringssl_getrandom(&dummy, sizeof(dummy), GRND_NONBLOCK);
-
- if (getrandom_ret == -1 && errno == EAGAIN) {
- // Attempt to get the path of the current process to aid in debugging when
- // something blocks.
- const char *current_process = "<unknown>";
-#if defined(OPENSSL_HAS_GETAUXVAL)
- const unsigned long getauxval_ret = getauxval(AT_EXECFN);
- if (getauxval_ret != 0) {
- current_process = (const char *)getauxval_ret;
- }
-#endif
-
- fprintf(stderr,
- "%s: getrandom indicates that the entropy pool has not been "
- "initialized. Rather than continue with poor entropy, this process "
- "will block until entropy is available.\n",
- current_process);
-
- getrandom_ret =
- boringssl_getrandom(&dummy, sizeof(dummy), 0 /* no flags */);
- }
-
if (getrandom_ret == 1) {
- *urandom_fd_bss_get() = kHaveGetrandom;
- return;
- }
-
- // Ignore ENOSYS and fallthrough to using /dev/urandom, below. Otherwise it's
- // a fatal error.
- if (getrandom_ret != -1 || errno != ENOSYS) {
+ *getrandom_ready_bss_get() = 1;
+ have_getrandom = 1;
+ } else if (getrandom_ret == -1 && errno == EAGAIN) {
+ // We have getrandom, but the entropy pool has not been initialized yet.
+ have_getrandom = 1;
+ } else if (getrandom_ret == -1 && errno == ENOSYS) {
+ // Fallthrough to using /dev/urandom, below.
+ have_getrandom = 0;
+ } else {
+ // Other errors are fatal.
perror("getrandom");
abort();
}
+
+ if (have_getrandom) {
+ *urandom_fd_bss_get() = kHaveGetrandom;
+ return;
+ }
#endif // USE_NR_getrandom
// Android FIPS builds must support getrandom.
@@ -214,6 +206,71 @@
}
}
+ int flags = fcntl(fd, F_GETFD);
+ if (flags == -1) {
+ // Native Client doesn't implement |fcntl|.
+ if (errno != ENOSYS) {
+ perror("failed to get flags from urandom fd");
+ abort();
+ }
+ } else {
+ flags |= FD_CLOEXEC;
+ if (fcntl(fd, F_SETFD, flags) == -1) {
+ perror("failed to set FD_CLOEXEC on urandom fd");
+ abort();
+ }
+ }
+ *urandom_fd_bss_get() = fd;
+}
+
+DEFINE_STATIC_ONCE(wait_for_entropy_once)
+
+static void wait_for_entropy(void) {
+ int fd = *urandom_fd_bss_get();
+ if (fd == kHaveGetrandom) {
+#if defined(USE_NR_getrandom)
+ if (*getrandom_ready_bss_get()) {
+ // The entropy pool was already initialized in |init_once|.
+ return;
+ }
+
+ uint8_t dummy;
+ ssize_t getrandom_ret =
+ boringssl_getrandom(&dummy, sizeof(dummy), GRND_NONBLOCK);
+ if (getrandom_ret == -1 && errno == EAGAIN) {
+ // Attempt to get the path of the current process to aid in debugging when
+ // something blocks.
+ const char *current_process = "<unknown>";
+#if defined(OPENSSL_HAS_GETAUXVAL)
+ const unsigned long getauxval_ret = getauxval(AT_EXECFN);
+ if (getauxval_ret != 0) {
+ current_process = (const char *)getauxval_ret;
+ }
+#endif
+
+ fprintf(
+ stderr,
+ "%s: getrandom indicates that the entropy pool has not been "
+ "initialized. Rather than continue with poor entropy, this process "
+ "will block until entropy is available.\n",
+ current_process);
+
+ getrandom_ret =
+ boringssl_getrandom(&dummy, sizeof(dummy), 0 /* no flags */);
+ }
+
+ if (getrandom_ret == 1) {
+ return;
+ }
+
+ perror("getrandom");
+ abort();
+#else
+ fprintf(stderr, "urandom fd corrupt.\n");
+ abort();
+#endif // USE_NR_getrandom
+ }
+
#if defined(BORINGSSL_FIPS)
// In FIPS mode we ensure that the kernel has sufficient entropy before
// continuing. This is automatically handled by getrandom, which requires
@@ -235,23 +292,7 @@
usleep(250000);
}
-#endif
-
- int flags = fcntl(fd, F_GETFD);
- if (flags == -1) {
- // Native Client doesn't implement |fcntl|.
- if (errno != ENOSYS) {
- perror("failed to get flags from urandom fd");
- abort();
- }
- } else {
- flags |= FD_CLOEXEC;
- if (fcntl(fd, F_SETFD, flags) == -1) {
- perror("failed to set FD_CLOEXEC on urandom fd");
- abort();
- }
- }
- *urandom_fd_bss_get() = fd;
+#endif // BORINGSSL_FIPS
}
void RAND_set_urandom_fd(int fd) {
@@ -289,14 +330,28 @@
}
// fill_with_entropy writes |len| bytes of entropy into |out|. It returns one
-// on success and zero on error.
-static char fill_with_entropy(uint8_t *out, size_t len) {
+// on success and zero on error. If |block| is one, this function will block
+// until the entropy pool is initialized. Otherwise, this function may fail,
+// setting |errno| to |EAGAIN| if the entropy pool has not yet been initialized.
+static int fill_with_entropy(uint8_t *out, size_t len, int block) {
+ if (len == 0) {
+ return 1;
+ }
+
+ CRYPTO_once(rand_once_bss_get(), init_once);
+ if (block) {
+ CRYPTO_once(wait_for_entropy_once_bss_get(), wait_for_entropy);
+ }
+
+ // Clear |errno| so it has a defined value if |read| or |getrandom|
+ // "successfully" returns zero.
+ errno = 0;
while (len > 0) {
ssize_t r;
if (*urandom_fd_bss_get() == kHaveGetrandom) {
#if defined(USE_NR_getrandom)
- r = boringssl_getrandom(out, len, 0 /* no flags */);
+ r = boringssl_getrandom(out, len, block ? 0 : GRND_NONBLOCK);
#else // USE_NR_getrandom
fprintf(stderr, "urandom fd corrupt.\n");
abort();
@@ -319,13 +374,7 @@
// CRYPTO_sysrand puts |requested| random bytes into |out|.
void CRYPTO_sysrand(uint8_t *out, size_t requested) {
- if (requested == 0) {
- return;
- }
-
- CRYPTO_once(rand_once_bss_get(), init_once);
-
- if (!fill_with_entropy(out, requested)) {
+ if (!fill_with_entropy(out, requested, /*block=*/1)) {
perror("entropy fill failed");
abort();
}
@@ -337,5 +386,17 @@
#endif
}
-#endif /* !OPENSSL_WINDOWS && !defined(OPENSSL_FUCHSIA) && \
- !BORINGSSL_UNSAFE_DETERMINISTIC_MODE && !OPENSSL_TRUSTY */
+#if defined(BORINGSSL_FIPS)
+void CRYPTO_sysrand_if_available(uint8_t *out, size_t requested) {
+ // Return all zeros if |fill_with_entropy| fails.
+ OPENSSL_memset(out, 0, requested);
+
+ if (!fill_with_entropy(out, requested, /*block=*/0) &&
+ errno != EAGAIN) {
+ perror("opportunistic entropy fill failed");
+ abort();
+ }
+}
+#endif // BORINGSSL_FIPS
+
+#endif // OPENSSL_URANDOM
diff --git a/src/crypto/impl_dispatch_test.cc b/src/crypto/impl_dispatch_test.cc
index 54ee704..2c3a613 100644
--- a/src/crypto/impl_dispatch_test.cc
+++ b/src/crypto/impl_dispatch_test.cc
@@ -105,12 +105,12 @@
const uint8_t kPlaintext[40] = {1, 2, 3, 4, 0};
uint8_t ciphertext[sizeof(kPlaintext) + 16];
size_t ciphertext_len;
- EVP_AEAD_CTX ctx;
- ASSERT_TRUE(EVP_AEAD_CTX_init(&ctx, EVP_aead_aes_128_gcm(), kZeros,
+ bssl::ScopedEVP_AEAD_CTX ctx;
+ ASSERT_TRUE(EVP_AEAD_CTX_init(ctx.get(), EVP_aead_aes_128_gcm(), kZeros,
sizeof(kZeros),
EVP_AEAD_DEFAULT_TAG_LENGTH, nullptr));
ASSERT_TRUE(EVP_AEAD_CTX_seal(
- &ctx, ciphertext, &ciphertext_len, sizeof(ciphertext), kZeros,
+ ctx.get(), ciphertext, &ciphertext_len, sizeof(ciphertext), kZeros,
EVP_AEAD_nonce_length(EVP_aead_aes_128_gcm()), kPlaintext,
sizeof(kPlaintext), nullptr, 0));
});
diff --git a/src/crypto/rsa_extra/rsa_test.cc b/src/crypto/rsa_extra/rsa_test.cc
index ed1630d..0fe0351 100644
--- a/src/crypto/rsa_extra/rsa_test.cc
+++ b/src/crypto/rsa_extra/rsa_test.cc
@@ -461,7 +461,7 @@
}
}
-INSTANTIATE_TEST_SUITE_P(, RSAEncryptTest,
+INSTANTIATE_TEST_SUITE_P(All, RSAEncryptTest,
testing::ValuesIn(kRSAEncryptParams));
TEST(RSATest, TestDecrypt) {
diff --git a/src/crypto/test/abi_test.cc b/src/crypto/test/abi_test.cc
index 9844c73..6b17031 100644
--- a/src/crypto/test/abi_test.cc
+++ b/src/crypto/test/abi_test.cc
@@ -48,6 +48,11 @@
#endif
#endif // X86_64 && SUPPORTS_ABI_TEST
+// FIPS mode breaks unwind tests. See https://crbug.com/boringssl/289.
+#if defined(BORINGSSL_FIPS)
+#undef SUPPORTS_UNWIND_TEST
+#endif
+
namespace abi_test {
diff --git a/src/crypto/x509/x509_test.cc b/src/crypto/x509/x509_test.cc
index 1f664b9..521d757 100644
--- a/src/crypto/x509/x509_test.cc
+++ b/src/crypto/x509/x509_test.cc
@@ -817,6 +817,117 @@
"-----END CERTIFICATE-----\n";
static const char kCommonNameNotDNS[] = "Not a DNS name";
+// The following six certificates are issued by |kSANTypesRoot| and have
+// different extended key usage values. They were created with the following
+// Go program:
+//
+// func main() {
+// block, _ := pem.Decode([]byte(rootKeyPEM))
+// rootPriv, _ := x509.ParsePKCS1PrivateKey(block.Bytes)
+// block, _ = pem.Decode([]byte(rootCertPEM))
+// root, _ := x509.ParseCertificate(block.Bytes)
+//
+// leafTemplate := &x509.Certificate{
+// SerialNumber: big.NewInt(3),
+// Subject: pkix.Name{
+// CommonName: "EKU msSGC",
+// },
+// NotBefore: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
+// NotAfter: time.Date(2099, time.January, 1, 0, 0, 0, 0, time.UTC),
+// BasicConstraintsValid: true,
+// ExtKeyUsage: []x509.ExtKeyUsage{FILL IN HERE},
+// }
+// leafKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+// leafDER, err := x509.CreateCertificate(rand.Reader, leafTemplate, root, &leafKey.PublicKey, rootPriv)
+// if err != nil {
+// panic(err)
+// }
+// pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: leafDER})
+// }
+
+static const char kMicrosoftSGCCert[] =
+ "-----BEGIN CERTIFICATE-----\n"
+ "MIIBtDCCAR2gAwIBAgIBAzANBgkqhkiG9w0BAQsFADArMRcwFQYDVQQKEw5Cb3Jp\n"
+ "bmdTU0wgVGVzdDEQMA4GA1UEAxMHUm9vdCBDQTAgFw0wMDAxMDEwMDAwMDBaGA8y\n"
+ "MDk5MDEwMTAwMDAwMFowFDESMBAGA1UEAxMJRUtVIG1zU0dDMFkwEwYHKoZIzj0C\n"
+ "AQYIKoZIzj0DAQcDQgAEEn61v3Vs+q6bTyyRnrJvuKBE8PTNVLbXGB52jig4Qse2\n"
+ "mGygNEysS0uzZ0luz+rn2hDRUFL6sHLUs1d8UMbI/6NEMEIwFQYDVR0lBA4wDAYK\n"
+ "KwYBBAGCNwoDAzAMBgNVHRMBAf8EAjAAMBsGA1UdIwQUMBKAEEA31wH7QC+4HH5U\n"
+ "BCeMWQEwDQYJKoZIhvcNAQELBQADgYEAgDQI9RSo3E3ZVnU71TV/LjG9xwHtfk6I\n"
+ "rlNnlJJ0lsTHAuMc1mwCbzhtsmasetwYlIa9G8GFWB9Gh/QqHA7G649iGGmXShqe\n"
+ "aVDuWgeSEJxBPE2jILoMm4pEYF7jfonTn7XXX6O78yuSlP+NPIU0gUKHkWZ1sWk0\n"
+ "cC4l0r/6jik=\n"
+ "-----END CERTIFICATE-----\n";
+
+static const char kNetscapeSGCCert[] =
+ "-----BEGIN CERTIFICATE-----\n"
+ "MIIBszCCARygAwIBAgIBAzANBgkqhkiG9w0BAQsFADArMRcwFQYDVQQKEw5Cb3Jp\n"
+ "bmdTU0wgVGVzdDEQMA4GA1UEAxMHUm9vdCBDQTAgFw0wMDAxMDEwMDAwMDBaGA8y\n"
+ "MDk5MDEwMTAwMDAwMFowFDESMBAGA1UEAxMJRUtVIG1zU0dDMFkwEwYHKoZIzj0C\n"
+ "AQYIKoZIzj0DAQcDQgAE3NbT+TnBfq1DWJCezjaUL52YhDU7cOkI2S2PoWgJ1v7x\n"
+ "kKLwBonUFZjppZs69SyBHeJdti+KoJ3qTW+hCG08EaNDMEEwFAYDVR0lBA0wCwYJ\n"
+ "YIZIAYb4QgQBMAwGA1UdEwEB/wQCMAAwGwYDVR0jBBQwEoAQQDfXAftAL7gcflQE\n"
+ "J4xZATANBgkqhkiG9w0BAQsFAAOBgQBuiyVcfazekHkCWksxdFmjPmMtWCxFjkzc\n"
+ "8VBxFE0CfSHQAfZ8J7tXd1FbAq/eXdZvvo8v0JB4sOM4Ex1ob1fuvDFHdSAHAD7W\n"
+ "dhKIjJyzVojoxjCjyue0XMeEPl7RiqbdxoS/R5HFAqAF0T2OeQAqP9gTpOXoau1M\n"
+ "RQHX6HQJJg==\n"
+ "-----END CERTIFICATE-----\n";
+
+static const char kServerEKUCert[] =
+ "-----BEGIN CERTIFICATE-----\n"
+ "MIIBsjCCARugAwIBAgIBAzANBgkqhkiG9w0BAQsFADArMRcwFQYDVQQKEw5Cb3Jp\n"
+ "bmdTU0wgVGVzdDEQMA4GA1UEAxMHUm9vdCBDQTAgFw0wMDAxMDEwMDAwMDBaGA8y\n"
+ "MDk5MDEwMTAwMDAwMFowFDESMBAGA1UEAxMJRUtVIG1zU0dDMFkwEwYHKoZIzj0C\n"
+ "AQYIKoZIzj0DAQcDQgAEDd35i+VWPwIOKLrLWTuP5cqD+yJDB5nujEzPgkXP5LKJ\n"
+ "SZRbHTqTdpYZB2jy6y90RY2Bsjx7FfZ7nN5G2g1GOKNCMEAwEwYDVR0lBAwwCgYI\n"
+ "KwYBBQUHAwEwDAYDVR0TAQH/BAIwADAbBgNVHSMEFDASgBBAN9cB+0AvuBx+VAQn\n"
+ "jFkBMA0GCSqGSIb3DQEBCwUAA4GBAIKmbMBjuivL/rxDu7u7Vr3o3cdmEggBJxwL\n"
+ "iatNW3x1wg0645aNYOktW/iQ7mAAiziTY73GFyfiJDWqnY+CwA94ZWyQidjHdN/I\n"
+ "6BR52sN/dkYEoInYEbmDNMc/if+T0yqeBQLP4BeKLiT8p0qqaimae6LgibS19hDP\n"
+ "2hoEMdz2\n"
+ "-----END CERTIFICATE-----\n";
+
+static const char kServerEKUPlusMicrosoftSGCCert[] =
+ "-----BEGIN CERTIFICATE-----\n"
+ "MIIBvjCCASegAwIBAgIBAzANBgkqhkiG9w0BAQsFADArMRcwFQYDVQQKEw5Cb3Jp\n"
+ "bmdTU0wgVGVzdDEQMA4GA1UEAxMHUm9vdCBDQTAgFw0wMDAxMDEwMDAwMDBaGA8y\n"
+ "MDk5MDEwMTAwMDAwMFowFDESMBAGA1UEAxMJRUtVIG1zU0dDMFkwEwYHKoZIzj0C\n"
+ "AQYIKoZIzj0DAQcDQgAEDO1MYPxq+U4oXMIK8UnsS4C696wpcu4UOmcMJJ5CUd5Z\n"
+ "ZpJShN6kYKnrb3GK/6xEgbUGntmrzSRG5FYqk6QgD6NOMEwwHwYDVR0lBBgwFgYI\n"
+ "KwYBBQUHAwEGCisGAQQBgjcKAwMwDAYDVR0TAQH/BAIwADAbBgNVHSMEFDASgBBA\n"
+ "N9cB+0AvuBx+VAQnjFkBMA0GCSqGSIb3DQEBCwUAA4GBAHOu2IBa4lHzVGS36HxS\n"
+ "SejUE87Ji1ysM6BgkYbfxfS9MuV+J3UnqH57JjbH/3CFl4ZDWceF6SGBSCn8LqKa\n"
+ "KHpwoNFU3zA99iQzVJgbUyN0PbKwHEanLyKDJZyFk71R39ToxhSNQgaQYjZYCy1H\n"
+ "5V9oXd1bodEqVsOZ/mur24Ku\n"
+ "-----END CERTIFICATE-----\n";
+
+static const char kAnyEKU[] =
+ "-----BEGIN CERTIFICATE-----\n"
+ "MIIBrjCCARegAwIBAgIBAzANBgkqhkiG9w0BAQsFADArMRcwFQYDVQQKEw5Cb3Jp\n"
+ "bmdTU0wgVGVzdDEQMA4GA1UEAxMHUm9vdCBDQTAgFw0wMDAxMDEwMDAwMDBaGA8y\n"
+ "MDk5MDEwMTAwMDAwMFowFDESMBAGA1UEAxMJRUtVIG1zU0dDMFkwEwYHKoZIzj0C\n"
+ "AQYIKoZIzj0DAQcDQgAE9nsLABDporlTvx1OBUc4Hd5vxfX+8nS/OhbHmKtFLYNu\n"
+ "1CLLrImbwMQYD2G+PgLO6sQHmASq2jmJKp6ZWsRkTqM+MDwwDwYDVR0lBAgwBgYE\n"
+ "VR0lADAMBgNVHRMBAf8EAjAAMBsGA1UdIwQUMBKAEEA31wH7QC+4HH5UBCeMWQEw\n"
+ "DQYJKoZIhvcNAQELBQADgYEAxgjgn1SAzQ+2GeCicZ5ndvVhKIeFelGCQ989XTVq\n"
+ "uUbAYBW6v8GXNuVzoXYxDgNSanF6U+w+INrJ6daKVrIxAxdk9QFgBXqJoupuRAA3\n"
+ "/OqnmYux0EqOTLbTK1P8DhaiaD0KV6dWGUwzqsgBmPkZ0lgNaPjvb1mKV3jhBkjz\n"
+ "L6A=\n"
+ "-----END CERTIFICATE-----\n";
+
+static const char kNoEKU[] =
+ "-----BEGIN CERTIFICATE-----\n"
+ "MIIBnTCCAQagAwIBAgIBAzANBgkqhkiG9w0BAQsFADArMRcwFQYDVQQKEw5Cb3Jp\n"
+ "bmdTU0wgVGVzdDEQMA4GA1UEAxMHUm9vdCBDQTAgFw0wMDAxMDEwMDAwMDBaGA8y\n"
+ "MDk5MDEwMTAwMDAwMFowFDESMBAGA1UEAxMJRUtVIG1zU0dDMFkwEwYHKoZIzj0C\n"
+ "AQYIKoZIzj0DAQcDQgAEpSFSqbYY86ZcMamE606dqdyjWlwhSHKOLUFsUUIzkMPz\n"
+ "KHRu/x3Yzi8+Hm8eFK/TnCbkpYsYw4hIw00176dYzaMtMCswDAYDVR0TAQH/BAIw\n"
+ "ADAbBgNVHSMEFDASgBBAN9cB+0AvuBx+VAQnjFkBMA0GCSqGSIb3DQEBCwUAA4GB\n"
+ "AHvYzynIkjLThExHRS+385hfv4vgrQSMmCM1SAnEIjSBGsU7RPgiGAstN06XivuF\n"
+ "T1fNugRmTu4OtOIbfdYkcjavJufw9hR9zWTt77CNMTy9XmOZLgdS5boFTtLCztr3\n"
+ "TXHOSQQD8Dl4BK0wOet+TP6LBEjHlRFjAqK4bu9xpxV2\n"
+ "-----END CERTIFICATE-----\n";
+
// CertFromPEM parses the given, NUL-terminated pem block and returns an
// |X509*|.
static bssl::UniquePtr<X509> CertFromPEM(const char *pem) {
@@ -2073,3 +2184,45 @@
EXPECT_EQ(X509_V_ERR_HOSTNAME_MISMATCH,
verify_cert(not_dns.get(), 0 /* no flags */, kCommonNameNotDNS));
}
+
+TEST(X509Test, ServerGatedCryptoEKUs) {
+ bssl::UniquePtr<X509> root = CertFromPEM(kSANTypesRoot);
+ ASSERT_TRUE(root);
+ bssl::UniquePtr<X509> ms_sgc = CertFromPEM(kMicrosoftSGCCert);
+ ASSERT_TRUE(ms_sgc);
+ bssl::UniquePtr<X509> ns_sgc = CertFromPEM(kNetscapeSGCCert);
+ ASSERT_TRUE(ns_sgc);
+ bssl::UniquePtr<X509> server_eku = CertFromPEM(kServerEKUCert);
+ ASSERT_TRUE(server_eku);
+ bssl::UniquePtr<X509> server_eku_plus_ms_sgc =
+ CertFromPEM(kServerEKUPlusMicrosoftSGCCert);
+ ASSERT_TRUE(server_eku_plus_ms_sgc);
+ bssl::UniquePtr<X509> any_eku = CertFromPEM(kAnyEKU);
+ ASSERT_TRUE(any_eku);
+ bssl::UniquePtr<X509> no_eku = CertFromPEM(kNoEKU);
+ ASSERT_TRUE(no_eku);
+
+ auto verify_cert = [&root](X509 *leaf) {
+ return Verify(leaf, {root.get()}, /*intermediates=*/{}, /*crls=*/{},
+ /*flags=*/0, /*use_additional_untrusted=*/false,
+ [&](X509_VERIFY_PARAM *param) {
+ ASSERT_TRUE(X509_VERIFY_PARAM_set_purpose(
+ param, X509_PURPOSE_SSL_SERVER));
+ });
+ };
+
+ // Neither the Microsoft nor Netscape SGC EKU should be sufficient for
+  // |X509_PURPOSE_SSL_SERVER|. The "any" EKU arguably should be, but we have
+  // never accepted it, and the Baseline Requirements do not allow it in leaf
+  // certificates, so we avoid that complexity.
+ for (X509 *leaf : {ms_sgc.get(), ns_sgc.get(), any_eku.get()}) {
+ EXPECT_EQ(X509_V_ERR_INVALID_PURPOSE, verify_cert(leaf));
+ }
+
+ // The server-auth EKU is sufficient, and it doesn't matter if an SGC EKU is
+  // also included. Omitting the EKU extension entirely is likewise valid.
+ for (X509 *leaf : {server_eku.get(), server_eku_plus_ms_sgc.get(),
+ no_eku.get()}) {
+ EXPECT_EQ(X509_V_OK, verify_cert(leaf));
+ }
+}
diff --git a/src/crypto/x509v3/v3_alt.c b/src/crypto/x509v3/v3_alt.c
index 5a4fadf..74e05bf 100644
--- a/src/crypto/x509v3/v3_alt.c
+++ b/src/crypto/x509v3/v3_alt.c
@@ -210,15 +210,18 @@
break;
case GEN_EMAIL:
- BIO_printf(out, "email:%s", gen->d.ia5->data);
+ BIO_printf(out, "email:");
+ ASN1_STRING_print(out, gen->d.ia5);
break;
case GEN_DNS:
- BIO_printf(out, "DNS:%s", gen->d.ia5->data);
+ BIO_printf(out, "DNS:");
+ ASN1_STRING_print(out, gen->d.ia5);
break;
case GEN_URI:
- BIO_printf(out, "URI:%s", gen->d.ia5->data);
+ BIO_printf(out, "URI:");
+ ASN1_STRING_print(out, gen->d.ia5);
break;
case GEN_DIRNAME:
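
The motivation for the |ASN1_STRING_print| switch: an |ASN1_STRING|'s payload
is length-delimited, not NUL-terminated, so formatting it with %s both assumes
a terminator the type does not guarantee and stops early at an embedded NUL. A
length-bounded sketch of the idea (|print_ia5| is hypothetical;
|ASN1_STRING_print| additionally filters non-printable bytes):

#include <openssl/asn1.h>
#include <openssl/bio.h>

static void print_ia5(BIO *out, const ASN1_STRING *str) {
  // Write exactly |length| bytes; never trust NUL termination.
  BIO_write(out, ASN1_STRING_get0_data(str), ASN1_STRING_length(str));
}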
diff --git a/src/crypto/x509v3/v3_purp.c b/src/crypto/x509v3/v3_purp.c
index 5e1f641..25768c0 100644
--- a/src/crypto/x509v3/v3_purp.c
+++ b/src/crypto/x509v3/v3_purp.c
@@ -611,7 +611,7 @@
static int check_purpose_ssl_server(const X509_PURPOSE *xp, const X509 *x,
int ca)
{
- if (xku_reject(x, XKU_SSL_SERVER | XKU_SGC))
+ if (xku_reject(x, XKU_SSL_SERVER))
return 0;
if (ca)
return check_ca(x);