author    Uros Majstorovic <majstor@majstor.org>  2022-01-31 22:44:00 +0100
committer Uros Majstorovic <majstor@majstor.org>  2022-01-31 22:44:00 +0100
commit    74a34dfce2f4fd622229a9caf0c3cfcc7bc68ced (patch)
tree      180dda5f23a8cc84746643dc125ba19924357c4d
parent    76be7c9a79da62e0a52d932bdfdf56671c82e33d (diff)
added libressl
-rw-r--r--  crypto/Makefile | 13
-rw-r--r--  crypto/aes/Makefile | 15
-rw-r--r--  crypto/blowfish/Makefile | 15
-rw-r--r--  crypto/crypto_obj.mk | 3
-rw-r--r--  crypto/libressl/Makefile | 15
-rw-r--r--  crypto/libressl/README | 1
-rw-r--r--  crypto/libressl/crypto/aead/Makefile | 13
-rw-r--r--  crypto/libressl/crypto/aead/e_chacha20poly1305.c | 395
-rw-r--r--  crypto/libressl/crypto/chacha/Makefile | 13
-rw-r--r--  crypto/libressl/crypto/chacha/chacha-merged.c | 325
-rw-r--r--  crypto/libressl/crypto/chacha/chacha.c | 87
-rw-r--r--  crypto/libressl/crypto/compat/Makefile | 13
-rw-r--r--  crypto/libressl/crypto/compat/arc4random.c | 216
-rw-r--r--  crypto/libressl/crypto/compat/arc4random.h | 41
-rw-r--r--  crypto/libressl/crypto/compat/arc4random_aix.h | 81
-rw-r--r--  crypto/libressl/crypto/compat/arc4random_fe310.h | 27
-rw-r--r--  crypto/libressl/crypto/compat/arc4random_freebsd.h | 87
-rw-r--r--  crypto/libressl/crypto/compat/arc4random_hpux.h | 81
-rw-r--r--  crypto/libressl/crypto/compat/arc4random_linux.h | 88
-rw-r--r--  crypto/libressl/crypto/compat/arc4random_netbsd.h | 87
-rw-r--r--  crypto/libressl/crypto/compat/arc4random_osx.h | 81
-rw-r--r--  crypto/libressl/crypto/compat/arc4random_solaris.h | 81
-rw-r--r--  crypto/libressl/crypto/compat/arc4random_uniform.c | 56
-rw-r--r--  crypto/libressl/crypto/compat/arc4random_win.h | 78
-rw-r--r--  crypto/libressl/crypto/compat/chacha_private.h | 222
-rw-r--r--  crypto/libressl/crypto/compat/explicit_bzero.c | 19
-rw-r--r--  crypto/libressl/crypto/compat/explicit_bzero_win.c | 13
-rw-r--r--  crypto/libressl/crypto/compat/getentropy_aix.c | 402
-rw-r--r--  crypto/libressl/crypto/compat/getentropy_freebsd.c | 60
-rw-r--r--  crypto/libressl/crypto/compat/getentropy_hpux.c | 396
-rw-r--r--  crypto/libressl/crypto/compat/getentropy_linux.c | 525
-rw-r--r--  crypto/libressl/crypto/compat/getentropy_netbsd.c | 62
-rw-r--r--  crypto/libressl/crypto/compat/getentropy_osx.c | 417
-rw-r--r--  crypto/libressl/crypto/compat/getentropy_solaris.c | 422
-rw-r--r--  crypto/libressl/crypto/compat/getentropy_win.c | 50
-rw-r--r--  crypto/libressl/crypto/compat/timingsafe_bcmp.c | 29
-rw-r--r--  crypto/libressl/crypto/compat/timingsafe_memcmp.c | 46
-rw-r--r--  crypto/libressl/crypto/curve25519/Makefile | 14
-rw-r--r--  crypto/libressl/crypto/curve25519/curve25519-generic.c | 34
-rw-r--r--  crypto/libressl/crypto/curve25519/curve25519.c | 4935
-rw-r--r--  crypto/libressl/crypto/curve25519/curve25519_internal.h | 99
-rw-r--r--  crypto/libressl/crypto/poly1305/Makefile | 13
-rw-r--r--  crypto/libressl/crypto/poly1305/poly1305-donna.c | 321
-rw-r--r--  crypto/libressl/crypto/poly1305/poly1305.c | 38
-rw-r--r--  crypto/libressl/crypto/sha/Makefile | 14
-rw-r--r--  crypto/libressl/crypto/sha/sha1-elf-armv4.S | 455
-rw-r--r--  crypto/libressl/crypto/sha/sha1-elf-x86_64.S | 2491
-rw-r--r--  crypto/libressl/crypto/sha/sha1-macosx-x86_64.S | 2488
-rw-r--r--  crypto/libressl/crypto/sha/sha1-masm-x86_64.S | 2746
-rw-r--r--  crypto/libressl/crypto/sha/sha1-mingw64-x86_64.S | 2664
-rw-r--r--  crypto/libressl/crypto/sha/sha1_one.c | 81
-rw-r--r--  crypto/libressl/crypto/sha/sha1dgst.c | 72
-rw-r--r--  crypto/libressl/crypto/sha/sha256-elf-armv4.S | 1520
-rw-r--r--  crypto/libressl/crypto/sha/sha256-elf-x86_64.S | 1782
-rw-r--r--  crypto/libressl/crypto/sha/sha256-macosx-x86_64.S | 1779
-rw-r--r--  crypto/libressl/crypto/sha/sha256-masm-x86_64.S | 1864
-rw-r--r--  crypto/libressl/crypto/sha/sha256-mingw64-x86_64.S | 1790
-rw-r--r--  crypto/libressl/crypto/sha/sha256.c | 284
-rw-r--r--  crypto/libressl/crypto/sha/sha512-elf-armv4.S | 1786
-rw-r--r--  crypto/libressl/crypto/sha/sha512-elf-x86_64.S | 1806
-rw-r--r--  crypto/libressl/crypto/sha/sha512-macosx-x86_64.S | 1803
-rw-r--r--  crypto/libressl/crypto/sha/sha512-masm-x86_64.S | 1888
-rw-r--r--  crypto/libressl/crypto/sha/sha512-mingw64-x86_64.S | 1814
-rw-r--r--  crypto/libressl/crypto/sha/sha512.c | 547
-rw-r--r--  crypto/libressl/crypto/sha/sha_locl.h | 419
-rw-r--r--  crypto/libressl/include/compat/string.h | 87
-rw-r--r--  crypto/libressl/include/compat/unistd.h | 78
-rw-r--r--  crypto/libressl/include/md32_common.h | 345
-rw-r--r--  crypto/libressl/include/openssl/chacha.h | 58
-rw-r--r--  crypto/libressl/include/openssl/curve25519.h | 77
-rw-r--r--  crypto/libressl/include/openssl/opensslconf.h | 153
-rw-r--r--  crypto/libressl/include/openssl/opensslfeatures.h | 120
-rw-r--r--  crypto/libressl/include/openssl/opensslv.h | 18
-rw-r--r--  crypto/libressl/include/openssl/poly1305.h | 49
-rw-r--r--  crypto/libressl/include/openssl/sha.h | 192
-rw-r--r--  crypto/libressl/ssl_common.mk | 7
-rw-r--r--  crypto/libressl/ssl_obj.mk | 8
-rw-r--r--  crypto/sha/Makefile | 15
78 files changed, 41329 insertions, 0 deletions
diff --git a/crypto/Makefile b/crypto/Makefile
new file mode 100644
index 0000000..1168470
--- /dev/null
+++ b/crypto/Makefile
@@ -0,0 +1,13 @@
+include common.mk
+include crypto_obj.mk
+
+all:
+ for i in $(subdirs); do \
+ (cd $$i && $(MAKE)) || exit; \
+ done
+
+clean:
+ for i in $(subdirs); do \
+ (cd $$i && $(MAKE) clean) || exit; \
+ done
+ rm -f *.o *.a
diff --git a/crypto/aes/Makefile b/crypto/aes/Makefile
new file mode 100644
index 0000000..463aed3
--- /dev/null
+++ b/crypto/aes/Makefile
@@ -0,0 +1,15 @@
+include common.mk
+
+obj = aes.o
+
+
+%.o: %.c %.h
+ $(CC) $(CFLAGS) -c $<
+
+%.o: %.S
+ $(CC) $(CFLAGS) -c $<
+
+all: $(obj)
+
+clean:
+ rm -f *.o
diff --git a/crypto/blowfish/Makefile b/crypto/blowfish/Makefile
new file mode 100644
index 0000000..fff41a0
--- /dev/null
+++ b/crypto/blowfish/Makefile
@@ -0,0 +1,15 @@
+include common.mk
+
+obj = blowfish.o
+
+
+%.o: %.c %.h
+ $(CC) $(CFLAGS) -c $<
+
+%.o: %.S
+ $(CC) $(CFLAGS) -c $<
+
+all: $(obj)
+
+clean:
+ rm -f *.o
diff --git a/crypto/crypto_obj.mk b/crypto/crypto_obj.mk
new file mode 100644
index 0000000..54c7092
--- /dev/null
+++ b/crypto/crypto_obj.mk
@@ -0,0 +1,3 @@
+obj_dep = aes/aes.o blowfish/blowfish.o sha/sha1.o
+
+subdirs = aes blowfish sha
diff --git a/crypto/libressl/Makefile b/crypto/libressl/Makefile
new file mode 100644
index 0000000..60d9ce6
--- /dev/null
+++ b/crypto/libressl/Makefile
@@ -0,0 +1,15 @@
+pwd := $(abspath $(dir $(firstword $(MAKEFILE_LIST))))
+MAKEFLAGS += -I$(pwd)
+
+include ssl_obj.mk
+
+all:
+ for i in $(subdirs); do \
+ (cd $$i && $(MAKE)) || exit; \
+ done
+
+clean:
+ for i in $(subdirs); do \
+ (cd $$i && $(MAKE) clean) || exit; \
+ done
+ rm -f *.o *.a
diff --git a/crypto/libressl/README b/crypto/libressl/README
new file mode 100644
index 0000000..43f2397
--- /dev/null
+++ b/crypto/libressl/README
@@ -0,0 +1 @@
+Extracted from libressl 3.4.2
diff --git a/crypto/libressl/crypto/aead/Makefile b/crypto/libressl/crypto/aead/Makefile
new file mode 100644
index 0000000..2148699
--- /dev/null
+++ b/crypto/libressl/crypto/aead/Makefile
@@ -0,0 +1,13 @@
+include ssl_common.mk
+
+obj = e_chacha20poly1305.o
+
+
+all: $(obj)
+dep: all
+
+%.o: %.c
+ $(CC) $(CFLAGS) -c $<
+
+clean:
+ rm -f *.o *.a
diff --git a/crypto/libressl/crypto/aead/e_chacha20poly1305.c b/crypto/libressl/crypto/aead/e_chacha20poly1305.c
new file mode 100644
index 0000000..9d8291e
--- /dev/null
+++ b/crypto/libressl/crypto/aead/e_chacha20poly1305.c
@@ -0,0 +1,395 @@
+/* $OpenBSD: e_chacha20poly1305.c,v 1.21 2019/03/27 15:34:01 jsing Exp $ */
+
+/*
+ * Copyright (c) 2015 Reyk Floter <reyk@openbsd.org>
+ * Copyright (c) 2014, Google Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+#include <openssl/opensslconf.h>
+
+#if !defined(OPENSSL_NO_CHACHA) && !defined(OPENSSL_NO_POLY1305)
+
+//#include <openssl/err.h>
+//#include <openssl/evp.h>
+#include <openssl/chacha.h>
+#include <openssl/poly1305.h>
+
+//#include "evp_locl.h"
+#define EVPerror(X) ;
+
+#define POLY1305_TAG_LEN 16
+
+#define CHACHA20_CONSTANT_LEN 4
+#define CHACHA20_IV_LEN 8
+#define CHACHA20_NONCE_LEN (CHACHA20_CONSTANT_LEN + CHACHA20_IV_LEN)
+#define XCHACHA20_NONCE_LEN 24
+
+#if 0
+struct aead_chacha20_poly1305_ctx {
+ unsigned char key[32];
+ unsigned char tag_len;
+};
+
+static int
+aead_chacha20_poly1305_init(EVP_AEAD_CTX *ctx, const unsigned char *key,
+ size_t key_len, size_t tag_len)
+{
+ struct aead_chacha20_poly1305_ctx *c20_ctx;
+
+ if (tag_len == 0)
+ tag_len = POLY1305_TAG_LEN;
+
+ if (tag_len > POLY1305_TAG_LEN) {
+ EVPerror(EVP_R_TOO_LARGE);
+ return 0;
+ }
+
+ /* Internal error - EVP_AEAD_CTX_init should catch this. */
+ if (key_len != sizeof(c20_ctx->key))
+ return 0;
+
+ c20_ctx = malloc(sizeof(struct aead_chacha20_poly1305_ctx));
+ if (c20_ctx == NULL)
+ return 0;
+
+ memcpy(&c20_ctx->key[0], key, key_len);
+ c20_ctx->tag_len = tag_len;
+ ctx->aead_state = c20_ctx;
+
+ return 1;
+}
+
+static void
+aead_chacha20_poly1305_cleanup(EVP_AEAD_CTX *ctx)
+{
+ struct aead_chacha20_poly1305_ctx *c20_ctx = ctx->aead_state;
+
+ freezero(c20_ctx, sizeof(*c20_ctx));
+}
+
+#endif
+
+static void
+poly1305_update_with_length(poly1305_state *poly1305,
+ const unsigned char *data, size_t data_len)
+{
+ size_t j = data_len;
+ unsigned char length_bytes[8];
+ unsigned i;
+
+ for (i = 0; i < sizeof(length_bytes); i++) {
+ length_bytes[i] = j;
+ j >>= 8;
+ }
+
+ if (data != NULL)
+ CRYPTO_poly1305_update(poly1305, data, data_len);
+ CRYPTO_poly1305_update(poly1305, length_bytes, sizeof(length_bytes));
+}
+
+static void
+poly1305_update_with_pad16(poly1305_state *poly1305,
+ const unsigned char *data, size_t data_len)
+{
+ static const unsigned char zero_pad16[16];
+ size_t pad_len;
+
+ CRYPTO_poly1305_update(poly1305, data, data_len);
+
+ /* pad16() is defined in RFC 7539 2.8.1. */
+ if ((pad_len = data_len % 16) == 0)
+ return;
+
+ CRYPTO_poly1305_update(poly1305, zero_pad16, 16 - pad_len);
+}
+
+int
+aead_chacha20_poly1305_seal(unsigned char key[32], unsigned char tag_len,
+ unsigned char *out, size_t *out_len, size_t max_out_len,
+ const unsigned char *nonce, size_t nonce_len,
+ const unsigned char *in, size_t in_len,
+ const unsigned char *ad, size_t ad_len)
+{
+ unsigned char poly1305_key[32];
+ poly1305_state poly1305;
+ const unsigned char *iv;
+ const uint64_t in_len_64 = in_len;
+ uint64_t ctr;
+
+ /* The underlying ChaCha implementation may not overflow the block
+ * counter into the second counter word. Therefore we disallow
+ * individual operations that work on more than 2TB at a time.
+ * in_len_64 is needed because, on 32-bit platforms, size_t is only
+ * 32-bits and this produces a warning because it's always false.
+ * Casting to uint64_t inside the conditional is not sufficient to stop
+ * the warning. */
+ if (in_len_64 >= (1ULL << 32) * 64 - 64) {
+ EVPerror(EVP_R_TOO_LARGE);
+ return 0;
+ }
+
+ if (max_out_len < in_len + tag_len) {
+ EVPerror(EVP_R_BUFFER_TOO_SMALL);
+ return 0;
+ }
+
+ if (nonce_len != CHACHA20_NONCE_LEN) {
+ EVPerror(EVP_R_IV_TOO_LARGE);
+ return 0;
+ }
+
+ ctr = (uint64_t)((uint32_t)(nonce[0]) | (uint32_t)(nonce[1]) << 8 |
+ (uint32_t)(nonce[2]) << 16 | (uint32_t)(nonce[3]) << 24) << 32;
+ iv = nonce + CHACHA20_CONSTANT_LEN;
+
+ memset(poly1305_key, 0, sizeof(poly1305_key));
+ CRYPTO_chacha_20(poly1305_key, poly1305_key,
+ sizeof(poly1305_key), key, iv, ctr);
+
+ CRYPTO_poly1305_init(&poly1305, poly1305_key);
+ poly1305_update_with_pad16(&poly1305, ad, ad_len);
+ CRYPTO_chacha_20(out, in, in_len, key, iv, ctr + 1);
+ poly1305_update_with_pad16(&poly1305, out, in_len);
+ poly1305_update_with_length(&poly1305, NULL, ad_len);
+ poly1305_update_with_length(&poly1305, NULL, in_len);
+
+ if (tag_len != POLY1305_TAG_LEN) {
+ unsigned char tag[POLY1305_TAG_LEN];
+ CRYPTO_poly1305_finish(&poly1305, tag);
+ memcpy(out + in_len, tag, tag_len);
+ *out_len = in_len + tag_len;
+ return 1;
+ }
+
+ CRYPTO_poly1305_finish(&poly1305, out + in_len);
+ *out_len = in_len + POLY1305_TAG_LEN;
+ return 1;
+}
+
+int
+aead_chacha20_poly1305_open(unsigned char key[32], unsigned char tag_len,
+ unsigned char *out, size_t *out_len, size_t max_out_len,
+ const unsigned char *nonce, size_t nonce_len,
+ const unsigned char *in, size_t in_len,
+ const unsigned char *ad, size_t ad_len)
+{
+ unsigned char mac[POLY1305_TAG_LEN];
+ unsigned char poly1305_key[32];
+ const unsigned char *iv = nonce;
+ poly1305_state poly1305;
+ const uint64_t in_len_64 = in_len;
+ size_t plaintext_len;
+ uint64_t ctr = 0;
+
+ if (in_len < tag_len) {
+ EVPerror(EVP_R_BAD_DECRYPT);
+ return 0;
+ }
+
+ /* The underlying ChaCha implementation may not overflow the block
+ * counter into the second counter word. Therefore we disallow
+ * individual operations that work on more than 2TB at a time.
+ * in_len_64 is needed because, on 32-bit platforms, size_t is only
+ * 32-bits and this produces a warning because it's always false.
+ * Casting to uint64_t inside the conditional is not sufficient to stop
+ * the warning. */
+ if (in_len_64 >= (1ULL << 32) * 64 - 64) {
+ EVPerror(EVP_R_TOO_LARGE);
+ return 0;
+ }
+
+ if (nonce_len != CHACHA20_NONCE_LEN) {
+ EVPerror(EVP_R_IV_TOO_LARGE);
+ return 0;
+ }
+
+ plaintext_len = in_len - tag_len;
+
+ if (max_out_len < plaintext_len) {
+ EVPerror(EVP_R_BUFFER_TOO_SMALL);
+ return 0;
+ }
+
+ ctr = (uint64_t)((uint32_t)(nonce[0]) | (uint32_t)(nonce[1]) << 8 |
+ (uint32_t)(nonce[2]) << 16 | (uint32_t)(nonce[3]) << 24) << 32;
+ iv = nonce + CHACHA20_CONSTANT_LEN;
+
+ memset(poly1305_key, 0, sizeof(poly1305_key));
+ CRYPTO_chacha_20(poly1305_key, poly1305_key,
+ sizeof(poly1305_key), key, iv, ctr);
+
+ CRYPTO_poly1305_init(&poly1305, poly1305_key);
+ poly1305_update_with_pad16(&poly1305, ad, ad_len);
+ poly1305_update_with_pad16(&poly1305, in, plaintext_len);
+ poly1305_update_with_length(&poly1305, NULL, ad_len);
+ poly1305_update_with_length(&poly1305, NULL, plaintext_len);
+
+ CRYPTO_poly1305_finish(&poly1305, mac);
+
+ if (timingsafe_memcmp(mac, in + plaintext_len, tag_len) != 0) {
+ EVPerror(EVP_R_BAD_DECRYPT);
+ return 0;
+ }
+
+ CRYPTO_chacha_20(out, in, plaintext_len, key, iv, ctr + 1);
+ *out_len = plaintext_len;
+ return 1;
+}
+
+int
+aead_xchacha20_poly1305_seal(unsigned char key[32], unsigned char tag_len,
+ unsigned char *out, size_t *out_len, size_t max_out_len,
+ const unsigned char *nonce, size_t nonce_len,
+ const unsigned char *in, size_t in_len,
+ const unsigned char *ad, size_t ad_len)
+{
+ unsigned char poly1305_key[32];
+ unsigned char subkey[32];
+ poly1305_state poly1305;
+
+ if (max_out_len < in_len + tag_len) {
+ EVPerror(EVP_R_BUFFER_TOO_SMALL);
+ return 0;
+ }
+
+ if (nonce_len != XCHACHA20_NONCE_LEN) {
+ EVPerror(EVP_R_IV_TOO_LARGE);
+ return 0;
+ }
+
+ CRYPTO_hchacha_20(subkey, key, nonce);
+
+ CRYPTO_chacha_20(out, in, in_len, subkey, nonce + 16, 1);
+
+ memset(poly1305_key, 0, sizeof(poly1305_key));
+ CRYPTO_chacha_20(poly1305_key, poly1305_key, sizeof(poly1305_key),
+ subkey, nonce + 16, 0);
+
+ CRYPTO_poly1305_init(&poly1305, poly1305_key);
+ poly1305_update_with_pad16(&poly1305, ad, ad_len);
+ poly1305_update_with_pad16(&poly1305, out, in_len);
+ poly1305_update_with_length(&poly1305, NULL, ad_len);
+ poly1305_update_with_length(&poly1305, NULL, in_len);
+
+ if (tag_len != POLY1305_TAG_LEN) {
+ unsigned char tag[POLY1305_TAG_LEN];
+ CRYPTO_poly1305_finish(&poly1305, tag);
+ memcpy(out + in_len, tag, tag_len);
+ *out_len = in_len + tag_len;
+ return 1;
+ }
+
+ CRYPTO_poly1305_finish(&poly1305, out + in_len);
+ *out_len = in_len + POLY1305_TAG_LEN;
+ return 1;
+}
+
+int
+aead_xchacha20_poly1305_open(unsigned char key[32], unsigned char tag_len,
+ unsigned char *out, size_t *out_len, size_t max_out_len,
+ const unsigned char *nonce, size_t nonce_len,
+ const unsigned char *in, size_t in_len,
+ const unsigned char *ad, size_t ad_len)
+{
+ unsigned char mac[POLY1305_TAG_LEN];
+ unsigned char poly1305_key[32];
+ unsigned char subkey[32];
+ poly1305_state poly1305;
+ size_t plaintext_len;
+
+ if (in_len < tag_len) {
+ EVPerror(EVP_R_BAD_DECRYPT);
+ return 0;
+ }
+
+ if (nonce_len != XCHACHA20_NONCE_LEN) {
+ EVPerror(EVP_R_IV_TOO_LARGE);
+ return 0;
+ }
+
+ plaintext_len = in_len - tag_len;
+
+ if (max_out_len < plaintext_len) {
+ EVPerror(EVP_R_BUFFER_TOO_SMALL);
+ return 0;
+ }
+
+ CRYPTO_hchacha_20(subkey, key, nonce);
+
+ memset(poly1305_key, 0, sizeof(poly1305_key));
+ CRYPTO_chacha_20(poly1305_key, poly1305_key, sizeof(poly1305_key),
+ subkey, nonce + 16, 0);
+
+ CRYPTO_poly1305_init(&poly1305, poly1305_key);
+ poly1305_update_with_pad16(&poly1305, ad, ad_len);
+ poly1305_update_with_pad16(&poly1305, in, plaintext_len);
+ poly1305_update_with_length(&poly1305, NULL, ad_len);
+ poly1305_update_with_length(&poly1305, NULL, plaintext_len);
+
+ CRYPTO_poly1305_finish(&poly1305, mac);
+ if (timingsafe_memcmp(mac, in + plaintext_len, tag_len) != 0) {
+ EVPerror(EVP_R_BAD_DECRYPT);
+ return 0;
+ }
+
+ CRYPTO_chacha_20(out, in, plaintext_len, subkey, nonce + 16, 1);
+
+ *out_len = plaintext_len;
+ return 1;
+}
+
+#if 0
+/* RFC 7539 */
+static const EVP_AEAD aead_chacha20_poly1305 = {
+ .key_len = 32,
+ .nonce_len = CHACHA20_NONCE_LEN,
+ .overhead = POLY1305_TAG_LEN,
+ .max_tag_len = POLY1305_TAG_LEN,
+
+ .init = aead_chacha20_poly1305_init,
+ .cleanup = aead_chacha20_poly1305_cleanup,
+ .seal = aead_chacha20_poly1305_seal,
+ .open = aead_chacha20_poly1305_open,
+};
+
+const EVP_AEAD *
+EVP_aead_chacha20_poly1305()
+{
+ return &aead_chacha20_poly1305;
+}
+
+static const EVP_AEAD aead_xchacha20_poly1305 = {
+ .key_len = 32,
+ .nonce_len = XCHACHA20_NONCE_LEN,
+ .overhead = POLY1305_TAG_LEN,
+ .max_tag_len = POLY1305_TAG_LEN,
+
+ .init = aead_chacha20_poly1305_init,
+ .cleanup = aead_chacha20_poly1305_cleanup,
+ .seal = aead_xchacha20_poly1305_seal,
+ .open = aead_xchacha20_poly1305_open,
+};
+
+const EVP_AEAD *
+EVP_aead_xchacha20_poly1305()
+{
+ return &aead_xchacha20_poly1305;
+}
+#endif
+
+#endif /* !OPENSSL_NO_CHACHA && !OPENSSL_NO_POLY1305 */
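The two seal/open functions above are the entry points this commit keeps for ChaCha20-Poly1305 (the EVP_AEAD wrappers are compiled out with #if 0). The following caller sketch is illustrative only and not part of the commit; the prototypes are restated from the definitions above, and the zero key/nonce and the example_* name are placeholders.

#include <stddef.h>
#include <string.h>

/* Prototypes mirroring the definitions in e_chacha20poly1305.c above. */
int aead_chacha20_poly1305_seal(unsigned char key[32], unsigned char tag_len,
    unsigned char *out, size_t *out_len, size_t max_out_len,
    const unsigned char *nonce, size_t nonce_len,
    const unsigned char *in, size_t in_len,
    const unsigned char *ad, size_t ad_len);
int aead_chacha20_poly1305_open(unsigned char key[32], unsigned char tag_len,
    unsigned char *out, size_t *out_len, size_t max_out_len,
    const unsigned char *nonce, size_t nonce_len,
    const unsigned char *in, size_t in_len,
    const unsigned char *ad, size_t ad_len);

int
example_aead_roundtrip(void)
{
        unsigned char key[32] = {0};            /* placeholder key */
        unsigned char nonce[12] = {0};          /* 4-byte constant + 8-byte IV */
        const unsigned char msg[] = "hello";
        const unsigned char ad[] = "header";
        unsigned char ct[sizeof(msg) + 16];     /* ciphertext plus 16-byte tag */
        unsigned char pt[sizeof(msg)];
        size_t ct_len, pt_len;

        if (!aead_chacha20_poly1305_seal(key, 16, ct, &ct_len, sizeof(ct),
            nonce, sizeof(nonce), msg, sizeof(msg), ad, sizeof(ad)))
                return 0;
        if (!aead_chacha20_poly1305_open(key, 16, pt, &pt_len, sizeof(pt),
            nonce, sizeof(nonce), ct, ct_len, ad, sizeof(ad)))
                return 0;
        return pt_len == sizeof(msg) && memcmp(pt, msg, pt_len) == 0;
}

A real caller must supply a secret key and a nonce that is never reused under that key; reuse breaks both confidentiality and the Poly1305 authenticator.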
diff --git a/crypto/libressl/crypto/chacha/Makefile b/crypto/libressl/crypto/chacha/Makefile
new file mode 100644
index 0000000..9f7aa3a
--- /dev/null
+++ b/crypto/libressl/crypto/chacha/Makefile
@@ -0,0 +1,13 @@
+include ssl_common.mk
+
+obj = chacha.o
+
+
+all: $(obj)
+dep: all
+
+%.o: %.c
+ $(CC) $(CFLAGS) -c $<
+
+clean:
+ rm -f *.o *.a
diff --git a/crypto/libressl/crypto/chacha/chacha-merged.c b/crypto/libressl/crypto/chacha/chacha-merged.c
new file mode 100644
index 0000000..67508f2
--- /dev/null
+++ b/crypto/libressl/crypto/chacha/chacha-merged.c
@@ -0,0 +1,325 @@
+/* $OpenBSD: chacha-merged.c,v 1.9 2019/01/22 00:59:21 dlg Exp $ */
+/*
+chacha-merged.c version 20080118
+D. J. Bernstein
+Public domain.
+*/
+
+#include <sys/types.h>
+
+#include <stdint.h>
+
+#define CHACHA_MINKEYLEN 16
+#define CHACHA_NONCELEN 8
+#define CHACHA_CTRLEN 8
+#define CHACHA_STATELEN (CHACHA_NONCELEN+CHACHA_CTRLEN)
+#define CHACHA_BLOCKLEN 64
+
+struct chacha_ctx {
+ u_int input[16];
+ uint8_t ks[CHACHA_BLOCKLEN];
+ uint8_t unused;
+};
+
+static inline void chacha_keysetup(struct chacha_ctx *x, const u_char *k,
+ u_int kbits)
+ __attribute__((__bounded__(__minbytes__, 2, CHACHA_MINKEYLEN)));
+static inline void chacha_ivsetup(struct chacha_ctx *x, const u_char *iv,
+ const u_char *ctr)
+ __attribute__((__bounded__(__minbytes__, 2, CHACHA_NONCELEN)))
+ __attribute__((__bounded__(__minbytes__, 3, CHACHA_CTRLEN)));
+static inline void chacha_encrypt_bytes(struct chacha_ctx *x, const u_char *m,
+ u_char *c, u_int bytes)
+ __attribute__((__bounded__(__buffer__, 2, 4)))
+ __attribute__((__bounded__(__buffer__, 3, 4)));
+
+typedef unsigned char u8;
+typedef unsigned int u32;
+
+typedef struct chacha_ctx chacha_ctx;
+
+#define U8C(v) (v##U)
+#define U32C(v) (v##U)
+
+#define U8V(v) ((u8)(v) & U8C(0xFF))
+#define U32V(v) ((u32)(v) & U32C(0xFFFFFFFF))
+
+#define ROTL32(v, n) \
+ (U32V((v) << (n)) | ((v) >> (32 - (n))))
+
+#define U8TO32_LITTLE(p) \
+ (((u32)((p)[0])) | \
+ ((u32)((p)[1]) << 8) | \
+ ((u32)((p)[2]) << 16) | \
+ ((u32)((p)[3]) << 24))
+
+#define U32TO8_LITTLE(p, v) \
+ do { \
+ (p)[0] = U8V((v)); \
+ (p)[1] = U8V((v) >> 8); \
+ (p)[2] = U8V((v) >> 16); \
+ (p)[3] = U8V((v) >> 24); \
+ } while (0)
+
+#define ROTATE(v,c) (ROTL32(v,c))
+#define XOR(v,w) ((v) ^ (w))
+#define PLUS(v,w) (U32V((v) + (w)))
+#define PLUSONE(v) (PLUS((v),1))
+
+#define QUARTERROUND(a,b,c,d) \
+ a = PLUS(a,b); d = ROTATE(XOR(d,a),16); \
+ c = PLUS(c,d); b = ROTATE(XOR(b,c),12); \
+ a = PLUS(a,b); d = ROTATE(XOR(d,a), 8); \
+ c = PLUS(c,d); b = ROTATE(XOR(b,c), 7);
+
+/* Initialise with "expand 32-byte k". */
+static const char sigma[16] = {
+ 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x20, 0x33,
+ 0x32, 0x2d, 0x62, 0x79, 0x74, 0x65, 0x20, 0x6b,
+};
+
+/* Initialise with "expand 16-byte k". */
+static const char tau[16] = {
+ 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x20, 0x31,
+ 0x36, 0x2d, 0x62, 0x79, 0x74, 0x65, 0x20, 0x6b,
+};
+
+static inline void
+chacha_keysetup(chacha_ctx *x, const u8 *k, u32 kbits)
+{
+ const char *constants;
+
+ x->input[4] = U8TO32_LITTLE(k + 0);
+ x->input[5] = U8TO32_LITTLE(k + 4);
+ x->input[6] = U8TO32_LITTLE(k + 8);
+ x->input[7] = U8TO32_LITTLE(k + 12);
+ if (kbits == 256) { /* recommended */
+ k += 16;
+ constants = sigma;
+ } else { /* kbits == 128 */
+ constants = tau;
+ }
+ x->input[8] = U8TO32_LITTLE(k + 0);
+ x->input[9] = U8TO32_LITTLE(k + 4);
+ x->input[10] = U8TO32_LITTLE(k + 8);
+ x->input[11] = U8TO32_LITTLE(k + 12);
+ x->input[0] = U8TO32_LITTLE(constants + 0);
+ x->input[1] = U8TO32_LITTLE(constants + 4);
+ x->input[2] = U8TO32_LITTLE(constants + 8);
+ x->input[3] = U8TO32_LITTLE(constants + 12);
+}
+
+static inline void
+chacha_ivsetup(chacha_ctx *x, const u8 *iv, const u8 *counter)
+{
+ x->input[12] = counter == NULL ? 0 : U8TO32_LITTLE(counter + 0);
+ x->input[13] = counter == NULL ? 0 : U8TO32_LITTLE(counter + 4);
+ x->input[14] = U8TO32_LITTLE(iv + 0);
+ x->input[15] = U8TO32_LITTLE(iv + 4);
+}
+
+static inline void
+chacha_encrypt_bytes(chacha_ctx *x, const u8 *m, u8 *c, u32 bytes)
+{
+ u32 x0, x1, x2, x3, x4, x5, x6, x7;
+ u32 x8, x9, x10, x11, x12, x13, x14, x15;
+ u32 j0, j1, j2, j3, j4, j5, j6, j7;
+ u32 j8, j9, j10, j11, j12, j13, j14, j15;
+ u8 *ctarget = NULL;
+ u8 tmp[64];
+ u_int i;
+
+ if (!bytes)
+ return;
+
+ j0 = x->input[0];
+ j1 = x->input[1];
+ j2 = x->input[2];
+ j3 = x->input[3];
+ j4 = x->input[4];
+ j5 = x->input[5];
+ j6 = x->input[6];
+ j7 = x->input[7];
+ j8 = x->input[8];
+ j9 = x->input[9];
+ j10 = x->input[10];
+ j11 = x->input[11];
+ j12 = x->input[12];
+ j13 = x->input[13];
+ j14 = x->input[14];
+ j15 = x->input[15];
+
+ for (;;) {
+ if (bytes < 64) {
+ for (i = 0; i < bytes; ++i)
+ tmp[i] = m[i];
+ m = tmp;
+ ctarget = c;
+ c = tmp;
+ }
+ x0 = j0;
+ x1 = j1;
+ x2 = j2;
+ x3 = j3;
+ x4 = j4;
+ x5 = j5;
+ x6 = j6;
+ x7 = j7;
+ x8 = j8;
+ x9 = j9;
+ x10 = j10;
+ x11 = j11;
+ x12 = j12;
+ x13 = j13;
+ x14 = j14;
+ x15 = j15;
+ for (i = 20; i > 0; i -= 2) {
+ QUARTERROUND(x0, x4, x8, x12)
+ QUARTERROUND(x1, x5, x9, x13)
+ QUARTERROUND(x2, x6, x10, x14)
+ QUARTERROUND(x3, x7, x11, x15)
+ QUARTERROUND(x0, x5, x10, x15)
+ QUARTERROUND(x1, x6, x11, x12)
+ QUARTERROUND(x2, x7, x8, x13)
+ QUARTERROUND(x3, x4, x9, x14)
+ }
+ x0 = PLUS(x0, j0);
+ x1 = PLUS(x1, j1);
+ x2 = PLUS(x2, j2);
+ x3 = PLUS(x3, j3);
+ x4 = PLUS(x4, j4);
+ x5 = PLUS(x5, j5);
+ x6 = PLUS(x6, j6);
+ x7 = PLUS(x7, j7);
+ x8 = PLUS(x8, j8);
+ x9 = PLUS(x9, j9);
+ x10 = PLUS(x10, j10);
+ x11 = PLUS(x11, j11);
+ x12 = PLUS(x12, j12);
+ x13 = PLUS(x13, j13);
+ x14 = PLUS(x14, j14);
+ x15 = PLUS(x15, j15);
+
+ if (bytes < 64) {
+ U32TO8_LITTLE(x->ks + 0, x0);
+ U32TO8_LITTLE(x->ks + 4, x1);
+ U32TO8_LITTLE(x->ks + 8, x2);
+ U32TO8_LITTLE(x->ks + 12, x3);
+ U32TO8_LITTLE(x->ks + 16, x4);
+ U32TO8_LITTLE(x->ks + 20, x5);
+ U32TO8_LITTLE(x->ks + 24, x6);
+ U32TO8_LITTLE(x->ks + 28, x7);
+ U32TO8_LITTLE(x->ks + 32, x8);
+ U32TO8_LITTLE(x->ks + 36, x9);
+ U32TO8_LITTLE(x->ks + 40, x10);
+ U32TO8_LITTLE(x->ks + 44, x11);
+ U32TO8_LITTLE(x->ks + 48, x12);
+ U32TO8_LITTLE(x->ks + 52, x13);
+ U32TO8_LITTLE(x->ks + 56, x14);
+ U32TO8_LITTLE(x->ks + 60, x15);
+ }
+
+ x0 = XOR(x0, U8TO32_LITTLE(m + 0));
+ x1 = XOR(x1, U8TO32_LITTLE(m + 4));
+ x2 = XOR(x2, U8TO32_LITTLE(m + 8));
+ x3 = XOR(x3, U8TO32_LITTLE(m + 12));
+ x4 = XOR(x4, U8TO32_LITTLE(m + 16));
+ x5 = XOR(x5, U8TO32_LITTLE(m + 20));
+ x6 = XOR(x6, U8TO32_LITTLE(m + 24));
+ x7 = XOR(x7, U8TO32_LITTLE(m + 28));
+ x8 = XOR(x8, U8TO32_LITTLE(m + 32));
+ x9 = XOR(x9, U8TO32_LITTLE(m + 36));
+ x10 = XOR(x10, U8TO32_LITTLE(m + 40));
+ x11 = XOR(x11, U8TO32_LITTLE(m + 44));
+ x12 = XOR(x12, U8TO32_LITTLE(m + 48));
+ x13 = XOR(x13, U8TO32_LITTLE(m + 52));
+ x14 = XOR(x14, U8TO32_LITTLE(m + 56));
+ x15 = XOR(x15, U8TO32_LITTLE(m + 60));
+
+ j12 = PLUSONE(j12);
+ if (!j12) {
+ j13 = PLUSONE(j13);
+ /*
+ * Stopping at 2^70 bytes per nonce is the user's
+ * responsibility.
+ */
+ }
+
+ U32TO8_LITTLE(c + 0, x0);
+ U32TO8_LITTLE(c + 4, x1);
+ U32TO8_LITTLE(c + 8, x2);
+ U32TO8_LITTLE(c + 12, x3);
+ U32TO8_LITTLE(c + 16, x4);
+ U32TO8_LITTLE(c + 20, x5);
+ U32TO8_LITTLE(c + 24, x6);
+ U32TO8_LITTLE(c + 28, x7);
+ U32TO8_LITTLE(c + 32, x8);
+ U32TO8_LITTLE(c + 36, x9);
+ U32TO8_LITTLE(c + 40, x10);
+ U32TO8_LITTLE(c + 44, x11);
+ U32TO8_LITTLE(c + 48, x12);
+ U32TO8_LITTLE(c + 52, x13);
+ U32TO8_LITTLE(c + 56, x14);
+ U32TO8_LITTLE(c + 60, x15);
+
+ if (bytes <= 64) {
+ if (bytes < 64) {
+ for (i = 0; i < bytes; ++i)
+ ctarget[i] = c[i];
+ }
+ x->input[12] = j12;
+ x->input[13] = j13;
+ x->unused = 64 - bytes;
+ return;
+ }
+ bytes -= 64;
+ c += 64;
+ m += 64;
+ }
+}
+
+void
+CRYPTO_hchacha_20(unsigned char subkey[32], const unsigned char key[32],
+ const unsigned char nonce[16])
+{
+ uint32_t x[16];
+ int i;
+
+ x[0] = U8TO32_LITTLE(sigma + 0);
+ x[1] = U8TO32_LITTLE(sigma + 4);
+ x[2] = U8TO32_LITTLE(sigma + 8);
+ x[3] = U8TO32_LITTLE(sigma + 12);
+ x[4] = U8TO32_LITTLE(key + 0);
+ x[5] = U8TO32_LITTLE(key + 4);
+ x[6] = U8TO32_LITTLE(key + 8);
+ x[7] = U8TO32_LITTLE(key + 12);
+ x[8] = U8TO32_LITTLE(key + 16);
+ x[9] = U8TO32_LITTLE(key + 20);
+ x[10] = U8TO32_LITTLE(key + 24);
+ x[11] = U8TO32_LITTLE(key + 28);
+ x[12] = U8TO32_LITTLE(nonce + 0);
+ x[13] = U8TO32_LITTLE(nonce + 4);
+ x[14] = U8TO32_LITTLE(nonce + 8);
+ x[15] = U8TO32_LITTLE(nonce + 12);
+
+ for (i = 20; i > 0; i -= 2) {
+ QUARTERROUND(x[0], x[4], x[8], x[12])
+ QUARTERROUND(x[1], x[5], x[9], x[13])
+ QUARTERROUND(x[2], x[6], x[10], x[14])
+ QUARTERROUND(x[3], x[7], x[11], x[15])
+ QUARTERROUND(x[0], x[5], x[10], x[15])
+ QUARTERROUND(x[1], x[6], x[11], x[12])
+ QUARTERROUND(x[2], x[7], x[8], x[13])
+ QUARTERROUND(x[3], x[4], x[9], x[14])
+ }
+
+ U32TO8_LITTLE(subkey + 0, x[0]);
+ U32TO8_LITTLE(subkey + 4, x[1]);
+ U32TO8_LITTLE(subkey + 8, x[2]);
+ U32TO8_LITTLE(subkey + 12, x[3]);
+
+ U32TO8_LITTLE(subkey + 16, x[12]);
+ U32TO8_LITTLE(subkey + 20, x[13]);
+ U32TO8_LITTLE(subkey + 24, x[14]);
+ U32TO8_LITTLE(subkey + 28, x[15]);
+}
diff --git a/crypto/libressl/crypto/chacha/chacha.c b/crypto/libressl/crypto/chacha/chacha.c
new file mode 100644
index 0000000..f62a84d
--- /dev/null
+++ b/crypto/libressl/crypto/chacha/chacha.c
@@ -0,0 +1,87 @@
+/* $OpenBSD: chacha.c,v 1.8 2019/01/22 00:59:21 dlg Exp $ */
+/*
+ * Copyright (c) 2014 Joel Sing <jsing@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <stdint.h>
+
+#include <openssl/chacha.h>
+
+#include "chacha-merged.c"
+
+void
+ChaCha_set_key(ChaCha_ctx *ctx, const unsigned char *key, unsigned int keybits)
+{
+ chacha_keysetup((chacha_ctx *)ctx, key, keybits);
+ ctx->unused = 0;
+}
+
+void
+ChaCha_set_iv(ChaCha_ctx *ctx, const unsigned char *iv,
+ const unsigned char *counter)
+{
+ chacha_ivsetup((chacha_ctx *)ctx, iv, counter);
+ ctx->unused = 0;
+}
+
+void
+ChaCha(ChaCha_ctx *ctx, unsigned char *out, const unsigned char *in, size_t len)
+{
+ unsigned char *k;
+ int i, l;
+
+ /* Consume remaining keystream, if any exists. */
+ if (ctx->unused > 0) {
+ k = ctx->ks + 64 - ctx->unused;
+ l = (len > ctx->unused) ? ctx->unused : len;
+ for (i = 0; i < l; i++)
+ *(out++) = *(in++) ^ *(k++);
+ ctx->unused -= l;
+ len -= l;
+ }
+
+ chacha_encrypt_bytes((chacha_ctx *)ctx, in, out, (uint32_t)len);
+}
+
+void
+CRYPTO_chacha_20(unsigned char *out, const unsigned char *in, size_t len,
+ const unsigned char key[32], const unsigned char iv[8], uint64_t counter)
+{
+ struct chacha_ctx ctx;
+
+ /*
+ * chacha_ivsetup expects the counter to be in u8. Rather than
+ * converting size_t to u8 and then back again, pass a counter of
+ * NULL and manually assign it afterwards.
+ */
+ chacha_keysetup(&ctx, key, 256);
+ chacha_ivsetup(&ctx, iv, NULL);
+ if (counter != 0) {
+ ctx.input[12] = (uint32_t)counter;
+ ctx.input[13] = (uint32_t)(counter >> 32);
+ }
+
+ chacha_encrypt_bytes(&ctx, in, out, (uint32_t)len);
+}
+
+void
+CRYPTO_xchacha_20(unsigned char *out, const unsigned char *in, size_t len,
+ const unsigned char key[32], const unsigned char iv[24])
+{
+ uint8_t subkey[32];
+
+ CRYPTO_hchacha_20(subkey, key, iv);
+ CRYPTO_chacha_20(out, in, len, subkey, iv + 16, 0);
+}
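chacha.c above wires the merged Bernstein implementation up to CRYPTO_chacha_20() and CRYPTO_xchacha_20(), declared in the openssl/chacha.h header added by this commit. Because ChaCha is a pure keystream XOR, applying the same call twice with identical key, IV and counter restores the input. The sketch below is illustrative only and not part of the commit; the zero key/IV and the example_* name are placeholders.

#include <string.h>

#include <openssl/chacha.h>     /* declares CRYPTO_chacha_20(); added by this commit */

int
example_chacha_roundtrip(void)
{
        const unsigned char key[32] = {0};      /* placeholder key */
        const unsigned char iv[8] = {0};        /* placeholder IV */
        const unsigned char msg[] = "stream cipher demo";
        unsigned char ct[sizeof(msg)], pt[sizeof(msg)];

        CRYPTO_chacha_20(ct, msg, sizeof(msg), key, iv, 0);    /* encrypt */
        CRYPTO_chacha_20(pt, ct, sizeof(ct), key, iv, 0);      /* decrypt */

        return memcmp(pt, msg, sizeof(msg)) == 0;
}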
diff --git a/crypto/libressl/crypto/compat/Makefile b/crypto/libressl/crypto/compat/Makefile
new file mode 100644
index 0000000..7fd5ea1
--- /dev/null
+++ b/crypto/libressl/crypto/compat/Makefile
@@ -0,0 +1,13 @@
+include ssl_common.mk
+
+obj = arc4random.o explicit_bzero.o timingsafe_bcmp.o timingsafe_memcmp.o
+
+
+all: $(obj)
+dep: all
+
+%.o: %.c
+ $(CC) $(CFLAGS) -c $<
+
+clean:
+ rm -f *.o *.a
diff --git a/crypto/libressl/crypto/compat/arc4random.c b/crypto/libressl/crypto/compat/arc4random.c
new file mode 100644
index 0000000..ac41fb3
--- /dev/null
+++ b/crypto/libressl/crypto/compat/arc4random.c
@@ -0,0 +1,216 @@
+/* $OpenBSD: arc4random.c,v 1.55 2019/03/24 17:56:54 deraadt Exp $ */
+
+/*
+ * Copyright (c) 1996, David Mazieres <dm@uun.org>
+ * Copyright (c) 2008, Damien Miller <djm@openbsd.org>
+ * Copyright (c) 2013, Markus Friedl <markus@openbsd.org>
+ * Copyright (c) 2014, Theo de Raadt <deraadt@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * ChaCha based random number generator for OpenBSD.
+ */
+
+#include <fcntl.h>
+#include <limits.h>
+#include <signal.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/time.h>
+
+#define KEYSTREAM_ONLY
+#include "chacha_private.h"
+
+#define minimum(a, b) ((a) < (b) ? (a) : (b))
+
+#if defined(__GNUC__) || defined(_MSC_VER)
+#define inline __inline
+#else /* __GNUC__ || _MSC_VER */
+#define inline
+#endif /* !__GNUC__ && !_MSC_VER */
+
+#define KEYSZ 32
+#define IVSZ 8
+#define BLOCKSZ 64
+#if defined(__FE310__)
+#define RSBLKS 16
+#define RSBUFSZ (BLOCKSZ)
+#else
+#define RSBUFSZ (16*BLOCKSZ)
+#endif
+
+/* Marked MAP_INHERIT_ZERO, so zero'd out in fork children. */
+static struct _rs {
+ size_t rs_have; /* valid bytes at end of rs_buf */
+ size_t rs_count; /* bytes till reseed */
+#if defined(__FE310__)
+ size_t rs_blocks; /* blocks till rekey */
+#endif
+} *rs;
+
+/* Maybe be preserved in fork children, if _rs_allocate() decides. */
+static struct _rsx {
+ chacha_ctx rs_chacha; /* chacha context for random keystream */
+ u_char rs_buf[RSBUFSZ]; /* keystream blocks */
+} *rsx;
+
+static inline int _rs_allocate(struct _rs **, struct _rsx **);
+static inline void _rs_forkdetect(void);
+#include "arc4random.h"
+
+static inline void _rs_rekey(u_char *dat, size_t datlen);
+
+static inline void
+_rs_init(u_char *buf, size_t n)
+{
+ if (n < KEYSZ + IVSZ)
+ return;
+
+ if (rs == NULL) {
+ if (_rs_allocate(&rs, &rsx) == -1)
+ _exit(1);
+ }
+
+ chacha_keysetup(&rsx->rs_chacha, buf, KEYSZ * 8, 0);
+ chacha_ivsetup(&rsx->rs_chacha, buf + KEYSZ);
+}
+
+static void
+_rs_stir(void)
+{
+ u_char rnd[KEYSZ + IVSZ];
+
+ if (getentropy(rnd, sizeof rnd) == -1)
+ _getentropy_fail();
+
+ if (!rs)
+ _rs_init(rnd, sizeof(rnd));
+ else
+ _rs_rekey(rnd, sizeof(rnd));
+ explicit_bzero(rnd, sizeof(rnd)); /* discard source seed */
+
+ /* invalidate rs_buf */
+ rs->rs_have = 0;
+ memset(rsx->rs_buf, 0, sizeof(rsx->rs_buf));
+
+ rs->rs_count = 1600000;
+#if defined(__FE310__)
+ rs->rs_blocks = RSBLKS;
+#endif
+}
+
+static inline void
+_rs_stir_if_needed(size_t len)
+{
+ _rs_forkdetect();
+ if (!rs || rs->rs_count <= len)
+ _rs_stir();
+ if (rs->rs_count <= len)
+ rs->rs_count = 0;
+ else
+ rs->rs_count -= len;
+}
+
+static inline void
+_rs_rekey(u_char *dat, size_t datlen)
+{
+#ifndef KEYSTREAM_ONLY
+ memset(rsx->rs_buf, 0, sizeof(rsx->rs_buf));
+#endif
+ /* fill rs_buf with the keystream */
+ chacha_encrypt_bytes(&rsx->rs_chacha, rsx->rs_buf,
+ rsx->rs_buf, sizeof(rsx->rs_buf));
+ /* mix in optional user provided data */
+ if (dat) {
+ size_t i, m;
+
+ m = minimum(datlen, KEYSZ + IVSZ);
+ for (i = 0; i < m; i++)
+ rsx->rs_buf[i] ^= dat[i];
+ }
+#if defined(__FE310__)
+ if (dat || (rs->rs_blocks == 0)) {
+ rs->rs_blocks = RSBLKS;
+ } else {
+ rs->rs_blocks--;
+ rs->rs_have = sizeof(rsx->rs_buf);
+ return;
+ }
+#endif
+ /* immediately reinit for backtracking resistance */
+ _rs_init(rsx->rs_buf, KEYSZ + IVSZ);
+ memset(rsx->rs_buf, 0, KEYSZ + IVSZ);
+ rs->rs_have = sizeof(rsx->rs_buf) - KEYSZ - IVSZ;
+}
+
+static inline void
+_rs_random_buf(void *_buf, size_t n)
+{
+ u_char *buf = (u_char *)_buf;
+ u_char *keystream;
+ size_t m;
+
+ _rs_stir_if_needed(n);
+ while (n > 0) {
+ if (rs->rs_have > 0) {
+ m = minimum(n, rs->rs_have);
+ keystream = rsx->rs_buf + sizeof(rsx->rs_buf)
+ - rs->rs_have;
+ memcpy(buf, keystream, m);
+ memset(keystream, 0, m);
+ buf += m;
+ n -= m;
+ rs->rs_have -= m;
+ }
+ if (rs->rs_have == 0)
+ _rs_rekey(NULL, 0);
+ }
+}
+
+static inline void
+_rs_random_u32(uint32_t *val)
+{
+ u_char *keystream;
+
+ _rs_stir_if_needed(sizeof(*val));
+ if (rs->rs_have < sizeof(*val))
+ _rs_rekey(NULL, 0);
+ keystream = rsx->rs_buf + sizeof(rsx->rs_buf) - rs->rs_have;
+ memcpy(val, keystream, sizeof(*val));
+ memset(keystream, 0, sizeof(*val));
+ rs->rs_have -= sizeof(*val);
+}
+
+uint32_t
+arc4random(void)
+{
+ uint32_t val;
+
+ _ARC4_LOCK();
+ _rs_random_u32(&val);
+ _ARC4_UNLOCK();
+ return val;
+}
+
+void
+arc4random_buf(void *buf, size_t n)
+{
+ _ARC4_LOCK();
+ _rs_random_buf(buf, n);
+ _ARC4_UNLOCK();
+}
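arc4random.c above maintains a ChaCha keystream buffer that is stirred from getentropy() and rekeyed for backtracking resistance; consumers only see the two functions at the end of the file. A hypothetical usage sketch, not part of the commit:

#include <stddef.h>
#include <stdint.h>

uint32_t arc4random(void);
void arc4random_buf(void *buf, size_t n);

void
example_fill_key(unsigned char key[32])
{
        arc4random_buf(key, 32);        /* 32 keystream-derived random bytes */
}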
diff --git a/crypto/libressl/crypto/compat/arc4random.h b/crypto/libressl/crypto/compat/arc4random.h
new file mode 100644
index 0000000..8a308a9
--- /dev/null
+++ b/crypto/libressl/crypto/compat/arc4random.h
@@ -0,0 +1,41 @@
+#ifndef LIBCRYPTOCOMPAT_ARC4RANDOM_H
+#define LIBCRYPTOCOMPAT_ARC4RANDOM_H
+
+#include <sys/param.h>
+
+#if defined(_AIX)
+#include "arc4random_aix.h"
+
+#elif defined(__FreeBSD__)
+#include "arc4random_freebsd.h"
+
+#elif defined(__hpux)
+#include "arc4random_hpux.h"
+
+#elif defined(__linux__)
+#include "arc4random_linux.h"
+
+#elif defined(__midipix__)
+#include "arc4random_linux.h"
+
+#elif defined(__NetBSD__)
+#include "arc4random_netbsd.h"
+
+#elif defined(__APPLE__)
+#include "arc4random_osx.h"
+
+#elif defined(__sun)
+#include "arc4random_solaris.h"
+
+#elif defined(_WIN32)
+#include "arc4random_win.h"
+
+#elif defined(__FE310__)
+#include "arc4random_fe310.h"
+
+#else
+#error "No arc4random hooks defined for this platform."
+
+#endif
+
+#endif
diff --git a/crypto/libressl/crypto/compat/arc4random_aix.h b/crypto/libressl/crypto/compat/arc4random_aix.h
new file mode 100644
index 0000000..3142a1f
--- /dev/null
+++ b/crypto/libressl/crypto/compat/arc4random_aix.h
@@ -0,0 +1,81 @@
+/* $OpenBSD: arc4random_aix.h,v 1.2 2016/06/30 12:19:51 bcook Exp $ */
+
+/*
+ * Copyright (c) 1996, David Mazieres <dm@uun.org>
+ * Copyright (c) 2008, Damien Miller <djm@openbsd.org>
+ * Copyright (c) 2013, Markus Friedl <markus@openbsd.org>
+ * Copyright (c) 2014, Theo de Raadt <deraadt@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Stub functions for portability.
+ */
+
+#include <sys/mman.h>
+
+#include <pthread.h>
+#include <signal.h>
+
+static pthread_mutex_t arc4random_mtx = PTHREAD_MUTEX_INITIALIZER;
+#define _ARC4_LOCK() pthread_mutex_lock(&arc4random_mtx)
+#define _ARC4_UNLOCK() pthread_mutex_unlock(&arc4random_mtx)
+
+#define _ARC4_ATFORK(f) pthread_atfork(NULL, NULL, (f))
+
+static inline void
+_getentropy_fail(void)
+{
+ raise(SIGKILL);
+}
+
+static volatile sig_atomic_t _rs_forked;
+
+static inline void
+_rs_forkhandler(void)
+{
+ _rs_forked = 1;
+}
+
+static inline void
+_rs_forkdetect(void)
+{
+ static pid_t _rs_pid = 0;
+ pid_t pid = getpid();
+
+ if (_rs_pid == 0 || _rs_pid != pid || _rs_forked) {
+ _rs_pid = pid;
+ _rs_forked = 0;
+ if (rs)
+ memset(rs, 0, sizeof(*rs));
+ }
+}
+
+static inline int
+_rs_allocate(struct _rs **rsp, struct _rsx **rsxp)
+{
+ if ((*rsp = mmap(NULL, sizeof(**rsp), PROT_READ|PROT_WRITE,
+ MAP_ANON|MAP_PRIVATE, -1, 0)) == MAP_FAILED)
+ return (-1);
+
+ if ((*rsxp = mmap(NULL, sizeof(**rsxp), PROT_READ|PROT_WRITE,
+ MAP_ANON|MAP_PRIVATE, -1, 0)) == MAP_FAILED) {
+ munmap(*rsp, sizeof(**rsp));
+ *rsp = NULL;
+ return (-1);
+ }
+
+ _ARC4_ATFORK(_rs_forkhandler);
+ return (0);
+}
diff --git a/crypto/libressl/crypto/compat/arc4random_fe310.h b/crypto/libressl/crypto/compat/arc4random_fe310.h
new file mode 100644
index 0000000..131b0d3
--- /dev/null
+++ b/crypto/libressl/crypto/compat/arc4random_fe310.h
@@ -0,0 +1,27 @@
+#define _ARC4_LOCK() ;
+#define _ARC4_UNLOCK() ;
+
+static inline void
+_getentropy_fail(void)
+{
+ _exit(1);
+}
+
+static inline void
+_rs_forkdetect(void)
+{
+}
+
+int arc4random_alloc(void **rsp, size_t rsp_size, void **rsxp, size_t rsxp_size);
+
+static inline int
+_rs_allocate(struct _rs **rsp, struct _rsx **rsxp)
+{
+ return arc4random_alloc((void **)rsp, sizeof(**rsp), (void **)rsxp, sizeof(**rsxp));
+}
+
+void arc4random_close(void)
+{
+ rs = NULL;
+ rsx = NULL;
+}
\ No newline at end of file
diff --git a/crypto/libressl/crypto/compat/arc4random_freebsd.h b/crypto/libressl/crypto/compat/arc4random_freebsd.h
new file mode 100644
index 0000000..3faa5e4
--- /dev/null
+++ b/crypto/libressl/crypto/compat/arc4random_freebsd.h
@@ -0,0 +1,87 @@
+/* $OpenBSD: arc4random_freebsd.h,v 1.4 2016/06/30 12:19:51 bcook Exp $ */
+
+/*
+ * Copyright (c) 1996, David Mazieres <dm@uun.org>
+ * Copyright (c) 2008, Damien Miller <djm@openbsd.org>
+ * Copyright (c) 2013, Markus Friedl <markus@openbsd.org>
+ * Copyright (c) 2014, Theo de Raadt <deraadt@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Stub functions for portability.
+ */
+
+#include <sys/mman.h>
+
+#include <pthread.h>
+#include <signal.h>
+
+static pthread_mutex_t arc4random_mtx = PTHREAD_MUTEX_INITIALIZER;
+#define _ARC4_LOCK() pthread_mutex_lock(&arc4random_mtx)
+#define _ARC4_UNLOCK() pthread_mutex_unlock(&arc4random_mtx)
+
+/*
+ * Unfortunately, pthread_atfork() is broken on FreeBSD (at least 9 and 10) if
+ * a program does not link to -lthr. Callbacks registered with pthread_atfork()
+ * appear to fail silently. So, it is not always possible to detect a PID
+ * wraparound.
+ */
+#define _ARC4_ATFORK(f) pthread_atfork(NULL, NULL, (f))
+
+static inline void
+_getentropy_fail(void)
+{
+ raise(SIGKILL);
+}
+
+static volatile sig_atomic_t _rs_forked;
+
+static inline void
+_rs_forkhandler(void)
+{
+ _rs_forked = 1;
+}
+
+static inline void
+_rs_forkdetect(void)
+{
+ static pid_t _rs_pid = 0;
+ pid_t pid = getpid();
+
+ if (_rs_pid == 0 || _rs_pid != pid || _rs_forked) {
+ _rs_pid = pid;
+ _rs_forked = 0;
+ if (rs)
+ memset(rs, 0, sizeof(*rs));
+ }
+}
+
+static inline int
+_rs_allocate(struct _rs **rsp, struct _rsx **rsxp)
+{
+ if ((*rsp = mmap(NULL, sizeof(**rsp), PROT_READ|PROT_WRITE,
+ MAP_ANON|MAP_PRIVATE, -1, 0)) == MAP_FAILED)
+ return (-1);
+
+ if ((*rsxp = mmap(NULL, sizeof(**rsxp), PROT_READ|PROT_WRITE,
+ MAP_ANON|MAP_PRIVATE, -1, 0)) == MAP_FAILED) {
+ munmap(*rsp, sizeof(**rsp));
+ *rsp = NULL;
+ return (-1);
+ }
+
+ _ARC4_ATFORK(_rs_forkhandler);
+ return (0);
+}
diff --git a/crypto/libressl/crypto/compat/arc4random_hpux.h b/crypto/libressl/crypto/compat/arc4random_hpux.h
new file mode 100644
index 0000000..2a3fe8c
--- /dev/null
+++ b/crypto/libressl/crypto/compat/arc4random_hpux.h
@@ -0,0 +1,81 @@
+/* $OpenBSD: arc4random_hpux.h,v 1.3 2016/06/30 12:19:51 bcook Exp $ */
+
+/*
+ * Copyright (c) 1996, David Mazieres <dm@uun.org>
+ * Copyright (c) 2008, Damien Miller <djm@openbsd.org>
+ * Copyright (c) 2013, Markus Friedl <markus@openbsd.org>
+ * Copyright (c) 2014, Theo de Raadt <deraadt@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Stub functions for portability.
+ */
+
+#include <sys/mman.h>
+
+#include <pthread.h>
+#include <signal.h>
+
+static pthread_mutex_t arc4random_mtx = PTHREAD_MUTEX_INITIALIZER;
+#define _ARC4_LOCK() pthread_mutex_lock(&arc4random_mtx)
+#define _ARC4_UNLOCK() pthread_mutex_unlock(&arc4random_mtx)
+
+#define _ARC4_ATFORK(f) pthread_atfork(NULL, NULL, (f))
+
+static inline void
+_getentropy_fail(void)
+{
+ raise(SIGKILL);
+}
+
+static volatile sig_atomic_t _rs_forked;
+
+static inline void
+_rs_forkhandler(void)
+{
+ _rs_forked = 1;
+}
+
+static inline void
+_rs_forkdetect(void)
+{
+ static pid_t _rs_pid = 0;
+ pid_t pid = getpid();
+
+ if (_rs_pid == 0 || _rs_pid != pid || _rs_forked) {
+ _rs_pid = pid;
+ _rs_forked = 0;
+ if (rs)
+ memset(rs, 0, sizeof(*rs));
+ }
+}
+
+static inline int
+_rs_allocate(struct _rs **rsp, struct _rsx **rsxp)
+{
+ if ((*rsp = mmap(NULL, sizeof(**rsp), PROT_READ|PROT_WRITE,
+ MAP_ANON|MAP_PRIVATE, -1, 0)) == MAP_FAILED)
+ return (-1);
+
+ if ((*rsxp = mmap(NULL, sizeof(**rsxp), PROT_READ|PROT_WRITE,
+ MAP_ANON|MAP_PRIVATE, -1, 0)) == MAP_FAILED) {
+ munmap(*rsp, sizeof(**rsp));
+ *rsp = NULL;
+ return (-1);
+ }
+
+ _ARC4_ATFORK(_rs_forkhandler);
+ return (0);
+}
diff --git a/crypto/libressl/crypto/compat/arc4random_linux.h b/crypto/libressl/crypto/compat/arc4random_linux.h
new file mode 100644
index 0000000..5e1cf34
--- /dev/null
+++ b/crypto/libressl/crypto/compat/arc4random_linux.h
@@ -0,0 +1,88 @@
+/* $OpenBSD: arc4random_linux.h,v 1.12 2019/07/11 10:37:28 inoguchi Exp $ */
+
+/*
+ * Copyright (c) 1996, David Mazieres <dm@uun.org>
+ * Copyright (c) 2008, Damien Miller <djm@openbsd.org>
+ * Copyright (c) 2013, Markus Friedl <markus@openbsd.org>
+ * Copyright (c) 2014, Theo de Raadt <deraadt@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Stub functions for portability.
+ */
+
+#include <sys/mman.h>
+
+#include <pthread.h>
+#include <signal.h>
+
+static pthread_mutex_t arc4random_mtx = PTHREAD_MUTEX_INITIALIZER;
+#define _ARC4_LOCK() pthread_mutex_lock(&arc4random_mtx)
+#define _ARC4_UNLOCK() pthread_mutex_unlock(&arc4random_mtx)
+
+#if defined(__GLIBC__) && !(defined(__UCLIBC__) && !defined(__ARCH_USE_MMU__))
+extern void *__dso_handle;
+extern int __register_atfork(void (*)(void), void(*)(void), void (*)(void), void *);
+#define _ARC4_ATFORK(f) __register_atfork(NULL, NULL, (f), __dso_handle)
+#else
+#define _ARC4_ATFORK(f) pthread_atfork(NULL, NULL, (f))
+#endif
+
+static inline void
+_getentropy_fail(void)
+{
+ raise(SIGKILL);
+}
+
+static volatile sig_atomic_t _rs_forked;
+
+static inline void
+_rs_forkhandler(void)
+{
+ _rs_forked = 1;
+}
+
+static inline void
+_rs_forkdetect(void)
+{
+ static pid_t _rs_pid = 0;
+ pid_t pid = getpid();
+
+ /* XXX unusual calls to clone() can bypass checks */
+ if (_rs_pid == 0 || _rs_pid == 1 || _rs_pid != pid || _rs_forked) {
+ _rs_pid = pid;
+ _rs_forked = 0;
+ if (rs)
+ memset(rs, 0, sizeof(*rs));
+ }
+}
+
+static inline int
+_rs_allocate(struct _rs **rsp, struct _rsx **rsxp)
+{
+ if ((*rsp = mmap(NULL, sizeof(**rsp), PROT_READ|PROT_WRITE,
+ MAP_ANON|MAP_PRIVATE, -1, 0)) == MAP_FAILED)
+ return (-1);
+
+ if ((*rsxp = mmap(NULL, sizeof(**rsxp), PROT_READ|PROT_WRITE,
+ MAP_ANON|MAP_PRIVATE, -1, 0)) == MAP_FAILED) {
+ munmap(*rsp, sizeof(**rsp));
+ *rsp = NULL;
+ return (-1);
+ }
+
+ _ARC4_ATFORK(_rs_forkhandler);
+ return (0);
+}
diff --git a/crypto/libressl/crypto/compat/arc4random_netbsd.h b/crypto/libressl/crypto/compat/arc4random_netbsd.h
new file mode 100644
index 0000000..611997d
--- /dev/null
+++ b/crypto/libressl/crypto/compat/arc4random_netbsd.h
@@ -0,0 +1,87 @@
+/* $OpenBSD: arc4random_netbsd.h,v 1.3 2016/06/30 12:19:51 bcook Exp $ */
+
+/*
+ * Copyright (c) 1996, David Mazieres <dm@uun.org>
+ * Copyright (c) 2008, Damien Miller <djm@openbsd.org>
+ * Copyright (c) 2013, Markus Friedl <markus@openbsd.org>
+ * Copyright (c) 2014, Theo de Raadt <deraadt@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Stub functions for portability.
+ */
+
+#include <sys/mman.h>
+
+#include <pthread.h>
+#include <signal.h>
+
+static pthread_mutex_t arc4random_mtx = PTHREAD_MUTEX_INITIALIZER;
+#define _ARC4_LOCK() pthread_mutex_lock(&arc4random_mtx)
+#define _ARC4_UNLOCK() pthread_mutex_unlock(&arc4random_mtx)
+
+/*
+ * Unfortunately, pthread_atfork() is broken on FreeBSD (at least 9 and 10) if
+ * a program does not link to -lthr. Callbacks registered with pthread_atfork()
+ * appear to fail silently. So, it is not always possible to detect a PID
+ * wraparound.
+ */
+#define _ARC4_ATFORK(f) pthread_atfork(NULL, NULL, (f))
+
+static inline void
+_getentropy_fail(void)
+{
+ raise(SIGKILL);
+}
+
+static volatile sig_atomic_t _rs_forked;
+
+static inline void
+_rs_forkhandler(void)
+{
+ _rs_forked = 1;
+}
+
+static inline void
+_rs_forkdetect(void)
+{
+ static pid_t _rs_pid = 0;
+ pid_t pid = getpid();
+
+ if (_rs_pid == 0 || _rs_pid != pid || _rs_forked) {
+ _rs_pid = pid;
+ _rs_forked = 0;
+ if (rs)
+ memset(rs, 0, sizeof(*rs));
+ }
+}
+
+static inline int
+_rs_allocate(struct _rs **rsp, struct _rsx **rsxp)
+{
+ if ((*rsp = mmap(NULL, sizeof(**rsp), PROT_READ|PROT_WRITE,
+ MAP_ANON|MAP_PRIVATE, -1, 0)) == MAP_FAILED)
+ return (-1);
+
+ if ((*rsxp = mmap(NULL, sizeof(**rsxp), PROT_READ|PROT_WRITE,
+ MAP_ANON|MAP_PRIVATE, -1, 0)) == MAP_FAILED) {
+ munmap(*rsp, sizeof(**rsp));
+ *rsp = NULL;
+ return (-1);
+ }
+
+ _ARC4_ATFORK(_rs_forkhandler);
+ return (0);
+}
diff --git a/crypto/libressl/crypto/compat/arc4random_osx.h b/crypto/libressl/crypto/compat/arc4random_osx.h
new file mode 100644
index 0000000..818ae6b
--- /dev/null
+++ b/crypto/libressl/crypto/compat/arc4random_osx.h
@@ -0,0 +1,81 @@
+/* $OpenBSD: arc4random_osx.h,v 1.11 2016/06/30 12:19:51 bcook Exp $ */
+
+/*
+ * Copyright (c) 1996, David Mazieres <dm@uun.org>
+ * Copyright (c) 2008, Damien Miller <djm@openbsd.org>
+ * Copyright (c) 2013, Markus Friedl <markus@openbsd.org>
+ * Copyright (c) 2014, Theo de Raadt <deraadt@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Stub functions for portability.
+ */
+
+#include <sys/mman.h>
+
+#include <pthread.h>
+#include <signal.h>
+
+static pthread_mutex_t arc4random_mtx = PTHREAD_MUTEX_INITIALIZER;
+#define _ARC4_LOCK() pthread_mutex_lock(&arc4random_mtx)
+#define _ARC4_UNLOCK() pthread_mutex_unlock(&arc4random_mtx)
+
+#define _ARC4_ATFORK(f) pthread_atfork(NULL, NULL, (f))
+
+static inline void
+_getentropy_fail(void)
+{
+ raise(SIGKILL);
+}
+
+static volatile sig_atomic_t _rs_forked;
+
+static inline void
+_rs_forkhandler(void)
+{
+ _rs_forked = 1;
+}
+
+static inline void
+_rs_forkdetect(void)
+{
+ static pid_t _rs_pid = 0;
+ pid_t pid = getpid();
+
+ if (_rs_pid == 0 || _rs_pid != pid || _rs_forked) {
+ _rs_pid = pid;
+ _rs_forked = 0;
+ if (rs)
+ memset(rs, 0, sizeof(*rs));
+ }
+}
+
+static inline int
+_rs_allocate(struct _rs **rsp, struct _rsx **rsxp)
+{
+ if ((*rsp = mmap(NULL, sizeof(**rsp), PROT_READ|PROT_WRITE,
+ MAP_ANON|MAP_PRIVATE, -1, 0)) == MAP_FAILED)
+ return (-1);
+
+ if ((*rsxp = mmap(NULL, sizeof(**rsxp), PROT_READ|PROT_WRITE,
+ MAP_ANON|MAP_PRIVATE, -1, 0)) == MAP_FAILED) {
+ munmap(*rsp, sizeof(**rsp));
+ *rsp = NULL;
+ return (-1);
+ }
+
+ _ARC4_ATFORK(_rs_forkhandler);
+ return (0);
+}
diff --git a/crypto/libressl/crypto/compat/arc4random_solaris.h b/crypto/libressl/crypto/compat/arc4random_solaris.h
new file mode 100644
index 0000000..b1084cd
--- /dev/null
+++ b/crypto/libressl/crypto/compat/arc4random_solaris.h
@@ -0,0 +1,81 @@
+/* $OpenBSD: arc4random_solaris.h,v 1.10 2016/06/30 12:19:51 bcook Exp $ */
+
+/*
+ * Copyright (c) 1996, David Mazieres <dm@uun.org>
+ * Copyright (c) 2008, Damien Miller <djm@openbsd.org>
+ * Copyright (c) 2013, Markus Friedl <markus@openbsd.org>
+ * Copyright (c) 2014, Theo de Raadt <deraadt@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Stub functions for portability.
+ */
+
+#include <sys/mman.h>
+
+#include <pthread.h>
+#include <signal.h>
+
+static pthread_mutex_t arc4random_mtx = PTHREAD_MUTEX_INITIALIZER;
+#define _ARC4_LOCK() pthread_mutex_lock(&arc4random_mtx)
+#define _ARC4_UNLOCK() pthread_mutex_unlock(&arc4random_mtx)
+
+#define _ARC4_ATFORK(f) pthread_atfork(NULL, NULL, (f))
+
+static inline void
+_getentropy_fail(void)
+{
+ raise(SIGKILL);
+}
+
+static volatile sig_atomic_t _rs_forked;
+
+static inline void
+_rs_forkhandler(void)
+{
+ _rs_forked = 1;
+}
+
+static inline void
+_rs_forkdetect(void)
+{
+ static pid_t _rs_pid = 0;
+ pid_t pid = getpid();
+
+ if (_rs_pid == 0 || _rs_pid != pid || _rs_forked) {
+ _rs_pid = pid;
+ _rs_forked = 0;
+ if (rs)
+ memset(rs, 0, sizeof(*rs));
+ }
+}
+
+static inline int
+_rs_allocate(struct _rs **rsp, struct _rsx **rsxp)
+{
+ if ((*rsp = mmap(NULL, sizeof(**rsp), PROT_READ|PROT_WRITE,
+ MAP_ANON|MAP_PRIVATE, -1, 0)) == MAP_FAILED)
+ return (-1);
+
+ if ((*rsxp = mmap(NULL, sizeof(**rsxp), PROT_READ|PROT_WRITE,
+ MAP_ANON|MAP_PRIVATE, -1, 0)) == MAP_FAILED) {
+ munmap(*rsp, sizeof(**rsp));
+ *rsp = NULL;
+ return (-1);
+ }
+
+ _ARC4_ATFORK(_rs_forkhandler);
+ return (0);
+}
diff --git a/crypto/libressl/crypto/compat/arc4random_uniform.c b/crypto/libressl/crypto/compat/arc4random_uniform.c
new file mode 100644
index 0000000..06cd29c
--- /dev/null
+++ b/crypto/libressl/crypto/compat/arc4random_uniform.c
@@ -0,0 +1,56 @@
+/* $OpenBSD: arc4random_uniform.c,v 1.3 2019/01/20 02:59:07 bcook Exp $ */
+
+/*
+ * Copyright (c) 2008, Damien Miller <djm@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+
+/*
+ * Calculate a uniformly distributed random number less than upper_bound
+ * avoiding "modulo bias".
+ *
+ * Uniformity is achieved by generating new random numbers until the one
+ * returned is outside the range [0, 2**32 % upper_bound). This
+ * guarantees the selected random number will be inside
+ * [2**32 % upper_bound, 2**32) which maps back to [0, upper_bound)
+ * after reduction modulo upper_bound.
+ */
+uint32_t
+arc4random_uniform(uint32_t upper_bound)
+{
+ uint32_t r, min;
+
+ if (upper_bound < 2)
+ return 0;
+
+ /* 2**32 % x == (2**32 - x) % x */
+ min = -upper_bound % upper_bound;
+
+ /*
+ * This could theoretically loop forever but each retry has
+ * p > 0.5 (worst case, usually far better) of selecting a
+ * number inside the range we need, so it should rarely need
+ * to re-roll.
+ */
+ for (;;) {
+ r = arc4random();
+ if (r >= min)
+ break;
+ }
+
+ return r % upper_bound;
+}
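
A minimal usage sketch (not part of the diff above), assuming the BSD <stdlib.h> prototypes for arc4random() and arc4random_uniform() (on Linux they come from libbsd's <bsd/stdlib.h>). It rolls a fair six-sided die: a naive arc4random() % 6 would slightly favour 0..3 because 2^32 mod 6 == 4, which is exactly the bias the re-roll loop above removes.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>	/* arc4random(), arc4random_uniform() -- assumed BSD/libbsd prototypes */

int
main(void)
{
	/* uniform in 0..5, then shifted to 1..6; no modulo bias */
	uint32_t die = arc4random_uniform(6) + 1;

	printf("rolled %u\n", die);
	return 0;
}
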
diff --git a/crypto/libressl/crypto/compat/arc4random_win.h b/crypto/libressl/crypto/compat/arc4random_win.h
new file mode 100644
index 0000000..deec8a1
--- /dev/null
+++ b/crypto/libressl/crypto/compat/arc4random_win.h
@@ -0,0 +1,78 @@
+/* $OpenBSD: arc4random_win.h,v 1.6 2016/06/30 12:17:29 bcook Exp $ */
+
+/*
+ * Copyright (c) 1996, David Mazieres <dm@uun.org>
+ * Copyright (c) 2008, Damien Miller <djm@openbsd.org>
+ * Copyright (c) 2013, Markus Friedl <markus@openbsd.org>
+ * Copyright (c) 2014, Theo de Raadt <deraadt@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Stub functions for portability.
+ */
+
+#include <windows.h>
+
+static volatile HANDLE arc4random_mtx = NULL;
+
+/*
+ * Initialize the mutex on the first lock attempt. On collision, each thread
+ * will attempt to allocate a mutex and compare-and-swap it into place as the
+ * global mutex. On failure to swap in the global mutex, the mutex is closed.
+ */
+#define _ARC4_LOCK() { \
+ if (!arc4random_mtx) { \
+ HANDLE p = CreateMutex(NULL, FALSE, NULL); \
+ if (InterlockedCompareExchangePointer((void **)&arc4random_mtx, (void *)p, NULL)) \
+ CloseHandle(p); \
+ } \
+ WaitForSingleObject(arc4random_mtx, INFINITE); \
+} \
+
+#define _ARC4_UNLOCK() ReleaseMutex(arc4random_mtx)
+
+static inline void
+_getentropy_fail(void)
+{
+ TerminateProcess(GetCurrentProcess(), 0);
+}
+
+static inline int
+_rs_allocate(struct _rs **rsp, struct _rsx **rsxp)
+{
+ *rsp = VirtualAlloc(NULL, sizeof(**rsp),
+ MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ if (*rsp == NULL)
+ return (-1);
+
+ *rsxp = VirtualAlloc(NULL, sizeof(**rsxp),
+ MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ if (*rsxp == NULL) {
+ VirtualFree(*rsp, 0, MEM_RELEASE);
+ *rsp = NULL;
+ return (-1);
+ }
+ return (0);
+}
+
+static inline void
+_rs_forkhandler(void)
+{
+}
+
+static inline void
+_rs_forkdetect(void)
+{
+}
diff --git a/crypto/libressl/crypto/compat/chacha_private.h b/crypto/libressl/crypto/compat/chacha_private.h
new file mode 100644
index 0000000..7c3680f
--- /dev/null
+++ b/crypto/libressl/crypto/compat/chacha_private.h
@@ -0,0 +1,222 @@
+/*
+chacha-merged.c version 20080118
+D. J. Bernstein
+Public domain.
+*/
+
+/* $OpenBSD: chacha_private.h,v 1.2 2013/10/04 07:02:27 djm Exp $ */
+
+typedef unsigned char u8;
+typedef unsigned int u32;
+
+typedef struct
+{
+ u32 input[16]; /* could be compressed */
+} chacha_ctx;
+
+#define U8C(v) (v##U)
+#define U32C(v) (v##U)
+
+#define U8V(v) ((u8)(v) & U8C(0xFF))
+#define U32V(v) ((u32)(v) & U32C(0xFFFFFFFF))
+
+#define ROTL32(v, n) \
+ (U32V((v) << (n)) | ((v) >> (32 - (n))))
+
+#define U8TO32_LITTLE(p) \
+ (((u32)((p)[0]) ) | \
+ ((u32)((p)[1]) << 8) | \
+ ((u32)((p)[2]) << 16) | \
+ ((u32)((p)[3]) << 24))
+
+#define U32TO8_LITTLE(p, v) \
+ do { \
+ (p)[0] = U8V((v) ); \
+ (p)[1] = U8V((v) >> 8); \
+ (p)[2] = U8V((v) >> 16); \
+ (p)[3] = U8V((v) >> 24); \
+ } while (0)
+
+#define ROTATE(v,c) (ROTL32(v,c))
+#define XOR(v,w) ((v) ^ (w))
+#define PLUS(v,w) (U32V((v) + (w)))
+#define PLUSONE(v) (PLUS((v),1))
+
+#define QUARTERROUND(a,b,c,d) \
+ a = PLUS(a,b); d = ROTATE(XOR(d,a),16); \
+ c = PLUS(c,d); b = ROTATE(XOR(b,c),12); \
+ a = PLUS(a,b); d = ROTATE(XOR(d,a), 8); \
+ c = PLUS(c,d); b = ROTATE(XOR(b,c), 7);
+
+static const char sigma[16] = "expand 32-byte k";
+static const char tau[16] = "expand 16-byte k";
+
+static void
+chacha_keysetup(chacha_ctx *x,const u8 *k,u32 kbits,u32 ivbits)
+{
+ const char *constants;
+
+ x->input[4] = U8TO32_LITTLE(k + 0);
+ x->input[5] = U8TO32_LITTLE(k + 4);
+ x->input[6] = U8TO32_LITTLE(k + 8);
+ x->input[7] = U8TO32_LITTLE(k + 12);
+ if (kbits == 256) { /* recommended */
+ k += 16;
+ constants = sigma;
+ } else { /* kbits == 128 */
+ constants = tau;
+ }
+ x->input[8] = U8TO32_LITTLE(k + 0);
+ x->input[9] = U8TO32_LITTLE(k + 4);
+ x->input[10] = U8TO32_LITTLE(k + 8);
+ x->input[11] = U8TO32_LITTLE(k + 12);
+ x->input[0] = U8TO32_LITTLE(constants + 0);
+ x->input[1] = U8TO32_LITTLE(constants + 4);
+ x->input[2] = U8TO32_LITTLE(constants + 8);
+ x->input[3] = U8TO32_LITTLE(constants + 12);
+}
+
+static void
+chacha_ivsetup(chacha_ctx *x,const u8 *iv)
+{
+ x->input[12] = 0;
+ x->input[13] = 0;
+ x->input[14] = U8TO32_LITTLE(iv + 0);
+ x->input[15] = U8TO32_LITTLE(iv + 4);
+}
+
+static void
+chacha_encrypt_bytes(chacha_ctx *x,const u8 *m,u8 *c,u32 bytes)
+{
+ u32 x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
+ u32 j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15;
+ u8 *ctarget = NULL;
+ u8 tmp[64];
+ u_int i;
+
+ if (!bytes) return;
+
+ j0 = x->input[0];
+ j1 = x->input[1];
+ j2 = x->input[2];
+ j3 = x->input[3];
+ j4 = x->input[4];
+ j5 = x->input[5];
+ j6 = x->input[6];
+ j7 = x->input[7];
+ j8 = x->input[8];
+ j9 = x->input[9];
+ j10 = x->input[10];
+ j11 = x->input[11];
+ j12 = x->input[12];
+ j13 = x->input[13];
+ j14 = x->input[14];
+ j15 = x->input[15];
+
+ for (;;) {
+ if (bytes < 64) {
+ for (i = 0;i < bytes;++i) tmp[i] = m[i];
+ m = tmp;
+ ctarget = c;
+ c = tmp;
+ }
+ x0 = j0;
+ x1 = j1;
+ x2 = j2;
+ x3 = j3;
+ x4 = j4;
+ x5 = j5;
+ x6 = j6;
+ x7 = j7;
+ x8 = j8;
+ x9 = j9;
+ x10 = j10;
+ x11 = j11;
+ x12 = j12;
+ x13 = j13;
+ x14 = j14;
+ x15 = j15;
+ for (i = 20;i > 0;i -= 2) {
+ QUARTERROUND( x0, x4, x8,x12)
+ QUARTERROUND( x1, x5, x9,x13)
+ QUARTERROUND( x2, x6,x10,x14)
+ QUARTERROUND( x3, x7,x11,x15)
+ QUARTERROUND( x0, x5,x10,x15)
+ QUARTERROUND( x1, x6,x11,x12)
+ QUARTERROUND( x2, x7, x8,x13)
+ QUARTERROUND( x3, x4, x9,x14)
+ }
+ x0 = PLUS(x0,j0);
+ x1 = PLUS(x1,j1);
+ x2 = PLUS(x2,j2);
+ x3 = PLUS(x3,j3);
+ x4 = PLUS(x4,j4);
+ x5 = PLUS(x5,j5);
+ x6 = PLUS(x6,j6);
+ x7 = PLUS(x7,j7);
+ x8 = PLUS(x8,j8);
+ x9 = PLUS(x9,j9);
+ x10 = PLUS(x10,j10);
+ x11 = PLUS(x11,j11);
+ x12 = PLUS(x12,j12);
+ x13 = PLUS(x13,j13);
+ x14 = PLUS(x14,j14);
+ x15 = PLUS(x15,j15);
+
+#ifndef KEYSTREAM_ONLY
+ x0 = XOR(x0,U8TO32_LITTLE(m + 0));
+ x1 = XOR(x1,U8TO32_LITTLE(m + 4));
+ x2 = XOR(x2,U8TO32_LITTLE(m + 8));
+ x3 = XOR(x3,U8TO32_LITTLE(m + 12));
+ x4 = XOR(x4,U8TO32_LITTLE(m + 16));
+ x5 = XOR(x5,U8TO32_LITTLE(m + 20));
+ x6 = XOR(x6,U8TO32_LITTLE(m + 24));
+ x7 = XOR(x7,U8TO32_LITTLE(m + 28));
+ x8 = XOR(x8,U8TO32_LITTLE(m + 32));
+ x9 = XOR(x9,U8TO32_LITTLE(m + 36));
+ x10 = XOR(x10,U8TO32_LITTLE(m + 40));
+ x11 = XOR(x11,U8TO32_LITTLE(m + 44));
+ x12 = XOR(x12,U8TO32_LITTLE(m + 48));
+ x13 = XOR(x13,U8TO32_LITTLE(m + 52));
+ x14 = XOR(x14,U8TO32_LITTLE(m + 56));
+ x15 = XOR(x15,U8TO32_LITTLE(m + 60));
+#endif
+
+ j12 = PLUSONE(j12);
+ if (!j12) {
+ j13 = PLUSONE(j13);
+ /* stopping at 2^70 bytes per nonce is user's responsibility */
+ }
+
+ U32TO8_LITTLE(c + 0,x0);
+ U32TO8_LITTLE(c + 4,x1);
+ U32TO8_LITTLE(c + 8,x2);
+ U32TO8_LITTLE(c + 12,x3);
+ U32TO8_LITTLE(c + 16,x4);
+ U32TO8_LITTLE(c + 20,x5);
+ U32TO8_LITTLE(c + 24,x6);
+ U32TO8_LITTLE(c + 28,x7);
+ U32TO8_LITTLE(c + 32,x8);
+ U32TO8_LITTLE(c + 36,x9);
+ U32TO8_LITTLE(c + 40,x10);
+ U32TO8_LITTLE(c + 44,x11);
+ U32TO8_LITTLE(c + 48,x12);
+ U32TO8_LITTLE(c + 52,x13);
+ U32TO8_LITTLE(c + 56,x14);
+ U32TO8_LITTLE(c + 60,x15);
+
+ if (bytes <= 64) {
+ if (bytes < 64) {
+ for (i = 0;i < bytes;++i) ctarget[i] = c[i];
+ }
+ x->input[12] = j12;
+ x->input[13] = j13;
+ return;
+ }
+ bytes -= 64;
+ c += 64;
+#ifndef KEYSTREAM_ONLY
+ m += 64;
+#endif
+ }
+}
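
A minimal sketch of how the primitives above fit together (not part of the diff): chacha_keysetup() loads a 256-bit key and the "expand 32-byte k" constants, chacha_ivsetup() zeroes the 64-bit block counter and loads the 64-bit nonce, and encrypting zero bytes yields the raw keystream. <sys/types.h> is included for the u_int used inside the header; the all-zero key and nonce are placeholders.

#include <sys/types.h>	/* u_int, used inside chacha_private.h */
#include <stdio.h>

#include "chacha_private.h"

int
main(void)
{
	chacha_ctx ctx;
	u8 key[32] = {0}, iv[8] = {0};
	u8 zero[64] = {0}, stream[64];
	int i;

	chacha_keysetup(&ctx, key, 256, 0);	/* 256-bit key; ivbits argument is unused */
	chacha_ivsetup(&ctx, iv);		/* 64-bit nonce, block counter starts at 0 */
	chacha_encrypt_bytes(&ctx, zero, stream, sizeof(stream));	/* zeros in -> keystream out */

	for (i = 0; i < 64; i++)
		printf("%02x", stream[i]);
	printf("\n");
	return 0;
}
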
diff --git a/crypto/libressl/crypto/compat/explicit_bzero.c b/crypto/libressl/crypto/compat/explicit_bzero.c
new file mode 100644
index 0000000..5dd0103
--- /dev/null
+++ b/crypto/libressl/crypto/compat/explicit_bzero.c
@@ -0,0 +1,19 @@
+/* $OpenBSD: explicit_bzero.c,v 1.4 2015/08/31 02:53:57 guenther Exp $ */
+/*
+ * Public domain.
+ * Written by Matthew Dempsky.
+ */
+
+#include <string.h>
+
+__attribute__((weak)) void
+__explicit_bzero_hook(void *buf, size_t len)
+{
+}
+
+void
+explicit_bzero(void *buf, size_t len)
+{
+ memset(buf, 0, len);
+ __explicit_bzero_hook(buf, len);
+}
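
The weak __explicit_bzero_hook above exists so the compiler cannot prove the zeroed buffer is never read again and drop the memset() as a dead store. A minimal caller-side sketch (not part of the diff), assuming explicit_bzero() is visible via <string.h> or this compat declaration:

#include <string.h>	/* explicit_bzero() -- assumed prototype on BSD/glibc 2.25+ */

void
handle_secret(void)
{
	unsigned char key[32];

	/* ... derive and use key ... */

	/* A plain memset() here could be optimized away because key is
	 * about to go out of scope; explicit_bzero() will not be. */
	explicit_bzero(key, sizeof(key));
}
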
diff --git a/crypto/libressl/crypto/compat/explicit_bzero_win.c b/crypto/libressl/crypto/compat/explicit_bzero_win.c
new file mode 100644
index 0000000..0d09d90
--- /dev/null
+++ b/crypto/libressl/crypto/compat/explicit_bzero_win.c
@@ -0,0 +1,13 @@
+/*
+ * Public domain.
+ * Win32 explicit_bzero compatibility shim.
+ */
+
+#include <windows.h>
+#include <string.h>
+
+void
+explicit_bzero(void *buf, size_t len)
+{
+ SecureZeroMemory(buf, len);
+}
diff --git a/crypto/libressl/crypto/compat/getentropy_aix.c b/crypto/libressl/crypto/compat/getentropy_aix.c
new file mode 100644
index 0000000..422e685
--- /dev/null
+++ b/crypto/libressl/crypto/compat/getentropy_aix.c
@@ -0,0 +1,402 @@
+/* $OpenBSD: getentropy_aix.c,v 1.7 2020/05/17 14:44:20 deraadt Exp $ */
+
+/*
+ * Copyright (c) 2015 Michael Felt <aixtools@gmail.com>
+ * Copyright (c) 2014 Theo de Raadt <deraadt@openbsd.org>
+ * Copyright (c) 2014 Bob Beck <beck@obtuse.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Emulation of getentropy(2) as documented at:
+ * http://man.openbsd.org/getentropy.2
+ */
+/*
+ * -lperfstat is needed for the pseudo entropy data
+ * -lperfstat is needed for the pseudo entropy data
+ */
+
+#include <sys/mman.h>
+#include <sys/procfs.h>
+#include <sys/protosw.h>
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/statvfs.h>
+#include <sys/timers.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdio.h>
+#include <string.h>
+#include <termios.h>
+
+#include <openssl/sha.h>
+
+#include <libperfstat.h>
+
+#define REPEAT 5
+#define MINIMUM(a, b) (((a) < (b)) ? (a) : (b))
+
+#define HX(a, b) \
+ do { \
+ if ((a)) \
+ HD(errno); \
+ else \
+ HD(b); \
+ } while (0)
+
+#define HR(x, l) (SHA512_Update(&ctx, (char *)(x), (l)))
+#define HD(x) (SHA512_Update(&ctx, (char *)&(x), sizeof (x)))
+#define HF(x) (SHA512_Update(&ctx, (char *)&(x), sizeof (void*)))
+
+int getentropy(void *buf, size_t len);
+
+static int getentropy_urandom(void *buf, size_t len, const char *path,
+ int devfscheck);
+static int getentropy_fallback(void *buf, size_t len);
+
+int
+getentropy(void *buf, size_t len)
+{
+ int ret = -1;
+
+ if (len > 256) {
+ errno = EIO;
+ return (-1);
+ }
+
+ /*
+ * Try to get entropy with /dev/urandom
+ */
+ ret = getentropy_urandom(buf, len, "/dev/urandom", 0);
+ if (ret != -1)
+ return (ret);
+
+ /*
+ * Entropy collection via /dev/urandom has failed.
+ *
+ * No other API exists for collecting entropy, and we have
+ * no failsafe way to get it on AIX that is not sensitive
+ * to resource exhaustion.
+ *
+ * We have very few options:
+ * - Even syslog_r is unsafe to call at this low level, so
+ * there is no way to alert the user or program.
+ * - Cannot call abort() because some systems have unsafe
+ * corefiles.
+ * - Could raise(SIGKILL) resulting in silent program termination.
+ * - Return EIO, to hint that arc4random's stir function
+ * should raise(SIGKILL)
+ * - Do the best under the circumstances....
+ *
+ * This code path exists to bring light to the issue that AIX
+ * does not provide a failsafe API for entropy collection.
+ *
+ * We hope this demonstrates that AIX should consider
+ * providing a new failsafe API which works in a chroot or
+ * when file descriptors are exhausted.
+ */
+#undef FAIL_INSTEAD_OF_TRYING_FALLBACK
+#ifdef FAIL_INSTEAD_OF_TRYING_FALLBACK
+ raise(SIGKILL);
+#endif
+ ret = getentropy_fallback(buf, len);
+ if (ret != -1)
+ return (ret);
+
+ errno = EIO;
+ return (ret);
+}
+
+static int
+getentropy_urandom(void *buf, size_t len, const char *path, int devfscheck)
+{
+ struct stat st;
+ size_t i;
+ int fd, flags;
+ int save_errno = errno;
+
+start:
+
+ flags = O_RDONLY;
+#ifdef O_NOFOLLOW
+ flags |= O_NOFOLLOW;
+#endif
+#ifdef O_CLOEXEC
+ flags |= O_CLOEXEC;
+#endif
+ fd = open(path, flags, 0);
+ if (fd == -1) {
+ if (errno == EINTR)
+ goto start;
+ goto nodevrandom;
+ }
+#ifndef O_CLOEXEC
+ fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
+#endif
+
+ /* Lightly verify that the device node looks sane */
+ if (fstat(fd, &st) == -1 || !S_ISCHR(st.st_mode)) {
+ close(fd);
+ goto nodevrandom;
+ }
+ for (i = 0; i < len; ) {
+ size_t wanted = len - i;
+ ssize_t ret = read(fd, (char *)buf + i, wanted);
+
+ if (ret == -1) {
+ if (errno == EAGAIN || errno == EINTR)
+ continue;
+ close(fd);
+ goto nodevrandom;
+ }
+ i += ret;
+ }
+ close(fd);
+ errno = save_errno;
+ return (0); /* satisfied */
+nodevrandom:
+ errno = EIO;
+ return (-1);
+}
+
+static const int cl[] = {
+ CLOCK_REALTIME,
+#ifdef CLOCK_MONOTONIC
+ CLOCK_MONOTONIC,
+#endif
+#ifdef CLOCK_MONOTONIC_RAW
+ CLOCK_MONOTONIC_RAW,
+#endif
+#ifdef CLOCK_TAI
+ CLOCK_TAI,
+#endif
+#ifdef CLOCK_VIRTUAL
+ CLOCK_VIRTUAL,
+#endif
+#ifdef CLOCK_UPTIME
+ CLOCK_UPTIME,
+#endif
+#ifdef CLOCK_PROCESS_CPUTIME_ID
+ CLOCK_PROCESS_CPUTIME_ID,
+#endif
+#ifdef CLOCK_THREAD_CPUTIME_ID
+ CLOCK_THREAD_CPUTIME_ID,
+#endif
+};
+
+static int
+getentropy_fallback(void *buf, size_t len)
+{
+ uint8_t results[SHA512_DIGEST_LENGTH];
+ int save_errno = errno, e, pgs = sysconf(_SC_PAGESIZE), faster = 0, repeat;
+ static int cnt;
+ struct timespec ts;
+ struct timeval tv;
+ perfstat_cpu_total_t cpustats;
+#ifdef _AIX61
+ perfstat_cpu_total_wpar_t cpustats_wpar;
+#endif
+ perfstat_partition_total_t lparstats;
+ perfstat_disk_total_t diskinfo;
+ perfstat_netinterface_total_t netinfo;
+ struct rusage ru;
+ sigset_t sigset;
+ struct stat st;
+ SHA512_CTX ctx;
+ static pid_t lastpid;
+ pid_t pid;
+ size_t i, ii, m;
+ char *p;
+
+ pid = getpid();
+ if (lastpid == pid) {
+ faster = 1;
+ repeat = 2;
+ } else {
+ faster = 0;
+ lastpid = pid;
+ repeat = REPEAT;
+ }
+ for (i = 0; i < len; ) {
+ int j;
+ SHA512_Init(&ctx);
+ for (j = 0; j < repeat; j++) {
+ HX((e = gettimeofday(&tv, NULL)) == -1, tv);
+ if (e != -1) {
+ cnt += (int)tv.tv_sec;
+ cnt += (int)tv.tv_usec;
+ }
+
+ HX(perfstat_cpu_total(NULL, &cpustats,
+ sizeof(cpustats), 1) == -1, cpustats);
+
+#ifdef _AIX61
+ HX(perfstat_cpu_total_wpar(NULL, &cpustats_wpar,
+ sizeof(cpustats_wpar), 1) == -1, cpustats_wpar);
+#endif
+
+ HX(perfstat_partition_total(NULL, &lparstats,
+ sizeof(lparstats), 1) == -1, lparstats);
+
+ HX(perfstat_disk_total(NULL, &diskinfo,
+ sizeof(diskinfo), 1) == -1, diskinfo);
+
+ HX(perfstat_netinterface_total(NULL, &netinfo,
+ sizeof(netinfo), 1) == -1, netinfo);
+
+ for (ii = 0; ii < sizeof(cl)/sizeof(cl[0]); ii++)
+ HX(clock_gettime(cl[ii], &ts) == -1, ts);
+
+ HX((pid = getpid()) == -1, pid);
+ HX((pid = getsid(pid)) == -1, pid);
+ HX((pid = getppid()) == -1, pid);
+ HX((pid = getpgid(0)) == -1, pid);
+ HX((e = getpriority(0, 0)) == -1, e);
+
+ if (!faster) {
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1;
+ (void) nanosleep(&ts, NULL);
+ }
+
+ HX(sigpending(&sigset) == -1, sigset);
+ HX(sigprocmask(SIG_BLOCK, NULL, &sigset) == -1,
+ sigset);
+
+ HF(getentropy); /* an addr in this library */
+ HF(printf); /* an addr in libc */
+ p = (char *)&p;
+ HD(p); /* an addr on stack */
+ p = (char *)&errno;
+ HD(p); /* the addr of errno */
+
+ if (i == 0) {
+ struct sockaddr_storage ss;
+ struct statvfs stvfs;
+ struct termios tios;
+ socklen_t ssl;
+ off_t off;
+
+ /*
+ * Prime-sized mappings encourage fragmentation;
+ * thus exposing some address entropy.
+ */
+ struct mm {
+ size_t npg;
+ void *p;
+ } mm[] = {
+ { 17, MAP_FAILED }, { 3, MAP_FAILED },
+ { 11, MAP_FAILED }, { 2, MAP_FAILED },
+ { 5, MAP_FAILED }, { 3, MAP_FAILED },
+ { 7, MAP_FAILED }, { 1, MAP_FAILED },
+ { 57, MAP_FAILED }, { 3, MAP_FAILED },
+ { 131, MAP_FAILED }, { 1, MAP_FAILED },
+ };
+
+ for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
+ HX(mm[m].p = mmap(NULL,
+ mm[m].npg * pgs,
+ PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANON, -1,
+ (off_t)0), mm[m].p);
+ if (mm[m].p != MAP_FAILED) {
+ size_t mo;
+
+ /* Touch some memory... */
+ p = mm[m].p;
+ mo = cnt %
+ (mm[m].npg * pgs - 1);
+ p[mo] = 1;
+ cnt += (int)((long)(mm[m].p)
+ / pgs);
+ }
+
+ /* Check cnts and times... */
+ for (ii = 0; ii < sizeof(cl)/sizeof(cl[0]);
+ ii++) {
+ HX((e = clock_gettime(cl[ii],
+ &ts)) == -1, ts);
+ if (e != -1)
+ cnt += (int)ts.tv_nsec;
+ }
+
+ HX((e = getrusage(RUSAGE_SELF,
+ &ru)) == -1, ru);
+ if (e != -1) {
+ cnt += (int)ru.ru_utime.tv_sec;
+ cnt += (int)ru.ru_utime.tv_usec;
+ }
+ }
+
+ for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
+ if (mm[m].p != MAP_FAILED)
+ munmap(mm[m].p, mm[m].npg * pgs);
+ mm[m].p = MAP_FAILED;
+ }
+
+ HX(stat(".", &st) == -1, st);
+ HX(statvfs(".", &stvfs) == -1, stvfs);
+
+ HX(stat("/", &st) == -1, st);
+ HX(statvfs("/", &stvfs) == -1, stvfs);
+
+ HX((e = fstat(0, &st)) == -1, st);
+ if (e == -1) {
+ if (S_ISREG(st.st_mode) ||
+ S_ISFIFO(st.st_mode) ||
+ S_ISSOCK(st.st_mode)) {
+ HX(fstatvfs(0, &stvfs) == -1,
+ stvfs);
+ HX((off = lseek(0, (off_t)0,
+ SEEK_CUR)) < 0, off);
+ }
+ if (S_ISCHR(st.st_mode)) {
+ HX(tcgetattr(0, &tios) == -1,
+ tios);
+ } else if (S_ISSOCK(st.st_mode)) {
+ memset(&ss, 0, sizeof ss);
+ ssl = sizeof(ss);
+ HX(getpeername(0,
+ (void *)&ss, &ssl) == -1,
+ ss);
+ }
+ }
+
+ HX((e = getrusage(RUSAGE_CHILDREN,
+ &ru)) == -1, ru);
+ if (e != -1) {
+ cnt += (int)ru.ru_utime.tv_sec;
+ cnt += (int)ru.ru_utime.tv_usec;
+ }
+ } else {
+ /* Subsequent hashes absorb previous result */
+ HD(results);
+ }
+
+ HX((e = gettimeofday(&tv, NULL)) == -1, tv);
+ if (e != -1) {
+ cnt += (int)tv.tv_sec;
+ cnt += (int)tv.tv_usec;
+ }
+
+ HD(cnt);
+ }
+ SHA512_Final(results, &ctx);
+ memcpy((char *)buf + i, results, MINIMUM(sizeof(results), len - i));
+ i += MINIMUM(sizeof(results), len - i);
+ }
+ explicit_bzero(&ctx, sizeof ctx);
+ explicit_bzero(results, sizeof results);
+ errno = save_errno;
+ return (0); /* satisfied */
+}
diff --git a/crypto/libressl/crypto/compat/getentropy_freebsd.c b/crypto/libressl/crypto/compat/getentropy_freebsd.c
new file mode 100644
index 0000000..ea90ffe
--- /dev/null
+++ b/crypto/libressl/crypto/compat/getentropy_freebsd.c
@@ -0,0 +1,60 @@
+/* $OpenBSD: getentropy_freebsd.c,v 1.4 2020/10/12 22:08:33 deraadt Exp $ */
+
+/*
+ * Copyright (c) 2014 Pawel Jakub Dawidek <pjd@FreeBSD.org>
+ * Copyright (c) 2014 Brent Cook <bcook@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Emulation of getentropy(2) as documented at:
+ * http://man.openbsd.org/getentropy.2
+ */
+
+#include <sys/types.h>
+#include <sys/sysctl.h>
+
+#include <errno.h>
+#include <stddef.h>
+
+/*
+ * Derived from lib/libc/gen/arc4random.c from FreeBSD.
+ */
+static size_t
+getentropy_sysctl(u_char *buf, size_t size)
+{
+ const int mib[2] = { CTL_KERN, KERN_ARND };
+ size_t len, done;
+
+ done = 0;
+
+ do {
+ len = size;
+ if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
+ return (done);
+ done += len;
+ buf += len;
+ size -= len;
+ } while (size > 0);
+
+ return (done);
+}
+
+int
+getentropy(void *buf, size_t len)
+{
+ if (len <= 256 && getentropy_sysctl(buf, len) == len)
+ return (0);
+
+ errno = EIO;
+ return (-1);
+}
diff --git a/crypto/libressl/crypto/compat/getentropy_hpux.c b/crypto/libressl/crypto/compat/getentropy_hpux.c
new file mode 100644
index 0000000..c981880
--- /dev/null
+++ b/crypto/libressl/crypto/compat/getentropy_hpux.c
@@ -0,0 +1,396 @@
+/* $OpenBSD: getentropy_hpux.c,v 1.7 2020/05/17 14:44:20 deraadt Exp $ */
+
+/*
+ * Copyright (c) 2014 Theo de Raadt <deraadt@openbsd.org>
+ * Copyright (c) 2014 Bob Beck <beck@obtuse.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Emulation of getentropy(2) as documented at:
+ * http://man.openbsd.org/getentropy.2
+ */
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/ioctl.h>
+#include <sys/resource.h>
+#include <sys/syscall.h>
+#include <sys/statvfs.h>
+#include <sys/socket.h>
+#include <sys/mount.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <termios.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <time.h>
+#include <openssl/sha.h>
+
+#include <sys/vfs.h>
+
+#include <sys/pstat.h>
+
+#define REPEAT 5
+#define MINIMUM(a, b) (((a) < (b)) ? (a) : (b))
+
+#define HX(a, b) \
+ do { \
+ if ((a)) \
+ HD(errno); \
+ else \
+ HD(b); \
+ } while (0)
+
+#define HR(x, l) (SHA512_Update(&ctx, (char *)(x), (l)))
+#define HD(x) (SHA512_Update(&ctx, (char *)&(x), sizeof (x)))
+#define HF(x) (SHA512_Update(&ctx, (char *)&(x), sizeof (void*)))
+
+int getentropy(void *buf, size_t len);
+
+static int getentropy_urandom(void *buf, size_t len, const char *path,
+ int devfscheck);
+static int getentropy_fallback(void *buf, size_t len);
+
+int
+getentropy(void *buf, size_t len)
+{
+ int ret = -1;
+
+ if (len > 256) {
+ errno = EIO;
+ return (-1);
+ }
+
+ /*
+ * Try to get entropy with /dev/urandom
+ */
+ ret = getentropy_urandom(buf, len, "/dev/urandom", 0);
+ if (ret != -1)
+ return (ret);
+
+ /*
+ * Entropy collection via /dev/urandom has failed.
+ *
+ * No other API exists for collecting entropy, and we have
+ * no failsafe way to get it on hpux that is not sensitive
+ * to resource exhaustion.
+ *
+ * We have very few options:
+ * - Even syslog_r is unsafe to call at this low level, so
+ * there is no way to alert the user or program.
+ * - Cannot call abort() because some systems have unsafe
+ * corefiles.
+ * - Could raise(SIGKILL) resulting in silent program termination.
+ * - Return EIO, to hint that arc4random's stir function
+ * should raise(SIGKILL)
+ * - Do the best under the circumstances....
+ *
+ * This code path exists to bring light to the issue that hpux
+ * does not provide a failsafe API for entropy collection.
+ *
+ * We hope this demonstrates that hpux should consider
+ * providing a new failsafe API which works in a chroot or
+ * when file descriptors are exhausted.
+ */
+#undef FAIL_INSTEAD_OF_TRYING_FALLBACK
+#ifdef FAIL_INSTEAD_OF_TRYING_FALLBACK
+ raise(SIGKILL);
+#endif
+ ret = getentropy_fallback(buf, len);
+ if (ret != -1)
+ return (ret);
+
+ errno = EIO;
+ return (ret);
+}
+
+static int
+getentropy_urandom(void *buf, size_t len, const char *path, int devfscheck)
+{
+ struct stat st;
+ size_t i;
+ int fd, flags;
+ int save_errno = errno;
+
+start:
+
+ flags = O_RDONLY;
+#ifdef O_NOFOLLOW
+ flags |= O_NOFOLLOW;
+#endif
+#ifdef O_CLOEXEC
+ flags |= O_CLOEXEC;
+#endif
+ fd = open(path, flags, 0);
+ if (fd == -1) {
+ if (errno == EINTR)
+ goto start;
+ goto nodevrandom;
+ }
+#ifndef O_CLOEXEC
+ fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
+#endif
+
+ /* Lightly verify that the device node looks sane */
+ if (fstat(fd, &st) == -1 || !S_ISCHR(st.st_mode)) {
+ close(fd);
+ goto nodevrandom;
+ }
+ for (i = 0; i < len; ) {
+ size_t wanted = len - i;
+ ssize_t ret = read(fd, (char *)buf + i, wanted);
+
+ if (ret == -1) {
+ if (errno == EAGAIN || errno == EINTR)
+ continue;
+ close(fd);
+ goto nodevrandom;
+ }
+ i += ret;
+ }
+ close(fd);
+ errno = save_errno;
+ return (0); /* satisfied */
+nodevrandom:
+ errno = EIO;
+ return (-1);
+}
+
+static const int cl[] = {
+ CLOCK_REALTIME,
+#ifdef CLOCK_MONOTONIC
+ CLOCK_MONOTONIC,
+#endif
+#ifdef CLOCK_MONOTONIC_RAW
+ CLOCK_MONOTONIC_RAW,
+#endif
+#ifdef CLOCK_TAI
+ CLOCK_TAI,
+#endif
+#ifdef CLOCK_VIRTUAL
+ CLOCK_VIRTUAL,
+#endif
+#ifdef CLOCK_UPTIME
+ CLOCK_UPTIME,
+#endif
+#ifdef CLOCK_PROCESS_CPUTIME_ID
+ CLOCK_PROCESS_CPUTIME_ID,
+#endif
+#ifdef CLOCK_THREAD_CPUTIME_ID
+ CLOCK_THREAD_CPUTIME_ID,
+#endif
+};
+
+static int
+getentropy_fallback(void *buf, size_t len)
+{
+ uint8_t results[SHA512_DIGEST_LENGTH];
+ int save_errno = errno, e, pgs = sysconf(_SC_PAGESIZE), faster = 0, repeat;
+ static int cnt;
+ struct timespec ts;
+ struct timeval tv;
+ struct pst_vminfo pvi;
+ struct pst_vm_status pvs;
+ struct pst_dynamic pdy;
+ struct rusage ru;
+ sigset_t sigset;
+ struct stat st;
+ SHA512_CTX ctx;
+ static pid_t lastpid;
+ pid_t pid;
+ size_t i, ii, m;
+ char *p;
+
+ pid = getpid();
+ if (lastpid == pid) {
+ faster = 1;
+ repeat = 2;
+ } else {
+ faster = 0;
+ lastpid = pid;
+ repeat = REPEAT;
+ }
+ for (i = 0; i < len; ) {
+ int j;
+ SHA512_Init(&ctx);
+ for (j = 0; j < repeat; j++) {
+ HX((e = gettimeofday(&tv, NULL)) == -1, tv);
+ if (e != -1) {
+ cnt += (int)tv.tv_sec;
+ cnt += (int)tv.tv_usec;
+ }
+
+ HX(pstat_getvminfo(&pvi, sizeof(pvi), 1, 0) != 1, pvi);
+ HX(pstat_getprocvm(&pvs, sizeof(pvs), 0, 0) != 1, pvs);
+
+ for (ii = 0; ii < sizeof(cl)/sizeof(cl[0]); ii++)
+ HX(clock_gettime(cl[ii], &ts) == -1, ts);
+
+ HX((pid = getpid()) == -1, pid);
+ HX((pid = getsid(pid)) == -1, pid);
+ HX((pid = getppid()) == -1, pid);
+ HX((pid = getpgid(0)) == -1, pid);
+ HX((e = getpriority(0, 0)) == -1, e);
+
+ if (pstat_getdynamic(&pdy, sizeof(pdy), 1, 0) != 1) {
+ HD(errno);
+ } else {
+ HD(pdy.psd_avg_1_min);
+ HD(pdy.psd_avg_5_min);
+ HD(pdy.psd_avg_15_min);
+ }
+
+ if (!faster) {
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1;
+ (void) nanosleep(&ts, NULL);
+ }
+
+ HX(sigpending(&sigset) == -1, sigset);
+ HX(sigprocmask(SIG_BLOCK, NULL, &sigset) == -1,
+ sigset);
+
+ HF(getentropy); /* an addr in this library */
+ HF(printf); /* an addr in libc */
+ p = (char *)&p;
+ HD(p); /* an addr on stack */
+ p = (char *)&errno;
+ HD(p); /* the addr of errno */
+
+ if (i == 0) {
+ struct sockaddr_storage ss;
+ struct statvfs stvfs;
+ struct termios tios;
+ socklen_t ssl;
+ off_t off;
+
+ /*
+ * Prime-sized mappings encourage fragmentation;
+ * thus exposing some address entropy.
+ */
+ struct mm {
+ size_t npg;
+ void *p;
+ } mm[] = {
+ { 17, MAP_FAILED }, { 3, MAP_FAILED },
+ { 11, MAP_FAILED }, { 2, MAP_FAILED },
+ { 5, MAP_FAILED }, { 3, MAP_FAILED },
+ { 7, MAP_FAILED }, { 1, MAP_FAILED },
+ { 57, MAP_FAILED }, { 3, MAP_FAILED },
+ { 131, MAP_FAILED }, { 1, MAP_FAILED },
+ };
+
+ for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
+ HX(mm[m].p = mmap(NULL,
+ mm[m].npg * pgs,
+ PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANON, -1,
+ (off_t)0), mm[m].p);
+ if (mm[m].p != MAP_FAILED) {
+ size_t mo;
+
+ /* Touch some memory... */
+ p = mm[m].p;
+ mo = cnt %
+ (mm[m].npg * pgs - 1);
+ p[mo] = 1;
+ cnt += (int)((long)(mm[m].p)
+ / pgs);
+ }
+
+ /* Check cnts and times... */
+ for (ii = 0; ii < sizeof(cl)/sizeof(cl[0]);
+ ii++) {
+ HX((e = clock_gettime(cl[ii],
+ &ts)) == -1, ts);
+ if (e != -1)
+ cnt += (int)ts.tv_nsec;
+ }
+
+ HX((e = getrusage(RUSAGE_SELF,
+ &ru)) == -1, ru);
+ if (e != -1) {
+ cnt += (int)ru.ru_utime.tv_sec;
+ cnt += (int)ru.ru_utime.tv_usec;
+ }
+ }
+
+ for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
+ if (mm[m].p != MAP_FAILED)
+ munmap(mm[m].p, mm[m].npg * pgs);
+ mm[m].p = MAP_FAILED;
+ }
+
+ HX(stat(".", &st) == -1, st);
+ HX(statvfs(".", &stvfs) == -1, stvfs);
+
+ HX(stat("/", &st) == -1, st);
+ HX(statvfs("/", &stvfs) == -1, stvfs);
+
+ HX((e = fstat(0, &st)) == -1, st);
+ if (e == -1) {
+ if (S_ISREG(st.st_mode) ||
+ S_ISFIFO(st.st_mode) ||
+ S_ISSOCK(st.st_mode)) {
+ HX(fstatvfs(0, &stvfs) == -1,
+ stvfs);
+ HX((off = lseek(0, (off_t)0,
+ SEEK_CUR)) < 0, off);
+ }
+ if (S_ISCHR(st.st_mode)) {
+ HX(tcgetattr(0, &tios) == -1,
+ tios);
+ } else if (S_ISSOCK(st.st_mode)) {
+ memset(&ss, 0, sizeof ss);
+ ssl = sizeof(ss);
+ HX(getpeername(0,
+ (void *)&ss, &ssl) == -1,
+ ss);
+ }
+ }
+
+ HX((e = getrusage(RUSAGE_CHILDREN,
+ &ru)) == -1, ru);
+ if (e != -1) {
+ cnt += (int)ru.ru_utime.tv_sec;
+ cnt += (int)ru.ru_utime.tv_usec;
+ }
+ } else {
+ /* Subsequent hashes absorb previous result */
+ HD(results);
+ }
+
+ HX((e = gettimeofday(&tv, NULL)) == -1, tv);
+ if (e != -1) {
+ cnt += (int)tv.tv_sec;
+ cnt += (int)tv.tv_usec;
+ }
+
+ HD(cnt);
+ }
+ SHA512_Final(results, &ctx);
+ memcpy((char *)buf + i, results, MINIMUM(sizeof(results), len - i));
+ i += MINIMUM(sizeof(results), len - i);
+ }
+ explicit_bzero(&ctx, sizeof ctx);
+ explicit_bzero(results, sizeof results);
+ errno = save_errno;
+ return (0); /* satisfied */
+}
diff --git a/crypto/libressl/crypto/compat/getentropy_linux.c b/crypto/libressl/crypto/compat/getentropy_linux.c
new file mode 100644
index 0000000..bc7a6be
--- /dev/null
+++ b/crypto/libressl/crypto/compat/getentropy_linux.c
@@ -0,0 +1,525 @@
+/* $OpenBSD: getentropy_linux.c,v 1.47 2020/05/17 14:44:20 deraadt Exp $ */
+
+/*
+ * Copyright (c) 2014 Theo de Raadt <deraadt@openbsd.org>
+ * Copyright (c) 2014 Bob Beck <beck@obtuse.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Emulation of getentropy(2) as documented at:
+ * http://man.openbsd.org/getentropy.2
+ */
+
+#define _POSIX_C_SOURCE 199309L
+#define _GNU_SOURCE 1
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/ioctl.h>
+#include <sys/resource.h>
+#include <sys/syscall.h>
+#ifdef SYS__sysctl
+#include <linux/sysctl.h>
+#endif
+#include <sys/statvfs.h>
+#include <sys/socket.h>
+#include <sys/mount.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <link.h>
+#include <termios.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <time.h>
+#include <openssl/sha.h>
+
+#include <linux/types.h>
+#include <linux/random.h>
+#ifdef HAVE_GETAUXVAL
+#include <sys/auxv.h>
+#endif
+#include <sys/vfs.h>
+
+#define REPEAT 5
+#define MINIMUM(a, b) (((a) < (b)) ? (a) : (b))
+
+#define HX(a, b) \
+ do { \
+ if ((a)) \
+ HD(errno); \
+ else \
+ HD(b); \
+ } while (0)
+
+#define HR(x, l) (SHA512_Update(&ctx, (char *)(x), (l)))
+#define HD(x) (SHA512_Update(&ctx, (char *)&(x), sizeof (x)))
+#define HF(x) (SHA512_Update(&ctx, (char *)&(x), sizeof (void*)))
+
+int getentropy(void *buf, size_t len);
+
+#if defined(SYS_getrandom) && defined(GRND_NONBLOCK)
+static int getentropy_getrandom(void *buf, size_t len);
+#endif
+static int getentropy_urandom(void *buf, size_t len);
+#ifdef SYS__sysctl
+static int getentropy_sysctl(void *buf, size_t len);
+#endif
+static int getentropy_fallback(void *buf, size_t len);
+static int getentropy_phdr(struct dl_phdr_info *info, size_t size, void *data);
+
+int
+getentropy(void *buf, size_t len)
+{
+ int ret = -1;
+
+ if (len > 256) {
+ errno = EIO;
+ return (-1);
+ }
+
+#if defined(SYS_getrandom) && defined(GRND_NONBLOCK)
+ /*
+ * Try descriptor-less getrandom(), in non-blocking mode.
+ *
+ * The design of Linux getrandom is broken. It has an
+ * uninitialized phase coupled with blocking behaviour, which
+ * is unacceptable from within a library at boot time without
+ * possible recovery. See http://bugs.python.org/issue26839#msg267745
+ */
+ ret = getentropy_getrandom(buf, len);
+ if (ret != -1)
+ return (ret);
+#endif
+
+ /*
+ * Try to get entropy with /dev/urandom
+ *
+ * This can fail if the process is inside a chroot or if file
+ * descriptors are exhausted.
+ */
+ ret = getentropy_urandom(buf, len);
+ if (ret != -1)
+ return (ret);
+
+#ifdef SYS__sysctl
+ /*
+ * Try to use sysctl CTL_KERN, KERN_RANDOM, RANDOM_UUID.
+ * sysctl is a failsafe API, so it guarantees a result. This
+ * should work inside a chroot, or when file descriptors are
+ * exhausted.
+ *
+ * However this can fail if the Linux kernel removes support
+ * for sysctl. Starting in 2007, there have been efforts to
+ * deprecate the sysctl API/ABI, and push callers towards use
+ * of the chroot-unavailable fd-using /proc mechanism --
+ * essentially the same problems as /dev/urandom.
+ *
+ * Numerous setbacks have been encountered in their deprecation
+ * schedule, so as of June 2014 the kernel ABI still exists on
+ * most Linux architectures. The sysctl() stub in libc is missing
+ * on some systems. There are also reports that some kernels
+ * spew messages to the console.
+ */
+ ret = getentropy_sysctl(buf, len);
+ if (ret != -1)
+ return (ret);
+#endif /* SYS__sysctl */
+
+ /*
+ * Entropy collection via /dev/urandom and sysctl have failed.
+ *
+ * No other API exists for collecting entropy. See the large
+ * comment block above.
+ *
+ * We have very few options:
+ * - Even syslog_r is unsafe to call at this low level, so
+ * there is no way to alert the user or program.
+ * - Cannot call abort() because some systems have unsafe
+ * corefiles.
+ * - Could raise(SIGKILL) resulting in silent program termination.
+ * - Return EIO, to hint that arc4random's stir function
+ * should raise(SIGKILL)
+ * - Do the best under the circumstances....
+ *
+ * This code path exists to bring light to the issue that Linux
+ * still does not provide a failsafe API for entropy collection.
+ *
+ * We hope this demonstrates that Linux should either retain their
+ * sysctl ABI, or consider providing a new failsafe API which
+ * works in a chroot or when file descriptors are exhausted.
+ */
+#undef FAIL_INSTEAD_OF_TRYING_FALLBACK
+#ifdef FAIL_INSTEAD_OF_TRYING_FALLBACK
+ raise(SIGKILL);
+#endif
+ ret = getentropy_fallback(buf, len);
+ if (ret != -1)
+ return (ret);
+
+ errno = EIO;
+ return (ret);
+}
+
+#if defined(SYS_getrandom) && defined(GRND_NONBLOCK)
+static int
+getentropy_getrandom(void *buf, size_t len)
+{
+ int pre_errno = errno;
+ int ret;
+ if (len > 256)
+ return (-1);
+ do {
+ ret = syscall(SYS_getrandom, buf, len, GRND_NONBLOCK);
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret != len)
+ return (-1);
+ errno = pre_errno;
+ return (0);
+}
+#endif
+
+static int
+getentropy_urandom(void *buf, size_t len)
+{
+ struct stat st;
+ size_t i;
+ int fd, cnt, flags;
+ int save_errno = errno;
+
+start:
+
+ flags = O_RDONLY;
+#ifdef O_NOFOLLOW
+ flags |= O_NOFOLLOW;
+#endif
+#ifdef O_CLOEXEC
+ flags |= O_CLOEXEC;
+#endif
+ fd = open("/dev/urandom", flags, 0);
+ if (fd == -1) {
+ if (errno == EINTR)
+ goto start;
+ goto nodevrandom;
+ }
+#ifndef O_CLOEXEC
+ fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
+#endif
+
+ /* Lightly verify that the device node looks sane */
+ if (fstat(fd, &st) == -1 || !S_ISCHR(st.st_mode)) {
+ close(fd);
+ goto nodevrandom;
+ }
+ if (ioctl(fd, RNDGETENTCNT, &cnt) == -1) {
+ close(fd);
+ goto nodevrandom;
+ }
+ for (i = 0; i < len; ) {
+ size_t wanted = len - i;
+ ssize_t ret = read(fd, (char *)buf + i, wanted);
+
+ if (ret == -1) {
+ if (errno == EAGAIN || errno == EINTR)
+ continue;
+ close(fd);
+ goto nodevrandom;
+ }
+ i += ret;
+ }
+ close(fd);
+ errno = save_errno;
+ return (0); /* satisfied */
+nodevrandom:
+ errno = EIO;
+ return (-1);
+}
+
+#ifdef SYS__sysctl
+static int
+getentropy_sysctl(void *buf, size_t len)
+{
+ static int mib[] = { CTL_KERN, KERN_RANDOM, RANDOM_UUID };
+ size_t i;
+ int save_errno = errno;
+
+ for (i = 0; i < len; ) {
+ size_t chunk = MINIMUM(len - i, 16);
+
+ /* SYS__sysctl because some systems already removed sysctl() */
+ struct __sysctl_args args = {
+ .name = mib,
+ .nlen = 3,
+ .oldval = (char *)buf + i,
+ .oldlenp = &chunk,
+ };
+ if (syscall(SYS__sysctl, &args) != 0)
+ goto sysctlfailed;
+ i += chunk;
+ }
+ errno = save_errno;
+ return (0); /* satisfied */
+sysctlfailed:
+ errno = EIO;
+ return (-1);
+}
+#endif /* SYS__sysctl */
+
+static const int cl[] = {
+ CLOCK_REALTIME,
+#ifdef CLOCK_MONOTONIC
+ CLOCK_MONOTONIC,
+#endif
+#ifdef CLOCK_MONOTONIC_RAW
+ CLOCK_MONOTONIC_RAW,
+#endif
+#ifdef CLOCK_TAI
+ CLOCK_TAI,
+#endif
+#ifdef CLOCK_VIRTUAL
+ CLOCK_VIRTUAL,
+#endif
+#ifdef CLOCK_UPTIME
+ CLOCK_UPTIME,
+#endif
+#ifdef CLOCK_PROCESS_CPUTIME_ID
+ CLOCK_PROCESS_CPUTIME_ID,
+#endif
+#ifdef CLOCK_THREAD_CPUTIME_ID
+ CLOCK_THREAD_CPUTIME_ID,
+#endif
+};
+
+static int
+getentropy_phdr(struct dl_phdr_info *info, size_t size, void *data)
+{
+ SHA512_CTX *ctx = data;
+
+ SHA512_Update(ctx, &info->dlpi_addr, sizeof (info->dlpi_addr));
+ return (0);
+}
+
+static int
+getentropy_fallback(void *buf, size_t len)
+{
+ uint8_t results[SHA512_DIGEST_LENGTH];
+ int save_errno = errno, e, pgs = getpagesize(), faster = 0, repeat;
+ static int cnt;
+ struct timespec ts;
+ struct timeval tv;
+ struct rusage ru;
+ sigset_t sigset;
+ struct stat st;
+ SHA512_CTX ctx;
+ static pid_t lastpid;
+ pid_t pid;
+ size_t i, ii, m;
+ char *p;
+
+ pid = getpid();
+ if (lastpid == pid) {
+ faster = 1;
+ repeat = 2;
+ } else {
+ faster = 0;
+ lastpid = pid;
+ repeat = REPEAT;
+ }
+ for (i = 0; i < len; ) {
+ int j;
+ SHA512_Init(&ctx);
+ for (j = 0; j < repeat; j++) {
+ HX((e = gettimeofday(&tv, NULL)) == -1, tv);
+ if (e != -1) {
+ cnt += (int)tv.tv_sec;
+ cnt += (int)tv.tv_usec;
+ }
+
+ dl_iterate_phdr(getentropy_phdr, &ctx);
+
+ for (ii = 0; ii < sizeof(cl)/sizeof(cl[0]); ii++)
+ HX(clock_gettime(cl[ii], &ts) == -1, ts);
+
+ HX((pid = getpid()) == -1, pid);
+ HX((pid = getsid(pid)) == -1, pid);
+ HX((pid = getppid()) == -1, pid);
+ HX((pid = getpgid(0)) == -1, pid);
+ HX((e = getpriority(0, 0)) == -1, e);
+
+ if (!faster) {
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1;
+ (void) nanosleep(&ts, NULL);
+ }
+
+ HX(sigpending(&sigset) == -1, sigset);
+ HX(sigprocmask(SIG_BLOCK, NULL, &sigset) == -1,
+ sigset);
+
+ HF(getentropy); /* an addr in this library */
+ HF(printf); /* an addr in libc */
+ p = (char *)&p;
+ HD(p); /* an addr on stack */
+ p = (char *)&errno;
+ HD(p); /* the addr of errno */
+
+ if (i == 0) {
+ struct sockaddr_storage ss;
+ struct statvfs stvfs;
+ struct termios tios;
+ struct statfs stfs;
+ socklen_t ssl;
+ off_t off;
+
+ /*
+ * Prime-sized mappings encourage fragmentation;
+ * thus exposing some address entropy.
+ */
+ struct mm {
+ size_t npg;
+ void *p;
+ } mm[] = {
+ { 17, MAP_FAILED }, { 3, MAP_FAILED },
+ { 11, MAP_FAILED }, { 2, MAP_FAILED },
+ { 5, MAP_FAILED }, { 3, MAP_FAILED },
+ { 7, MAP_FAILED }, { 1, MAP_FAILED },
+ { 57, MAP_FAILED }, { 3, MAP_FAILED },
+ { 131, MAP_FAILED }, { 1, MAP_FAILED },
+ };
+
+ for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
+ HX(mm[m].p = mmap(NULL,
+ mm[m].npg * pgs,
+ PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANON, -1,
+ (off_t)0), mm[m].p);
+ if (mm[m].p != MAP_FAILED) {
+ size_t mo;
+
+ /* Touch some memory... */
+ p = mm[m].p;
+ mo = cnt %
+ (mm[m].npg * pgs - 1);
+ p[mo] = 1;
+ cnt += (int)((long)(mm[m].p)
+ / pgs);
+ }
+
+ /* Check cnts and times... */
+ for (ii = 0; ii < sizeof(cl)/sizeof(cl[0]);
+ ii++) {
+ HX((e = clock_gettime(cl[ii],
+ &ts)) == -1, ts);
+ if (e != -1)
+ cnt += (int)ts.tv_nsec;
+ }
+
+ HX((e = getrusage(RUSAGE_SELF,
+ &ru)) == -1, ru);
+ if (e != -1) {
+ cnt += (int)ru.ru_utime.tv_sec;
+ cnt += (int)ru.ru_utime.tv_usec;
+ }
+ }
+
+ for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
+ if (mm[m].p != MAP_FAILED)
+ munmap(mm[m].p, mm[m].npg * pgs);
+ mm[m].p = MAP_FAILED;
+ }
+
+ HX(stat(".", &st) == -1, st);
+ HX(statvfs(".", &stvfs) == -1, stvfs);
+ HX(statfs(".", &stfs) == -1, stfs);
+
+ HX(stat("/", &st) == -1, st);
+ HX(statvfs("/", &stvfs) == -1, stvfs);
+ HX(statfs("/", &stfs) == -1, stfs);
+
+ HX((e = fstat(0, &st)) == -1, st);
+ if (e == -1) {
+ if (S_ISREG(st.st_mode) ||
+ S_ISFIFO(st.st_mode) ||
+ S_ISSOCK(st.st_mode)) {
+ HX(fstatvfs(0, &stvfs) == -1,
+ stvfs);
+ HX(fstatfs(0, &stfs) == -1,
+ stfs);
+ HX((off = lseek(0, (off_t)0,
+ SEEK_CUR)) < 0, off);
+ }
+ if (S_ISCHR(st.st_mode)) {
+ HX(tcgetattr(0, &tios) == -1,
+ tios);
+ } else if (S_ISSOCK(st.st_mode)) {
+ memset(&ss, 0, sizeof ss);
+ ssl = sizeof(ss);
+ HX(getpeername(0,
+ (void *)&ss, &ssl) == -1,
+ ss);
+ }
+ }
+
+ HX((e = getrusage(RUSAGE_CHILDREN,
+ &ru)) == -1, ru);
+ if (e != -1) {
+ cnt += (int)ru.ru_utime.tv_sec;
+ cnt += (int)ru.ru_utime.tv_usec;
+ }
+ } else {
+ /* Subsequent hashes absorb previous result */
+ HD(results);
+ }
+
+ HX((e = gettimeofday(&tv, NULL)) == -1, tv);
+ if (e != -1) {
+ cnt += (int)tv.tv_sec;
+ cnt += (int)tv.tv_usec;
+ }
+
+ HD(cnt);
+ }
+#ifdef HAVE_GETAUXVAL
+#ifdef AT_RANDOM
+ /* Not as random as you think but we take what we are given */
+ p = (char *) getauxval(AT_RANDOM);
+ if (p)
+ HR(p, 16);
+#endif
+#ifdef AT_SYSINFO_EHDR
+ p = (char *) getauxval(AT_SYSINFO_EHDR);
+ if (p)
+ HR(p, pgs);
+#endif
+#ifdef AT_BASE
+ p = (char *) getauxval(AT_BASE);
+ if (p)
+ HD(p);
+#endif
+#endif
+
+ SHA512_Final(results, &ctx);
+ memcpy((char *)buf + i, results, MINIMUM(sizeof(results), len - i));
+ i += MINIMUM(sizeof(results), len - i);
+ }
+ explicit_bzero(&ctx, sizeof ctx);
+ explicit_bzero(results, sizeof results);
+ errno = save_errno;
+ return (0); /* satisfied */
+}
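
A minimal caller-side sketch (not part of the diff), assuming getentropy() is declared in <unistd.h> as on glibc 2.25+ and OpenBSD; when building against this compat file the prototype instead comes from the declaration near the top of getentropy_linux.c. Requests are capped at 256 bytes, matching the check enforced above.

#include <stdio.h>
#include <unistd.h>	/* getentropy() prototype -- assumed; the compat file declares its own */

int
main(void)
{
	unsigned char seed[32];

	/* 32 bytes is well under the 256-byte limit enforced above */
	if (getentropy(seed, sizeof(seed)) == -1) {
		perror("getentropy");
		return 1;
	}
	/* seed[] now holds entropy suitable for keying a ChaCha20-based CSPRNG */
	return 0;
}
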
diff --git a/crypto/libressl/crypto/compat/getentropy_netbsd.c b/crypto/libressl/crypto/compat/getentropy_netbsd.c
new file mode 100644
index 0000000..5dc8959
--- /dev/null
+++ b/crypto/libressl/crypto/compat/getentropy_netbsd.c
@@ -0,0 +1,62 @@
+/* $OpenBSD: getentropy_netbsd.c,v 1.4 2020/10/12 22:08:33 deraadt Exp $ */
+
+/*
+ * Copyright (c) 2014 Pawel Jakub Dawidek <pjd@FreeBSD.org>
+ * Copyright (c) 2014 Brent Cook <bcook@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Emulation of getentropy(2) as documented at:
+ * http://man.openbsd.org/getentropy.2
+ */
+
+#include <sys/types.h>
+#include <sys/sysctl.h>
+
+#include <errno.h>
+#include <stddef.h>
+
+/*
+ * Derived from lib/libc/gen/arc4random.c from FreeBSD.
+ */
+static size_t
+getentropy_sysctl(u_char *buf, size_t size)
+{
+ const int mib[2] = { CTL_KERN, KERN_ARND };
+ size_t len, done;
+
+ done = 0;
+
+ do {
+ len = size;
+ if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
+ return (done);
+ done += len;
+ buf += len;
+ size -= len;
+ } while (size > 0);
+
+ return (done);
+}
+
+int
+getentropy(void *buf, size_t len)
+{
+ if (len <= 256 &&
+ getentropy_sysctl(buf, len) == len) {
+ return (0);
+ }
+
+ errno = EIO;
+ return (-1);
+}
diff --git a/crypto/libressl/crypto/compat/getentropy_osx.c b/crypto/libressl/crypto/compat/getentropy_osx.c
new file mode 100644
index 0000000..5d4067b
--- /dev/null
+++ b/crypto/libressl/crypto/compat/getentropy_osx.c
@@ -0,0 +1,417 @@
+/* $OpenBSD: getentropy_osx.c,v 1.13 2020/05/17 14:44:20 deraadt Exp $ */
+
+/*
+ * Copyright (c) 2014 Theo de Raadt <deraadt@openbsd.org>
+ * Copyright (c) 2014 Bob Beck <beck@obtuse.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Emulation of getentropy(2) as documented at:
+ * http://man.openbsd.org/getentropy.2
+ */
+
+#include <TargetConditionals.h>
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/ioctl.h>
+#include <sys/resource.h>
+#include <sys/syscall.h>
+#include <sys/sysctl.h>
+#include <sys/statvfs.h>
+#include <sys/socket.h>
+#include <sys/mount.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <termios.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <time.h>
+#include <mach/mach_time.h>
+#include <mach/mach_host.h>
+#include <mach/host_info.h>
+#if TARGET_OS_OSX
+#include <sys/socketvar.h>
+#include <sys/vmmeter.h>
+#endif
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#if TARGET_OS_OSX
+#include <netinet/udp.h>
+#include <netinet/ip_var.h>
+#include <netinet/tcp_var.h>
+#include <netinet/udp_var.h>
+#endif
+#include <CommonCrypto/CommonDigest.h>
+#define SHA512_Update(a, b, c) (CC_SHA512_Update((a), (b), (c)))
+#define SHA512_Init(xxx) (CC_SHA512_Init((xxx)))
+#define SHA512_Final(xxx, yyy) (CC_SHA512_Final((xxx), (yyy)))
+#define SHA512_CTX CC_SHA512_CTX
+#define SHA512_DIGEST_LENGTH CC_SHA512_DIGEST_LENGTH
+
+#define REPEAT 5
+#define MINIMUM(a, b) (((a) < (b)) ? (a) : (b))
+
+#define HX(a, b) \
+ do { \
+ if ((a)) \
+ HD(errno); \
+ else \
+ HD(b); \
+ } while (0)
+
+#define HR(x, l) (SHA512_Update(&ctx, (char *)(x), (l)))
+#define HD(x) (SHA512_Update(&ctx, (char *)&(x), sizeof (x)))
+#define HF(x) (SHA512_Update(&ctx, (char *)&(x), sizeof (void*)))
+
+int getentropy(void *buf, size_t len);
+
+static int getentropy_urandom(void *buf, size_t len);
+static int getentropy_fallback(void *buf, size_t len);
+
+int
+getentropy(void *buf, size_t len)
+{
+ int ret = -1;
+
+ if (len > 256) {
+ errno = EIO;
+ return (-1);
+ }
+
+ /*
+ * Try to get entropy with /dev/urandom
+ *
+ * This can fail if the process is inside a chroot or if file
+ * descriptors are exhausted.
+ */
+ ret = getentropy_urandom(buf, len);
+ if (ret != -1)
+ return (ret);
+
+ /*
+ * Entropy collection via /dev/urandom and sysctl have failed.
+ *
+ * No other API exists for collecting entropy, and we have
+ * no failsafe way to get it on OSX that is not sensitive
+ * to resource exhaustion.
+ *
+ * We have very few options:
+ * - Even syslog_r is unsafe to call at this low level, so
+ * there is no way to alert the user or program.
+ * - Cannot call abort() because some systems have unsafe
+ * corefiles.
+ * - Could raise(SIGKILL) resulting in silent program termination.
+ * - Return EIO, to hint that arc4random's stir function
+ * should raise(SIGKILL)
+ * - Do the best under the circumstances....
+ *
+ * This code path exists to bring light to the issue that OSX
+ * does not provide a failsafe API for entropy collection.
+ *
+ * We hope this demonstrates that OSX should consider
+ * providing a new failsafe API which works in a chroot or
+ * when file descriptors are exhausted.
+ */
+#undef FAIL_INSTEAD_OF_TRYING_FALLBACK
+#ifdef FAIL_INSTEAD_OF_TRYING_FALLBACK
+ raise(SIGKILL);
+#endif
+ ret = getentropy_fallback(buf, len);
+ if (ret != -1)
+ return (ret);
+
+ errno = EIO;
+ return (ret);
+}
+
+static int
+getentropy_urandom(void *buf, size_t len)
+{
+ struct stat st;
+ size_t i;
+ int fd, flags;
+ int save_errno = errno;
+
+start:
+
+ flags = O_RDONLY;
+#ifdef O_NOFOLLOW
+ flags |= O_NOFOLLOW;
+#endif
+#ifdef O_CLOEXEC
+ flags |= O_CLOEXEC;
+#endif
+ fd = open("/dev/urandom", flags, 0);
+ if (fd == -1) {
+ if (errno == EINTR)
+ goto start;
+ goto nodevrandom;
+ }
+#ifndef O_CLOEXEC
+ fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
+#endif
+
+ /* Lightly verify that the device node looks sane */
+ if (fstat(fd, &st) == -1 || !S_ISCHR(st.st_mode)) {
+ close(fd);
+ goto nodevrandom;
+ }
+ for (i = 0; i < len; ) {
+ size_t wanted = len - i;
+ ssize_t ret = read(fd, (char *)buf + i, wanted);
+
+ if (ret == -1) {
+ if (errno == EAGAIN || errno == EINTR)
+ continue;
+ close(fd);
+ goto nodevrandom;
+ }
+ i += ret;
+ }
+ close(fd);
+ errno = save_errno;
+ return (0); /* satisfied */
+nodevrandom:
+ errno = EIO;
+ return (-1);
+}
+
+#if TARGET_OS_OSX
+static int tcpmib[] = { CTL_NET, AF_INET, IPPROTO_TCP, TCPCTL_STATS };
+static int udpmib[] = { CTL_NET, AF_INET, IPPROTO_UDP, UDPCTL_STATS };
+static int ipmib[] = { CTL_NET, AF_INET, IPPROTO_IP, IPCTL_STATS };
+#endif
+static int kmib[] = { CTL_KERN, KERN_USRSTACK };
+static int hwmib[] = { CTL_HW, HW_USERMEM };
+
+static int
+getentropy_fallback(void *buf, size_t len)
+{
+ uint8_t results[SHA512_DIGEST_LENGTH];
+ int save_errno = errno, e, pgs = getpagesize(), faster = 0, repeat;
+ static int cnt;
+ struct timespec ts;
+ struct timeval tv;
+ struct rusage ru;
+ sigset_t sigset;
+ struct stat st;
+ SHA512_CTX ctx;
+ static pid_t lastpid;
+ pid_t pid;
+ size_t i, ii, m;
+ char *p;
+#if TARGET_OS_OSX
+ struct tcpstat tcpstat;
+ struct udpstat udpstat;
+ struct ipstat ipstat;
+#endif
+ u_int64_t mach_time;
+ unsigned int idata;
+ void *addr;
+
+ pid = getpid();
+ if (lastpid == pid) {
+ faster = 1;
+ repeat = 2;
+ } else {
+ faster = 0;
+ lastpid = pid;
+ repeat = REPEAT;
+ }
+ for (i = 0; i < len; ) {
+ int j;
+ SHA512_Init(&ctx);
+ for (j = 0; j < repeat; j++) {
+ HX((e = gettimeofday(&tv, NULL)) == -1, tv);
+ if (e != -1) {
+ cnt += (int)tv.tv_sec;
+ cnt += (int)tv.tv_usec;
+ }
+
+ mach_time = mach_absolute_time();
+ HD(mach_time);
+
+ ii = sizeof(addr);
+ HX(sysctl(kmib, sizeof(kmib) / sizeof(kmib[0]),
+ &addr, &ii, NULL, 0) == -1, addr);
+
+ ii = sizeof(idata);
+ HX(sysctl(hwmib, sizeof(hwmib) / sizeof(hwmib[0]),
+ &idata, &ii, NULL, 0) == -1, idata);
+
+#if TARGET_OS_OSX
+ ii = sizeof(tcpstat);
+ HX(sysctl(tcpmib, sizeof(tcpmib) / sizeof(tcpmib[0]),
+ &tcpstat, &ii, NULL, 0) == -1, tcpstat);
+
+ ii = sizeof(udpstat);
+ HX(sysctl(udpmib, sizeof(udpmib) / sizeof(udpmib[0]),
+ &udpstat, &ii, NULL, 0) == -1, udpstat);
+
+ ii = sizeof(ipstat);
+ HX(sysctl(ipmib, sizeof(ipmib) / sizeof(ipmib[0]),
+ &ipstat, &ii, NULL, 0) == -1, ipstat);
+#endif
+
+ HX((pid = getpid()) == -1, pid);
+ HX((pid = getsid(pid)) == -1, pid);
+ HX((pid = getppid()) == -1, pid);
+ HX((pid = getpgid(0)) == -1, pid);
+ HX((e = getpriority(0, 0)) == -1, e);
+
+ if (!faster) {
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1;
+ (void) nanosleep(&ts, NULL);
+ }
+
+ HX(sigpending(&sigset) == -1, sigset);
+ HX(sigprocmask(SIG_BLOCK, NULL, &sigset) == -1,
+ sigset);
+
+ HF(getentropy); /* an addr in this library */
+ HF(printf); /* an addr in libc */
+ p = (char *)&p;
+ HD(p); /* an addr on stack */
+ p = (char *)&errno;
+ HD(p); /* the addr of errno */
+
+ if (i == 0) {
+ struct sockaddr_storage ss;
+ struct statvfs stvfs;
+ struct termios tios;
+ struct statfs stfs;
+ socklen_t ssl;
+ off_t off;
+
+ /*
+ * Prime-sized mappings encourage fragmentation;
+ * thus exposing some address entropy.
+ */
+ struct mm {
+ size_t npg;
+ void *p;
+ } mm[] = {
+ { 17, MAP_FAILED }, { 3, MAP_FAILED },
+ { 11, MAP_FAILED }, { 2, MAP_FAILED },
+ { 5, MAP_FAILED }, { 3, MAP_FAILED },
+ { 7, MAP_FAILED }, { 1, MAP_FAILED },
+ { 57, MAP_FAILED }, { 3, MAP_FAILED },
+ { 131, MAP_FAILED }, { 1, MAP_FAILED },
+ };
+
+ for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
+ HX(mm[m].p = mmap(NULL,
+ mm[m].npg * pgs,
+ PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANON, -1,
+ (off_t)0), mm[m].p);
+ if (mm[m].p != MAP_FAILED) {
+ size_t mo;
+
+ /* Touch some memory... */
+ p = mm[m].p;
+ mo = cnt %
+ (mm[m].npg * pgs - 1);
+ p[mo] = 1;
+ cnt += (int)((long)(mm[m].p)
+ / pgs);
+ }
+
+ /* Check cnts and times... */
+ mach_time = mach_absolute_time();
+ HD(mach_time);
+ cnt += (int)mach_time;
+
+ HX((e = getrusage(RUSAGE_SELF,
+ &ru)) == -1, ru);
+ if (e != -1) {
+ cnt += (int)ru.ru_utime.tv_sec;
+ cnt += (int)ru.ru_utime.tv_usec;
+ }
+ }
+
+ for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
+ if (mm[m].p != MAP_FAILED)
+ munmap(mm[m].p, mm[m].npg * pgs);
+ mm[m].p = MAP_FAILED;
+ }
+
+ HX(stat(".", &st) == -1, st);
+ HX(statvfs(".", &stvfs) == -1, stvfs);
+ HX(statfs(".", &stfs) == -1, stfs);
+
+ HX(stat("/", &st) == -1, st);
+ HX(statvfs("/", &stvfs) == -1, stvfs);
+ HX(statfs("/", &stfs) == -1, stfs);
+
+ HX((e = fstat(0, &st)) == -1, st);
+ if (e == -1) {
+ if (S_ISREG(st.st_mode) ||
+ S_ISFIFO(st.st_mode) ||
+ S_ISSOCK(st.st_mode)) {
+ HX(fstatvfs(0, &stvfs) == -1,
+ stvfs);
+ HX(fstatfs(0, &stfs) == -1,
+ stfs);
+ HX((off = lseek(0, (off_t)0,
+ SEEK_CUR)) < 0, off);
+ }
+ if (S_ISCHR(st.st_mode)) {
+ HX(tcgetattr(0, &tios) == -1,
+ tios);
+ } else if (S_ISSOCK(st.st_mode)) {
+ memset(&ss, 0, sizeof ss);
+ ssl = sizeof(ss);
+ HX(getpeername(0,
+ (void *)&ss, &ssl) == -1,
+ ss);
+ }
+ }
+
+ HX((e = getrusage(RUSAGE_CHILDREN,
+ &ru)) == -1, ru);
+ if (e != -1) {
+ cnt += (int)ru.ru_utime.tv_sec;
+ cnt += (int)ru.ru_utime.tv_usec;
+ }
+ } else {
+ /* Subsequent hashes absorb previous result */
+ HD(results);
+ }
+
+ HX((e = gettimeofday(&tv, NULL)) == -1, tv);
+ if (e != -1) {
+ cnt += (int)tv.tv_sec;
+ cnt += (int)tv.tv_usec;
+ }
+
+ HD(cnt);
+ }
+
+ SHA512_Final(results, &ctx);
+ memcpy((char *)buf + i, results, MINIMUM(sizeof(results), len - i));
+ i += MINIMUM(sizeof(results), len - i);
+ }
+ explicit_bzero(&ctx, sizeof ctx);
+ explicit_bzero(results, sizeof results);
+ errno = save_errno;
+ return (0); /* satisfied */
+}
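
Every probe in the fallback loop above goes through the same two helpers: HD() hashes a value into the SHA-512 state, and HX() hashes errno when the probe failed and the probed value when it succeeded, so each call contributes input either way. A minimal self-contained sketch of that pattern; toy_update() and acc are hypothetical stand-ins for SHA512_Update() and its context, not part of this patch:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/time.h>

static unsigned long acc;                       /* stand-in for SHA512_CTX */

static void
toy_update(const void *p, size_t n)             /* stand-in for SHA512_Update */
{
	const unsigned char *b = p;

	while (n--)
		acc = acc * 131 + *b++;
}

/* hash a datum */
#define HD(x)    toy_update(&(x), sizeof(x))
/* hash errno if the probe failed, the probed value otherwise */
#define HX(a, b) do { if ((a)) HD(errno); else HD(b); } while (0)

int
main(void)
{
	struct timeval tv;
	int e;

	HX((e = gettimeofday(&tv, NULL)) == -1, tv);
	printf("toy digest after one probe: %lx\n", acc);
	return 0;
}
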
diff --git a/crypto/libressl/crypto/compat/getentropy_solaris.c b/crypto/libressl/crypto/compat/getentropy_solaris.c
new file mode 100644
index 0000000..cf5b9bf
--- /dev/null
+++ b/crypto/libressl/crypto/compat/getentropy_solaris.c
@@ -0,0 +1,422 @@
+/* $OpenBSD: getentropy_solaris.c,v 1.14 2020/05/17 14:44:20 deraadt Exp $ */
+
+/*
+ * Copyright (c) 2014 Theo de Raadt <deraadt@openbsd.org>
+ * Copyright (c) 2014 Bob Beck <beck@obtuse.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Emulation of getentropy(2) as documented at:
+ * http://man.openbsd.org/getentropy.2
+ */
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/ioctl.h>
+#include <sys/resource.h>
+#include <sys/syscall.h>
+#include <sys/statvfs.h>
+#include <sys/socket.h>
+#include <sys/mount.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <link.h>
+#include <termios.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <time.h>
+#include <sys/sha2.h>
+#define SHA512_Init SHA512Init
+#define SHA512_Update SHA512Update
+#define SHA512_Final SHA512Final
+
+#include <sys/vfs.h>
+#include <sys/statfs.h>
+#include <sys/loadavg.h>
+
+#define REPEAT 5
+#define MINIMUM(a, b) (((a) < (b)) ? (a) : (b))
+
+#define HX(a, b) \
+ do { \
+ if ((a)) \
+ HD(errno); \
+ else \
+ HD(b); \
+ } while (0)
+
+#define HR(x, l) (SHA512_Update(&ctx, (char *)(x), (l)))
+#define HD(x) (SHA512_Update(&ctx, (char *)&(x), sizeof (x)))
+#define HF(x) (SHA512_Update(&ctx, (char *)&(x), sizeof (void*)))
+
+int getentropy(void *buf, size_t len);
+
+static int getentropy_urandom(void *buf, size_t len, const char *path,
+ int devfscheck);
+static int getentropy_fallback(void *buf, size_t len);
+static int getentropy_phdr(struct dl_phdr_info *info, size_t size, void *data);
+
+int
+getentropy(void *buf, size_t len)
+{
+ int ret = -1;
+
+ if (len > 256) {
+ errno = EIO;
+ return (-1);
+ }
+
+ /*
+ * Try to get entropy with /dev/urandom
+ *
+ * Solaris provides /dev/urandom as a symbolic link to
+ * /devices/pseudo/random@0:urandom which is provided by
+ * a devfs filesystem. Best practice is to use O_NOFOLLOW,
+ * so we must try the unpublished name directly.
+ *
+ * This can fail if the process is inside a chroot which lacks
+ * the devfs mount, or if file descriptors are exhausted.
+ */
+ ret = getentropy_urandom(buf, len,
+ "/devices/pseudo/random@0:urandom", 1);
+ if (ret != -1)
+ return (ret);
+
+ /*
+	 * Unfortunately, chroot spaces on Solaris are sometimes set up
+	 * with a direct device node of the well-known /dev/urandom name
+ * (perhaps to avoid dragging all of devfs into the space).
+ *
+ * This can fail if the process is inside a chroot or if file
+ * descriptors are exhausted.
+ */
+ ret = getentropy_urandom(buf, len, "/dev/urandom", 0);
+ if (ret != -1)
+ return (ret);
+
+ /*
+ * Entropy collection via /dev/urandom has failed.
+ *
+ * No other API exists for collecting entropy, and we have
+ * no failsafe way to get it on Solaris that is not sensitive
+ * to resource exhaustion.
+ *
+ * We have very few options:
+ * - Even syslog_r is unsafe to call at this low level, so
+ * there is no way to alert the user or program.
+ * - Cannot call abort() because some systems have unsafe
+ * corefiles.
+ * - Could raise(SIGKILL) resulting in silent program termination.
+ * - Return EIO, to hint that arc4random's stir function
+ * should raise(SIGKILL)
+ * - Do the best under the circumstances....
+ *
+ * This code path exists to bring light to the issue that Solaris
+ * does not provide a failsafe API for entropy collection.
+ *
+ * We hope this demonstrates that Solaris should consider
+ * providing a new failsafe API which works in a chroot or
+ * when file descriptors are exhausted.
+ */
+#undef FAIL_INSTEAD_OF_TRYING_FALLBACK
+#ifdef FAIL_INSTEAD_OF_TRYING_FALLBACK
+ raise(SIGKILL);
+#endif
+ ret = getentropy_fallback(buf, len);
+ if (ret != -1)
+ return (ret);
+
+ errno = EIO;
+ return (ret);
+}
+
+static int
+getentropy_urandom(void *buf, size_t len, const char *path, int devfscheck)
+{
+ struct stat st;
+ size_t i;
+ int fd, flags;
+ int save_errno = errno;
+
+start:
+
+ flags = O_RDONLY;
+#ifdef O_NOFOLLOW
+ flags |= O_NOFOLLOW;
+#endif
+#ifdef O_CLOEXEC
+ flags |= O_CLOEXEC;
+#endif
+ fd = open(path, flags, 0);
+ if (fd == -1) {
+ if (errno == EINTR)
+ goto start;
+ goto nodevrandom;
+ }
+#ifndef O_CLOEXEC
+ fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
+#endif
+
+ /* Lightly verify that the device node looks sane */
+ if (fstat(fd, &st) == -1 || !S_ISCHR(st.st_mode) ||
+ (devfscheck && (strcmp(st.st_fstype, "devfs") != 0))) {
+ close(fd);
+ goto nodevrandom;
+ }
+ for (i = 0; i < len; ) {
+ size_t wanted = len - i;
+ ssize_t ret = read(fd, (char *)buf + i, wanted);
+
+ if (ret == -1) {
+ if (errno == EAGAIN || errno == EINTR)
+ continue;
+ close(fd);
+ goto nodevrandom;
+ }
+ i += ret;
+ }
+ close(fd);
+ errno = save_errno;
+ return (0); /* satisfied */
+nodevrandom:
+ errno = EIO;
+ return (-1);
+}
+
+static const int cl[] = {
+ CLOCK_REALTIME,
+#ifdef CLOCK_MONOTONIC
+ CLOCK_MONOTONIC,
+#endif
+#ifdef CLOCK_MONOTONIC_RAW
+ CLOCK_MONOTONIC_RAW,
+#endif
+#ifdef CLOCK_TAI
+ CLOCK_TAI,
+#endif
+#ifdef CLOCK_VIRTUAL
+ CLOCK_VIRTUAL,
+#endif
+#ifdef CLOCK_UPTIME
+ CLOCK_UPTIME,
+#endif
+#ifdef CLOCK_PROCESS_CPUTIME_ID
+ CLOCK_PROCESS_CPUTIME_ID,
+#endif
+#ifdef CLOCK_THREAD_CPUTIME_ID
+ CLOCK_THREAD_CPUTIME_ID,
+#endif
+};
+
+static int
+getentropy_phdr(struct dl_phdr_info *info, size_t size, void *data)
+{
+ SHA512_CTX *ctx = data;
+
+ SHA512_Update(ctx, &info->dlpi_addr, sizeof (info->dlpi_addr));
+ return (0);
+}
+
+static int
+getentropy_fallback(void *buf, size_t len)
+{
+ uint8_t results[SHA512_DIGEST_LENGTH];
+ int save_errno = errno, e, pgs = getpagesize(), faster = 0, repeat;
+ static int cnt;
+ struct timespec ts;
+ struct timeval tv;
+ double loadavg[3];
+ struct rusage ru;
+ sigset_t sigset;
+ struct stat st;
+ SHA512_CTX ctx;
+ static pid_t lastpid;
+ pid_t pid;
+ size_t i, ii, m;
+ char *p;
+
+ pid = getpid();
+ if (lastpid == pid) {
+ faster = 1;
+ repeat = 2;
+ } else {
+ faster = 0;
+ lastpid = pid;
+ repeat = REPEAT;
+ }
+ for (i = 0; i < len; ) {
+ int j;
+ SHA512_Init(&ctx);
+ for (j = 0; j < repeat; j++) {
+ HX((e = gettimeofday(&tv, NULL)) == -1, tv);
+ if (e != -1) {
+ cnt += (int)tv.tv_sec;
+ cnt += (int)tv.tv_usec;
+ }
+
+ dl_iterate_phdr(getentropy_phdr, &ctx);
+
+ for (ii = 0; ii < sizeof(cl)/sizeof(cl[0]); ii++)
+ HX(clock_gettime(cl[ii], &ts) == -1, ts);
+
+ HX((pid = getpid()) == -1, pid);
+ HX((pid = getsid(pid)) == -1, pid);
+ HX((pid = getppid()) == -1, pid);
+ HX((pid = getpgid(0)) == -1, pid);
+ HX((e = getpriority(0, 0)) == -1, e);
+ HX((getloadavg(loadavg, 3) == -1), loadavg);
+
+ if (!faster) {
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1;
+ (void) nanosleep(&ts, NULL);
+ }
+
+ HX(sigpending(&sigset) == -1, sigset);
+ HX(sigprocmask(SIG_BLOCK, NULL, &sigset) == -1,
+ sigset);
+
+ HF(getentropy); /* an addr in this library */
+ HF(printf); /* an addr in libc */
+ p = (char *)&p;
+ HD(p); /* an addr on stack */
+ p = (char *)&errno;
+ HD(p); /* the addr of errno */
+
+ if (i == 0) {
+ struct sockaddr_storage ss;
+ struct statvfs stvfs;
+ struct termios tios;
+ socklen_t ssl;
+ off_t off;
+
+ /*
+ * Prime-sized mappings encourage fragmentation;
+ * thus exposing some address entropy.
+ */
+ struct mm {
+ size_t npg;
+ void *p;
+ } mm[] = {
+ { 17, MAP_FAILED }, { 3, MAP_FAILED },
+ { 11, MAP_FAILED }, { 2, MAP_FAILED },
+ { 5, MAP_FAILED }, { 3, MAP_FAILED },
+ { 7, MAP_FAILED }, { 1, MAP_FAILED },
+ { 57, MAP_FAILED }, { 3, MAP_FAILED },
+ { 131, MAP_FAILED }, { 1, MAP_FAILED },
+ };
+
+ for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
+ HX(mm[m].p = mmap(NULL,
+ mm[m].npg * pgs,
+ PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANON, -1,
+ (off_t)0), mm[m].p);
+ if (mm[m].p != MAP_FAILED) {
+ size_t mo;
+
+ /* Touch some memory... */
+ p = mm[m].p;
+ mo = cnt %
+ (mm[m].npg * pgs - 1);
+ p[mo] = 1;
+ cnt += (int)((long)(mm[m].p)
+ / pgs);
+ }
+
+ /* Check cnts and times... */
+ for (ii = 0; ii < sizeof(cl)/sizeof(cl[0]);
+ ii++) {
+ HX((e = clock_gettime(cl[ii],
+ &ts)) == -1, ts);
+ if (e != -1)
+ cnt += (int)ts.tv_nsec;
+ }
+
+ HX((e = getrusage(RUSAGE_SELF,
+ &ru)) == -1, ru);
+ if (e != -1) {
+ cnt += (int)ru.ru_utime.tv_sec;
+ cnt += (int)ru.ru_utime.tv_usec;
+ }
+ }
+
+ for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
+ if (mm[m].p != MAP_FAILED)
+ munmap(mm[m].p, mm[m].npg * pgs);
+ mm[m].p = MAP_FAILED;
+ }
+
+ HX(stat(".", &st) == -1, st);
+ HX(statvfs(".", &stvfs) == -1, stvfs);
+
+ HX(stat("/", &st) == -1, st);
+ HX(statvfs("/", &stvfs) == -1, stvfs);
+
+ HX((e = fstat(0, &st)) == -1, st);
+ if (e == -1) {
+ if (S_ISREG(st.st_mode) ||
+ S_ISFIFO(st.st_mode) ||
+ S_ISSOCK(st.st_mode)) {
+ HX(fstatvfs(0, &stvfs) == -1,
+ stvfs);
+ HX((off = lseek(0, (off_t)0,
+ SEEK_CUR)) < 0, off);
+ }
+ if (S_ISCHR(st.st_mode)) {
+ HX(tcgetattr(0, &tios) == -1,
+ tios);
+ } else if (S_ISSOCK(st.st_mode)) {
+ memset(&ss, 0, sizeof ss);
+ ssl = sizeof(ss);
+ HX(getpeername(0,
+ (void *)&ss, &ssl) == -1,
+ ss);
+ }
+ }
+
+ HX((e = getrusage(RUSAGE_CHILDREN,
+ &ru)) == -1, ru);
+ if (e != -1) {
+ cnt += (int)ru.ru_utime.tv_sec;
+ cnt += (int)ru.ru_utime.tv_usec;
+ }
+ } else {
+ /* Subsequent hashes absorb previous result */
+ HD(results);
+ }
+
+ HX((e = gettimeofday(&tv, NULL)) == -1, tv);
+ if (e != -1) {
+ cnt += (int)tv.tv_sec;
+ cnt += (int)tv.tv_usec;
+ }
+
+ HD(cnt);
+ }
+ SHA512_Final(results, &ctx);
+ memcpy((char *)buf + i, results, MINIMUM(sizeof(results), len - i));
+ i += MINIMUM(sizeof(results), len - i);
+ }
+ explicit_bzero(&ctx, sizeof ctx);
+ explicit_bzero(results, sizeof results);
+ errno = save_errno;
+ return (0); /* satisfied */
+}
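
Both fallback implementations finish each pass the same way: SHA512_Final() yields a 64-byte digest and MINIMUM() copies out only what is still needed, so a request of up to 256 bytes is satisfied in at most four passes. A standalone sketch of that output loop, with the many HX()/HD() probes reduced to a single counter and OpenSSL's legacy SHA512_* interface from <openssl/sha.h> assumed as the digest (build with -lcrypto):

#include <openssl/sha.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MINIMUM(a, b) (((a) < (b)) ? (a) : (b))

static void
fill_from_digests(void *buf, size_t len)
{
	uint8_t results[SHA512_DIGEST_LENGTH];
	SHA512_CTX ctx;
	size_t i;
	int pass = 0;

	for (i = 0; i < len; ) {
		SHA512_Init(&ctx);
		/* stands in for the dozens of HX()/HD() probes per pass */
		SHA512_Update(&ctx, &pass, sizeof(pass));
		pass++;
		SHA512_Final(results, &ctx);
		memcpy((char *)buf + i, results, MINIMUM(sizeof(results), len - i));
		i += MINIMUM(sizeof(results), len - i);
	}
}

int
main(void)
{
	unsigned char out[100];

	fill_from_digests(out, sizeof(out));    /* 100 bytes -> two passes */
	printf("first byte: %02x\n", out[0]);
	return 0;
}
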
diff --git a/crypto/libressl/crypto/compat/getentropy_win.c b/crypto/libressl/crypto/compat/getentropy_win.c
new file mode 100644
index 0000000..64514b3
--- /dev/null
+++ b/crypto/libressl/crypto/compat/getentropy_win.c
@@ -0,0 +1,50 @@
+/* $OpenBSD: getentropy_win.c,v 1.6 2020/11/11 10:41:24 bcook Exp $ */
+
+/*
+ * Copyright (c) 2014, Theo de Raadt <deraadt@openbsd.org>
+ * Copyright (c) 2014, Bob Beck <beck@obtuse.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Emulation of getentropy(2) as documented at:
+ * http://man.openbsd.org/getentropy.2
+ */
+
+#include <windows.h>
+#include <bcrypt.h>
+#include <errno.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+int getentropy(void *buf, size_t len);
+
+/*
+ * On Windows, BCryptGenRandom with BCRYPT_USE_SYSTEM_PREFERRED_RNG is supposed
+ * to be a well-seeded, cryptographically strong random number generator.
+ * https://docs.microsoft.com/en-us/windows/win32/api/bcrypt/nf-bcrypt-bcryptgenrandom
+ */
+int
+getentropy(void *buf, size_t len)
+{
+ if (len > 256) {
+ errno = EIO;
+ return (-1);
+ }
+
+ if (FAILED(BCryptGenRandom(NULL, buf, len, BCRYPT_USE_SYSTEM_PREFERRED_RNG))) {
+ errno = EIO;
+ return (-1);
+ }
+
+ return (0);
+}
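
All of the compat files above implement the same caller-visible contract: at most 256 bytes per call, 0 on success, -1 with errno set (EIO in these implementations) on failure. A caller-side sketch; the prototype is repeated locally only because a real build would normally pick it up from <unistd.h> or a compat header, which this patch does not install:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

int getentropy(void *buf, size_t len);  /* provided by one of the compat files above */

int
main(void)
{
	unsigned char key[32];

	if (getentropy(key, sizeof(key)) == -1) {
		fprintf(stderr, "getentropy: %s\n", strerror(errno));
		return 1;
	}
	printf("got %zu random bytes\n", sizeof(key));
	return 0;
}
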
diff --git a/crypto/libressl/crypto/compat/timingsafe_bcmp.c b/crypto/libressl/crypto/compat/timingsafe_bcmp.c
new file mode 100644
index 0000000..552e844
--- /dev/null
+++ b/crypto/libressl/crypto/compat/timingsafe_bcmp.c
@@ -0,0 +1,29 @@
+/* $OpenBSD: timingsafe_bcmp.c,v 1.3 2015/08/31 02:53:57 guenther Exp $ */
+/*
+ * Copyright (c) 2010 Damien Miller. All rights reserved.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <string.h>
+
+int
+timingsafe_bcmp(const void *b1, const void *b2, size_t n)
+{
+ const unsigned char *p1 = b1, *p2 = b2;
+ int ret = 0;
+
+ for (; n > 0; n--)
+ ret |= *p1++ ^ *p2++;
+ return (ret != 0);
+}
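
timingsafe_bcmp() answers only "equal or not": the loop ORs the XOR of every byte pair, so all n bytes are inspected no matter where the first difference is, and the running time does not reveal the position of a mismatch. A usage sketch for MAC/tag verification; the 16-byte tags are made-up illustration values:

#include <stddef.h>
#include <stdio.h>

int timingsafe_bcmp(const void *b1, const void *b2, size_t n);

int
main(void)
{
	unsigned char expected[16] = { 0xde, 0xad, 0xbe, 0xef };  /* rest zero */
	unsigned char received[16] = { 0xde, 0xad, 0xbe, 0xef };

	if (timingsafe_bcmp(expected, received, sizeof(expected)) != 0) {
		puts("tag mismatch");
		return 1;
	}
	puts("tag ok");
	return 0;
}
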
diff --git a/crypto/libressl/crypto/compat/timingsafe_memcmp.c b/crypto/libressl/crypto/compat/timingsafe_memcmp.c
new file mode 100644
index 0000000..bb210a3
--- /dev/null
+++ b/crypto/libressl/crypto/compat/timingsafe_memcmp.c
@@ -0,0 +1,46 @@
+/* $OpenBSD: timingsafe_memcmp.c,v 1.2 2015/08/31 02:53:57 guenther Exp $ */
+/*
+ * Copyright (c) 2014 Google Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <limits.h>
+#include <string.h>
+
+int
+timingsafe_memcmp(const void *b1, const void *b2, size_t len)
+{
+ const unsigned char *p1 = b1, *p2 = b2;
+ size_t i;
+ int res = 0, done = 0;
+
+ for (i = 0; i < len; i++) {
+ /* lt is -1 if p1[i] < p2[i]; else 0. */
+ int lt = (p1[i] - p2[i]) >> CHAR_BIT;
+
+ /* gt is -1 if p1[i] > p2[i]; else 0. */
+ int gt = (p2[i] - p1[i]) >> CHAR_BIT;
+
+ /* cmp is 1 if p1[i] > p2[i]; -1 if p1[i] < p2[i]; else 0. */
+ int cmp = lt - gt;
+
+ /* set res = cmp if !done. */
+ res |= cmp & ~done;
+
+ /* set done if p1[i] != p2[i]. */
+ done |= lt | gt;
+ }
+
+ return (res);
+}
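
timingsafe_memcmp() additionally reports ordering: lt and gt pull the sign bit down by CHAR_BIT to become -1 or 0, cmp combines them into the ordering of the current byte, and done latches once the first differing byte has fixed the result, with no data-dependent branches. A small check that its sign agrees with memcmp(); it assumes the program is linked against the file above:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

int timingsafe_memcmp(const void *b1, const void *b2, size_t len);

static int
sign(int x)
{
	return (x > 0) - (x < 0);
}

int
main(void)
{
	const char *pairs[][2] = {
		{ "abc", "abc" }, { "abc", "abd" }, { "abd", "abc" },
	};
	size_t i;

	for (i = 0; i < sizeof(pairs) / sizeof(pairs[0]); i++) {
		int a = sign(timingsafe_memcmp(pairs[i][0], pairs[i][1], 3));
		int b = sign(memcmp(pairs[i][0], pairs[i][1], 3));

		printf("%s vs %s: %d %d %s\n", pairs[i][0], pairs[i][1],
		    a, b, a == b ? "agree" : "DISAGREE");
	}
	return 0;
}
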
diff --git a/crypto/libressl/crypto/curve25519/Makefile b/crypto/libressl/crypto/curve25519/Makefile
new file mode 100644
index 0000000..47696b1
--- /dev/null
+++ b/crypto/libressl/crypto/curve25519/Makefile
@@ -0,0 +1,14 @@
+include ssl_common.mk
+CFLAGS += -D__BEGIN_HIDDEN_DECLS= -D__END_HIDDEN_DECLS= -DED25519
+
+obj = curve25519.o curve25519-generic.o
+
+
+all: $(obj)
+dep: all
+
+%.o: %.c
+ $(CC) $(CFLAGS) -c $<
+
+clean:
+ rm -f *.o *.a
diff --git a/crypto/libressl/crypto/curve25519/curve25519-generic.c b/crypto/libressl/crypto/curve25519/curve25519-generic.c
new file mode 100644
index 0000000..d533731
--- /dev/null
+++ b/crypto/libressl/crypto/curve25519/curve25519-generic.c
@@ -0,0 +1,34 @@
+/* $OpenBSD: curve25519-generic.c,v 1.2 2019/05/11 15:55:52 tb Exp $ */
+/*
+ * Copyright (c) 2015, Google Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This code is mostly taken from the ref10 version of Ed25519 in SUPERCOP
+ * 20141124 (http://bench.cr.yp.to/supercop.html). That code is released as
+ * public domain but this file has the ISC license just to keep licensing
+ * simple.
+ *
+ * The field functions are shared by Ed25519 and X25519 where possible.
+ */
+
+#include "curve25519_internal.h"
+
+void
+x25519_scalar_mult(uint8_t out[32], const uint8_t scalar[32],
+ const uint8_t point[32])
+{
+ x25519_scalar_mult_generic(out, scalar, point);
+}
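
x25519_scalar_mult() is the internal entry point; applications normally use the X25519()/X25519_keypair() wrappers that LibreSSL declares in <openssl/curve25519.h>. A hedged ECDH sketch against that public API, assuming a libcrypto that exports both functions (build with -lcrypto):

#include <openssl/curve25519.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	uint8_t alice_pub[32], alice_priv[32];
	uint8_t bob_pub[32], bob_priv[32];
	uint8_t k1[32], k2[32];

	X25519_keypair(alice_pub, alice_priv);
	X25519_keypair(bob_pub, bob_priv);

	/* Each side combines its private key with the peer's public value. */
	if (!X25519(k1, alice_priv, bob_pub) || !X25519(k2, bob_priv, alice_pub))
		return 1;

	printf("shared secrets %s\n",
	    memcmp(k1, k2, sizeof(k1)) == 0 ? "match" : "differ");
	return 0;
}
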
diff --git a/crypto/libressl/crypto/curve25519/curve25519.c b/crypto/libressl/crypto/curve25519/curve25519.c
new file mode 100644
index 0000000..13b54c3
--- /dev/null
+++ b/crypto/libressl/crypto/curve25519/curve25519.c
@@ -0,0 +1,4935 @@
+/* $OpenBSD: curve25519.c,v 1.5 2019/05/11 15:55:52 tb Exp $ */
+/*
+ * Copyright (c) 2015, Google Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This code is mostly taken from the ref10 version of Ed25519 in SUPERCOP
+ * 20141124 (http://bench.cr.yp.to/supercop.html). That code is released as
+ * public domain but this file has the ISC license just to keep licensing
+ * simple.
+ *
+ * The field functions are shared by Ed25519 and X25519 where possible.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <openssl/curve25519.h>
+
+#ifdef ED25519
+#include <openssl/sha.h>
+#endif
+
+#include "curve25519_internal.h"
+
+static const int64_t kBottom25Bits = 0x1ffffffLL;
+static const int64_t kBottom26Bits = 0x3ffffffLL;
+static const int64_t kTop39Bits = 0xfffffffffe000000LL;
+static const int64_t kTop38Bits = 0xfffffffffc000000LL;
+
+static uint64_t load_3(const uint8_t *in) {
+ uint64_t result;
+ result = (uint64_t)in[0];
+ result |= ((uint64_t)in[1]) << 8;
+ result |= ((uint64_t)in[2]) << 16;
+ return result;
+}
+
+static uint64_t load_4(const uint8_t *in) {
+ uint64_t result;
+ result = (uint64_t)in[0];
+ result |= ((uint64_t)in[1]) << 8;
+ result |= ((uint64_t)in[2]) << 16;
+ result |= ((uint64_t)in[3]) << 24;
+ return result;
+}
+
+static void fe_frombytes(fe h, const uint8_t *s) {
+ /* Ignores top bit of h. */
+ int64_t h0 = load_4(s);
+ int64_t h1 = load_3(s + 4) << 6;
+ int64_t h2 = load_3(s + 7) << 5;
+ int64_t h3 = load_3(s + 10) << 3;
+ int64_t h4 = load_3(s + 13) << 2;
+ int64_t h5 = load_4(s + 16);
+ int64_t h6 = load_3(s + 20) << 7;
+ int64_t h7 = load_3(s + 23) << 5;
+ int64_t h8 = load_3(s + 26) << 4;
+ int64_t h9 = (load_3(s + 29) & 8388607) << 2;
+ int64_t carry0;
+ int64_t carry1;
+ int64_t carry2;
+ int64_t carry3;
+ int64_t carry4;
+ int64_t carry5;
+ int64_t carry6;
+ int64_t carry7;
+ int64_t carry8;
+ int64_t carry9;
+
+ carry9 = h9 + (1 << 24); h0 += (carry9 >> 25) * 19; h9 -= carry9 & kTop39Bits;
+ carry1 = h1 + (1 << 24); h2 += carry1 >> 25; h1 -= carry1 & kTop39Bits;
+ carry3 = h3 + (1 << 24); h4 += carry3 >> 25; h3 -= carry3 & kTop39Bits;
+ carry5 = h5 + (1 << 24); h6 += carry5 >> 25; h5 -= carry5 & kTop39Bits;
+ carry7 = h7 + (1 << 24); h8 += carry7 >> 25; h7 -= carry7 & kTop39Bits;
+
+ carry0 = h0 + (1 << 25); h1 += carry0 >> 26; h0 -= carry0 & kTop38Bits;
+ carry2 = h2 + (1 << 25); h3 += carry2 >> 26; h2 -= carry2 & kTop38Bits;
+ carry4 = h4 + (1 << 25); h5 += carry4 >> 26; h4 -= carry4 & kTop38Bits;
+ carry6 = h6 + (1 << 25); h7 += carry6 >> 26; h6 -= carry6 & kTop38Bits;
+ carry8 = h8 + (1 << 25); h9 += carry8 >> 26; h8 -= carry8 & kTop38Bits;
+
+ h[0] = h0;
+ h[1] = h1;
+ h[2] = h2;
+ h[3] = h3;
+ h[4] = h4;
+ h[5] = h5;
+ h[6] = h6;
+ h[7] = h7;
+ h[8] = h8;
+ h[9] = h9;
+}
+
+/* Preconditions:
+ * |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+ *
+ * Write p=2^255-19; q=floor(h/p).
+ * Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
+ *
+ * Proof:
+ * Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
+ * Also have |h-2^230 h9|<2^231 so |19 2^(-255)(h-2^230 h9)|<1/4.
+ *
+ * Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
+ * Then 0<y<1.
+ *
+ * Write r=h-pq.
+ * Have 0<=r<=p-1=2^255-20.
+ * Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
+ *
+ * Write x=r+19(2^-255)r+y.
+ * Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
+ *
+ * Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
+ * so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. */
+static void fe_tobytes(uint8_t *s, const fe h) {
+ int32_t h0 = h[0];
+ int32_t h1 = h[1];
+ int32_t h2 = h[2];
+ int32_t h3 = h[3];
+ int32_t h4 = h[4];
+ int32_t h5 = h[5];
+ int32_t h6 = h[6];
+ int32_t h7 = h[7];
+ int32_t h8 = h[8];
+ int32_t h9 = h[9];
+ int32_t q;
+
+ q = (19 * h9 + (((int32_t) 1) << 24)) >> 25;
+ q = (h0 + q) >> 26;
+ q = (h1 + q) >> 25;
+ q = (h2 + q) >> 26;
+ q = (h3 + q) >> 25;
+ q = (h4 + q) >> 26;
+ q = (h5 + q) >> 25;
+ q = (h6 + q) >> 26;
+ q = (h7 + q) >> 25;
+ q = (h8 + q) >> 26;
+ q = (h9 + q) >> 25;
+
+ /* Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. */
+ h0 += 19 * q;
+ /* Goal: Output h-2^255 q, which is between 0 and 2^255-20. */
+
+ h1 += h0 >> 26; h0 &= kBottom26Bits;
+ h2 += h1 >> 25; h1 &= kBottom25Bits;
+ h3 += h2 >> 26; h2 &= kBottom26Bits;
+ h4 += h3 >> 25; h3 &= kBottom25Bits;
+ h5 += h4 >> 26; h4 &= kBottom26Bits;
+ h6 += h5 >> 25; h5 &= kBottom25Bits;
+ h7 += h6 >> 26; h6 &= kBottom26Bits;
+ h8 += h7 >> 25; h7 &= kBottom25Bits;
+ h9 += h8 >> 26; h8 &= kBottom26Bits;
+ h9 &= kBottom25Bits;
+ /* h10 = carry9 */
+
+ /* Goal: Output h0+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
+ * Have h0+...+2^230 h9 between 0 and 2^255-1;
+ * evidently 2^255 h10-2^255 q = 0.
+ * Goal: Output h0+...+2^230 h9. */
+
+ s[0] = h0 >> 0;
+ s[1] = h0 >> 8;
+ s[2] = h0 >> 16;
+ s[3] = (h0 >> 24) | ((uint32_t)(h1) << 2);
+ s[4] = h1 >> 6;
+ s[5] = h1 >> 14;
+ s[6] = (h1 >> 22) | ((uint32_t)(h2) << 3);
+ s[7] = h2 >> 5;
+ s[8] = h2 >> 13;
+ s[9] = (h2 >> 21) | ((uint32_t)(h3) << 5);
+ s[10] = h3 >> 3;
+ s[11] = h3 >> 11;
+ s[12] = (h3 >> 19) | ((uint32_t)(h4) << 6);
+ s[13] = h4 >> 2;
+ s[14] = h4 >> 10;
+ s[15] = h4 >> 18;
+ s[16] = h5 >> 0;
+ s[17] = h5 >> 8;
+ s[18] = h5 >> 16;
+ s[19] = (h5 >> 24) | ((uint32_t)(h6) << 1);
+ s[20] = h6 >> 7;
+ s[21] = h6 >> 15;
+ s[22] = (h6 >> 23) | ((uint32_t)(h7) << 3);
+ s[23] = h7 >> 5;
+ s[24] = h7 >> 13;
+ s[25] = (h7 >> 21) | ((uint32_t)(h8) << 4);
+ s[26] = h8 >> 4;
+ s[27] = h8 >> 12;
+ s[28] = (h8 >> 20) | ((uint32_t)(h9) << 6);
+ s[29] = h9 >> 2;
+ s[30] = h9 >> 10;
+ s[31] = h9 >> 18;
+}
+
+/* h = f */
+static void fe_copy(fe h, const fe f) {
+ memmove(h, f, sizeof(int32_t) * 10);
+}
+
+/* h = 0 */
+static void fe_0(fe h) { memset(h, 0, sizeof(int32_t) * 10); }
+
+/* h = 1 */
+static void fe_1(fe h) {
+ memset(h, 0, sizeof(int32_t) * 10);
+ h[0] = 1;
+}
+
+/* h = f + g
+ * Can overlap h with f or g.
+ *
+ * Preconditions:
+ * |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+ * |g| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+ *
+ * Postconditions:
+ * |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. */
+static void fe_add(fe h, const fe f, const fe g) {
+ unsigned i;
+ for (i = 0; i < 10; i++) {
+ h[i] = f[i] + g[i];
+ }
+}
+
+/* h = f - g
+ * Can overlap h with f or g.
+ *
+ * Preconditions:
+ * |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+ * |g| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+ *
+ * Postconditions:
+ * |h| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. */
+static void fe_sub(fe h, const fe f, const fe g) {
+ unsigned i;
+ for (i = 0; i < 10; i++) {
+ h[i] = f[i] - g[i];
+ }
+}
+
+/* h = f * g
+ * Can overlap h with f or g.
+ *
+ * Preconditions:
+ * |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
+ * |g| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
+ *
+ * Postconditions:
+ * |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc.
+ *
+ * Notes on implementation strategy:
+ *
+ * Using schoolbook multiplication.
+ * Karatsuba would save a little in some cost models.
+ *
+ * Most multiplications by 2 and 19 are 32-bit precomputations;
+ * cheaper than 64-bit postcomputations.
+ *
+ * There is one remaining multiplication by 19 in the carry chain;
+ * one *19 precomputation can be merged into this,
+ * but the resulting data flow is considerably less clean.
+ *
+ * There are 12 carries below.
+ * 10 of them are 2-way parallelizable and vectorizable.
+ * Can get away with 11 carries, but then data flow is much deeper.
+ *
+ * With tighter constraints on inputs can squeeze carries into int32. */
+static void fe_mul(fe h, const fe f, const fe g) {
+ int32_t f0 = f[0];
+ int32_t f1 = f[1];
+ int32_t f2 = f[2];
+ int32_t f3 = f[3];
+ int32_t f4 = f[4];
+ int32_t f5 = f[5];
+ int32_t f6 = f[6];
+ int32_t f7 = f[7];
+ int32_t f8 = f[8];
+ int32_t f9 = f[9];
+ int32_t g0 = g[0];
+ int32_t g1 = g[1];
+ int32_t g2 = g[2];
+ int32_t g3 = g[3];
+ int32_t g4 = g[4];
+ int32_t g5 = g[5];
+ int32_t g6 = g[6];
+ int32_t g7 = g[7];
+ int32_t g8 = g[8];
+ int32_t g9 = g[9];
+ int32_t g1_19 = 19 * g1; /* 1.959375*2^29 */
+ int32_t g2_19 = 19 * g2; /* 1.959375*2^30; still ok */
+ int32_t g3_19 = 19 * g3;
+ int32_t g4_19 = 19 * g4;
+ int32_t g5_19 = 19 * g5;
+ int32_t g6_19 = 19 * g6;
+ int32_t g7_19 = 19 * g7;
+ int32_t g8_19 = 19 * g8;
+ int32_t g9_19 = 19 * g9;
+ int32_t f1_2 = 2 * f1;
+ int32_t f3_2 = 2 * f3;
+ int32_t f5_2 = 2 * f5;
+ int32_t f7_2 = 2 * f7;
+ int32_t f9_2 = 2 * f9;
+ int64_t f0g0 = f0 * (int64_t) g0;
+ int64_t f0g1 = f0 * (int64_t) g1;
+ int64_t f0g2 = f0 * (int64_t) g2;
+ int64_t f0g3 = f0 * (int64_t) g3;
+ int64_t f0g4 = f0 * (int64_t) g4;
+ int64_t f0g5 = f0 * (int64_t) g5;
+ int64_t f0g6 = f0 * (int64_t) g6;
+ int64_t f0g7 = f0 * (int64_t) g7;
+ int64_t f0g8 = f0 * (int64_t) g8;
+ int64_t f0g9 = f0 * (int64_t) g9;
+ int64_t f1g0 = f1 * (int64_t) g0;
+ int64_t f1g1_2 = f1_2 * (int64_t) g1;
+ int64_t f1g2 = f1 * (int64_t) g2;
+ int64_t f1g3_2 = f1_2 * (int64_t) g3;
+ int64_t f1g4 = f1 * (int64_t) g4;
+ int64_t f1g5_2 = f1_2 * (int64_t) g5;
+ int64_t f1g6 = f1 * (int64_t) g6;
+ int64_t f1g7_2 = f1_2 * (int64_t) g7;
+ int64_t f1g8 = f1 * (int64_t) g8;
+ int64_t f1g9_38 = f1_2 * (int64_t) g9_19;
+ int64_t f2g0 = f2 * (int64_t) g0;
+ int64_t f2g1 = f2 * (int64_t) g1;
+ int64_t f2g2 = f2 * (int64_t) g2;
+ int64_t f2g3 = f2 * (int64_t) g3;
+ int64_t f2g4 = f2 * (int64_t) g4;
+ int64_t f2g5 = f2 * (int64_t) g5;
+ int64_t f2g6 = f2 * (int64_t) g6;
+ int64_t f2g7 = f2 * (int64_t) g7;
+ int64_t f2g8_19 = f2 * (int64_t) g8_19;
+ int64_t f2g9_19 = f2 * (int64_t) g9_19;
+ int64_t f3g0 = f3 * (int64_t) g0;
+ int64_t f3g1_2 = f3_2 * (int64_t) g1;
+ int64_t f3g2 = f3 * (int64_t) g2;
+ int64_t f3g3_2 = f3_2 * (int64_t) g3;
+ int64_t f3g4 = f3 * (int64_t) g4;
+ int64_t f3g5_2 = f3_2 * (int64_t) g5;
+ int64_t f3g6 = f3 * (int64_t) g6;
+ int64_t f3g7_38 = f3_2 * (int64_t) g7_19;
+ int64_t f3g8_19 = f3 * (int64_t) g8_19;
+ int64_t f3g9_38 = f3_2 * (int64_t) g9_19;
+ int64_t f4g0 = f4 * (int64_t) g0;
+ int64_t f4g1 = f4 * (int64_t) g1;
+ int64_t f4g2 = f4 * (int64_t) g2;
+ int64_t f4g3 = f4 * (int64_t) g3;
+ int64_t f4g4 = f4 * (int64_t) g4;
+ int64_t f4g5 = f4 * (int64_t) g5;
+ int64_t f4g6_19 = f4 * (int64_t) g6_19;
+ int64_t f4g7_19 = f4 * (int64_t) g7_19;
+ int64_t f4g8_19 = f4 * (int64_t) g8_19;
+ int64_t f4g9_19 = f4 * (int64_t) g9_19;
+ int64_t f5g0 = f5 * (int64_t) g0;
+ int64_t f5g1_2 = f5_2 * (int64_t) g1;
+ int64_t f5g2 = f5 * (int64_t) g2;
+ int64_t f5g3_2 = f5_2 * (int64_t) g3;
+ int64_t f5g4 = f5 * (int64_t) g4;
+ int64_t f5g5_38 = f5_2 * (int64_t) g5_19;
+ int64_t f5g6_19 = f5 * (int64_t) g6_19;
+ int64_t f5g7_38 = f5_2 * (int64_t) g7_19;
+ int64_t f5g8_19 = f5 * (int64_t) g8_19;
+ int64_t f5g9_38 = f5_2 * (int64_t) g9_19;
+ int64_t f6g0 = f6 * (int64_t) g0;
+ int64_t f6g1 = f6 * (int64_t) g1;
+ int64_t f6g2 = f6 * (int64_t) g2;
+ int64_t f6g3 = f6 * (int64_t) g3;
+ int64_t f6g4_19 = f6 * (int64_t) g4_19;
+ int64_t f6g5_19 = f6 * (int64_t) g5_19;
+ int64_t f6g6_19 = f6 * (int64_t) g6_19;
+ int64_t f6g7_19 = f6 * (int64_t) g7_19;
+ int64_t f6g8_19 = f6 * (int64_t) g8_19;
+ int64_t f6g9_19 = f6 * (int64_t) g9_19;
+ int64_t f7g0 = f7 * (int64_t) g0;
+ int64_t f7g1_2 = f7_2 * (int64_t) g1;
+ int64_t f7g2 = f7 * (int64_t) g2;
+ int64_t f7g3_38 = f7_2 * (int64_t) g3_19;
+ int64_t f7g4_19 = f7 * (int64_t) g4_19;
+ int64_t f7g5_38 = f7_2 * (int64_t) g5_19;
+ int64_t f7g6_19 = f7 * (int64_t) g6_19;
+ int64_t f7g7_38 = f7_2 * (int64_t) g7_19;
+ int64_t f7g8_19 = f7 * (int64_t) g8_19;
+ int64_t f7g9_38 = f7_2 * (int64_t) g9_19;
+ int64_t f8g0 = f8 * (int64_t) g0;
+ int64_t f8g1 = f8 * (int64_t) g1;
+ int64_t f8g2_19 = f8 * (int64_t) g2_19;
+ int64_t f8g3_19 = f8 * (int64_t) g3_19;
+ int64_t f8g4_19 = f8 * (int64_t) g4_19;
+ int64_t f8g5_19 = f8 * (int64_t) g5_19;
+ int64_t f8g6_19 = f8 * (int64_t) g6_19;
+ int64_t f8g7_19 = f8 * (int64_t) g7_19;
+ int64_t f8g8_19 = f8 * (int64_t) g8_19;
+ int64_t f8g9_19 = f8 * (int64_t) g9_19;
+ int64_t f9g0 = f9 * (int64_t) g0;
+ int64_t f9g1_38 = f9_2 * (int64_t) g1_19;
+ int64_t f9g2_19 = f9 * (int64_t) g2_19;
+ int64_t f9g3_38 = f9_2 * (int64_t) g3_19;
+ int64_t f9g4_19 = f9 * (int64_t) g4_19;
+ int64_t f9g5_38 = f9_2 * (int64_t) g5_19;
+ int64_t f9g6_19 = f9 * (int64_t) g6_19;
+ int64_t f9g7_38 = f9_2 * (int64_t) g7_19;
+ int64_t f9g8_19 = f9 * (int64_t) g8_19;
+ int64_t f9g9_38 = f9_2 * (int64_t) g9_19;
+ int64_t h0 = f0g0+f1g9_38+f2g8_19+f3g7_38+f4g6_19+f5g5_38+f6g4_19+f7g3_38+f8g2_19+f9g1_38;
+ int64_t h1 = f0g1+f1g0 +f2g9_19+f3g8_19+f4g7_19+f5g6_19+f6g5_19+f7g4_19+f8g3_19+f9g2_19;
+ int64_t h2 = f0g2+f1g1_2 +f2g0 +f3g9_38+f4g8_19+f5g7_38+f6g6_19+f7g5_38+f8g4_19+f9g3_38;
+ int64_t h3 = f0g3+f1g2 +f2g1 +f3g0 +f4g9_19+f5g8_19+f6g7_19+f7g6_19+f8g5_19+f9g4_19;
+ int64_t h4 = f0g4+f1g3_2 +f2g2 +f3g1_2 +f4g0 +f5g9_38+f6g8_19+f7g7_38+f8g6_19+f9g5_38;
+ int64_t h5 = f0g5+f1g4 +f2g3 +f3g2 +f4g1 +f5g0 +f6g9_19+f7g8_19+f8g7_19+f9g6_19;
+ int64_t h6 = f0g6+f1g5_2 +f2g4 +f3g3_2 +f4g2 +f5g1_2 +f6g0 +f7g9_38+f8g8_19+f9g7_38;
+ int64_t h7 = f0g7+f1g6 +f2g5 +f3g4 +f4g3 +f5g2 +f6g1 +f7g0 +f8g9_19+f9g8_19;
+ int64_t h8 = f0g8+f1g7_2 +f2g6 +f3g5_2 +f4g4 +f5g3_2 +f6g2 +f7g1_2 +f8g0 +f9g9_38;
+ int64_t h9 = f0g9+f1g8 +f2g7 +f3g6 +f4g5 +f5g4 +f6g3 +f7g2 +f8g1 +f9g0 ;
+ int64_t carry0;
+ int64_t carry1;
+ int64_t carry2;
+ int64_t carry3;
+ int64_t carry4;
+ int64_t carry5;
+ int64_t carry6;
+ int64_t carry7;
+ int64_t carry8;
+ int64_t carry9;
+
+ /* |h0| <= (1.65*1.65*2^52*(1+19+19+19+19)+1.65*1.65*2^50*(38+38+38+38+38))
+ * i.e. |h0| <= 1.4*2^60; narrower ranges for h2, h4, h6, h8
+ * |h1| <= (1.65*1.65*2^51*(1+1+19+19+19+19+19+19+19+19))
+ * i.e. |h1| <= 1.7*2^59; narrower ranges for h3, h5, h7, h9 */
+
+ carry0 = h0 + (1 << 25); h1 += carry0 >> 26; h0 -= carry0 & kTop38Bits;
+ carry4 = h4 + (1 << 25); h5 += carry4 >> 26; h4 -= carry4 & kTop38Bits;
+ /* |h0| <= 2^25 */
+ /* |h4| <= 2^25 */
+ /* |h1| <= 1.71*2^59 */
+ /* |h5| <= 1.71*2^59 */
+
+ carry1 = h1 + (1 << 24); h2 += carry1 >> 25; h1 -= carry1 & kTop39Bits;
+ carry5 = h5 + (1 << 24); h6 += carry5 >> 25; h5 -= carry5 & kTop39Bits;
+ /* |h1| <= 2^24; from now on fits into int32 */
+ /* |h5| <= 2^24; from now on fits into int32 */
+ /* |h2| <= 1.41*2^60 */
+ /* |h6| <= 1.41*2^60 */
+
+ carry2 = h2 + (1 << 25); h3 += carry2 >> 26; h2 -= carry2 & kTop38Bits;
+ carry6 = h6 + (1 << 25); h7 += carry6 >> 26; h6 -= carry6 & kTop38Bits;
+ /* |h2| <= 2^25; from now on fits into int32 unchanged */
+ /* |h6| <= 2^25; from now on fits into int32 unchanged */
+ /* |h3| <= 1.71*2^59 */
+ /* |h7| <= 1.71*2^59 */
+
+ carry3 = h3 + (1 << 24); h4 += carry3 >> 25; h3 -= carry3 & kTop39Bits;
+ carry7 = h7 + (1 << 24); h8 += carry7 >> 25; h7 -= carry7 & kTop39Bits;
+ /* |h3| <= 2^24; from now on fits into int32 unchanged */
+ /* |h7| <= 2^24; from now on fits into int32 unchanged */
+ /* |h4| <= 1.72*2^34 */
+ /* |h8| <= 1.41*2^60 */
+
+ carry4 = h4 + (1 << 25); h5 += carry4 >> 26; h4 -= carry4 & kTop38Bits;
+ carry8 = h8 + (1 << 25); h9 += carry8 >> 26; h8 -= carry8 & kTop38Bits;
+ /* |h4| <= 2^25; from now on fits into int32 unchanged */
+ /* |h8| <= 2^25; from now on fits into int32 unchanged */
+ /* |h5| <= 1.01*2^24 */
+ /* |h9| <= 1.71*2^59 */
+
+ carry9 = h9 + (1 << 24); h0 += (carry9 >> 25) * 19; h9 -= carry9 & kTop39Bits;
+ /* |h9| <= 2^24; from now on fits into int32 unchanged */
+ /* |h0| <= 1.1*2^39 */
+
+ carry0 = h0 + (1 << 25); h1 += carry0 >> 26; h0 -= carry0 & kTop38Bits;
+ /* |h0| <= 2^25; from now on fits into int32 unchanged */
+ /* |h1| <= 1.01*2^24 */
+
+ h[0] = h0;
+ h[1] = h1;
+ h[2] = h2;
+ h[3] = h3;
+ h[4] = h4;
+ h[5] = h5;
+ h[6] = h6;
+ h[7] = h7;
+ h[8] = h8;
+ h[9] = h9;
+}
+
+/* h = f * f
+ * Can overlap h with f.
+ *
+ * Preconditions:
+ * |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
+ *
+ * Postconditions:
+ * |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc.
+ *
+ * See fe_mul.c for discussion of implementation strategy. */
+static void fe_sq(fe h, const fe f) {
+ int32_t f0 = f[0];
+ int32_t f1 = f[1];
+ int32_t f2 = f[2];
+ int32_t f3 = f[3];
+ int32_t f4 = f[4];
+ int32_t f5 = f[5];
+ int32_t f6 = f[6];
+ int32_t f7 = f[7];
+ int32_t f8 = f[8];
+ int32_t f9 = f[9];
+ int32_t f0_2 = 2 * f0;
+ int32_t f1_2 = 2 * f1;
+ int32_t f2_2 = 2 * f2;
+ int32_t f3_2 = 2 * f3;
+ int32_t f4_2 = 2 * f4;
+ int32_t f5_2 = 2 * f5;
+ int32_t f6_2 = 2 * f6;
+ int32_t f7_2 = 2 * f7;
+ int32_t f5_38 = 38 * f5; /* 1.959375*2^30 */
+ int32_t f6_19 = 19 * f6; /* 1.959375*2^30 */
+ int32_t f7_38 = 38 * f7; /* 1.959375*2^30 */
+ int32_t f8_19 = 19 * f8; /* 1.959375*2^30 */
+ int32_t f9_38 = 38 * f9; /* 1.959375*2^30 */
+ int64_t f0f0 = f0 * (int64_t) f0;
+ int64_t f0f1_2 = f0_2 * (int64_t) f1;
+ int64_t f0f2_2 = f0_2 * (int64_t) f2;
+ int64_t f0f3_2 = f0_2 * (int64_t) f3;
+ int64_t f0f4_2 = f0_2 * (int64_t) f4;
+ int64_t f0f5_2 = f0_2 * (int64_t) f5;
+ int64_t f0f6_2 = f0_2 * (int64_t) f6;
+ int64_t f0f7_2 = f0_2 * (int64_t) f7;
+ int64_t f0f8_2 = f0_2 * (int64_t) f8;
+ int64_t f0f9_2 = f0_2 * (int64_t) f9;
+ int64_t f1f1_2 = f1_2 * (int64_t) f1;
+ int64_t f1f2_2 = f1_2 * (int64_t) f2;
+ int64_t f1f3_4 = f1_2 * (int64_t) f3_2;
+ int64_t f1f4_2 = f1_2 * (int64_t) f4;
+ int64_t f1f5_4 = f1_2 * (int64_t) f5_2;
+ int64_t f1f6_2 = f1_2 * (int64_t) f6;
+ int64_t f1f7_4 = f1_2 * (int64_t) f7_2;
+ int64_t f1f8_2 = f1_2 * (int64_t) f8;
+ int64_t f1f9_76 = f1_2 * (int64_t) f9_38;
+ int64_t f2f2 = f2 * (int64_t) f2;
+ int64_t f2f3_2 = f2_2 * (int64_t) f3;
+ int64_t f2f4_2 = f2_2 * (int64_t) f4;
+ int64_t f2f5_2 = f2_2 * (int64_t) f5;
+ int64_t f2f6_2 = f2_2 * (int64_t) f6;
+ int64_t f2f7_2 = f2_2 * (int64_t) f7;
+ int64_t f2f8_38 = f2_2 * (int64_t) f8_19;
+ int64_t f2f9_38 = f2 * (int64_t) f9_38;
+ int64_t f3f3_2 = f3_2 * (int64_t) f3;
+ int64_t f3f4_2 = f3_2 * (int64_t) f4;
+ int64_t f3f5_4 = f3_2 * (int64_t) f5_2;
+ int64_t f3f6_2 = f3_2 * (int64_t) f6;
+ int64_t f3f7_76 = f3_2 * (int64_t) f7_38;
+ int64_t f3f8_38 = f3_2 * (int64_t) f8_19;
+ int64_t f3f9_76 = f3_2 * (int64_t) f9_38;
+ int64_t f4f4 = f4 * (int64_t) f4;
+ int64_t f4f5_2 = f4_2 * (int64_t) f5;
+ int64_t f4f6_38 = f4_2 * (int64_t) f6_19;
+ int64_t f4f7_38 = f4 * (int64_t) f7_38;
+ int64_t f4f8_38 = f4_2 * (int64_t) f8_19;
+ int64_t f4f9_38 = f4 * (int64_t) f9_38;
+ int64_t f5f5_38 = f5 * (int64_t) f5_38;
+ int64_t f5f6_38 = f5_2 * (int64_t) f6_19;
+ int64_t f5f7_76 = f5_2 * (int64_t) f7_38;
+ int64_t f5f8_38 = f5_2 * (int64_t) f8_19;
+ int64_t f5f9_76 = f5_2 * (int64_t) f9_38;
+ int64_t f6f6_19 = f6 * (int64_t) f6_19;
+ int64_t f6f7_38 = f6 * (int64_t) f7_38;
+ int64_t f6f8_38 = f6_2 * (int64_t) f8_19;
+ int64_t f6f9_38 = f6 * (int64_t) f9_38;
+ int64_t f7f7_38 = f7 * (int64_t) f7_38;
+ int64_t f7f8_38 = f7_2 * (int64_t) f8_19;
+ int64_t f7f9_76 = f7_2 * (int64_t) f9_38;
+ int64_t f8f8_19 = f8 * (int64_t) f8_19;
+ int64_t f8f9_38 = f8 * (int64_t) f9_38;
+ int64_t f9f9_38 = f9 * (int64_t) f9_38;
+ int64_t h0 = f0f0 +f1f9_76+f2f8_38+f3f7_76+f4f6_38+f5f5_38;
+ int64_t h1 = f0f1_2+f2f9_38+f3f8_38+f4f7_38+f5f6_38;
+ int64_t h2 = f0f2_2+f1f1_2 +f3f9_76+f4f8_38+f5f7_76+f6f6_19;
+ int64_t h3 = f0f3_2+f1f2_2 +f4f9_38+f5f8_38+f6f7_38;
+ int64_t h4 = f0f4_2+f1f3_4 +f2f2 +f5f9_76+f6f8_38+f7f7_38;
+ int64_t h5 = f0f5_2+f1f4_2 +f2f3_2 +f6f9_38+f7f8_38;
+ int64_t h6 = f0f6_2+f1f5_4 +f2f4_2 +f3f3_2 +f7f9_76+f8f8_19;
+ int64_t h7 = f0f7_2+f1f6_2 +f2f5_2 +f3f4_2 +f8f9_38;
+ int64_t h8 = f0f8_2+f1f7_4 +f2f6_2 +f3f5_4 +f4f4 +f9f9_38;
+ int64_t h9 = f0f9_2+f1f8_2 +f2f7_2 +f3f6_2 +f4f5_2;
+ int64_t carry0;
+ int64_t carry1;
+ int64_t carry2;
+ int64_t carry3;
+ int64_t carry4;
+ int64_t carry5;
+ int64_t carry6;
+ int64_t carry7;
+ int64_t carry8;
+ int64_t carry9;
+
+ carry0 = h0 + (1 << 25); h1 += carry0 >> 26; h0 -= carry0 & kTop38Bits;
+ carry4 = h4 + (1 << 25); h5 += carry4 >> 26; h4 -= carry4 & kTop38Bits;
+
+ carry1 = h1 + (1 << 24); h2 += carry1 >> 25; h1 -= carry1 & kTop39Bits;
+ carry5 = h5 + (1 << 24); h6 += carry5 >> 25; h5 -= carry5 & kTop39Bits;
+
+ carry2 = h2 + (1 << 25); h3 += carry2 >> 26; h2 -= carry2 & kTop38Bits;
+ carry6 = h6 + (1 << 25); h7 += carry6 >> 26; h6 -= carry6 & kTop38Bits;
+
+ carry3 = h3 + (1 << 24); h4 += carry3 >> 25; h3 -= carry3 & kTop39Bits;
+ carry7 = h7 + (1 << 24); h8 += carry7 >> 25; h7 -= carry7 & kTop39Bits;
+
+ carry4 = h4 + (1 << 25); h5 += carry4 >> 26; h4 -= carry4 & kTop38Bits;
+ carry8 = h8 + (1 << 25); h9 += carry8 >> 26; h8 -= carry8 & kTop38Bits;
+
+ carry9 = h9 + (1 << 24); h0 += (carry9 >> 25) * 19; h9 -= carry9 & kTop39Bits;
+
+ carry0 = h0 + (1 << 25); h1 += carry0 >> 26; h0 -= carry0 & kTop38Bits;
+
+ h[0] = h0;
+ h[1] = h1;
+ h[2] = h2;
+ h[3] = h3;
+ h[4] = h4;
+ h[5] = h5;
+ h[6] = h6;
+ h[7] = h7;
+ h[8] = h8;
+ h[9] = h9;
+}
+
+static void fe_invert(fe out, const fe z) {
+ fe t0;
+ fe t1;
+ fe t2;
+ fe t3;
+ int i;
+
+ fe_sq(t0, z);
+ for (i = 1; i < 1; ++i) {
+ fe_sq(t0, t0);
+ }
+ fe_sq(t1, t0);
+ for (i = 1; i < 2; ++i) {
+ fe_sq(t1, t1);
+ }
+ fe_mul(t1, z, t1);
+ fe_mul(t0, t0, t1);
+ fe_sq(t2, t0);
+ for (i = 1; i < 1; ++i) {
+ fe_sq(t2, t2);
+ }
+ fe_mul(t1, t1, t2);
+ fe_sq(t2, t1);
+ for (i = 1; i < 5; ++i) {
+ fe_sq(t2, t2);
+ }
+ fe_mul(t1, t2, t1);
+ fe_sq(t2, t1);
+ for (i = 1; i < 10; ++i) {
+ fe_sq(t2, t2);
+ }
+ fe_mul(t2, t2, t1);
+ fe_sq(t3, t2);
+ for (i = 1; i < 20; ++i) {
+ fe_sq(t3, t3);
+ }
+ fe_mul(t2, t3, t2);
+ fe_sq(t2, t2);
+ for (i = 1; i < 10; ++i) {
+ fe_sq(t2, t2);
+ }
+ fe_mul(t1, t2, t1);
+ fe_sq(t2, t1);
+ for (i = 1; i < 50; ++i) {
+ fe_sq(t2, t2);
+ }
+ fe_mul(t2, t2, t1);
+ fe_sq(t3, t2);
+ for (i = 1; i < 100; ++i) {
+ fe_sq(t3, t3);
+ }
+ fe_mul(t2, t3, t2);
+ fe_sq(t2, t2);
+ for (i = 1; i < 50; ++i) {
+ fe_sq(t2, t2);
+ }
+ fe_mul(t1, t2, t1);
+ fe_sq(t1, t1);
+ for (i = 1; i < 5; ++i) {
+ fe_sq(t1, t1);
+ }
+ fe_mul(out, t1, t0);
+}
+
+/* h = -f
+ *
+ * Preconditions:
+ * |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+ *
+ * Postconditions:
+ * |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. */
+static void fe_neg(fe h, const fe f) {
+ unsigned i;
+ for (i = 0; i < 10; i++) {
+ h[i] = -f[i];
+ }
+}
+
+/* Replace (f,g) with (g,g) if b == 1;
+ * replace (f,g) with (f,g) if b == 0.
+ *
+ * Preconditions: b in {0,1}. */
+static void fe_cmov(fe f, const fe g, unsigned b) {
+ b = 0-b;
+ unsigned i;
+ for (i = 0; i < 10; i++) {
+ int32_t x = f[i] ^ g[i];
+ x &= b;
+ f[i] ^= x;
+ }
+}
+
+/* return 0 if f == 0
+ * return 1 if f != 0
+ *
+ * Preconditions:
+ * |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. */
+static int fe_isnonzero(const fe f) {
+ uint8_t s[32];
+ fe_tobytes(s, f);
+
+ static const uint8_t zero[32] = {0};
+ return timingsafe_memcmp(s, zero, sizeof(zero)) != 0;
+}
+
+/* return 1 if f is in {1,3,5,...,q-2}
+ * return 0 if f is in {0,2,4,...,q-1}
+ *
+ * Preconditions:
+ * |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. */
+static int fe_isnegative(const fe f) {
+ uint8_t s[32];
+ fe_tobytes(s, f);
+ return s[0] & 1;
+}
+
+/* h = 2 * f * f
+ * Can overlap h with f.
+ *
+ * Preconditions:
+ * |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
+ *
+ * Postconditions:
+ * |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc.
+ *
+ * See fe_mul.c for discussion of implementation strategy. */
+static void fe_sq2(fe h, const fe f) {
+ int32_t f0 = f[0];
+ int32_t f1 = f[1];
+ int32_t f2 = f[2];
+ int32_t f3 = f[3];
+ int32_t f4 = f[4];
+ int32_t f5 = f[5];
+ int32_t f6 = f[6];
+ int32_t f7 = f[7];
+ int32_t f8 = f[8];
+ int32_t f9 = f[9];
+ int32_t f0_2 = 2 * f0;
+ int32_t f1_2 = 2 * f1;
+ int32_t f2_2 = 2 * f2;
+ int32_t f3_2 = 2 * f3;
+ int32_t f4_2 = 2 * f4;
+ int32_t f5_2 = 2 * f5;
+ int32_t f6_2 = 2 * f6;
+ int32_t f7_2 = 2 * f7;
+ int32_t f5_38 = 38 * f5; /* 1.959375*2^30 */
+ int32_t f6_19 = 19 * f6; /* 1.959375*2^30 */
+ int32_t f7_38 = 38 * f7; /* 1.959375*2^30 */
+ int32_t f8_19 = 19 * f8; /* 1.959375*2^30 */
+ int32_t f9_38 = 38 * f9; /* 1.959375*2^30 */
+ int64_t f0f0 = f0 * (int64_t) f0;
+ int64_t f0f1_2 = f0_2 * (int64_t) f1;
+ int64_t f0f2_2 = f0_2 * (int64_t) f2;
+ int64_t f0f3_2 = f0_2 * (int64_t) f3;
+ int64_t f0f4_2 = f0_2 * (int64_t) f4;
+ int64_t f0f5_2 = f0_2 * (int64_t) f5;
+ int64_t f0f6_2 = f0_2 * (int64_t) f6;
+ int64_t f0f7_2 = f0_2 * (int64_t) f7;
+ int64_t f0f8_2 = f0_2 * (int64_t) f8;
+ int64_t f0f9_2 = f0_2 * (int64_t) f9;
+ int64_t f1f1_2 = f1_2 * (int64_t) f1;
+ int64_t f1f2_2 = f1_2 * (int64_t) f2;
+ int64_t f1f3_4 = f1_2 * (int64_t) f3_2;
+ int64_t f1f4_2 = f1_2 * (int64_t) f4;
+ int64_t f1f5_4 = f1_2 * (int64_t) f5_2;
+ int64_t f1f6_2 = f1_2 * (int64_t) f6;
+ int64_t f1f7_4 = f1_2 * (int64_t) f7_2;
+ int64_t f1f8_2 = f1_2 * (int64_t) f8;
+ int64_t f1f9_76 = f1_2 * (int64_t) f9_38;
+ int64_t f2f2 = f2 * (int64_t) f2;
+ int64_t f2f3_2 = f2_2 * (int64_t) f3;
+ int64_t f2f4_2 = f2_2 * (int64_t) f4;
+ int64_t f2f5_2 = f2_2 * (int64_t) f5;
+ int64_t f2f6_2 = f2_2 * (int64_t) f6;
+ int64_t f2f7_2 = f2_2 * (int64_t) f7;
+ int64_t f2f8_38 = f2_2 * (int64_t) f8_19;
+ int64_t f2f9_38 = f2 * (int64_t) f9_38;
+ int64_t f3f3_2 = f3_2 * (int64_t) f3;
+ int64_t f3f4_2 = f3_2 * (int64_t) f4;
+ int64_t f3f5_4 = f3_2 * (int64_t) f5_2;
+ int64_t f3f6_2 = f3_2 * (int64_t) f6;
+ int64_t f3f7_76 = f3_2 * (int64_t) f7_38;
+ int64_t f3f8_38 = f3_2 * (int64_t) f8_19;
+ int64_t f3f9_76 = f3_2 * (int64_t) f9_38;
+ int64_t f4f4 = f4 * (int64_t) f4;
+ int64_t f4f5_2 = f4_2 * (int64_t) f5;
+ int64_t f4f6_38 = f4_2 * (int64_t) f6_19;
+ int64_t f4f7_38 = f4 * (int64_t) f7_38;
+ int64_t f4f8_38 = f4_2 * (int64_t) f8_19;
+ int64_t f4f9_38 = f4 * (int64_t) f9_38;
+ int64_t f5f5_38 = f5 * (int64_t) f5_38;
+ int64_t f5f6_38 = f5_2 * (int64_t) f6_19;
+ int64_t f5f7_76 = f5_2 * (int64_t) f7_38;
+ int64_t f5f8_38 = f5_2 * (int64_t) f8_19;
+ int64_t f5f9_76 = f5_2 * (int64_t) f9_38;
+ int64_t f6f6_19 = f6 * (int64_t) f6_19;
+ int64_t f6f7_38 = f6 * (int64_t) f7_38;
+ int64_t f6f8_38 = f6_2 * (int64_t) f8_19;
+ int64_t f6f9_38 = f6 * (int64_t) f9_38;
+ int64_t f7f7_38 = f7 * (int64_t) f7_38;
+ int64_t f7f8_38 = f7_2 * (int64_t) f8_19;
+ int64_t f7f9_76 = f7_2 * (int64_t) f9_38;
+ int64_t f8f8_19 = f8 * (int64_t) f8_19;
+ int64_t f8f9_38 = f8 * (int64_t) f9_38;
+ int64_t f9f9_38 = f9 * (int64_t) f9_38;
+ int64_t h0 = f0f0 +f1f9_76+f2f8_38+f3f7_76+f4f6_38+f5f5_38;
+ int64_t h1 = f0f1_2+f2f9_38+f3f8_38+f4f7_38+f5f6_38;
+ int64_t h2 = f0f2_2+f1f1_2 +f3f9_76+f4f8_38+f5f7_76+f6f6_19;
+ int64_t h3 = f0f3_2+f1f2_2 +f4f9_38+f5f8_38+f6f7_38;
+ int64_t h4 = f0f4_2+f1f3_4 +f2f2 +f5f9_76+f6f8_38+f7f7_38;
+ int64_t h5 = f0f5_2+f1f4_2 +f2f3_2 +f6f9_38+f7f8_38;
+ int64_t h6 = f0f6_2+f1f5_4 +f2f4_2 +f3f3_2 +f7f9_76+f8f8_19;
+ int64_t h7 = f0f7_2+f1f6_2 +f2f5_2 +f3f4_2 +f8f9_38;
+ int64_t h8 = f0f8_2+f1f7_4 +f2f6_2 +f3f5_4 +f4f4 +f9f9_38;
+ int64_t h9 = f0f9_2+f1f8_2 +f2f7_2 +f3f6_2 +f4f5_2;
+ int64_t carry0;
+ int64_t carry1;
+ int64_t carry2;
+ int64_t carry3;
+ int64_t carry4;
+ int64_t carry5;
+ int64_t carry6;
+ int64_t carry7;
+ int64_t carry8;
+ int64_t carry9;
+
+ h0 += h0;
+ h1 += h1;
+ h2 += h2;
+ h3 += h3;
+ h4 += h4;
+ h5 += h5;
+ h6 += h6;
+ h7 += h7;
+ h8 += h8;
+ h9 += h9;
+
+ carry0 = h0 + (1 << 25); h1 += carry0 >> 26; h0 -= carry0 & kTop38Bits;
+ carry4 = h4 + (1 << 25); h5 += carry4 >> 26; h4 -= carry4 & kTop38Bits;
+
+ carry1 = h1 + (1 << 24); h2 += carry1 >> 25; h1 -= carry1 & kTop39Bits;
+ carry5 = h5 + (1 << 24); h6 += carry5 >> 25; h5 -= carry5 & kTop39Bits;
+
+ carry2 = h2 + (1 << 25); h3 += carry2 >> 26; h2 -= carry2 & kTop38Bits;
+ carry6 = h6 + (1 << 25); h7 += carry6 >> 26; h6 -= carry6 & kTop38Bits;
+
+ carry3 = h3 + (1 << 24); h4 += carry3 >> 25; h3 -= carry3 & kTop39Bits;
+ carry7 = h7 + (1 << 24); h8 += carry7 >> 25; h7 -= carry7 & kTop39Bits;
+
+ carry4 = h4 + (1 << 25); h5 += carry4 >> 26; h4 -= carry4 & kTop38Bits;
+ carry8 = h8 + (1 << 25); h9 += carry8 >> 26; h8 -= carry8 & kTop38Bits;
+
+ carry9 = h9 + (1 << 24); h0 += (carry9 >> 25) * 19; h9 -= carry9 & kTop39Bits;
+
+ carry0 = h0 + (1 << 25); h1 += carry0 >> 26; h0 -= carry0 & kTop38Bits;
+
+ h[0] = h0;
+ h[1] = h1;
+ h[2] = h2;
+ h[3] = h3;
+ h[4] = h4;
+ h[5] = h5;
+ h[6] = h6;
+ h[7] = h7;
+ h[8] = h8;
+ h[9] = h9;
+}
+
+static void fe_pow22523(fe out, const fe z) {
+ fe t0;
+ fe t1;
+ fe t2;
+ int i;
+
+ fe_sq(t0, z);
+ for (i = 1; i < 1; ++i) {
+ fe_sq(t0, t0);
+ }
+ fe_sq(t1, t0);
+ for (i = 1; i < 2; ++i) {
+ fe_sq(t1, t1);
+ }
+ fe_mul(t1, z, t1);
+ fe_mul(t0, t0, t1);
+ fe_sq(t0, t0);
+ for (i = 1; i < 1; ++i) {
+ fe_sq(t0, t0);
+ }
+ fe_mul(t0, t1, t0);
+ fe_sq(t1, t0);
+ for (i = 1; i < 5; ++i) {
+ fe_sq(t1, t1);
+ }
+ fe_mul(t0, t1, t0);
+ fe_sq(t1, t0);
+ for (i = 1; i < 10; ++i) {
+ fe_sq(t1, t1);
+ }
+ fe_mul(t1, t1, t0);
+ fe_sq(t2, t1);
+ for (i = 1; i < 20; ++i) {
+ fe_sq(t2, t2);
+ }
+ fe_mul(t1, t2, t1);
+ fe_sq(t1, t1);
+ for (i = 1; i < 10; ++i) {
+ fe_sq(t1, t1);
+ }
+ fe_mul(t0, t1, t0);
+ fe_sq(t1, t0);
+ for (i = 1; i < 50; ++i) {
+ fe_sq(t1, t1);
+ }
+ fe_mul(t1, t1, t0);
+ fe_sq(t2, t1);
+ for (i = 1; i < 100; ++i) {
+ fe_sq(t2, t2);
+ }
+ fe_mul(t1, t2, t1);
+ fe_sq(t1, t1);
+ for (i = 1; i < 50; ++i) {
+ fe_sq(t1, t1);
+ }
+ fe_mul(t0, t1, t0);
+ fe_sq(t0, t0);
+ for (i = 1; i < 2; ++i) {
+ fe_sq(t0, t0);
+ }
+ fe_mul(out, t0, z);
+}
+
+void x25519_ge_tobytes(uint8_t *s, const ge_p2 *h) {
+ fe recip;
+ fe x;
+ fe y;
+
+ fe_invert(recip, h->Z);
+ fe_mul(x, h->X, recip);
+ fe_mul(y, h->Y, recip);
+ fe_tobytes(s, y);
+ s[31] ^= fe_isnegative(x) << 7;
+}
+
+#ifdef ED25519
+static void ge_p3_tobytes(uint8_t *s, const ge_p3 *h) {
+ fe recip;
+ fe x;
+ fe y;
+
+ fe_invert(recip, h->Z);
+ fe_mul(x, h->X, recip);
+ fe_mul(y, h->Y, recip);
+ fe_tobytes(s, y);
+ s[31] ^= fe_isnegative(x) << 7;
+}
+#endif
+
+static const fe d = {-10913610, 13857413, -15372611, 6949391, 114729,
+ -8787816, -6275908, -3247719, -18696448, -12055116};
+
+static const fe sqrtm1 = {-32595792, -7943725, 9377950, 3500415, 12389472,
+ -272473, -25146209, -2005654, 326686, 11406482};
+
+int x25519_ge_frombytes_vartime(ge_p3 *h, const uint8_t *s) {
+ fe u;
+ fe v;
+ fe v3;
+ fe vxx;
+ fe check;
+
+ fe_frombytes(h->Y, s);
+ fe_1(h->Z);
+ fe_sq(u, h->Y);
+ fe_mul(v, u, d);
+ fe_sub(u, u, h->Z); /* u = y^2-1 */
+ fe_add(v, v, h->Z); /* v = dy^2+1 */
+
+ fe_sq(v3, v);
+ fe_mul(v3, v3, v); /* v3 = v^3 */
+ fe_sq(h->X, v3);
+ fe_mul(h->X, h->X, v);
+ fe_mul(h->X, h->X, u); /* x = uv^7 */
+
+ fe_pow22523(h->X, h->X); /* x = (uv^7)^((q-5)/8) */
+ fe_mul(h->X, h->X, v3);
+ fe_mul(h->X, h->X, u); /* x = uv^3(uv^7)^((q-5)/8) */
+
+ fe_sq(vxx, h->X);
+ fe_mul(vxx, vxx, v);
+ fe_sub(check, vxx, u); /* vx^2-u */
+ if (fe_isnonzero(check)) {
+ fe_add(check, vxx, u); /* vx^2+u */
+ if (fe_isnonzero(check)) {
+ return -1;
+ }
+ fe_mul(h->X, h->X, sqrtm1);
+ }
+
+ if (fe_isnegative(h->X) != (s[31] >> 7)) {
+ fe_neg(h->X, h->X);
+ }
+
+ fe_mul(h->T, h->X, h->Y);
+ return 0;
+}
+
+static void ge_p2_0(ge_p2 *h) {
+ fe_0(h->X);
+ fe_1(h->Y);
+ fe_1(h->Z);
+}
+
+static void ge_p3_0(ge_p3 *h) {
+ fe_0(h->X);
+ fe_1(h->Y);
+ fe_1(h->Z);
+ fe_0(h->T);
+}
+
+static void ge_cached_0(ge_cached *h) {
+ fe_1(h->YplusX);
+ fe_1(h->YminusX);
+ fe_1(h->Z);
+ fe_0(h->T2d);
+}
+
+static void ge_precomp_0(ge_precomp *h) {
+ fe_1(h->yplusx);
+ fe_1(h->yminusx);
+ fe_0(h->xy2d);
+}
+
+/* r = p */
+static void ge_p3_to_p2(ge_p2 *r, const ge_p3 *p) {
+ fe_copy(r->X, p->X);
+ fe_copy(r->Y, p->Y);
+ fe_copy(r->Z, p->Z);
+}
+
+static const fe d2 = {-21827239, -5839606, -30745221, 13898782, 229458,
+ 15978800, -12551817, -6495438, 29715968, 9444199};
+
+/* r = p */
+void x25519_ge_p3_to_cached(ge_cached *r, const ge_p3 *p) {
+ fe_add(r->YplusX, p->Y, p->X);
+ fe_sub(r->YminusX, p->Y, p->X);
+ fe_copy(r->Z, p->Z);
+ fe_mul(r->T2d, p->T, d2);
+}
+
+/* r = p */
+void x25519_ge_p1p1_to_p2(ge_p2 *r, const ge_p1p1 *p) {
+ fe_mul(r->X, p->X, p->T);
+ fe_mul(r->Y, p->Y, p->Z);
+ fe_mul(r->Z, p->Z, p->T);
+}
+
+/* r = p */
+void x25519_ge_p1p1_to_p3(ge_p3 *r, const ge_p1p1 *p) {
+ fe_mul(r->X, p->X, p->T);
+ fe_mul(r->Y, p->Y, p->Z);
+ fe_mul(r->Z, p->Z, p->T);
+ fe_mul(r->T, p->X, p->Y);
+}
+
+/* r = p */
+static void ge_p1p1_to_cached(ge_cached *r, const ge_p1p1 *p) {
+ ge_p3 t;
+ x25519_ge_p1p1_to_p3(&t, p);
+ x25519_ge_p3_to_cached(r, &t);
+}
+
+/* r = 2 * p */
+static void ge_p2_dbl(ge_p1p1 *r, const ge_p2 *p) {
+ fe t0;
+
+ fe_sq(r->X, p->X);
+ fe_sq(r->Z, p->Y);
+ fe_sq2(r->T, p->Z);
+ fe_add(r->Y, p->X, p->Y);
+ fe_sq(t0, r->Y);
+ fe_add(r->Y, r->Z, r->X);
+ fe_sub(r->Z, r->Z, r->X);
+ fe_sub(r->X, t0, r->Y);
+ fe_sub(r->T, r->T, r->Z);
+}
+
+/* r = 2 * p */
+static void ge_p3_dbl(ge_p1p1 *r, const ge_p3 *p) {
+ ge_p2 q;
+ ge_p3_to_p2(&q, p);
+ ge_p2_dbl(r, &q);
+}
+
+/* r = p + q */
+static void ge_madd(ge_p1p1 *r, const ge_p3 *p, const ge_precomp *q) {
+ fe t0;
+
+ fe_add(r->X, p->Y, p->X);
+ fe_sub(r->Y, p->Y, p->X);
+ fe_mul(r->Z, r->X, q->yplusx);
+ fe_mul(r->Y, r->Y, q->yminusx);
+ fe_mul(r->T, q->xy2d, p->T);
+ fe_add(t0, p->Z, p->Z);
+ fe_sub(r->X, r->Z, r->Y);
+ fe_add(r->Y, r->Z, r->Y);
+ fe_add(r->Z, t0, r->T);
+ fe_sub(r->T, t0, r->T);
+}
+
+#ifdef ED25519
+/* r = p - q */
+static void ge_msub(ge_p1p1 *r, const ge_p3 *p, const ge_precomp *q) {
+ fe t0;
+
+ fe_add(r->X, p->Y, p->X);
+ fe_sub(r->Y, p->Y, p->X);
+ fe_mul(r->Z, r->X, q->yminusx);
+ fe_mul(r->Y, r->Y, q->yplusx);
+ fe_mul(r->T, q->xy2d, p->T);
+ fe_add(t0, p->Z, p->Z);
+ fe_sub(r->X, r->Z, r->Y);
+ fe_add(r->Y, r->Z, r->Y);
+ fe_sub(r->Z, t0, r->T);
+ fe_add(r->T, t0, r->T);
+}
+#endif
+
+/* r = p + q */
+void x25519_ge_add(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q) {
+ fe t0;
+
+ fe_add(r->X, p->Y, p->X);
+ fe_sub(r->Y, p->Y, p->X);
+ fe_mul(r->Z, r->X, q->YplusX);
+ fe_mul(r->Y, r->Y, q->YminusX);
+ fe_mul(r->T, q->T2d, p->T);
+ fe_mul(r->X, p->Z, q->Z);
+ fe_add(t0, r->X, r->X);
+ fe_sub(r->X, r->Z, r->Y);
+ fe_add(r->Y, r->Z, r->Y);
+ fe_add(r->Z, t0, r->T);
+ fe_sub(r->T, t0, r->T);
+}
+
+/* r = p - q */
+void x25519_ge_sub(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q) {
+ fe t0;
+
+ fe_add(r->X, p->Y, p->X);
+ fe_sub(r->Y, p->Y, p->X);
+ fe_mul(r->Z, r->X, q->YminusX);
+ fe_mul(r->Y, r->Y, q->YplusX);
+ fe_mul(r->T, q->T2d, p->T);
+ fe_mul(r->X, p->Z, q->Z);
+ fe_add(t0, r->X, r->X);
+ fe_sub(r->X, r->Z, r->Y);
+ fe_add(r->Y, r->Z, r->Y);
+ fe_sub(r->Z, t0, r->T);
+ fe_add(r->T, t0, r->T);
+}
+
+static uint8_t equal(signed char b, signed char c) {
+ uint8_t ub = b;
+ uint8_t uc = c;
+ uint8_t x = ub ^ uc; /* 0: yes; 1..255: no */
+ uint32_t y = x; /* 0: yes; 1..255: no */
+ y -= 1; /* 4294967295: yes; 0..254: no */
+ y >>= 31; /* 1: yes; 0: no */
+ return y;
+}
+
+static void cmov(ge_precomp *t, const ge_precomp *u, uint8_t b) {
+ fe_cmov(t->yplusx, u->yplusx, b);
+ fe_cmov(t->yminusx, u->yminusx, b);
+ fe_cmov(t->xy2d, u->xy2d, b);
+}
+
+void x25519_ge_scalarmult_small_precomp(
+ ge_p3 *h, const uint8_t a[32], const uint8_t precomp_table[15 * 2 * 32]) {
+ /* precomp_table is first expanded into matching |ge_precomp|
+ * elements. */
+ ge_precomp multiples[15];
+
+ unsigned i;
+ for (i = 0; i < 15; i++) {
+ const uint8_t *bytes = &precomp_table[i*(2 * 32)];
+ fe x, y;
+ fe_frombytes(x, bytes);
+ fe_frombytes(y, bytes + 32);
+
+ ge_precomp *out = &multiples[i];
+ fe_add(out->yplusx, y, x);
+ fe_sub(out->yminusx, y, x);
+ fe_mul(out->xy2d, x, y);
+ fe_mul(out->xy2d, out->xy2d, d2);
+ }
+
+ /* See the comment above |k25519SmallPrecomp| about the structure of the
+ * precomputed elements. This loop does 64 additions and 64 doublings to
+ * calculate the result. */
+ ge_p3_0(h);
+
+ for (i = 63; i < 64; i--) {
+ unsigned j;
+ signed char index = 0;
+
+ for (j = 0; j < 4; j++) {
+ const uint8_t bit = 1 & (a[(8 * j) + (i / 8)] >> (i & 7));
+ index |= (bit << j);
+ }
+
+ ge_precomp e;
+ ge_precomp_0(&e);
+
+ for (j = 1; j < 16; j++) {
+ cmov(&e, &multiples[j-1], equal(index, j));
+ }
+
+ ge_cached cached;
+ ge_p1p1 r;
+ x25519_ge_p3_to_cached(&cached, h);
+ x25519_ge_add(&r, h, &cached);
+ x25519_ge_p1p1_to_p3(h, &r);
+
+ ge_madd(&r, h, &e);
+ x25519_ge_p1p1_to_p3(h, &r);
+ }
+}
+
+#if defined(OPENSSL_SMALL)
+
+/* This block of code replaces the standard base-point table with a much smaller
+ * one. The standard table is 30,720 bytes while this one is just 960.
+ *
+ * This table contains 15 pairs of group elements, (x, y), where each field
+ * element is serialised with |fe_tobytes|. If |i| is the index of the group
+ * element then consider i+1 as a four-bit number: (i₀, i₁, i₂, i₃) (where i₀
+ * is the most significant bit). The value of the group element is then:
+ * (i₀×2^192 + i₁×2^128 + i₂×2^64 + i₃)G, where G is the generator. */
+static const uint8_t k25519SmallPrecomp[15 * 2 * 32] = {
+ 0x1a, 0xd5, 0x25, 0x8f, 0x60, 0x2d, 0x56, 0xc9, 0xb2, 0xa7, 0x25, 0x95,
+ 0x60, 0xc7, 0x2c, 0x69, 0x5c, 0xdc, 0xd6, 0xfd, 0x31, 0xe2, 0xa4, 0xc0,
+ 0xfe, 0x53, 0x6e, 0xcd, 0xd3, 0x36, 0x69, 0x21, 0x58, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x66, 0x66, 0x02, 0xa2, 0xed, 0xf4, 0x8f, 0x6b, 0x0b, 0x3e,
+ 0xeb, 0x35, 0x1a, 0xd5, 0x7e, 0xdb, 0x78, 0x00, 0x96, 0x8a, 0xa0, 0xb4,
+ 0xcf, 0x60, 0x4b, 0xd4, 0xd5, 0xf9, 0x2d, 0xbf, 0x88, 0xbd, 0x22, 0x62,
+ 0x13, 0x53, 0xe4, 0x82, 0x57, 0xfa, 0x1e, 0x8f, 0x06, 0x2b, 0x90, 0xba,
+ 0x08, 0xb6, 0x10, 0x54, 0x4f, 0x7c, 0x1b, 0x26, 0xed, 0xda, 0x6b, 0xdd,
+ 0x25, 0xd0, 0x4e, 0xea, 0x42, 0xbb, 0x25, 0x03, 0xa2, 0xfb, 0xcc, 0x61,
+ 0x67, 0x06, 0x70, 0x1a, 0xc4, 0x78, 0x3a, 0xff, 0x32, 0x62, 0xdd, 0x2c,
+ 0xab, 0x50, 0x19, 0x3b, 0xf2, 0x9b, 0x7d, 0xb8, 0xfd, 0x4f, 0x29, 0x9c,
+ 0xa7, 0x91, 0xba, 0x0e, 0x46, 0x5e, 0x51, 0xfe, 0x1d, 0xbf, 0xe5, 0xe5,
+ 0x9b, 0x95, 0x0d, 0x67, 0xf8, 0xd1, 0xb5, 0x5a, 0xa1, 0x93, 0x2c, 0xc3,
+ 0xde, 0x0e, 0x97, 0x85, 0x2d, 0x7f, 0xea, 0xab, 0x3e, 0x47, 0x30, 0x18,
+ 0x24, 0xe8, 0xb7, 0x60, 0xae, 0x47, 0x80, 0xfc, 0xe5, 0x23, 0xe7, 0xc2,
+ 0xc9, 0x85, 0xe6, 0x98, 0xa0, 0x29, 0x4e, 0xe1, 0x84, 0x39, 0x2d, 0x95,
+ 0x2c, 0xf3, 0x45, 0x3c, 0xff, 0xaf, 0x27, 0x4c, 0x6b, 0xa6, 0xf5, 0x4b,
+ 0x11, 0xbd, 0xba, 0x5b, 0x9e, 0xc4, 0xa4, 0x51, 0x1e, 0xbe, 0xd0, 0x90,
+ 0x3a, 0x9c, 0xc2, 0x26, 0xb6, 0x1e, 0xf1, 0x95, 0x7d, 0xc8, 0x6d, 0x52,
+ 0xe6, 0x99, 0x2c, 0x5f, 0x9a, 0x96, 0x0c, 0x68, 0x29, 0xfd, 0xe2, 0xfb,
+ 0xe6, 0xbc, 0xec, 0x31, 0x08, 0xec, 0xe6, 0xb0, 0x53, 0x60, 0xc3, 0x8c,
+ 0xbe, 0xc1, 0xb3, 0x8a, 0x8f, 0xe4, 0x88, 0x2b, 0x55, 0xe5, 0x64, 0x6e,
+ 0x9b, 0xd0, 0xaf, 0x7b, 0x64, 0x2a, 0x35, 0x25, 0x10, 0x52, 0xc5, 0x9e,
+ 0x58, 0x11, 0x39, 0x36, 0x45, 0x51, 0xb8, 0x39, 0x93, 0xfc, 0x9d, 0x6a,
+ 0xbe, 0x58, 0xcb, 0xa4, 0x0f, 0x51, 0x3c, 0x38, 0x05, 0xca, 0xab, 0x43,
+ 0x63, 0x0e, 0xf3, 0x8b, 0x41, 0xa6, 0xf8, 0x9b, 0x53, 0x70, 0x80, 0x53,
+ 0x86, 0x5e, 0x8f, 0xe3, 0xc3, 0x0d, 0x18, 0xc8, 0x4b, 0x34, 0x1f, 0xd8,
+ 0x1d, 0xbc, 0xf2, 0x6d, 0x34, 0x3a, 0xbe, 0xdf, 0xd9, 0xf6, 0xf3, 0x89,
+ 0xa1, 0xe1, 0x94, 0x9f, 0x5d, 0x4c, 0x5d, 0xe9, 0xa1, 0x49, 0x92, 0xef,
+ 0x0e, 0x53, 0x81, 0x89, 0x58, 0x87, 0xa6, 0x37, 0xf1, 0xdd, 0x62, 0x60,
+ 0x63, 0x5a, 0x9d, 0x1b, 0x8c, 0xc6, 0x7d, 0x52, 0xea, 0x70, 0x09, 0x6a,
+ 0xe1, 0x32, 0xf3, 0x73, 0x21, 0x1f, 0x07, 0x7b, 0x7c, 0x9b, 0x49, 0xd8,
+ 0xc0, 0xf3, 0x25, 0x72, 0x6f, 0x9d, 0xed, 0x31, 0x67, 0x36, 0x36, 0x54,
+ 0x40, 0x92, 0x71, 0xe6, 0x11, 0x28, 0x11, 0xad, 0x93, 0x32, 0x85, 0x7b,
+ 0x3e, 0xb7, 0x3b, 0x49, 0x13, 0x1c, 0x07, 0xb0, 0x2e, 0x93, 0xaa, 0xfd,
+ 0xfd, 0x28, 0x47, 0x3d, 0x8d, 0xd2, 0xda, 0xc7, 0x44, 0xd6, 0x7a, 0xdb,
+ 0x26, 0x7d, 0x1d, 0xb8, 0xe1, 0xde, 0x9d, 0x7a, 0x7d, 0x17, 0x7e, 0x1c,
+ 0x37, 0x04, 0x8d, 0x2d, 0x7c, 0x5e, 0x18, 0x38, 0x1e, 0xaf, 0xc7, 0x1b,
+ 0x33, 0x48, 0x31, 0x00, 0x59, 0xf6, 0xf2, 0xca, 0x0f, 0x27, 0x1b, 0x63,
+ 0x12, 0x7e, 0x02, 0x1d, 0x49, 0xc0, 0x5d, 0x79, 0x87, 0xef, 0x5e, 0x7a,
+ 0x2f, 0x1f, 0x66, 0x55, 0xd8, 0x09, 0xd9, 0x61, 0x38, 0x68, 0xb0, 0x07,
+ 0xa3, 0xfc, 0xcc, 0x85, 0x10, 0x7f, 0x4c, 0x65, 0x65, 0xb3, 0xfa, 0xfa,
+ 0xa5, 0x53, 0x6f, 0xdb, 0x74, 0x4c, 0x56, 0x46, 0x03, 0xe2, 0xd5, 0x7a,
+ 0x29, 0x1c, 0xc6, 0x02, 0xbc, 0x59, 0xf2, 0x04, 0x75, 0x63, 0xc0, 0x84,
+ 0x2f, 0x60, 0x1c, 0x67, 0x76, 0xfd, 0x63, 0x86, 0xf3, 0xfa, 0xbf, 0xdc,
+ 0xd2, 0x2d, 0x90, 0x91, 0xbd, 0x33, 0xa9, 0xe5, 0x66, 0x0c, 0xda, 0x42,
+ 0x27, 0xca, 0xf4, 0x66, 0xc2, 0xec, 0x92, 0x14, 0x57, 0x06, 0x63, 0xd0,
+ 0x4d, 0x15, 0x06, 0xeb, 0x69, 0x58, 0x4f, 0x77, 0xc5, 0x8b, 0xc7, 0xf0,
+ 0x8e, 0xed, 0x64, 0xa0, 0xb3, 0x3c, 0x66, 0x71, 0xc6, 0x2d, 0xda, 0x0a,
+ 0x0d, 0xfe, 0x70, 0x27, 0x64, 0xf8, 0x27, 0xfa, 0xf6, 0x5f, 0x30, 0xa5,
+ 0x0d, 0x6c, 0xda, 0xf2, 0x62, 0x5e, 0x78, 0x47, 0xd3, 0x66, 0x00, 0x1c,
+ 0xfd, 0x56, 0x1f, 0x5d, 0x3f, 0x6f, 0xf4, 0x4c, 0xd8, 0xfd, 0x0e, 0x27,
+ 0xc9, 0x5c, 0x2b, 0xbc, 0xc0, 0xa4, 0xe7, 0x23, 0x29, 0x02, 0x9f, 0x31,
+ 0xd6, 0xe9, 0xd7, 0x96, 0xf4, 0xe0, 0x5e, 0x0b, 0x0e, 0x13, 0xee, 0x3c,
+ 0x09, 0xed, 0xf2, 0x3d, 0x76, 0x91, 0xc3, 0xa4, 0x97, 0xae, 0xd4, 0x87,
+ 0xd0, 0x5d, 0xf6, 0x18, 0x47, 0x1f, 0x1d, 0x67, 0xf2, 0xcf, 0x63, 0xa0,
+ 0x91, 0x27, 0xf8, 0x93, 0x45, 0x75, 0x23, 0x3f, 0xd1, 0xf1, 0xad, 0x23,
+ 0xdd, 0x64, 0x93, 0x96, 0x41, 0x70, 0x7f, 0xf7, 0xf5, 0xa9, 0x89, 0xa2,
+ 0x34, 0xb0, 0x8d, 0x1b, 0xae, 0x19, 0x15, 0x49, 0x58, 0x23, 0x6d, 0x87,
+ 0x15, 0x4f, 0x81, 0x76, 0xfb, 0x23, 0xb5, 0xea, 0xcf, 0xac, 0x54, 0x8d,
+ 0x4e, 0x42, 0x2f, 0xeb, 0x0f, 0x63, 0xdb, 0x68, 0x37, 0xa8, 0xcf, 0x8b,
+ 0xab, 0xf5, 0xa4, 0x6e, 0x96, 0x2a, 0xb2, 0xd6, 0xbe, 0x9e, 0xbd, 0x0d,
+ 0xb4, 0x42, 0xa9, 0xcf, 0x01, 0x83, 0x8a, 0x17, 0x47, 0x76, 0xc4, 0xc6,
+ 0x83, 0x04, 0x95, 0x0b, 0xfc, 0x11, 0xc9, 0x62, 0xb8, 0x0c, 0x76, 0x84,
+ 0xd9, 0xb9, 0x37, 0xfa, 0xfc, 0x7c, 0xc2, 0x6d, 0x58, 0x3e, 0xb3, 0x04,
+ 0xbb, 0x8c, 0x8f, 0x48, 0xbc, 0x91, 0x27, 0xcc, 0xf9, 0xb7, 0x22, 0x19,
+ 0x83, 0x2e, 0x09, 0xb5, 0x72, 0xd9, 0x54, 0x1c, 0x4d, 0xa1, 0xea, 0x0b,
+ 0xf1, 0xc6, 0x08, 0x72, 0x46, 0x87, 0x7a, 0x6e, 0x80, 0x56, 0x0a, 0x8a,
+ 0xc0, 0xdd, 0x11, 0x6b, 0xd6, 0xdd, 0x47, 0xdf, 0x10, 0xd9, 0xd8, 0xea,
+ 0x7c, 0xb0, 0x8f, 0x03, 0x00, 0x2e, 0xc1, 0x8f, 0x44, 0xa8, 0xd3, 0x30,
+ 0x06, 0x89, 0xa2, 0xf9, 0x34, 0xad, 0xdc, 0x03, 0x85, 0xed, 0x51, 0xa7,
+ 0x82, 0x9c, 0xe7, 0x5d, 0x52, 0x93, 0x0c, 0x32, 0x9a, 0x5b, 0xe1, 0xaa,
+ 0xca, 0xb8, 0x02, 0x6d, 0x3a, 0xd4, 0xb1, 0x3a, 0xf0, 0x5f, 0xbe, 0xb5,
+ 0x0d, 0x10, 0x6b, 0x38, 0x32, 0xac, 0x76, 0x80, 0xbd, 0xca, 0x94, 0x71,
+ 0x7a, 0xf2, 0xc9, 0x35, 0x2a, 0xde, 0x9f, 0x42, 0x49, 0x18, 0x01, 0xab,
+ 0xbc, 0xef, 0x7c, 0x64, 0x3f, 0x58, 0x3d, 0x92, 0x59, 0xdb, 0x13, 0xdb,
+ 0x58, 0x6e, 0x0a, 0xe0, 0xb7, 0x91, 0x4a, 0x08, 0x20, 0xd6, 0x2e, 0x3c,
+ 0x45, 0xc9, 0x8b, 0x17, 0x79, 0xe7, 0xc7, 0x90, 0x99, 0x3a, 0x18, 0x25,
+};
+
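+/* With OPENSSL_SMALL, base-point multiplication goes through the generic
+ * small-precomp routine above using the 960-byte table. */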
+void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t a[32]) {
+ x25519_ge_scalarmult_small_precomp(h, a, k25519SmallPrecomp);
+}
+
+#else
+
+/* k25519Precomp[i][j] = (j+1)*256^i*B */
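+/* For example, k25519Precomp[0][0] = B, k25519Precomp[0][7] = 8*B and
+ * k25519Precomp[1][0] = 256*B. */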
+static const ge_precomp k25519Precomp[32][8] = {
+ {
+ {
+ {25967493, -14356035, 29566456, 3660896, -12694345, 4014787,
+ 27544626, -11754271, -6079156, 2047605},
+ {-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692,
+ 5043384, 19500929, -15469378},
+ {-8738181, 4489570, 9688441, -14785194, 10184609, -12363380,
+ 29287919, 11864899, -24514362, -4438546},
+ },
+ {
+ {-12815894, -12976347, -21581243, 11784320, -25355658, -2750717,
+ -11717903, -3814571, -358445, -10211303},
+ {-21703237, 6903825, 27185491, 6451973, -29577724, -9554005,
+ -15616551, 11189268, -26829678, -5319081},
+ {26966642, 11152617, 32442495, 15396054, 14353839, -12752335,
+ -3128826, -9541118, -15472047, -4166697},
+ },
+ {
+ {15636291, -9688557, 24204773, -7912398, 616977, -16685262,
+ 27787600, -14772189, 28944400, -1550024},
+ {16568933, 4717097, -11556148, -1102322, 15682896, -11807043,
+ 16354577, -11775962, 7689662, 11199574},
+ {30464156, -5976125, -11779434, -15670865, 23220365, 15915852,
+ 7512774, 10017326, -17749093, -9920357},
+ },
+ {
+ {-17036878, 13921892, 10945806, -6033431, 27105052, -16084379,
+ -28926210, 15006023, 3284568, -6276540},
+ {23599295, -8306047, -11193664, -7687416, 13236774, 10506355,
+ 7464579, 9656445, 13059162, 10374397},
+ {7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664,
+ -3839045, -641708, -101325},
+ },
+ {
+ {10861363, 11473154, 27284546, 1981175, -30064349, 12577861,
+ 32867885, 14515107, -15438304, 10819380},
+ {4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668,
+ 12483688, -12668491, 5581306},
+ {19563160, 16186464, -29386857, 4097519, 10237984, -4348115,
+ 28542350, 13850243, -23678021, -15815942},
+ },
+ {
+ {-15371964, -12862754, 32573250, 4720197, -26436522, 5875511,
+ -19188627, -15224819, -9818940, -12085777},
+ {-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240,
+ -15689887, 1762328, 14866737},
+ {-18199695, -15951423, -10473290, 1707278, -17185920, 3916101,
+ -28236412, 3959421, 27914454, 4383652},
+ },
+ {
+ {5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852,
+ 5230134, -23952439, -15175766},
+ {-30269007, -3463509, 7665486, 10083793, 28475525, 1649722,
+ 20654025, 16520125, 30598449, 7715701},
+ {28881845, 14381568, 9657904, 3680757, -20181635, 7843316,
+ -31400660, 1370708, 29794553, -1409300},
+ },
+ {
+ {14499471, -2729599, -33191113, -4254652, 28494862, 14271267,
+ 30290735, 10876454, -33154098, 2381726},
+ {-7195431, -2655363, -14730155, 462251, -27724326, 3941372,
+ -6236617, 3696005, -32300832, 15351955},
+ {27431194, 8222322, 16448760, -3907995, -18707002, 11938355,
+ -32961401, -2970515, 29551813, 10109425},
+ },
+ },
+ {
+ {
+ {-13657040, -13155431, -31283750, 11777098, 21447386, 6519384,
+ -2378284, -1627556, 10092783, -4764171},
+ {27939166, 14210322, 4677035, 16277044, -22964462, -12398139,
+ -32508754, 12005538, -17810127, 12803510},
+ {17228999, -15661624, -1233527, 300140, -1224870, -11714777,
+ 30364213, -9038194, 18016357, 4397660},
+ },
+ {
+ {-10958843, -7690207, 4776341, -14954238, 27850028, -15602212,
+ -26619106, 14544525, -17477504, 982639},
+ {29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899,
+ -4120128, -21047696, 9934963},
+ {5793303, 16271923, -24131614, -10116404, 29188560, 1206517,
+ -14747930, 4559895, -30123922, -10897950},
+ },
+ {
+ {-27643952, -11493006, 16282657, -11036493, 28414021, -15012264,
+ 24191034, 4541697, -13338309, 5500568},
+ {12650548, -1497113, 9052871, 11355358, -17680037, -8400164,
+ -17430592, 12264343, 10874051, 13524335},
+ {25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038,
+ 5080568, -22528059, 5376628},
+ },
+ {
+ {-26088264, -4011052, -17013699, -3537628, -6726793, 1920897,
+ -22321305, -9447443, 4535768, 1569007},
+ {-2255422, 14606630, -21692440, -8039818, 28430649, 8775819,
+ -30494562, 3044290, 31848280, 12543772},
+ {-22028579, 2943893, -31857513, 6777306, 13784462, -4292203,
+ -27377195, -2062731, 7718482, 14474653},
+ },
+ {
+ {2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965,
+ -7236665, 24316168, -5253567},
+ {13741529, 10911568, -33233417, -8603737, -20177830, -1033297,
+ 33040651, -13424532, -20729456, 8321686},
+ {21060490, -2212744, 15712757, -4336099, 1639040, 10656336,
+ 23845965, -11874838, -9984458, 608372},
+ },
+ {
+ {-13672732, -15087586, -10889693, -7557059, -6036909, 11305547,
+ 1123968, -6780577, 27229399, 23887},
+ {-23244140, -294205, -11744728, 14712571, -29465699, -2029617,
+ 12797024, -6440308, -1633405, 16678954},
+ {-29500620, 4770662, -16054387, 14001338, 7830047, 9564805,
+ -1508144, -4795045, -17169265, 4904953},
+ },
+ {
+ {24059557, 14617003, 19037157, -15039908, 19766093, -14906429,
+ 5169211, 16191880, 2128236, -4326833},
+ {-16981152, 4124966, -8540610, -10653797, 30336522, -14105247,
+ -29806336, 916033, -6882542, -2986532},
+ {-22630907, 12419372, -7134229, -7473371, -16478904, 16739175,
+ 285431, 2763829, 15736322, 4143876},
+ },
+ {
+ {2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801,
+ -14594663, 23527084, -16458268},
+ {33431127, -11130478, -17838966, -15626900, 8909499, 8376530,
+ -32625340, 4087881, -15188911, -14416214},
+ {1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055,
+ 4357868, -4774191, -16323038},
+ },
+ },
+ {
+ {
+ {6721966, 13833823, -23523388, -1551314, 26354293, -11863321,
+ 23365147, -3949732, 7390890, 2759800},
+ {4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353,
+ -4264057, 1244380, -12919645},
+ {-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413,
+ 9208236, 15886429, 16489664},
+ },
+ {
+ {1996075, 10375649, 14346367, 13311202, -6874135, -16438411,
+ -13693198, 398369, -30606455, -712933},
+ {-25307465, 9795880, -2777414, 14878809, -33531835, 14780363,
+ 13348553, 12076947, -30836462, 5113182},
+ {-17770784, 11797796, 31950843, 13929123, -25888302, 12288344,
+ -30341101, -7336386, 13847711, 5387222},
+ },
+ {
+ {-18582163, -3416217, 17824843, -2340966, 22744343, -10442611,
+ 8763061, 3617786, -19600662, 10370991},
+ {20246567, -14369378, 22358229, -543712, 18507283, -10413996,
+ 14554437, -8746092, 32232924, 16763880},
+ {9648505, 10094563, 26416693, 14745928, -30374318, -6472621,
+ 11094161, 15689506, 3140038, -16510092},
+ },
+ {
+ {-16160072, 5472695, 31895588, 4744994, 8823515, 10365685,
+ -27224800, 9448613, -28774454, 366295},
+ {19153450, 11523972, -11096490, -6503142, -24647631, 5420647,
+ 28344573, 8041113, 719605, 11671788},
+ {8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916,
+ -15266516, 27000813, -10195553},
+ },
+ {
+ {-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065,
+ 5336097, 6750977, -14521026},
+ {11836410, -3979488, 26297894, 16080799, 23455045, 15735944,
+ 1695823, -8819122, 8169720, 16220347},
+ {-18115838, 8653647, 17578566, -6092619, -8025777, -16012763,
+ -11144307, -2627664, -5990708, -14166033},
+ },
+ {
+ {-23308498, -10968312, 15213228, -10081214, -30853605, -11050004,
+ 27884329, 2847284, 2655861, 1738395},
+ {-27537433, -14253021, -25336301, -8002780, -9370762, 8129821,
+ 21651608, -3239336, -19087449, -11005278},
+ {1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092,
+ 5821408, 10478196, 8544890},
+ },
+ {
+ {32173121, -16129311, 24896207, 3921497, 22579056, -3410854,
+ 19270449, 12217473, 17789017, -3395995},
+ {-30552961, -2228401, -15578829, -10147201, 13243889, 517024,
+ 15479401, -3853233, 30460520, 1052596},
+ {-11614875, 13323618, 32618793, 8175907, -15230173, 12596687,
+ 27491595, -4612359, 3179268, -9478891},
+ },
+ {
+ {31947069, -14366651, -4640583, -15339921, -15125977, -6039709,
+ -14756777, -16411740, 19072640, -9511060},
+ {11685058, 11822410, 3158003, -13952594, 33402194, -4165066,
+ 5977896, -5215017, 473099, 5040608},
+ {-20290863, 8198642, -27410132, 11602123, 1290375, -2799760,
+ 28326862, 1721092, -19558642, -3131606},
+ },
+ },
+ {
+ {
+ {7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786,
+ 8076149, -27868496, 11538389},
+ {-19935666, 3899861, 18283497, -6801568, -15728660, -11249211,
+ 8754525, 7446702, -5676054, 5797016},
+ {-11295600, -3793569, -15782110, -7964573, 12708869, -8456199,
+ 2014099, -9050574, -2369172, -5877341},
+ },
+ {
+ {-22472376, -11568741, -27682020, 1146375, 18956691, 16640559,
+ 1192730, -3714199, 15123619, 10811505},
+ {14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363,
+ 15776356, -28886779, -11974553},
+ {-28241164, -8072475, -4978962, -5315317, 29416931, 1847569,
+ -20654173, -16484855, 4714547, -9600655},
+ },
+ {
+ {15200332, 8368572, 19679101, 15970074, -31872674, 1959451,
+ 24611599, -4543832, -11745876, 12340220},
+ {12876937, -10480056, 33134381, 6590940, -6307776, 14872440,
+ 9613953, 8241152, 15370987, 9608631},
+ {-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868,
+ 15866074, -28210621, -8814099},
+ },
+ {
+ {26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233,
+ 858697, 20571223, 8420556},
+ {14620715, 13067227, -15447274, 8264467, 14106269, 15080814,
+ 33531827, 12516406, -21574435, -12476749},
+ {236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519,
+ 7256740, 8791136, 15069930},
+ },
+ {
+ {1276410, -9371918, 22949635, -16322807, -23493039, -5702186,
+ 14711875, 4874229, -30663140, -2331391},
+ {5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175,
+ -7912378, -33069337, 9234253},
+ {20590503, -9018988, 31529744, -7352666, -2706834, 10650548,
+ 31559055, -11609587, 18979186, 13396066},
+ },
+ {
+ {24474287, 4968103, 22267082, 4407354, 24063882, -8325180,
+ -18816887, 13594782, 33514650, 7021958},
+ {-11566906, -6565505, -21365085, 15928892, -26158305, 4315421,
+ -25948728, -3916677, -21480480, 12868082},
+ {-28635013, 13504661, 19988037, -2132761, 21078225, 6443208,
+ -21446107, 2244500, -12455797, -8089383},
+ },
+ {
+ {-30595528, 13793479, -5852820, 319136, -25723172, -6263899,
+ 33086546, 8957937, -15233648, 5540521},
+ {-11630176, -11503902, -8119500, -7643073, 2620056, 1022908,
+ -23710744, -1568984, -16128528, -14962807},
+ {23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819,
+ 892185, -11513277, -15205948},
+ },
+ {
+ {9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819,
+ 4763127, -19179614, 5867134},
+ {-32765025, 1927590, 31726409, -4753295, 23962434, -16019500,
+ 27846559, 5931263, -29749703, -16108455},
+ {27461885, -2977536, 22380810, 1815854, -23033753, -3031938,
+ 7283490, -15148073, -19526700, 7734629},
+ },
+ },
+ {
+ {
+ {-8010264, -9590817, -11120403, 6196038, 29344158, -13430885,
+ 7585295, -3176626, 18549497, 15302069},
+ {-32658337, -6171222, -7672793, -11051681, 6258878, 13504381,
+ 10458790, -6418461, -8872242, 8424746},
+ {24687205, 8613276, -30667046, -3233545, 1863892, -1830544,
+ 19206234, 7134917, -11284482, -828919},
+ },
+ {
+ {11334899, -9218022, 8025293, 12707519, 17523892, -10476071,
+ 10243738, -14685461, -5066034, 16498837},
+ {8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925,
+ -14124238, 6536641, 10543906},
+ {-28946384, 15479763, -17466835, 568876, -1497683, 11223454,
+ -2669190, -16625574, -27235709, 8876771},
+ },
+ {
+ {-25742899, -12566864, -15649966, -846607, -33026686, -796288,
+ -33481822, 15824474, -604426, -9039817},
+ {10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697,
+ -4890037, 1657394, 3084098},
+ {10477963, -7470260, 12119566, -13250805, 29016247, -5365589,
+ 31280319, 14396151, -30233575, 15272409},
+ },
+ {
+ {-12288309, 3169463, 28813183, 16658753, 25116432, -5630466,
+ -25173957, -12636138, -25014757, 1950504},
+ {-26180358, 9489187, 11053416, -14746161, -31053720, 5825630,
+ -8384306, -8767532, 15341279, 8373727},
+ {28685821, 7759505, -14378516, -12002860, -31971820, 4079242,
+ 298136, -10232602, -2878207, 15190420},
+ },
+ {
+ {-32932876, 13806336, -14337485, -15794431, -24004620, 10940928,
+ 8669718, 2742393, -26033313, -6875003},
+ {-1580388, -11729417, -25979658, -11445023, -17411874, -10912854,
+ 9291594, -16247779, -12154742, 6048605},
+ {-30305315, 14843444, 1539301, 11864366, 20201677, 1900163,
+ 13934231, 5128323, 11213262, 9168384},
+ },
+ {
+ {-26280513, 11007847, 19408960, -940758, -18592965, -4328580,
+ -5088060, -11105150, 20470157, -16398701},
+ {-23136053, 9282192, 14855179, -15390078, -7362815, -14408560,
+ -22783952, 14461608, 14042978, 5230683},
+ {29969567, -2741594, -16711867, -8552442, 9175486, -2468974,
+ 21556951, 3506042, -5933891, -12449708},
+ },
+ {
+ {-3144746, 8744661, 19704003, 4581278, -20430686, 6830683,
+ -21284170, 8971513, -28539189, 15326563},
+ {-19464629, 10110288, -17262528, -3503892, -23500387, 1355669,
+ -15523050, 15300988, -20514118, 9168260},
+ {-5353335, 4488613, -23803248, 16314347, 7780487, -15638939,
+ -28948358, 9601605, 33087103, -9011387},
+ },
+ {
+ {-19443170, -15512900, -20797467, -12445323, -29824447, 10229461,
+ -27444329, -15000531, -5996870, 15664672},
+ {23294591, -16632613, -22650781, -8470978, 27844204, 11461195,
+ 13099750, -2460356, 18151676, 13417686},
+ {-24722913, -4176517, -31150679, 5988919, -26858785, 6685065,
+ 1661597, -12551441, 15271676, -15452665},
+ },
+ },
+ {
+ {
+ {11433042, -13228665, 8239631, -5279517, -1985436, -725718,
+ -18698764, 2167544, -6921301, -13440182},
+ {-31436171, 15575146, 30436815, 12192228, -22463353, 9395379,
+ -9917708, -8638997, 12215110, 12028277},
+ {14098400, 6555944, 23007258, 5757252, -15427832, -12950502,
+ 30123440, 4617780, -16900089, -655628},
+ },
+ {
+ {-4026201, -15240835, 11893168, 13718664, -14809462, 1847385,
+ -15819999, 10154009, 23973261, -12684474},
+ {-26531820, -3695990, -1908898, 2534301, -31870557, -16550355,
+ 18341390, -11419951, 32013174, -10103539},
+ {-25479301, 10876443, -11771086, -14625140, -12369567, 1838104,
+ 21911214, 6354752, 4425632, -837822},
+ },
+ {
+ {-10433389, -14612966, 22229858, -3091047, -13191166, 776729,
+ -17415375, -12020462, 4725005, 14044970},
+ {19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390,
+ -1411784, -19522291, -16109756},
+ {-24864089, 12986008, -10898878, -5558584, -11312371, -148526,
+ 19541418, 8180106, 9282262, 10282508},
+ },
+ {
+ {-26205082, 4428547, -8661196, -13194263, 4098402, -14165257,
+ 15522535, 8372215, 5542595, -10702683},
+ {-10562541, 14895633, 26814552, -16673850, -17480754, -2489360,
+ -2781891, 6993761, -18093885, 10114655},
+ {-20107055, -929418, 31422704, 10427861, -7110749, 6150669,
+ -29091755, -11529146, 25953725, -106158},
+ },
+ {
+ {-4234397, -8039292, -9119125, 3046000, 2101609, -12607294,
+ 19390020, 6094296, -3315279, 12831125},
+ {-15998678, 7578152, 5310217, 14408357, -33548620, -224739,
+ 31575954, 6326196, 7381791, -2421839},
+ {-20902779, 3296811, 24736065, -16328389, 18374254, 7318640,
+ 6295303, 8082724, -15362489, 12339664},
+ },
+ {
+ {27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414,
+ 15768922, 25091167, 14856294},
+ {-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300,
+ -12695493, -22182473, -9012899},
+ {-11423429, -5421590, 11632845, 3405020, 30536730, -11674039,
+ -27260765, 13866390, 30146206, 9142070},
+ },
+ {
+ {3924129, -15307516, -13817122, -10054960, 12291820, -668366,
+ -27702774, 9326384, -8237858, 4171294},
+ {-15921940, 16037937, 6713787, 16606682, -21612135, 2790944,
+ 26396185, 3731949, 345228, -5462949},
+ {-21327538, 13448259, 25284571, 1143661, 20614966, -8849387,
+ 2031539, -12391231, -16253183, -13582083},
+ },
+ {
+ {31016211, -16722429, 26371392, -14451233, -5027349, 14854137,
+ 17477601, 3842657, 28012650, -16405420},
+ {-5075835, 9368966, -8562079, -4600902, -15249953, 6970560,
+ -9189873, 16292057, -8867157, 3507940},
+ {29439664, 3537914, 23333589, 6997794, -17555561, -11018068,
+ -15209202, -15051267, -9164929, 6580396},
+ },
+ },
+ {
+ {
+ {-12185861, -7679788, 16438269, 10826160, -8696817, -6235611,
+ 17860444, -9273846, -2095802, 9304567},
+ {20714564, -4336911, 29088195, 7406487, 11426967, -5095705,
+ 14792667, -14608617, 5289421, -477127},
+ {-16665533, -10650790, -6160345, -13305760, 9192020, -1802462,
+ 17271490, 12349094, 26939669, -3752294},
+ },
+ {
+ {-12889898, 9373458, 31595848, 16374215, 21471720, 13221525,
+ -27283495, -12348559, -3698806, 117887},
+ {22263325, -6560050, 3984570, -11174646, -15114008, -566785,
+ 28311253, 5358056, -23319780, 541964},
+ {16259219, 3261970, 2309254, -15534474, -16885711, -4581916,
+ 24134070, -16705829, -13337066, -13552195},
+ },
+ {
+ {9378160, -13140186, -22845982, -12745264, 28198281, -7244098,
+ -2399684, -717351, 690426, 14876244},
+ {24977353, -314384, -8223969, -13465086, 28432343, -1176353,
+ -13068804, -12297348, -22380984, 6618999},
+ {-1538174, 11685646, 12944378, 13682314, -24389511, -14413193,
+ 8044829, -13817328, 32239829, -5652762},
+ },
+ {
+ {-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647,
+ -10350059, 32779359, 5095274},
+ {-33008130, -5214506, -32264887, -3685216, 9460461, -9327423,
+ -24601656, 14506724, 21639561, -2630236},
+ {-16400943, -13112215, 25239338, 15531969, 3987758, -4499318,
+ -1289502, -6863535, 17874574, 558605},
+ },
+ {
+ {-13600129, 10240081, 9171883, 16131053, -20869254, 9599700,
+ 33499487, 5080151, 2085892, 5119761},
+ {-22205145, -2519528, -16381601, 414691, -25019550, 2170430,
+ 30634760, -8363614, -31999993, -5759884},
+ {-6845704, 15791202, 8550074, -1312654, 29928809, -12092256,
+ 27534430, -7192145, -22351378, 12961482},
+ },
+ {
+ {-24492060, -9570771, 10368194, 11582341, -23397293, -2245287,
+ 16533930, 8206996, -30194652, -5159638},
+ {-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630,
+ 7031275, 7589640, 8945490},
+ {-32152748, 8917967, 6661220, -11677616, -1192060, -15793393,
+ 7251489, -11182180, 24099109, -14456170},
+ },
+ {
+ {5019558, -7907470, 4244127, -14714356, -26933272, 6453165,
+ -19118182, -13289025, -6231896, -10280736},
+ {10853594, 10721687, 26480089, 5861829, -22995819, 1972175,
+ -1866647, -10557898, -3363451, -6441124},
+ {-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661,
+ -2008168, -13866408, 7421392},
+ },
+ {
+ {8139927, -6546497, 32257646, -5890546, 30375719, 1886181,
+ -21175108, 15441252, 28826358, -4123029},
+ {6267086, 9695052, 7709135, -16603597, -32869068, -1886135,
+ 14795160, -7840124, 13746021, -1742048},
+ {28584902, 7787108, -6732942, -15050729, 22846041, -7571236,
+ -3181936, -363524, 4771362, -8419958},
+ },
+ },
+ {
+ {
+ {24949256, 6376279, -27466481, -8174608, -18646154, -9930606,
+ 33543569, -12141695, 3569627, 11342593},
+ {26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886,
+ 4608608, 7325975, -14801071},
+ {-11618399, -14554430, -24321212, 7655128, -1369274, 5214312,
+ -27400540, 10258390, -17646694, -8186692},
+ },
+ {
+ {11431204, 15823007, 26570245, 14329124, 18029990, 4796082,
+ -31446179, 15580664, 9280358, -3973687},
+ {-160783, -10326257, -22855316, -4304997, -20861367, -13621002,
+ -32810901, -11181622, -15545091, 4387441},
+ {-20799378, 12194512, 3937617, -5805892, -27154820, 9340370,
+ -24513992, 8548137, 20617071, -7482001},
+ },
+ {
+ {-938825, -3930586, -8714311, 16124718, 24603125, -6225393,
+ -13775352, -11875822, 24345683, 10325460},
+ {-19855277, -1568885, -22202708, 8714034, 14007766, 6928528,
+ 16318175, -1010689, 4766743, 3552007},
+ {-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514,
+ 14481909, 10988822, -3994762},
+ },
+ {
+ {15564307, -14311570, 3101243, 5684148, 30446780, -8051356,
+ 12677127, -6505343, -8295852, 13296005},
+ {-9442290, 6624296, -30298964, -11913677, -4670981, -2057379,
+ 31521204, 9614054, -30000824, 12074674},
+ {4771191, -135239, 14290749, -13089852, 27992298, 14998318,
+ -1413936, -1556716, 29832613, -16391035},
+ },
+ {
+ {7064884, -7541174, -19161962, -5067537, -18891269, -2912736,
+ 25825242, 5293297, -27122660, 13101590},
+ {-2298563, 2439670, -7466610, 1719965, -27267541, -16328445,
+ 32512469, -5317593, -30356070, -4190957},
+ {-30006540, 10162316, -33180176, 3981723, -16482138, -13070044,
+ 14413974, 9515896, 19568978, 9628812},
+ },
+ {
+ {33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894,
+ -6106839, -6291786, 3437740},
+ {-18978877, 3884493, 19469877, 12726490, 15913552, 13614290,
+ -22961733, 70104, 7463304, 4176122},
+ {-27124001, 10659917, 11482427, -16070381, 12771467, -6635117,
+ -32719404, -5322751, 24216882, 5944158},
+ },
+ {
+ {8894125, 7450974, -2664149, -9765752, -28080517, -12389115,
+ 19345746, 14680796, 11632993, 5847885},
+ {26942781, -2315317, 9129564, -4906607, 26024105, 11769399,
+ -11518837, 6367194, -9727230, 4782140},
+ {19916461, -4828410, -22910704, -11414391, 25606324, -5972441,
+ 33253853, 8220911, 6358847, -1873857},
+ },
+ {
+ {801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388,
+ -4480480, -13538503, 1387155},
+ {19646058, 5720633, -11416706, 12814209, 11607948, 12749789,
+ 14147075, 15156355, -21866831, 11835260},
+ {19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523,
+ 15467869, -26560550, 5052483},
+ },
+ },
+ {
+ {
+ {-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123,
+ -12618185, 12228557, -7003677},
+ {32944382, 14922211, -22844894, 5188528, 21913450, -8719943,
+ 4001465, 13238564, -6114803, 8653815},
+ {22865569, -4652735, 27603668, -12545395, 14348958, 8234005,
+ 24808405, 5719875, 28483275, 2841751},
+ },
+ {
+ {-16420968, -1113305, -327719, -12107856, 21886282, -15552774,
+ -1887966, -315658, 19932058, -12739203},
+ {-11656086, 10087521, -8864888, -5536143, -19278573, -3055912,
+ 3999228, 13239134, -4777469, -13910208},
+ {1382174, -11694719, 17266790, 9194690, -13324356, 9720081,
+ 20403944, 11284705, -14013818, 3093230},
+ },
+ {
+ {16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424,
+ 16271225, -24049421, -6691850},
+ {-21911077, -5927941, -4611316, -5560156, -31744103, -10785293,
+ 24123614, 15193618, -21652117, -16739389},
+ {-9935934, -4289447, -25279823, 4372842, 2087473, 10399484,
+ 31870908, 14690798, 17361620, 11864968},
+ },
+ {
+ {-11307610, 6210372, 13206574, 5806320, -29017692, -13967200,
+ -12331205, -7486601, -25578460, -16240689},
+ {14668462, -12270235, 26039039, 15305210, 25515617, 4542480,
+ 10453892, 6577524, 9145645, -6443880},
+ {5974874, 3053895, -9433049, -10385191, -31865124, 3225009,
+ -7972642, 3936128, -5652273, -3050304},
+ },
+ {
+ {30625386, -4729400, -25555961, -12792866, -20484575, 7695099,
+ 17097188, -16303496, -27999779, 1803632},
+ {-3553091, 9865099, -5228566, 4272701, -5673832, -16689700,
+ 14911344, 12196514, -21405489, 7047412},
+ {20093277, 9920966, -11138194, -5343857, 13161587, 12044805,
+ -32856851, 4124601, -32343828, -10257566},
+ },
+ {
+ {-20788824, 14084654, -13531713, 7842147, 19119038, -13822605,
+ 4752377, -8714640, -21679658, 2288038},
+ {-26819236, -3283715, 29965059, 3039786, -14473765, 2540457,
+ 29457502, 14625692, -24819617, 12570232},
+ {-1063558, -11551823, 16920318, 12494842, 1278292, -5869109,
+ -21159943, -3498680, -11974704, 4724943},
+ },
+ {
+ {17960970, -11775534, -4140968, -9702530, -8876562, -1410617,
+ -12907383, -8659932, -29576300, 1903856},
+ {23134274, -14279132, -10681997, -1611936, 20684485, 15770816,
+ -12989750, 3190296, 26955097, 14109738},
+ {15308788, 5320727, -30113809, -14318877, 22902008, 7767164,
+ 29425325, -11277562, 31960942, 11934971},
+ },
+ {
+ {-27395711, 8435796, 4109644, 12222639, -24627868, 14818669,
+ 20638173, 4875028, 10491392, 1379718},
+ {-13159415, 9197841, 3875503, -8936108, -1383712, -5879801,
+ 33518459, 16176658, 21432314, 12180697},
+ {-11787308, 11500838, 13787581, -13832590, -22430679, 10140205,
+ 1465425, 12689540, -10301319, -13872883},
+ },
+ },
+ {
+ {
+ {5414091, -15386041, -21007664, 9643570, 12834970, 1186149,
+ -2622916, -1342231, 26128231, 6032912},
+ {-26337395, -13766162, 32496025, -13653919, 17847801, -12669156,
+ 3604025, 8316894, -25875034, -10437358},
+ {3296484, 6223048, 24680646, -12246460, -23052020, 5903205,
+ -8862297, -4639164, 12376617, 3188849},
+ },
+ {
+ {29190488, -14659046, 27549113, -1183516, 3520066, -10697301,
+ 32049515, -7309113, -16109234, -9852307},
+ {-14744486, -9309156, 735818, -598978, -20407687, -5057904,
+ 25246078, -15795669, 18640741, -960977},
+ {-6928835, -16430795, 10361374, 5642961, 4910474, 12345252,
+ -31638386, -494430, 10530747, 1053335},
+ },
+ {
+ {-29265967, -14186805, -13538216, -12117373, -19457059, -10655384,
+ -31462369, -2948985, 24018831, 15026644},
+ {-22592535, -3145277, -2289276, 5953843, -13440189, 9425631,
+ 25310643, 13003497, -2314791, -15145616},
+ {-27419985, -603321, -8043984, -1669117, -26092265, 13987819,
+ -27297622, 187899, -23166419, -2531735},
+ },
+ {
+ {-21744398, -13810475, 1844840, 5021428, -10434399, -15911473,
+ 9716667, 16266922, -5070217, 726099},
+ {29370922, -6053998, 7334071, -15342259, 9385287, 2247707,
+ -13661962, -4839461, 30007388, -15823341},
+ {-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109,
+ 730663, 9835848, 4555336},
+ },
+ {
+ {-23376435, 1410446, -22253753, -12899614, 30867635, 15826977,
+ 17693930, 544696, -11985298, 12422646},
+ {31117226, -12215734, -13502838, 6561947, -9876867, -12757670,
+ -5118685, -4096706, 29120153, 13924425},
+ {-17400879, -14233209, 19675799, -2734756, -11006962, -5858820,
+ -9383939, -11317700, 7240931, -237388},
+ },
+ {
+ {-31361739, -11346780, -15007447, -5856218, -22453340, -12152771,
+ 1222336, 4389483, 3293637, -15551743},
+ {-16684801, -14444245, 11038544, 11054958, -13801175, -3338533,
+ -24319580, 7733547, 12796905, -6335822},
+ {-8759414, -10817836, -25418864, 10783769, -30615557, -9746811,
+ -28253339, 3647836, 3222231, -11160462},
+ },
+ {
+ {18606113, 1693100, -25448386, -15170272, 4112353, 10045021,
+ 23603893, -2048234, -7550776, 2484985},
+ {9255317, -3131197, -12156162, -1004256, 13098013, -9214866,
+ 16377220, -2102812, -19802075, -3034702},
+ {-22729289, 7496160, -5742199, 11329249, 19991973, -3347502,
+ -31718148, 9936966, -30097688, -10618797},
+ },
+ {
+ {21878590, -5001297, 4338336, 13643897, -3036865, 13160960,
+ 19708896, 5415497, -7360503, -4109293},
+ {27736861, 10103576, 12500508, 8502413, -3413016, -9633558,
+ 10436918, -1550276, -23659143, -8132100},
+ {19492550, -12104365, -29681976, -852630, -3208171, 12403437,
+ 30066266, 8367329, 13243957, 8709688},
+ },
+ },
+ {
+ {
+ {12015105, 2801261, 28198131, 10151021, 24818120, -4743133,
+ -11194191, -5645734, 5150968, 7274186},
+ {2831366, -12492146, 1478975, 6122054, 23825128, -12733586,
+ 31097299, 6083058, 31021603, -9793610},
+ {-2529932, -2229646, 445613, 10720828, -13849527, -11505937,
+ -23507731, 16354465, 15067285, -14147707},
+ },
+ {
+ {7840942, 14037873, -33364863, 15934016, -728213, -3642706,
+ 21403988, 1057586, -19379462, -12403220},
+ {915865, -16469274, 15608285, -8789130, -24357026, 6060030,
+ -17371319, 8410997, -7220461, 16527025},
+ {32922597, -556987, 20336074, -16184568, 10903705, -5384487,
+ 16957574, 52992, 23834301, 6588044},
+ },
+ {
+ {32752030, 11232950, 3381995, -8714866, 22652988, -10744103,
+ 17159699, 16689107, -20314580, -1305992},
+ {-4689649, 9166776, -25710296, -10847306, 11576752, 12733943,
+ 7924251, -2752281, 1976123, -7249027},
+ {21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041,
+ -3371252, 12331345, -8237197},
+ },
+ {
+ {8651614, -4477032, -16085636, -4996994, 13002507, 2950805,
+ 29054427, -5106970, 10008136, -4667901},
+ {31486080, 15114593, -14261250, 12951354, 14369431, -7387845,
+ 16347321, -13662089, 8684155, -10532952},
+ {19443825, 11385320, 24468943, -9659068, -23919258, 2187569,
+ -26263207, -6086921, 31316348, 14219878},
+ },
+ {
+ {-28594490, 1193785, 32245219, 11392485, 31092169, 15722801,
+ 27146014, 6992409, 29126555, 9207390},
+ {32382935, 1110093, 18477781, 11028262, -27411763, -7548111,
+ -4980517, 10843782, -7957600, -14435730},
+ {2814918, 7836403, 27519878, -7868156, -20894015, -11553689,
+ -21494559, 8550130, 28346258, 1994730},
+ },
+ {
+ {-19578299, 8085545, -14000519, -3948622, 2785838, -16231307,
+ -19516951, 7174894, 22628102, 8115180},
+ {-30405132, 955511, -11133838, -15078069, -32447087, -13278079,
+ -25651578, 3317160, -9943017, 930272},
+ {-15303681, -6833769, 28856490, 1357446, 23421993, 1057177,
+ 24091212, -1388970, -22765376, -10650715},
+ },
+ {
+ {-22751231, -5303997, -12907607, -12768866, -15811511, -7797053,
+ -14839018, -16554220, -1867018, 8398970},
+ {-31969310, 2106403, -4736360, 1362501, 12813763, 16200670,
+ 22981545, -6291273, 18009408, -15772772},
+ {-17220923, -9545221, -27784654, 14166835, 29815394, 7444469,
+ 29551787, -3727419, 19288549, 1325865},
+ },
+ {
+ {15100157, -15835752, -23923978, -1005098, -26450192, 15509408,
+ 12376730, -3479146, 33166107, -8042750},
+ {20909231, 13023121, -9209752, 16251778, -5778415, -8094914,
+ 12412151, 10018715, 2213263, -13878373},
+ {32529814, -11074689, 30361439, -16689753, -9135940, 1513226,
+ 22922121, 6382134, -5766928, 8371348},
+ },
+ },
+ {
+ {
+ {9923462, 11271500, 12616794, 3544722, -29998368, -1721626,
+ 12891687, -8193132, -26442943, 10486144},
+ {-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726,
+ 2610596, -23921530, -11455195},
+ {5408411, -1136691, -4969122, 10561668, 24145918, 14240566,
+ 31319731, -4235541, 19985175, -3436086},
+ },
+ {
+ {-13994457, 16616821, 14549246, 3341099, 32155958, 13648976,
+ -17577068, 8849297, 65030, 8370684},
+ {-8320926, -12049626, 31204563, 5839400, -20627288, -1057277,
+ -19442942, 6922164, 12743482, -9800518},
+ {-2361371, 12678785, 28815050, 4759974, -23893047, 4884717,
+ 23783145, 11038569, 18800704, 255233},
+ },
+ {
+ {-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847,
+ 9066957, 19258688, -14753793},
+ {-2936654, -10827535, -10432089, 14516793, -3640786, 4372541,
+ -31934921, 2209390, -1524053, 2055794},
+ {580882, 16705327, 5468415, -2683018, -30926419, -14696000,
+ -7203346, -8994389, -30021019, 7394435},
+ },
+ {
+ {23838809, 1822728, -15738443, 15242727, 8318092, -3733104,
+ -21672180, -3492205, -4821741, 14799921},
+ {13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804,
+ 13496856, -9056018, 7402518},
+ {2286874, -4435931, -20042458, -2008336, -13696227, 5038122,
+ 11006906, -15760352, 8205061, 1607563},
+ },
+ {
+ {14414086, -8002132, 3331830, -3208217, 22249151, -5594188,
+ 18364661, -2906958, 30019587, -9029278},
+ {-27688051, 1585953, -10775053, 931069, -29120221, -11002319,
+ -14410829, 12029093, 9944378, 8024},
+ {4368715, -3709630, 29874200, -15022983, -20230386, -11410704,
+ -16114594, -999085, -8142388, 5640030},
+ },
+ {
+ {10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887,
+ -16694564, 15219798, -14327783},
+ {27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605,
+ -1173195, -18342183, 9742717},
+ {6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614,
+ 7406442, 12420155, 1994844},
+ },
+ {
+ {14012521, -5024720, -18384453, -9578469, -26485342, -3936439,
+ -13033478, -10909803, 24319929, -6446333},
+ {16412690, -4507367, 10772641, 15929391, -17068788, -4658621,
+ 10555945, -10484049, -30102368, -4739048},
+ {22397382, -7767684, -9293161, -12792868, 17166287, -9755136,
+ -27333065, 6199366, 21880021, -12250760},
+ },
+ {
+ {-4283307, 5368523, -31117018, 8163389, -30323063, 3209128,
+ 16557151, 8890729, 8840445, 4957760},
+ {-15447727, 709327, -6919446, -10870178, -29777922, 6522332,
+ -21720181, 12130072, -14796503, 5005757},
+ {-2114751, -14308128, 23019042, 15765735, -25269683, 6002752,
+ 10183197, -13239326, -16395286, -2176112},
+ },
+ },
+ {
+ {
+ {-19025756, 1632005, 13466291, -7995100, -23640451, 16573537,
+ -32013908, -3057104, 22208662, 2000468},
+ {3065073, -1412761, -25598674, -361432, -17683065, -5703415,
+ -8164212, 11248527, -3691214, -7414184},
+ {10379208, -6045554, 8877319, 1473647, -29291284, -12507580,
+ 16690915, 2553332, -3132688, 16400289},
+ },
+ {
+ {15716668, 1254266, -18472690, 7446274, -8448918, 6344164,
+ -22097271, -7285580, 26894937, 9132066},
+ {24158887, 12938817, 11085297, -8177598, -28063478, -4457083,
+ -30576463, 64452, -6817084, -2692882},
+ {13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710,
+ -3418511, -4688006, 2364226},
+ },
+ {
+ {16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024,
+ -11697457, 15445875, -7798101},
+ {29004207, -7867081, 28661402, -640412, -12794003, -7943086,
+ 31863255, -4135540, -278050, -15759279},
+ {-6122061, -14866665, -28614905, 14569919, -10857999, -3591829,
+ 10343412, -6976290, -29828287, -10815811},
+ },
+ {
+ {27081650, 3463984, 14099042, -4517604, 1616303, -6205604, 29542636,
+ 15372179, 17293797, 960709},
+ {20263915, 11434237, -5765435, 11236810, 13505955, -10857102,
+ -16111345, 6493122, -19384511, 7639714},
+ {-2830798, -14839232, 25403038, -8215196, -8317012, -16173699,
+ 18006287, -16043750, 29994677, -15808121},
+ },
+ {
+ {9769828, 5202651, -24157398, -13631392, -28051003, -11561624,
+ -24613141, -13860782, -31184575, 709464},
+ {12286395, 13076066, -21775189, -1176622, -25003198, 4057652,
+ -32018128, -8890874, 16102007, 13205847},
+ {13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170,
+ 8525972, 10151379, 10394400},
+ },
+ {
+ {4024660, -16137551, 22436262, 12276534, -9099015, -2686099,
+ 19698229, 11743039, -33302334, 8934414},
+ {-15879800, -4525240, -8580747, -2934061, 14634845, -698278,
+ -9449077, 3137094, -11536886, 11721158},
+ {17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229,
+ 8835153, -9205489, -1280045},
+ },
+ {
+ {-461409, -7830014, 20614118, 16688288, -7514766, -4807119,
+ 22300304, 505429, 6108462, -6183415},
+ {-5070281, 12367917, -30663534, 3234473, 32617080, -8422642,
+ 29880583, -13483331, -26898490, -7867459},
+ {-31975283, 5726539, 26934134, 10237677, -3173717, -605053,
+ 24199304, 3795095, 7592688, -14992079},
+ },
+ {
+ {21594432, -14964228, 17466408, -4077222, 32537084, 2739898,
+ 6407723, 12018833, -28256052, 4298412},
+ {-20650503, -11961496, -27236275, 570498, 3767144, -1717540,
+ 13891942, -1569194, 13717174, 10805743},
+ {-14676630, -15644296, 15287174, 11927123, 24177847, -8175568,
+ -796431, 14860609, -26938930, -5863836},
+ },
+ },
+ {
+ {
+ {12962541, 5311799, -10060768, 11658280, 18855286, -7954201,
+ 13286263, -12808704, -4381056, 9882022},
+ {18512079, 11319350, -20123124, 15090309, 18818594, 5271736,
+ -22727904, 3666879, -23967430, -3299429},
+ {-6789020, -3146043, 16192429, 13241070, 15898607, -14206114,
+ -10084880, -6661110, -2403099, 5276065},
+ },
+ {
+ {30169808, -5317648, 26306206, -11750859, 27814964, 7069267,
+ 7152851, 3684982, 1449224, 13082861},
+ {10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382,
+ 15056736, -21016438, -8202000},
+ {-33150110, 3261608, 22745853, 7948688, 19370557, -15177665,
+ -26171976, 6482814, -10300080, -11060101},
+ },
+ {
+ {32869458, -5408545, 25609743, 15678670, -10687769, -15471071,
+ 26112421, 2521008, -22664288, 6904815},
+ {29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737,
+ 3841096, -29003639, -6657642},
+ {10340844, -6630377, -18656632, -2278430, 12621151, -13339055,
+ 30878497, -11824370, -25584551, 5181966},
+ },
+ {
+ {25940115, -12658025, 17324188, -10307374, -8671468, 15029094,
+ 24396252, -16450922, -2322852, -12388574},
+ {-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390,
+ 12641087, 20603771, -6561742},
+ {-18882287, -11673380, 24849422, 11501709, 13161720, -4768874,
+ 1925523, 11914390, 4662781, 7820689},
+ },
+ {
+ {12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456,
+ 12172924, 16136752, 15264020},
+ {-10349955, -14680563, -8211979, 2330220, -17662549, -14545780,
+ 10658213, 6671822, 19012087, 3772772},
+ {3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732,
+ -15762884, 20527771, 12988982},
+ },
+ {
+ {-14822485, -5797269, -3707987, 12689773, -898983, -10914866,
+ -24183046, -10564943, 3299665, -12424953},
+ {-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197,
+ 6461331, -25583147, 8991218},
+ {-17226263, 1816362, -1673288, -6086439, 31783888, -8175991,
+ -32948145, 7417950, -30242287, 1507265},
+ },
+ {
+ {29692663, 6829891, -10498800, 4334896, 20945975, -11906496,
+ -28887608, 8209391, 14606362, -10647073},
+ {-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695,
+ 9761487, 4170404, -2085325},
+ {-11587470, 14855945, -4127778, -1531857, -26649089, 15084046,
+ 22186522, 16002000, -14276837, -8400798},
+ },
+ {
+ {-4811456, 13761029, -31703877, -2483919, -3312471, 7869047,
+ -7113572, -9620092, 13240845, 10965870},
+ {-7742563, -8256762, -14768334, -13656260, -23232383, 12387166,
+ 4498947, 14147411, 29514390, 4302863},
+ {-13413405, -12407859, 20757302, -13801832, 14785143, 8976368,
+ -5061276, -2144373, 17846988, -13971927},
+ },
+ },
+ {
+ {
+ {-2244452, -754728, -4597030, -1066309, -6247172, 1455299,
+ -21647728, -9214789, -5222701, 12650267},
+ {-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813,
+ 13770293, -19134326, 10958663},
+ {22470984, 12369526, 23446014, -5441109, -21520802, -9698723,
+ -11772496, -11574455, -25083830, 4271862},
+ },
+ {
+ {-25169565, -10053642, -19909332, 15361595, -5984358, 2159192,
+ 75375, -4278529, -32526221, 8469673},
+ {15854970, 4148314, -8893890, 7259002, 11666551, 13824734,
+ -30531198, 2697372, 24154791, -9460943},
+ {15446137, -15806644, 29759747, 14019369, 30811221, -9610191,
+ -31582008, 12840104, 24913809, 9815020},
+ },
+ {
+ {-4709286, -5614269, -31841498, -12288893, -14443537, 10799414,
+ -9103676, 13438769, 18735128, 9466238},
+ {11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821,
+ -10896103, -22728655, 16199064},
+ {14576810, 379472, -26786533, -8317236, -29426508, -10812974,
+ -102766, 1876699, 30801119, 2164795},
+ },
+ {
+ {15995086, 3199873, 13672555, 13712240, -19378835, -4647646,
+ -13081610, -15496269, -13492807, 1268052},
+ {-10290614, -3659039, -3286592, 10948818, 23037027, 3794475,
+ -3470338, -12600221, -17055369, 3565904},
+ {29210088, -9419337, -5919792, -4952785, 10834811, -13327726,
+ -16512102, -10820713, -27162222, -14030531},
+ },
+ {
+ {-13161890, 15508588, 16663704, -8156150, -28349942, 9019123,
+ -29183421, -3769423, 2244111, -14001979},
+ {-5152875, -3800936, -9306475, -6071583, 16243069, 14684434,
+ -25673088, -16180800, 13491506, 4641841},
+ {10813417, 643330, -19188515, -728916, 30292062, -16600078,
+ 27548447, -7721242, 14476989, -12767431},
+ },
+ {
+ {10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937,
+ -1644259, -27912810, 12651324},
+ {-31185513, -813383, 22271204, 11835308, 10201545, 15351028,
+ 17099662, 3988035, 21721536, -3148940},
+ {10202177, -6545839, -31373232, -9574638, -32150642, -8119683,
+ -12906320, 3852694, 13216206, 14842320},
+ },
+ {
+ {-15815640, -10601066, -6538952, -7258995, -6984659, -6581778,
+ -31500847, 13765824, -27434397, 9900184},
+ {14465505, -13833331, -32133984, -14738873, -27443187, 12990492,
+ 33046193, 15796406, -7051866, -8040114},
+ {30924417, -8279620, 6359016, -12816335, 16508377, 9071735,
+ -25488601, 15413635, 9524356, -7018878},
+ },
+ {
+ {12274201, -13175547, 32627641, -1785326, 6736625, 13267305,
+ 5237659, -5109483, 15663516, 4035784},
+ {-2951309, 8903985, 17349946, 601635, -16432815, -4612556,
+ -13732739, -15889334, -22258478, 4659091},
+ {-16916263, -4952973, -30393711, -15158821, 20774812, 15897498,
+ 5736189, 15026997, -2178256, -13455585},
+ },
+ },
+ {
+ {
+ {-8858980, -2219056, 28571666, -10155518, -474467, -10105698,
+ -3801496, 278095, 23440562, -290208},
+ {10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275,
+ 11551483, -16571960, -7442864},
+ {17932739, -12437276, -24039557, 10749060, 11316803, 7535897,
+ 22503767, 5561594, -3646624, 3898661},
+ },
+ {
+ {7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531,
+ 7152530, 21831162, 1245233},
+ {26958459, -14658026, 4314586, 8346991, -5677764, 11960072,
+ -32589295, -620035, -30402091, -16716212},
+ {-12165896, 9166947, 33491384, 13673479, 29787085, 13096535,
+ 6280834, 14587357, -22338025, 13987525},
+ },
+ {
+ {-24349909, 7778775, 21116000, 15572597, -4833266, -5357778,
+ -4300898, -5124639, -7469781, -2858068},
+ {9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781,
+ 6439245, -14581012, 4091397},
+ {-8426427, 1470727, -28109679, -1596990, 3978627, -5123623,
+ -19622683, 12092163, 29077877, -14741988},
+ },
+ {
+ {5269168, -6859726, -13230211, -8020715, 25932563, 1763552,
+ -5606110, -5505881, -20017847, 2357889},
+ {32264008, -15407652, -5387735, -1160093, -2091322, -3946900,
+ 23104804, -12869908, 5727338, 189038},
+ {14609123, -8954470, -6000566, -16622781, -14577387, -7743898,
+ -26745169, 10942115, -25888931, -14884697},
+ },
+ {
+ {20513500, 5557931, -15604613, 7829531, 26413943, -2019404,
+ -21378968, 7471781, 13913677, -5137875},
+ {-25574376, 11967826, 29233242, 12948236, -6754465, 4713227,
+ -8940970, 14059180, 12878652, 8511905},
+ {-25656801, 3393631, -2955415, -7075526, -2250709, 9366908,
+ -30223418, 6812974, 5568676, -3127656},
+ },
+ {
+ {11630004, 12144454, 2116339, 13606037, 27378885, 15676917,
+ -17408753, -13504373, -14395196, 8070818},
+ {27117696, -10007378, -31282771, -5570088, 1127282, 12772488,
+ -29845906, 10483306, -11552749, -1028714},
+ {10637467, -5688064, 5674781, 1072708, -26343588, -6982302,
+ -1683975, 9177853, -27493162, 15431203},
+ },
+ {
+ {20525145, 10892566, -12742472, 12779443, -29493034, 16150075,
+ -28240519, 14943142, -15056790, -7935931},
+ {-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767,
+ -3239766, -3356550, 9594024},
+ {-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683,
+ -6492290, 13352335, -10977084},
+ },
+ {
+ {-1931799, -5407458, 3304649, -12884869, 17015806, -4877091,
+ -29783850, -7752482, -13215537, -319204},
+ {20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742,
+ 15077870, -22750759, 14523817},
+ {27406042, -6041657, 27423596, -4497394, 4996214, 10002360,
+ -28842031, -4545494, -30172742, -4805667},
+ },
+ },
+ {
+ {
+ {11374242, 12660715, 17861383, -12540833, 10935568, 1099227,
+ -13886076, -9091740, -27727044, 11358504},
+ {-12730809, 10311867, 1510375, 10778093, -2119455, -9145702,
+ 32676003, 11149336, -26123651, 4985768},
+ {-19096303, 341147, -6197485, -239033, 15756973, -8796662, -983043,
+ 13794114, -19414307, -15621255},
+ },
+ {
+ {6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603,
+ 6970005, -1691065, -9004790},
+ {1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622,
+ -5475723, -16796596, -5031438},
+ {-22273315, -13524424, -64685, -4334223, -18605636, -10921968,
+ -20571065, -7007978, -99853, -10237333},
+ },
+ {
+ {17747465, 10039260, 19368299, -4050591, -20630635, -16041286,
+ 31992683, -15857976, -29260363, -5511971},
+ {31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999,
+ -3744247, 4882242, -10626905},
+ {29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198,
+ 3272828, -5190932, -4162409},
+ },
+ {
+ {12501286, 4044383, -8612957, -13392385, -32430052, 5136599,
+ -19230378, -3529697, 330070, -3659409},
+ {6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522,
+ -8573892, -271295, 12071499},
+ {-8365515, -4042521, 25133448, -4517355, -6211027, 2265927,
+ -32769618, 1936675, -5159697, 3829363},
+ },
+ {
+ {28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550,
+ -6567787, 26333140, 14267664},
+ {-11067219, 11871231, 27385719, -10559544, -4585914, -11189312,
+ 10004786, -8709488, -21761224, 8930324},
+ {-21197785, -16396035, 25654216, -1725397, 12282012, 11008919,
+ 1541940, 4757911, -26491501, -16408940},
+ },
+ {
+ {13537262, -7759490, -20604840, 10961927, -5922820, -13218065,
+ -13156584, 6217254, -15943699, 13814990},
+ {-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681,
+ 9257833, -1956526, -1776914},
+ {-25045300, -10191966, 15366585, 15166509, -13105086, 8423556,
+ -29171540, 12361135, -18685978, 4578290},
+ },
+ {
+ {24579768, 3711570, 1342322, -11180126, -27005135, 14124956,
+ -22544529, 14074919, 21964432, 8235257},
+ {-6528613, -2411497, 9442966, -5925588, 12025640, -1487420,
+ -2981514, -1669206, 13006806, 2355433},
+ {-16304899, -13605259, -6632427, -5142349, 16974359, -10911083,
+ 27202044, 1719366, 1141648, -12796236},
+ },
+ {
+ {-12863944, -13219986, -8318266, -11018091, -6810145, -4843894,
+ 13475066, -3133972, 32674895, 13715045},
+ {11423335, -5468059, 32344216, 8962751, 24989809, 9241752,
+ -13265253, 16086212, -28740881, -15642093},
+ {-1409668, 12530728, -6368726, 10847387, 19531186, -14132160,
+ -11709148, 7791794, -27245943, 4383347},
+ },
+ },
+ {
+ {
+ {-28970898, 5271447, -1266009, -9736989, -12455236, 16732599,
+ -4862407, -4906449, 27193557, 6245191},
+ {-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898,
+ 3260492, 22510453, 8577507},
+ {-12632451, 11257346, -32692994, 13548177, -721004, 10879011,
+ 31168030, 13952092, -29571492, -3635906},
+ },
+ {
+ {3877321, -9572739, 32416692, 5405324, -11004407, -13656635,
+ 3759769, 11935320, 5611860, 8164018},
+ {-16275802, 14667797, 15906460, 12155291, -22111149, -9039718,
+ 32003002, -8832289, 5773085, -8422109},
+ {-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725,
+ 12376320, 31632953, 190926},
+ },
+ {
+ {-24593607, -16138885, -8423991, 13378746, 14162407, 6901328,
+ -8288749, 4508564, -25341555, -3627528},
+ {8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941,
+ -14786005, -1672488, 827625},
+ {-32720583, -16289296, -32503547, 7101210, 13354605, 2659080,
+ -1800575, -14108036, -24878478, 1541286},
+ },
+ {
+ {2901347, -1117687, 3880376, -10059388, -17620940, -3612781,
+ -21802117, -3567481, 20456845, -1885033},
+ {27019610, 12299467, -13658288, -1603234, -12861660, -4861471,
+ -19540150, -5016058, 29439641, 15138866},
+ {21536104, -6626420, -32447818, -10690208, -22408077, 5175814,
+ -5420040, -16361163, 7779328, 109896},
+ },
+ {
+ {30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390,
+ 12180118, 23177719, -554075},
+ {26572847, 3405927, -31701700, 12890905, -19265668, 5335866,
+ -6493768, 2378492, 4439158, -13279347},
+ {-22716706, 3489070, -9225266, -332753, 18875722, -1140095,
+ 14819434, -12731527, -17717757, -5461437},
+ },
+ {
+ {-5056483, 16566551, 15953661, 3767752, -10436499, 15627060,
+ -820954, 2177225, 8550082, -15114165},
+ {-18473302, 16596775, -381660, 15663611, 22860960, 15585581,
+ -27844109, -3582739, -23260460, -8428588},
+ {-32480551, 15707275, -8205912, -5652081, 29464558, 2713815,
+ -22725137, 15860482, -21902570, 1494193},
+ },
+ {
+ {-19562091, -14087393, -25583872, -9299552, 13127842, 759709,
+ 21923482, 16529112, 8742704, 12967017},
+ {-28464899, 1553205, 32536856, -10473729, -24691605, -406174,
+ -8914625, -2933896, -29903758, 15553883},
+ {21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572,
+ 14513274, 19375923, -12647961},
+ },
+ {
+ {8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818,
+ -6222716, 2862653, 9455043},
+ {29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124,
+ -2990080, 15511449, 4789663},
+ {-20679756, 7004547, 8824831, -9434977, -4045704, -3750736,
+ -5754762, 108893, 23513200, 16652362},
+ },
+ },
+ {
+ {
+ {-33256173, 4144782, -4476029, -6579123, 10770039, -7155542,
+ -6650416, -12936300, -18319198, 10212860},
+ {2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801,
+ 2600940, -9988298, -12506466},
+ {-24645692, 13317462, -30449259, -15653928, 21365574, -10869657,
+ 11344424, 864440, -2499677, -16710063},
+ },
+ {
+ {-26432803, 6148329, -17184412, -14474154, 18782929, -275997,
+ -22561534, 211300, 2719757, 4940997},
+ {-1323882, 3911313, -6948744, 14759765, -30027150, 7851207,
+ 21690126, 8518463, 26699843, 5276295},
+ {-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586,
+ 149635, -15452774, 7159369},
+ },
+ {
+ {9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009,
+ 8312176, 22477218, -8403385},
+ {18155857, -16504990, 19744716, 9006923, 15154154, -10538976,
+ 24256460, -4864995, -22548173, 9334109},
+ {2986088, -4911893, 10776628, -3473844, 10620590, -7083203,
+ -21413845, 14253545, -22587149, 536906},
+ },
+ {
+ {4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551,
+ 10589625, 10838060, -15420424},
+ {-19342404, 867880, 9277171, -3218459, -14431572, -1986443,
+ 19295826, -15796950, 6378260, 699185},
+ {7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039,
+ 15693155, -5045064, -13373962},
+ },
+ {
+ {-7737563, -5869402, -14566319, -7406919, 11385654, 13201616,
+ 31730678, -10962840, -3918636, -9669325},
+ {10188286, -15770834, -7336361, 13427543, 22223443, 14896287,
+ 30743455, 7116568, -21786507, 5427593},
+ {696102, 13206899, 27047647, -10632082, 15285305, -9853179,
+ 10798490, -4578720, 19236243, 12477404},
+ },
+ {
+ {-11229439, 11243796, -17054270, -8040865, -788228, -8167967,
+ -3897669, 11180504, -23169516, 7733644},
+ {17800790, -14036179, -27000429, -11766671, 23887827, 3149671,
+ 23466177, -10538171, 10322027, 15313801},
+ {26246234, 11968874, 32263343, -5468728, 6830755, -13323031,
+ -15794704, -101982, -24449242, 10890804},
+ },
+ {
+ {-31365647, 10271363, -12660625, -6267268, 16690207, -13062544,
+ -14982212, 16484931, 25180797, -5334884},
+ {-586574, 10376444, -32586414, -11286356, 19801893, 10997610,
+ 2276632, 9482883, 316878, 13820577},
+ {-9882808, -4510367, -2115506, 16457136, -11100081, 11674996,
+ 30756178, -7515054, 30696930, -3712849},
+ },
+ {
+ {32988917, -9603412, 12499366, 7910787, -10617257, -11931514,
+ -7342816, -9985397, -32349517, 7392473},
+ {-8855661, 15927861, 9866406, -3649411, -2396914, -16655781,
+ -30409476, -9134995, 25112947, -2926644},
+ {-2504044, -436966, 25621774, -5678772, 15085042, -5479877,
+ -24884878, -13526194, 5537438, -13914319},
+ },
+ },
+ {
+ {
+ {-11225584, 2320285, -9584280, 10149187, -33444663, 5808648,
+ -14876251, -1729667, 31234590, 6090599},
+ {-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721,
+ 15878753, -6970405, -9034768},
+ {-27757857, 247744, -15194774, -9002551, 23288161, -10011936,
+ -23869595, 6503646, 20650474, 1804084},
+ },
+ {
+ {-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995,
+ -10329713, 27842616, -202328},
+ {-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656,
+ 5031932, -11375082, 12714369},
+ {20807691, -7270825, 29286141, 11421711, -27876523, -13868230,
+ -21227475, 1035546, -19733229, 12796920},
+ },
+ {
+ {12076899, -14301286, -8785001, -11848922, -25012791, 16400684,
+ -17591495, -12899438, 3480665, -15182815},
+ {-32361549, 5457597, 28548107, 7833186, 7303070, -11953545,
+ -24363064, -15921875, -33374054, 2771025},
+ {-21389266, 421932, 26597266, 6860826, 22486084, -6737172,
+ -17137485, -4210226, -24552282, 15673397},
+ },
+ {
+ {-20184622, 2338216, 19788685, -9620956, -4001265, -8740893,
+ -20271184, 4733254, 3727144, -12934448},
+ {6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594,
+ 7975683, 31123697, -10958981},
+ {30069250, -11435332, 30434654, 2958439, 18399564, -976289,
+ 12296869, 9204260, -16432438, 9648165},
+ },
+ {
+ {32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266,
+ 5248604, -26008332, -11377501},
+ {17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711,
+ 15298639, 2662509, -16297073},
+ {-1172927, -7558695, -4366770, -4287744, -21346413, -8434326,
+ 32087529, -1222777, 32247248, -14389861},
+ },
+ {
+ {14312628, 1221556, 17395390, -8700143, -4945741, -8684635,
+ -28197744, -9637817, -16027623, -13378845},
+ {-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502,
+ 9803137, 17597934, 2346211},
+ {18510800, 15337574, 26171504, 981392, -22241552, 7827556,
+ -23491134, -11323352, 3059833, -11782870},
+ },
+ {
+ {10141598, 6082907, 17829293, -1947643, 9830092, 13613136,
+ -25556636, -5544586, -33502212, 3592096},
+ {33114168, -15889352, -26525686, -13343397, 33076705, 8716171,
+ 1151462, 1521897, -982665, -6837803},
+ {-32939165, -4255815, 23947181, -324178, -33072974, -12305637,
+ -16637686, 3891704, 26353178, 693168},
+ },
+ {
+ {30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294,
+ -400668, 31375464, 14369965},
+ {-14370654, -7772529, 1510301, 6434173, -18784789, -6262728,
+ 32732230, -13108839, 17901441, 16011505},
+ {18171223, -11934626, -12500402, 15197122, -11038147, -15230035,
+ -19172240, -16046376, 8764035, 12309598},
+ },
+ },
+ {
+ {
+ {5975908, -5243188, -19459362, -9681747, -11541277, 14015782,
+ -23665757, 1228319, 17544096, -10593782},
+ {5811932, -1715293, 3442887, -2269310, -18367348, -8359541,
+ -18044043, -15410127, -5565381, 12348900},
+ {-31399660, 11407555, 25755363, 6891399, -3256938, 14872274,
+ -24849353, 8141295, -10632534, -585479},
+ },
+ {
+ {-12675304, 694026, -5076145, 13300344, 14015258, -14451394,
+ -9698672, -11329050, 30944593, 1130208},
+ {8247766, -6710942, -26562381, -7709309, -14401939, -14648910,
+ 4652152, 2488540, 23550156, -271232},
+ {17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737,
+ -5908146, -408818, -137719},
+ },
+ {
+ {16091085, -16253926, 18599252, 7340678, 2137637, -1221657,
+ -3364161, 14550936, 3260525, -7166271},
+ {-4910104, -13332887, 18550887, 10864893, -16459325, -7291596,
+ -23028869, -13204905, -12748722, 2701326},
+ {-8574695, 16099415, 4629974, -16340524, -20786213, -6005432,
+ -10018363, 9276971, 11329923, 1862132},
+ },
+ {
+ {14763076, -15903608, -30918270, 3689867, 3511892, 10313526,
+ -21951088, 12219231, -9037963, -940300},
+ {8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216,
+ -2909717, -15438168, 11595570},
+ {15214962, 3537601, -26238722, -14058872, 4418657, -15230761,
+ 13947276, 10730794, -13489462, -4363670},
+ },
+ {
+ {-2538306, 7682793, 32759013, 263109, -29984731, -7955452,
+ -22332124, -10188635, 977108, 699994},
+ {-12466472, 4195084, -9211532, 550904, -15565337, 12917920,
+ 19118110, -439841, -30534533, -14337913},
+ {31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237,
+ -10051775, 12493932, -5409317},
+ },
+ {
+ {-25680606, 5260744, -19235809, -6284470, -3695942, 16566087,
+ 27218280, 2607121, 29375955, 6024730},
+ {842132, -2794693, -4763381, -8722815, 26332018, -12405641,
+ 11831880, 6985184, -9940361, 2854096},
+ {-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645,
+ 960770, 12121869, 16648078},
+ },
+ {
+ {-15218652, 14667096, -13336229, 2013717, 30598287, -464137,
+ -31504922, -7882064, 20237806, 2838411},
+ {-19288047, 4453152, 15298546, -16178388, 22115043, -15972604,
+ 12544294, -13470457, 1068881, -12499905},
+ {-9558883, -16518835, 33238498, 13506958, 30505848, -1114596,
+ -8486907, -2630053, 12521378, 4845654},
+ },
+ {
+ {-28198521, 10744108, -2958380, 10199664, 7759311, -13088600,
+ 3409348, -873400, -6482306, -12885870},
+ {-23561822, 6230156, -20382013, 10655314, -24040585, -11621172,
+ 10477734, -1240216, -3113227, 13974498},
+ {12966261, 15550616, -32038948, -1615346, 21025980, -629444,
+ 5642325, 7188737, 18895762, 12629579},
+ },
+ },
+ {
+ {
+ {14741879, -14946887, 22177208, -11721237, 1279741, 8058600,
+ 11758140, 789443, 32195181, 3895677},
+ {10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575,
+ -3566119, -8982069, 4429647},
+ {-2453894, 15725973, -20436342, -10410672, -5803908, -11040220,
+ -7135870, -11642895, 18047436, -15281743},
+ },
+ {
+ {-25173001, -11307165, 29759956, 11776784, -22262383, -15820455,
+ 10993114, -12850837, -17620701, -9408468},
+ {21987233, 700364, -24505048, 14972008, -7774265, -5718395,
+ 32155026, 2581431, -29958985, 8773375},
+ {-25568350, 454463, -13211935, 16126715, 25240068, 8594567,
+ 20656846, 12017935, -7874389, -13920155},
+ },
+ {
+ {6028182, 6263078, -31011806, -11301710, -818919, 2461772,
+ -31841174, -5468042, -1721788, -2776725},
+ {-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845,
+ -4166698, 28408820, 6816612},
+ {-10358094, -8237829, 19549651, -12169222, 22082623, 16147817,
+ 20613181, 13982702, -10339570, 5067943},
+ },
+ {
+ {-30505967, -3821767, 12074681, 13582412, -19877972, 2443951,
+ -19719286, 12746132, 5331210, -10105944},
+ {30528811, 3601899, -1957090, 4619785, -27361822, -15436388,
+ 24180793, -12570394, 27679908, -1648928},
+ {9402404, -13957065, 32834043, 10838634, -26580150, -13237195,
+ 26653274, -8685565, 22611444, -12715406},
+ },
+ {
+ {22190590, 1118029, 22736441, 15130463, -30460692, -5991321,
+ 19189625, -4648942, 4854859, 6622139},
+ {-8310738, -2953450, -8262579, -3388049, -10401731, -271929,
+ 13424426, -3567227, 26404409, 13001963},
+ {-31241838, -15415700, -2994250, 8939346, 11562230, -12840670,
+ -26064365, -11621720, -15405155, 11020693},
+ },
+ {
+ {1866042, -7949489, -7898649, -10301010, 12483315, 13477547,
+ 3175636, -12424163, 28761762, 1406734},
+ {-448555, -1777666, 13018551, 3194501, -9580420, -11161737,
+ 24760585, -4347088, 25577411, -13378680},
+ {-24290378, 4759345, -690653, -1852816, 2066747, 10693769,
+ -29595790, 9884936, -9368926, 4745410},
+ },
+ {
+ {-9141284, 6049714, -19531061, -4341411, -31260798, 9944276,
+ -15462008, -11311852, 10931924, -11931931},
+ {-16561513, 14112680, -8012645, 4817318, -8040464, -11414606,
+ -22853429, 10856641, -20470770, 13434654},
+ {22759489, -10073434, -16766264, -1871422, 13637442, -10168091,
+ 1765144, -12654326, 28445307, -5364710},
+ },
+ {
+ {29875063, 12493613, 2795536, -3786330, 1710620, 15181182,
+ -10195717, -8788675, 9074234, 1167180},
+ {-26205683, 11014233, -9842651, -2635485, -26908120, 7532294,
+ -18716888, -9535498, 3843903, 9367684},
+ {-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123,
+ 8601684, -139197, 4242895},
+ },
+ },
+ {
+ {
+ {22092954, -13191123, -2042793, -11968512, 32186753, -11517388,
+ -6574341, 2470660, -27417366, 16625501},
+ {-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857,
+ 2602725, -27351616, 14247413},
+ {6314175, -10264892, -32772502, 15957557, -10157730, 168750,
+ -8618807, 14290061, 27108877, -1180880},
+ },
+ {
+ {-8586597, -7170966, 13241782, 10960156, -32991015, -13794596,
+ 33547976, -11058889, -27148451, 981874},
+ {22833440, 9293594, -32649448, -13618667, -9136966, 14756819,
+ -22928859, -13970780, -10479804, -16197962},
+ {-7768587, 3326786, -28111797, 10783824, 19178761, 14905060,
+ 22680049, 13906969, -15933690, 3797899},
+ },
+ {
+ {21721356, -4212746, -12206123, 9310182, -3882239, -13653110,
+ 23740224, -2709232, 20491983, -8042152},
+ {9209270, -15135055, -13256557, -6167798, -731016, 15289673,
+ 25947805, 15286587, 30997318, -6703063},
+ {7392032, 16618386, 23946583, -8039892, -13265164, -1533858,
+ -14197445, -2321576, 17649998, -250080},
+ },
+ {
+ {-9301088, -14193827, 30609526, -3049543, -25175069, -1283752,
+ -15241566, -9525724, -2233253, 7662146},
+ {-17558673, 1763594, -33114336, 15908610, -30040870, -12174295,
+ 7335080, -8472199, -3174674, 3440183},
+ {-19889700, -5977008, -24111293, -9688870, 10799743, -16571957,
+ 40450, -4431835, 4862400, 1133},
+ },
+ {
+ {-32856209, -7873957, -5422389, 14860950, -16319031, 7956142,
+ 7258061, 311861, -30594991, -7379421},
+ {-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763,
+ 16527196, 18278453, 15405622},
+ {-4381906, 8508652, -19898366, -3674424, -5984453, 15149970,
+ -13313598, 843523, -21875062, 13626197},
+ },
+ {
+ {2281448, -13487055, -10915418, -2609910, 1879358, 16164207,
+ -10783882, 3953792, 13340839, 15928663},
+ {31727126, -7179855, -18437503, -8283652, 2875793, -16390330,
+ -25269894, -7014826, -23452306, 5964753},
+ {4100420, -5959452, -17179337, 6017714, -18705837, 12227141,
+ -26684835, 11344144, 2538215, -7570755},
+ },
+ {
+ {-9433605, 6123113, 11159803, -2156608, 30016280, 14966241,
+ -20474983, 1485421, -629256, -15958862},
+ {-26804558, 4260919, 11851389, 9658551, -32017107, 16367492,
+ -20205425, -13191288, 11659922, -11115118},
+ {26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568,
+ -10170080, 33100372, -1306171},
+ },
+ {
+ {15121113, -5201871, -10389905, 15427821, -27509937, -15992507,
+ 21670947, 4486675, -5931810, -14466380},
+ {16166486, -9483733, -11104130, 6023908, -31926798, -1364923,
+ 2340060, -16254968, -10735770, -10039824},
+ {28042865, -3557089, -12126526, 12259706, -3717498, -6945899,
+ 6766453, -8689599, 18036436, 5803270},
+ },
+ },
+ {
+ {
+ {-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391,
+ 4598332, -6159431, -14117438},
+ {-31031306, -14256194, 17332029, -2383520, 31312682, -5967183,
+ 696309, 50292, -20095739, 11763584},
+ {-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117,
+ -12613632, -19773211, -10713562},
+ },
+ {
+ {30464590, -11262872, -4127476, -12734478, 19835327, -7105613,
+ -24396175, 2075773, -17020157, 992471},
+ {18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841,
+ 8080033, -11574335, -10601610},
+ {19598397, 10334610, 12555054, 2555664, 18821899, -10339780,
+ 21873263, 16014234, 26224780, 16452269},
+ },
+ {
+ {-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804,
+ -7618186, -20533829, 3698650},
+ {14187449, 3448569, -10636236, -10810935, -22663880, -3433596,
+ 7268410, -10890444, 27394301, 12015369},
+ {19695761, 16087646, 28032085, 12999827, 6817792, 11427614,
+ 20244189, -1312777, -13259127, -3402461},
+ },
+ {
+ {30860103, 12735208, -1888245, -4699734, -16974906, 2256940,
+ -8166013, 12298312, -8550524, -10393462},
+ {-5719826, -11245325, -1910649, 15569035, 26642876, -7587760,
+ -5789354, -15118654, -4976164, 12651793},
+ {-2848395, 9953421, 11531313, -5282879, 26895123, -12697089,
+ -13118820, -16517902, 9768698, -2533218},
+ },
+ {
+ {-24719459, 1894651, -287698, -4704085, 15348719, -8156530,
+ 32767513, 12765450, 4940095, 10678226},
+ {18860224, 15980149, -18987240, -1562570, -26233012, -11071856,
+ -7843882, 13944024, -24372348, 16582019},
+ {-15504260, 4970268, -29893044, 4175593, -20993212, -2199756,
+ -11704054, 15444560, -11003761, 7989037},
+ },
+ {
+ {31490452, 5568061, -2412803, 2182383, -32336847, 4531686,
+ -32078269, 6200206, -19686113, -14800171},
+ {-17308668, -15879940, -31522777, -2831, -32887382, 16375549,
+ 8680158, -16371713, 28550068, -6857132},
+ {-28126887, -5688091, 16837845, -1820458, -6850681, 12700016,
+ -30039981, 4364038, 1155602, 5988841},
+ },
+ {
+ {21890435, -13272907, -12624011, 12154349, -7831873, 15300496,
+ 23148983, -4470481, 24618407, 8283181},
+ {-33136107, -10512751, 9975416, 6841041, -31559793, 16356536,
+ 3070187, -7025928, 1466169, 10740210},
+ {-1509399, -15488185, -13503385, -10655916, 32799044, 909394,
+ -13938903, -5779719, -32164649, -15327040},
+ },
+ {
+ {3960823, -14267803, -28026090, -15918051, -19404858, 13146868,
+ 15567327, 951507, -3260321, -573935},
+ {24740841, 5052253, -30094131, 8961361, 25877428, 6165135,
+ -24368180, 14397372, -7380369, -6144105},
+ {-28888365, 3510803, -28103278, -1158478, -11238128, -10631454,
+ -15441463, -14453128, -1625486, -6494814},
+ },
+ },
+ {
+ {
+ {793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843,
+ -4885251, -9906200, -621852},
+ {5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374,
+ 1468826, -6171428, -15186581},
+ {-4859255, -3779343, -2917758, -6748019, 7778750, 11688288,
+ -30404353, -9871238, -1558923, -9863646},
+ },
+ {
+ {10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958,
+ 14783338, -30581476, -15757844},
+ {10566929, 12612572, -31944212, 11118703, -12633376, 12362879,
+ 21752402, 8822496, 24003793, 14264025},
+ {27713862, -7355973, -11008240, 9227530, 27050101, 2504721,
+ 23886875, -13117525, 13958495, -5732453},
+ },
+ {
+ {-23481610, 4867226, -27247128, 3900521, 29838369, -8212291,
+ -31889399, -10041781, 7340521, -15410068},
+ {4646514, -8011124, -22766023, -11532654, 23184553, 8566613,
+ 31366726, -1381061, -15066784, -10375192},
+ {-17270517, 12723032, -16993061, 14878794, 21619651, -6197576,
+ 27584817, 3093888, -8843694, 3849921},
+ },
+ {
+ {-9064912, 2103172, 25561640, -15125738, -5239824, 9582958,
+ 32477045, -9017955, 5002294, -15550259},
+ {-12057553, -11177906, 21115585, -13365155, 8808712, -12030708,
+ 16489530, 13378448, -25845716, 12741426},
+ {-5946367, 10645103, -30911586, 15390284, -3286982, -7118677,
+ 24306472, 15852464, 28834118, -7646072},
+ },
+ {
+ {-17335748, -9107057, -24531279, 9434953, -8472084, -583362,
+ -13090771, 455841, 20461858, 5491305},
+ {13669248, -16095482, -12481974, -10203039, -14569770, -11893198,
+ -24995986, 11293807, -28588204, -9421832},
+ {28497928, 6272777, -33022994, 14470570, 8906179, -1225630,
+ 18504674, -14165166, 29867745, -8795943},
+ },
+ {
+ {-16207023, 13517196, -27799630, -13697798, 24009064, -6373891,
+ -6367600, -13175392, 22853429, -4012011},
+ {24191378, 16712145, -13931797, 15217831, 14542237, 1646131,
+ 18603514, -11037887, 12876623, -2112447},
+ {17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753,
+ 608397, 16031844, 3723494},
+ },
+ {
+ {-28632773, 12763728, -20446446, 7577504, 33001348, -13017745,
+ 17558842, -7872890, 23896954, -4314245},
+ {-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064,
+ 7229064, -9919646, -8826859},
+ {28816045, 298879, -28165016, -15920938, 19000928, -1665890,
+ -12680833, -2949325, -18051778, -2082915},
+ },
+ {
+ {16000882, -344896, 3493092, -11447198, -29504595, -13159789,
+ 12577740, 16041268, -19715240, 7847707},
+ {10151868, 10572098, 27312476, 7922682, 14825339, 4723128,
+ -32855931, -6519018, -10020567, 3852848},
+ {-11430470, 15697596, -21121557, -4420647, 5386314, 15063598,
+ 16514493, -15932110, 29330899, -15076224},
+ },
+ },
+ {
+ {
+ {-25499735, -4378794, -15222908, -6901211, 16615731, 2051784,
+ 3303702, 15490, -27548796, 12314391},
+ {15683520, -6003043, 18109120, -9980648, 15337968, -5997823,
+ -16717435, 15921866, 16103996, -3731215},
+ {-23169824, -10781249, 13588192, -1628807, -3798557, -1074929,
+ -19273607, 5402699, -29815713, -9841101},
+ },
+ {
+ {23190676, 2384583, -32714340, 3462154, -29903655, -1529132,
+ -11266856, 8911517, -25205859, 2739713},
+ {21374101, -3554250, -33524649, 9874411, 15377179, 11831242,
+ -33529904, 6134907, 4931255, 11987849},
+ {-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539,
+ 13861388, -30076310, 10117930},
+ },
+ {
+ {-29501170, -10744872, -26163768, 13051539, -25625564, 5089643,
+ -6325503, 6704079, 12890019, 15728940},
+ {-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376,
+ -10428139, 12885167, 8311031},
+ {-17516482, 5352194, 10384213, -13811658, 7506451, 13453191,
+ 26423267, 4384730, 1888765, -5435404},
+ },
+ {
+ {-25817338, -3107312, -13494599, -3182506, 30896459, -13921729,
+ -32251644, -12707869, -19464434, -3340243},
+ {-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245,
+ 14845197, 17151279, -9854116},
+ {-24830458, -12733720, -15165978, 10367250, -29530908, -265356,
+ 22825805, -7087279, -16866484, 16176525},
+ },
+ {
+ {-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182,
+ -10363426, -28746253, -10197509},
+ {-10626600, -4486402, -13320562, -5125317, 3432136, -6393229,
+ 23632037, -1940610, 32808310, 1099883},
+ {15030977, 5768825, -27451236, -2887299, -6427378, -15361371,
+ -15277896, -6809350, 2051441, -15225865},
+ },
+ {
+ {-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398,
+ -14154188, -22686354, 16633660},
+ {4577086, -16752288, 13249841, -15304328, 19958763, -14537274,
+ 18559670, -10759549, 8402478, -9864273},
+ {-28406330, -1051581, -26790155, -907698, -17212414, -11030789,
+ 9453451, -14980072, 17983010, 9967138},
+ },
+ {
+ {-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990,
+ 7806337, 17507396, 3651560},
+ {-10420457, -4118111, 14584639, 15971087, -15768321, 8861010,
+ 26556809, -5574557, -18553322, -11357135},
+ {2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121,
+ 8459447, -5605463, -7621941},
+ },
+ {
+ {-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813,
+ -849066, 17258084, -7977739},
+ {18164541, -10595176, -17154882, -1542417, 19237078, -9745295,
+ 23357533, -15217008, 26908270, 12150756},
+ {-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168,
+ -5537701, -32302074, 16215819},
+ },
+ },
+ {
+ {
+ {-6898905, 9824394, -12304779, -4401089, -31397141, -6276835,
+ 32574489, 12532905, -7503072, -8675347},
+ {-27343522, -16515468, -27151524, -10722951, 946346, 16291093,
+ 254968, 7168080, 21676107, -1943028},
+ {21260961, -8424752, -16831886, -11920822, -23677961, 3968121,
+ -3651949, -6215466, -3556191, -7913075},
+ },
+ {
+ {16544754, 13250366, -16804428, 15546242, -4583003, 12757258,
+ -2462308, -8680336, -18907032, -9662799},
+ {-2415239, -15577728, 18312303, 4964443, -15272530, -12653564,
+ 26820651, 16690659, 25459437, -4564609},
+ {-25144690, 11425020, 28423002, -11020557, -6144921, -15826224,
+ 9142795, -2391602, -6432418, -1644817},
+ },
+ {
+ {-23104652, 6253476, 16964147, -3768872, -25113972, -12296437,
+ -27457225, -16344658, 6335692, 7249989},
+ {-30333227, 13979675, 7503222, -12368314, -11956721, -4621693,
+ -30272269, 2682242, 25993170, -12478523},
+ {4364628, 5930691, 32304656, -10044554, -8054781, 15091131,
+ 22857016, -10598955, 31820368, 15075278},
+ },
+ {
+ {31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788,
+ -9650886, -17970238, 12833045},
+ {19073683, 14851414, -24403169, -11860168, 7625278, 11091125,
+ -19619190, 2074449, -9413939, 14905377},
+ {24483667, -11935567, -2518866, -11547418, -1553130, 15355506,
+ -25282080, 9253129, 27628530, -7555480},
+ },
+ {
+ {17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324,
+ -9157582, -14110875, 15297016},
+ {510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417,
+ -11864220, 8683221, 2921426},
+ {18606791, 11874196, 27155355, -5281482, -24031742, 6265446,
+ -25178240, -1278924, 4674690, 13890525},
+ },
+ {
+ {13609624, 13069022, -27372361, -13055908, 24360586, 9592974,
+ 14977157, 9835105, 4389687, 288396},
+ {9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062,
+ 8317628, 23388070, 16052080},
+ {12720016, 11937594, -31970060, -5028689, 26900120, 8561328,
+ -20155687, -11632979, -14754271, -10812892},
+ },
+ {
+ {15961858, 14150409, 26716931, -665832, -22794328, 13603569,
+ 11829573, 7467844, -28822128, 929275},
+ {11038231, -11582396, -27310482, -7316562, -10498527, -16307831,
+ -23479533, -9371869, -21393143, 2465074},
+ {20017163, -4323226, 27915242, 1529148, 12396362, 15675764,
+ 13817261, -9658066, 2463391, -4622140},
+ },
+ {
+ {-16358878, -12663911, -12065183, 4996454, -1256422, 1073572,
+ 9583558, 12851107, 4003896, 12673717},
+ {-1731589, -15155870, -3262930, 16143082, 19294135, 13385325,
+ 14741514, -9103726, 7903886, 2348101},
+ {24536016, -16515207, 12715592, -3862155, 1511293, 10047386,
+ -3842346, -7129159, -28377538, 10048127},
+ },
+ },
+ {
+ {
+ {-12622226, -6204820, 30718825, 2591312, -10617028, 12192840,
+ 18873298, -7297090, -32297756, 15221632},
+ {-26478122, -11103864, 11546244, -1852483, 9180880, 7656409,
+ -21343950, 2095755, 29769758, 6593415},
+ {-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345,
+ -6118678, 30958054, 8292160},
+ },
+ {
+ {31429822, -13959116, 29173532, 15632448, 12174511, -2760094,
+ 32808831, 3977186, 26143136, -3148876},
+ {22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633,
+ -1674433, -3758243, -2304625},
+ {-15491917, 8012313, -2514730, -12702462, -23965846, -10254029,
+ -1612713, -1535569, -16664475, 8194478},
+ },
+ {
+ {27338066, -7507420, -7414224, 10140405, -19026427, -6589889,
+ 27277191, 8855376, 28572286, 3005164},
+ {26287124, 4821776, 25476601, -4145903, -3764513, -15788984,
+ -18008582, 1182479, -26094821, -13079595},
+ {-7171154, 3178080, 23970071, 6201893, -17195577, -4489192,
+ -21876275, -13982627, 32208683, -1198248},
+ },
+ {
+ {-16657702, 2817643, -10286362, 14811298, 6024667, 13349505,
+ -27315504, -10497842, -27672585, -11539858},
+ {15941029, -9405932, -21367050, 8062055, 31876073, -238629,
+ -15278393, -1444429, 15397331, -4130193},
+ {8934485, -13485467, -23286397, -13423241, -32446090, 14047986,
+ 31170398, -1441021, -27505566, 15087184},
+ },
+ {
+ {-18357243, -2156491, 24524913, -16677868, 15520427, -6360776,
+ -15502406, 11461896, 16788528, -5868942},
+ {-1947386, 16013773, 21750665, 3714552, -17401782, -16055433,
+ -3770287, -10323320, 31322514, -11615635},
+ {21426655, -5650218, -13648287, -5347537, -28812189, -4920970,
+ -18275391, -14621414, 13040862, -12112948},
+ },
+ {
+ {11293895, 12478086, -27136401, 15083750, -29307421, 14748872,
+ 14555558, -13417103, 1613711, 4896935},
+ {-25894883, 15323294, -8489791, -8057900, 25967126, -13425460,
+ 2825960, -4897045, -23971776, -11267415},
+ {-15924766, -5229880, -17443532, 6410664, 3622847, 10243618,
+ 20615400, 12405433, -23753030, -8436416},
+ },
+ {
+ {-7091295, 12556208, -20191352, 9025187, -17072479, 4333801,
+ 4378436, 2432030, 23097949, -566018},
+ {4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264,
+ 10103221, -18512313, 2424778},
+ {366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678,
+ 1344109, -3642553, 12412659},
+ },
+ {
+ {-24001791, 7690286, 14929416, -168257, -32210835, -13412986,
+ 24162697, -15326504, -3141501, 11179385},
+ {18289522, -14724954, 8056945, 16430056, -21729724, 7842514,
+ -6001441, -1486897, -18684645, -11443503},
+ {476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959,
+ 13403813, 11052904, 5219329},
+ },
+ },
+ {
+ {
+ {20678546, -8375738, -32671898, 8849123, -5009758, 14574752,
+ 31186971, -3973730, 9014762, -8579056},
+ {-13644050, -10350239, -15962508, 5075808, -1514661, -11534600,
+ -33102500, 9160280, 8473550, -3256838},
+ {24900749, 14435722, 17209120, -15292541, -22592275, 9878983,
+ -7689309, -16335821, -24568481, 11788948},
+ },
+ {
+ {-3118155, -11395194, -13802089, 14797441, 9652448, -6845904,
+ -20037437, 10410733, -24568470, -1458691},
+ {-15659161, 16736706, -22467150, 10215878, -9097177, 7563911,
+ 11871841, -12505194, -18513325, 8464118},
+ {-23400612, 8348507, -14585951, -861714, -3950205, -6373419,
+ 14325289, 8628612, 33313881, -8370517},
+ },
+ {
+ {-20186973, -4967935, 22367356, 5271547, -1097117, -4788838,
+ -24805667, -10236854, -8940735, -5818269},
+ {-6948785, -1795212, -32625683, -16021179, 32635414, -7374245,
+ 15989197, -12838188, 28358192, -4253904},
+ {-23561781, -2799059, -32351682, -1661963, -9147719, 10429267,
+ -16637684, 4072016, -5351664, 5596589},
+ },
+ {
+ {-28236598, -3390048, 12312896, 6213178, 3117142, 16078565,
+ 29266239, 2557221, 1768301, 15373193},
+ {-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902,
+ -4504991, -24660491, 3442910},
+ {-30210571, 5124043, 14181784, 8197961, 18964734, -11939093,
+ 22597931, 7176455, -18585478, 13365930},
+ },
+ {
+ {-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107,
+ -8570186, -9689599, -3031667},
+ {25008904, -10771599, -4305031, -9638010, 16265036, 15721635,
+ 683793, -11823784, 15723479, -15163481},
+ {-9660625, 12374379, -27006999, -7026148, -7724114, -12314514,
+ 11879682, 5400171, 519526, -1235876},
+ },
+ {
+ {22258397, -16332233, -7869817, 14613016, -22520255, -2950923,
+ -20353881, 7315967, 16648397, 7605640},
+ {-8081308, -8464597, -8223311, 9719710, 19259459, -15348212,
+ 23994942, -5281555, -9468848, 4763278},
+ {-21699244, 9220969, -15730624, 1084137, -25476107, -2852390,
+ 31088447, -7764523, -11356529, 728112},
+ },
+ {
+ {26047220, -11751471, -6900323, -16521798, 24092068, 9158119,
+ -4273545, -12555558, -29365436, -5498272},
+ {17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007,
+ 12327945, 10750447, 10014012},
+ {-10312768, 3936952, 9156313, -8897683, 16498692, -994647,
+ -27481051, -666732, 3424691, 7540221},
+ },
+ {
+ {30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422,
+ -16317219, -9244265, 15258046},
+ {13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406,
+ 2711395, 1062915, -5136345},
+ {-19240248, -11254599, -29509029, -7499965, -5835763, 13005411,
+ -6066489, 12194497, 32960380, 1459310},
+ },
+ },
+ {
+ {
+ {19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197,
+ -6101885, 18638003, -11174937},
+ {31395534, 15098109, 26581030, 8030562, -16527914, -5007134,
+ 9012486, -7584354, -6643087, -5442636},
+ {-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222,
+ 9677543, -32294889, -6456008},
+ },
+ {
+ {-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579,
+ -7839692, -7852844, -8138429},
+ {-15236356, -15433509, 7766470, 746860, 26346930, -10221762,
+ -27333451, 10754588, -9431476, 5203576},
+ {31834314, 14135496, -770007, 5159118, 20917671, -16768096,
+ -7467973, -7337524, 31809243, 7347066},
+ },
+ {
+ {-9606723, -11874240, 20414459, 13033986, 13716524, -11691881,
+ 19797970, -12211255, 15192876, -2087490},
+ {-12663563, -2181719, 1168162, -3804809, 26747877, -14138091,
+ 10609330, 12694420, 33473243, -13382104},
+ {33184999, 11180355, 15832085, -11385430, -1633671, 225884,
+ 15089336, -11023903, -6135662, 14480053},
+ },
+ {
+ {31308717, -5619998, 31030840, -1897099, 15674547, -6582883,
+ 5496208, 13685227, 27595050, 8737275},
+ {-20318852, -15150239, 10933843, -16178022, 8335352, -7546022,
+ -31008351, -12610604, 26498114, 66511},
+ {22644454, -8761729, -16671776, 4884562, -3105614, -13559366,
+ 30540766, -4286747, -13327787, -7515095},
+ },
+ {
+ {-28017847, 9834845, 18617207, -2681312, -3401956, -13307506,
+ 8205540, 13585437, -17127465, 15115439},
+ {23711543, -672915, 31206561, -8362711, 6164647, -9709987,
+ -33535882, -1426096, 8236921, 16492939},
+ {-23910559, -13515526, -26299483, -4503841, 25005590, -7687270,
+ 19574902, 10071562, 6708380, -6222424},
+ },
+ {
+ {2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017,
+ 9328700, 29955601, -11678310},
+ {3096359, 9271816, -21620864, -15521844, -14847996, -7592937,
+ -25892142, -12635595, -9917575, 6216608},
+ {-32615849, 338663, -25195611, 2510422, -29213566, -13820213,
+ 24822830, -6146567, -26767480, 7525079},
+ },
+ {
+ {-23066649, -13985623, 16133487, -7896178, -3389565, 778788,
+ -910336, -2782495, -19386633, 11994101},
+ {21691500, -13624626, -641331, -14367021, 3285881, -3483596,
+ -25064666, 9718258, -7477437, 13381418},
+ {18445390, -4202236, 14979846, 11622458, -1727110, -3582980,
+ 23111648, -6375247, 28535282, 15779576},
+ },
+ {
+ {30098053, 3089662, -9234387, 16662135, -21306940, 11308411,
+ -14068454, 12021730, 9955285, -16303356},
+ {9734894, -14576830, -7473633, -9138735, 2060392, 11313496,
+ -18426029, 9924399, 20194861, 13380996},
+ {-26378102, -7965207, -22167821, 15789297, -18055342, -6168792,
+ -1984914, 15707771, 26342023, 10146099},
+ },
+ },
+ {
+ {
+ {-26016874, -219943, 21339191, -41388, 19745256, -2878700,
+ -29637280, 2227040, 21612326, -545728},
+ {-13077387, 1184228, 23562814, -5970442, -20351244, -6348714,
+ 25764461, 12243797, -20856566, 11649658},
+ {-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944,
+ 6114064, 33514190, 2333242},
+ },
+ {
+ {-21433588, -12421821, 8119782, 7219913, -21830522, -9016134,
+ -6679750, -12670638, 24350578, -13450001},
+ {-4116307, -11271533, -23886186, 4843615, -30088339, 690623,
+ -31536088, -10406836, 8317860, 12352766},
+ {18200138, -14475911, -33087759, -2696619, -23702521, -9102511,
+ -23552096, -2287550, 20712163, 6719373},
+ },
+ {
+ {26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530,
+ -3763210, 26224235, -3297458},
+ {-17168938, -14854097, -3395676, -16369877, -19954045, 14050420,
+ 21728352, 9493610, 18620611, -16428628},
+ {-13323321, 13325349, 11432106, 5964811, 18609221, 6062965,
+ -5269471, -9725556, -30701573, -16479657},
+ },
+ {
+ {-23860538, -11233159, 26961357, 1640861, -32413112, -16737940,
+ 12248509, -5240639, 13735342, 1934062},
+ {25089769, 6742589, 17081145, -13406266, 21909293, -16067981,
+ -15136294, -3765346, -21277997, 5473616},
+ {31883677, -7961101, 1083432, -11572403, 22828471, 13290673,
+ -7125085, 12469656, 29111212, -5451014},
+ },
+ {
+ {24244947, -15050407, -26262976, 2791540, -14997599, 16666678,
+ 24367466, 6388839, -10295587, 452383},
+ {-25640782, -3417841, 5217916, 16224624, 19987036, -4082269,
+ -24236251, -5915248, 15766062, 8407814},
+ {-20406999, 13990231, 15495425, 16395525, 5377168, 15166495,
+ -8917023, -4388953, -8067909, 2276718},
+ },
+ {
+ {30157918, 12924066, -17712050, 9245753, 19895028, 3368142,
+ -23827587, 5096219, 22740376, -7303417},
+ {2041139, -14256350, 7783687, 13876377, -25946985, -13352459,
+ 24051124, 13742383, -15637599, 13295222},
+ {33338237, -8505733, 12532113, 7977527, 9106186, -1715251,
+ -17720195, -4612972, -4451357, -14669444},
+ },
+ {
+ {-20045281, 5454097, -14346548, 6447146, 28862071, 1883651,
+ -2469266, -4141880, 7770569, 9620597},
+ {23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528,
+ -1694323, -33502340, -14767970},
+ {1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801,
+ 1220118, 30494170, -11440799},
+ },
+ {
+ {-5037580, -13028295, -2970559, -3061767, 15640974, -6701666,
+ -26739026, 926050, -1684339, -13333647},
+ {13908495, -3549272, 30919928, -6273825, -21521863, 7989039,
+ 9021034, 9078865, 3353509, 4033511},
+ {-29663431, -15113610, 32259991, -344482, 24295849, -12912123,
+ 23161163, 8839127, 27485041, 7356032},
+ },
+ },
+ {
+ {
+ {9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142,
+ 2625015, 28431036, -16771834},
+ {-23839233, -8311415, -25945511, 7480958, -17681669, -8354183,
+ -22545972, 14150565, 15970762, 4099461},
+ {29262576, 16756590, 26350592, -8793563, 8529671, -11208050,
+ 13617293, -9937143, 11465739, 8317062},
+ },
+ {
+ {-25493081, -6962928, 32500200, -9419051, -23038724, -2302222,
+ 14898637, 3848455, 20969334, -5157516},
+ {-20384450, -14347713, -18336405, 13884722, -33039454, 2842114,
+ -21610826, -3649888, 11177095, 14989547},
+ {-24496721, -11716016, 16959896, 2278463, 12066309, 10137771,
+ 13515641, 2581286, -28487508, 9930240},
+ },
+ {
+ {-17751622, -2097826, 16544300, -13009300, -15914807, -14949081,
+ 18345767, -13403753, 16291481, -5314038},
+ {-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774,
+ 6957617, 4368891, 9788741},
+ {16660756, 7281060, -10830758, 12911820, 20108584, -8101676,
+ -21722536, -8613148, 16250552, -11111103},
+ },
+ {
+ {-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584,
+ 10604807, -30190403, 4782747},
+ {-1354539, 14736941, -7367442, -13292886, 7710542, -14155590,
+ -9981571, 4383045, 22546403, 437323},
+ {31665577, -12180464, -16186830, 1491339, -18368625, 3294682,
+ 27343084, 2786261, -30633590, -14097016},
+ },
+ {
+ {-14467279, -683715, -33374107, 7448552, 19294360, 14334329,
+ -19690631, 2355319, -19284671, -6114373},
+ {15121312, -15796162, 6377020, -6031361, -10798111, -12957845,
+ 18952177, 15496498, -29380133, 11754228},
+ {-2637277, -13483075, 8488727, -14303896, 12728761, -1622493,
+ 7141596, 11724556, 22761615, -10134141},
+ },
+ {
+ {16918416, 11729663, -18083579, 3022987, -31015732, -13339659,
+ -28741185, -12227393, 32851222, 11717399},
+ {11166634, 7338049, -6722523, 4531520, -29468672, -7302055,
+ 31474879, 3483633, -1193175, -4030831},
+ {-185635, 9921305, 31456609, -13536438, -12013818, 13348923,
+ 33142652, 6546660, -19985279, -3948376},
+ },
+ {
+ {-32460596, 11266712, -11197107, -7899103, 31703694, 3855903,
+ -8537131, -12833048, -30772034, -15486313},
+ {-18006477, 12709068, 3991746, -6479188, -21491523, -10550425,
+ -31135347, -16049879, 10928917, 3011958},
+ {-6957757, -15594337, 31696059, 334240, 29576716, 14796075,
+ -30831056, -12805180, 18008031, 10258577},
+ },
+ {
+ {-22448644, 15655569, 7018479, -4410003, -30314266, -1201591,
+ -1853465, 1367120, 25127874, 6671743},
+ {29701166, -14373934, -10878120, 9279288, -17568, 13127210,
+ 21382910, 11042292, 25838796, 4642684},
+ {-20430234, 14955537, -24126347, 8124619, -5369288, -5990470,
+ 30468147, -13900640, 18423289, 4177476},
+ },
+ },
+};
+
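+/* Returns 1 if b is negative and 0 otherwise, using an unsigned shift of the
+ * sign bit rather than a branch on the (secret) value b. */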
+static uint8_t negative(signed char b) {
+ uint32_t x = b;
+ x >>= 31; /* 1: yes; 0: no */
+ return x;
+}
+
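+/* table_select sets t to the babs-th precomputed entry of row pos of
+ * k25519Precomp (babs = |b|, 1..8), or to the neutral element when b == 0,
+ * and negates the result when b is negative.  Every entry of the row is
+ * touched via cmov, so the memory access pattern does not depend on the
+ * secret digit b. */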
+static void table_select(ge_precomp *t, int pos, signed char b) {
+ ge_precomp minust;
+ uint8_t bnegative = negative(b);
+ uint8_t babs = b - ((uint8_t)((-bnegative) & b) << 1);
+
+ ge_precomp_0(t);
+ cmov(t, &k25519Precomp[pos][0], equal(babs, 1));
+ cmov(t, &k25519Precomp[pos][1], equal(babs, 2));
+ cmov(t, &k25519Precomp[pos][2], equal(babs, 3));
+ cmov(t, &k25519Precomp[pos][3], equal(babs, 4));
+ cmov(t, &k25519Precomp[pos][4], equal(babs, 5));
+ cmov(t, &k25519Precomp[pos][5], equal(babs, 6));
+ cmov(t, &k25519Precomp[pos][6], equal(babs, 7));
+ cmov(t, &k25519Precomp[pos][7], equal(babs, 8));
+ fe_copy(minust.yplusx, t->yminusx);
+ fe_copy(minust.yminusx, t->yplusx);
+ fe_neg(minust.xy2d, t->xy2d);
+ cmov(t, &minust, bnegative);
+}
+
+/* h = a * B
+ * where a = a[0]+256*a[1]+...+256^31 a[31]
+ * B is the Ed25519 base point (x,4/5) with x positive.
+ *
+ * Preconditions:
+ * a[31] <= 127 */
+void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t *a) {
+ signed char e[64];
+ signed char carry;
+ ge_p1p1 r;
+ ge_p2 s;
+ ge_precomp t;
+ int i;
+
+ for (i = 0; i < 32; ++i) {
+ e[2 * i + 0] = (a[i] >> 0) & 15;
+ e[2 * i + 1] = (a[i] >> 4) & 15;
+ }
+ /* each e[i] is between 0 and 15 */
+ /* e[63] is between 0 and 7 */
+
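+  /* Recode into signed digits: whenever a digit is 8 or larger, subtract 16
+   * from it and carry one into the next digit, so that
+   * a = e[0] + 16*e[1] + ... + 16^63*e[63] still holds. */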
+ carry = 0;
+ for (i = 0; i < 63; ++i) {
+ e[i] += carry;
+ carry = e[i] + 8;
+ carry >>= 4;
+ e[i] -= carry << 4;
+ }
+ e[63] += carry;
+ /* each e[i] is between -8 and 8 */
+
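+  /* Accumulate the odd-indexed digits first, shift the partial result up by
+   * one hex digit with four doublings, then add the even-indexed digits. */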
+ ge_p3_0(h);
+ for (i = 1; i < 64; i += 2) {
+ table_select(&t, i / 2, e[i]);
+ ge_madd(&r, h, &t);
+ x25519_ge_p1p1_to_p3(h, &r);
+ }
+
+ ge_p3_dbl(&r, h);
+ x25519_ge_p1p1_to_p2(&s, &r);
+ ge_p2_dbl(&r, &s);
+ x25519_ge_p1p1_to_p2(&s, &r);
+ ge_p2_dbl(&r, &s);
+ x25519_ge_p1p1_to_p2(&s, &r);
+ ge_p2_dbl(&r, &s);
+ x25519_ge_p1p1_to_p3(h, &r);
+
+ for (i = 0; i < 64; i += 2) {
+ table_select(&t, i / 2, e[i]);
+ ge_madd(&r, h, &t);
+ x25519_ge_p1p1_to_p3(h, &r);
+ }
+}
+
+#endif
+
+static void cmov_cached(ge_cached *t, ge_cached *u, uint8_t b) {
+ fe_cmov(t->YplusX, u->YplusX, b);
+ fe_cmov(t->YminusX, u->YminusX, b);
+ fe_cmov(t->Z, u->Z, b);
+ fe_cmov(t->T2d, u->T2d, b);
+}
+
+/* r = scalar * A,
+ * where scalar = scalar[0]+256*scalar[1]+...+256^31 scalar[31]. */
+void x25519_ge_scalarmult(ge_p2 *r, const uint8_t *scalar, const ge_p3 *A) {
+ ge_p2 Ai_p2[8];
+ ge_cached Ai[16];
+ ge_p1p1 t;
+
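+  /* Build Ai[i] = i*A for i = 0..15.  Ai_p2 mirrors the multiples 1*A..7*A in
+   * p2 form, since ge_p2_dbl produces 2j*A from Ai_p2[j]. */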
+ ge_cached_0(&Ai[0]);
+ x25519_ge_p3_to_cached(&Ai[1], A);
+ ge_p3_to_p2(&Ai_p2[1], A);
+
+ unsigned i;
+ for (i = 2; i < 16; i += 2) {
+ ge_p2_dbl(&t, &Ai_p2[i / 2]);
+ ge_p1p1_to_cached(&Ai[i], &t);
+ if (i < 8) {
+ x25519_ge_p1p1_to_p2(&Ai_p2[i], &t);
+ }
+ x25519_ge_add(&t, A, &Ai[i]);
+ ge_p1p1_to_cached(&Ai[i + 1], &t);
+ if (i < 7) {
+ x25519_ge_p1p1_to_p2(&Ai_p2[i + 1], &t);
+ }
+ }
+
+ ge_p2_0(r);
+ ge_p3 u;
+
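+  /* Walk the scalar four bits at a time, most significant nibble first: four
+   * doublings, then a constant-time lookup of index*A in Ai[] followed by one
+   * addition. */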
+ for (i = 0; i < 256; i += 4) {
+ ge_p2_dbl(&t, r);
+ x25519_ge_p1p1_to_p2(r, &t);
+ ge_p2_dbl(&t, r);
+ x25519_ge_p1p1_to_p2(r, &t);
+ ge_p2_dbl(&t, r);
+ x25519_ge_p1p1_to_p2(r, &t);
+ ge_p2_dbl(&t, r);
+ x25519_ge_p1p1_to_p3(&u, &t);
+
+ uint8_t index = scalar[31 - i/8];
+ index >>= 4 - (i & 4);
+ index &= 0xf;
+
+ unsigned j;
+ ge_cached selected;
+ ge_cached_0(&selected);
+ for (j = 0; j < 16; j++) {
+ cmov_cached(&selected, &Ai[j], equal(j, index));
+ }
+
+ x25519_ge_add(&t, &u, &selected);
+ x25519_ge_p1p1_to_p2(r, &t);
+ }
+}
+
+#ifdef ED25519
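+/* slide() recodes the 256-bit little-endian value a into signed digits
+ * r[0..255] with a = sum r[i]*2^i, where every nonzero r[i] is odd and lies
+ * in [-15, 15].  The recoding runs in variable time, matching its use in
+ * ge_double_scalarmult_vartime below. */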
+static void slide(signed char *r, const uint8_t *a) {
+ int i;
+ int b;
+ int k;
+
+ for (i = 0; i < 256; ++i) {
+ r[i] = 1 & (a[i >> 3] >> (i & 7));
+ }
+
+ for (i = 0; i < 256; ++i) {
+ if (r[i]) {
+ for (b = 1; b <= 6 && i + b < 256; ++b) {
+ if (r[i + b]) {
+ if (r[i] + (r[i + b] << b) <= 15) {
+ r[i] += r[i + b] << b;
+ r[i + b] = 0;
+ } else if (r[i] - (r[i + b] << b) >= -15) {
+ r[i] -= r[i + b] << b;
+ for (k = i + b; k < 256; ++k) {
+ if (!r[k]) {
+ r[k] = 1;
+ break;
+ }
+ r[k] = 0;
+ }
+ } else {
+ break;
+ }
+ }
+ }
+ }
+ }
+}
+
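+/* Bi[i] = (2*i + 1)*B: the odd multiples B, 3B, 5B, ..., 15B of the Ed25519
+ * base point, stored in the same precomputed form as k25519Precomp. */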
+static const ge_precomp Bi[8] = {
+ {
+ {25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626,
+ -11754271, -6079156, 2047605},
+ {-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692,
+ 5043384, 19500929, -15469378},
+ {-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919,
+ 11864899, -24514362, -4438546},
+ },
+ {
+ {15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600,
+ -14772189, 28944400, -1550024},
+ {16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577,
+ -11775962, 7689662, 11199574},
+ {30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774,
+ 10017326, -17749093, -9920357},
+ },
+ {
+ {10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885,
+ 14515107, -15438304, 10819380},
+ {4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668,
+ 12483688, -12668491, 5581306},
+ {19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350,
+ 13850243, -23678021, -15815942},
+ },
+ {
+ {5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852,
+ 5230134, -23952439, -15175766},
+ {-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025,
+ 16520125, 30598449, 7715701},
+ {28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660,
+ 1370708, 29794553, -1409300},
+ },
+ {
+ {-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211,
+ -1361450, -13062696, 13821877},
+ {-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028,
+ -7212327, 18853322, -14220951},
+ {4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358,
+ -10431137, 2207753, -3209784},
+ },
+ {
+ {-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364,
+ -663000, -31111463, -16132436},
+ {25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789,
+ 15725684, 171356, 6466918},
+ {23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339,
+ -14088058, -30714912, 16193877},
+ },
+ {
+ {-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398,
+ 4729455, -18074513, 9256800},
+ {-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405,
+ 9761698, -19827198, 630305},
+ {-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551,
+ -15960994, -2449256, -14291300},
+ },
+ {
+ {-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575,
+ 15033784, 25105118, -7894876},
+ {-24326370, 15950226, -31801215, -14592823, -11662737, -5090925,
+ 1573892, -2625887, 2198790, -15804619},
+ {-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022,
+ -16236442, -32461234, -12290683},
+ },
+};
+
+/* r = a * A + b * B
+ * where a = a[0]+256*a[1]+...+256^31 a[31]
+ * and b = b[0]+256*b[1]+...+256^31 b[31].
+ * B is the Ed25519 base point (x,4/5) with x positive. */
+static void
+ge_double_scalarmult_vartime(ge_p2 *r, const uint8_t *a,
+ const ge_p3 *A, const uint8_t *b) {
+ signed char aslide[256];
+ signed char bslide[256];
+ ge_cached Ai[8]; /* A,3A,5A,7A,9A,11A,13A,15A */
+ ge_p1p1 t;
+ ge_p3 u;
+ ge_p3 A2;
+ int i;
+
+ slide(aslide, a);
+ slide(bslide, b);
+
+ x25519_ge_p3_to_cached(&Ai[0], A);
+ ge_p3_dbl(&t, A);
+ x25519_ge_p1p1_to_p3(&A2, &t);
+ x25519_ge_add(&t, &A2, &Ai[0]);
+ x25519_ge_p1p1_to_p3(&u, &t);
+ x25519_ge_p3_to_cached(&Ai[1], &u);
+ x25519_ge_add(&t, &A2, &Ai[1]);
+ x25519_ge_p1p1_to_p3(&u, &t);
+ x25519_ge_p3_to_cached(&Ai[2], &u);
+ x25519_ge_add(&t, &A2, &Ai[2]);
+ x25519_ge_p1p1_to_p3(&u, &t);
+ x25519_ge_p3_to_cached(&Ai[3], &u);
+ x25519_ge_add(&t, &A2, &Ai[3]);
+ x25519_ge_p1p1_to_p3(&u, &t);
+ x25519_ge_p3_to_cached(&Ai[4], &u);
+ x25519_ge_add(&t, &A2, &Ai[4]);
+ x25519_ge_p1p1_to_p3(&u, &t);
+ x25519_ge_p3_to_cached(&Ai[5], &u);
+ x25519_ge_add(&t, &A2, &Ai[5]);
+ x25519_ge_p1p1_to_p3(&u, &t);
+ x25519_ge_p3_to_cached(&Ai[6], &u);
+ x25519_ge_add(&t, &A2, &Ai[6]);
+ x25519_ge_p1p1_to_p3(&u, &t);
+ x25519_ge_p3_to_cached(&Ai[7], &u);
+
+ ge_p2_0(r);
+
+ for (i = 255; i >= 0; --i) {
+ if (aslide[i] || bslide[i]) {
+ break;
+ }
+ }
+
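+  /* Shamir's trick: one shared double-and-add chain.  At each bit position,
+   * double r, then add the sliding-window digit of a (a multiple of A) and of
+   * b (a multiple of B) whenever they are nonzero. */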
+ for (; i >= 0; --i) {
+ ge_p2_dbl(&t, r);
+
+ if (aslide[i] > 0) {
+ x25519_ge_p1p1_to_p3(&u, &t);
+ x25519_ge_add(&t, &u, &Ai[aslide[i] / 2]);
+ } else if (aslide[i] < 0) {
+ x25519_ge_p1p1_to_p3(&u, &t);
+ x25519_ge_sub(&t, &u, &Ai[(-aslide[i]) / 2]);
+ }
+
+ if (bslide[i] > 0) {
+ x25519_ge_p1p1_to_p3(&u, &t);
+ ge_madd(&t, &u, &Bi[bslide[i] / 2]);
+ } else if (bslide[i] < 0) {
+ x25519_ge_p1p1_to_p3(&u, &t);
+ ge_msub(&t, &u, &Bi[(-bslide[i]) / 2]);
+ }
+
+ x25519_ge_p1p1_to_p2(r, &t);
+ }
+}
+#endif
+
+/* The set of scalars is \Z/l
+ * where l = 2^252 + 27742317777372353535851937790883648493. */
+
+/* Input:
+ * s[0]+256*s[1]+...+256^63*s[63] = s
+ *
+ * Output:
+ * s[0]+256*s[1]+...+256^31*s[31] = s mod l
+ * where l = 2^252 + 27742317777372353535851937790883648493.
+ * Overwrites s in place. */
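+/* The input is split into limbs of 21 bits (masking with 2097151 = 2^21 - 1),
+ * with s23 holding the remaining top bits.  The limbs from s12 upward are
+ * folded back down using 2^252 = -27742317777372353535851937790883648493
+ * (mod l); the result is then carried and repacked into 32 bytes. */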
+void
+x25519_sc_reduce(uint8_t *s) {
+ int64_t s0 = 2097151 & load_3(s);
+ int64_t s1 = 2097151 & (load_4(s + 2) >> 5);
+ int64_t s2 = 2097151 & (load_3(s + 5) >> 2);
+ int64_t s3 = 2097151 & (load_4(s + 7) >> 7);
+ int64_t s4 = 2097151 & (load_4(s + 10) >> 4);
+ int64_t s5 = 2097151 & (load_3(s + 13) >> 1);
+ int64_t s6 = 2097151 & (load_4(s + 15) >> 6);
+ int64_t s7 = 2097151 & (load_3(s + 18) >> 3);
+ int64_t s8 = 2097151 & load_3(s + 21);
+ int64_t s9 = 2097151 & (load_4(s + 23) >> 5);
+ int64_t s10 = 2097151 & (load_3(s + 26) >> 2);
+ int64_t s11 = 2097151 & (load_4(s + 28) >> 7);
+ int64_t s12 = 2097151 & (load_4(s + 31) >> 4);
+ int64_t s13 = 2097151 & (load_3(s + 34) >> 1);
+ int64_t s14 = 2097151 & (load_4(s + 36) >> 6);
+ int64_t s15 = 2097151 & (load_3(s + 39) >> 3);
+ int64_t s16 = 2097151 & load_3(s + 42);
+ int64_t s17 = 2097151 & (load_4(s + 44) >> 5);
+ int64_t s18 = 2097151 & (load_3(s + 47) >> 2);
+ int64_t s19 = 2097151 & (load_4(s + 49) >> 7);
+ int64_t s20 = 2097151 & (load_4(s + 52) >> 4);
+ int64_t s21 = 2097151 & (load_3(s + 55) >> 1);
+ int64_t s22 = 2097151 & (load_4(s + 57) >> 6);
+ int64_t s23 = (load_4(s + 60) >> 3);
+ int64_t carry0;
+ int64_t carry1;
+ int64_t carry2;
+ int64_t carry3;
+ int64_t carry4;
+ int64_t carry5;
+ int64_t carry6;
+ int64_t carry7;
+ int64_t carry8;
+ int64_t carry9;
+ int64_t carry10;
+ int64_t carry11;
+ int64_t carry12;
+ int64_t carry13;
+ int64_t carry14;
+ int64_t carry15;
+ int64_t carry16;
+
+ s11 += s23 * 666643;
+ s12 += s23 * 470296;
+ s13 += s23 * 654183;
+ s14 -= s23 * 997805;
+ s15 += s23 * 136657;
+ s16 -= s23 * 683901;
+ s23 = 0;
+
+ s10 += s22 * 666643;
+ s11 += s22 * 470296;
+ s12 += s22 * 654183;
+ s13 -= s22 * 997805;
+ s14 += s22 * 136657;
+ s15 -= s22 * 683901;
+ s22 = 0;
+
+ s9 += s21 * 666643;
+ s10 += s21 * 470296;
+ s11 += s21 * 654183;
+ s12 -= s21 * 997805;
+ s13 += s21 * 136657;
+ s14 -= s21 * 683901;
+ s21 = 0;
+
+ s8 += s20 * 666643;
+ s9 += s20 * 470296;
+ s10 += s20 * 654183;
+ s11 -= s20 * 997805;
+ s12 += s20 * 136657;
+ s13 -= s20 * 683901;
+ s20 = 0;
+
+ s7 += s19 * 666643;
+ s8 += s19 * 470296;
+ s9 += s19 * 654183;
+ s10 -= s19 * 997805;
+ s11 += s19 * 136657;
+ s12 -= s19 * 683901;
+ s19 = 0;
+
+ s6 += s18 * 666643;
+ s7 += s18 * 470296;
+ s8 += s18 * 654183;
+ s9 -= s18 * 997805;
+ s10 += s18 * 136657;
+ s11 -= s18 * 683901;
+ s18 = 0;
+
+ carry6 = (s6 + (1 << 20)) >> 21;
+ s7 += carry6;
+ s6 -= carry6 << 21;
+ carry8 = (s8 + (1 << 20)) >> 21;
+ s9 += carry8;
+ s8 -= carry8 << 21;
+ carry10 = (s10 + (1 << 20)) >> 21;
+ s11 += carry10;
+ s10 -= carry10 << 21;
+ carry12 = (s12 + (1 << 20)) >> 21;
+ s13 += carry12;
+ s12 -= carry12 << 21;
+ carry14 = (s14 + (1 << 20)) >> 21;
+ s15 += carry14;
+ s14 -= carry14 << 21;
+ carry16 = (s16 + (1 << 20)) >> 21;
+ s17 += carry16;
+ s16 -= carry16 << 21;
+
+ carry7 = (s7 + (1 << 20)) >> 21;
+ s8 += carry7;
+ s7 -= carry7 << 21;
+ carry9 = (s9 + (1 << 20)) >> 21;
+ s10 += carry9;
+ s9 -= carry9 << 21;
+ carry11 = (s11 + (1 << 20)) >> 21;
+ s12 += carry11;
+ s11 -= carry11 << 21;
+ carry13 = (s13 + (1 << 20)) >> 21;
+ s14 += carry13;
+ s13 -= carry13 << 21;
+ carry15 = (s15 + (1 << 20)) >> 21;
+ s16 += carry15;
+ s15 -= carry15 << 21;
+
+ s5 += s17 * 666643;
+ s6 += s17 * 470296;
+ s7 += s17 * 654183;
+ s8 -= s17 * 997805;
+ s9 += s17 * 136657;
+ s10 -= s17 * 683901;
+ s17 = 0;
+
+ s4 += s16 * 666643;
+ s5 += s16 * 470296;
+ s6 += s16 * 654183;
+ s7 -= s16 * 997805;
+ s8 += s16 * 136657;
+ s9 -= s16 * 683901;
+ s16 = 0;
+
+ s3 += s15 * 666643;
+ s4 += s15 * 470296;
+ s5 += s15 * 654183;
+ s6 -= s15 * 997805;
+ s7 += s15 * 136657;
+ s8 -= s15 * 683901;
+ s15 = 0;
+
+ s2 += s14 * 666643;
+ s3 += s14 * 470296;
+ s4 += s14 * 654183;
+ s5 -= s14 * 997805;
+ s6 += s14 * 136657;
+ s7 -= s14 * 683901;
+ s14 = 0;
+
+ s1 += s13 * 666643;
+ s2 += s13 * 470296;
+ s3 += s13 * 654183;
+ s4 -= s13 * 997805;
+ s5 += s13 * 136657;
+ s6 -= s13 * 683901;
+ s13 = 0;
+
+ s0 += s12 * 666643;
+ s1 += s12 * 470296;
+ s2 += s12 * 654183;
+ s3 -= s12 * 997805;
+ s4 += s12 * 136657;
+ s5 -= s12 * 683901;
+ s12 = 0;
+
+ carry0 = (s0 + (1 << 20)) >> 21;
+ s1 += carry0;
+ s0 -= carry0 << 21;
+ carry2 = (s2 + (1 << 20)) >> 21;
+ s3 += carry2;
+ s2 -= carry2 << 21;
+ carry4 = (s4 + (1 << 20)) >> 21;
+ s5 += carry4;
+ s4 -= carry4 << 21;
+ carry6 = (s6 + (1 << 20)) >> 21;
+ s7 += carry6;
+ s6 -= carry6 << 21;
+ carry8 = (s8 + (1 << 20)) >> 21;
+ s9 += carry8;
+ s8 -= carry8 << 21;
+ carry10 = (s10 + (1 << 20)) >> 21;
+ s11 += carry10;
+ s10 -= carry10 << 21;
+
+ carry1 = (s1 + (1 << 20)) >> 21;
+ s2 += carry1;
+ s1 -= carry1 << 21;
+ carry3 = (s3 + (1 << 20)) >> 21;
+ s4 += carry3;
+ s3 -= carry3 << 21;
+ carry5 = (s5 + (1 << 20)) >> 21;
+ s6 += carry5;
+ s5 -= carry5 << 21;
+ carry7 = (s7 + (1 << 20)) >> 21;
+ s8 += carry7;
+ s7 -= carry7 << 21;
+ carry9 = (s9 + (1 << 20)) >> 21;
+ s10 += carry9;
+ s9 -= carry9 << 21;
+ carry11 = (s11 + (1 << 20)) >> 21;
+ s12 += carry11;
+ s11 -= carry11 << 21;
+
+ s0 += s12 * 666643;
+ s1 += s12 * 470296;
+ s2 += s12 * 654183;
+ s3 -= s12 * 997805;
+ s4 += s12 * 136657;
+ s5 -= s12 * 683901;
+ s12 = 0;
+
+ carry0 = s0 >> 21;
+ s1 += carry0;
+ s0 -= carry0 << 21;
+ carry1 = s1 >> 21;
+ s2 += carry1;
+ s1 -= carry1 << 21;
+ carry2 = s2 >> 21;
+ s3 += carry2;
+ s2 -= carry2 << 21;
+ carry3 = s3 >> 21;
+ s4 += carry3;
+ s3 -= carry3 << 21;
+ carry4 = s4 >> 21;
+ s5 += carry4;
+ s4 -= carry4 << 21;
+ carry5 = s5 >> 21;
+ s6 += carry5;
+ s5 -= carry5 << 21;
+ carry6 = s6 >> 21;
+ s7 += carry6;
+ s6 -= carry6 << 21;
+ carry7 = s7 >> 21;
+ s8 += carry7;
+ s7 -= carry7 << 21;
+ carry8 = s8 >> 21;
+ s9 += carry8;
+ s8 -= carry8 << 21;
+ carry9 = s9 >> 21;
+ s10 += carry9;
+ s9 -= carry9 << 21;
+ carry10 = s10 >> 21;
+ s11 += carry10;
+ s10 -= carry10 << 21;
+ carry11 = s11 >> 21;
+ s12 += carry11;
+ s11 -= carry11 << 21;
+
+ s0 += s12 * 666643;
+ s1 += s12 * 470296;
+ s2 += s12 * 654183;
+ s3 -= s12 * 997805;
+ s4 += s12 * 136657;
+ s5 -= s12 * 683901;
+ s12 = 0;
+
+ carry0 = s0 >> 21;
+ s1 += carry0;
+ s0 -= carry0 << 21;
+ carry1 = s1 >> 21;
+ s2 += carry1;
+ s1 -= carry1 << 21;
+ carry2 = s2 >> 21;
+ s3 += carry2;
+ s2 -= carry2 << 21;
+ carry3 = s3 >> 21;
+ s4 += carry3;
+ s3 -= carry3 << 21;
+ carry4 = s4 >> 21;
+ s5 += carry4;
+ s4 -= carry4 << 21;
+ carry5 = s5 >> 21;
+ s6 += carry5;
+ s5 -= carry5 << 21;
+ carry6 = s6 >> 21;
+ s7 += carry6;
+ s6 -= carry6 << 21;
+ carry7 = s7 >> 21;
+ s8 += carry7;
+ s7 -= carry7 << 21;
+ carry8 = s8 >> 21;
+ s9 += carry8;
+ s8 -= carry8 << 21;
+ carry9 = s9 >> 21;
+ s10 += carry9;
+ s9 -= carry9 << 21;
+ carry10 = s10 >> 21;
+ s11 += carry10;
+ s10 -= carry10 << 21;
+
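+  /* Repack the twelve reduced 21-bit limbs s0..s11 into 32 little-endian
+   * bytes. */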
+ s[0] = s0 >> 0;
+ s[1] = s0 >> 8;
+ s[2] = (s0 >> 16) | (s1 << 5);
+ s[3] = s1 >> 3;
+ s[4] = s1 >> 11;
+ s[5] = (s1 >> 19) | (s2 << 2);
+ s[6] = s2 >> 6;
+ s[7] = (s2 >> 14) | (s3 << 7);
+ s[8] = s3 >> 1;
+ s[9] = s3 >> 9;
+ s[10] = (s3 >> 17) | (s4 << 4);
+ s[11] = s4 >> 4;
+ s[12] = s4 >> 12;
+ s[13] = (s4 >> 20) | (s5 << 1);
+ s[14] = s5 >> 7;
+ s[15] = (s5 >> 15) | (s6 << 6);
+ s[16] = s6 >> 2;
+ s[17] = s6 >> 10;
+ s[18] = (s6 >> 18) | (s7 << 3);
+ s[19] = s7 >> 5;
+ s[20] = s7 >> 13;
+ s[21] = s8 >> 0;
+ s[22] = s8 >> 8;
+ s[23] = (s8 >> 16) | (s9 << 5);
+ s[24] = s9 >> 3;
+ s[25] = s9 >> 11;
+ s[26] = (s9 >> 19) | (s10 << 2);
+ s[27] = s10 >> 6;
+ s[28] = (s10 >> 14) | (s11 << 7);
+ s[29] = s11 >> 1;
+ s[30] = s11 >> 9;
+ s[31] = s11 >> 17;
+}
+
+#ifdef ED25519
+/* Input:
+ * a[0]+256*a[1]+...+256^31*a[31] = a
+ * b[0]+256*b[1]+...+256^31*b[31] = b
+ * c[0]+256*c[1]+...+256^31*c[31] = c
+ *
+ * Output:
+ * s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l
+ * where l = 2^252 + 27742317777372353535851937790883648493. */
+static void
+sc_muladd(uint8_t *s, const uint8_t *a, const uint8_t *b,
+ const uint8_t *c)
+{
+ int64_t a0 = 2097151 & load_3(a);
+ int64_t a1 = 2097151 & (load_4(a + 2) >> 5);
+ int64_t a2 = 2097151 & (load_3(a + 5) >> 2);
+ int64_t a3 = 2097151 & (load_4(a + 7) >> 7);
+ int64_t a4 = 2097151 & (load_4(a + 10) >> 4);
+ int64_t a5 = 2097151 & (load_3(a + 13) >> 1);
+ int64_t a6 = 2097151 & (load_4(a + 15) >> 6);
+ int64_t a7 = 2097151 & (load_3(a + 18) >> 3);
+ int64_t a8 = 2097151 & load_3(a + 21);
+ int64_t a9 = 2097151 & (load_4(a + 23) >> 5);
+ int64_t a10 = 2097151 & (load_3(a + 26) >> 2);
+ int64_t a11 = (load_4(a + 28) >> 7);
+ int64_t b0 = 2097151 & load_3(b);
+ int64_t b1 = 2097151 & (load_4(b + 2) >> 5);
+ int64_t b2 = 2097151 & (load_3(b + 5) >> 2);
+ int64_t b3 = 2097151 & (load_4(b + 7) >> 7);
+ int64_t b4 = 2097151 & (load_4(b + 10) >> 4);
+ int64_t b5 = 2097151 & (load_3(b + 13) >> 1);
+ int64_t b6 = 2097151 & (load_4(b + 15) >> 6);
+ int64_t b7 = 2097151 & (load_3(b + 18) >> 3);
+ int64_t b8 = 2097151 & load_3(b + 21);
+ int64_t b9 = 2097151 & (load_4(b + 23) >> 5);
+ int64_t b10 = 2097151 & (load_3(b + 26) >> 2);
+ int64_t b11 = (load_4(b + 28) >> 7);
+ int64_t c0 = 2097151 & load_3(c);
+ int64_t c1 = 2097151 & (load_4(c + 2) >> 5);
+ int64_t c2 = 2097151 & (load_3(c + 5) >> 2);
+ int64_t c3 = 2097151 & (load_4(c + 7) >> 7);
+ int64_t c4 = 2097151 & (load_4(c + 10) >> 4);
+ int64_t c5 = 2097151 & (load_3(c + 13) >> 1);
+ int64_t c6 = 2097151 & (load_4(c + 15) >> 6);
+ int64_t c7 = 2097151 & (load_3(c + 18) >> 3);
+ int64_t c8 = 2097151 & load_3(c + 21);
+ int64_t c9 = 2097151 & (load_4(c + 23) >> 5);
+ int64_t c10 = 2097151 & (load_3(c + 26) >> 2);
+ int64_t c11 = (load_4(c + 28) >> 7);
+ int64_t s0;
+ int64_t s1;
+ int64_t s2;
+ int64_t s3;
+ int64_t s4;
+ int64_t s5;
+ int64_t s6;
+ int64_t s7;
+ int64_t s8;
+ int64_t s9;
+ int64_t s10;
+ int64_t s11;
+ int64_t s12;
+ int64_t s13;
+ int64_t s14;
+ int64_t s15;
+ int64_t s16;
+ int64_t s17;
+ int64_t s18;
+ int64_t s19;
+ int64_t s20;
+ int64_t s21;
+ int64_t s22;
+ int64_t s23;
+ int64_t carry0;
+ int64_t carry1;
+ int64_t carry2;
+ int64_t carry3;
+ int64_t carry4;
+ int64_t carry5;
+ int64_t carry6;
+ int64_t carry7;
+ int64_t carry8;
+ int64_t carry9;
+ int64_t carry10;
+ int64_t carry11;
+ int64_t carry12;
+ int64_t carry13;
+ int64_t carry14;
+ int64_t carry15;
+ int64_t carry16;
+ int64_t carry17;
+ int64_t carry18;
+ int64_t carry19;
+ int64_t carry20;
+ int64_t carry21;
+ int64_t carry22;
+
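+  /* Schoolbook multiplication: s_k is the sum of all a_i*b_j with i + j == k,
+   * plus the limb c_k of the addend for k < 12.  Every partial sum fits in an
+   * int64_t. */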
+ s0 = c0 + a0 * b0;
+ s1 = c1 + a0 * b1 + a1 * b0;
+ s2 = c2 + a0 * b2 + a1 * b1 + a2 * b0;
+ s3 = c3 + a0 * b3 + a1 * b2 + a2 * b1 + a3 * b0;
+ s4 = c4 + a0 * b4 + a1 * b3 + a2 * b2 + a3 * b1 + a4 * b0;
+ s5 = c5 + a0 * b5 + a1 * b4 + a2 * b3 + a3 * b2 + a4 * b1 + a5 * b0;
+ s6 = c6 + a0 * b6 + a1 * b5 + a2 * b4 + a3 * b3 + a4 * b2 + a5 * b1 + a6 * b0;
+ s7 = c7 + a0 * b7 + a1 * b6 + a2 * b5 + a3 * b4 + a4 * b3 + a5 * b2 +
+ a6 * b1 + a7 * b0;
+ s8 = c8 + a0 * b8 + a1 * b7 + a2 * b6 + a3 * b5 + a4 * b4 + a5 * b3 +
+ a6 * b2 + a7 * b1 + a8 * b0;
+ s9 = c9 + a0 * b9 + a1 * b8 + a2 * b7 + a3 * b6 + a4 * b5 + a5 * b4 +
+ a6 * b3 + a7 * b2 + a8 * b1 + a9 * b0;
+ s10 = c10 + a0 * b10 + a1 * b9 + a2 * b8 + a3 * b7 + a4 * b6 + a5 * b5 +
+ a6 * b4 + a7 * b3 + a8 * b2 + a9 * b1 + a10 * b0;
+ s11 = c11 + a0 * b11 + a1 * b10 + a2 * b9 + a3 * b8 + a4 * b7 + a5 * b6 +
+ a6 * b5 + a7 * b4 + a8 * b3 + a9 * b2 + a10 * b1 + a11 * b0;
+ s12 = a1 * b11 + a2 * b10 + a3 * b9 + a4 * b8 + a5 * b7 + a6 * b6 + a7 * b5 +
+ a8 * b4 + a9 * b3 + a10 * b2 + a11 * b1;
+ s13 = a2 * b11 + a3 * b10 + a4 * b9 + a5 * b8 + a6 * b7 + a7 * b6 + a8 * b5 +
+ a9 * b4 + a10 * b3 + a11 * b2;
+ s14 = a3 * b11 + a4 * b10 + a5 * b9 + a6 * b8 + a7 * b7 + a8 * b6 + a9 * b5 +
+ a10 * b4 + a11 * b3;
+ s15 = a4 * b11 + a5 * b10 + a6 * b9 + a7 * b8 + a8 * b7 + a9 * b6 + a10 * b5 +
+ a11 * b4;
+ s16 = a5 * b11 + a6 * b10 + a7 * b9 + a8 * b8 + a9 * b7 + a10 * b6 + a11 * b5;
+ s17 = a6 * b11 + a7 * b10 + a8 * b9 + a9 * b8 + a10 * b7 + a11 * b6;
+ s18 = a7 * b11 + a8 * b10 + a9 * b9 + a10 * b8 + a11 * b7;
+ s19 = a8 * b11 + a9 * b10 + a10 * b9 + a11 * b8;
+ s20 = a9 * b11 + a10 * b10 + a11 * b9;
+ s21 = a10 * b11 + a11 * b10;
+ s22 = a11 * b11;
+ s23 = 0;
+
+ carry0 = (s0 + (1 << 20)) >> 21;
+ s1 += carry0;
+ s0 -= carry0 << 21;
+ carry2 = (s2 + (1 << 20)) >> 21;
+ s3 += carry2;
+ s2 -= carry2 << 21;
+ carry4 = (s4 + (1 << 20)) >> 21;
+ s5 += carry4;
+ s4 -= carry4 << 21;
+ carry6 = (s6 + (1 << 20)) >> 21;
+ s7 += carry6;
+ s6 -= carry6 << 21;
+ carry8 = (s8 + (1 << 20)) >> 21;
+ s9 += carry8;
+ s8 -= carry8 << 21;
+ carry10 = (s10 + (1 << 20)) >> 21;
+ s11 += carry10;
+ s10 -= carry10 << 21;
+ carry12 = (s12 + (1 << 20)) >> 21;
+ s13 += carry12;
+ s12 -= carry12 << 21;
+ carry14 = (s14 + (1 << 20)) >> 21;
+ s15 += carry14;
+ s14 -= carry14 << 21;
+ carry16 = (s16 + (1 << 20)) >> 21;
+ s17 += carry16;
+ s16 -= carry16 << 21;
+ carry18 = (s18 + (1 << 20)) >> 21;
+ s19 += carry18;
+ s18 -= carry18 << 21;
+ carry20 = (s20 + (1 << 20)) >> 21;
+ s21 += carry20;
+ s20 -= carry20 << 21;
+ carry22 = (s22 + (1 << 20)) >> 21;
+ s23 += carry22;
+ s22 -= carry22 << 21;
+
+ carry1 = (s1 + (1 << 20)) >> 21;
+ s2 += carry1;
+ s1 -= carry1 << 21;
+ carry3 = (s3 + (1 << 20)) >> 21;
+ s4 += carry3;
+ s3 -= carry3 << 21;
+ carry5 = (s5 + (1 << 20)) >> 21;
+ s6 += carry5;
+ s5 -= carry5 << 21;
+ carry7 = (s7 + (1 << 20)) >> 21;
+ s8 += carry7;
+ s7 -= carry7 << 21;
+ carry9 = (s9 + (1 << 20)) >> 21;
+ s10 += carry9;
+ s9 -= carry9 << 21;
+ carry11 = (s11 + (1 << 20)) >> 21;
+ s12 += carry11;
+ s11 -= carry11 << 21;
+ carry13 = (s13 + (1 << 20)) >> 21;
+ s14 += carry13;
+ s13 -= carry13 << 21;
+ carry15 = (s15 + (1 << 20)) >> 21;
+ s16 += carry15;
+ s15 -= carry15 << 21;
+ carry17 = (s17 + (1 << 20)) >> 21;
+ s18 += carry17;
+ s17 -= carry17 << 21;
+ carry19 = (s19 + (1 << 20)) >> 21;
+ s20 += carry19;
+ s19 -= carry19 << 21;
+ carry21 = (s21 + (1 << 20)) >> 21;
+ s22 += carry21;
+ s21 -= carry21 << 21;
+
+ s11 += s23 * 666643;
+ s12 += s23 * 470296;
+ s13 += s23 * 654183;
+ s14 -= s23 * 997805;
+ s15 += s23 * 136657;
+ s16 -= s23 * 683901;
+ s23 = 0;
+
+ s10 += s22 * 666643;
+ s11 += s22 * 470296;
+ s12 += s22 * 654183;
+ s13 -= s22 * 997805;
+ s14 += s22 * 136657;
+ s15 -= s22 * 683901;
+ s22 = 0;
+
+ s9 += s21 * 666643;
+ s10 += s21 * 470296;
+ s11 += s21 * 654183;
+ s12 -= s21 * 997805;
+ s13 += s21 * 136657;
+ s14 -= s21 * 683901;
+ s21 = 0;
+
+ s8 += s20 * 666643;
+ s9 += s20 * 470296;
+ s10 += s20 * 654183;
+ s11 -= s20 * 997805;
+ s12 += s20 * 136657;
+ s13 -= s20 * 683901;
+ s20 = 0;
+
+ s7 += s19 * 666643;
+ s8 += s19 * 470296;
+ s9 += s19 * 654183;
+ s10 -= s19 * 997805;
+ s11 += s19 * 136657;
+ s12 -= s19 * 683901;
+ s19 = 0;
+
+ s6 += s18 * 666643;
+ s7 += s18 * 470296;
+ s8 += s18 * 654183;
+ s9 -= s18 * 997805;
+ s10 += s18 * 136657;
+ s11 -= s18 * 683901;
+ s18 = 0;
+
+ carry6 = (s6 + (1 << 20)) >> 21;
+ s7 += carry6;
+ s6 -= carry6 << 21;
+ carry8 = (s8 + (1 << 20)) >> 21;
+ s9 += carry8;
+ s8 -= carry8 << 21;
+ carry10 = (s10 + (1 << 20)) >> 21;
+ s11 += carry10;
+ s10 -= carry10 << 21;
+ carry12 = (s12 + (1 << 20)) >> 21;
+ s13 += carry12;
+ s12 -= carry12 << 21;
+ carry14 = (s14 + (1 << 20)) >> 21;
+ s15 += carry14;
+ s14 -= carry14 << 21;
+ carry16 = (s16 + (1 << 20)) >> 21;
+ s17 += carry16;
+ s16 -= carry16 << 21;
+
+ carry7 = (s7 + (1 << 20)) >> 21;
+ s8 += carry7;
+ s7 -= carry7 << 21;
+ carry9 = (s9 + (1 << 20)) >> 21;
+ s10 += carry9;
+ s9 -= carry9 << 21;
+ carry11 = (s11 + (1 << 20)) >> 21;
+ s12 += carry11;
+ s11 -= carry11 << 21;
+ carry13 = (s13 + (1 << 20)) >> 21;
+ s14 += carry13;
+ s13 -= carry13 << 21;
+ carry15 = (s15 + (1 << 20)) >> 21;
+ s16 += carry15;
+ s15 -= carry15 << 21;
+
+ s5 += s17 * 666643;
+ s6 += s17 * 470296;
+ s7 += s17 * 654183;
+ s8 -= s17 * 997805;
+ s9 += s17 * 136657;
+ s10 -= s17 * 683901;
+ s17 = 0;
+
+ s4 += s16 * 666643;
+ s5 += s16 * 470296;
+ s6 += s16 * 654183;
+ s7 -= s16 * 997805;
+ s8 += s16 * 136657;
+ s9 -= s16 * 683901;
+ s16 = 0;
+
+ s3 += s15 * 666643;
+ s4 += s15 * 470296;
+ s5 += s15 * 654183;
+ s6 -= s15 * 997805;
+ s7 += s15 * 136657;
+ s8 -= s15 * 683901;
+ s15 = 0;
+
+ s2 += s14 * 666643;
+ s3 += s14 * 470296;
+ s4 += s14 * 654183;
+ s5 -= s14 * 997805;
+ s6 += s14 * 136657;
+ s7 -= s14 * 683901;
+ s14 = 0;
+
+ s1 += s13 * 666643;
+ s2 += s13 * 470296;
+ s3 += s13 * 654183;
+ s4 -= s13 * 997805;
+ s5 += s13 * 136657;
+ s6 -= s13 * 683901;
+ s13 = 0;
+
+ s0 += s12 * 666643;
+ s1 += s12 * 470296;
+ s2 += s12 * 654183;
+ s3 -= s12 * 997805;
+ s4 += s12 * 136657;
+ s5 -= s12 * 683901;
+ s12 = 0;
+
+ carry0 = (s0 + (1 << 20)) >> 21;
+ s1 += carry0;
+ s0 -= carry0 << 21;
+ carry2 = (s2 + (1 << 20)) >> 21;
+ s3 += carry2;
+ s2 -= carry2 << 21;
+ carry4 = (s4 + (1 << 20)) >> 21;
+ s5 += carry4;
+ s4 -= carry4 << 21;
+ carry6 = (s6 + (1 << 20)) >> 21;
+ s7 += carry6;
+ s6 -= carry6 << 21;
+ carry8 = (s8 + (1 << 20)) >> 21;
+ s9 += carry8;
+ s8 -= carry8 << 21;
+ carry10 = (s10 + (1 << 20)) >> 21;
+ s11 += carry10;
+ s10 -= carry10 << 21;
+
+ carry1 = (s1 + (1 << 20)) >> 21;
+ s2 += carry1;
+ s1 -= carry1 << 21;
+ carry3 = (s3 + (1 << 20)) >> 21;
+ s4 += carry3;
+ s3 -= carry3 << 21;
+ carry5 = (s5 + (1 << 20)) >> 21;
+ s6 += carry5;
+ s5 -= carry5 << 21;
+ carry7 = (s7 + (1 << 20)) >> 21;
+ s8 += carry7;
+ s7 -= carry7 << 21;
+ carry9 = (s9 + (1 << 20)) >> 21;
+ s10 += carry9;
+ s9 -= carry9 << 21;
+ carry11 = (s11 + (1 << 20)) >> 21;
+ s12 += carry11;
+ s11 -= carry11 << 21;
+
+ s0 += s12 * 666643;
+ s1 += s12 * 470296;
+ s2 += s12 * 654183;
+ s3 -= s12 * 997805;
+ s4 += s12 * 136657;
+ s5 -= s12 * 683901;
+ s12 = 0;
+
+ carry0 = s0 >> 21;
+ s1 += carry0;
+ s0 -= carry0 << 21;
+ carry1 = s1 >> 21;
+ s2 += carry1;
+ s1 -= carry1 << 21;
+ carry2 = s2 >> 21;
+ s3 += carry2;
+ s2 -= carry2 << 21;
+ carry3 = s3 >> 21;
+ s4 += carry3;
+ s3 -= carry3 << 21;
+ carry4 = s4 >> 21;
+ s5 += carry4;
+ s4 -= carry4 << 21;
+ carry5 = s5 >> 21;
+ s6 += carry5;
+ s5 -= carry5 << 21;
+ carry6 = s6 >> 21;
+ s7 += carry6;
+ s6 -= carry6 << 21;
+ carry7 = s7 >> 21;
+ s8 += carry7;
+ s7 -= carry7 << 21;
+ carry8 = s8 >> 21;
+ s9 += carry8;
+ s8 -= carry8 << 21;
+ carry9 = s9 >> 21;
+ s10 += carry9;
+ s9 -= carry9 << 21;
+ carry10 = s10 >> 21;
+ s11 += carry10;
+ s10 -= carry10 << 21;
+ carry11 = s11 >> 21;
+ s12 += carry11;
+ s11 -= carry11 << 21;
+
+ s0 += s12 * 666643;
+ s1 += s12 * 470296;
+ s2 += s12 * 654183;
+ s3 -= s12 * 997805;
+ s4 += s12 * 136657;
+ s5 -= s12 * 683901;
+ s12 = 0;
+
+ carry0 = s0 >> 21;
+ s1 += carry0;
+ s0 -= carry0 << 21;
+ carry1 = s1 >> 21;
+ s2 += carry1;
+ s1 -= carry1 << 21;
+ carry2 = s2 >> 21;
+ s3 += carry2;
+ s2 -= carry2 << 21;
+ carry3 = s3 >> 21;
+ s4 += carry3;
+ s3 -= carry3 << 21;
+ carry4 = s4 >> 21;
+ s5 += carry4;
+ s4 -= carry4 << 21;
+ carry5 = s5 >> 21;
+ s6 += carry5;
+ s5 -= carry5 << 21;
+ carry6 = s6 >> 21;
+ s7 += carry6;
+ s6 -= carry6 << 21;
+ carry7 = s7 >> 21;
+ s8 += carry7;
+ s7 -= carry7 << 21;
+ carry8 = s8 >> 21;
+ s9 += carry8;
+ s8 -= carry8 << 21;
+ carry9 = s9 >> 21;
+ s10 += carry9;
+ s9 -= carry9 << 21;
+ carry10 = s10 >> 21;
+ s11 += carry10;
+ s10 -= carry10 << 21;
+
+ s[0] = s0 >> 0;
+ s[1] = s0 >> 8;
+ s[2] = (s0 >> 16) | (s1 << 5);
+ s[3] = s1 >> 3;
+ s[4] = s1 >> 11;
+ s[5] = (s1 >> 19) | (s2 << 2);
+ s[6] = s2 >> 6;
+ s[7] = (s2 >> 14) | (s3 << 7);
+ s[8] = s3 >> 1;
+ s[9] = s3 >> 9;
+ s[10] = (s3 >> 17) | (s4 << 4);
+ s[11] = s4 >> 4;
+ s[12] = s4 >> 12;
+ s[13] = (s4 >> 20) | (s5 << 1);
+ s[14] = s5 >> 7;
+ s[15] = (s5 >> 15) | (s6 << 6);
+ s[16] = s6 >> 2;
+ s[17] = s6 >> 10;
+ s[18] = (s6 >> 18) | (s7 << 3);
+ s[19] = s7 >> 5;
+ s[20] = s7 >> 13;
+ s[21] = s8 >> 0;
+ s[22] = s8 >> 8;
+ s[23] = (s8 >> 16) | (s9 << 5);
+ s[24] = s9 >> 3;
+ s[25] = s9 >> 11;
+ s[26] = (s9 >> 19) | (s10 << 2);
+ s[27] = s10 >> 6;
+ s[28] = (s10 >> 14) | (s11 << 7);
+ s[29] = s11 >> 1;
+ s[30] = s11 >> 9;
+ s[31] = s11 >> 17;
+}
+#endif
+
+#ifdef ED25519
+void ED25519_keypair(uint8_t out_public_key[32], uint8_t out_private_key[64]) {
+ uint8_t seed[32];
+ arc4random_buf(seed, 32);
+
+ uint8_t az[SHA512_DIGEST_LENGTH];
+ SHA512(seed, 32, az);
+
+ az[0] &= 248;
+ az[31] &= 63;
+ az[31] |= 64;
+
+ ge_p3 A;
+ x25519_ge_scalarmult_base(&A, az);
+ ge_p3_tobytes(out_public_key, &A);
+
+ memcpy(out_private_key, seed, 32);
+ memmove(out_private_key + 32, out_public_key, 32);
+}
+
+int ED25519_sign(uint8_t *out_sig, const uint8_t *message, size_t message_len,
+ const uint8_t private_key[64]) {
+ uint8_t az[SHA512_DIGEST_LENGTH];
+ SHA512(private_key, 32, az);
+
+ az[0] &= 248;
+ az[31] &= 63;
+ az[31] |= 64;
+
+ SHA512_CTX hash_ctx;
+ SHA512_Init(&hash_ctx);
+ SHA512_Update(&hash_ctx, az + 32, 32);
+ SHA512_Update(&hash_ctx, message, message_len);
+ uint8_t nonce[SHA512_DIGEST_LENGTH];
+ SHA512_Final(nonce, &hash_ctx);
+
+ x25519_sc_reduce(nonce);
+ ge_p3 R;
+ x25519_ge_scalarmult_base(&R, nonce);
+ ge_p3_tobytes(out_sig, &R);
+
+ SHA512_Init(&hash_ctx);
+ SHA512_Update(&hash_ctx, out_sig, 32);
+ SHA512_Update(&hash_ctx, private_key + 32, 32);
+ SHA512_Update(&hash_ctx, message, message_len);
+ uint8_t hram[SHA512_DIGEST_LENGTH];
+ SHA512_Final(hram, &hash_ctx);
+
+ x25519_sc_reduce(hram);
+ sc_muladd(out_sig + 32, hram, az, nonce);
+
+ return 1;
+}
+
+int ED25519_verify(const uint8_t *message, size_t message_len,
+ const uint8_t signature[64], const uint8_t public_key[32]) {
+ ge_p3 A;
+ if ((signature[63] & 224) != 0 ||
+ x25519_ge_frombytes_vartime(&A, public_key) != 0) {
+ return 0;
+ }
+
+ fe_neg(A.X, A.X);
+ fe_neg(A.T, A.T);
+
+ uint8_t pkcopy[32];
+ memcpy(pkcopy, public_key, 32);
+ uint8_t rcopy[32];
+ memcpy(rcopy, signature, 32);
+ uint8_t scopy[32];
+ memcpy(scopy, signature + 32, 32);
+
+ SHA512_CTX hash_ctx;
+ SHA512_Init(&hash_ctx);
+ SHA512_Update(&hash_ctx, signature, 32);
+ SHA512_Update(&hash_ctx, public_key, 32);
+ SHA512_Update(&hash_ctx, message, message_len);
+ uint8_t h[SHA512_DIGEST_LENGTH];
+ SHA512_Final(h, &hash_ctx);
+
+ x25519_sc_reduce(h);
+
+ ge_p2 R;
+ ge_double_scalarmult_vartime(&R, h, &A, scopy);
+
+ uint8_t rcheck[32];
+ x25519_ge_tobytes(rcheck, &R);
+
+ return timingsafe_memcmp(rcheck, rcopy, sizeof(rcheck)) == 0;
+}
+#endif
+
+/* Replace (f,g) with (g,f) if b == 1;
+ * replace (f,g) with (f,g) if b == 0.
+ *
+ * Preconditions: b in {0,1}. */
+static void fe_cswap(fe f, fe g, unsigned int b) {
+ b = 0-b;
+ unsigned i;
+ for (i = 0; i < 10; i++) {
+ int32_t x = f[i] ^ g[i];
+ x &= b;
+ f[i] ^= x;
+ g[i] ^= x;
+ }
+}
+
+/* h = f * 121666
+ * Can overlap h with f.
+ *
+ * Preconditions:
+ * |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+ *
+ * Postconditions:
+ * |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. */
+static void fe_mul121666(fe h, fe f) {
+ int32_t f0 = f[0];
+ int32_t f1 = f[1];
+ int32_t f2 = f[2];
+ int32_t f3 = f[3];
+ int32_t f4 = f[4];
+ int32_t f5 = f[5];
+ int32_t f6 = f[6];
+ int32_t f7 = f[7];
+ int32_t f8 = f[8];
+ int32_t f9 = f[9];
+ int64_t h0 = f0 * (int64_t) 121666;
+ int64_t h1 = f1 * (int64_t) 121666;
+ int64_t h2 = f2 * (int64_t) 121666;
+ int64_t h3 = f3 * (int64_t) 121666;
+ int64_t h4 = f4 * (int64_t) 121666;
+ int64_t h5 = f5 * (int64_t) 121666;
+ int64_t h6 = f6 * (int64_t) 121666;
+ int64_t h7 = f7 * (int64_t) 121666;
+ int64_t h8 = f8 * (int64_t) 121666;
+ int64_t h9 = f9 * (int64_t) 121666;
+ int64_t carry0;
+ int64_t carry1;
+ int64_t carry2;
+ int64_t carry3;
+ int64_t carry4;
+ int64_t carry5;
+ int64_t carry6;
+ int64_t carry7;
+ int64_t carry8;
+ int64_t carry9;
+
+ carry9 = h9 + (1 << 24); h0 += (carry9 >> 25) * 19; h9 -= carry9 & kTop39Bits;
+ carry1 = h1 + (1 << 24); h2 += carry1 >> 25; h1 -= carry1 & kTop39Bits;
+ carry3 = h3 + (1 << 24); h4 += carry3 >> 25; h3 -= carry3 & kTop39Bits;
+ carry5 = h5 + (1 << 24); h6 += carry5 >> 25; h5 -= carry5 & kTop39Bits;
+ carry7 = h7 + (1 << 24); h8 += carry7 >> 25; h7 -= carry7 & kTop39Bits;
+
+ carry0 = h0 + (1 << 25); h1 += carry0 >> 26; h0 -= carry0 & kTop38Bits;
+ carry2 = h2 + (1 << 25); h3 += carry2 >> 26; h2 -= carry2 & kTop38Bits;
+ carry4 = h4 + (1 << 25); h5 += carry4 >> 26; h4 -= carry4 & kTop38Bits;
+ carry6 = h6 + (1 << 25); h7 += carry6 >> 26; h6 -= carry6 & kTop38Bits;
+ carry8 = h8 + (1 << 25); h9 += carry8 >> 26; h8 -= carry8 & kTop38Bits;
+
+ h[0] = h0;
+ h[1] = h1;
+ h[2] = h2;
+ h[3] = h3;
+ h[4] = h4;
+ h[5] = h5;
+ h[6] = h6;
+ h[7] = h7;
+ h[8] = h8;
+ h[9] = h9;
+}
+
+void
+x25519_scalar_mult_generic(uint8_t out[32], const uint8_t scalar[32],
+ const uint8_t point[32]) {
+ fe x1, x2, z2, x3, z3, tmp0, tmp1;
+
+ uint8_t e[32];
+ memcpy(e, scalar, 32);
+ e[0] &= 248;
+ e[31] &= 127;
+ e[31] |= 64;
+ fe_frombytes(x1, point);
+ fe_1(x2);
+ fe_0(z2);
+ fe_copy(x3, x1);
+ fe_1(z3);
+
+ unsigned swap = 0;
+ int pos;
+ for (pos = 254; pos >= 0; --pos) {
+ unsigned b = 1 & (e[pos / 8] >> (pos & 7));
+ swap ^= b;
+ fe_cswap(x2, x3, swap);
+ fe_cswap(z2, z3, swap);
+ swap = b;
+ fe_sub(tmp0, x3, z3);
+ fe_sub(tmp1, x2, z2);
+ fe_add(x2, x2, z2);
+ fe_add(z2, x3, z3);
+ fe_mul(z3, tmp0, x2);
+ fe_mul(z2, z2, tmp1);
+ fe_sq(tmp0, tmp1);
+ fe_sq(tmp1, x2);
+ fe_add(x3, z3, z2);
+ fe_sub(z2, z3, z2);
+ fe_mul(x2, tmp1, tmp0);
+ fe_sub(tmp1, tmp1, tmp0);
+ fe_sq(z2, z2);
+ fe_mul121666(z3, tmp1);
+ fe_sq(x3, x3);
+ fe_add(tmp0, tmp0, z3);
+ fe_mul(z3, x1, z2);
+ fe_mul(z2, tmp1, tmp0);
+ }
+ fe_cswap(x2, x3, swap);
+ fe_cswap(z2, z3, swap);
+
+ fe_invert(z2, z2);
+ fe_mul(x2, x2, z2);
+ fe_tobytes(out, x2);
+}
+
+#ifdef unused
+void
+x25519_public_from_private_generic(uint8_t out_public_value[32],
+ const uint8_t private_key[32])
+{
+ uint8_t e[32];
+
+ memcpy(e, private_key, 32);
+ e[0] &= 248;
+ e[31] &= 127;
+ e[31] |= 64;
+
+ ge_p3 A;
+ x25519_ge_scalarmult_base(&A, e);
+
+ /* We only need the u-coordinate of the curve25519 point. The map is
+ * u=(y+1)/(1-y). Since y=Y/Z, this gives u=(Z+Y)/(Z-Y). */
+ fe zplusy, zminusy, zminusy_inv;
+ fe_add(zplusy, A.Z, A.Y);
+ fe_sub(zminusy, A.Z, A.Y);
+ fe_invert(zminusy_inv, zminusy);
+ fe_mul(zplusy, zplusy, zminusy_inv);
+ fe_tobytes(out_public_value, zplusy);
+}
+#endif
+
+void
+x25519_public_from_private(uint8_t out_public_value[32],
+ const uint8_t private_key[32])
+{
+ static const uint8_t kMongomeryBasePoint[32] = {9};
+
+ x25519_scalar_mult(out_public_value, private_key, kMongomeryBasePoint);
+}
+
+void
+X25519_keypair(uint8_t out_public_value[X25519_KEY_LENGTH],
+ uint8_t out_private_key[X25519_KEY_LENGTH])
+{
+ /* All X25519 implementations should decode scalars correctly (see
+ * https://tools.ietf.org/html/rfc7748#section-5). However, if an
+ * implementation doesn't then it might interoperate with random keys a
+ * fraction of the time because they'll, randomly, happen to be correctly
+ * formed.
+ *
+ * Thus we do the opposite of the masking here to make sure that our private
+ * keys are never correctly masked and so, hopefully, any incorrect
+ * implementations are deterministically broken.
+ *
+ * This does not affect security because, although we're throwing away
+ * entropy, a valid implementation of scalarmult should throw away the exact
+ * same bits anyway. */
+ arc4random_buf(out_private_key, 32);
+
+ out_private_key[0] |= 7;
+ out_private_key[31] &= 63;
+ out_private_key[31] |= 128;
+
+ x25519_public_from_private(out_public_value, out_private_key);
+}
+
+int
+X25519(uint8_t out_shared_key[X25519_KEY_LENGTH],
+ const uint8_t private_key[X25519_KEY_LENGTH],
+ const uint8_t peer_public_value[X25519_KEY_LENGTH])
+{
+ static const uint8_t kZeros[32] = {0};
+
+ x25519_scalar_mult(out_shared_key, private_key, peer_public_value);
+
+ /* The all-zero output results when the input is a point of small order. */
+ return timingsafe_memcmp(kZeros, out_shared_key, 32) != 0;
+}
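
For orientation, a minimal usage sketch of the APIs implemented above follows. It is not part of the patch: it assumes the public prototypes (ED25519_keypair/ED25519_sign/ED25519_verify, X25519_keypair/X25519 and X25519_KEY_LENGTH) are made visible by the curve25519 header shipped with this tree, and the message and peer values are placeholders. Note that X25519() returns 0 when the shared secret is all zero (small-order peer point), so its result must be checked.

#include <stddef.h>
#include <stdint.h>

#include <openssl/curve25519.h>	/* assumed location of the public prototypes */

/* Illustrative only: sign a message and derive a shared key using the
 * functions defined in curve25519.c above. */
static int
example(const uint8_t peer_public[X25519_KEY_LENGTH],
    const uint8_t *msg, size_t msg_len)
{
	uint8_t ed_pub[32], ed_priv[64], sig[64];
	uint8_t x_pub[X25519_KEY_LENGTH], x_priv[X25519_KEY_LENGTH];
	uint8_t shared[X25519_KEY_LENGTH];

	/* Ed25519: keypair, detached signature, verification. */
	ED25519_keypair(ed_pub, ed_priv);
	ED25519_sign(sig, msg, msg_len, ed_priv);
	if (!ED25519_verify(msg, msg_len, sig, ed_pub))
		return 0;

	/* X25519: keypair plus shared-secret derivation.  A zero return
	 * means the peer supplied a small-order point and the all-zero
	 * output must not be used.  x_pub would be sent to the peer and
	 * shared would key a cipher in a real protocol. */
	X25519_keypair(x_pub, x_priv);
	if (!X25519(shared, x_priv, peer_public))
		return 0;

	return 1;
}
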
diff --git a/crypto/libressl/crypto/curve25519/curve25519_internal.h b/crypto/libressl/crypto/curve25519/curve25519_internal.h
new file mode 100644
index 0000000..09d20a4
--- /dev/null
+++ b/crypto/libressl/crypto/curve25519/curve25519_internal.h
@@ -0,0 +1,99 @@
+/* $OpenBSD: curve25519_internal.h,v 1.3 2019/05/11 15:55:52 tb Exp $ */
+/*
+ * Copyright (c) 2015, Google Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HEADER_CURVE25519_INTERNAL_H
+#define HEADER_CURVE25519_INTERNAL_H
+
+#include <stdint.h>
+
+__BEGIN_HIDDEN_DECLS
+
+/* fe means field element. Here the field is \Z/(2^255-19). An element t,
+ * entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
+ * t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
+ * context. */
+typedef int32_t fe[10];
+
+/* ge means group element.
+ *
+ * Here the group is the set of pairs (x,y) of field elements (see fe.h)
+ * satisfying -x^2 + y^2 = 1 + d x^2y^2
+ * where d = -121665/121666.
+ *
+ * Representations:
+ * ge_p2 (projective): (X:Y:Z) satisfying x=X/Z, y=Y/Z
+ * ge_p3 (extended): (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT
+ * ge_p1p1 (completed): ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T
+ * ge_precomp (Duif): (y+x,y-x,2dxy) */
+
+typedef struct {
+ fe X;
+ fe Y;
+ fe Z;
+} ge_p2;
+
+typedef struct {
+ fe X;
+ fe Y;
+ fe Z;
+ fe T;
+} ge_p3;
+
+typedef struct {
+ fe X;
+ fe Y;
+ fe Z;
+ fe T;
+} ge_p1p1;
+
+typedef struct {
+ fe yplusx;
+ fe yminusx;
+ fe xy2d;
+} ge_precomp;
+
+typedef struct {
+ fe YplusX;
+ fe YminusX;
+ fe Z;
+ fe T2d;
+} ge_cached;
+
+void x25519_ge_tobytes(uint8_t *s, const ge_p2 *h);
+int x25519_ge_frombytes_vartime(ge_p3 *h, const uint8_t *s);
+void x25519_ge_p3_to_cached(ge_cached *r, const ge_p3 *p);
+void x25519_ge_p1p1_to_p2(ge_p2 *r, const ge_p1p1 *p);
+void x25519_ge_p1p1_to_p3(ge_p3 *r, const ge_p1p1 *p);
+void x25519_ge_add(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q);
+void x25519_ge_sub(ge_p1p1 *r, const ge_p3 *p, const ge_cached *q);
+void x25519_ge_scalarmult_small_precomp(ge_p3 *h, const uint8_t a[32],
+ const uint8_t precomp_table[15 * 2 * 32]);
+void x25519_ge_scalarmult_base(ge_p3 *h, const uint8_t a[32]);
+void x25519_ge_scalarmult(ge_p2 *r, const uint8_t *scalar, const ge_p3 *A);
+void x25519_sc_reduce(uint8_t *s);
+
+void x25519_public_from_private(uint8_t out_public_value[32],
+ const uint8_t private_key[32]);
+
+void x25519_scalar_mult(uint8_t out[32], const uint8_t scalar[32],
+ const uint8_t point[32]);
+void x25519_scalar_mult_generic(uint8_t out[32], const uint8_t scalar[32],
+ const uint8_t point[32]);
+
+__END_HIDDEN_DECLS
+
+#endif /* HEADER_CURVE25519_INTERNAL_H */
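
A side note on the fe layout described at the top of this header: the ten int32 limbs use alternating 26- and 25-bit widths (radix 2^25.5), which is where the exponents 2^26, 2^51, 2^77, ... in the comment come from. The snippet below is illustrative only and simply reproduces those bit offsets.

#include <stdio.h>

int
main(void)
{
	/* Limb i of an fe sits at bit offset offs[i]; widths alternate
	 * 26, 25, 26, 25, ... so the offsets match the header comment:
	 * 0, 26, 51, 77, 102, 128, 153, 179, 204, 230. */
	int offs[10];
	int i;

	offs[0] = 0;
	for (i = 1; i < 10; i++)
		offs[i] = offs[i - 1] + ((i & 1) ? 26 : 25);
	for (i = 0; i < 10; i++)
		printf("t[%d] contributes t[%d] * 2^%d\n", i, i, offs[i]);
	return 0;
}
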
diff --git a/crypto/libressl/crypto/poly1305/Makefile b/crypto/libressl/crypto/poly1305/Makefile
new file mode 100644
index 0000000..50eb22a
--- /dev/null
+++ b/crypto/libressl/crypto/poly1305/Makefile
@@ -0,0 +1,13 @@
+include ssl_common.mk
+
+obj = poly1305.o
+
+
+all: $(obj)
+dep: all
+
+%.o: %.c
+ $(CC) $(CFLAGS) -c $<
+
+clean:
+ rm -f *.o *.a
diff --git a/crypto/libressl/crypto/poly1305/poly1305-donna.c b/crypto/libressl/crypto/poly1305/poly1305-donna.c
new file mode 100644
index 0000000..773ea4e
--- /dev/null
+++ b/crypto/libressl/crypto/poly1305/poly1305-donna.c
@@ -0,0 +1,321 @@
+/* $OpenBSD: poly1305-donna.c,v 1.3 2014/06/12 15:49:30 deraadt Exp $ */
+/*
+ * Public Domain poly1305 from Andrew Moon
+ * Based on poly1305-donna.c, poly1305-donna-32.h and poly1305-donna.h from:
+ * https://github.com/floodyberry/poly1305-donna
+ */
+
+#include <stddef.h>
+
+static inline void poly1305_init(poly1305_context *ctx,
+ const unsigned char key[32]);
+static inline void poly1305_update(poly1305_context *ctx,
+ const unsigned char *m, size_t bytes);
+static inline void poly1305_finish(poly1305_context *ctx,
+ unsigned char mac[16]);
+
+/*
+ * poly1305 implementation using 32 bit * 32 bit = 64 bit multiplication
+ * and 64 bit addition.
+ */
+
+#define poly1305_block_size 16
+
+/* 17 + sizeof(size_t) + 14*sizeof(unsigned long) */
+typedef struct poly1305_state_internal_t {
+ unsigned long r[5];
+ unsigned long h[5];
+ unsigned long pad[4];
+ size_t leftover;
+ unsigned char buffer[poly1305_block_size];
+ unsigned char final;
+} poly1305_state_internal_t;
+
+/* interpret four 8 bit unsigned integers as a 32 bit unsigned integer in little endian */
+static unsigned long
+U8TO32(const unsigned char *p)
+{
+ return (((unsigned long)(p[0] & 0xff)) |
+ ((unsigned long)(p[1] & 0xff) << 8) |
+ ((unsigned long)(p[2] & 0xff) << 16) |
+ ((unsigned long)(p[3] & 0xff) << 24));
+}
+
+/* store a 32 bit unsigned integer as four 8 bit unsigned integers in little endian */
+static void
+U32TO8(unsigned char *p, unsigned long v)
+{
+ p[0] = (v) & 0xff;
+ p[1] = (v >> 8) & 0xff;
+ p[2] = (v >> 16) & 0xff;
+ p[3] = (v >> 24) & 0xff;
+}
+
+static inline void
+poly1305_init(poly1305_context *ctx, const unsigned char key[32])
+{
+ poly1305_state_internal_t *st = (poly1305_state_internal_t *)ctx;
+
+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
+ st->r[0] = (U8TO32(&key[0])) & 0x3ffffff;
+ st->r[1] = (U8TO32(&key[3]) >> 2) & 0x3ffff03;
+ st->r[2] = (U8TO32(&key[6]) >> 4) & 0x3ffc0ff;
+ st->r[3] = (U8TO32(&key[9]) >> 6) & 0x3f03fff;
+ st->r[4] = (U8TO32(&key[12]) >> 8) & 0x00fffff;
+
+ /* h = 0 */
+ st->h[0] = 0;
+ st->h[1] = 0;
+ st->h[2] = 0;
+ st->h[3] = 0;
+ st->h[4] = 0;
+
+ /* save pad for later */
+ st->pad[0] = U8TO32(&key[16]);
+ st->pad[1] = U8TO32(&key[20]);
+ st->pad[2] = U8TO32(&key[24]);
+ st->pad[3] = U8TO32(&key[28]);
+
+ st->leftover = 0;
+ st->final = 0;
+}
+
+static void
+poly1305_blocks(poly1305_state_internal_t *st, const unsigned char *m, size_t bytes)
+{
+ const unsigned long hibit = (st->final) ? 0 : (1 << 24); /* 1 << 128 */
+ unsigned long r0, r1, r2, r3, r4;
+ unsigned long s1, s2, s3, s4;
+ unsigned long h0, h1, h2, h3, h4;
+ unsigned long long d0, d1, d2, d3, d4;
+ unsigned long c;
+
+ r0 = st->r[0];
+ r1 = st->r[1];
+ r2 = st->r[2];
+ r3 = st->r[3];
+ r4 = st->r[4];
+
+ s1 = r1 * 5;
+ s2 = r2 * 5;
+ s3 = r3 * 5;
+ s4 = r4 * 5;
+
+ h0 = st->h[0];
+ h1 = st->h[1];
+ h2 = st->h[2];
+ h3 = st->h[3];
+ h4 = st->h[4];
+
+ while (bytes >= poly1305_block_size) {
+ /* h += m[i] */
+ h0 += (U8TO32(m + 0)) & 0x3ffffff;
+ h1 += (U8TO32(m + 3) >> 2) & 0x3ffffff;
+ h2 += (U8TO32(m + 6) >> 4) & 0x3ffffff;
+ h3 += (U8TO32(m + 9) >> 6) & 0x3ffffff;
+ h4 += (U8TO32(m + 12) >> 8) | hibit;
+
+ /* h *= r */
+ d0 = ((unsigned long long)h0 * r0) +
+ ((unsigned long long)h1 * s4) +
+ ((unsigned long long)h2 * s3) +
+ ((unsigned long long)h3 * s2) +
+ ((unsigned long long)h4 * s1);
+ d1 = ((unsigned long long)h0 * r1) +
+ ((unsigned long long)h1 * r0) +
+ ((unsigned long long)h2 * s4) +
+ ((unsigned long long)h3 * s3) +
+ ((unsigned long long)h4 * s2);
+ d2 = ((unsigned long long)h0 * r2) +
+ ((unsigned long long)h1 * r1) +
+ ((unsigned long long)h2 * r0) +
+ ((unsigned long long)h3 * s4) +
+ ((unsigned long long)h4 * s3);
+ d3 = ((unsigned long long)h0 * r3) +
+ ((unsigned long long)h1 * r2) +
+ ((unsigned long long)h2 * r1) +
+ ((unsigned long long)h3 * r0) +
+ ((unsigned long long)h4 * s4);
+ d4 = ((unsigned long long)h0 * r4) +
+ ((unsigned long long)h1 * r3) +
+ ((unsigned long long)h2 * r2) +
+ ((unsigned long long)h3 * r1) +
+ ((unsigned long long)h4 * r0);
+
+ /* (partial) h %= p */
+ c = (unsigned long)(d0 >> 26);
+ h0 = (unsigned long)d0 & 0x3ffffff;
+ d1 += c;
+ c = (unsigned long)(d1 >> 26);
+ h1 = (unsigned long)d1 & 0x3ffffff;
+ d2 += c;
+ c = (unsigned long)(d2 >> 26);
+ h2 = (unsigned long)d2 & 0x3ffffff;
+ d3 += c;
+ c = (unsigned long)(d3 >> 26);
+ h3 = (unsigned long)d3 & 0x3ffffff;
+ d4 += c;
+ c = (unsigned long)(d4 >> 26);
+ h4 = (unsigned long)d4 & 0x3ffffff;
+ h0 += c * 5;
+ c = (h0 >> 26);
+ h0 = h0 & 0x3ffffff;
+ h1 += c;
+
+ m += poly1305_block_size;
+ bytes -= poly1305_block_size;
+ }
+
+ st->h[0] = h0;
+ st->h[1] = h1;
+ st->h[2] = h2;
+ st->h[3] = h3;
+ st->h[4] = h4;
+}
+
+static inline void
+poly1305_update(poly1305_context *ctx, const unsigned char *m, size_t bytes)
+{
+ poly1305_state_internal_t *st = (poly1305_state_internal_t *)ctx;
+ size_t i;
+
+ /* handle leftover */
+ if (st->leftover) {
+ size_t want = (poly1305_block_size - st->leftover);
+ if (want > bytes)
+ want = bytes;
+ for (i = 0; i < want; i++)
+ st->buffer[st->leftover + i] = m[i];
+ bytes -= want;
+ m += want;
+ st->leftover += want;
+ if (st->leftover < poly1305_block_size)
+ return;
+ poly1305_blocks(st, st->buffer, poly1305_block_size);
+ st->leftover = 0;
+ }
+
+ /* process full blocks */
+ if (bytes >= poly1305_block_size) {
+ size_t want = (bytes & ~(poly1305_block_size - 1));
+ poly1305_blocks(st, m, want);
+ m += want;
+ bytes -= want;
+ }
+
+ /* store leftover */
+ if (bytes) {
+ for (i = 0; i < bytes; i++)
+ st->buffer[st->leftover + i] = m[i];
+ st->leftover += bytes;
+ }
+}
+
+static inline void
+poly1305_finish(poly1305_context *ctx, unsigned char mac[16])
+{
+ poly1305_state_internal_t *st = (poly1305_state_internal_t *)ctx;
+ unsigned long h0, h1, h2, h3, h4, c;
+ unsigned long g0, g1, g2, g3, g4;
+ unsigned long long f;
+ unsigned long mask;
+
+ /* process the remaining block */
+ if (st->leftover) {
+ size_t i = st->leftover;
+ st->buffer[i++] = 1;
+ for (; i < poly1305_block_size; i++)
+ st->buffer[i] = 0;
+ st->final = 1;
+ poly1305_blocks(st, st->buffer, poly1305_block_size);
+ }
+
+ /* fully carry h */
+ h0 = st->h[0];
+ h1 = st->h[1];
+ h2 = st->h[2];
+ h3 = st->h[3];
+ h4 = st->h[4];
+
+ c = h1 >> 26;
+ h1 = h1 & 0x3ffffff;
+ h2 += c;
+ c = h2 >> 26;
+ h2 = h2 & 0x3ffffff;
+ h3 += c;
+ c = h3 >> 26;
+ h3 = h3 & 0x3ffffff;
+ h4 += c;
+ c = h4 >> 26;
+ h4 = h4 & 0x3ffffff;
+ h0 += c * 5;
+ c = h0 >> 26;
+ h0 = h0 & 0x3ffffff;
+ h1 += c;
+
+ /* compute h + -p */
+ g0 = h0 + 5;
+ c = g0 >> 26;
+ g0 &= 0x3ffffff;
+ g1 = h1 + c;
+ c = g1 >> 26;
+ g1 &= 0x3ffffff;
+ g2 = h2 + c;
+ c = g2 >> 26;
+ g2 &= 0x3ffffff;
+ g3 = h3 + c;
+ c = g3 >> 26;
+ g3 &= 0x3ffffff;
+ g4 = h4 + c - (1 << 26);
+
+ /* select h if h < p, or h + -p if h >= p */
+ mask = (g4 >> ((sizeof(unsigned long) * 8) - 1)) - 1;
+ g0 &= mask;
+ g1 &= mask;
+ g2 &= mask;
+ g3 &= mask;
+ g4 &= mask;
+ mask = ~mask;
+ h0 = (h0 & mask) | g0;
+ h1 = (h1 & mask) | g1;
+ h2 = (h2 & mask) | g2;
+ h3 = (h3 & mask) | g3;
+ h4 = (h4 & mask) | g4;
+
+ /* h = h % (2^128) */
+ h0 = ((h0) | (h1 << 26)) & 0xffffffff;
+ h1 = ((h1 >> 6) | (h2 << 20)) & 0xffffffff;
+ h2 = ((h2 >> 12) | (h3 << 14)) & 0xffffffff;
+ h3 = ((h3 >> 18) | (h4 << 8)) & 0xffffffff;
+
+ /* mac = (h + pad) % (2^128) */
+ f = (unsigned long long)h0 + st->pad[0];
+ h0 = (unsigned long)f;
+ f = (unsigned long long)h1 + st->pad[1] + (f >> 32);
+ h1 = (unsigned long)f;
+ f = (unsigned long long)h2 + st->pad[2] + (f >> 32);
+ h2 = (unsigned long)f;
+ f = (unsigned long long)h3 + st->pad[3] + (f >> 32);
+ h3 = (unsigned long)f;
+
+ U32TO8(mac + 0, h0);
+ U32TO8(mac + 4, h1);
+ U32TO8(mac + 8, h2);
+ U32TO8(mac + 12, h3);
+
+ /* zero out the state */
+ st->h[0] = 0;
+ st->h[1] = 0;
+ st->h[2] = 0;
+ st->h[3] = 0;
+ st->h[4] = 0;
+ st->r[0] = 0;
+ st->r[1] = 0;
+ st->r[2] = 0;
+ st->r[3] = 0;
+ st->r[4] = 0;
+ st->pad[0] = 0;
+ st->pad[1] = 0;
+ st->pad[2] = 0;
+ st->pad[3] = 0;
+}
diff --git a/crypto/libressl/crypto/poly1305/poly1305.c b/crypto/libressl/crypto/poly1305/poly1305.c
new file mode 100644
index 0000000..75a34cc
--- /dev/null
+++ b/crypto/libressl/crypto/poly1305/poly1305.c
@@ -0,0 +1,38 @@
+/* $OpenBSD: poly1305.c,v 1.3 2014/06/12 15:49:30 deraadt Exp $ */
+/*
+ * Copyright (c) 2014 Joel Sing <jsing@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <openssl/poly1305.h>
+#include "poly1305-donna.c"
+
+void
+CRYPTO_poly1305_init(poly1305_context *ctx, const unsigned char key[32])
+{
+ poly1305_init(ctx, key);
+}
+
+void
+CRYPTO_poly1305_update(poly1305_context *ctx, const unsigned char *in,
+ size_t len)
+{
+ poly1305_update(ctx, in, len);
+}
+
+void
+CRYPTO_poly1305_finish(poly1305_context *ctx, unsigned char mac[16])
+{
+ poly1305_finish(ctx, mac);
+}
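
As a usage note for the wrappers above: Poly1305 is driven through the usual init/update/finish sequence with a 32-byte one-time key and a 16-byte tag. The sketch below is illustrative only; it assumes poly1305_context and the CRYPTO_poly1305_* prototypes come from <openssl/poly1305.h>, as included by poly1305.c, and that the key is used for a single message.

#include <stddef.h>

#include <openssl/poly1305.h>

/* Illustrative only: compute a Poly1305 tag over a message with a
 * one-time 32-byte key using the wrappers defined in poly1305.c above. */
static void
poly1305_tag(unsigned char mac[16], const unsigned char key[32],
    const unsigned char *msg, size_t msg_len)
{
	poly1305_context ctx;

	CRYPTO_poly1305_init(&ctx, key);
	CRYPTO_poly1305_update(&ctx, msg, msg_len);
	CRYPTO_poly1305_finish(&ctx, mac);
}
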
diff --git a/crypto/libressl/crypto/sha/Makefile b/crypto/libressl/crypto/sha/Makefile
new file mode 100644
index 0000000..51fbfcf
--- /dev/null
+++ b/crypto/libressl/crypto/sha/Makefile
@@ -0,0 +1,14 @@
+include ssl_common.mk
+CFLAGS += -D__BEGIN_HIDDEN_DECLS= -D__END_HIDDEN_DECLS=
+
+obj = sha1dgst.o sha1_one.o sha256.o sha512.o
+
+
+all: $(obj)
+dep: all
+
+%.o: %.c
+ $(CC) $(CFLAGS) -c $<
+
+clean:
+ rm -f *.o *.a
diff --git a/crypto/libressl/crypto/sha/sha1-elf-armv4.S b/crypto/libressl/crypto/sha/sha1-elf-armv4.S
new file mode 100644
index 0000000..5aeaf7c
--- /dev/null
+++ b/crypto/libressl/crypto/sha/sha1-elf-armv4.S
@@ -0,0 +1,455 @@
+#include "arm_arch.h"
+
+.text
+
+.global sha1_block_data_order
+.type sha1_block_data_order,%function
+
+.align 2
+sha1_block_data_order:
+ stmdb sp!,{r4-r12,lr}
+ add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
+ ldmia r0,{r3,r4,r5,r6,r7}
+.Lloop:
+ ldr r8,.LK_00_19
+ mov r14,sp
+ sub sp,sp,#15*4
+ mov r5,r5,ror#30
+ mov r6,r6,ror#30
+ mov r7,r7,ror#30 @ [6]
+.L_00_15:
+#if __ARM_ARCH__<7 || defined(__STRICT_ALIGNMENT)
+ ldrb r10,[r1,#2]
+ ldrb r9,[r1,#3]
+ ldrb r11,[r1,#1]
+ add r7,r8,r7,ror#2 @ E+=K_00_19
+ ldrb r12,[r1],#4
+ orr r9,r9,r10,lsl#8
+ eor r10,r5,r6 @ F_xx_xx
+ orr r9,r9,r11,lsl#16
+ add r7,r7,r3,ror#27 @ E+=ROR(A,27)
+ orr r9,r9,r12,lsl#24
+#else
+ ldr r9,[r1],#4 @ handles unaligned
+ add r7,r8,r7,ror#2 @ E+=K_00_19
+ eor r10,r5,r6 @ F_xx_xx
+ add r7,r7,r3,ror#27 @ E+=ROR(A,27)
+#ifdef __ARMEL__
+ rev r9,r9 @ byte swap
+#endif
+#endif
+ and r10,r4,r10,ror#2
+ add r7,r7,r9 @ E+=X[i]
+ eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
+ str r9,[r14,#-4]!
+ add r7,r7,r10 @ E+=F_00_19(B,C,D)
+#if __ARM_ARCH__<7 || defined(__STRICT_ALIGNMENT)
+ ldrb r10,[r1,#2]
+ ldrb r9,[r1,#3]
+ ldrb r11,[r1,#1]
+ add r6,r8,r6,ror#2 @ E+=K_00_19
+ ldrb r12,[r1],#4
+ orr r9,r9,r10,lsl#8
+ eor r10,r4,r5 @ F_xx_xx
+ orr r9,r9,r11,lsl#16
+ add r6,r6,r7,ror#27 @ E+=ROR(A,27)
+ orr r9,r9,r12,lsl#24
+#else
+ ldr r9,[r1],#4 @ handles unaligned
+ add r6,r8,r6,ror#2 @ E+=K_00_19
+ eor r10,r4,r5 @ F_xx_xx
+ add r6,r6,r7,ror#27 @ E+=ROR(A,27)
+#ifdef __ARMEL__
+ rev r9,r9 @ byte swap
+#endif
+#endif
+ and r10,r3,r10,ror#2
+ add r6,r6,r9 @ E+=X[i]
+ eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
+ str r9,[r14,#-4]!
+ add r6,r6,r10 @ E+=F_00_19(B,C,D)
+#if __ARM_ARCH__<7 || defined(__STRICT_ALIGNMENT)
+ ldrb r10,[r1,#2]
+ ldrb r9,[r1,#3]
+ ldrb r11,[r1,#1]
+ add r5,r8,r5,ror#2 @ E+=K_00_19
+ ldrb r12,[r1],#4
+ orr r9,r9,r10,lsl#8
+ eor r10,r3,r4 @ F_xx_xx
+ orr r9,r9,r11,lsl#16
+ add r5,r5,r6,ror#27 @ E+=ROR(A,27)
+ orr r9,r9,r12,lsl#24
+#else
+ ldr r9,[r1],#4 @ handles unaligned
+ add r5,r8,r5,ror#2 @ E+=K_00_19
+ eor r10,r3,r4 @ F_xx_xx
+ add r5,r5,r6,ror#27 @ E+=ROR(A,27)
+#ifdef __ARMEL__
+ rev r9,r9 @ byte swap
+#endif
+#endif
+ and r10,r7,r10,ror#2
+ add r5,r5,r9 @ E+=X[i]
+ eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
+ str r9,[r14,#-4]!
+ add r5,r5,r10 @ E+=F_00_19(B,C,D)
+#if __ARM_ARCH__<7 || defined(__STRICT_ALIGNMENT)
+ ldrb r10,[r1,#2]
+ ldrb r9,[r1,#3]
+ ldrb r11,[r1,#1]
+ add r4,r8,r4,ror#2 @ E+=K_00_19
+ ldrb r12,[r1],#4
+ orr r9,r9,r10,lsl#8
+ eor r10,r7,r3 @ F_xx_xx
+ orr r9,r9,r11,lsl#16
+ add r4,r4,r5,ror#27 @ E+=ROR(A,27)
+ orr r9,r9,r12,lsl#24
+#else
+ ldr r9,[r1],#4 @ handles unaligned
+ add r4,r8,r4,ror#2 @ E+=K_00_19
+ eor r10,r7,r3 @ F_xx_xx
+ add r4,r4,r5,ror#27 @ E+=ROR(A,27)
+#ifdef __ARMEL__
+ rev r9,r9 @ byte swap
+#endif
+#endif
+ and r10,r6,r10,ror#2
+ add r4,r4,r9 @ E+=X[i]
+ eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
+ str r9,[r14,#-4]!
+ add r4,r4,r10 @ E+=F_00_19(B,C,D)
+#if __ARM_ARCH__<7 || defined(__STRICT_ALIGNMENT)
+ ldrb r10,[r1,#2]
+ ldrb r9,[r1,#3]
+ ldrb r11,[r1,#1]
+ add r3,r8,r3,ror#2 @ E+=K_00_19
+ ldrb r12,[r1],#4
+ orr r9,r9,r10,lsl#8
+ eor r10,r6,r7 @ F_xx_xx
+ orr r9,r9,r11,lsl#16
+ add r3,r3,r4,ror#27 @ E+=ROR(A,27)
+ orr r9,r9,r12,lsl#24
+#else
+ ldr r9,[r1],#4 @ handles unaligned
+ add r3,r8,r3,ror#2 @ E+=K_00_19
+ eor r10,r6,r7 @ F_xx_xx
+ add r3,r3,r4,ror#27 @ E+=ROR(A,27)
+#ifdef __ARMEL__
+ rev r9,r9 @ byte swap
+#endif
+#endif
+ and r10,r5,r10,ror#2
+ add r3,r3,r9 @ E+=X[i]
+ eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
+ str r9,[r14,#-4]!
+ add r3,r3,r10 @ E+=F_00_19(B,C,D)
+ teq r14,sp
+ bne .L_00_15 @ [((11+4)*5+2)*3]
+ sub sp,sp,#25*4
+#if __ARM_ARCH__<7 || defined(__STRICT_ALIGNMENT)
+ ldrb r10,[r1,#2]
+ ldrb r9,[r1,#3]
+ ldrb r11,[r1,#1]
+ add r7,r8,r7,ror#2 @ E+=K_00_19
+ ldrb r12,[r1],#4
+ orr r9,r9,r10,lsl#8
+ eor r10,r5,r6 @ F_xx_xx
+ orr r9,r9,r11,lsl#16
+ add r7,r7,r3,ror#27 @ E+=ROR(A,27)
+ orr r9,r9,r12,lsl#24
+#else
+ ldr r9,[r1],#4 @ handles unaligned
+ add r7,r8,r7,ror#2 @ E+=K_00_19
+ eor r10,r5,r6 @ F_xx_xx
+ add r7,r7,r3,ror#27 @ E+=ROR(A,27)
+#ifdef __ARMEL__
+ rev r9,r9 @ byte swap
+#endif
+#endif
+ and r10,r4,r10,ror#2
+ add r7,r7,r9 @ E+=X[i]
+ eor r10,r10,r6,ror#2 @ F_00_19(B,C,D)
+ str r9,[r14,#-4]!
+ add r7,r7,r10 @ E+=F_00_19(B,C,D)
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r6,r8,r6,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r4,r5 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r6,r6,r7,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ and r10,r3,r10,ror#2 @ F_xx_xx
+ @ F_xx_xx
+ add r6,r6,r9 @ E+=X[i]
+ eor r10,r10,r5,ror#2 @ F_00_19(B,C,D)
+ add r6,r6,r10 @ E+=F_00_19(B,C,D)
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r5,r8,r5,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r3,r4 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r5,r5,r6,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ and r10,r7,r10,ror#2 @ F_xx_xx
+ @ F_xx_xx
+ add r5,r5,r9 @ E+=X[i]
+ eor r10,r10,r4,ror#2 @ F_00_19(B,C,D)
+ add r5,r5,r10 @ E+=F_00_19(B,C,D)
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r4,r8,r4,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r7,r3 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r4,r4,r5,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ and r10,r6,r10,ror#2 @ F_xx_xx
+ @ F_xx_xx
+ add r4,r4,r9 @ E+=X[i]
+ eor r10,r10,r3,ror#2 @ F_00_19(B,C,D)
+ add r4,r4,r10 @ E+=F_00_19(B,C,D)
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r3,r8,r3,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r6,r7 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r3,r3,r4,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ and r10,r5,r10,ror#2 @ F_xx_xx
+ @ F_xx_xx
+ add r3,r3,r9 @ E+=X[i]
+ eor r10,r10,r7,ror#2 @ F_00_19(B,C,D)
+ add r3,r3,r10 @ E+=F_00_19(B,C,D)
+
+ ldr r8,.LK_20_39 @ [+15+16*4]
+ cmn sp,#0 @ [+3], clear carry to denote 20_39
+.L_20_39_or_60_79:
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r7,r8,r7,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r5,r6 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r7,r7,r3,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ eor r10,r4,r10,ror#2 @ F_xx_xx
+ @ F_xx_xx
+ add r7,r7,r9 @ E+=X[i]
+ add r7,r7,r10 @ E+=F_20_39(B,C,D)
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r6,r8,r6,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r4,r5 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r6,r6,r7,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ eor r10,r3,r10,ror#2 @ F_xx_xx
+ @ F_xx_xx
+ add r6,r6,r9 @ E+=X[i]
+ add r6,r6,r10 @ E+=F_20_39(B,C,D)
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r5,r8,r5,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r3,r4 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r5,r5,r6,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ eor r10,r7,r10,ror#2 @ F_xx_xx
+ @ F_xx_xx
+ add r5,r5,r9 @ E+=X[i]
+ add r5,r5,r10 @ E+=F_20_39(B,C,D)
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r4,r8,r4,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r7,r3 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r4,r4,r5,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ eor r10,r6,r10,ror#2 @ F_xx_xx
+ @ F_xx_xx
+ add r4,r4,r9 @ E+=X[i]
+ add r4,r4,r10 @ E+=F_20_39(B,C,D)
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r3,r8,r3,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r6,r7 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r3,r3,r4,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ eor r10,r5,r10,ror#2 @ F_xx_xx
+ @ F_xx_xx
+ add r3,r3,r9 @ E+=X[i]
+ add r3,r3,r10 @ E+=F_20_39(B,C,D)
+ teq r14,sp @ preserve carry
+ bne .L_20_39_or_60_79 @ [+((12+3)*5+2)*4]
+ bcs .L_done @ [+((12+3)*5+2)*4], spare 300 bytes
+
+ ldr r8,.LK_40_59
+ sub sp,sp,#20*4 @ [+2]
+.L_40_59:
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r7,r8,r7,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r5,r6 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r7,r7,r3,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ and r10,r4,r10,ror#2 @ F_xx_xx
+ and r11,r5,r6 @ F_xx_xx
+ add r7,r7,r9 @ E+=X[i]
+ add r7,r7,r10 @ E+=F_40_59(B,C,D)
+ add r7,r7,r11,ror#2
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r6,r8,r6,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r4,r5 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r6,r6,r7,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ and r10,r3,r10,ror#2 @ F_xx_xx
+ and r11,r4,r5 @ F_xx_xx
+ add r6,r6,r9 @ E+=X[i]
+ add r6,r6,r10 @ E+=F_40_59(B,C,D)
+ add r6,r6,r11,ror#2
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r5,r8,r5,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r3,r4 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r5,r5,r6,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ and r10,r7,r10,ror#2 @ F_xx_xx
+ and r11,r3,r4 @ F_xx_xx
+ add r5,r5,r9 @ E+=X[i]
+ add r5,r5,r10 @ E+=F_40_59(B,C,D)
+ add r5,r5,r11,ror#2
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r4,r8,r4,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r7,r3 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r4,r4,r5,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ and r10,r6,r10,ror#2 @ F_xx_xx
+ and r11,r7,r3 @ F_xx_xx
+ add r4,r4,r9 @ E+=X[i]
+ add r4,r4,r10 @ E+=F_40_59(B,C,D)
+ add r4,r4,r11,ror#2
+ ldr r9,[r14,#15*4]
+ ldr r10,[r14,#13*4]
+ ldr r11,[r14,#7*4]
+ add r3,r8,r3,ror#2 @ E+=K_xx_xx
+ ldr r12,[r14,#2*4]
+ eor r9,r9,r10
+ eor r11,r11,r12 @ 1 cycle stall
+ eor r10,r6,r7 @ F_xx_xx
+ mov r9,r9,ror#31
+ add r3,r3,r4,ror#27 @ E+=ROR(A,27)
+ eor r9,r9,r11,ror#31
+ str r9,[r14,#-4]!
+ and r10,r5,r10,ror#2 @ F_xx_xx
+ and r11,r6,r7 @ F_xx_xx
+ add r3,r3,r9 @ E+=X[i]
+ add r3,r3,r10 @ E+=F_40_59(B,C,D)
+ add r3,r3,r11,ror#2
+ teq r14,sp
+ bne .L_40_59 @ [+((12+5)*5+2)*4]
+
+ ldr r8,.LK_60_79
+ sub sp,sp,#20*4
+ cmp sp,#0 @ set carry to denote 60_79
+ b .L_20_39_or_60_79 @ [+4], spare 300 bytes
+.L_done:
+ add sp,sp,#80*4 @ "deallocate" stack frame
+ ldmia r0,{r8,r9,r10,r11,r12}
+ add r3,r8,r3
+ add r4,r9,r4
+ add r5,r10,r5,ror#2
+ add r6,r11,r6,ror#2
+ add r7,r12,r7,ror#2
+ stmia r0,{r3,r4,r5,r6,r7}
+ teq r1,r2
+ bne .Lloop @ [+18], total 1307
+
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r4-r12,pc}
+#else
+ ldmia sp!,{r4-r12,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+#endif
+.align 2
+.LK_00_19: .word 0x5a827999
+.LK_20_39: .word 0x6ed9eba1
+.LK_40_59: .word 0x8f1bbcdc
+.LK_60_79: .word 0xca62c1d6
+.size sha1_block_data_order,.-sha1_block_data_order
+.asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
+#if defined(HAVE_GNU_STACK)
+.section .note.GNU-stack,"",%progbits
+#endif
diff --git a/crypto/libressl/crypto/sha/sha1-elf-x86_64.S b/crypto/libressl/crypto/sha/sha1-elf-x86_64.S
new file mode 100644
index 0000000..5a37019
--- /dev/null
+++ b/crypto/libressl/crypto/sha/sha1-elf-x86_64.S
@@ -0,0 +1,2491 @@
+#include "x86_arch.h"
+.text
+
+.hidden OPENSSL_ia32cap_P
+
+.globl sha1_block_data_order
+.type sha1_block_data_order,@function
+.align 16
+sha1_block_data_order:
+ movl OPENSSL_ia32cap_P+0(%rip),%r9d
+ movl OPENSSL_ia32cap_P+4(%rip),%r8d
+ testl $IA32CAP_MASK1_SSSE3,%r8d
+ jz .Lialu
+ jmp _ssse3_shortcut
+
+.align 16
+.Lialu:
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ movq %rsp,%r11
+ movq %rdi,%r8
+ subq $72,%rsp
+ movq %rsi,%r9
+ andq $-64,%rsp
+ movq %rdx,%r10
+ movq %r11,64(%rsp)
+.Lprologue:
+
+ movl 0(%r8),%esi
+ movl 4(%r8),%edi
+ movl 8(%r8),%r11d
+ movl 12(%r8),%r12d
+ movl 16(%r8),%r13d
+ jmp .Lloop
+
+.align 16
+.Lloop:
+ movl 0(%r9),%edx
+ bswapl %edx
+ movl %edx,0(%rsp)
+ movl %r11d,%eax
+ movl 4(%r9),%ebp
+ movl %esi,%ecx
+ xorl %r12d,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%r13,1),%r13d
+ andl %edi,%eax
+ movl %ebp,4(%rsp)
+ addl %ecx,%r13d
+ xorl %r12d,%eax
+ roll $30,%edi
+ addl %eax,%r13d
+ movl %edi,%eax
+ movl 8(%r9),%edx
+ movl %r13d,%ecx
+ xorl %r11d,%eax
+ bswapl %edx
+ roll $5,%ecx
+ leal 1518500249(%rbp,%r12,1),%r12d
+ andl %esi,%eax
+ movl %edx,8(%rsp)
+ addl %ecx,%r12d
+ xorl %r11d,%eax
+ roll $30,%esi
+ addl %eax,%r12d
+ movl %esi,%eax
+ movl 12(%r9),%ebp
+ movl %r12d,%ecx
+ xorl %edi,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%r11,1),%r11d
+ andl %r13d,%eax
+ movl %ebp,12(%rsp)
+ addl %ecx,%r11d
+ xorl %edi,%eax
+ roll $30,%r13d
+ addl %eax,%r11d
+ movl %r13d,%eax
+ movl 16(%r9),%edx
+ movl %r11d,%ecx
+ xorl %esi,%eax
+ bswapl %edx
+ roll $5,%ecx
+ leal 1518500249(%rbp,%rdi,1),%edi
+ andl %r12d,%eax
+ movl %edx,16(%rsp)
+ addl %ecx,%edi
+ xorl %esi,%eax
+ roll $30,%r12d
+ addl %eax,%edi
+ movl %r12d,%eax
+ movl 20(%r9),%ebp
+ movl %edi,%ecx
+ xorl %r13d,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%rsi,1),%esi
+ andl %r11d,%eax
+ movl %ebp,20(%rsp)
+ addl %ecx,%esi
+ xorl %r13d,%eax
+ roll $30,%r11d
+ addl %eax,%esi
+ movl %r11d,%eax
+ movl 24(%r9),%edx
+ movl %esi,%ecx
+ xorl %r12d,%eax
+ bswapl %edx
+ roll $5,%ecx
+ leal 1518500249(%rbp,%r13,1),%r13d
+ andl %edi,%eax
+ movl %edx,24(%rsp)
+ addl %ecx,%r13d
+ xorl %r12d,%eax
+ roll $30,%edi
+ addl %eax,%r13d
+ movl %edi,%eax
+ movl 28(%r9),%ebp
+ movl %r13d,%ecx
+ xorl %r11d,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%r12,1),%r12d
+ andl %esi,%eax
+ movl %ebp,28(%rsp)
+ addl %ecx,%r12d
+ xorl %r11d,%eax
+ roll $30,%esi
+ addl %eax,%r12d
+ movl %esi,%eax
+ movl 32(%r9),%edx
+ movl %r12d,%ecx
+ xorl %edi,%eax
+ bswapl %edx
+ roll $5,%ecx
+ leal 1518500249(%rbp,%r11,1),%r11d
+ andl %r13d,%eax
+ movl %edx,32(%rsp)
+ addl %ecx,%r11d
+ xorl %edi,%eax
+ roll $30,%r13d
+ addl %eax,%r11d
+ movl %r13d,%eax
+ movl 36(%r9),%ebp
+ movl %r11d,%ecx
+ xorl %esi,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%rdi,1),%edi
+ andl %r12d,%eax
+ movl %ebp,36(%rsp)
+ addl %ecx,%edi
+ xorl %esi,%eax
+ roll $30,%r12d
+ addl %eax,%edi
+ movl %r12d,%eax
+ movl 40(%r9),%edx
+ movl %edi,%ecx
+ xorl %r13d,%eax
+ bswapl %edx
+ roll $5,%ecx
+ leal 1518500249(%rbp,%rsi,1),%esi
+ andl %r11d,%eax
+ movl %edx,40(%rsp)
+ addl %ecx,%esi
+ xorl %r13d,%eax
+ roll $30,%r11d
+ addl %eax,%esi
+ movl %r11d,%eax
+ movl 44(%r9),%ebp
+ movl %esi,%ecx
+ xorl %r12d,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%r13,1),%r13d
+ andl %edi,%eax
+ movl %ebp,44(%rsp)
+ addl %ecx,%r13d
+ xorl %r12d,%eax
+ roll $30,%edi
+ addl %eax,%r13d
+ movl %edi,%eax
+ movl 48(%r9),%edx
+ movl %r13d,%ecx
+ xorl %r11d,%eax
+ bswapl %edx
+ roll $5,%ecx
+ leal 1518500249(%rbp,%r12,1),%r12d
+ andl %esi,%eax
+ movl %edx,48(%rsp)
+ addl %ecx,%r12d
+ xorl %r11d,%eax
+ roll $30,%esi
+ addl %eax,%r12d
+ movl %esi,%eax
+ movl 52(%r9),%ebp
+ movl %r12d,%ecx
+ xorl %edi,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%r11,1),%r11d
+ andl %r13d,%eax
+ movl %ebp,52(%rsp)
+ addl %ecx,%r11d
+ xorl %edi,%eax
+ roll $30,%r13d
+ addl %eax,%r11d
+ movl %r13d,%eax
+ movl 56(%r9),%edx
+ movl %r11d,%ecx
+ xorl %esi,%eax
+ bswapl %edx
+ roll $5,%ecx
+ leal 1518500249(%rbp,%rdi,1),%edi
+ andl %r12d,%eax
+ movl %edx,56(%rsp)
+ addl %ecx,%edi
+ xorl %esi,%eax
+ roll $30,%r12d
+ addl %eax,%edi
+ movl %r12d,%eax
+ movl 60(%r9),%ebp
+ movl %edi,%ecx
+ xorl %r13d,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%rsi,1),%esi
+ andl %r11d,%eax
+ movl %ebp,60(%rsp)
+ addl %ecx,%esi
+ xorl %r13d,%eax
+ roll $30,%r11d
+ addl %eax,%esi
+ movl 0(%rsp),%edx
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 8(%rsp),%edx
+ xorl %r12d,%eax
+ roll $5,%ecx
+ xorl 32(%rsp),%edx
+ andl %edi,%eax
+ leal 1518500249(%rbp,%r13,1),%r13d
+ xorl 52(%rsp),%edx
+ xorl %r12d,%eax
+ roll $1,%edx
+ addl %ecx,%r13d
+ roll $30,%edi
+ movl %edx,0(%rsp)
+ addl %eax,%r13d
+ movl 4(%rsp),%ebp
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 12(%rsp),%ebp
+ xorl %r11d,%eax
+ roll $5,%ecx
+ xorl 36(%rsp),%ebp
+ andl %esi,%eax
+ leal 1518500249(%rdx,%r12,1),%r12d
+ xorl 56(%rsp),%ebp
+ xorl %r11d,%eax
+ roll $1,%ebp
+ addl %ecx,%r12d
+ roll $30,%esi
+ movl %ebp,4(%rsp)
+ addl %eax,%r12d
+ movl 8(%rsp),%edx
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 16(%rsp),%edx
+ xorl %edi,%eax
+ roll $5,%ecx
+ xorl 40(%rsp),%edx
+ andl %r13d,%eax
+ leal 1518500249(%rbp,%r11,1),%r11d
+ xorl 60(%rsp),%edx
+ xorl %edi,%eax
+ roll $1,%edx
+ addl %ecx,%r11d
+ roll $30,%r13d
+ movl %edx,8(%rsp)
+ addl %eax,%r11d
+ movl 12(%rsp),%ebp
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 20(%rsp),%ebp
+ xorl %esi,%eax
+ roll $5,%ecx
+ xorl 44(%rsp),%ebp
+ andl %r12d,%eax
+ leal 1518500249(%rdx,%rdi,1),%edi
+ xorl 0(%rsp),%ebp
+ xorl %esi,%eax
+ roll $1,%ebp
+ addl %ecx,%edi
+ roll $30,%r12d
+ movl %ebp,12(%rsp)
+ addl %eax,%edi
+ movl 16(%rsp),%edx
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 24(%rsp),%edx
+ xorl %r13d,%eax
+ roll $5,%ecx
+ xorl 48(%rsp),%edx
+ andl %r11d,%eax
+ leal 1518500249(%rbp,%rsi,1),%esi
+ xorl 4(%rsp),%edx
+ xorl %r13d,%eax
+ roll $1,%edx
+ addl %ecx,%esi
+ roll $30,%r11d
+ movl %edx,16(%rsp)
+ addl %eax,%esi
+ movl 20(%rsp),%ebp
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 28(%rsp),%ebp
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%r13,1),%r13d
+ xorl 52(%rsp),%ebp
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 8(%rsp),%ebp
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%ebp
+ movl %ebp,20(%rsp)
+ movl 24(%rsp),%edx
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 32(%rsp),%edx
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%r12,1),%r12d
+ xorl 56(%rsp),%edx
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 12(%rsp),%edx
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%edx
+ movl %edx,24(%rsp)
+ movl 28(%rsp),%ebp
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 36(%rsp),%ebp
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%r11,1),%r11d
+ xorl 60(%rsp),%ebp
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 16(%rsp),%ebp
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%ebp
+ movl %ebp,28(%rsp)
+ movl 32(%rsp),%edx
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 40(%rsp),%edx
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%rdi,1),%edi
+ xorl 0(%rsp),%edx
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 20(%rsp),%edx
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%edx
+ movl %edx,32(%rsp)
+ movl 36(%rsp),%ebp
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 44(%rsp),%ebp
+ xorl %r11d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%rsi,1),%esi
+ xorl 4(%rsp),%ebp
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ xorl 24(%rsp),%ebp
+ roll $30,%r11d
+ addl %eax,%esi
+ roll $1,%ebp
+ movl %ebp,36(%rsp)
+ movl 40(%rsp),%edx
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 48(%rsp),%edx
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%r13,1),%r13d
+ xorl 8(%rsp),%edx
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 28(%rsp),%edx
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%edx
+ movl %edx,40(%rsp)
+ movl 44(%rsp),%ebp
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 52(%rsp),%ebp
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%r12,1),%r12d
+ xorl 12(%rsp),%ebp
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 32(%rsp),%ebp
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%ebp
+ movl %ebp,44(%rsp)
+ movl 48(%rsp),%edx
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 56(%rsp),%edx
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%r11,1),%r11d
+ xorl 16(%rsp),%edx
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 36(%rsp),%edx
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%edx
+ movl %edx,48(%rsp)
+ movl 52(%rsp),%ebp
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 60(%rsp),%ebp
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%rdi,1),%edi
+ xorl 20(%rsp),%ebp
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 40(%rsp),%ebp
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%ebp
+ movl %ebp,52(%rsp)
+ movl 56(%rsp),%edx
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 0(%rsp),%edx
+ xorl %r11d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%rsi,1),%esi
+ xorl 24(%rsp),%edx
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ xorl 44(%rsp),%edx
+ roll $30,%r11d
+ addl %eax,%esi
+ roll $1,%edx
+ movl %edx,56(%rsp)
+ movl 60(%rsp),%ebp
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 4(%rsp),%ebp
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%r13,1),%r13d
+ xorl 28(%rsp),%ebp
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 48(%rsp),%ebp
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%ebp
+ movl %ebp,60(%rsp)
+ movl 0(%rsp),%edx
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 8(%rsp),%edx
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%r12,1),%r12d
+ xorl 32(%rsp),%edx
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 52(%rsp),%edx
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%edx
+ movl %edx,0(%rsp)
+ movl 4(%rsp),%ebp
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 12(%rsp),%ebp
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%r11,1),%r11d
+ xorl 36(%rsp),%ebp
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 56(%rsp),%ebp
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%ebp
+ movl %ebp,4(%rsp)
+ movl 8(%rsp),%edx
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 16(%rsp),%edx
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%rdi,1),%edi
+ xorl 40(%rsp),%edx
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 60(%rsp),%edx
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%edx
+ movl %edx,8(%rsp)
+ movl 12(%rsp),%ebp
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 20(%rsp),%ebp
+ xorl %r11d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%rsi,1),%esi
+ xorl 44(%rsp),%ebp
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ xorl 0(%rsp),%ebp
+ roll $30,%r11d
+ addl %eax,%esi
+ roll $1,%ebp
+ movl %ebp,12(%rsp)
+ movl 16(%rsp),%edx
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 24(%rsp),%edx
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%r13,1),%r13d
+ xorl 48(%rsp),%edx
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 4(%rsp),%edx
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%edx
+ movl %edx,16(%rsp)
+ movl 20(%rsp),%ebp
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 28(%rsp),%ebp
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%r12,1),%r12d
+ xorl 52(%rsp),%ebp
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 8(%rsp),%ebp
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%ebp
+ movl %ebp,20(%rsp)
+ movl 24(%rsp),%edx
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 32(%rsp),%edx
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%r11,1),%r11d
+ xorl 56(%rsp),%edx
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 12(%rsp),%edx
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%edx
+ movl %edx,24(%rsp)
+ movl 28(%rsp),%ebp
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 36(%rsp),%ebp
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%rdi,1),%edi
+ xorl 60(%rsp),%ebp
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 16(%rsp),%ebp
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%ebp
+ movl %ebp,28(%rsp)
+ movl 32(%rsp),%edx
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 40(%rsp),%edx
+ xorl %r11d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%rsi,1),%esi
+ xorl 0(%rsp),%edx
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ xorl 20(%rsp),%edx
+ roll $30,%r11d
+ addl %eax,%esi
+ roll $1,%edx
+ movl %edx,32(%rsp)
+ movl 36(%rsp),%ebp
+ movl %r11d,%eax
+ movl %r11d,%ebx
+ xorl 44(%rsp),%ebp
+ andl %r12d,%eax
+ movl %esi,%ecx
+ xorl 4(%rsp),%ebp
+ xorl %r12d,%ebx
+ leal -1894007588(%rdx,%r13,1),%r13d
+ roll $5,%ecx
+ xorl 24(%rsp),%ebp
+ addl %eax,%r13d
+ andl %edi,%ebx
+ roll $1,%ebp
+ addl %ebx,%r13d
+ roll $30,%edi
+ movl %ebp,36(%rsp)
+ addl %ecx,%r13d
+ movl 40(%rsp),%edx
+ movl %edi,%eax
+ movl %edi,%ebx
+ xorl 48(%rsp),%edx
+ andl %r11d,%eax
+ movl %r13d,%ecx
+ xorl 8(%rsp),%edx
+ xorl %r11d,%ebx
+ leal -1894007588(%rbp,%r12,1),%r12d
+ roll $5,%ecx
+ xorl 28(%rsp),%edx
+ addl %eax,%r12d
+ andl %esi,%ebx
+ roll $1,%edx
+ addl %ebx,%r12d
+ roll $30,%esi
+ movl %edx,40(%rsp)
+ addl %ecx,%r12d
+ movl 44(%rsp),%ebp
+ movl %esi,%eax
+ movl %esi,%ebx
+ xorl 52(%rsp),%ebp
+ andl %edi,%eax
+ movl %r12d,%ecx
+ xorl 12(%rsp),%ebp
+ xorl %edi,%ebx
+ leal -1894007588(%rdx,%r11,1),%r11d
+ roll $5,%ecx
+ xorl 32(%rsp),%ebp
+ addl %eax,%r11d
+ andl %r13d,%ebx
+ roll $1,%ebp
+ addl %ebx,%r11d
+ roll $30,%r13d
+ movl %ebp,44(%rsp)
+ addl %ecx,%r11d
+ movl 48(%rsp),%edx
+ movl %r13d,%eax
+ movl %r13d,%ebx
+ xorl 56(%rsp),%edx
+ andl %esi,%eax
+ movl %r11d,%ecx
+ xorl 16(%rsp),%edx
+ xorl %esi,%ebx
+ leal -1894007588(%rbp,%rdi,1),%edi
+ roll $5,%ecx
+ xorl 36(%rsp),%edx
+ addl %eax,%edi
+ andl %r12d,%ebx
+ roll $1,%edx
+ addl %ebx,%edi
+ roll $30,%r12d
+ movl %edx,48(%rsp)
+ addl %ecx,%edi
+ movl 52(%rsp),%ebp
+ movl %r12d,%eax
+ movl %r12d,%ebx
+ xorl 60(%rsp),%ebp
+ andl %r13d,%eax
+ movl %edi,%ecx
+ xorl 20(%rsp),%ebp
+ xorl %r13d,%ebx
+ leal -1894007588(%rdx,%rsi,1),%esi
+ roll $5,%ecx
+ xorl 40(%rsp),%ebp
+ addl %eax,%esi
+ andl %r11d,%ebx
+ roll $1,%ebp
+ addl %ebx,%esi
+ roll $30,%r11d
+ movl %ebp,52(%rsp)
+ addl %ecx,%esi
+ movl 56(%rsp),%edx
+ movl %r11d,%eax
+ movl %r11d,%ebx
+ xorl 0(%rsp),%edx
+ andl %r12d,%eax
+ movl %esi,%ecx
+ xorl 24(%rsp),%edx
+ xorl %r12d,%ebx
+ leal -1894007588(%rbp,%r13,1),%r13d
+ roll $5,%ecx
+ xorl 44(%rsp),%edx
+ addl %eax,%r13d
+ andl %edi,%ebx
+ roll $1,%edx
+ addl %ebx,%r13d
+ roll $30,%edi
+ movl %edx,56(%rsp)
+ addl %ecx,%r13d
+ movl 60(%rsp),%ebp
+ movl %edi,%eax
+ movl %edi,%ebx
+ xorl 4(%rsp),%ebp
+ andl %r11d,%eax
+ movl %r13d,%ecx
+ xorl 28(%rsp),%ebp
+ xorl %r11d,%ebx
+ leal -1894007588(%rdx,%r12,1),%r12d
+ roll $5,%ecx
+ xorl 48(%rsp),%ebp
+ addl %eax,%r12d
+ andl %esi,%ebx
+ roll $1,%ebp
+ addl %ebx,%r12d
+ roll $30,%esi
+ movl %ebp,60(%rsp)
+ addl %ecx,%r12d
+ movl 0(%rsp),%edx
+ movl %esi,%eax
+ movl %esi,%ebx
+ xorl 8(%rsp),%edx
+ andl %edi,%eax
+ movl %r12d,%ecx
+ xorl 32(%rsp),%edx
+ xorl %edi,%ebx
+ leal -1894007588(%rbp,%r11,1),%r11d
+ roll $5,%ecx
+ xorl 52(%rsp),%edx
+ addl %eax,%r11d
+ andl %r13d,%ebx
+ roll $1,%edx
+ addl %ebx,%r11d
+ roll $30,%r13d
+ movl %edx,0(%rsp)
+ addl %ecx,%r11d
+ movl 4(%rsp),%ebp
+ movl %r13d,%eax
+ movl %r13d,%ebx
+ xorl 12(%rsp),%ebp
+ andl %esi,%eax
+ movl %r11d,%ecx
+ xorl 36(%rsp),%ebp
+ xorl %esi,%ebx
+ leal -1894007588(%rdx,%rdi,1),%edi
+ roll $5,%ecx
+ xorl 56(%rsp),%ebp
+ addl %eax,%edi
+ andl %r12d,%ebx
+ roll $1,%ebp
+ addl %ebx,%edi
+ roll $30,%r12d
+ movl %ebp,4(%rsp)
+ addl %ecx,%edi
+ movl 8(%rsp),%edx
+ movl %r12d,%eax
+ movl %r12d,%ebx
+ xorl 16(%rsp),%edx
+ andl %r13d,%eax
+ movl %edi,%ecx
+ xorl 40(%rsp),%edx
+ xorl %r13d,%ebx
+ leal -1894007588(%rbp,%rsi,1),%esi
+ roll $5,%ecx
+ xorl 60(%rsp),%edx
+ addl %eax,%esi
+ andl %r11d,%ebx
+ roll $1,%edx
+ addl %ebx,%esi
+ roll $30,%r11d
+ movl %edx,8(%rsp)
+ addl %ecx,%esi
+ movl 12(%rsp),%ebp
+ movl %r11d,%eax
+ movl %r11d,%ebx
+ xorl 20(%rsp),%ebp
+ andl %r12d,%eax
+ movl %esi,%ecx
+ xorl 44(%rsp),%ebp
+ xorl %r12d,%ebx
+ leal -1894007588(%rdx,%r13,1),%r13d
+ roll $5,%ecx
+ xorl 0(%rsp),%ebp
+ addl %eax,%r13d
+ andl %edi,%ebx
+ roll $1,%ebp
+ addl %ebx,%r13d
+ roll $30,%edi
+ movl %ebp,12(%rsp)
+ addl %ecx,%r13d
+ movl 16(%rsp),%edx
+ movl %edi,%eax
+ movl %edi,%ebx
+ xorl 24(%rsp),%edx
+ andl %r11d,%eax
+ movl %r13d,%ecx
+ xorl 48(%rsp),%edx
+ xorl %r11d,%ebx
+ leal -1894007588(%rbp,%r12,1),%r12d
+ roll $5,%ecx
+ xorl 4(%rsp),%edx
+ addl %eax,%r12d
+ andl %esi,%ebx
+ roll $1,%edx
+ addl %ebx,%r12d
+ roll $30,%esi
+ movl %edx,16(%rsp)
+ addl %ecx,%r12d
+ movl 20(%rsp),%ebp
+ movl %esi,%eax
+ movl %esi,%ebx
+ xorl 28(%rsp),%ebp
+ andl %edi,%eax
+ movl %r12d,%ecx
+ xorl 52(%rsp),%ebp
+ xorl %edi,%ebx
+ leal -1894007588(%rdx,%r11,1),%r11d
+ roll $5,%ecx
+ xorl 8(%rsp),%ebp
+ addl %eax,%r11d
+ andl %r13d,%ebx
+ roll $1,%ebp
+ addl %ebx,%r11d
+ roll $30,%r13d
+ movl %ebp,20(%rsp)
+ addl %ecx,%r11d
+ movl 24(%rsp),%edx
+ movl %r13d,%eax
+ movl %r13d,%ebx
+ xorl 32(%rsp),%edx
+ andl %esi,%eax
+ movl %r11d,%ecx
+ xorl 56(%rsp),%edx
+ xorl %esi,%ebx
+ leal -1894007588(%rbp,%rdi,1),%edi
+ roll $5,%ecx
+ xorl 12(%rsp),%edx
+ addl %eax,%edi
+ andl %r12d,%ebx
+ roll $1,%edx
+ addl %ebx,%edi
+ roll $30,%r12d
+ movl %edx,24(%rsp)
+ addl %ecx,%edi
+ movl 28(%rsp),%ebp
+ movl %r12d,%eax
+ movl %r12d,%ebx
+ xorl 36(%rsp),%ebp
+ andl %r13d,%eax
+ movl %edi,%ecx
+ xorl 60(%rsp),%ebp
+ xorl %r13d,%ebx
+ leal -1894007588(%rdx,%rsi,1),%esi
+ roll $5,%ecx
+ xorl 16(%rsp),%ebp
+ addl %eax,%esi
+ andl %r11d,%ebx
+ roll $1,%ebp
+ addl %ebx,%esi
+ roll $30,%r11d
+ movl %ebp,28(%rsp)
+ addl %ecx,%esi
+ movl 32(%rsp),%edx
+ movl %r11d,%eax
+ movl %r11d,%ebx
+ xorl 40(%rsp),%edx
+ andl %r12d,%eax
+ movl %esi,%ecx
+ xorl 0(%rsp),%edx
+ xorl %r12d,%ebx
+ leal -1894007588(%rbp,%r13,1),%r13d
+ roll $5,%ecx
+ xorl 20(%rsp),%edx
+ addl %eax,%r13d
+ andl %edi,%ebx
+ roll $1,%edx
+ addl %ebx,%r13d
+ roll $30,%edi
+ movl %edx,32(%rsp)
+ addl %ecx,%r13d
+ movl 36(%rsp),%ebp
+ movl %edi,%eax
+ movl %edi,%ebx
+ xorl 44(%rsp),%ebp
+ andl %r11d,%eax
+ movl %r13d,%ecx
+ xorl 4(%rsp),%ebp
+ xorl %r11d,%ebx
+ leal -1894007588(%rdx,%r12,1),%r12d
+ roll $5,%ecx
+ xorl 24(%rsp),%ebp
+ addl %eax,%r12d
+ andl %esi,%ebx
+ roll $1,%ebp
+ addl %ebx,%r12d
+ roll $30,%esi
+ movl %ebp,36(%rsp)
+ addl %ecx,%r12d
+ movl 40(%rsp),%edx
+ movl %esi,%eax
+ movl %esi,%ebx
+ xorl 48(%rsp),%edx
+ andl %edi,%eax
+ movl %r12d,%ecx
+ xorl 8(%rsp),%edx
+ xorl %edi,%ebx
+ leal -1894007588(%rbp,%r11,1),%r11d
+ roll $5,%ecx
+ xorl 28(%rsp),%edx
+ addl %eax,%r11d
+ andl %r13d,%ebx
+ roll $1,%edx
+ addl %ebx,%r11d
+ roll $30,%r13d
+ movl %edx,40(%rsp)
+ addl %ecx,%r11d
+ movl 44(%rsp),%ebp
+ movl %r13d,%eax
+ movl %r13d,%ebx
+ xorl 52(%rsp),%ebp
+ andl %esi,%eax
+ movl %r11d,%ecx
+ xorl 12(%rsp),%ebp
+ xorl %esi,%ebx
+ leal -1894007588(%rdx,%rdi,1),%edi
+ roll $5,%ecx
+ xorl 32(%rsp),%ebp
+ addl %eax,%edi
+ andl %r12d,%ebx
+ roll $1,%ebp
+ addl %ebx,%edi
+ roll $30,%r12d
+ movl %ebp,44(%rsp)
+ addl %ecx,%edi
+ movl 48(%rsp),%edx
+ movl %r12d,%eax
+ movl %r12d,%ebx
+ xorl 56(%rsp),%edx
+ andl %r13d,%eax
+ movl %edi,%ecx
+ xorl 16(%rsp),%edx
+ xorl %r13d,%ebx
+ leal -1894007588(%rbp,%rsi,1),%esi
+ roll $5,%ecx
+ xorl 36(%rsp),%edx
+ addl %eax,%esi
+ andl %r11d,%ebx
+ roll $1,%edx
+ addl %ebx,%esi
+ roll $30,%r11d
+ movl %edx,48(%rsp)
+ addl %ecx,%esi
+ movl 52(%rsp),%ebp
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 60(%rsp),%ebp
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%r13,1),%r13d
+ xorl 20(%rsp),%ebp
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 40(%rsp),%ebp
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%ebp
+ movl %ebp,52(%rsp)
+ movl 56(%rsp),%edx
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 0(%rsp),%edx
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%r12,1),%r12d
+ xorl 24(%rsp),%edx
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 44(%rsp),%edx
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%edx
+ movl %edx,56(%rsp)
+ movl 60(%rsp),%ebp
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 4(%rsp),%ebp
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%r11,1),%r11d
+ xorl 28(%rsp),%ebp
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 48(%rsp),%ebp
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%ebp
+ movl %ebp,60(%rsp)
+ movl 0(%rsp),%edx
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 8(%rsp),%edx
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%rdi,1),%edi
+ xorl 32(%rsp),%edx
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 52(%rsp),%edx
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%edx
+ movl %edx,0(%rsp)
+ movl 4(%rsp),%ebp
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 12(%rsp),%ebp
+ xorl %r11d,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%rsi,1),%esi
+ xorl 36(%rsp),%ebp
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ xorl 56(%rsp),%ebp
+ roll $30,%r11d
+ addl %eax,%esi
+ roll $1,%ebp
+ movl %ebp,4(%rsp)
+ movl 8(%rsp),%edx
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 16(%rsp),%edx
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%r13,1),%r13d
+ xorl 40(%rsp),%edx
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 60(%rsp),%edx
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%edx
+ movl %edx,8(%rsp)
+ movl 12(%rsp),%ebp
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 20(%rsp),%ebp
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%r12,1),%r12d
+ xorl 44(%rsp),%ebp
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 0(%rsp),%ebp
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%ebp
+ movl %ebp,12(%rsp)
+ movl 16(%rsp),%edx
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 24(%rsp),%edx
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%r11,1),%r11d
+ xorl 48(%rsp),%edx
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 4(%rsp),%edx
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%edx
+ movl %edx,16(%rsp)
+ movl 20(%rsp),%ebp
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 28(%rsp),%ebp
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%rdi,1),%edi
+ xorl 52(%rsp),%ebp
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 8(%rsp),%ebp
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%ebp
+ movl %ebp,20(%rsp)
+ movl 24(%rsp),%edx
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 32(%rsp),%edx
+ xorl %r11d,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%rsi,1),%esi
+ xorl 56(%rsp),%edx
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ xorl 12(%rsp),%edx
+ roll $30,%r11d
+ addl %eax,%esi
+ roll $1,%edx
+ movl %edx,24(%rsp)
+ movl 28(%rsp),%ebp
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 36(%rsp),%ebp
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%r13,1),%r13d
+ xorl 60(%rsp),%ebp
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 16(%rsp),%ebp
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%ebp
+ movl %ebp,28(%rsp)
+ movl 32(%rsp),%edx
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 40(%rsp),%edx
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%r12,1),%r12d
+ xorl 0(%rsp),%edx
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 20(%rsp),%edx
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%edx
+ movl %edx,32(%rsp)
+ movl 36(%rsp),%ebp
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 44(%rsp),%ebp
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%r11,1),%r11d
+ xorl 4(%rsp),%ebp
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 24(%rsp),%ebp
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%ebp
+ movl %ebp,36(%rsp)
+ movl 40(%rsp),%edx
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 48(%rsp),%edx
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%rdi,1),%edi
+ xorl 8(%rsp),%edx
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 28(%rsp),%edx
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%edx
+ movl %edx,40(%rsp)
+ movl 44(%rsp),%ebp
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 52(%rsp),%ebp
+ xorl %r11d,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%rsi,1),%esi
+ xorl 12(%rsp),%ebp
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ xorl 32(%rsp),%ebp
+ roll $30,%r11d
+ addl %eax,%esi
+ roll $1,%ebp
+ movl %ebp,44(%rsp)
+ movl 48(%rsp),%edx
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 56(%rsp),%edx
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%r13,1),%r13d
+ xorl 16(%rsp),%edx
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 36(%rsp),%edx
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%edx
+ movl %edx,48(%rsp)
+ movl 52(%rsp),%ebp
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 60(%rsp),%ebp
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%r12,1),%r12d
+ xorl 20(%rsp),%ebp
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 40(%rsp),%ebp
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%ebp
+ movl 56(%rsp),%edx
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 0(%rsp),%edx
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%r11,1),%r11d
+ xorl 24(%rsp),%edx
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 44(%rsp),%edx
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%edx
+ movl 60(%rsp),%ebp
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 4(%rsp),%ebp
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%rdi,1),%edi
+ xorl 28(%rsp),%ebp
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 48(%rsp),%ebp
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%ebp
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl %r11d,%eax
+ leal -899497514(%rbp,%rsi,1),%esi
+ roll $5,%ecx
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ roll $30,%r11d
+ addl %eax,%esi
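+/* Add the five working registers back into the hash state at (%r8), advance the input pointer and loop while blocks remain. */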
+ addl 0(%r8),%esi
+ addl 4(%r8),%edi
+ addl 8(%r8),%r11d
+ addl 12(%r8),%r12d
+ addl 16(%r8),%r13d
+ movl %esi,0(%r8)
+ movl %edi,4(%r8)
+ movl %r11d,8(%r8)
+ movl %r12d,12(%r8)
+ movl %r13d,16(%r8)
+
+ subq $1,%r10
+ leaq 64(%r9),%r9
+ jnz .Lloop
+
+ movq 64(%rsp),%rsi
+ movq (%rsi),%r13
+ movq 8(%rsi),%r12
+ movq 16(%rsi),%rbp
+ movq 24(%rsi),%rbx
+ leaq 32(%rsi),%rsp
+.Lepilogue:
+ retq
+.size sha1_block_data_order,.-sha1_block_data_order
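+/* SSSE3 variant: the message schedule is expanded in XMM registers (palignr/pxor/psrld) in parallel with the scalar rounds. */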
+.type sha1_block_data_order_ssse3,@function
+.align 16
+sha1_block_data_order_ssse3:
+_ssse3_shortcut:
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ leaq -64(%rsp),%rsp
+ movq %rdi,%r8
+ movq %rsi,%r9
+ movq %rdx,%r10
+
+ shlq $6,%r10
+ addq %r9,%r10
+ leaq K_XX_XX(%rip),%r11
+
+ movl 0(%r8),%eax
+ movl 4(%r8),%ebx
+ movl 8(%r8),%ecx
+ movl 12(%r8),%edx
+ movl %ebx,%esi
+ movl 16(%r8),%ebp
+
+ movdqa 64(%r11),%xmm6
+ movdqa 0(%r11),%xmm9
+ movdqu 0(%r9),%xmm0
+ movdqu 16(%r9),%xmm1
+ movdqu 32(%r9),%xmm2
+ movdqu 48(%r9),%xmm3
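+/* The .byte sequences below are hand-assembled pshufb instructions (66 0F 38 00) that byte-swap each 16-byte chunk of input using the mask held in %xmm6. */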
+.byte 102,15,56,0,198
+ addq $64,%r9
+.byte 102,15,56,0,206
+.byte 102,15,56,0,214
+.byte 102,15,56,0,222
+ paddd %xmm9,%xmm0
+ paddd %xmm9,%xmm1
+ paddd %xmm9,%xmm2
+ movdqa %xmm0,0(%rsp)
+ psubd %xmm9,%xmm0
+ movdqa %xmm1,16(%rsp)
+ psubd %xmm9,%xmm1
+ movdqa %xmm2,32(%rsp)
+ psubd %xmm9,%xmm2
+ jmp .Loop_ssse3
+.align 16
+.Loop_ssse3:
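+/* One 64-byte block per iteration: 80 scalar rounds interleaved with SSE schedule expansion; W[t]+K values are staged in the 64-byte area at (%rsp). */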
+ movdqa %xmm1,%xmm4
+ addl 0(%rsp),%ebp
+ xorl %edx,%ecx
+ movdqa %xmm3,%xmm8
+.byte 102,15,58,15,224,8
+ movl %eax,%edi
+ roll $5,%eax
+ paddd %xmm3,%xmm9
+ andl %ecx,%esi
+ xorl %edx,%ecx
+ psrldq $4,%xmm8
+ xorl %edx,%esi
+ addl %eax,%ebp
+ pxor %xmm0,%xmm4
+ rorl $2,%ebx
+ addl %esi,%ebp
+ pxor %xmm2,%xmm8
+ addl 4(%rsp),%edx
+ xorl %ecx,%ebx
+ movl %ebp,%esi
+ roll $5,%ebp
+ pxor %xmm8,%xmm4
+ andl %ebx,%edi
+ xorl %ecx,%ebx
+ movdqa %xmm9,48(%rsp)
+ xorl %ecx,%edi
+ addl %ebp,%edx
+ movdqa %xmm4,%xmm10
+ movdqa %xmm4,%xmm8
+ rorl $7,%eax
+ addl %edi,%edx
+ addl 8(%rsp),%ecx
+ xorl %ebx,%eax
+ pslldq $12,%xmm10
+ paddd %xmm4,%xmm4
+ movl %edx,%edi
+ roll $5,%edx
+ andl %eax,%esi
+ xorl %ebx,%eax
+ psrld $31,%xmm8
+ xorl %ebx,%esi
+ addl %edx,%ecx
+ movdqa %xmm10,%xmm9
+ rorl $7,%ebp
+ addl %esi,%ecx
+ psrld $30,%xmm10
+ por %xmm8,%xmm4
+ addl 12(%rsp),%ebx
+ xorl %eax,%ebp
+ movl %ecx,%esi
+ roll $5,%ecx
+ pslld $2,%xmm9
+ pxor %xmm10,%xmm4
+ andl %ebp,%edi
+ xorl %eax,%ebp
+ movdqa 0(%r11),%xmm10
+ xorl %eax,%edi
+ addl %ecx,%ebx
+ pxor %xmm9,%xmm4
+ rorl $7,%edx
+ addl %edi,%ebx
+ movdqa %xmm2,%xmm5
+ addl 16(%rsp),%eax
+ xorl %ebp,%edx
+ movdqa %xmm4,%xmm9
+.byte 102,15,58,15,233,8
+ movl %ebx,%edi
+ roll $5,%ebx
+ paddd %xmm4,%xmm10
+ andl %edx,%esi
+ xorl %ebp,%edx
+ psrldq $4,%xmm9
+ xorl %ebp,%esi
+ addl %ebx,%eax
+ pxor %xmm1,%xmm5
+ rorl $7,%ecx
+ addl %esi,%eax
+ pxor %xmm3,%xmm9
+ addl 20(%rsp),%ebp
+ xorl %edx,%ecx
+ movl %eax,%esi
+ roll $5,%eax
+ pxor %xmm9,%xmm5
+ andl %ecx,%edi
+ xorl %edx,%ecx
+ movdqa %xmm10,0(%rsp)
+ xorl %edx,%edi
+ addl %eax,%ebp
+ movdqa %xmm5,%xmm8
+ movdqa %xmm5,%xmm9
+ rorl $7,%ebx
+ addl %edi,%ebp
+ addl 24(%rsp),%edx
+ xorl %ecx,%ebx
+ pslldq $12,%xmm8
+ paddd %xmm5,%xmm5
+ movl %ebp,%edi
+ roll $5,%ebp
+ andl %ebx,%esi
+ xorl %ecx,%ebx
+ psrld $31,%xmm9
+ xorl %ecx,%esi
+ addl %ebp,%edx
+ movdqa %xmm8,%xmm10
+ rorl $7,%eax
+ addl %esi,%edx
+ psrld $30,%xmm8
+ por %xmm9,%xmm5
+ addl 28(%rsp),%ecx
+ xorl %ebx,%eax
+ movl %edx,%esi
+ roll $5,%edx
+ pslld $2,%xmm10
+ pxor %xmm8,%xmm5
+ andl %eax,%edi
+ xorl %ebx,%eax
+ movdqa 16(%r11),%xmm8
+ xorl %ebx,%edi
+ addl %edx,%ecx
+ pxor %xmm10,%xmm5
+ rorl $7,%ebp
+ addl %edi,%ecx
+ movdqa %xmm3,%xmm6
+ addl 32(%rsp),%ebx
+ xorl %eax,%ebp
+ movdqa %xmm5,%xmm10
+.byte 102,15,58,15,242,8
+ movl %ecx,%edi
+ roll $5,%ecx
+ paddd %xmm5,%xmm8
+ andl %ebp,%esi
+ xorl %eax,%ebp
+ psrldq $4,%xmm10
+ xorl %eax,%esi
+ addl %ecx,%ebx
+ pxor %xmm2,%xmm6
+ rorl $7,%edx
+ addl %esi,%ebx
+ pxor %xmm4,%xmm10
+ addl 36(%rsp),%eax
+ xorl %ebp,%edx
+ movl %ebx,%esi
+ roll $5,%ebx
+ pxor %xmm10,%xmm6
+ andl %edx,%edi
+ xorl %ebp,%edx
+ movdqa %xmm8,16(%rsp)
+ xorl %ebp,%edi
+ addl %ebx,%eax
+ movdqa %xmm6,%xmm9
+ movdqa %xmm6,%xmm10
+ rorl $7,%ecx
+ addl %edi,%eax
+ addl 40(%rsp),%ebp
+ xorl %edx,%ecx
+ pslldq $12,%xmm9
+ paddd %xmm6,%xmm6
+ movl %eax,%edi
+ roll $5,%eax
+ andl %ecx,%esi
+ xorl %edx,%ecx
+ psrld $31,%xmm10
+ xorl %edx,%esi
+ addl %eax,%ebp
+ movdqa %xmm9,%xmm8
+ rorl $7,%ebx
+ addl %esi,%ebp
+ psrld $30,%xmm9
+ por %xmm10,%xmm6
+ addl 44(%rsp),%edx
+ xorl %ecx,%ebx
+ movl %ebp,%esi
+ roll $5,%ebp
+ pslld $2,%xmm8
+ pxor %xmm9,%xmm6
+ andl %ebx,%edi
+ xorl %ecx,%ebx
+ movdqa 16(%r11),%xmm9
+ xorl %ecx,%edi
+ addl %ebp,%edx
+ pxor %xmm8,%xmm6
+ rorl $7,%eax
+ addl %edi,%edx
+ movdqa %xmm4,%xmm7
+ addl 48(%rsp),%ecx
+ xorl %ebx,%eax
+ movdqa %xmm6,%xmm8
+.byte 102,15,58,15,251,8
+ movl %edx,%edi
+ roll $5,%edx
+ paddd %xmm6,%xmm9
+ andl %eax,%esi
+ xorl %ebx,%eax
+ psrldq $4,%xmm8
+ xorl %ebx,%esi
+ addl %edx,%ecx
+ pxor %xmm3,%xmm7
+ rorl $7,%ebp
+ addl %esi,%ecx
+ pxor %xmm5,%xmm8
+ addl 52(%rsp),%ebx
+ xorl %eax,%ebp
+ movl %ecx,%esi
+ roll $5,%ecx
+ pxor %xmm8,%xmm7
+ andl %ebp,%edi
+ xorl %eax,%ebp
+ movdqa %xmm9,32(%rsp)
+ xorl %eax,%edi
+ addl %ecx,%ebx
+ movdqa %xmm7,%xmm10
+ movdqa %xmm7,%xmm8
+ rorl $7,%edx
+ addl %edi,%ebx
+ addl 56(%rsp),%eax
+ xorl %ebp,%edx
+ pslldq $12,%xmm10
+ paddd %xmm7,%xmm7
+ movl %ebx,%edi
+ roll $5,%ebx
+ andl %edx,%esi
+ xorl %ebp,%edx
+ psrld $31,%xmm8
+ xorl %ebp,%esi
+ addl %ebx,%eax
+ movdqa %xmm10,%xmm9
+ rorl $7,%ecx
+ addl %esi,%eax
+ psrld $30,%xmm10
+ por %xmm8,%xmm7
+ addl 60(%rsp),%ebp
+ xorl %edx,%ecx
+ movl %eax,%esi
+ roll $5,%eax
+ pslld $2,%xmm9
+ pxor %xmm10,%xmm7
+ andl %ecx,%edi
+ xorl %edx,%ecx
+ movdqa 16(%r11),%xmm10
+ xorl %edx,%edi
+ addl %eax,%ebp
+ pxor %xmm9,%xmm7
+ rorl $7,%ebx
+ addl %edi,%ebp
+ movdqa %xmm7,%xmm9
+ addl 0(%rsp),%edx
+ pxor %xmm4,%xmm0
+.byte 102,68,15,58,15,206,8
+ xorl %ecx,%ebx
+ movl %ebp,%edi
+ roll $5,%ebp
+ pxor %xmm1,%xmm0
+ andl %ebx,%esi
+ xorl %ecx,%ebx
+ movdqa %xmm10,%xmm8
+ paddd %xmm7,%xmm10
+ xorl %ecx,%esi
+ addl %ebp,%edx
+ pxor %xmm9,%xmm0
+ rorl $7,%eax
+ addl %esi,%edx
+ addl 4(%rsp),%ecx
+ xorl %ebx,%eax
+ movdqa %xmm0,%xmm9
+ movdqa %xmm10,48(%rsp)
+ movl %edx,%esi
+ roll $5,%edx
+ andl %eax,%edi
+ xorl %ebx,%eax
+ pslld $2,%xmm0
+ xorl %ebx,%edi
+ addl %edx,%ecx
+ psrld $30,%xmm9
+ rorl $7,%ebp
+ addl %edi,%ecx
+ addl 8(%rsp),%ebx
+ xorl %eax,%ebp
+ movl %ecx,%edi
+ roll $5,%ecx
+ por %xmm9,%xmm0
+ andl %ebp,%esi
+ xorl %eax,%ebp
+ movdqa %xmm0,%xmm10
+ xorl %eax,%esi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %esi,%ebx
+ addl 12(%rsp),%eax
+ xorl %ebp,%edx
+ movl %ebx,%esi
+ roll $5,%ebx
+ andl %edx,%edi
+ xorl %ebp,%edx
+ xorl %ebp,%edi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %edi,%eax
+ addl 16(%rsp),%ebp
+ pxor %xmm5,%xmm1
+.byte 102,68,15,58,15,215,8
+ xorl %edx,%esi
+ movl %eax,%edi
+ roll $5,%eax
+ pxor %xmm2,%xmm1
+ xorl %ecx,%esi
+ addl %eax,%ebp
+ movdqa %xmm8,%xmm9
+ paddd %xmm0,%xmm8
+ rorl $7,%ebx
+ addl %esi,%ebp
+ pxor %xmm10,%xmm1
+ addl 20(%rsp),%edx
+ xorl %ecx,%edi
+ movl %ebp,%esi
+ roll $5,%ebp
+ movdqa %xmm1,%xmm10
+ movdqa %xmm8,0(%rsp)
+ xorl %ebx,%edi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %edi,%edx
+ pslld $2,%xmm1
+ addl 24(%rsp),%ecx
+ xorl %ebx,%esi
+ psrld $30,%xmm10
+ movl %edx,%edi
+ roll $5,%edx
+ xorl %eax,%esi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %esi,%ecx
+ por %xmm10,%xmm1
+ addl 28(%rsp),%ebx
+ xorl %eax,%edi
+ movdqa %xmm1,%xmm8
+ movl %ecx,%esi
+ roll $5,%ecx
+ xorl %ebp,%edi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %edi,%ebx
+ addl 32(%rsp),%eax
+ pxor %xmm6,%xmm2
+.byte 102,68,15,58,15,192,8
+ xorl %ebp,%esi
+ movl %ebx,%edi
+ roll $5,%ebx
+ pxor %xmm3,%xmm2
+ xorl %edx,%esi
+ addl %ebx,%eax
+ movdqa 32(%r11),%xmm10
+ paddd %xmm1,%xmm9
+ rorl $7,%ecx
+ addl %esi,%eax
+ pxor %xmm8,%xmm2
+ addl 36(%rsp),%ebp
+ xorl %edx,%edi
+ movl %eax,%esi
+ roll $5,%eax
+ movdqa %xmm2,%xmm8
+ movdqa %xmm9,16(%rsp)
+ xorl %ecx,%edi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %edi,%ebp
+ pslld $2,%xmm2
+ addl 40(%rsp),%edx
+ xorl %ecx,%esi
+ psrld $30,%xmm8
+ movl %ebp,%edi
+ roll $5,%ebp
+ xorl %ebx,%esi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %esi,%edx
+ por %xmm8,%xmm2
+ addl 44(%rsp),%ecx
+ xorl %ebx,%edi
+ movdqa %xmm2,%xmm9
+ movl %edx,%esi
+ roll $5,%edx
+ xorl %eax,%edi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %edi,%ecx
+ addl 48(%rsp),%ebx
+ pxor %xmm7,%xmm3
+.byte 102,68,15,58,15,201,8
+ xorl %eax,%esi
+ movl %ecx,%edi
+ roll $5,%ecx
+ pxor %xmm4,%xmm3
+ xorl %ebp,%esi
+ addl %ecx,%ebx
+ movdqa %xmm10,%xmm8
+ paddd %xmm2,%xmm10
+ rorl $7,%edx
+ addl %esi,%ebx
+ pxor %xmm9,%xmm3
+ addl 52(%rsp),%eax
+ xorl %ebp,%edi
+ movl %ebx,%esi
+ roll $5,%ebx
+ movdqa %xmm3,%xmm9
+ movdqa %xmm10,32(%rsp)
+ xorl %edx,%edi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %edi,%eax
+ pslld $2,%xmm3
+ addl 56(%rsp),%ebp
+ xorl %edx,%esi
+ psrld $30,%xmm9
+ movl %eax,%edi
+ roll $5,%eax
+ xorl %ecx,%esi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %esi,%ebp
+ por %xmm9,%xmm3
+ addl 60(%rsp),%edx
+ xorl %ecx,%edi
+ movdqa %xmm3,%xmm10
+ movl %ebp,%esi
+ roll $5,%ebp
+ xorl %ebx,%edi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %edi,%edx
+ addl 0(%rsp),%ecx
+ pxor %xmm0,%xmm4
+.byte 102,68,15,58,15,210,8
+ xorl %ebx,%esi
+ movl %edx,%edi
+ roll $5,%edx
+ pxor %xmm5,%xmm4
+ xorl %eax,%esi
+ addl %edx,%ecx
+ movdqa %xmm8,%xmm9
+ paddd %xmm3,%xmm8
+ rorl $7,%ebp
+ addl %esi,%ecx
+ pxor %xmm10,%xmm4
+ addl 4(%rsp),%ebx
+ xorl %eax,%edi
+ movl %ecx,%esi
+ roll $5,%ecx
+ movdqa %xmm4,%xmm10
+ movdqa %xmm8,48(%rsp)
+ xorl %ebp,%edi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %edi,%ebx
+ pslld $2,%xmm4
+ addl 8(%rsp),%eax
+ xorl %ebp,%esi
+ psrld $30,%xmm10
+ movl %ebx,%edi
+ roll $5,%ebx
+ xorl %edx,%esi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %esi,%eax
+ por %xmm10,%xmm4
+ addl 12(%rsp),%ebp
+ xorl %edx,%edi
+ movdqa %xmm4,%xmm8
+ movl %eax,%esi
+ roll $5,%eax
+ xorl %ecx,%edi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %edi,%ebp
+ addl 16(%rsp),%edx
+ pxor %xmm1,%xmm5
+.byte 102,68,15,58,15,195,8
+ xorl %ecx,%esi
+ movl %ebp,%edi
+ roll $5,%ebp
+ pxor %xmm6,%xmm5
+ xorl %ebx,%esi
+ addl %ebp,%edx
+ movdqa %xmm9,%xmm10
+ paddd %xmm4,%xmm9
+ rorl $7,%eax
+ addl %esi,%edx
+ pxor %xmm8,%xmm5
+ addl 20(%rsp),%ecx
+ xorl %ebx,%edi
+ movl %edx,%esi
+ roll $5,%edx
+ movdqa %xmm5,%xmm8
+ movdqa %xmm9,0(%rsp)
+ xorl %eax,%edi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %edi,%ecx
+ pslld $2,%xmm5
+ addl 24(%rsp),%ebx
+ xorl %eax,%esi
+ psrld $30,%xmm8
+ movl %ecx,%edi
+ roll $5,%ecx
+ xorl %ebp,%esi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %esi,%ebx
+ por %xmm8,%xmm5
+ addl 28(%rsp),%eax
+ xorl %ebp,%edi
+ movdqa %xmm5,%xmm9
+ movl %ebx,%esi
+ roll $5,%ebx
+ xorl %edx,%edi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %edi,%eax
+ movl %ecx,%edi
+ pxor %xmm2,%xmm6
+.byte 102,68,15,58,15,204,8
+ xorl %edx,%ecx
+ addl 32(%rsp),%ebp
+ andl %edx,%edi
+ pxor %xmm7,%xmm6
+ andl %ecx,%esi
+ rorl $7,%ebx
+ movdqa %xmm10,%xmm8
+ paddd %xmm5,%xmm10
+ addl %edi,%ebp
+ movl %eax,%edi
+ pxor %xmm9,%xmm6
+ roll $5,%eax
+ addl %esi,%ebp
+ xorl %edx,%ecx
+ addl %eax,%ebp
+ movdqa %xmm6,%xmm9
+ movdqa %xmm10,16(%rsp)
+ movl %ebx,%esi
+ xorl %ecx,%ebx
+ addl 36(%rsp),%edx
+ andl %ecx,%esi
+ pslld $2,%xmm6
+ andl %ebx,%edi
+ rorl $7,%eax
+ psrld $30,%xmm9
+ addl %esi,%edx
+ movl %ebp,%esi
+ roll $5,%ebp
+ addl %edi,%edx
+ xorl %ecx,%ebx
+ addl %ebp,%edx
+ por %xmm9,%xmm6
+ movl %eax,%edi
+ xorl %ebx,%eax
+ movdqa %xmm6,%xmm10
+ addl 40(%rsp),%ecx
+ andl %ebx,%edi
+ andl %eax,%esi
+ rorl $7,%ebp
+ addl %edi,%ecx
+ movl %edx,%edi
+ roll $5,%edx
+ addl %esi,%ecx
+ xorl %ebx,%eax
+ addl %edx,%ecx
+ movl %ebp,%esi
+ xorl %eax,%ebp
+ addl 44(%rsp),%ebx
+ andl %eax,%esi
+ andl %ebp,%edi
+ rorl $7,%edx
+ addl %esi,%ebx
+ movl %ecx,%esi
+ roll $5,%ecx
+ addl %edi,%ebx
+ xorl %eax,%ebp
+ addl %ecx,%ebx
+ movl %edx,%edi
+ pxor %xmm3,%xmm7
+.byte 102,68,15,58,15,213,8
+ xorl %ebp,%edx
+ addl 48(%rsp),%eax
+ andl %ebp,%edi
+ pxor %xmm0,%xmm7
+ andl %edx,%esi
+ rorl $7,%ecx
+ movdqa 48(%r11),%xmm9
+ paddd %xmm6,%xmm8
+ addl %edi,%eax
+ movl %ebx,%edi
+ pxor %xmm10,%xmm7
+ roll $5,%ebx
+ addl %esi,%eax
+ xorl %ebp,%edx
+ addl %ebx,%eax
+ movdqa %xmm7,%xmm10
+ movdqa %xmm8,32(%rsp)
+ movl %ecx,%esi
+ xorl %edx,%ecx
+ addl 52(%rsp),%ebp
+ andl %edx,%esi
+ pslld $2,%xmm7
+ andl %ecx,%edi
+ rorl $7,%ebx
+ psrld $30,%xmm10
+ addl %esi,%ebp
+ movl %eax,%esi
+ roll $5,%eax
+ addl %edi,%ebp
+ xorl %edx,%ecx
+ addl %eax,%ebp
+ por %xmm10,%xmm7
+ movl %ebx,%edi
+ xorl %ecx,%ebx
+ movdqa %xmm7,%xmm8
+ addl 56(%rsp),%edx
+ andl %ecx,%edi
+ andl %ebx,%esi
+ rorl $7,%eax
+ addl %edi,%edx
+ movl %ebp,%edi
+ roll $5,%ebp
+ addl %esi,%edx
+ xorl %ecx,%ebx
+ addl %ebp,%edx
+ movl %eax,%esi
+ xorl %ebx,%eax
+ addl 60(%rsp),%ecx
+ andl %ebx,%esi
+ andl %eax,%edi
+ rorl $7,%ebp
+ addl %esi,%ecx
+ movl %edx,%esi
+ roll $5,%edx
+ addl %edi,%ecx
+ xorl %ebx,%eax
+ addl %edx,%ecx
+ movl %ebp,%edi
+ pxor %xmm4,%xmm0
+.byte 102,68,15,58,15,198,8
+ xorl %eax,%ebp
+ addl 0(%rsp),%ebx
+ andl %eax,%edi
+ pxor %xmm1,%xmm0
+ andl %ebp,%esi
+ rorl $7,%edx
+ movdqa %xmm9,%xmm10
+ paddd %xmm7,%xmm9
+ addl %edi,%ebx
+ movl %ecx,%edi
+ pxor %xmm8,%xmm0
+ roll $5,%ecx
+ addl %esi,%ebx
+ xorl %eax,%ebp
+ addl %ecx,%ebx
+ movdqa %xmm0,%xmm8
+ movdqa %xmm9,48(%rsp)
+ movl %edx,%esi
+ xorl %ebp,%edx
+ addl 4(%rsp),%eax
+ andl %ebp,%esi
+ pslld $2,%xmm0
+ andl %edx,%edi
+ rorl $7,%ecx
+ psrld $30,%xmm8
+ addl %esi,%eax
+ movl %ebx,%esi
+ roll $5,%ebx
+ addl %edi,%eax
+ xorl %ebp,%edx
+ addl %ebx,%eax
+ por %xmm8,%xmm0
+ movl %ecx,%edi
+ xorl %edx,%ecx
+ movdqa %xmm0,%xmm9
+ addl 8(%rsp),%ebp
+ andl %edx,%edi
+ andl %ecx,%esi
+ rorl $7,%ebx
+ addl %edi,%ebp
+ movl %eax,%edi
+ roll $5,%eax
+ addl %esi,%ebp
+ xorl %edx,%ecx
+ addl %eax,%ebp
+ movl %ebx,%esi
+ xorl %ecx,%ebx
+ addl 12(%rsp),%edx
+ andl %ecx,%esi
+ andl %ebx,%edi
+ rorl $7,%eax
+ addl %esi,%edx
+ movl %ebp,%esi
+ roll $5,%ebp
+ addl %edi,%edx
+ xorl %ecx,%ebx
+ addl %ebp,%edx
+ movl %eax,%edi
+ pxor %xmm5,%xmm1
+.byte 102,68,15,58,15,207,8
+ xorl %ebx,%eax
+ addl 16(%rsp),%ecx
+ andl %ebx,%edi
+ pxor %xmm2,%xmm1
+ andl %eax,%esi
+ rorl $7,%ebp
+ movdqa %xmm10,%xmm8
+ paddd %xmm0,%xmm10
+ addl %edi,%ecx
+ movl %edx,%edi
+ pxor %xmm9,%xmm1
+ roll $5,%edx
+ addl %esi,%ecx
+ xorl %ebx,%eax
+ addl %edx,%ecx
+ movdqa %xmm1,%xmm9
+ movdqa %xmm10,0(%rsp)
+ movl %ebp,%esi
+ xorl %eax,%ebp
+ addl 20(%rsp),%ebx
+ andl %eax,%esi
+ pslld $2,%xmm1
+ andl %ebp,%edi
+ rorl $7,%edx
+ psrld $30,%xmm9
+ addl %esi,%ebx
+ movl %ecx,%esi
+ roll $5,%ecx
+ addl %edi,%ebx
+ xorl %eax,%ebp
+ addl %ecx,%ebx
+ por %xmm9,%xmm1
+ movl %edx,%edi
+ xorl %ebp,%edx
+ movdqa %xmm1,%xmm10
+ addl 24(%rsp),%eax
+ andl %ebp,%edi
+ andl %edx,%esi
+ rorl $7,%ecx
+ addl %edi,%eax
+ movl %ebx,%edi
+ roll $5,%ebx
+ addl %esi,%eax
+ xorl %ebp,%edx
+ addl %ebx,%eax
+ movl %ecx,%esi
+ xorl %edx,%ecx
+ addl 28(%rsp),%ebp
+ andl %edx,%esi
+ andl %ecx,%edi
+ rorl $7,%ebx
+ addl %esi,%ebp
+ movl %eax,%esi
+ roll $5,%eax
+ addl %edi,%ebp
+ xorl %edx,%ecx
+ addl %eax,%ebp
+ movl %ebx,%edi
+ pxor %xmm6,%xmm2
+.byte 102,68,15,58,15,208,8
+ xorl %ecx,%ebx
+ addl 32(%rsp),%edx
+ andl %ecx,%edi
+ pxor %xmm3,%xmm2
+ andl %ebx,%esi
+ rorl $7,%eax
+ movdqa %xmm8,%xmm9
+ paddd %xmm1,%xmm8
+ addl %edi,%edx
+ movl %ebp,%edi
+ pxor %xmm10,%xmm2
+ roll $5,%ebp
+ addl %esi,%edx
+ xorl %ecx,%ebx
+ addl %ebp,%edx
+ movdqa %xmm2,%xmm10
+ movdqa %xmm8,16(%rsp)
+ movl %eax,%esi
+ xorl %ebx,%eax
+ addl 36(%rsp),%ecx
+ andl %ebx,%esi
+ pslld $2,%xmm2
+ andl %eax,%edi
+ rorl $7,%ebp
+ psrld $30,%xmm10
+ addl %esi,%ecx
+ movl %edx,%esi
+ roll $5,%edx
+ addl %edi,%ecx
+ xorl %ebx,%eax
+ addl %edx,%ecx
+ por %xmm10,%xmm2
+ movl %ebp,%edi
+ xorl %eax,%ebp
+ movdqa %xmm2,%xmm8
+ addl 40(%rsp),%ebx
+ andl %eax,%edi
+ andl %ebp,%esi
+ rorl $7,%edx
+ addl %edi,%ebx
+ movl %ecx,%edi
+ roll $5,%ecx
+ addl %esi,%ebx
+ xorl %eax,%ebp
+ addl %ecx,%ebx
+ movl %edx,%esi
+ xorl %ebp,%edx
+ addl 44(%rsp),%eax
+ andl %ebp,%esi
+ andl %edx,%edi
+ rorl $7,%ecx
+ addl %esi,%eax
+ movl %ebx,%esi
+ roll $5,%ebx
+ addl %edi,%eax
+ xorl %ebp,%edx
+ addl %ebx,%eax
+ addl 48(%rsp),%ebp
+ pxor %xmm7,%xmm3
+.byte 102,68,15,58,15,193,8
+ xorl %edx,%esi
+ movl %eax,%edi
+ roll $5,%eax
+ pxor %xmm4,%xmm3
+ xorl %ecx,%esi
+ addl %eax,%ebp
+ movdqa %xmm9,%xmm10
+ paddd %xmm2,%xmm9
+ rorl $7,%ebx
+ addl %esi,%ebp
+ pxor %xmm8,%xmm3
+ addl 52(%rsp),%edx
+ xorl %ecx,%edi
+ movl %ebp,%esi
+ roll $5,%ebp
+ movdqa %xmm3,%xmm8
+ movdqa %xmm9,32(%rsp)
+ xorl %ebx,%edi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %edi,%edx
+ pslld $2,%xmm3
+ addl 56(%rsp),%ecx
+ xorl %ebx,%esi
+ psrld $30,%xmm8
+ movl %edx,%edi
+ roll $5,%edx
+ xorl %eax,%esi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %esi,%ecx
+ por %xmm8,%xmm3
+ addl 60(%rsp),%ebx
+ xorl %eax,%edi
+ movl %ecx,%esi
+ roll $5,%ecx
+ xorl %ebp,%edi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %edi,%ebx
+ addl 0(%rsp),%eax
+ paddd %xmm3,%xmm10
+ xorl %ebp,%esi
+ movl %ebx,%edi
+ roll $5,%ebx
+ xorl %edx,%esi
+ movdqa %xmm10,48(%rsp)
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %esi,%eax
+ addl 4(%rsp),%ebp
+ xorl %edx,%edi
+ movl %eax,%esi
+ roll $5,%eax
+ xorl %ecx,%edi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %edi,%ebp
+ addl 8(%rsp),%edx
+ xorl %ecx,%esi
+ movl %ebp,%edi
+ roll $5,%ebp
+ xorl %ebx,%esi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %esi,%edx
+ addl 12(%rsp),%ecx
+ xorl %ebx,%edi
+ movl %edx,%esi
+ roll $5,%edx
+ xorl %eax,%edi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %edi,%ecx
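+/* If more input remains, load and byte-swap the next 64-byte block while the final 16 rounds of the current one complete. */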
+ cmpq %r10,%r9
+ je .Ldone_ssse3
+ movdqa 64(%r11),%xmm6
+ movdqa 0(%r11),%xmm9
+ movdqu 0(%r9),%xmm0
+ movdqu 16(%r9),%xmm1
+ movdqu 32(%r9),%xmm2
+ movdqu 48(%r9),%xmm3
+.byte 102,15,56,0,198
+ addq $64,%r9
+ addl 16(%rsp),%ebx
+ xorl %eax,%esi
+.byte 102,15,56,0,206
+ movl %ecx,%edi
+ roll $5,%ecx
+ paddd %xmm9,%xmm0
+ xorl %ebp,%esi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %esi,%ebx
+ movdqa %xmm0,0(%rsp)
+ addl 20(%rsp),%eax
+ xorl %ebp,%edi
+ psubd %xmm9,%xmm0
+ movl %ebx,%esi
+ roll $5,%ebx
+ xorl %edx,%edi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %edi,%eax
+ addl 24(%rsp),%ebp
+ xorl %edx,%esi
+ movl %eax,%edi
+ roll $5,%eax
+ xorl %ecx,%esi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %esi,%ebp
+ addl 28(%rsp),%edx
+ xorl %ecx,%edi
+ movl %ebp,%esi
+ roll $5,%ebp
+ xorl %ebx,%edi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %edi,%edx
+ addl 32(%rsp),%ecx
+ xorl %ebx,%esi
+.byte 102,15,56,0,214
+ movl %edx,%edi
+ roll $5,%edx
+ paddd %xmm9,%xmm1
+ xorl %eax,%esi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %esi,%ecx
+ movdqa %xmm1,16(%rsp)
+ addl 36(%rsp),%ebx
+ xorl %eax,%edi
+ psubd %xmm9,%xmm1
+ movl %ecx,%esi
+ roll $5,%ecx
+ xorl %ebp,%edi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %edi,%ebx
+ addl 40(%rsp),%eax
+ xorl %ebp,%esi
+ movl %ebx,%edi
+ roll $5,%ebx
+ xorl %edx,%esi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %esi,%eax
+ addl 44(%rsp),%ebp
+ xorl %edx,%edi
+ movl %eax,%esi
+ roll $5,%eax
+ xorl %ecx,%edi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %edi,%ebp
+ addl 48(%rsp),%edx
+ xorl %ecx,%esi
+.byte 102,15,56,0,222
+ movl %ebp,%edi
+ roll $5,%ebp
+ paddd %xmm9,%xmm2
+ xorl %ebx,%esi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %esi,%edx
+ movdqa %xmm2,32(%rsp)
+ addl 52(%rsp),%ecx
+ xorl %ebx,%edi
+ psubd %xmm9,%xmm2
+ movl %edx,%esi
+ roll $5,%edx
+ xorl %eax,%edi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %edi,%ecx
+ addl 56(%rsp),%ebx
+ xorl %eax,%esi
+ movl %ecx,%edi
+ roll $5,%ecx
+ xorl %ebp,%esi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %esi,%ebx
+ addl 60(%rsp),%eax
+ xorl %ebp,%edi
+ movl %ebx,%esi
+ roll $5,%ebx
+ xorl %edx,%edi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %edi,%eax
+ addl 0(%r8),%eax
+ addl 4(%r8),%esi
+ addl 8(%r8),%ecx
+ addl 12(%r8),%edx
+ movl %eax,0(%r8)
+ addl 16(%r8),%ebp
+ movl %esi,4(%r8)
+ movl %esi,%ebx
+ movl %ecx,8(%r8)
+ movl %edx,12(%r8)
+ movl %ebp,16(%r8)
+ jmp .Loop_ssse3
+
+.align 16
+.Ldone_ssse3:
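+/* Last block: finish the remaining rounds without scheduling further data, fold the working registers back into the context at (%r8) and restore the stack. */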
+ addl 16(%rsp),%ebx
+ xorl %eax,%esi
+ movl %ecx,%edi
+ roll $5,%ecx
+ xorl %ebp,%esi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %esi,%ebx
+ addl 20(%rsp),%eax
+ xorl %ebp,%edi
+ movl %ebx,%esi
+ roll $5,%ebx
+ xorl %edx,%edi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %edi,%eax
+ addl 24(%rsp),%ebp
+ xorl %edx,%esi
+ movl %eax,%edi
+ roll $5,%eax
+ xorl %ecx,%esi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %esi,%ebp
+ addl 28(%rsp),%edx
+ xorl %ecx,%edi
+ movl %ebp,%esi
+ roll $5,%ebp
+ xorl %ebx,%edi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %edi,%edx
+ addl 32(%rsp),%ecx
+ xorl %ebx,%esi
+ movl %edx,%edi
+ roll $5,%edx
+ xorl %eax,%esi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %esi,%ecx
+ addl 36(%rsp),%ebx
+ xorl %eax,%edi
+ movl %ecx,%esi
+ roll $5,%ecx
+ xorl %ebp,%edi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %edi,%ebx
+ addl 40(%rsp),%eax
+ xorl %ebp,%esi
+ movl %ebx,%edi
+ roll $5,%ebx
+ xorl %edx,%esi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %esi,%eax
+ addl 44(%rsp),%ebp
+ xorl %edx,%edi
+ movl %eax,%esi
+ roll $5,%eax
+ xorl %ecx,%edi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %edi,%ebp
+ addl 48(%rsp),%edx
+ xorl %ecx,%esi
+ movl %ebp,%edi
+ roll $5,%ebp
+ xorl %ebx,%esi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %esi,%edx
+ addl 52(%rsp),%ecx
+ xorl %ebx,%edi
+ movl %edx,%esi
+ roll $5,%edx
+ xorl %eax,%edi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %edi,%ecx
+ addl 56(%rsp),%ebx
+ xorl %eax,%esi
+ movl %ecx,%edi
+ roll $5,%ecx
+ xorl %ebp,%esi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %esi,%ebx
+ addl 60(%rsp),%eax
+ xorl %ebp,%edi
+ movl %ebx,%esi
+ roll $5,%ebx
+ xorl %edx,%edi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %edi,%eax
+ addl 0(%r8),%eax
+ addl 4(%r8),%esi
+ addl 8(%r8),%ecx
+ movl %eax,0(%r8)
+ addl 12(%r8),%edx
+ movl %esi,4(%r8)
+ addl 16(%r8),%ebp
+ movl %ecx,8(%r8)
+ movl %edx,12(%r8)
+ movl %ebp,16(%r8)
+ leaq 64(%rsp),%rsi
+ movq 0(%rsi),%r12
+ movq 8(%rsi),%rbp
+ movq 16(%rsi),%rbx
+ leaq 24(%rsi),%rsp
+.Lepilogue_ssse3:
+ retq
+.size sha1_block_data_order_ssse3,.-sha1_block_data_order_ssse3
+.align 64
+K_XX_XX:
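+/* SHA-1 round constants K1..K4, each broadcast across four 32-bit lanes, followed by the pshufb byte-swap mask. */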
+.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999
+.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1
+.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc
+.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6
+.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
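+/* ASCII: "SHA1 block transform for x86_64, CRYPTOGAMS by <appro@openssl.org>" */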
+.align 64
+#if defined(HAVE_GNU_STACK)
+.section .note.GNU-stack,"",%progbits
+#endif
diff --git a/crypto/libressl/crypto/sha/sha1-macosx-x86_64.S b/crypto/libressl/crypto/sha/sha1-macosx-x86_64.S
new file mode 100644
index 0000000..04a8aff
--- /dev/null
+++ b/crypto/libressl/crypto/sha/sha1-macosx-x86_64.S
@@ -0,0 +1,2488 @@
+#include "x86_arch.h"
+.text
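+/* Mach-O build of the same generated code: globals carry a leading underscore, local labels use the L$ prefix and .p2align replaces .align. */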
+
+.private_extern _OPENSSL_ia32cap_P
+
+.globl _sha1_block_data_order
+
+.p2align 4
+_sha1_block_data_order:
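+/* Dispatch on OPENSSL_ia32cap_P: jump to the SSSE3 path when the CPU supports it, otherwise to the integer-only loop at L$ialu. */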
+ movl _OPENSSL_ia32cap_P+0(%rip),%r9d
+ movl _OPENSSL_ia32cap_P+4(%rip),%r8d
+ testl $IA32CAP_MASK1_SSSE3,%r8d
+ jz L$ialu
+ jmp _ssse3_shortcut
+
+.p2align 4
+L$ialu:
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ movq %rsp,%r11
+ movq %rdi,%r8
+ subq $72,%rsp
+ movq %rsi,%r9
+ andq $-64,%rsp
+ movq %rdx,%r10
+ movq %r11,64(%rsp)
+L$prologue:
+
+ movl 0(%r8),%esi
+ movl 4(%r8),%edi
+ movl 8(%r8),%r11d
+ movl 12(%r8),%r12d
+ movl 16(%r8),%r13d
+ jmp L$loop
+
+.p2align 4
+L$loop:
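+/* Integer-only path, one 64-byte block per iteration; the 16 message words are byte-swapped onto the stack. Rounds 0-19: F(b,c,d) = (b & c) | (~b & d), K = 0x5a827999 (1518500249). */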
+ movl 0(%r9),%edx
+ bswapl %edx
+ movl %edx,0(%rsp)
+ movl %r11d,%eax
+ movl 4(%r9),%ebp
+ movl %esi,%ecx
+ xorl %r12d,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%r13,1),%r13d
+ andl %edi,%eax
+ movl %ebp,4(%rsp)
+ addl %ecx,%r13d
+ xorl %r12d,%eax
+ roll $30,%edi
+ addl %eax,%r13d
+ movl %edi,%eax
+ movl 8(%r9),%edx
+ movl %r13d,%ecx
+ xorl %r11d,%eax
+ bswapl %edx
+ roll $5,%ecx
+ leal 1518500249(%rbp,%r12,1),%r12d
+ andl %esi,%eax
+ movl %edx,8(%rsp)
+ addl %ecx,%r12d
+ xorl %r11d,%eax
+ roll $30,%esi
+ addl %eax,%r12d
+ movl %esi,%eax
+ movl 12(%r9),%ebp
+ movl %r12d,%ecx
+ xorl %edi,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%r11,1),%r11d
+ andl %r13d,%eax
+ movl %ebp,12(%rsp)
+ addl %ecx,%r11d
+ xorl %edi,%eax
+ roll $30,%r13d
+ addl %eax,%r11d
+ movl %r13d,%eax
+ movl 16(%r9),%edx
+ movl %r11d,%ecx
+ xorl %esi,%eax
+ bswapl %edx
+ roll $5,%ecx
+ leal 1518500249(%rbp,%rdi,1),%edi
+ andl %r12d,%eax
+ movl %edx,16(%rsp)
+ addl %ecx,%edi
+ xorl %esi,%eax
+ roll $30,%r12d
+ addl %eax,%edi
+ movl %r12d,%eax
+ movl 20(%r9),%ebp
+ movl %edi,%ecx
+ xorl %r13d,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%rsi,1),%esi
+ andl %r11d,%eax
+ movl %ebp,20(%rsp)
+ addl %ecx,%esi
+ xorl %r13d,%eax
+ roll $30,%r11d
+ addl %eax,%esi
+ movl %r11d,%eax
+ movl 24(%r9),%edx
+ movl %esi,%ecx
+ xorl %r12d,%eax
+ bswapl %edx
+ roll $5,%ecx
+ leal 1518500249(%rbp,%r13,1),%r13d
+ andl %edi,%eax
+ movl %edx,24(%rsp)
+ addl %ecx,%r13d
+ xorl %r12d,%eax
+ roll $30,%edi
+ addl %eax,%r13d
+ movl %edi,%eax
+ movl 28(%r9),%ebp
+ movl %r13d,%ecx
+ xorl %r11d,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%r12,1),%r12d
+ andl %esi,%eax
+ movl %ebp,28(%rsp)
+ addl %ecx,%r12d
+ xorl %r11d,%eax
+ roll $30,%esi
+ addl %eax,%r12d
+ movl %esi,%eax
+ movl 32(%r9),%edx
+ movl %r12d,%ecx
+ xorl %edi,%eax
+ bswapl %edx
+ roll $5,%ecx
+ leal 1518500249(%rbp,%r11,1),%r11d
+ andl %r13d,%eax
+ movl %edx,32(%rsp)
+ addl %ecx,%r11d
+ xorl %edi,%eax
+ roll $30,%r13d
+ addl %eax,%r11d
+ movl %r13d,%eax
+ movl 36(%r9),%ebp
+ movl %r11d,%ecx
+ xorl %esi,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%rdi,1),%edi
+ andl %r12d,%eax
+ movl %ebp,36(%rsp)
+ addl %ecx,%edi
+ xorl %esi,%eax
+ roll $30,%r12d
+ addl %eax,%edi
+ movl %r12d,%eax
+ movl 40(%r9),%edx
+ movl %edi,%ecx
+ xorl %r13d,%eax
+ bswapl %edx
+ roll $5,%ecx
+ leal 1518500249(%rbp,%rsi,1),%esi
+ andl %r11d,%eax
+ movl %edx,40(%rsp)
+ addl %ecx,%esi
+ xorl %r13d,%eax
+ roll $30,%r11d
+ addl %eax,%esi
+ movl %r11d,%eax
+ movl 44(%r9),%ebp
+ movl %esi,%ecx
+ xorl %r12d,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%r13,1),%r13d
+ andl %edi,%eax
+ movl %ebp,44(%rsp)
+ addl %ecx,%r13d
+ xorl %r12d,%eax
+ roll $30,%edi
+ addl %eax,%r13d
+ movl %edi,%eax
+ movl 48(%r9),%edx
+ movl %r13d,%ecx
+ xorl %r11d,%eax
+ bswapl %edx
+ roll $5,%ecx
+ leal 1518500249(%rbp,%r12,1),%r12d
+ andl %esi,%eax
+ movl %edx,48(%rsp)
+ addl %ecx,%r12d
+ xorl %r11d,%eax
+ roll $30,%esi
+ addl %eax,%r12d
+ movl %esi,%eax
+ movl 52(%r9),%ebp
+ movl %r12d,%ecx
+ xorl %edi,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%r11,1),%r11d
+ andl %r13d,%eax
+ movl %ebp,52(%rsp)
+ addl %ecx,%r11d
+ xorl %edi,%eax
+ roll $30,%r13d
+ addl %eax,%r11d
+ movl %r13d,%eax
+ movl 56(%r9),%edx
+ movl %r11d,%ecx
+ xorl %esi,%eax
+ bswapl %edx
+ roll $5,%ecx
+ leal 1518500249(%rbp,%rdi,1),%edi
+ andl %r12d,%eax
+ movl %edx,56(%rsp)
+ addl %ecx,%edi
+ xorl %esi,%eax
+ roll $30,%r12d
+ addl %eax,%edi
+ movl %r12d,%eax
+ movl 60(%r9),%ebp
+ movl %edi,%ecx
+ xorl %r13d,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%rsi,1),%esi
+ andl %r11d,%eax
+ movl %ebp,60(%rsp)
+ addl %ecx,%esi
+ xorl %r13d,%eax
+ roll $30,%r11d
+ addl %eax,%esi
+ movl 0(%rsp),%edx
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 8(%rsp),%edx
+ xorl %r12d,%eax
+ roll $5,%ecx
+ xorl 32(%rsp),%edx
+ andl %edi,%eax
+ leal 1518500249(%rbp,%r13,1),%r13d
+ xorl 52(%rsp),%edx
+ xorl %r12d,%eax
+ roll $1,%edx
+ addl %ecx,%r13d
+ roll $30,%edi
+ movl %edx,0(%rsp)
+ addl %eax,%r13d
+ movl 4(%rsp),%ebp
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 12(%rsp),%ebp
+ xorl %r11d,%eax
+ roll $5,%ecx
+ xorl 36(%rsp),%ebp
+ andl %esi,%eax
+ leal 1518500249(%rdx,%r12,1),%r12d
+ xorl 56(%rsp),%ebp
+ xorl %r11d,%eax
+ roll $1,%ebp
+ addl %ecx,%r12d
+ roll $30,%esi
+ movl %ebp,4(%rsp)
+ addl %eax,%r12d
+ movl 8(%rsp),%edx
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 16(%rsp),%edx
+ xorl %edi,%eax
+ roll $5,%ecx
+ xorl 40(%rsp),%edx
+ andl %r13d,%eax
+ leal 1518500249(%rbp,%r11,1),%r11d
+ xorl 60(%rsp),%edx
+ xorl %edi,%eax
+ roll $1,%edx
+ addl %ecx,%r11d
+ roll $30,%r13d
+ movl %edx,8(%rsp)
+ addl %eax,%r11d
+ movl 12(%rsp),%ebp
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 20(%rsp),%ebp
+ xorl %esi,%eax
+ roll $5,%ecx
+ xorl 44(%rsp),%ebp
+ andl %r12d,%eax
+ leal 1518500249(%rdx,%rdi,1),%edi
+ xorl 0(%rsp),%ebp
+ xorl %esi,%eax
+ roll $1,%ebp
+ addl %ecx,%edi
+ roll $30,%r12d
+ movl %ebp,12(%rsp)
+ addl %eax,%edi
+ movl 16(%rsp),%edx
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 24(%rsp),%edx
+ xorl %r13d,%eax
+ roll $5,%ecx
+ xorl 48(%rsp),%edx
+ andl %r11d,%eax
+ leal 1518500249(%rbp,%rsi,1),%esi
+ xorl 4(%rsp),%edx
+ xorl %r13d,%eax
+ roll $1,%edx
+ addl %ecx,%esi
+ roll $30,%r11d
+ movl %edx,16(%rsp)
+ addl %eax,%esi
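+/* Rounds 20-39: F(b,c,d) = b ^ c ^ d, K = 0x6ed9eba1 (1859775393). */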
+ movl 20(%rsp),%ebp
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 28(%rsp),%ebp
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%r13,1),%r13d
+ xorl 52(%rsp),%ebp
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 8(%rsp),%ebp
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%ebp
+ movl %ebp,20(%rsp)
+ movl 24(%rsp),%edx
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 32(%rsp),%edx
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%r12,1),%r12d
+ xorl 56(%rsp),%edx
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 12(%rsp),%edx
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%edx
+ movl %edx,24(%rsp)
+ movl 28(%rsp),%ebp
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 36(%rsp),%ebp
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%r11,1),%r11d
+ xorl 60(%rsp),%ebp
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 16(%rsp),%ebp
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%ebp
+ movl %ebp,28(%rsp)
+ movl 32(%rsp),%edx
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 40(%rsp),%edx
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%rdi,1),%edi
+ xorl 0(%rsp),%edx
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 20(%rsp),%edx
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%edx
+ movl %edx,32(%rsp)
+ movl 36(%rsp),%ebp
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 44(%rsp),%ebp
+ xorl %r11d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%rsi,1),%esi
+ xorl 4(%rsp),%ebp
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ xorl 24(%rsp),%ebp
+ roll $30,%r11d
+ addl %eax,%esi
+ roll $1,%ebp
+ movl %ebp,36(%rsp)
+ movl 40(%rsp),%edx
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 48(%rsp),%edx
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%r13,1),%r13d
+ xorl 8(%rsp),%edx
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 28(%rsp),%edx
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%edx
+ movl %edx,40(%rsp)
+ movl 44(%rsp),%ebp
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 52(%rsp),%ebp
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%r12,1),%r12d
+ xorl 12(%rsp),%ebp
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 32(%rsp),%ebp
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%ebp
+ movl %ebp,44(%rsp)
+ movl 48(%rsp),%edx
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 56(%rsp),%edx
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%r11,1),%r11d
+ xorl 16(%rsp),%edx
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 36(%rsp),%edx
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%edx
+ movl %edx,48(%rsp)
+ movl 52(%rsp),%ebp
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 60(%rsp),%ebp
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%rdi,1),%edi
+ xorl 20(%rsp),%ebp
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 40(%rsp),%ebp
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%ebp
+ movl %ebp,52(%rsp)
+ movl 56(%rsp),%edx
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 0(%rsp),%edx
+ xorl %r11d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%rsi,1),%esi
+ xorl 24(%rsp),%edx
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ xorl 44(%rsp),%edx
+ roll $30,%r11d
+ addl %eax,%esi
+ roll $1,%edx
+ movl %edx,56(%rsp)
+ movl 60(%rsp),%ebp
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 4(%rsp),%ebp
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%r13,1),%r13d
+ xorl 28(%rsp),%ebp
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 48(%rsp),%ebp
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%ebp
+ movl %ebp,60(%rsp)
+ movl 0(%rsp),%edx
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 8(%rsp),%edx
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%r12,1),%r12d
+ xorl 32(%rsp),%edx
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 52(%rsp),%edx
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%edx
+ movl %edx,0(%rsp)
+ movl 4(%rsp),%ebp
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 12(%rsp),%ebp
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%r11,1),%r11d
+ xorl 36(%rsp),%ebp
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 56(%rsp),%ebp
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%ebp
+ movl %ebp,4(%rsp)
+ movl 8(%rsp),%edx
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 16(%rsp),%edx
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%rdi,1),%edi
+ xorl 40(%rsp),%edx
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 60(%rsp),%edx
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%edx
+ movl %edx,8(%rsp)
+ movl 12(%rsp),%ebp
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 20(%rsp),%ebp
+ xorl %r11d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%rsi,1),%esi
+ xorl 44(%rsp),%ebp
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ xorl 0(%rsp),%ebp
+ roll $30,%r11d
+ addl %eax,%esi
+ roll $1,%ebp
+ movl %ebp,12(%rsp)
+ movl 16(%rsp),%edx
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 24(%rsp),%edx
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%r13,1),%r13d
+ xorl 48(%rsp),%edx
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 4(%rsp),%edx
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%edx
+ movl %edx,16(%rsp)
+ movl 20(%rsp),%ebp
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 28(%rsp),%ebp
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%r12,1),%r12d
+ xorl 52(%rsp),%ebp
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 8(%rsp),%ebp
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%ebp
+ movl %ebp,20(%rsp)
+ movl 24(%rsp),%edx
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 32(%rsp),%edx
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%r11,1),%r11d
+ xorl 56(%rsp),%edx
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 12(%rsp),%edx
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%edx
+ movl %edx,24(%rsp)
+ movl 28(%rsp),%ebp
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 36(%rsp),%ebp
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%rdi,1),%edi
+ xorl 60(%rsp),%ebp
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 16(%rsp),%ebp
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%ebp
+ movl %ebp,28(%rsp)
+ movl 32(%rsp),%edx
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 40(%rsp),%edx
+ xorl %r11d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%rsi,1),%esi
+ xorl 0(%rsp),%edx
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ xorl 20(%rsp),%edx
+ roll $30,%r11d
+ addl %eax,%esi
+ roll $1,%edx
+ movl %edx,32(%rsp)
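+/* Rounds 40-59: F(b,c,d) = (b & c) | (b & d) | (c & d) (majority), K = 0x8f1bbcdc (written as -1894007588). */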
+ movl 36(%rsp),%ebp
+ movl %r11d,%eax
+ movl %r11d,%ebx
+ xorl 44(%rsp),%ebp
+ andl %r12d,%eax
+ movl %esi,%ecx
+ xorl 4(%rsp),%ebp
+ xorl %r12d,%ebx
+ leal -1894007588(%rdx,%r13,1),%r13d
+ roll $5,%ecx
+ xorl 24(%rsp),%ebp
+ addl %eax,%r13d
+ andl %edi,%ebx
+ roll $1,%ebp
+ addl %ebx,%r13d
+ roll $30,%edi
+ movl %ebp,36(%rsp)
+ addl %ecx,%r13d
+ movl 40(%rsp),%edx
+ movl %edi,%eax
+ movl %edi,%ebx
+ xorl 48(%rsp),%edx
+ andl %r11d,%eax
+ movl %r13d,%ecx
+ xorl 8(%rsp),%edx
+ xorl %r11d,%ebx
+ leal -1894007588(%rbp,%r12,1),%r12d
+ roll $5,%ecx
+ xorl 28(%rsp),%edx
+ addl %eax,%r12d
+ andl %esi,%ebx
+ roll $1,%edx
+ addl %ebx,%r12d
+ roll $30,%esi
+ movl %edx,40(%rsp)
+ addl %ecx,%r12d
+ movl 44(%rsp),%ebp
+ movl %esi,%eax
+ movl %esi,%ebx
+ xorl 52(%rsp),%ebp
+ andl %edi,%eax
+ movl %r12d,%ecx
+ xorl 12(%rsp),%ebp
+ xorl %edi,%ebx
+ leal -1894007588(%rdx,%r11,1),%r11d
+ roll $5,%ecx
+ xorl 32(%rsp),%ebp
+ addl %eax,%r11d
+ andl %r13d,%ebx
+ roll $1,%ebp
+ addl %ebx,%r11d
+ roll $30,%r13d
+ movl %ebp,44(%rsp)
+ addl %ecx,%r11d
+ movl 48(%rsp),%edx
+ movl %r13d,%eax
+ movl %r13d,%ebx
+ xorl 56(%rsp),%edx
+ andl %esi,%eax
+ movl %r11d,%ecx
+ xorl 16(%rsp),%edx
+ xorl %esi,%ebx
+ leal -1894007588(%rbp,%rdi,1),%edi
+ roll $5,%ecx
+ xorl 36(%rsp),%edx
+ addl %eax,%edi
+ andl %r12d,%ebx
+ roll $1,%edx
+ addl %ebx,%edi
+ roll $30,%r12d
+ movl %edx,48(%rsp)
+ addl %ecx,%edi
+ movl 52(%rsp),%ebp
+ movl %r12d,%eax
+ movl %r12d,%ebx
+ xorl 60(%rsp),%ebp
+ andl %r13d,%eax
+ movl %edi,%ecx
+ xorl 20(%rsp),%ebp
+ xorl %r13d,%ebx
+ leal -1894007588(%rdx,%rsi,1),%esi
+ roll $5,%ecx
+ xorl 40(%rsp),%ebp
+ addl %eax,%esi
+ andl %r11d,%ebx
+ roll $1,%ebp
+ addl %ebx,%esi
+ roll $30,%r11d
+ movl %ebp,52(%rsp)
+ addl %ecx,%esi
+ movl 56(%rsp),%edx
+ movl %r11d,%eax
+ movl %r11d,%ebx
+ xorl 0(%rsp),%edx
+ andl %r12d,%eax
+ movl %esi,%ecx
+ xorl 24(%rsp),%edx
+ xorl %r12d,%ebx
+ leal -1894007588(%rbp,%r13,1),%r13d
+ roll $5,%ecx
+ xorl 44(%rsp),%edx
+ addl %eax,%r13d
+ andl %edi,%ebx
+ roll $1,%edx
+ addl %ebx,%r13d
+ roll $30,%edi
+ movl %edx,56(%rsp)
+ addl %ecx,%r13d
+ movl 60(%rsp),%ebp
+ movl %edi,%eax
+ movl %edi,%ebx
+ xorl 4(%rsp),%ebp
+ andl %r11d,%eax
+ movl %r13d,%ecx
+ xorl 28(%rsp),%ebp
+ xorl %r11d,%ebx
+ leal -1894007588(%rdx,%r12,1),%r12d
+ roll $5,%ecx
+ xorl 48(%rsp),%ebp
+ addl %eax,%r12d
+ andl %esi,%ebx
+ roll $1,%ebp
+ addl %ebx,%r12d
+ roll $30,%esi
+ movl %ebp,60(%rsp)
+ addl %ecx,%r12d
+ movl 0(%rsp),%edx
+ movl %esi,%eax
+ movl %esi,%ebx
+ xorl 8(%rsp),%edx
+ andl %edi,%eax
+ movl %r12d,%ecx
+ xorl 32(%rsp),%edx
+ xorl %edi,%ebx
+ leal -1894007588(%rbp,%r11,1),%r11d
+ roll $5,%ecx
+ xorl 52(%rsp),%edx
+ addl %eax,%r11d
+ andl %r13d,%ebx
+ roll $1,%edx
+ addl %ebx,%r11d
+ roll $30,%r13d
+ movl %edx,0(%rsp)
+ addl %ecx,%r11d
+ movl 4(%rsp),%ebp
+ movl %r13d,%eax
+ movl %r13d,%ebx
+ xorl 12(%rsp),%ebp
+ andl %esi,%eax
+ movl %r11d,%ecx
+ xorl 36(%rsp),%ebp
+ xorl %esi,%ebx
+ leal -1894007588(%rdx,%rdi,1),%edi
+ roll $5,%ecx
+ xorl 56(%rsp),%ebp
+ addl %eax,%edi
+ andl %r12d,%ebx
+ roll $1,%ebp
+ addl %ebx,%edi
+ roll $30,%r12d
+ movl %ebp,4(%rsp)
+ addl %ecx,%edi
+ movl 8(%rsp),%edx
+ movl %r12d,%eax
+ movl %r12d,%ebx
+ xorl 16(%rsp),%edx
+ andl %r13d,%eax
+ movl %edi,%ecx
+ xorl 40(%rsp),%edx
+ xorl %r13d,%ebx
+ leal -1894007588(%rbp,%rsi,1),%esi
+ roll $5,%ecx
+ xorl 60(%rsp),%edx
+ addl %eax,%esi
+ andl %r11d,%ebx
+ roll $1,%edx
+ addl %ebx,%esi
+ roll $30,%r11d
+ movl %edx,8(%rsp)
+ addl %ecx,%esi
+ movl 12(%rsp),%ebp
+ movl %r11d,%eax
+ movl %r11d,%ebx
+ xorl 20(%rsp),%ebp
+ andl %r12d,%eax
+ movl %esi,%ecx
+ xorl 44(%rsp),%ebp
+ xorl %r12d,%ebx
+ leal -1894007588(%rdx,%r13,1),%r13d
+ roll $5,%ecx
+ xorl 0(%rsp),%ebp
+ addl %eax,%r13d
+ andl %edi,%ebx
+ roll $1,%ebp
+ addl %ebx,%r13d
+ roll $30,%edi
+ movl %ebp,12(%rsp)
+ addl %ecx,%r13d
+ movl 16(%rsp),%edx
+ movl %edi,%eax
+ movl %edi,%ebx
+ xorl 24(%rsp),%edx
+ andl %r11d,%eax
+ movl %r13d,%ecx
+ xorl 48(%rsp),%edx
+ xorl %r11d,%ebx
+ leal -1894007588(%rbp,%r12,1),%r12d
+ roll $5,%ecx
+ xorl 4(%rsp),%edx
+ addl %eax,%r12d
+ andl %esi,%ebx
+ roll $1,%edx
+ addl %ebx,%r12d
+ roll $30,%esi
+ movl %edx,16(%rsp)
+ addl %ecx,%r12d
+ movl 20(%rsp),%ebp
+ movl %esi,%eax
+ movl %esi,%ebx
+ xorl 28(%rsp),%ebp
+ andl %edi,%eax
+ movl %r12d,%ecx
+ xorl 52(%rsp),%ebp
+ xorl %edi,%ebx
+ leal -1894007588(%rdx,%r11,1),%r11d
+ roll $5,%ecx
+ xorl 8(%rsp),%ebp
+ addl %eax,%r11d
+ andl %r13d,%ebx
+ roll $1,%ebp
+ addl %ebx,%r11d
+ roll $30,%r13d
+ movl %ebp,20(%rsp)
+ addl %ecx,%r11d
+ movl 24(%rsp),%edx
+ movl %r13d,%eax
+ movl %r13d,%ebx
+ xorl 32(%rsp),%edx
+ andl %esi,%eax
+ movl %r11d,%ecx
+ xorl 56(%rsp),%edx
+ xorl %esi,%ebx
+ leal -1894007588(%rbp,%rdi,1),%edi
+ roll $5,%ecx
+ xorl 12(%rsp),%edx
+ addl %eax,%edi
+ andl %r12d,%ebx
+ roll $1,%edx
+ addl %ebx,%edi
+ roll $30,%r12d
+ movl %edx,24(%rsp)
+ addl %ecx,%edi
+ movl 28(%rsp),%ebp
+ movl %r12d,%eax
+ movl %r12d,%ebx
+ xorl 36(%rsp),%ebp
+ andl %r13d,%eax
+ movl %edi,%ecx
+ xorl 60(%rsp),%ebp
+ xorl %r13d,%ebx
+ leal -1894007588(%rdx,%rsi,1),%esi
+ roll $5,%ecx
+ xorl 16(%rsp),%ebp
+ addl %eax,%esi
+ andl %r11d,%ebx
+ roll $1,%ebp
+ addl %ebx,%esi
+ roll $30,%r11d
+ movl %ebp,28(%rsp)
+ addl %ecx,%esi
+ movl 32(%rsp),%edx
+ movl %r11d,%eax
+ movl %r11d,%ebx
+ xorl 40(%rsp),%edx
+ andl %r12d,%eax
+ movl %esi,%ecx
+ xorl 0(%rsp),%edx
+ xorl %r12d,%ebx
+ leal -1894007588(%rbp,%r13,1),%r13d
+ roll $5,%ecx
+ xorl 20(%rsp),%edx
+ addl %eax,%r13d
+ andl %edi,%ebx
+ roll $1,%edx
+ addl %ebx,%r13d
+ roll $30,%edi
+ movl %edx,32(%rsp)
+ addl %ecx,%r13d
+ movl 36(%rsp),%ebp
+ movl %edi,%eax
+ movl %edi,%ebx
+ xorl 44(%rsp),%ebp
+ andl %r11d,%eax
+ movl %r13d,%ecx
+ xorl 4(%rsp),%ebp
+ xorl %r11d,%ebx
+ leal -1894007588(%rdx,%r12,1),%r12d
+ roll $5,%ecx
+ xorl 24(%rsp),%ebp
+ addl %eax,%r12d
+ andl %esi,%ebx
+ roll $1,%ebp
+ addl %ebx,%r12d
+ roll $30,%esi
+ movl %ebp,36(%rsp)
+ addl %ecx,%r12d
+ movl 40(%rsp),%edx
+ movl %esi,%eax
+ movl %esi,%ebx
+ xorl 48(%rsp),%edx
+ andl %edi,%eax
+ movl %r12d,%ecx
+ xorl 8(%rsp),%edx
+ xorl %edi,%ebx
+ leal -1894007588(%rbp,%r11,1),%r11d
+ roll $5,%ecx
+ xorl 28(%rsp),%edx
+ addl %eax,%r11d
+ andl %r13d,%ebx
+ roll $1,%edx
+ addl %ebx,%r11d
+ roll $30,%r13d
+ movl %edx,40(%rsp)
+ addl %ecx,%r11d
+ movl 44(%rsp),%ebp
+ movl %r13d,%eax
+ movl %r13d,%ebx
+ xorl 52(%rsp),%ebp
+ andl %esi,%eax
+ movl %r11d,%ecx
+ xorl 12(%rsp),%ebp
+ xorl %esi,%ebx
+ leal -1894007588(%rdx,%rdi,1),%edi
+ roll $5,%ecx
+ xorl 32(%rsp),%ebp
+ addl %eax,%edi
+ andl %r12d,%ebx
+ roll $1,%ebp
+ addl %ebx,%edi
+ roll $30,%r12d
+ movl %ebp,44(%rsp)
+ addl %ecx,%edi
+ movl 48(%rsp),%edx
+ movl %r12d,%eax
+ movl %r12d,%ebx
+ xorl 56(%rsp),%edx
+ andl %r13d,%eax
+ movl %edi,%ecx
+ xorl 16(%rsp),%edx
+ xorl %r13d,%ebx
+ leal -1894007588(%rbp,%rsi,1),%esi
+ roll $5,%ecx
+ xorl 36(%rsp),%edx
+ addl %eax,%esi
+ andl %r11d,%ebx
+ roll $1,%edx
+ addl %ebx,%esi
+ roll $30,%r11d
+ movl %edx,48(%rsp)
+ addl %ecx,%esi
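+/* Rounds 60-79: F(b,c,d) = b ^ c ^ d, K = 0xca62c1d6 (written as -899497514). */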
+ movl 52(%rsp),%ebp
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 60(%rsp),%ebp
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%r13,1),%r13d
+ xorl 20(%rsp),%ebp
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 40(%rsp),%ebp
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%ebp
+ movl %ebp,52(%rsp)
+ movl 56(%rsp),%edx
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 0(%rsp),%edx
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%r12,1),%r12d
+ xorl 24(%rsp),%edx
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 44(%rsp),%edx
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%edx
+ movl %edx,56(%rsp)
+ movl 60(%rsp),%ebp
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 4(%rsp),%ebp
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%r11,1),%r11d
+ xorl 28(%rsp),%ebp
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 48(%rsp),%ebp
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%ebp
+ movl %ebp,60(%rsp)
+ movl 0(%rsp),%edx
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 8(%rsp),%edx
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%rdi,1),%edi
+ xorl 32(%rsp),%edx
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 52(%rsp),%edx
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%edx
+ movl %edx,0(%rsp)
+ movl 4(%rsp),%ebp
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 12(%rsp),%ebp
+ xorl %r11d,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%rsi,1),%esi
+ xorl 36(%rsp),%ebp
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ xorl 56(%rsp),%ebp
+ roll $30,%r11d
+ addl %eax,%esi
+ roll $1,%ebp
+ movl %ebp,4(%rsp)
+ movl 8(%rsp),%edx
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 16(%rsp),%edx
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%r13,1),%r13d
+ xorl 40(%rsp),%edx
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 60(%rsp),%edx
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%edx
+ movl %edx,8(%rsp)
+ movl 12(%rsp),%ebp
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 20(%rsp),%ebp
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%r12,1),%r12d
+ xorl 44(%rsp),%ebp
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 0(%rsp),%ebp
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%ebp
+ movl %ebp,12(%rsp)
+ movl 16(%rsp),%edx
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 24(%rsp),%edx
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%r11,1),%r11d
+ xorl 48(%rsp),%edx
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 4(%rsp),%edx
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%edx
+ movl %edx,16(%rsp)
+ movl 20(%rsp),%ebp
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 28(%rsp),%ebp
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%rdi,1),%edi
+ xorl 52(%rsp),%ebp
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 8(%rsp),%ebp
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%ebp
+ movl %ebp,20(%rsp)
+ movl 24(%rsp),%edx
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 32(%rsp),%edx
+ xorl %r11d,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%rsi,1),%esi
+ xorl 56(%rsp),%edx
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ xorl 12(%rsp),%edx
+ roll $30,%r11d
+ addl %eax,%esi
+ roll $1,%edx
+ movl %edx,24(%rsp)
+ movl 28(%rsp),%ebp
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 36(%rsp),%ebp
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%r13,1),%r13d
+ xorl 60(%rsp),%ebp
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 16(%rsp),%ebp
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%ebp
+ movl %ebp,28(%rsp)
+ movl 32(%rsp),%edx
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 40(%rsp),%edx
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%r12,1),%r12d
+ xorl 0(%rsp),%edx
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 20(%rsp),%edx
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%edx
+ movl %edx,32(%rsp)
+ movl 36(%rsp),%ebp
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 44(%rsp),%ebp
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%r11,1),%r11d
+ xorl 4(%rsp),%ebp
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 24(%rsp),%ebp
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%ebp
+ movl %ebp,36(%rsp)
+ movl 40(%rsp),%edx
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 48(%rsp),%edx
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%rdi,1),%edi
+ xorl 8(%rsp),%edx
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 28(%rsp),%edx
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%edx
+ movl %edx,40(%rsp)
+ movl 44(%rsp),%ebp
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 52(%rsp),%ebp
+ xorl %r11d,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%rsi,1),%esi
+ xorl 12(%rsp),%ebp
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ xorl 32(%rsp),%ebp
+ roll $30,%r11d
+ addl %eax,%esi
+ roll $1,%ebp
+ movl %ebp,44(%rsp)
+ movl 48(%rsp),%edx
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 56(%rsp),%edx
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%r13,1),%r13d
+ xorl 16(%rsp),%edx
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 36(%rsp),%edx
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%edx
+ movl %edx,48(%rsp)
+ movl 52(%rsp),%ebp
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 60(%rsp),%ebp
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%r12,1),%r12d
+ xorl 20(%rsp),%ebp
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 40(%rsp),%ebp
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%ebp
+ movl 56(%rsp),%edx
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 0(%rsp),%edx
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%r11,1),%r11d
+ xorl 24(%rsp),%edx
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 44(%rsp),%edx
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%edx
+ movl 60(%rsp),%ebp
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 4(%rsp),%ebp
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%rdi,1),%edi
+ xorl 28(%rsp),%ebp
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 48(%rsp),%ebp
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%ebp
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl %r11d,%eax
+ leal -899497514(%rbp,%rsi,1),%esi
+ roll $5,%ecx
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ roll $30,%r11d
+ addl %eax,%esi
+ addl 0(%r8),%esi
+ addl 4(%r8),%edi
+ addl 8(%r8),%r11d
+ addl 12(%r8),%r12d
+ addl 16(%r8),%r13d
+ movl %esi,0(%r8)
+ movl %edi,4(%r8)
+ movl %r11d,8(%r8)
+ movl %r12d,12(%r8)
+ movl %r13d,16(%r8)
+
+ subq $1,%r10
+ leaq 64(%r9),%r9
+ jnz L$loop
+
+ movq 64(%rsp),%rsi
+ movq (%rsi),%r13
+ movq 8(%rsi),%r12
+ movq 16(%rsi),%rbp
+ movq 24(%rsi),%rbx
+ leaq 32(%rsi),%rsp
+L$epilogue:
+ retq
+
+
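+/* SSSE3 variant for the Mach-O build; same structure as the ELF version above. */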
+.p2align 4
+sha1_block_data_order_ssse3:
+_ssse3_shortcut:
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ leaq -64(%rsp),%rsp
+ movq %rdi,%r8
+ movq %rsi,%r9
+ movq %rdx,%r10
+
+ shlq $6,%r10
+ addq %r9,%r10
+ leaq K_XX_XX(%rip),%r11
+
+ movl 0(%r8),%eax
+ movl 4(%r8),%ebx
+ movl 8(%r8),%ecx
+ movl 12(%r8),%edx
+ movl %ebx,%esi
+ movl 16(%r8),%ebp
+
+ movdqa 64(%r11),%xmm6
+ movdqa 0(%r11),%xmm9
+ movdqu 0(%r9),%xmm0
+ movdqu 16(%r9),%xmm1
+ movdqu 32(%r9),%xmm2
+ movdqu 48(%r9),%xmm3
+.byte 102,15,56,0,198
+ addq $64,%r9
+.byte 102,15,56,0,206
+.byte 102,15,56,0,214
+.byte 102,15,56,0,222
+ paddd %xmm9,%xmm0
+ paddd %xmm9,%xmm1
+ paddd %xmm9,%xmm2
+ movdqa %xmm0,0(%rsp)
+ psubd %xmm9,%xmm0
+ movdqa %xmm1,16(%rsp)
+ psubd %xmm9,%xmm1
+ movdqa %xmm2,32(%rsp)
+ psubd %xmm9,%xmm2
+ jmp L$oop_ssse3
+.p2align 4
+L$oop_ssse3:
+ movdqa %xmm1,%xmm4
+ addl 0(%rsp),%ebp
+ xorl %edx,%ecx
+ movdqa %xmm3,%xmm8
+.byte 102,15,58,15,224,8
+ movl %eax,%edi
+ roll $5,%eax
+ paddd %xmm3,%xmm9
+ andl %ecx,%esi
+ xorl %edx,%ecx
+ psrldq $4,%xmm8
+ xorl %edx,%esi
+ addl %eax,%ebp
+ pxor %xmm0,%xmm4
+ rorl $2,%ebx
+ addl %esi,%ebp
+ pxor %xmm2,%xmm8
+ addl 4(%rsp),%edx
+ xorl %ecx,%ebx
+ movl %ebp,%esi
+ roll $5,%ebp
+ pxor %xmm8,%xmm4
+ andl %ebx,%edi
+ xorl %ecx,%ebx
+ movdqa %xmm9,48(%rsp)
+ xorl %ecx,%edi
+ addl %ebp,%edx
+ movdqa %xmm4,%xmm10
+ movdqa %xmm4,%xmm8
+ rorl $7,%eax
+ addl %edi,%edx
+ addl 8(%rsp),%ecx
+ xorl %ebx,%eax
+ pslldq $12,%xmm10
+ paddd %xmm4,%xmm4
+ movl %edx,%edi
+ roll $5,%edx
+ andl %eax,%esi
+ xorl %ebx,%eax
+ psrld $31,%xmm8
+ xorl %ebx,%esi
+ addl %edx,%ecx
+ movdqa %xmm10,%xmm9
+ rorl $7,%ebp
+ addl %esi,%ecx
+ psrld $30,%xmm10
+ por %xmm8,%xmm4
+ addl 12(%rsp),%ebx
+ xorl %eax,%ebp
+ movl %ecx,%esi
+ roll $5,%ecx
+ pslld $2,%xmm9
+ pxor %xmm10,%xmm4
+ andl %ebp,%edi
+ xorl %eax,%ebp
+ movdqa 0(%r11),%xmm10
+ xorl %eax,%edi
+ addl %ecx,%ebx
+ pxor %xmm9,%xmm4
+ rorl $7,%edx
+ addl %edi,%ebx
+ movdqa %xmm2,%xmm5
+ addl 16(%rsp),%eax
+ xorl %ebp,%edx
+ movdqa %xmm4,%xmm9
+.byte 102,15,58,15,233,8
+ movl %ebx,%edi
+ roll $5,%ebx
+ paddd %xmm4,%xmm10
+ andl %edx,%esi
+ xorl %ebp,%edx
+ psrldq $4,%xmm9
+ xorl %ebp,%esi
+ addl %ebx,%eax
+ pxor %xmm1,%xmm5
+ rorl $7,%ecx
+ addl %esi,%eax
+ pxor %xmm3,%xmm9
+ addl 20(%rsp),%ebp
+ xorl %edx,%ecx
+ movl %eax,%esi
+ roll $5,%eax
+ pxor %xmm9,%xmm5
+ andl %ecx,%edi
+ xorl %edx,%ecx
+ movdqa %xmm10,0(%rsp)
+ xorl %edx,%edi
+ addl %eax,%ebp
+ movdqa %xmm5,%xmm8
+ movdqa %xmm5,%xmm9
+ rorl $7,%ebx
+ addl %edi,%ebp
+ addl 24(%rsp),%edx
+ xorl %ecx,%ebx
+ pslldq $12,%xmm8
+ paddd %xmm5,%xmm5
+ movl %ebp,%edi
+ roll $5,%ebp
+ andl %ebx,%esi
+ xorl %ecx,%ebx
+ psrld $31,%xmm9
+ xorl %ecx,%esi
+ addl %ebp,%edx
+ movdqa %xmm8,%xmm10
+ rorl $7,%eax
+ addl %esi,%edx
+ psrld $30,%xmm8
+ por %xmm9,%xmm5
+ addl 28(%rsp),%ecx
+ xorl %ebx,%eax
+ movl %edx,%esi
+ roll $5,%edx
+ pslld $2,%xmm10
+ pxor %xmm8,%xmm5
+ andl %eax,%edi
+ xorl %ebx,%eax
+ movdqa 16(%r11),%xmm8
+ xorl %ebx,%edi
+ addl %edx,%ecx
+ pxor %xmm10,%xmm5
+ rorl $7,%ebp
+ addl %edi,%ecx
+ movdqa %xmm3,%xmm6
+ addl 32(%rsp),%ebx
+ xorl %eax,%ebp
+ movdqa %xmm5,%xmm10
+.byte 102,15,58,15,242,8
+ movl %ecx,%edi
+ roll $5,%ecx
+ paddd %xmm5,%xmm8
+ andl %ebp,%esi
+ xorl %eax,%ebp
+ psrldq $4,%xmm10
+ xorl %eax,%esi
+ addl %ecx,%ebx
+ pxor %xmm2,%xmm6
+ rorl $7,%edx
+ addl %esi,%ebx
+ pxor %xmm4,%xmm10
+ addl 36(%rsp),%eax
+ xorl %ebp,%edx
+ movl %ebx,%esi
+ roll $5,%ebx
+ pxor %xmm10,%xmm6
+ andl %edx,%edi
+ xorl %ebp,%edx
+ movdqa %xmm8,16(%rsp)
+ xorl %ebp,%edi
+ addl %ebx,%eax
+ movdqa %xmm6,%xmm9
+ movdqa %xmm6,%xmm10
+ rorl $7,%ecx
+ addl %edi,%eax
+ addl 40(%rsp),%ebp
+ xorl %edx,%ecx
+ pslldq $12,%xmm9
+ paddd %xmm6,%xmm6
+ movl %eax,%edi
+ roll $5,%eax
+ andl %ecx,%esi
+ xorl %edx,%ecx
+ psrld $31,%xmm10
+ xorl %edx,%esi
+ addl %eax,%ebp
+ movdqa %xmm9,%xmm8
+ rorl $7,%ebx
+ addl %esi,%ebp
+ psrld $30,%xmm9
+ por %xmm10,%xmm6
+ addl 44(%rsp),%edx
+ xorl %ecx,%ebx
+ movl %ebp,%esi
+ roll $5,%ebp
+ pslld $2,%xmm8
+ pxor %xmm9,%xmm6
+ andl %ebx,%edi
+ xorl %ecx,%ebx
+ movdqa 16(%r11),%xmm9
+ xorl %ecx,%edi
+ addl %ebp,%edx
+ pxor %xmm8,%xmm6
+ rorl $7,%eax
+ addl %edi,%edx
+ movdqa %xmm4,%xmm7
+ addl 48(%rsp),%ecx
+ xorl %ebx,%eax
+ movdqa %xmm6,%xmm8
+.byte 102,15,58,15,251,8
+ movl %edx,%edi
+ roll $5,%edx
+ paddd %xmm6,%xmm9
+ andl %eax,%esi
+ xorl %ebx,%eax
+ psrldq $4,%xmm8
+ xorl %ebx,%esi
+ addl %edx,%ecx
+ pxor %xmm3,%xmm7
+ rorl $7,%ebp
+ addl %esi,%ecx
+ pxor %xmm5,%xmm8
+ addl 52(%rsp),%ebx
+ xorl %eax,%ebp
+ movl %ecx,%esi
+ roll $5,%ecx
+ pxor %xmm8,%xmm7
+ andl %ebp,%edi
+ xorl %eax,%ebp
+ movdqa %xmm9,32(%rsp)
+ xorl %eax,%edi
+ addl %ecx,%ebx
+ movdqa %xmm7,%xmm10
+ movdqa %xmm7,%xmm8
+ rorl $7,%edx
+ addl %edi,%ebx
+ addl 56(%rsp),%eax
+ xorl %ebp,%edx
+ pslldq $12,%xmm10
+ paddd %xmm7,%xmm7
+ movl %ebx,%edi
+ roll $5,%ebx
+ andl %edx,%esi
+ xorl %ebp,%edx
+ psrld $31,%xmm8
+ xorl %ebp,%esi
+ addl %ebx,%eax
+ movdqa %xmm10,%xmm9
+ rorl $7,%ecx
+ addl %esi,%eax
+ psrld $30,%xmm10
+ por %xmm8,%xmm7
+ addl 60(%rsp),%ebp
+ xorl %edx,%ecx
+ movl %eax,%esi
+ roll $5,%eax
+ pslld $2,%xmm9
+ pxor %xmm10,%xmm7
+ andl %ecx,%edi
+ xorl %edx,%ecx
+ movdqa 16(%r11),%xmm10
+ xorl %edx,%edi
+ addl %eax,%ebp
+ pxor %xmm9,%xmm7
+ rorl $7,%ebx
+ addl %edi,%ebp
+ movdqa %xmm7,%xmm9
+ addl 0(%rsp),%edx
+ pxor %xmm4,%xmm0
+.byte 102,68,15,58,15,206,8
+ xorl %ecx,%ebx
+ movl %ebp,%edi
+ roll $5,%ebp
+ pxor %xmm1,%xmm0
+ andl %ebx,%esi
+ xorl %ecx,%ebx
+ movdqa %xmm10,%xmm8
+ paddd %xmm7,%xmm10
+ xorl %ecx,%esi
+ addl %ebp,%edx
+ pxor %xmm9,%xmm0
+ rorl $7,%eax
+ addl %esi,%edx
+ addl 4(%rsp),%ecx
+ xorl %ebx,%eax
+ movdqa %xmm0,%xmm9
+ movdqa %xmm10,48(%rsp)
+ movl %edx,%esi
+ roll $5,%edx
+ andl %eax,%edi
+ xorl %ebx,%eax
+ pslld $2,%xmm0
+ xorl %ebx,%edi
+ addl %edx,%ecx
+ psrld $30,%xmm9
+ rorl $7,%ebp
+ addl %edi,%ecx
+ addl 8(%rsp),%ebx
+ xorl %eax,%ebp
+ movl %ecx,%edi
+ roll $5,%ecx
+ por %xmm9,%xmm0
+ andl %ebp,%esi
+ xorl %eax,%ebp
+ movdqa %xmm0,%xmm10
+ xorl %eax,%esi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %esi,%ebx
+ addl 12(%rsp),%eax
+ xorl %ebp,%edx
+ movl %ebx,%esi
+ roll $5,%ebx
+ andl %edx,%edi
+ xorl %ebp,%edx
+ xorl %ebp,%edi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %edi,%eax
+ addl 16(%rsp),%ebp
+ pxor %xmm5,%xmm1
+.byte 102,68,15,58,15,215,8
+ xorl %edx,%esi
+ movl %eax,%edi
+ roll $5,%eax
+ pxor %xmm2,%xmm1
+ xorl %ecx,%esi
+ addl %eax,%ebp
+ movdqa %xmm8,%xmm9
+ paddd %xmm0,%xmm8
+ rorl $7,%ebx
+ addl %esi,%ebp
+ pxor %xmm10,%xmm1
+ addl 20(%rsp),%edx
+ xorl %ecx,%edi
+ movl %ebp,%esi
+ roll $5,%ebp
+ movdqa %xmm1,%xmm10
+ movdqa %xmm8,0(%rsp)
+ xorl %ebx,%edi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %edi,%edx
+ pslld $2,%xmm1
+ addl 24(%rsp),%ecx
+ xorl %ebx,%esi
+ psrld $30,%xmm10
+ movl %edx,%edi
+ roll $5,%edx
+ xorl %eax,%esi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %esi,%ecx
+ por %xmm10,%xmm1
+ addl 28(%rsp),%ebx
+ xorl %eax,%edi
+ movdqa %xmm1,%xmm8
+ movl %ecx,%esi
+ roll $5,%ecx
+ xorl %ebp,%edi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %edi,%ebx
+ addl 32(%rsp),%eax
+ pxor %xmm6,%xmm2
+.byte 102,68,15,58,15,192,8
+ xorl %ebp,%esi
+ movl %ebx,%edi
+ roll $5,%ebx
+ pxor %xmm3,%xmm2
+ xorl %edx,%esi
+ addl %ebx,%eax
+ movdqa 32(%r11),%xmm10
+ paddd %xmm1,%xmm9
+ rorl $7,%ecx
+ addl %esi,%eax
+ pxor %xmm8,%xmm2
+ addl 36(%rsp),%ebp
+ xorl %edx,%edi
+ movl %eax,%esi
+ roll $5,%eax
+ movdqa %xmm2,%xmm8
+ movdqa %xmm9,16(%rsp)
+ xorl %ecx,%edi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %edi,%ebp
+ pslld $2,%xmm2
+ addl 40(%rsp),%edx
+ xorl %ecx,%esi
+ psrld $30,%xmm8
+ movl %ebp,%edi
+ roll $5,%ebp
+ xorl %ebx,%esi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %esi,%edx
+ por %xmm8,%xmm2
+ addl 44(%rsp),%ecx
+ xorl %ebx,%edi
+ movdqa %xmm2,%xmm9
+ movl %edx,%esi
+ roll $5,%edx
+ xorl %eax,%edi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %edi,%ecx
+ addl 48(%rsp),%ebx
+ pxor %xmm7,%xmm3
+.byte 102,68,15,58,15,201,8
+ xorl %eax,%esi
+ movl %ecx,%edi
+ roll $5,%ecx
+ pxor %xmm4,%xmm3
+ xorl %ebp,%esi
+ addl %ecx,%ebx
+ movdqa %xmm10,%xmm8
+ paddd %xmm2,%xmm10
+ rorl $7,%edx
+ addl %esi,%ebx
+ pxor %xmm9,%xmm3
+ addl 52(%rsp),%eax
+ xorl %ebp,%edi
+ movl %ebx,%esi
+ roll $5,%ebx
+ movdqa %xmm3,%xmm9
+ movdqa %xmm10,32(%rsp)
+ xorl %edx,%edi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %edi,%eax
+ pslld $2,%xmm3
+ addl 56(%rsp),%ebp
+ xorl %edx,%esi
+ psrld $30,%xmm9
+ movl %eax,%edi
+ roll $5,%eax
+ xorl %ecx,%esi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %esi,%ebp
+ por %xmm9,%xmm3
+ addl 60(%rsp),%edx
+ xorl %ecx,%edi
+ movdqa %xmm3,%xmm10
+ movl %ebp,%esi
+ roll $5,%ebp
+ xorl %ebx,%edi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %edi,%edx
+ addl 0(%rsp),%ecx
+ pxor %xmm0,%xmm4
+.byte 102,68,15,58,15,210,8
+ xorl %ebx,%esi
+ movl %edx,%edi
+ roll $5,%edx
+ pxor %xmm5,%xmm4
+ xorl %eax,%esi
+ addl %edx,%ecx
+ movdqa %xmm8,%xmm9
+ paddd %xmm3,%xmm8
+ rorl $7,%ebp
+ addl %esi,%ecx
+ pxor %xmm10,%xmm4
+ addl 4(%rsp),%ebx
+ xorl %eax,%edi
+ movl %ecx,%esi
+ roll $5,%ecx
+ movdqa %xmm4,%xmm10
+ movdqa %xmm8,48(%rsp)
+ xorl %ebp,%edi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %edi,%ebx
+ pslld $2,%xmm4
+ addl 8(%rsp),%eax
+ xorl %ebp,%esi
+ psrld $30,%xmm10
+ movl %ebx,%edi
+ roll $5,%ebx
+ xorl %edx,%esi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %esi,%eax
+ por %xmm10,%xmm4
+ addl 12(%rsp),%ebp
+ xorl %edx,%edi
+ movdqa %xmm4,%xmm8
+ movl %eax,%esi
+ roll $5,%eax
+ xorl %ecx,%edi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %edi,%ebp
+ addl 16(%rsp),%edx
+ pxor %xmm1,%xmm5
+.byte 102,68,15,58,15,195,8
+ xorl %ecx,%esi
+ movl %ebp,%edi
+ roll $5,%ebp
+ pxor %xmm6,%xmm5
+ xorl %ebx,%esi
+ addl %ebp,%edx
+ movdqa %xmm9,%xmm10
+ paddd %xmm4,%xmm9
+ rorl $7,%eax
+ addl %esi,%edx
+ pxor %xmm8,%xmm5
+ addl 20(%rsp),%ecx
+ xorl %ebx,%edi
+ movl %edx,%esi
+ roll $5,%edx
+ movdqa %xmm5,%xmm8
+ movdqa %xmm9,0(%rsp)
+ xorl %eax,%edi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %edi,%ecx
+ pslld $2,%xmm5
+ addl 24(%rsp),%ebx
+ xorl %eax,%esi
+ psrld $30,%xmm8
+ movl %ecx,%edi
+ roll $5,%ecx
+ xorl %ebp,%esi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %esi,%ebx
+ por %xmm8,%xmm5
+ addl 28(%rsp),%eax
+ xorl %ebp,%edi
+ movdqa %xmm5,%xmm9
+ movl %ebx,%esi
+ roll $5,%ebx
+ xorl %edx,%edi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %edi,%eax
+ movl %ecx,%edi
+ pxor %xmm2,%xmm6
+.byte 102,68,15,58,15,204,8
+ xorl %edx,%ecx
+ addl 32(%rsp),%ebp
+ andl %edx,%edi
+ pxor %xmm7,%xmm6
+ andl %ecx,%esi
+ rorl $7,%ebx
+ movdqa %xmm10,%xmm8
+ paddd %xmm5,%xmm10
+ addl %edi,%ebp
+ movl %eax,%edi
+ pxor %xmm9,%xmm6
+ roll $5,%eax
+ addl %esi,%ebp
+ xorl %edx,%ecx
+ addl %eax,%ebp
+ movdqa %xmm6,%xmm9
+ movdqa %xmm10,16(%rsp)
+ movl %ebx,%esi
+ xorl %ecx,%ebx
+ addl 36(%rsp),%edx
+ andl %ecx,%esi
+ pslld $2,%xmm6
+ andl %ebx,%edi
+ rorl $7,%eax
+ psrld $30,%xmm9
+ addl %esi,%edx
+ movl %ebp,%esi
+ roll $5,%ebp
+ addl %edi,%edx
+ xorl %ecx,%ebx
+ addl %ebp,%edx
+ por %xmm9,%xmm6
+ movl %eax,%edi
+ xorl %ebx,%eax
+ movdqa %xmm6,%xmm10
+ addl 40(%rsp),%ecx
+ andl %ebx,%edi
+ andl %eax,%esi
+ rorl $7,%ebp
+ addl %edi,%ecx
+ movl %edx,%edi
+ roll $5,%edx
+ addl %esi,%ecx
+ xorl %ebx,%eax
+ addl %edx,%ecx
+ movl %ebp,%esi
+ xorl %eax,%ebp
+ addl 44(%rsp),%ebx
+ andl %eax,%esi
+ andl %ebp,%edi
+ rorl $7,%edx
+ addl %esi,%ebx
+ movl %ecx,%esi
+ roll $5,%ecx
+ addl %edi,%ebx
+ xorl %eax,%ebp
+ addl %ecx,%ebx
+ movl %edx,%edi
+ pxor %xmm3,%xmm7
+.byte 102,68,15,58,15,213,8
+ xorl %ebp,%edx
+ addl 48(%rsp),%eax
+ andl %ebp,%edi
+ pxor %xmm0,%xmm7
+ andl %edx,%esi
+ rorl $7,%ecx
+ movdqa 48(%r11),%xmm9
+ paddd %xmm6,%xmm8
+ addl %edi,%eax
+ movl %ebx,%edi
+ pxor %xmm10,%xmm7
+ roll $5,%ebx
+ addl %esi,%eax
+ xorl %ebp,%edx
+ addl %ebx,%eax
+ movdqa %xmm7,%xmm10
+ movdqa %xmm8,32(%rsp)
+ movl %ecx,%esi
+ xorl %edx,%ecx
+ addl 52(%rsp),%ebp
+ andl %edx,%esi
+ pslld $2,%xmm7
+ andl %ecx,%edi
+ rorl $7,%ebx
+ psrld $30,%xmm10
+ addl %esi,%ebp
+ movl %eax,%esi
+ roll $5,%eax
+ addl %edi,%ebp
+ xorl %edx,%ecx
+ addl %eax,%ebp
+ por %xmm10,%xmm7
+ movl %ebx,%edi
+ xorl %ecx,%ebx
+ movdqa %xmm7,%xmm8
+ addl 56(%rsp),%edx
+ andl %ecx,%edi
+ andl %ebx,%esi
+ rorl $7,%eax
+ addl %edi,%edx
+ movl %ebp,%edi
+ roll $5,%ebp
+ addl %esi,%edx
+ xorl %ecx,%ebx
+ addl %ebp,%edx
+ movl %eax,%esi
+ xorl %ebx,%eax
+ addl 60(%rsp),%ecx
+ andl %ebx,%esi
+ andl %eax,%edi
+ rorl $7,%ebp
+ addl %esi,%ecx
+ movl %edx,%esi
+ roll $5,%edx
+ addl %edi,%ecx
+ xorl %ebx,%eax
+ addl %edx,%ecx
+ movl %ebp,%edi
+ pxor %xmm4,%xmm0
+.byte 102,68,15,58,15,198,8
+ xorl %eax,%ebp
+ addl 0(%rsp),%ebx
+ andl %eax,%edi
+ pxor %xmm1,%xmm0
+ andl %ebp,%esi
+ rorl $7,%edx
+ movdqa %xmm9,%xmm10
+ paddd %xmm7,%xmm9
+ addl %edi,%ebx
+ movl %ecx,%edi
+ pxor %xmm8,%xmm0
+ roll $5,%ecx
+ addl %esi,%ebx
+ xorl %eax,%ebp
+ addl %ecx,%ebx
+ movdqa %xmm0,%xmm8
+ movdqa %xmm9,48(%rsp)
+ movl %edx,%esi
+ xorl %ebp,%edx
+ addl 4(%rsp),%eax
+ andl %ebp,%esi
+ pslld $2,%xmm0
+ andl %edx,%edi
+ rorl $7,%ecx
+ psrld $30,%xmm8
+ addl %esi,%eax
+ movl %ebx,%esi
+ roll $5,%ebx
+ addl %edi,%eax
+ xorl %ebp,%edx
+ addl %ebx,%eax
+ por %xmm8,%xmm0
+ movl %ecx,%edi
+ xorl %edx,%ecx
+ movdqa %xmm0,%xmm9
+ addl 8(%rsp),%ebp
+ andl %edx,%edi
+ andl %ecx,%esi
+ rorl $7,%ebx
+ addl %edi,%ebp
+ movl %eax,%edi
+ roll $5,%eax
+ addl %esi,%ebp
+ xorl %edx,%ecx
+ addl %eax,%ebp
+ movl %ebx,%esi
+ xorl %ecx,%ebx
+ addl 12(%rsp),%edx
+ andl %ecx,%esi
+ andl %ebx,%edi
+ rorl $7,%eax
+ addl %esi,%edx
+ movl %ebp,%esi
+ roll $5,%ebp
+ addl %edi,%edx
+ xorl %ecx,%ebx
+ addl %ebp,%edx
+ movl %eax,%edi
+ pxor %xmm5,%xmm1
+.byte 102,68,15,58,15,207,8
+ xorl %ebx,%eax
+ addl 16(%rsp),%ecx
+ andl %ebx,%edi
+ pxor %xmm2,%xmm1
+ andl %eax,%esi
+ rorl $7,%ebp
+ movdqa %xmm10,%xmm8
+ paddd %xmm0,%xmm10
+ addl %edi,%ecx
+ movl %edx,%edi
+ pxor %xmm9,%xmm1
+ roll $5,%edx
+ addl %esi,%ecx
+ xorl %ebx,%eax
+ addl %edx,%ecx
+ movdqa %xmm1,%xmm9
+ movdqa %xmm10,0(%rsp)
+ movl %ebp,%esi
+ xorl %eax,%ebp
+ addl 20(%rsp),%ebx
+ andl %eax,%esi
+ pslld $2,%xmm1
+ andl %ebp,%edi
+ rorl $7,%edx
+ psrld $30,%xmm9
+ addl %esi,%ebx
+ movl %ecx,%esi
+ roll $5,%ecx
+ addl %edi,%ebx
+ xorl %eax,%ebp
+ addl %ecx,%ebx
+ por %xmm9,%xmm1
+ movl %edx,%edi
+ xorl %ebp,%edx
+ movdqa %xmm1,%xmm10
+ addl 24(%rsp),%eax
+ andl %ebp,%edi
+ andl %edx,%esi
+ rorl $7,%ecx
+ addl %edi,%eax
+ movl %ebx,%edi
+ roll $5,%ebx
+ addl %esi,%eax
+ xorl %ebp,%edx
+ addl %ebx,%eax
+ movl %ecx,%esi
+ xorl %edx,%ecx
+ addl 28(%rsp),%ebp
+ andl %edx,%esi
+ andl %ecx,%edi
+ rorl $7,%ebx
+ addl %esi,%ebp
+ movl %eax,%esi
+ roll $5,%eax
+ addl %edi,%ebp
+ xorl %edx,%ecx
+ addl %eax,%ebp
+ movl %ebx,%edi
+ pxor %xmm6,%xmm2
+.byte 102,68,15,58,15,208,8
+ xorl %ecx,%ebx
+ addl 32(%rsp),%edx
+ andl %ecx,%edi
+ pxor %xmm3,%xmm2
+ andl %ebx,%esi
+ rorl $7,%eax
+ movdqa %xmm8,%xmm9
+ paddd %xmm1,%xmm8
+ addl %edi,%edx
+ movl %ebp,%edi
+ pxor %xmm10,%xmm2
+ roll $5,%ebp
+ addl %esi,%edx
+ xorl %ecx,%ebx
+ addl %ebp,%edx
+ movdqa %xmm2,%xmm10
+ movdqa %xmm8,16(%rsp)
+ movl %eax,%esi
+ xorl %ebx,%eax
+ addl 36(%rsp),%ecx
+ andl %ebx,%esi
+ pslld $2,%xmm2
+ andl %eax,%edi
+ rorl $7,%ebp
+ psrld $30,%xmm10
+ addl %esi,%ecx
+ movl %edx,%esi
+ roll $5,%edx
+ addl %edi,%ecx
+ xorl %ebx,%eax
+ addl %edx,%ecx
+ por %xmm10,%xmm2
+ movl %ebp,%edi
+ xorl %eax,%ebp
+ movdqa %xmm2,%xmm8
+ addl 40(%rsp),%ebx
+ andl %eax,%edi
+ andl %ebp,%esi
+ rorl $7,%edx
+ addl %edi,%ebx
+ movl %ecx,%edi
+ roll $5,%ecx
+ addl %esi,%ebx
+ xorl %eax,%ebp
+ addl %ecx,%ebx
+ movl %edx,%esi
+ xorl %ebp,%edx
+ addl 44(%rsp),%eax
+ andl %ebp,%esi
+ andl %edx,%edi
+ rorl $7,%ecx
+ addl %esi,%eax
+ movl %ebx,%esi
+ roll $5,%ebx
+ addl %edi,%eax
+ xorl %ebp,%edx
+ addl %ebx,%eax
+ addl 48(%rsp),%ebp
+ pxor %xmm7,%xmm3
+.byte 102,68,15,58,15,193,8
+ xorl %edx,%esi
+ movl %eax,%edi
+ roll $5,%eax
+ pxor %xmm4,%xmm3
+ xorl %ecx,%esi
+ addl %eax,%ebp
+ movdqa %xmm9,%xmm10
+ paddd %xmm2,%xmm9
+ rorl $7,%ebx
+ addl %esi,%ebp
+ pxor %xmm8,%xmm3
+ addl 52(%rsp),%edx
+ xorl %ecx,%edi
+ movl %ebp,%esi
+ roll $5,%ebp
+ movdqa %xmm3,%xmm8
+ movdqa %xmm9,32(%rsp)
+ xorl %ebx,%edi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %edi,%edx
+ pslld $2,%xmm3
+ addl 56(%rsp),%ecx
+ xorl %ebx,%esi
+ psrld $30,%xmm8
+ movl %edx,%edi
+ roll $5,%edx
+ xorl %eax,%esi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %esi,%ecx
+ por %xmm8,%xmm3
+ addl 60(%rsp),%ebx
+ xorl %eax,%edi
+ movl %ecx,%esi
+ roll $5,%ecx
+ xorl %ebp,%edi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %edi,%ebx
+ addl 0(%rsp),%eax
+ paddd %xmm3,%xmm10
+ xorl %ebp,%esi
+ movl %ebx,%edi
+ roll $5,%ebx
+ xorl %edx,%esi
+ movdqa %xmm10,48(%rsp)
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %esi,%eax
+ addl 4(%rsp),%ebp
+ xorl %edx,%edi
+ movl %eax,%esi
+ roll $5,%eax
+ xorl %ecx,%edi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %edi,%ebp
+ addl 8(%rsp),%edx
+ xorl %ecx,%esi
+ movl %ebp,%edi
+ roll $5,%ebp
+ xorl %ebx,%esi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %esi,%edx
+ addl 12(%rsp),%ecx
+ xorl %ebx,%edi
+ movl %edx,%esi
+ roll $5,%edx
+ xorl %eax,%edi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %edi,%ecx
+ cmpq %r10,%r9
+ je L$done_ssse3
+ movdqa 64(%r11),%xmm6
+ movdqa 0(%r11),%xmm9
+ movdqu 0(%r9),%xmm0
+ movdqu 16(%r9),%xmm1
+ movdqu 32(%r9),%xmm2
+ movdqu 48(%r9),%xmm3
+.byte 102,15,56,0,198
+ addq $64,%r9
+ addl 16(%rsp),%ebx
+ xorl %eax,%esi
+.byte 102,15,56,0,206
+ movl %ecx,%edi
+ roll $5,%ecx
+ paddd %xmm9,%xmm0
+ xorl %ebp,%esi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %esi,%ebx
+ movdqa %xmm0,0(%rsp)
+ addl 20(%rsp),%eax
+ xorl %ebp,%edi
+ psubd %xmm9,%xmm0
+ movl %ebx,%esi
+ roll $5,%ebx
+ xorl %edx,%edi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %edi,%eax
+ addl 24(%rsp),%ebp
+ xorl %edx,%esi
+ movl %eax,%edi
+ roll $5,%eax
+ xorl %ecx,%esi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %esi,%ebp
+ addl 28(%rsp),%edx
+ xorl %ecx,%edi
+ movl %ebp,%esi
+ roll $5,%ebp
+ xorl %ebx,%edi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %edi,%edx
+ addl 32(%rsp),%ecx
+ xorl %ebx,%esi
+.byte 102,15,56,0,214
+ movl %edx,%edi
+ roll $5,%edx
+ paddd %xmm9,%xmm1
+ xorl %eax,%esi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %esi,%ecx
+ movdqa %xmm1,16(%rsp)
+ addl 36(%rsp),%ebx
+ xorl %eax,%edi
+ psubd %xmm9,%xmm1
+ movl %ecx,%esi
+ roll $5,%ecx
+ xorl %ebp,%edi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %edi,%ebx
+ addl 40(%rsp),%eax
+ xorl %ebp,%esi
+ movl %ebx,%edi
+ roll $5,%ebx
+ xorl %edx,%esi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %esi,%eax
+ addl 44(%rsp),%ebp
+ xorl %edx,%edi
+ movl %eax,%esi
+ roll $5,%eax
+ xorl %ecx,%edi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %edi,%ebp
+ addl 48(%rsp),%edx
+ xorl %ecx,%esi
+.byte 102,15,56,0,222
+ movl %ebp,%edi
+ roll $5,%ebp
+ paddd %xmm9,%xmm2
+ xorl %ebx,%esi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %esi,%edx
+ movdqa %xmm2,32(%rsp)
+ addl 52(%rsp),%ecx
+ xorl %ebx,%edi
+ psubd %xmm9,%xmm2
+ movl %edx,%esi
+ roll $5,%edx
+ xorl %eax,%edi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %edi,%ecx
+ addl 56(%rsp),%ebx
+ xorl %eax,%esi
+ movl %ecx,%edi
+ roll $5,%ecx
+ xorl %ebp,%esi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %esi,%ebx
+ addl 60(%rsp),%eax
+ xorl %ebp,%edi
+ movl %ebx,%esi
+ roll $5,%ebx
+ xorl %edx,%edi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %edi,%eax
+ addl 0(%r8),%eax
+ addl 4(%r8),%esi
+ addl 8(%r8),%ecx
+ addl 12(%r8),%edx
+ movl %eax,0(%r8)
+ addl 16(%r8),%ebp
+ movl %esi,4(%r8)
+ movl %esi,%ebx
+ movl %ecx,8(%r8)
+ movl %edx,12(%r8)
+ movl %ebp,16(%r8)
+ jmp L$oop_ssse3
+
+.p2align 4
+L$done_ssse3:
+ addl 16(%rsp),%ebx
+ xorl %eax,%esi
+ movl %ecx,%edi
+ roll $5,%ecx
+ xorl %ebp,%esi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %esi,%ebx
+ addl 20(%rsp),%eax
+ xorl %ebp,%edi
+ movl %ebx,%esi
+ roll $5,%ebx
+ xorl %edx,%edi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %edi,%eax
+ addl 24(%rsp),%ebp
+ xorl %edx,%esi
+ movl %eax,%edi
+ roll $5,%eax
+ xorl %ecx,%esi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %esi,%ebp
+ addl 28(%rsp),%edx
+ xorl %ecx,%edi
+ movl %ebp,%esi
+ roll $5,%ebp
+ xorl %ebx,%edi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %edi,%edx
+ addl 32(%rsp),%ecx
+ xorl %ebx,%esi
+ movl %edx,%edi
+ roll $5,%edx
+ xorl %eax,%esi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %esi,%ecx
+ addl 36(%rsp),%ebx
+ xorl %eax,%edi
+ movl %ecx,%esi
+ roll $5,%ecx
+ xorl %ebp,%edi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %edi,%ebx
+ addl 40(%rsp),%eax
+ xorl %ebp,%esi
+ movl %ebx,%edi
+ roll $5,%ebx
+ xorl %edx,%esi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %esi,%eax
+ addl 44(%rsp),%ebp
+ xorl %edx,%edi
+ movl %eax,%esi
+ roll $5,%eax
+ xorl %ecx,%edi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %edi,%ebp
+ addl 48(%rsp),%edx
+ xorl %ecx,%esi
+ movl %ebp,%edi
+ roll $5,%ebp
+ xorl %ebx,%esi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %esi,%edx
+ addl 52(%rsp),%ecx
+ xorl %ebx,%edi
+ movl %edx,%esi
+ roll $5,%edx
+ xorl %eax,%edi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %edi,%ecx
+ addl 56(%rsp),%ebx
+ xorl %eax,%esi
+ movl %ecx,%edi
+ roll $5,%ecx
+ xorl %ebp,%esi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %esi,%ebx
+ addl 60(%rsp),%eax
+ xorl %ebp,%edi
+ movl %ebx,%esi
+ roll $5,%ebx
+ xorl %edx,%edi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %edi,%eax
+ addl 0(%r8),%eax
+ addl 4(%r8),%esi
+ addl 8(%r8),%ecx
+ movl %eax,0(%r8)
+ addl 12(%r8),%edx
+ movl %esi,4(%r8)
+ addl 16(%r8),%ebp
+ movl %ecx,8(%r8)
+ movl %edx,12(%r8)
+ movl %ebp,16(%r8)
+ leaq 64(%rsp),%rsi
+ movq 0(%rsi),%r12
+ movq 8(%rsi),%rbp
+ movq 16(%rsi),%rbx
+ leaq 24(%rsi),%rsp
+L$epilogue_ssse3:
+ retq
+
+.p2align 6
+K_XX_XX:
+.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999
+.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1
+.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc
+.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6
+.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
+.p2align 6
diff --git a/crypto/libressl/crypto/sha/sha1-masm-x86_64.S b/crypto/libressl/crypto/sha/sha1-masm-x86_64.S
new file mode 100644
index 0000000..36d8732
--- /dev/null
+++ b/crypto/libressl/crypto/sha/sha1-masm-x86_64.S
@@ -0,0 +1,2746 @@
+; 1 "crypto/sha/sha1-masm-x86_64.S.tmp"
+; 1 "<built-in>" 1
+; 1 "<built-in>" 3
+; 340 "<built-in>" 3
+; 1 "<command line>" 1
+; 1 "<built-in>" 2
+; 1 "crypto/sha/sha1-masm-x86_64.S.tmp" 2
+OPTION DOTNAME
+
+; 1 "./crypto/x86_arch.h" 1
+
+
+; 16 "./crypto/x86_arch.h"
+
+
+
+
+
+
+
+
+
+; 40 "./crypto/x86_arch.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+; 3 "crypto/sha/sha1-masm-x86_64.S.tmp" 2
+.text$ SEGMENT ALIGN(64) 'CODE'
+EXTERN OPENSSL_ia32cap_P:NEAR
+
+
+PUBLIC sha1_block_data_order
+
+ALIGN 16
+sha1_block_data_order PROC PUBLIC
+ mov QWORD PTR[8+rsp],rdi ;WIN64 prologue
+ mov QWORD PTR[16+rsp],rsi
+ mov rax,rsp
+$L$SEH_begin_sha1_block_data_order::
+ mov rdi,rcx
+ mov rsi,rdx
+ mov rdx,r8
+
+
+ mov r9d,DWORD PTR[((OPENSSL_ia32cap_P+0))]
+ mov r8d,DWORD PTR[((OPENSSL_ia32cap_P+4))]
+ test r8d,(1 SHL 9)
+ jz $L$ialu
+ jmp _ssse3_shortcut
+
+ALIGN 16
+$L$ialu::
+ push rbx
+ push rbp
+ push r12
+ push r13
+ mov r11,rsp
+ mov r8,rdi
+ sub rsp,72
+ mov r9,rsi
+ and rsp,-64
+ mov r10,rdx
+ mov QWORD PTR[64+rsp],r11
+$L$prologue::
+
+ mov esi,DWORD PTR[r8]
+ mov edi,DWORD PTR[4+r8]
+ mov r11d,DWORD PTR[8+r8]
+ mov r12d,DWORD PTR[12+r8]
+ mov r13d,DWORD PTR[16+r8]
+ jmp $L$loop
+
+ALIGN 16
+$L$loop::
+ mov edx,DWORD PTR[r9]
+ bswap edx
+ mov DWORD PTR[rsp],edx
+ mov eax,r11d
+ mov ebp,DWORD PTR[4+r9]
+ mov ecx,esi
+ xor eax,r12d
+ bswap ebp
+ rol ecx,5
+ lea r13d,DWORD PTR[1518500249+r13*1+rdx]
+ and eax,edi
+ mov DWORD PTR[4+rsp],ebp
+ add r13d,ecx
+ xor eax,r12d
+ rol edi,30
+ add r13d,eax
+ mov eax,edi
+ mov edx,DWORD PTR[8+r9]
+ mov ecx,r13d
+ xor eax,r11d
+ bswap edx
+ rol ecx,5
+ lea r12d,DWORD PTR[1518500249+r12*1+rbp]
+ and eax,esi
+ mov DWORD PTR[8+rsp],edx
+ add r12d,ecx
+ xor eax,r11d
+ rol esi,30
+ add r12d,eax
+ mov eax,esi
+ mov ebp,DWORD PTR[12+r9]
+ mov ecx,r12d
+ xor eax,edi
+ bswap ebp
+ rol ecx,5
+ lea r11d,DWORD PTR[1518500249+r11*1+rdx]
+ and eax,r13d
+ mov DWORD PTR[12+rsp],ebp
+ add r11d,ecx
+ xor eax,edi
+ rol r13d,30
+ add r11d,eax
+ mov eax,r13d
+ mov edx,DWORD PTR[16+r9]
+ mov ecx,r11d
+ xor eax,esi
+ bswap edx
+ rol ecx,5
+ lea edi,DWORD PTR[1518500249+rdi*1+rbp]
+ and eax,r12d
+ mov DWORD PTR[16+rsp],edx
+ add edi,ecx
+ xor eax,esi
+ rol r12d,30
+ add edi,eax
+ mov eax,r12d
+ mov ebp,DWORD PTR[20+r9]
+ mov ecx,edi
+ xor eax,r13d
+ bswap ebp
+ rol ecx,5
+ lea esi,DWORD PTR[1518500249+rsi*1+rdx]
+ and eax,r11d
+ mov DWORD PTR[20+rsp],ebp
+ add esi,ecx
+ xor eax,r13d
+ rol r11d,30
+ add esi,eax
+ mov eax,r11d
+ mov edx,DWORD PTR[24+r9]
+ mov ecx,esi
+ xor eax,r12d
+ bswap edx
+ rol ecx,5
+ lea r13d,DWORD PTR[1518500249+r13*1+rbp]
+ and eax,edi
+ mov DWORD PTR[24+rsp],edx
+ add r13d,ecx
+ xor eax,r12d
+ rol edi,30
+ add r13d,eax
+ mov eax,edi
+ mov ebp,DWORD PTR[28+r9]
+ mov ecx,r13d
+ xor eax,r11d
+ bswap ebp
+ rol ecx,5
+ lea r12d,DWORD PTR[1518500249+r12*1+rdx]
+ and eax,esi
+ mov DWORD PTR[28+rsp],ebp
+ add r12d,ecx
+ xor eax,r11d
+ rol esi,30
+ add r12d,eax
+ mov eax,esi
+ mov edx,DWORD PTR[32+r9]
+ mov ecx,r12d
+ xor eax,edi
+ bswap edx
+ rol ecx,5
+ lea r11d,DWORD PTR[1518500249+r11*1+rbp]
+ and eax,r13d
+ mov DWORD PTR[32+rsp],edx
+ add r11d,ecx
+ xor eax,edi
+ rol r13d,30
+ add r11d,eax
+ mov eax,r13d
+ mov ebp,DWORD PTR[36+r9]
+ mov ecx,r11d
+ xor eax,esi
+ bswap ebp
+ rol ecx,5
+ lea edi,DWORD PTR[1518500249+rdi*1+rdx]
+ and eax,r12d
+ mov DWORD PTR[36+rsp],ebp
+ add edi,ecx
+ xor eax,esi
+ rol r12d,30
+ add edi,eax
+ mov eax,r12d
+ mov edx,DWORD PTR[40+r9]
+ mov ecx,edi
+ xor eax,r13d
+ bswap edx
+ rol ecx,5
+ lea esi,DWORD PTR[1518500249+rsi*1+rbp]
+ and eax,r11d
+ mov DWORD PTR[40+rsp],edx
+ add esi,ecx
+ xor eax,r13d
+ rol r11d,30
+ add esi,eax
+ mov eax,r11d
+ mov ebp,DWORD PTR[44+r9]
+ mov ecx,esi
+ xor eax,r12d
+ bswap ebp
+ rol ecx,5
+ lea r13d,DWORD PTR[1518500249+r13*1+rdx]
+ and eax,edi
+ mov DWORD PTR[44+rsp],ebp
+ add r13d,ecx
+ xor eax,r12d
+ rol edi,30
+ add r13d,eax
+ mov eax,edi
+ mov edx,DWORD PTR[48+r9]
+ mov ecx,r13d
+ xor eax,r11d
+ bswap edx
+ rol ecx,5
+ lea r12d,DWORD PTR[1518500249+r12*1+rbp]
+ and eax,esi
+ mov DWORD PTR[48+rsp],edx
+ add r12d,ecx
+ xor eax,r11d
+ rol esi,30
+ add r12d,eax
+ mov eax,esi
+ mov ebp,DWORD PTR[52+r9]
+ mov ecx,r12d
+ xor eax,edi
+ bswap ebp
+ rol ecx,5
+ lea r11d,DWORD PTR[1518500249+r11*1+rdx]
+ and eax,r13d
+ mov DWORD PTR[52+rsp],ebp
+ add r11d,ecx
+ xor eax,edi
+ rol r13d,30
+ add r11d,eax
+ mov eax,r13d
+ mov edx,DWORD PTR[56+r9]
+ mov ecx,r11d
+ xor eax,esi
+ bswap edx
+ rol ecx,5
+ lea edi,DWORD PTR[1518500249+rdi*1+rbp]
+ and eax,r12d
+ mov DWORD PTR[56+rsp],edx
+ add edi,ecx
+ xor eax,esi
+ rol r12d,30
+ add edi,eax
+ mov eax,r12d
+ mov ebp,DWORD PTR[60+r9]
+ mov ecx,edi
+ xor eax,r13d
+ bswap ebp
+ rol ecx,5
+ lea esi,DWORD PTR[1518500249+rsi*1+rdx]
+ and eax,r11d
+ mov DWORD PTR[60+rsp],ebp
+ add esi,ecx
+ xor eax,r13d
+ rol r11d,30
+ add esi,eax
+ mov edx,DWORD PTR[rsp]
+ mov eax,r11d
+ mov ecx,esi
+ xor edx,DWORD PTR[8+rsp]
+ xor eax,r12d
+ rol ecx,5
+ xor edx,DWORD PTR[32+rsp]
+ and eax,edi
+ lea r13d,DWORD PTR[1518500249+r13*1+rbp]
+ xor edx,DWORD PTR[52+rsp]
+ xor eax,r12d
+ rol edx,1
+ add r13d,ecx
+ rol edi,30
+ mov DWORD PTR[rsp],edx
+ add r13d,eax
+ mov ebp,DWORD PTR[4+rsp]
+ mov eax,edi
+ mov ecx,r13d
+ xor ebp,DWORD PTR[12+rsp]
+ xor eax,r11d
+ rol ecx,5
+ xor ebp,DWORD PTR[36+rsp]
+ and eax,esi
+ lea r12d,DWORD PTR[1518500249+r12*1+rdx]
+ xor ebp,DWORD PTR[56+rsp]
+ xor eax,r11d
+ rol ebp,1
+ add r12d,ecx
+ rol esi,30
+ mov DWORD PTR[4+rsp],ebp
+ add r12d,eax
+ mov edx,DWORD PTR[8+rsp]
+ mov eax,esi
+ mov ecx,r12d
+ xor edx,DWORD PTR[16+rsp]
+ xor eax,edi
+ rol ecx,5
+ xor edx,DWORD PTR[40+rsp]
+ and eax,r13d
+ lea r11d,DWORD PTR[1518500249+r11*1+rbp]
+ xor edx,DWORD PTR[60+rsp]
+ xor eax,edi
+ rol edx,1
+ add r11d,ecx
+ rol r13d,30
+ mov DWORD PTR[8+rsp],edx
+ add r11d,eax
+ mov ebp,DWORD PTR[12+rsp]
+ mov eax,r13d
+ mov ecx,r11d
+ xor ebp,DWORD PTR[20+rsp]
+ xor eax,esi
+ rol ecx,5
+ xor ebp,DWORD PTR[44+rsp]
+ and eax,r12d
+ lea edi,DWORD PTR[1518500249+rdi*1+rdx]
+ xor ebp,DWORD PTR[rsp]
+ xor eax,esi
+ rol ebp,1
+ add edi,ecx
+ rol r12d,30
+ mov DWORD PTR[12+rsp],ebp
+ add edi,eax
+ mov edx,DWORD PTR[16+rsp]
+ mov eax,r12d
+ mov ecx,edi
+ xor edx,DWORD PTR[24+rsp]
+ xor eax,r13d
+ rol ecx,5
+ xor edx,DWORD PTR[48+rsp]
+ and eax,r11d
+ lea esi,DWORD PTR[1518500249+rsi*1+rbp]
+ xor edx,DWORD PTR[4+rsp]
+ xor eax,r13d
+ rol edx,1
+ add esi,ecx
+ rol r11d,30
+ mov DWORD PTR[16+rsp],edx
+ add esi,eax
+ mov ebp,DWORD PTR[20+rsp]
+ mov eax,r11d
+ mov ecx,esi
+ xor ebp,DWORD PTR[28+rsp]
+ xor eax,edi
+ rol ecx,5
+ lea r13d,DWORD PTR[1859775393+r13*1+rdx]
+ xor ebp,DWORD PTR[52+rsp]
+ xor eax,r12d
+ add r13d,ecx
+ xor ebp,DWORD PTR[8+rsp]
+ rol edi,30
+ add r13d,eax
+ rol ebp,1
+ mov DWORD PTR[20+rsp],ebp
+ mov edx,DWORD PTR[24+rsp]
+ mov eax,edi
+ mov ecx,r13d
+ xor edx,DWORD PTR[32+rsp]
+ xor eax,esi
+ rol ecx,5
+ lea r12d,DWORD PTR[1859775393+r12*1+rbp]
+ xor edx,DWORD PTR[56+rsp]
+ xor eax,r11d
+ add r12d,ecx
+ xor edx,DWORD PTR[12+rsp]
+ rol esi,30
+ add r12d,eax
+ rol edx,1
+ mov DWORD PTR[24+rsp],edx
+ mov ebp,DWORD PTR[28+rsp]
+ mov eax,esi
+ mov ecx,r12d
+ xor ebp,DWORD PTR[36+rsp]
+ xor eax,r13d
+ rol ecx,5
+ lea r11d,DWORD PTR[1859775393+r11*1+rdx]
+ xor ebp,DWORD PTR[60+rsp]
+ xor eax,edi
+ add r11d,ecx
+ xor ebp,DWORD PTR[16+rsp]
+ rol r13d,30
+ add r11d,eax
+ rol ebp,1
+ mov DWORD PTR[28+rsp],ebp
+ mov edx,DWORD PTR[32+rsp]
+ mov eax,r13d
+ mov ecx,r11d
+ xor edx,DWORD PTR[40+rsp]
+ xor eax,r12d
+ rol ecx,5
+ lea edi,DWORD PTR[1859775393+rdi*1+rbp]
+ xor edx,DWORD PTR[rsp]
+ xor eax,esi
+ add edi,ecx
+ xor edx,DWORD PTR[20+rsp]
+ rol r12d,30
+ add edi,eax
+ rol edx,1
+ mov DWORD PTR[32+rsp],edx
+ mov ebp,DWORD PTR[36+rsp]
+ mov eax,r12d
+ mov ecx,edi
+ xor ebp,DWORD PTR[44+rsp]
+ xor eax,r11d
+ rol ecx,5
+ lea esi,DWORD PTR[1859775393+rsi*1+rdx]
+ xor ebp,DWORD PTR[4+rsp]
+ xor eax,r13d
+ add esi,ecx
+ xor ebp,DWORD PTR[24+rsp]
+ rol r11d,30
+ add esi,eax
+ rol ebp,1
+ mov DWORD PTR[36+rsp],ebp
+ mov edx,DWORD PTR[40+rsp]
+ mov eax,r11d
+ mov ecx,esi
+ xor edx,DWORD PTR[48+rsp]
+ xor eax,edi
+ rol ecx,5
+ lea r13d,DWORD PTR[1859775393+r13*1+rbp]
+ xor edx,DWORD PTR[8+rsp]
+ xor eax,r12d
+ add r13d,ecx
+ xor edx,DWORD PTR[28+rsp]
+ rol edi,30
+ add r13d,eax
+ rol edx,1
+ mov DWORD PTR[40+rsp],edx
+ mov ebp,DWORD PTR[44+rsp]
+ mov eax,edi
+ mov ecx,r13d
+ xor ebp,DWORD PTR[52+rsp]
+ xor eax,esi
+ rol ecx,5
+ lea r12d,DWORD PTR[1859775393+r12*1+rdx]
+ xor ebp,DWORD PTR[12+rsp]
+ xor eax,r11d
+ add r12d,ecx
+ xor ebp,DWORD PTR[32+rsp]
+ rol esi,30
+ add r12d,eax
+ rol ebp,1
+ mov DWORD PTR[44+rsp],ebp
+ mov edx,DWORD PTR[48+rsp]
+ mov eax,esi
+ mov ecx,r12d
+ xor edx,DWORD PTR[56+rsp]
+ xor eax,r13d
+ rol ecx,5
+ lea r11d,DWORD PTR[1859775393+r11*1+rbp]
+ xor edx,DWORD PTR[16+rsp]
+ xor eax,edi
+ add r11d,ecx
+ xor edx,DWORD PTR[36+rsp]
+ rol r13d,30
+ add r11d,eax
+ rol edx,1
+ mov DWORD PTR[48+rsp],edx
+ mov ebp,DWORD PTR[52+rsp]
+ mov eax,r13d
+ mov ecx,r11d
+ xor ebp,DWORD PTR[60+rsp]
+ xor eax,r12d
+ rol ecx,5
+ lea edi,DWORD PTR[1859775393+rdi*1+rdx]
+ xor ebp,DWORD PTR[20+rsp]
+ xor eax,esi
+ add edi,ecx
+ xor ebp,DWORD PTR[40+rsp]
+ rol r12d,30
+ add edi,eax
+ rol ebp,1
+ mov DWORD PTR[52+rsp],ebp
+ mov edx,DWORD PTR[56+rsp]
+ mov eax,r12d
+ mov ecx,edi
+ xor edx,DWORD PTR[rsp]
+ xor eax,r11d
+ rol ecx,5
+ lea esi,DWORD PTR[1859775393+rsi*1+rbp]
+ xor edx,DWORD PTR[24+rsp]
+ xor eax,r13d
+ add esi,ecx
+ xor edx,DWORD PTR[44+rsp]
+ rol r11d,30
+ add esi,eax
+ rol edx,1
+ mov DWORD PTR[56+rsp],edx
+ mov ebp,DWORD PTR[60+rsp]
+ mov eax,r11d
+ mov ecx,esi
+ xor ebp,DWORD PTR[4+rsp]
+ xor eax,edi
+ rol ecx,5
+ lea r13d,DWORD PTR[1859775393+r13*1+rdx]
+ xor ebp,DWORD PTR[28+rsp]
+ xor eax,r12d
+ add r13d,ecx
+ xor ebp,DWORD PTR[48+rsp]
+ rol edi,30
+ add r13d,eax
+ rol ebp,1
+ mov DWORD PTR[60+rsp],ebp
+ mov edx,DWORD PTR[rsp]
+ mov eax,edi
+ mov ecx,r13d
+ xor edx,DWORD PTR[8+rsp]
+ xor eax,esi
+ rol ecx,5
+ lea r12d,DWORD PTR[1859775393+r12*1+rbp]
+ xor edx,DWORD PTR[32+rsp]
+ xor eax,r11d
+ add r12d,ecx
+ xor edx,DWORD PTR[52+rsp]
+ rol esi,30
+ add r12d,eax
+ rol edx,1
+ mov DWORD PTR[rsp],edx
+ mov ebp,DWORD PTR[4+rsp]
+ mov eax,esi
+ mov ecx,r12d
+ xor ebp,DWORD PTR[12+rsp]
+ xor eax,r13d
+ rol ecx,5
+ lea r11d,DWORD PTR[1859775393+r11*1+rdx]
+ xor ebp,DWORD PTR[36+rsp]
+ xor eax,edi
+ add r11d,ecx
+ xor ebp,DWORD PTR[56+rsp]
+ rol r13d,30
+ add r11d,eax
+ rol ebp,1
+ mov DWORD PTR[4+rsp],ebp
+ mov edx,DWORD PTR[8+rsp]
+ mov eax,r13d
+ mov ecx,r11d
+ xor edx,DWORD PTR[16+rsp]
+ xor eax,r12d
+ rol ecx,5
+ lea edi,DWORD PTR[1859775393+rdi*1+rbp]
+ xor edx,DWORD PTR[40+rsp]
+ xor eax,esi
+ add edi,ecx
+ xor edx,DWORD PTR[60+rsp]
+ rol r12d,30
+ add edi,eax
+ rol edx,1
+ mov DWORD PTR[8+rsp],edx
+ mov ebp,DWORD PTR[12+rsp]
+ mov eax,r12d
+ mov ecx,edi
+ xor ebp,DWORD PTR[20+rsp]
+ xor eax,r11d
+ rol ecx,5
+ lea esi,DWORD PTR[1859775393+rsi*1+rdx]
+ xor ebp,DWORD PTR[44+rsp]
+ xor eax,r13d
+ add esi,ecx
+ xor ebp,DWORD PTR[rsp]
+ rol r11d,30
+ add esi,eax
+ rol ebp,1
+ mov DWORD PTR[12+rsp],ebp
+ mov edx,DWORD PTR[16+rsp]
+ mov eax,r11d
+ mov ecx,esi
+ xor edx,DWORD PTR[24+rsp]
+ xor eax,edi
+ rol ecx,5
+ lea r13d,DWORD PTR[1859775393+r13*1+rbp]
+ xor edx,DWORD PTR[48+rsp]
+ xor eax,r12d
+ add r13d,ecx
+ xor edx,DWORD PTR[4+rsp]
+ rol edi,30
+ add r13d,eax
+ rol edx,1
+ mov DWORD PTR[16+rsp],edx
+ mov ebp,DWORD PTR[20+rsp]
+ mov eax,edi
+ mov ecx,r13d
+ xor ebp,DWORD PTR[28+rsp]
+ xor eax,esi
+ rol ecx,5
+ lea r12d,DWORD PTR[1859775393+r12*1+rdx]
+ xor ebp,DWORD PTR[52+rsp]
+ xor eax,r11d
+ add r12d,ecx
+ xor ebp,DWORD PTR[8+rsp]
+ rol esi,30
+ add r12d,eax
+ rol ebp,1
+ mov DWORD PTR[20+rsp],ebp
+ mov edx,DWORD PTR[24+rsp]
+ mov eax,esi
+ mov ecx,r12d
+ xor edx,DWORD PTR[32+rsp]
+ xor eax,r13d
+ rol ecx,5
+ lea r11d,DWORD PTR[1859775393+r11*1+rbp]
+ xor edx,DWORD PTR[56+rsp]
+ xor eax,edi
+ add r11d,ecx
+ xor edx,DWORD PTR[12+rsp]
+ rol r13d,30
+ add r11d,eax
+ rol edx,1
+ mov DWORD PTR[24+rsp],edx
+ mov ebp,DWORD PTR[28+rsp]
+ mov eax,r13d
+ mov ecx,r11d
+ xor ebp,DWORD PTR[36+rsp]
+ xor eax,r12d
+ rol ecx,5
+ lea edi,DWORD PTR[1859775393+rdi*1+rdx]
+ xor ebp,DWORD PTR[60+rsp]
+ xor eax,esi
+ add edi,ecx
+ xor ebp,DWORD PTR[16+rsp]
+ rol r12d,30
+ add edi,eax
+ rol ebp,1
+ mov DWORD PTR[28+rsp],ebp
+ mov edx,DWORD PTR[32+rsp]
+ mov eax,r12d
+ mov ecx,edi
+ xor edx,DWORD PTR[40+rsp]
+ xor eax,r11d
+ rol ecx,5
+ lea esi,DWORD PTR[1859775393+rsi*1+rbp]
+ xor edx,DWORD PTR[rsp]
+ xor eax,r13d
+ add esi,ecx
+ xor edx,DWORD PTR[20+rsp]
+ rol r11d,30
+ add esi,eax
+ rol edx,1
+ mov DWORD PTR[32+rsp],edx
+ mov ebp,DWORD PTR[36+rsp]
+ mov eax,r11d
+ mov ebx,r11d
+ xor ebp,DWORD PTR[44+rsp]
+ and eax,r12d
+ mov ecx,esi
+ xor ebp,DWORD PTR[4+rsp]
+ xor ebx,r12d
+ lea r13d,DWORD PTR[((-1894007588))+r13*1+rdx]
+ rol ecx,5
+ xor ebp,DWORD PTR[24+rsp]
+ add r13d,eax
+ and ebx,edi
+ rol ebp,1
+ add r13d,ebx
+ rol edi,30
+ mov DWORD PTR[36+rsp],ebp
+ add r13d,ecx
+ mov edx,DWORD PTR[40+rsp]
+ mov eax,edi
+ mov ebx,edi
+ xor edx,DWORD PTR[48+rsp]
+ and eax,r11d
+ mov ecx,r13d
+ xor edx,DWORD PTR[8+rsp]
+ xor ebx,r11d
+ lea r12d,DWORD PTR[((-1894007588))+r12*1+rbp]
+ rol ecx,5
+ xor edx,DWORD PTR[28+rsp]
+ add r12d,eax
+ and ebx,esi
+ rol edx,1
+ add r12d,ebx
+ rol esi,30
+ mov DWORD PTR[40+rsp],edx
+ add r12d,ecx
+ mov ebp,DWORD PTR[44+rsp]
+ mov eax,esi
+ mov ebx,esi
+ xor ebp,DWORD PTR[52+rsp]
+ and eax,edi
+ mov ecx,r12d
+ xor ebp,DWORD PTR[12+rsp]
+ xor ebx,edi
+ lea r11d,DWORD PTR[((-1894007588))+r11*1+rdx]
+ rol ecx,5
+ xor ebp,DWORD PTR[32+rsp]
+ add r11d,eax
+ and ebx,r13d
+ rol ebp,1
+ add r11d,ebx
+ rol r13d,30
+ mov DWORD PTR[44+rsp],ebp
+ add r11d,ecx
+ mov edx,DWORD PTR[48+rsp]
+ mov eax,r13d
+ mov ebx,r13d
+ xor edx,DWORD PTR[56+rsp]
+ and eax,esi
+ mov ecx,r11d
+ xor edx,DWORD PTR[16+rsp]
+ xor ebx,esi
+ lea edi,DWORD PTR[((-1894007588))+rdi*1+rbp]
+ rol ecx,5
+ xor edx,DWORD PTR[36+rsp]
+ add edi,eax
+ and ebx,r12d
+ rol edx,1
+ add edi,ebx
+ rol r12d,30
+ mov DWORD PTR[48+rsp],edx
+ add edi,ecx
+ mov ebp,DWORD PTR[52+rsp]
+ mov eax,r12d
+ mov ebx,r12d
+ xor ebp,DWORD PTR[60+rsp]
+ and eax,r13d
+ mov ecx,edi
+ xor ebp,DWORD PTR[20+rsp]
+ xor ebx,r13d
+ lea esi,DWORD PTR[((-1894007588))+rsi*1+rdx]
+ rol ecx,5
+ xor ebp,DWORD PTR[40+rsp]
+ add esi,eax
+ and ebx,r11d
+ rol ebp,1
+ add esi,ebx
+ rol r11d,30
+ mov DWORD PTR[52+rsp],ebp
+ add esi,ecx
+ mov edx,DWORD PTR[56+rsp]
+ mov eax,r11d
+ mov ebx,r11d
+ xor edx,DWORD PTR[rsp]
+ and eax,r12d
+ mov ecx,esi
+ xor edx,DWORD PTR[24+rsp]
+ xor ebx,r12d
+ lea r13d,DWORD PTR[((-1894007588))+r13*1+rbp]
+ rol ecx,5
+ xor edx,DWORD PTR[44+rsp]
+ add r13d,eax
+ and ebx,edi
+ rol edx,1
+ add r13d,ebx
+ rol edi,30
+ mov DWORD PTR[56+rsp],edx
+ add r13d,ecx
+ mov ebp,DWORD PTR[60+rsp]
+ mov eax,edi
+ mov ebx,edi
+ xor ebp,DWORD PTR[4+rsp]
+ and eax,r11d
+ mov ecx,r13d
+ xor ebp,DWORD PTR[28+rsp]
+ xor ebx,r11d
+ lea r12d,DWORD PTR[((-1894007588))+r12*1+rdx]
+ rol ecx,5
+ xor ebp,DWORD PTR[48+rsp]
+ add r12d,eax
+ and ebx,esi
+ rol ebp,1
+ add r12d,ebx
+ rol esi,30
+ mov DWORD PTR[60+rsp],ebp
+ add r12d,ecx
+ mov edx,DWORD PTR[rsp]
+ mov eax,esi
+ mov ebx,esi
+ xor edx,DWORD PTR[8+rsp]
+ and eax,edi
+ mov ecx,r12d
+ xor edx,DWORD PTR[32+rsp]
+ xor ebx,edi
+ lea r11d,DWORD PTR[((-1894007588))+r11*1+rbp]
+ rol ecx,5
+ xor edx,DWORD PTR[52+rsp]
+ add r11d,eax
+ and ebx,r13d
+ rol edx,1
+ add r11d,ebx
+ rol r13d,30
+ mov DWORD PTR[rsp],edx
+ add r11d,ecx
+ mov ebp,DWORD PTR[4+rsp]
+ mov eax,r13d
+ mov ebx,r13d
+ xor ebp,DWORD PTR[12+rsp]
+ and eax,esi
+ mov ecx,r11d
+ xor ebp,DWORD PTR[36+rsp]
+ xor ebx,esi
+ lea edi,DWORD PTR[((-1894007588))+rdi*1+rdx]
+ rol ecx,5
+ xor ebp,DWORD PTR[56+rsp]
+ add edi,eax
+ and ebx,r12d
+ rol ebp,1
+ add edi,ebx
+ rol r12d,30
+ mov DWORD PTR[4+rsp],ebp
+ add edi,ecx
+ mov edx,DWORD PTR[8+rsp]
+ mov eax,r12d
+ mov ebx,r12d
+ xor edx,DWORD PTR[16+rsp]
+ and eax,r13d
+ mov ecx,edi
+ xor edx,DWORD PTR[40+rsp]
+ xor ebx,r13d
+ lea esi,DWORD PTR[((-1894007588))+rsi*1+rbp]
+ rol ecx,5
+ xor edx,DWORD PTR[60+rsp]
+ add esi,eax
+ and ebx,r11d
+ rol edx,1
+ add esi,ebx
+ rol r11d,30
+ mov DWORD PTR[8+rsp],edx
+ add esi,ecx
+ mov ebp,DWORD PTR[12+rsp]
+ mov eax,r11d
+ mov ebx,r11d
+ xor ebp,DWORD PTR[20+rsp]
+ and eax,r12d
+ mov ecx,esi
+ xor ebp,DWORD PTR[44+rsp]
+ xor ebx,r12d
+ lea r13d,DWORD PTR[((-1894007588))+r13*1+rdx]
+ rol ecx,5
+ xor ebp,DWORD PTR[rsp]
+ add r13d,eax
+ and ebx,edi
+ rol ebp,1
+ add r13d,ebx
+ rol edi,30
+ mov DWORD PTR[12+rsp],ebp
+ add r13d,ecx
+ mov edx,DWORD PTR[16+rsp]
+ mov eax,edi
+ mov ebx,edi
+ xor edx,DWORD PTR[24+rsp]
+ and eax,r11d
+ mov ecx,r13d
+ xor edx,DWORD PTR[48+rsp]
+ xor ebx,r11d
+ lea r12d,DWORD PTR[((-1894007588))+r12*1+rbp]
+ rol ecx,5
+ xor edx,DWORD PTR[4+rsp]
+ add r12d,eax
+ and ebx,esi
+ rol edx,1
+ add r12d,ebx
+ rol esi,30
+ mov DWORD PTR[16+rsp],edx
+ add r12d,ecx
+ mov ebp,DWORD PTR[20+rsp]
+ mov eax,esi
+ mov ebx,esi
+ xor ebp,DWORD PTR[28+rsp]
+ and eax,edi
+ mov ecx,r12d
+ xor ebp,DWORD PTR[52+rsp]
+ xor ebx,edi
+ lea r11d,DWORD PTR[((-1894007588))+r11*1+rdx]
+ rol ecx,5
+ xor ebp,DWORD PTR[8+rsp]
+ add r11d,eax
+ and ebx,r13d
+ rol ebp,1
+ add r11d,ebx
+ rol r13d,30
+ mov DWORD PTR[20+rsp],ebp
+ add r11d,ecx
+ mov edx,DWORD PTR[24+rsp]
+ mov eax,r13d
+ mov ebx,r13d
+ xor edx,DWORD PTR[32+rsp]
+ and eax,esi
+ mov ecx,r11d
+ xor edx,DWORD PTR[56+rsp]
+ xor ebx,esi
+ lea edi,DWORD PTR[((-1894007588))+rdi*1+rbp]
+ rol ecx,5
+ xor edx,DWORD PTR[12+rsp]
+ add edi,eax
+ and ebx,r12d
+ rol edx,1
+ add edi,ebx
+ rol r12d,30
+ mov DWORD PTR[24+rsp],edx
+ add edi,ecx
+ mov ebp,DWORD PTR[28+rsp]
+ mov eax,r12d
+ mov ebx,r12d
+ xor ebp,DWORD PTR[36+rsp]
+ and eax,r13d
+ mov ecx,edi
+ xor ebp,DWORD PTR[60+rsp]
+ xor ebx,r13d
+ lea esi,DWORD PTR[((-1894007588))+rsi*1+rdx]
+ rol ecx,5
+ xor ebp,DWORD PTR[16+rsp]
+ add esi,eax
+ and ebx,r11d
+ rol ebp,1
+ add esi,ebx
+ rol r11d,30
+ mov DWORD PTR[28+rsp],ebp
+ add esi,ecx
+ mov edx,DWORD PTR[32+rsp]
+ mov eax,r11d
+ mov ebx,r11d
+ xor edx,DWORD PTR[40+rsp]
+ and eax,r12d
+ mov ecx,esi
+ xor edx,DWORD PTR[rsp]
+ xor ebx,r12d
+ lea r13d,DWORD PTR[((-1894007588))+r13*1+rbp]
+ rol ecx,5
+ xor edx,DWORD PTR[20+rsp]
+ add r13d,eax
+ and ebx,edi
+ rol edx,1
+ add r13d,ebx
+ rol edi,30
+ mov DWORD PTR[32+rsp],edx
+ add r13d,ecx
+ mov ebp,DWORD PTR[36+rsp]
+ mov eax,edi
+ mov ebx,edi
+ xor ebp,DWORD PTR[44+rsp]
+ and eax,r11d
+ mov ecx,r13d
+ xor ebp,DWORD PTR[4+rsp]
+ xor ebx,r11d
+ lea r12d,DWORD PTR[((-1894007588))+r12*1+rdx]
+ rol ecx,5
+ xor ebp,DWORD PTR[24+rsp]
+ add r12d,eax
+ and ebx,esi
+ rol ebp,1
+ add r12d,ebx
+ rol esi,30
+ mov DWORD PTR[36+rsp],ebp
+ add r12d,ecx
+ mov edx,DWORD PTR[40+rsp]
+ mov eax,esi
+ mov ebx,esi
+ xor edx,DWORD PTR[48+rsp]
+ and eax,edi
+ mov ecx,r12d
+ xor edx,DWORD PTR[8+rsp]
+ xor ebx,edi
+ lea r11d,DWORD PTR[((-1894007588))+r11*1+rbp]
+ rol ecx,5
+ xor edx,DWORD PTR[28+rsp]
+ add r11d,eax
+ and ebx,r13d
+ rol edx,1
+ add r11d,ebx
+ rol r13d,30
+ mov DWORD PTR[40+rsp],edx
+ add r11d,ecx
+ mov ebp,DWORD PTR[44+rsp]
+ mov eax,r13d
+ mov ebx,r13d
+ xor ebp,DWORD PTR[52+rsp]
+ and eax,esi
+ mov ecx,r11d
+ xor ebp,DWORD PTR[12+rsp]
+ xor ebx,esi
+ lea edi,DWORD PTR[((-1894007588))+rdi*1+rdx]
+ rol ecx,5
+ xor ebp,DWORD PTR[32+rsp]
+ add edi,eax
+ and ebx,r12d
+ rol ebp,1
+ add edi,ebx
+ rol r12d,30
+ mov DWORD PTR[44+rsp],ebp
+ add edi,ecx
+ mov edx,DWORD PTR[48+rsp]
+ mov eax,r12d
+ mov ebx,r12d
+ xor edx,DWORD PTR[56+rsp]
+ and eax,r13d
+ mov ecx,edi
+ xor edx,DWORD PTR[16+rsp]
+ xor ebx,r13d
+ lea esi,DWORD PTR[((-1894007588))+rsi*1+rbp]
+ rol ecx,5
+ xor edx,DWORD PTR[36+rsp]
+ add esi,eax
+ and ebx,r11d
+ rol edx,1
+ add esi,ebx
+ rol r11d,30
+ mov DWORD PTR[48+rsp],edx
+ add esi,ecx
+ mov ebp,DWORD PTR[52+rsp]
+ mov eax,r11d
+ mov ecx,esi
+ xor ebp,DWORD PTR[60+rsp]
+ xor eax,edi
+ rol ecx,5
+ lea r13d,DWORD PTR[((-899497514))+r13*1+rdx]
+ xor ebp,DWORD PTR[20+rsp]
+ xor eax,r12d
+ add r13d,ecx
+ xor ebp,DWORD PTR[40+rsp]
+ rol edi,30
+ add r13d,eax
+ rol ebp,1
+ mov DWORD PTR[52+rsp],ebp
+ mov edx,DWORD PTR[56+rsp]
+ mov eax,edi
+ mov ecx,r13d
+ xor edx,DWORD PTR[rsp]
+ xor eax,esi
+ rol ecx,5
+ lea r12d,DWORD PTR[((-899497514))+r12*1+rbp]
+ xor edx,DWORD PTR[24+rsp]
+ xor eax,r11d
+ add r12d,ecx
+ xor edx,DWORD PTR[44+rsp]
+ rol esi,30
+ add r12d,eax
+ rol edx,1
+ mov DWORD PTR[56+rsp],edx
+ mov ebp,DWORD PTR[60+rsp]
+ mov eax,esi
+ mov ecx,r12d
+ xor ebp,DWORD PTR[4+rsp]
+ xor eax,r13d
+ rol ecx,5
+ lea r11d,DWORD PTR[((-899497514))+r11*1+rdx]
+ xor ebp,DWORD PTR[28+rsp]
+ xor eax,edi
+ add r11d,ecx
+ xor ebp,DWORD PTR[48+rsp]
+ rol r13d,30
+ add r11d,eax
+ rol ebp,1
+ mov DWORD PTR[60+rsp],ebp
+ mov edx,DWORD PTR[rsp]
+ mov eax,r13d
+ mov ecx,r11d
+ xor edx,DWORD PTR[8+rsp]
+ xor eax,r12d
+ rol ecx,5
+ lea edi,DWORD PTR[((-899497514))+rdi*1+rbp]
+ xor edx,DWORD PTR[32+rsp]
+ xor eax,esi
+ add edi,ecx
+ xor edx,DWORD PTR[52+rsp]
+ rol r12d,30
+ add edi,eax
+ rol edx,1
+ mov DWORD PTR[rsp],edx
+ mov ebp,DWORD PTR[4+rsp]
+ mov eax,r12d
+ mov ecx,edi
+ xor ebp,DWORD PTR[12+rsp]
+ xor eax,r11d
+ rol ecx,5
+ lea esi,DWORD PTR[((-899497514))+rsi*1+rdx]
+ xor ebp,DWORD PTR[36+rsp]
+ xor eax,r13d
+ add esi,ecx
+ xor ebp,DWORD PTR[56+rsp]
+ rol r11d,30
+ add esi,eax
+ rol ebp,1
+ mov DWORD PTR[4+rsp],ebp
+ mov edx,DWORD PTR[8+rsp]
+ mov eax,r11d
+ mov ecx,esi
+ xor edx,DWORD PTR[16+rsp]
+ xor eax,edi
+ rol ecx,5
+ lea r13d,DWORD PTR[((-899497514))+r13*1+rbp]
+ xor edx,DWORD PTR[40+rsp]
+ xor eax,r12d
+ add r13d,ecx
+ xor edx,DWORD PTR[60+rsp]
+ rol edi,30
+ add r13d,eax
+ rol edx,1
+ mov DWORD PTR[8+rsp],edx
+ mov ebp,DWORD PTR[12+rsp]
+ mov eax,edi
+ mov ecx,r13d
+ xor ebp,DWORD PTR[20+rsp]
+ xor eax,esi
+ rol ecx,5
+ lea r12d,DWORD PTR[((-899497514))+r12*1+rdx]
+ xor ebp,DWORD PTR[44+rsp]
+ xor eax,r11d
+ add r12d,ecx
+ xor ebp,DWORD PTR[rsp]
+ rol esi,30
+ add r12d,eax
+ rol ebp,1
+ mov DWORD PTR[12+rsp],ebp
+ mov edx,DWORD PTR[16+rsp]
+ mov eax,esi
+ mov ecx,r12d
+ xor edx,DWORD PTR[24+rsp]
+ xor eax,r13d
+ rol ecx,5
+ lea r11d,DWORD PTR[((-899497514))+r11*1+rbp]
+ xor edx,DWORD PTR[48+rsp]
+ xor eax,edi
+ add r11d,ecx
+ xor edx,DWORD PTR[4+rsp]
+ rol r13d,30
+ add r11d,eax
+ rol edx,1
+ mov DWORD PTR[16+rsp],edx
+ mov ebp,DWORD PTR[20+rsp]
+ mov eax,r13d
+ mov ecx,r11d
+ xor ebp,DWORD PTR[28+rsp]
+ xor eax,r12d
+ rol ecx,5
+ lea edi,DWORD PTR[((-899497514))+rdi*1+rdx]
+ xor ebp,DWORD PTR[52+rsp]
+ xor eax,esi
+ add edi,ecx
+ xor ebp,DWORD PTR[8+rsp]
+ rol r12d,30
+ add edi,eax
+ rol ebp,1
+ mov DWORD PTR[20+rsp],ebp
+ mov edx,DWORD PTR[24+rsp]
+ mov eax,r12d
+ mov ecx,edi
+ xor edx,DWORD PTR[32+rsp]
+ xor eax,r11d
+ rol ecx,5
+ lea esi,DWORD PTR[((-899497514))+rsi*1+rbp]
+ xor edx,DWORD PTR[56+rsp]
+ xor eax,r13d
+ add esi,ecx
+ xor edx,DWORD PTR[12+rsp]
+ rol r11d,30
+ add esi,eax
+ rol edx,1
+ mov DWORD PTR[24+rsp],edx
+ mov ebp,DWORD PTR[28+rsp]
+ mov eax,r11d
+ mov ecx,esi
+ xor ebp,DWORD PTR[36+rsp]
+ xor eax,edi
+ rol ecx,5
+ lea r13d,DWORD PTR[((-899497514))+r13*1+rdx]
+ xor ebp,DWORD PTR[60+rsp]
+ xor eax,r12d
+ add r13d,ecx
+ xor ebp,DWORD PTR[16+rsp]
+ rol edi,30
+ add r13d,eax
+ rol ebp,1
+ mov DWORD PTR[28+rsp],ebp
+ mov edx,DWORD PTR[32+rsp]
+ mov eax,edi
+ mov ecx,r13d
+ xor edx,DWORD PTR[40+rsp]
+ xor eax,esi
+ rol ecx,5
+ lea r12d,DWORD PTR[((-899497514))+r12*1+rbp]
+ xor edx,DWORD PTR[rsp]
+ xor eax,r11d
+ add r12d,ecx
+ xor edx,DWORD PTR[20+rsp]
+ rol esi,30
+ add r12d,eax
+ rol edx,1
+ mov DWORD PTR[32+rsp],edx
+ mov ebp,DWORD PTR[36+rsp]
+ mov eax,esi
+ mov ecx,r12d
+ xor ebp,DWORD PTR[44+rsp]
+ xor eax,r13d
+ rol ecx,5
+ lea r11d,DWORD PTR[((-899497514))+r11*1+rdx]
+ xor ebp,DWORD PTR[4+rsp]
+ xor eax,edi
+ add r11d,ecx
+ xor ebp,DWORD PTR[24+rsp]
+ rol r13d,30
+ add r11d,eax
+ rol ebp,1
+ mov DWORD PTR[36+rsp],ebp
+ mov edx,DWORD PTR[40+rsp]
+ mov eax,r13d
+ mov ecx,r11d
+ xor edx,DWORD PTR[48+rsp]
+ xor eax,r12d
+ rol ecx,5
+ lea edi,DWORD PTR[((-899497514))+rdi*1+rbp]
+ xor edx,DWORD PTR[8+rsp]
+ xor eax,esi
+ add edi,ecx
+ xor edx,DWORD PTR[28+rsp]
+ rol r12d,30
+ add edi,eax
+ rol edx,1
+ mov DWORD PTR[40+rsp],edx
+ mov ebp,DWORD PTR[44+rsp]
+ mov eax,r12d
+ mov ecx,edi
+ xor ebp,DWORD PTR[52+rsp]
+ xor eax,r11d
+ rol ecx,5
+ lea esi,DWORD PTR[((-899497514))+rsi*1+rdx]
+ xor ebp,DWORD PTR[12+rsp]
+ xor eax,r13d
+ add esi,ecx
+ xor ebp,DWORD PTR[32+rsp]
+ rol r11d,30
+ add esi,eax
+ rol ebp,1
+ mov DWORD PTR[44+rsp],ebp
+ mov edx,DWORD PTR[48+rsp]
+ mov eax,r11d
+ mov ecx,esi
+ xor edx,DWORD PTR[56+rsp]
+ xor eax,edi
+ rol ecx,5
+ lea r13d,DWORD PTR[((-899497514))+r13*1+rbp]
+ xor edx,DWORD PTR[16+rsp]
+ xor eax,r12d
+ add r13d,ecx
+ xor edx,DWORD PTR[36+rsp]
+ rol edi,30
+ add r13d,eax
+ rol edx,1
+ mov DWORD PTR[48+rsp],edx
+ mov ebp,DWORD PTR[52+rsp]
+ mov eax,edi
+ mov ecx,r13d
+ xor ebp,DWORD PTR[60+rsp]
+ xor eax,esi
+ rol ecx,5
+ lea r12d,DWORD PTR[((-899497514))+r12*1+rdx]
+ xor ebp,DWORD PTR[20+rsp]
+ xor eax,r11d
+ add r12d,ecx
+ xor ebp,DWORD PTR[40+rsp]
+ rol esi,30
+ add r12d,eax
+ rol ebp,1
+ mov edx,DWORD PTR[56+rsp]
+ mov eax,esi
+ mov ecx,r12d
+ xor edx,DWORD PTR[rsp]
+ xor eax,r13d
+ rol ecx,5
+ lea r11d,DWORD PTR[((-899497514))+r11*1+rbp]
+ xor edx,DWORD PTR[24+rsp]
+ xor eax,edi
+ add r11d,ecx
+ xor edx,DWORD PTR[44+rsp]
+ rol r13d,30
+ add r11d,eax
+ rol edx,1
+ mov ebp,DWORD PTR[60+rsp]
+ mov eax,r13d
+ mov ecx,r11d
+ xor ebp,DWORD PTR[4+rsp]
+ xor eax,r12d
+ rol ecx,5
+ lea edi,DWORD PTR[((-899497514))+rdi*1+rdx]
+ xor ebp,DWORD PTR[28+rsp]
+ xor eax,esi
+ add edi,ecx
+ xor ebp,DWORD PTR[48+rsp]
+ rol r12d,30
+ add edi,eax
+ rol ebp,1
+ mov eax,r12d
+ mov ecx,edi
+ xor eax,r11d
+ lea esi,DWORD PTR[((-899497514))+rsi*1+rbp]
+ rol ecx,5
+ xor eax,r13d
+ add esi,ecx
+ rol r11d,30
+ add esi,eax
+ add esi,DWORD PTR[r8]
+ add edi,DWORD PTR[4+r8]
+ add r11d,DWORD PTR[8+r8]
+ add r12d,DWORD PTR[12+r8]
+ add r13d,DWORD PTR[16+r8]
+ mov DWORD PTR[r8],esi
+ mov DWORD PTR[4+r8],edi
+ mov DWORD PTR[8+r8],r11d
+ mov DWORD PTR[12+r8],r12d
+ mov DWORD PTR[16+r8],r13d
+
+ sub r10,1
+ lea r9,QWORD PTR[64+r9]
+ jnz $L$loop
+
+ mov rsi,QWORD PTR[64+rsp]
+ mov r13,QWORD PTR[rsi]
+ mov r12,QWORD PTR[8+rsi]
+ mov rbp,QWORD PTR[16+rsi]
+ mov rbx,QWORD PTR[24+rsi]
+ lea rsp,QWORD PTR[32+rsi]
+$L$epilogue::
+ mov rdi,QWORD PTR[8+rsp] ;WIN64 epilogue
+ mov rsi,QWORD PTR[16+rsp]
+ DB 0F3h,0C3h ;repret
+$L$SEH_end_sha1_block_data_order::
+sha1_block_data_order ENDP
+
+ALIGN 16
+sha1_block_data_order_ssse3 PROC PRIVATE
+ mov QWORD PTR[8+rsp],rdi ;WIN64 prologue
+ mov QWORD PTR[16+rsp],rsi
+ mov rax,rsp
+$L$SEH_begin_sha1_block_data_order_ssse3::
+ mov rdi,rcx
+ mov rsi,rdx
+ mov rdx,r8
+
+
+_ssse3_shortcut::
+ push rbx
+ push rbp
+ push r12
+ lea rsp,QWORD PTR[((-144))+rsp]
+ movaps XMMWORD PTR[(64+0)+rsp],xmm6
+ movaps XMMWORD PTR[(64+16)+rsp],xmm7
+ movaps XMMWORD PTR[(64+32)+rsp],xmm8
+ movaps XMMWORD PTR[(64+48)+rsp],xmm9
+ movaps XMMWORD PTR[(64+64)+rsp],xmm10
+$L$prologue_ssse3::
+ mov r8,rdi
+ mov r9,rsi
+ mov r10,rdx
+
+ shl r10,6
+ add r10,r9
+ lea r11,QWORD PTR[K_XX_XX]
+
+ mov eax,DWORD PTR[r8]
+ mov ebx,DWORD PTR[4+r8]
+ mov ecx,DWORD PTR[8+r8]
+ mov edx,DWORD PTR[12+r8]
+ mov esi,ebx
+ mov ebp,DWORD PTR[16+r8]
+
+ movdqa xmm6,XMMWORD PTR[64+r11]
+ movdqa xmm9,XMMWORD PTR[r11]
+ movdqu xmm0,XMMWORD PTR[r9]
+ movdqu xmm1,XMMWORD PTR[16+r9]
+ movdqu xmm2,XMMWORD PTR[32+r9]
+ movdqu xmm3,XMMWORD PTR[48+r9]
+DB 102,15,56,0,198
+ add r9,64
+DB 102,15,56,0,206
+DB 102,15,56,0,214
+DB 102,15,56,0,222
+ paddd xmm0,xmm9
+ paddd xmm1,xmm9
+ paddd xmm2,xmm9
+ movdqa XMMWORD PTR[rsp],xmm0
+ psubd xmm0,xmm9
+ movdqa XMMWORD PTR[16+rsp],xmm1
+ psubd xmm1,xmm9
+ movdqa XMMWORD PTR[32+rsp],xmm2
+ psubd xmm2,xmm9
+ jmp $L$oop_ssse3
+ALIGN 16
+$L$oop_ssse3::
+ movdqa xmm4,xmm1
+ add ebp,DWORD PTR[rsp]
+ xor ecx,edx
+ movdqa xmm8,xmm3
+DB 102,15,58,15,224,8
+ mov edi,eax
+ rol eax,5
+ paddd xmm9,xmm3
+ and esi,ecx
+ xor ecx,edx
+ psrldq xmm8,4
+ xor esi,edx
+ add ebp,eax
+ pxor xmm4,xmm0
+ ror ebx,2
+ add ebp,esi
+ pxor xmm8,xmm2
+ add edx,DWORD PTR[4+rsp]
+ xor ebx,ecx
+ mov esi,ebp
+ rol ebp,5
+ pxor xmm4,xmm8
+ and edi,ebx
+ xor ebx,ecx
+ movdqa XMMWORD PTR[48+rsp],xmm9
+ xor edi,ecx
+ add edx,ebp
+ movdqa xmm10,xmm4
+ movdqa xmm8,xmm4
+ ror eax,7
+ add edx,edi
+ add ecx,DWORD PTR[8+rsp]
+ xor eax,ebx
+ pslldq xmm10,12
+ paddd xmm4,xmm4
+ mov edi,edx
+ rol edx,5
+ and esi,eax
+ xor eax,ebx
+ psrld xmm8,31
+ xor esi,ebx
+ add ecx,edx
+ movdqa xmm9,xmm10
+ ror ebp,7
+ add ecx,esi
+ psrld xmm10,30
+ por xmm4,xmm8
+ add ebx,DWORD PTR[12+rsp]
+ xor ebp,eax
+ mov esi,ecx
+ rol ecx,5
+ pslld xmm9,2
+ pxor xmm4,xmm10
+ and edi,ebp
+ xor ebp,eax
+ movdqa xmm10,XMMWORD PTR[r11]
+ xor edi,eax
+ add ebx,ecx
+ pxor xmm4,xmm9
+ ror edx,7
+ add ebx,edi
+ movdqa xmm5,xmm2
+ add eax,DWORD PTR[16+rsp]
+ xor edx,ebp
+ movdqa xmm9,xmm4
+DB 102,15,58,15,233,8
+ mov edi,ebx
+ rol ebx,5
+ paddd xmm10,xmm4
+ and esi,edx
+ xor edx,ebp
+ psrldq xmm9,4
+ xor esi,ebp
+ add eax,ebx
+ pxor xmm5,xmm1
+ ror ecx,7
+ add eax,esi
+ pxor xmm9,xmm3
+ add ebp,DWORD PTR[20+rsp]
+ xor ecx,edx
+ mov esi,eax
+ rol eax,5
+ pxor xmm5,xmm9
+ and edi,ecx
+ xor ecx,edx
+ movdqa XMMWORD PTR[rsp],xmm10
+ xor edi,edx
+ add ebp,eax
+ movdqa xmm8,xmm5
+ movdqa xmm9,xmm5
+ ror ebx,7
+ add ebp,edi
+ add edx,DWORD PTR[24+rsp]
+ xor ebx,ecx
+ pslldq xmm8,12
+ paddd xmm5,xmm5
+ mov edi,ebp
+ rol ebp,5
+ and esi,ebx
+ xor ebx,ecx
+ psrld xmm9,31
+ xor esi,ecx
+ add edx,ebp
+ movdqa xmm10,xmm8
+ ror eax,7
+ add edx,esi
+ psrld xmm8,30
+ por xmm5,xmm9
+ add ecx,DWORD PTR[28+rsp]
+ xor eax,ebx
+ mov esi,edx
+ rol edx,5
+ pslld xmm10,2
+ pxor xmm5,xmm8
+ and edi,eax
+ xor eax,ebx
+ movdqa xmm8,XMMWORD PTR[16+r11]
+ xor edi,ebx
+ add ecx,edx
+ pxor xmm5,xmm10
+ ror ebp,7
+ add ecx,edi
+ movdqa xmm6,xmm3
+ add ebx,DWORD PTR[32+rsp]
+ xor ebp,eax
+ movdqa xmm10,xmm5
+DB 102,15,58,15,242,8
+ mov edi,ecx
+ rol ecx,5
+ paddd xmm8,xmm5
+ and esi,ebp
+ xor ebp,eax
+ psrldq xmm10,4
+ xor esi,eax
+ add ebx,ecx
+ pxor xmm6,xmm2
+ ror edx,7
+ add ebx,esi
+ pxor xmm10,xmm4
+ add eax,DWORD PTR[36+rsp]
+ xor edx,ebp
+ mov esi,ebx
+ rol ebx,5
+ pxor xmm6,xmm10
+ and edi,edx
+ xor edx,ebp
+ movdqa XMMWORD PTR[16+rsp],xmm8
+ xor edi,ebp
+ add eax,ebx
+ movdqa xmm9,xmm6
+ movdqa xmm10,xmm6
+ ror ecx,7
+ add eax,edi
+ add ebp,DWORD PTR[40+rsp]
+ xor ecx,edx
+ pslldq xmm9,12
+ paddd xmm6,xmm6
+ mov edi,eax
+ rol eax,5
+ and esi,ecx
+ xor ecx,edx
+ psrld xmm10,31
+ xor esi,edx
+ add ebp,eax
+ movdqa xmm8,xmm9
+ ror ebx,7
+ add ebp,esi
+ psrld xmm9,30
+ por xmm6,xmm10
+ add edx,DWORD PTR[44+rsp]
+ xor ebx,ecx
+ mov esi,ebp
+ rol ebp,5
+ pslld xmm8,2
+ pxor xmm6,xmm9
+ and edi,ebx
+ xor ebx,ecx
+ movdqa xmm9,XMMWORD PTR[16+r11]
+ xor edi,ecx
+ add edx,ebp
+ pxor xmm6,xmm8
+ ror eax,7
+ add edx,edi
+ movdqa xmm7,xmm4
+ add ecx,DWORD PTR[48+rsp]
+ xor eax,ebx
+ movdqa xmm8,xmm6
+DB 102,15,58,15,251,8
+ mov edi,edx
+ rol edx,5
+ paddd xmm9,xmm6
+ and esi,eax
+ xor eax,ebx
+ psrldq xmm8,4
+ xor esi,ebx
+ add ecx,edx
+ pxor xmm7,xmm3
+ ror ebp,7
+ add ecx,esi
+ pxor xmm8,xmm5
+ add ebx,DWORD PTR[52+rsp]
+ xor ebp,eax
+ mov esi,ecx
+ rol ecx,5
+ pxor xmm7,xmm8
+ and edi,ebp
+ xor ebp,eax
+ movdqa XMMWORD PTR[32+rsp],xmm9
+ xor edi,eax
+ add ebx,ecx
+ movdqa xmm10,xmm7
+ movdqa xmm8,xmm7
+ ror edx,7
+ add ebx,edi
+ add eax,DWORD PTR[56+rsp]
+ xor edx,ebp
+ pslldq xmm10,12
+ paddd xmm7,xmm7
+ mov edi,ebx
+ rol ebx,5
+ and esi,edx
+ xor edx,ebp
+ psrld xmm8,31
+ xor esi,ebp
+ add eax,ebx
+ movdqa xmm9,xmm10
+ ror ecx,7
+ add eax,esi
+ psrld xmm10,30
+ por xmm7,xmm8
+ add ebp,DWORD PTR[60+rsp]
+ xor ecx,edx
+ mov esi,eax
+ rol eax,5
+ pslld xmm9,2
+ pxor xmm7,xmm10
+ and edi,ecx
+ xor ecx,edx
+ movdqa xmm10,XMMWORD PTR[16+r11]
+ xor edi,edx
+ add ebp,eax
+ pxor xmm7,xmm9
+ ror ebx,7
+ add ebp,edi
+ movdqa xmm9,xmm7
+ add edx,DWORD PTR[rsp]
+ pxor xmm0,xmm4
+DB 102,68,15,58,15,206,8
+ xor ebx,ecx
+ mov edi,ebp
+ rol ebp,5
+ pxor xmm0,xmm1
+ and esi,ebx
+ xor ebx,ecx
+ movdqa xmm8,xmm10
+ paddd xmm10,xmm7
+ xor esi,ecx
+ add edx,ebp
+ pxor xmm0,xmm9
+ ror eax,7
+ add edx,esi
+ add ecx,DWORD PTR[4+rsp]
+ xor eax,ebx
+ movdqa xmm9,xmm0
+ movdqa XMMWORD PTR[48+rsp],xmm10
+ mov esi,edx
+ rol edx,5
+ and edi,eax
+ xor eax,ebx
+ pslld xmm0,2
+ xor edi,ebx
+ add ecx,edx
+ psrld xmm9,30
+ ror ebp,7
+ add ecx,edi
+ add ebx,DWORD PTR[8+rsp]
+ xor ebp,eax
+ mov edi,ecx
+ rol ecx,5
+ por xmm0,xmm9
+ and esi,ebp
+ xor ebp,eax
+ movdqa xmm10,xmm0
+ xor esi,eax
+ add ebx,ecx
+ ror edx,7
+ add ebx,esi
+ add eax,DWORD PTR[12+rsp]
+ xor edx,ebp
+ mov esi,ebx
+ rol ebx,5
+ and edi,edx
+ xor edx,ebp
+ xor edi,ebp
+ add eax,ebx
+ ror ecx,7
+ add eax,edi
+ add ebp,DWORD PTR[16+rsp]
+ pxor xmm1,xmm5
+DB 102,68,15,58,15,215,8
+ xor esi,edx
+ mov edi,eax
+ rol eax,5
+ pxor xmm1,xmm2
+ xor esi,ecx
+ add ebp,eax
+ movdqa xmm9,xmm8
+ paddd xmm8,xmm0
+ ror ebx,7
+ add ebp,esi
+ pxor xmm1,xmm10
+ add edx,DWORD PTR[20+rsp]
+ xor edi,ecx
+ mov esi,ebp
+ rol ebp,5
+ movdqa xmm10,xmm1
+ movdqa XMMWORD PTR[rsp],xmm8
+ xor edi,ebx
+ add edx,ebp
+ ror eax,7
+ add edx,edi
+ pslld xmm1,2
+ add ecx,DWORD PTR[24+rsp]
+ xor esi,ebx
+ psrld xmm10,30
+ mov edi,edx
+ rol edx,5
+ xor esi,eax
+ add ecx,edx
+ ror ebp,7
+ add ecx,esi
+ por xmm1,xmm10
+ add ebx,DWORD PTR[28+rsp]
+ xor edi,eax
+ movdqa xmm8,xmm1
+ mov esi,ecx
+ rol ecx,5
+ xor edi,ebp
+ add ebx,ecx
+ ror edx,7
+ add ebx,edi
+ add eax,DWORD PTR[32+rsp]
+ pxor xmm2,xmm6
+DB 102,68,15,58,15,192,8
+ xor esi,ebp
+ mov edi,ebx
+ rol ebx,5
+ pxor xmm2,xmm3
+ xor esi,edx
+ add eax,ebx
+ movdqa xmm10,XMMWORD PTR[32+r11]
+ paddd xmm9,xmm1
+ ror ecx,7
+ add eax,esi
+ pxor xmm2,xmm8
+ add ebp,DWORD PTR[36+rsp]
+ xor edi,edx
+ mov esi,eax
+ rol eax,5
+ movdqa xmm8,xmm2
+ movdqa XMMWORD PTR[16+rsp],xmm9
+ xor edi,ecx
+ add ebp,eax
+ ror ebx,7
+ add ebp,edi
+ pslld xmm2,2
+ add edx,DWORD PTR[40+rsp]
+ xor esi,ecx
+ psrld xmm8,30
+ mov edi,ebp
+ rol ebp,5
+ xor esi,ebx
+ add edx,ebp
+ ror eax,7
+ add edx,esi
+ por xmm2,xmm8
+ add ecx,DWORD PTR[44+rsp]
+ xor edi,ebx
+ movdqa xmm9,xmm2
+ mov esi,edx
+ rol edx,5
+ xor edi,eax
+ add ecx,edx
+ ror ebp,7
+ add ecx,edi
+ add ebx,DWORD PTR[48+rsp]
+ pxor xmm3,xmm7
+DB 102,68,15,58,15,201,8
+ xor esi,eax
+ mov edi,ecx
+ rol ecx,5
+ pxor xmm3,xmm4
+ xor esi,ebp
+ add ebx,ecx
+ movdqa xmm8,xmm10
+ paddd xmm10,xmm2
+ ror edx,7
+ add ebx,esi
+ pxor xmm3,xmm9
+ add eax,DWORD PTR[52+rsp]
+ xor edi,ebp
+ mov esi,ebx
+ rol ebx,5
+ movdqa xmm9,xmm3
+ movdqa XMMWORD PTR[32+rsp],xmm10
+ xor edi,edx
+ add eax,ebx
+ ror ecx,7
+ add eax,edi
+ pslld xmm3,2
+ add ebp,DWORD PTR[56+rsp]
+ xor esi,edx
+ psrld xmm9,30
+ mov edi,eax
+ rol eax,5
+ xor esi,ecx
+ add ebp,eax
+ ror ebx,7
+ add ebp,esi
+ por xmm3,xmm9
+ add edx,DWORD PTR[60+rsp]
+ xor edi,ecx
+ movdqa xmm10,xmm3
+ mov esi,ebp
+ rol ebp,5
+ xor edi,ebx
+ add edx,ebp
+ ror eax,7
+ add edx,edi
+ add ecx,DWORD PTR[rsp]
+ pxor xmm4,xmm0
+DB 102,68,15,58,15,210,8
+ xor esi,ebx
+ mov edi,edx
+ rol edx,5
+ pxor xmm4,xmm5
+ xor esi,eax
+ add ecx,edx
+ movdqa xmm9,xmm8
+ paddd xmm8,xmm3
+ ror ebp,7
+ add ecx,esi
+ pxor xmm4,xmm10
+ add ebx,DWORD PTR[4+rsp]
+ xor edi,eax
+ mov esi,ecx
+ rol ecx,5
+ movdqa xmm10,xmm4
+ movdqa XMMWORD PTR[48+rsp],xmm8
+ xor edi,ebp
+ add ebx,ecx
+ ror edx,7
+ add ebx,edi
+ pslld xmm4,2
+ add eax,DWORD PTR[8+rsp]
+ xor esi,ebp
+ psrld xmm10,30
+ mov edi,ebx
+ rol ebx,5
+ xor esi,edx
+ add eax,ebx
+ ror ecx,7
+ add eax,esi
+ por xmm4,xmm10
+ add ebp,DWORD PTR[12+rsp]
+ xor edi,edx
+ movdqa xmm8,xmm4
+ mov esi,eax
+ rol eax,5
+ xor edi,ecx
+ add ebp,eax
+ ror ebx,7
+ add ebp,edi
+ add edx,DWORD PTR[16+rsp]
+ pxor xmm5,xmm1
+DB 102,68,15,58,15,195,8
+ xor esi,ecx
+ mov edi,ebp
+ rol ebp,5
+ pxor xmm5,xmm6
+ xor esi,ebx
+ add edx,ebp
+ movdqa xmm10,xmm9
+ paddd xmm9,xmm4
+ ror eax,7
+ add edx,esi
+ pxor xmm5,xmm8
+ add ecx,DWORD PTR[20+rsp]
+ xor edi,ebx
+ mov esi,edx
+ rol edx,5
+ movdqa xmm8,xmm5
+ movdqa XMMWORD PTR[rsp],xmm9
+ xor edi,eax
+ add ecx,edx
+ ror ebp,7
+ add ecx,edi
+ pslld xmm5,2
+ add ebx,DWORD PTR[24+rsp]
+ xor esi,eax
+ psrld xmm8,30
+ mov edi,ecx
+ rol ecx,5
+ xor esi,ebp
+ add ebx,ecx
+ ror edx,7
+ add ebx,esi
+ por xmm5,xmm8
+ add eax,DWORD PTR[28+rsp]
+ xor edi,ebp
+ movdqa xmm9,xmm5
+ mov esi,ebx
+ rol ebx,5
+ xor edi,edx
+ add eax,ebx
+ ror ecx,7
+ add eax,edi
+ mov edi,ecx
+ pxor xmm6,xmm2
+DB 102,68,15,58,15,204,8
+ xor ecx,edx
+ add ebp,DWORD PTR[32+rsp]
+ and edi,edx
+ pxor xmm6,xmm7
+ and esi,ecx
+ ror ebx,7
+ movdqa xmm8,xmm10
+ paddd xmm10,xmm5
+ add ebp,edi
+ mov edi,eax
+ pxor xmm6,xmm9
+ rol eax,5
+ add ebp,esi
+ xor ecx,edx
+ add ebp,eax
+ movdqa xmm9,xmm6
+ movdqa XMMWORD PTR[16+rsp],xmm10
+ mov esi,ebx
+ xor ebx,ecx
+ add edx,DWORD PTR[36+rsp]
+ and esi,ecx
+ pslld xmm6,2
+ and edi,ebx
+ ror eax,7
+ psrld xmm9,30
+ add edx,esi
+ mov esi,ebp
+ rol ebp,5
+ add edx,edi
+ xor ebx,ecx
+ add edx,ebp
+ por xmm6,xmm9
+ mov edi,eax
+ xor eax,ebx
+ movdqa xmm10,xmm6
+ add ecx,DWORD PTR[40+rsp]
+ and edi,ebx
+ and esi,eax
+ ror ebp,7
+ add ecx,edi
+ mov edi,edx
+ rol edx,5
+ add ecx,esi
+ xor eax,ebx
+ add ecx,edx
+ mov esi,ebp
+ xor ebp,eax
+ add ebx,DWORD PTR[44+rsp]
+ and esi,eax
+ and edi,ebp
+ ror edx,7
+ add ebx,esi
+ mov esi,ecx
+ rol ecx,5
+ add ebx,edi
+ xor ebp,eax
+ add ebx,ecx
+ mov edi,edx
+ pxor xmm7,xmm3
+DB 102,68,15,58,15,213,8
+ xor edx,ebp
+ add eax,DWORD PTR[48+rsp]
+ and edi,ebp
+ pxor xmm7,xmm0
+ and esi,edx
+ ror ecx,7
+ movdqa xmm9,XMMWORD PTR[48+r11]
+ paddd xmm8,xmm6
+ add eax,edi
+ mov edi,ebx
+ pxor xmm7,xmm10
+ rol ebx,5
+ add eax,esi
+ xor edx,ebp
+ add eax,ebx
+ movdqa xmm10,xmm7
+ movdqa XMMWORD PTR[32+rsp],xmm8
+ mov esi,ecx
+ xor ecx,edx
+ add ebp,DWORD PTR[52+rsp]
+ and esi,edx
+ pslld xmm7,2
+ and edi,ecx
+ ror ebx,7
+ psrld xmm10,30
+ add ebp,esi
+ mov esi,eax
+ rol eax,5
+ add ebp,edi
+ xor ecx,edx
+ add ebp,eax
+ por xmm7,xmm10
+ mov edi,ebx
+ xor ebx,ecx
+ movdqa xmm8,xmm7
+ add edx,DWORD PTR[56+rsp]
+ and edi,ecx
+ and esi,ebx
+ ror eax,7
+ add edx,edi
+ mov edi,ebp
+ rol ebp,5
+ add edx,esi
+ xor ebx,ecx
+ add edx,ebp
+ mov esi,eax
+ xor eax,ebx
+ add ecx,DWORD PTR[60+rsp]
+ and esi,ebx
+ and edi,eax
+ ror ebp,7
+ add ecx,esi
+ mov esi,edx
+ rol edx,5
+ add ecx,edi
+ xor eax,ebx
+ add ecx,edx
+ mov edi,ebp
+ pxor xmm0,xmm4
+DB 102,68,15,58,15,198,8
+ xor ebp,eax
+ add ebx,DWORD PTR[rsp]
+ and edi,eax
+ pxor xmm0,xmm1
+ and esi,ebp
+ ror edx,7
+ movdqa xmm10,xmm9
+ paddd xmm9,xmm7
+ add ebx,edi
+ mov edi,ecx
+ pxor xmm0,xmm8
+ rol ecx,5
+ add ebx,esi
+ xor ebp,eax
+ add ebx,ecx
+ movdqa xmm8,xmm0
+ movdqa XMMWORD PTR[48+rsp],xmm9
+ mov esi,edx
+ xor edx,ebp
+ add eax,DWORD PTR[4+rsp]
+ and esi,ebp
+ pslld xmm0,2
+ and edi,edx
+ ror ecx,7
+ psrld xmm8,30
+ add eax,esi
+ mov esi,ebx
+ rol ebx,5
+ add eax,edi
+ xor edx,ebp
+ add eax,ebx
+ por xmm0,xmm8
+ mov edi,ecx
+ xor ecx,edx
+ movdqa xmm9,xmm0
+ add ebp,DWORD PTR[8+rsp]
+ and edi,edx
+ and esi,ecx
+ ror ebx,7
+ add ebp,edi
+ mov edi,eax
+ rol eax,5
+ add ebp,esi
+ xor ecx,edx
+ add ebp,eax
+ mov esi,ebx
+ xor ebx,ecx
+ add edx,DWORD PTR[12+rsp]
+ and esi,ecx
+ and edi,ebx
+ ror eax,7
+ add edx,esi
+ mov esi,ebp
+ rol ebp,5
+ add edx,edi
+ xor ebx,ecx
+ add edx,ebp
+ mov edi,eax
+ pxor xmm1,xmm5
+DB 102,68,15,58,15,207,8
+ xor eax,ebx
+ add ecx,DWORD PTR[16+rsp]
+ and edi,ebx
+ pxor xmm1,xmm2
+ and esi,eax
+ ror ebp,7
+ movdqa xmm8,xmm10
+ paddd xmm10,xmm0
+ add ecx,edi
+ mov edi,edx
+ pxor xmm1,xmm9
+ rol edx,5
+ add ecx,esi
+ xor eax,ebx
+ add ecx,edx
+ movdqa xmm9,xmm1
+ movdqa XMMWORD PTR[rsp],xmm10
+ mov esi,ebp
+ xor ebp,eax
+ add ebx,DWORD PTR[20+rsp]
+ and esi,eax
+ pslld xmm1,2
+ and edi,ebp
+ ror edx,7
+ psrld xmm9,30
+ add ebx,esi
+ mov esi,ecx
+ rol ecx,5
+ add ebx,edi
+ xor ebp,eax
+ add ebx,ecx
+ por xmm1,xmm9
+ mov edi,edx
+ xor edx,ebp
+ movdqa xmm10,xmm1
+ add eax,DWORD PTR[24+rsp]
+ and edi,ebp
+ and esi,edx
+ ror ecx,7
+ add eax,edi
+ mov edi,ebx
+ rol ebx,5
+ add eax,esi
+ xor edx,ebp
+ add eax,ebx
+ mov esi,ecx
+ xor ecx,edx
+ add ebp,DWORD PTR[28+rsp]
+ and esi,edx
+ and edi,ecx
+ ror ebx,7
+ add ebp,esi
+ mov esi,eax
+ rol eax,5
+ add ebp,edi
+ xor ecx,edx
+ add ebp,eax
+ mov edi,ebx
+ pxor xmm2,xmm6
+DB 102,68,15,58,15,208,8
+ xor ebx,ecx
+ add edx,DWORD PTR[32+rsp]
+ and edi,ecx
+ pxor xmm2,xmm3
+ and esi,ebx
+ ror eax,7
+ movdqa xmm9,xmm8
+ paddd xmm8,xmm1
+ add edx,edi
+ mov edi,ebp
+ pxor xmm2,xmm10
+ rol ebp,5
+ add edx,esi
+ xor ebx,ecx
+ add edx,ebp
+ movdqa xmm10,xmm2
+ movdqa XMMWORD PTR[16+rsp],xmm8
+ mov esi,eax
+ xor eax,ebx
+ add ecx,DWORD PTR[36+rsp]
+ and esi,ebx
+ pslld xmm2,2
+ and edi,eax
+ ror ebp,7
+ psrld xmm10,30
+ add ecx,esi
+ mov esi,edx
+ rol edx,5
+ add ecx,edi
+ xor eax,ebx
+ add ecx,edx
+ por xmm2,xmm10
+ mov edi,ebp
+ xor ebp,eax
+ movdqa xmm8,xmm2
+ add ebx,DWORD PTR[40+rsp]
+ and edi,eax
+ and esi,ebp
+ ror edx,7
+ add ebx,edi
+ mov edi,ecx
+ rol ecx,5
+ add ebx,esi
+ xor ebp,eax
+ add ebx,ecx
+ mov esi,edx
+ xor edx,ebp
+ add eax,DWORD PTR[44+rsp]
+ and esi,ebp
+ and edi,edx
+ ror ecx,7
+ add eax,esi
+ mov esi,ebx
+ rol ebx,5
+ add eax,edi
+ xor edx,ebp
+ add eax,ebx
+ add ebp,DWORD PTR[48+rsp]
+ pxor xmm3,xmm7
+DB 102,68,15,58,15,193,8
+ xor esi,edx
+ mov edi,eax
+ rol eax,5
+ pxor xmm3,xmm4
+ xor esi,ecx
+ add ebp,eax
+ movdqa xmm10,xmm9
+ paddd xmm9,xmm2
+ ror ebx,7
+ add ebp,esi
+ pxor xmm3,xmm8
+ add edx,DWORD PTR[52+rsp]
+ xor edi,ecx
+ mov esi,ebp
+ rol ebp,5
+ movdqa xmm8,xmm3
+ movdqa XMMWORD PTR[32+rsp],xmm9
+ xor edi,ebx
+ add edx,ebp
+ ror eax,7
+ add edx,edi
+ pslld xmm3,2
+ add ecx,DWORD PTR[56+rsp]
+ xor esi,ebx
+ psrld xmm8,30
+ mov edi,edx
+ rol edx,5
+ xor esi,eax
+ add ecx,edx
+ ror ebp,7
+ add ecx,esi
+ por xmm3,xmm8
+ add ebx,DWORD PTR[60+rsp]
+ xor edi,eax
+ mov esi,ecx
+ rol ecx,5
+ xor edi,ebp
+ add ebx,ecx
+ ror edx,7
+ add ebx,edi
+ add eax,DWORD PTR[rsp]
+ paddd xmm10,xmm3
+ xor esi,ebp
+ mov edi,ebx
+ rol ebx,5
+ xor esi,edx
+ movdqa XMMWORD PTR[48+rsp],xmm10
+ add eax,ebx
+ ror ecx,7
+ add eax,esi
+ add ebp,DWORD PTR[4+rsp]
+ xor edi,edx
+ mov esi,eax
+ rol eax,5
+ xor edi,ecx
+ add ebp,eax
+ ror ebx,7
+ add ebp,edi
+ add edx,DWORD PTR[8+rsp]
+ xor esi,ecx
+ mov edi,ebp
+ rol ebp,5
+ xor esi,ebx
+ add edx,ebp
+ ror eax,7
+ add edx,esi
+ add ecx,DWORD PTR[12+rsp]
+ xor edi,ebx
+ mov esi,edx
+ rol edx,5
+ xor edi,eax
+ add ecx,edx
+ ror ebp,7
+ add ecx,edi
+ cmp r9,r10
+ je $L$done_ssse3
+ movdqa xmm6,XMMWORD PTR[64+r11]
+ movdqa xmm9,XMMWORD PTR[r11]
+ movdqu xmm0,XMMWORD PTR[r9]
+ movdqu xmm1,XMMWORD PTR[16+r9]
+ movdqu xmm2,XMMWORD PTR[32+r9]
+ movdqu xmm3,XMMWORD PTR[48+r9]
+DB 102,15,56,0,198
+ add r9,64
+ add ebx,DWORD PTR[16+rsp]
+ xor esi,eax
+DB 102,15,56,0,206
+ mov edi,ecx
+ rol ecx,5
+ paddd xmm0,xmm9
+ xor esi,ebp
+ add ebx,ecx
+ ror edx,7
+ add ebx,esi
+ movdqa XMMWORD PTR[rsp],xmm0
+ add eax,DWORD PTR[20+rsp]
+ xor edi,ebp
+ psubd xmm0,xmm9
+ mov esi,ebx
+ rol ebx,5
+ xor edi,edx
+ add eax,ebx
+ ror ecx,7
+ add eax,edi
+ add ebp,DWORD PTR[24+rsp]
+ xor esi,edx
+ mov edi,eax
+ rol eax,5
+ xor esi,ecx
+ add ebp,eax
+ ror ebx,7
+ add ebp,esi
+ add edx,DWORD PTR[28+rsp]
+ xor edi,ecx
+ mov esi,ebp
+ rol ebp,5
+ xor edi,ebx
+ add edx,ebp
+ ror eax,7
+ add edx,edi
+ add ecx,DWORD PTR[32+rsp]
+ xor esi,ebx
+DB 102,15,56,0,214
+ mov edi,edx
+ rol edx,5
+ paddd xmm1,xmm9
+ xor esi,eax
+ add ecx,edx
+ ror ebp,7
+ add ecx,esi
+ movdqa XMMWORD PTR[16+rsp],xmm1
+ add ebx,DWORD PTR[36+rsp]
+ xor edi,eax
+ psubd xmm1,xmm9
+ mov esi,ecx
+ rol ecx,5
+ xor edi,ebp
+ add ebx,ecx
+ ror edx,7
+ add ebx,edi
+ add eax,DWORD PTR[40+rsp]
+ xor esi,ebp
+ mov edi,ebx
+ rol ebx,5
+ xor esi,edx
+ add eax,ebx
+ ror ecx,7
+ add eax,esi
+ add ebp,DWORD PTR[44+rsp]
+ xor edi,edx
+ mov esi,eax
+ rol eax,5
+ xor edi,ecx
+ add ebp,eax
+ ror ebx,7
+ add ebp,edi
+ add edx,DWORD PTR[48+rsp]
+ xor esi,ecx
+DB 102,15,56,0,222
+ mov edi,ebp
+ rol ebp,5
+ paddd xmm2,xmm9
+ xor esi,ebx
+ add edx,ebp
+ ror eax,7
+ add edx,esi
+ movdqa XMMWORD PTR[32+rsp],xmm2
+ add ecx,DWORD PTR[52+rsp]
+ xor edi,ebx
+ psubd xmm2,xmm9
+ mov esi,edx
+ rol edx,5
+ xor edi,eax
+ add ecx,edx
+ ror ebp,7
+ add ecx,edi
+ add ebx,DWORD PTR[56+rsp]
+ xor esi,eax
+ mov edi,ecx
+ rol ecx,5
+ xor esi,ebp
+ add ebx,ecx
+ ror edx,7
+ add ebx,esi
+ add eax,DWORD PTR[60+rsp]
+ xor edi,ebp
+ mov esi,ebx
+ rol ebx,5
+ xor edi,edx
+ add eax,ebx
+ ror ecx,7
+ add eax,edi
+ add eax,DWORD PTR[r8]
+ add esi,DWORD PTR[4+r8]
+ add ecx,DWORD PTR[8+r8]
+ add edx,DWORD PTR[12+r8]
+ mov DWORD PTR[r8],eax
+ add ebp,DWORD PTR[16+r8]
+ mov DWORD PTR[4+r8],esi
+ mov ebx,esi
+ mov DWORD PTR[8+r8],ecx
+ mov DWORD PTR[12+r8],edx
+ mov DWORD PTR[16+r8],ebp
+ jmp $L$oop_ssse3
+
+ALIGN 16
+$L$done_ssse3::
+ add ebx,DWORD PTR[16+rsp]
+ xor esi,eax
+ mov edi,ecx
+ rol ecx,5
+ xor esi,ebp
+ add ebx,ecx
+ ror edx,7
+ add ebx,esi
+ add eax,DWORD PTR[20+rsp]
+ xor edi,ebp
+ mov esi,ebx
+ rol ebx,5
+ xor edi,edx
+ add eax,ebx
+ ror ecx,7
+ add eax,edi
+ add ebp,DWORD PTR[24+rsp]
+ xor esi,edx
+ mov edi,eax
+ rol eax,5
+ xor esi,ecx
+ add ebp,eax
+ ror ebx,7
+ add ebp,esi
+ add edx,DWORD PTR[28+rsp]
+ xor edi,ecx
+ mov esi,ebp
+ rol ebp,5
+ xor edi,ebx
+ add edx,ebp
+ ror eax,7
+ add edx,edi
+ add ecx,DWORD PTR[32+rsp]
+ xor esi,ebx
+ mov edi,edx
+ rol edx,5
+ xor esi,eax
+ add ecx,edx
+ ror ebp,7
+ add ecx,esi
+ add ebx,DWORD PTR[36+rsp]
+ xor edi,eax
+ mov esi,ecx
+ rol ecx,5
+ xor edi,ebp
+ add ebx,ecx
+ ror edx,7
+ add ebx,edi
+ add eax,DWORD PTR[40+rsp]
+ xor esi,ebp
+ mov edi,ebx
+ rol ebx,5
+ xor esi,edx
+ add eax,ebx
+ ror ecx,7
+ add eax,esi
+ add ebp,DWORD PTR[44+rsp]
+ xor edi,edx
+ mov esi,eax
+ rol eax,5
+ xor edi,ecx
+ add ebp,eax
+ ror ebx,7
+ add ebp,edi
+ add edx,DWORD PTR[48+rsp]
+ xor esi,ecx
+ mov edi,ebp
+ rol ebp,5
+ xor esi,ebx
+ add edx,ebp
+ ror eax,7
+ add edx,esi
+ add ecx,DWORD PTR[52+rsp]
+ xor edi,ebx
+ mov esi,edx
+ rol edx,5
+ xor edi,eax
+ add ecx,edx
+ ror ebp,7
+ add ecx,edi
+ add ebx,DWORD PTR[56+rsp]
+ xor esi,eax
+ mov edi,ecx
+ rol ecx,5
+ xor esi,ebp
+ add ebx,ecx
+ ror edx,7
+ add ebx,esi
+ add eax,DWORD PTR[60+rsp]
+ xor edi,ebp
+ mov esi,ebx
+ rol ebx,5
+ xor edi,edx
+ add eax,ebx
+ ror ecx,7
+ add eax,edi
+ add eax,DWORD PTR[r8]
+ add esi,DWORD PTR[4+r8]
+ add ecx,DWORD PTR[8+r8]
+ mov DWORD PTR[r8],eax
+ add edx,DWORD PTR[12+r8]
+ mov DWORD PTR[4+r8],esi
+ add ebp,DWORD PTR[16+r8]
+ mov DWORD PTR[8+r8],ecx
+ mov DWORD PTR[12+r8],edx
+ mov DWORD PTR[16+r8],ebp
+ movaps xmm6,XMMWORD PTR[((64+0))+rsp]
+ movaps xmm7,XMMWORD PTR[((64+16))+rsp]
+ movaps xmm8,XMMWORD PTR[((64+32))+rsp]
+ movaps xmm9,XMMWORD PTR[((64+48))+rsp]
+ movaps xmm10,XMMWORD PTR[((64+64))+rsp]
+ lea rsi,QWORD PTR[144+rsp]
+ mov r12,QWORD PTR[rsi]
+ mov rbp,QWORD PTR[8+rsi]
+ mov rbx,QWORD PTR[16+rsi]
+ lea rsp,QWORD PTR[24+rsi]
+$L$epilogue_ssse3::
+ mov rdi,QWORD PTR[8+rsp] ;WIN64 epilogue
+ mov rsi,QWORD PTR[16+rsp]
+ DB 0F3h,0C3h ;repret
+$L$SEH_end_sha1_block_data_order_ssse3::
+sha1_block_data_order_ssse3 ENDP
+ALIGN 64
+K_XX_XX::
+ DD 05a827999h,05a827999h,05a827999h,05a827999h
+ DD 06ed9eba1h,06ed9eba1h,06ed9eba1h,06ed9eba1h
+ DD 08f1bbcdch,08f1bbcdch,08f1bbcdch,08f1bbcdch
+ DD 0ca62c1d6h,0ca62c1d6h,0ca62c1d6h,0ca62c1d6h
+ DD 000010203h,004050607h,008090a0bh,00c0d0e0fh
+DB 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115
+DB 102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44
+DB 32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60
+DB 97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114
+DB 103,62,0
+ALIGN 64
+EXTERN __imp_RtlVirtualUnwind:NEAR
+
+ALIGN 16
+se_handler PROC PRIVATE
+ push rsi
+ push rdi
+ push rbx
+ push rbp
+ push r12
+ push r13
+ push r14
+ push r15
+ pushfq
+ sub rsp,64
+
+ mov rax,QWORD PTR[120+r8]
+ mov rbx,QWORD PTR[248+r8]
+
+ lea r10,QWORD PTR[$L$prologue]
+ cmp rbx,r10
+ jb $L$common_seh_tail
+
+ mov rax,QWORD PTR[152+r8]
+
+ lea r10,QWORD PTR[$L$epilogue]
+ cmp rbx,r10
+ jae $L$common_seh_tail
+
+ mov rax,QWORD PTR[64+rax]
+ lea rax,QWORD PTR[32+rax]
+
+ mov rbx,QWORD PTR[((-8))+rax]
+ mov rbp,QWORD PTR[((-16))+rax]
+ mov r12,QWORD PTR[((-24))+rax]
+ mov r13,QWORD PTR[((-32))+rax]
+ mov QWORD PTR[144+r8],rbx
+ mov QWORD PTR[160+r8],rbp
+ mov QWORD PTR[216+r8],r12
+ mov QWORD PTR[224+r8],r13
+
+ jmp $L$common_seh_tail
+se_handler ENDP
+
+
+ALIGN 16
+ssse3_handler PROC PRIVATE
+ push rsi
+ push rdi
+ push rbx
+ push rbp
+ push r12
+ push r13
+ push r14
+ push r15
+ pushfq
+ sub rsp,64
+
+ mov rax,QWORD PTR[120+r8]
+ mov rbx,QWORD PTR[248+r8]
+
+ mov rsi,QWORD PTR[8+r9]
+ mov r11,QWORD PTR[56+r9]
+
+ mov r10d,DWORD PTR[r11]
+ lea r10,QWORD PTR[r10*1+rsi]
+ cmp rbx,r10
+ jb $L$common_seh_tail
+
+ mov rax,QWORD PTR[152+r8]
+
+ mov r10d,DWORD PTR[4+r11]
+ lea r10,QWORD PTR[r10*1+rsi]
+ cmp rbx,r10
+ jae $L$common_seh_tail
+
+ lea rsi,QWORD PTR[64+rax]
+ lea rdi,QWORD PTR[512+r8]
+ mov ecx,10
+ DD 0a548f3fch
+ lea rax,QWORD PTR[168+rax]
+
+ mov rbx,QWORD PTR[((-8))+rax]
+ mov rbp,QWORD PTR[((-16))+rax]
+ mov r12,QWORD PTR[((-24))+rax]
+ mov QWORD PTR[144+r8],rbx
+ mov QWORD PTR[160+r8],rbp
+ mov QWORD PTR[216+r8],r12
+
+$L$common_seh_tail::
+ mov rdi,QWORD PTR[8+rax]
+ mov rsi,QWORD PTR[16+rax]
+ mov QWORD PTR[152+r8],rax
+ mov QWORD PTR[168+r8],rsi
+ mov QWORD PTR[176+r8],rdi
+
+ mov rdi,QWORD PTR[40+r9]
+ mov rsi,r8
+ mov ecx,154
+ DD 0a548f3fch
+
+ mov rsi,r9
+ xor rcx,rcx
+ mov rdx,QWORD PTR[8+rsi]
+ mov r8,QWORD PTR[rsi]
+ mov r9,QWORD PTR[16+rsi]
+ mov r10,QWORD PTR[40+rsi]
+ lea r11,QWORD PTR[56+rsi]
+ lea r12,QWORD PTR[24+rsi]
+ mov QWORD PTR[32+rsp],r10
+ mov QWORD PTR[40+rsp],r11
+ mov QWORD PTR[48+rsp],r12
+ mov QWORD PTR[56+rsp],rcx
+ call QWORD PTR[__imp_RtlVirtualUnwind]
+
+ mov eax,1
+ add rsp,64
+ popfq
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+ pop rbp
+ pop rbx
+ pop rdi
+ pop rsi
+ DB 0F3h,0C3h ;repret
+ssse3_handler ENDP
+
+.text$ ENDS
+.pdata SEGMENT READONLY ALIGN(4)
+ALIGN 4
+ DD imagerel $L$SEH_begin_sha1_block_data_order
+ DD imagerel $L$SEH_end_sha1_block_data_order
+ DD imagerel $L$SEH_info_sha1_block_data_order
+ DD imagerel $L$SEH_begin_sha1_block_data_order_ssse3
+ DD imagerel $L$SEH_end_sha1_block_data_order_ssse3
+ DD imagerel $L$SEH_info_sha1_block_data_order_ssse3
+.pdata ENDS
+.xdata SEGMENT READONLY ALIGN(8)
+ALIGN 8
+$L$SEH_info_sha1_block_data_order::
+DB 9,0,0,0
+ DD imagerel se_handler
+$L$SEH_info_sha1_block_data_order_ssse3::
+DB 9,0,0,0
+ DD imagerel ssse3_handler
+ DD imagerel $L$prologue_ssse3,imagerel $L$epilogue_ssse3
+
+.xdata ENDS
+END
+
diff --git a/crypto/libressl/crypto/sha/sha1-mingw64-x86_64.S b/crypto/libressl/crypto/sha/sha1-mingw64-x86_64.S
new file mode 100644
index 0000000..3ce9fc9
--- /dev/null
+++ b/crypto/libressl/crypto/sha/sha1-mingw64-x86_64.S
@@ -0,0 +1,2664 @@
+#include "x86_arch.h"
+.text
+
+
+
+.globl sha1_block_data_order
+.def sha1_block_data_order; .scl 2; .type 32; .endef
+.p2align 4
+sha1_block_data_order:
+ movq %rdi,8(%rsp)
+ movq %rsi,16(%rsp)
+ movq %rsp,%rax
+.LSEH_begin_sha1_block_data_order:
+ movq %rcx,%rdi
+ movq %rdx,%rsi
+ movq %r8,%rdx
+
+ movl OPENSSL_ia32cap_P+0(%rip),%r9d
+ movl OPENSSL_ia32cap_P+4(%rip),%r8d
+ testl $IA32CAP_MASK1_SSSE3,%r8d
+ jz .Lialu
+ jmp _ssse3_shortcut
+
+.p2align 4
+.Lialu:
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ movq %rsp,%r11
+ movq %rdi,%r8
+ subq $72,%rsp
+ movq %rsi,%r9
+ andq $-64,%rsp
+ movq %rdx,%r10
+ movq %r11,64(%rsp)
+.Lprologue:
+
+ movl 0(%r8),%esi
+ movl 4(%r8),%edi
+ movl 8(%r8),%r11d
+ movl 12(%r8),%r12d
+ movl 16(%r8),%r13d
+ jmp .Lloop
+
+.p2align 4
+.Lloop:
+ movl 0(%r9),%edx
+ bswapl %edx
+ movl %edx,0(%rsp)
+ movl %r11d,%eax
+ movl 4(%r9),%ebp
+ movl %esi,%ecx
+ xorl %r12d,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%r13,1),%r13d
+ andl %edi,%eax
+ movl %ebp,4(%rsp)
+ addl %ecx,%r13d
+ xorl %r12d,%eax
+ roll $30,%edi
+ addl %eax,%r13d
+ movl %edi,%eax
+ movl 8(%r9),%edx
+ movl %r13d,%ecx
+ xorl %r11d,%eax
+ bswapl %edx
+ roll $5,%ecx
+ leal 1518500249(%rbp,%r12,1),%r12d
+ andl %esi,%eax
+ movl %edx,8(%rsp)
+ addl %ecx,%r12d
+ xorl %r11d,%eax
+ roll $30,%esi
+ addl %eax,%r12d
+ movl %esi,%eax
+ movl 12(%r9),%ebp
+ movl %r12d,%ecx
+ xorl %edi,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%r11,1),%r11d
+ andl %r13d,%eax
+ movl %ebp,12(%rsp)
+ addl %ecx,%r11d
+ xorl %edi,%eax
+ roll $30,%r13d
+ addl %eax,%r11d
+ movl %r13d,%eax
+ movl 16(%r9),%edx
+ movl %r11d,%ecx
+ xorl %esi,%eax
+ bswapl %edx
+ roll $5,%ecx
+ leal 1518500249(%rbp,%rdi,1),%edi
+ andl %r12d,%eax
+ movl %edx,16(%rsp)
+ addl %ecx,%edi
+ xorl %esi,%eax
+ roll $30,%r12d
+ addl %eax,%edi
+ movl %r12d,%eax
+ movl 20(%r9),%ebp
+ movl %edi,%ecx
+ xorl %r13d,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%rsi,1),%esi
+ andl %r11d,%eax
+ movl %ebp,20(%rsp)
+ addl %ecx,%esi
+ xorl %r13d,%eax
+ roll $30,%r11d
+ addl %eax,%esi
+ movl %r11d,%eax
+ movl 24(%r9),%edx
+ movl %esi,%ecx
+ xorl %r12d,%eax
+ bswapl %edx
+ roll $5,%ecx
+ leal 1518500249(%rbp,%r13,1),%r13d
+ andl %edi,%eax
+ movl %edx,24(%rsp)
+ addl %ecx,%r13d
+ xorl %r12d,%eax
+ roll $30,%edi
+ addl %eax,%r13d
+ movl %edi,%eax
+ movl 28(%r9),%ebp
+ movl %r13d,%ecx
+ xorl %r11d,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%r12,1),%r12d
+ andl %esi,%eax
+ movl %ebp,28(%rsp)
+ addl %ecx,%r12d
+ xorl %r11d,%eax
+ roll $30,%esi
+ addl %eax,%r12d
+ movl %esi,%eax
+ movl 32(%r9),%edx
+ movl %r12d,%ecx
+ xorl %edi,%eax
+ bswapl %edx
+ roll $5,%ecx
+ leal 1518500249(%rbp,%r11,1),%r11d
+ andl %r13d,%eax
+ movl %edx,32(%rsp)
+ addl %ecx,%r11d
+ xorl %edi,%eax
+ roll $30,%r13d
+ addl %eax,%r11d
+ movl %r13d,%eax
+ movl 36(%r9),%ebp
+ movl %r11d,%ecx
+ xorl %esi,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%rdi,1),%edi
+ andl %r12d,%eax
+ movl %ebp,36(%rsp)
+ addl %ecx,%edi
+ xorl %esi,%eax
+ roll $30,%r12d
+ addl %eax,%edi
+ movl %r12d,%eax
+ movl 40(%r9),%edx
+ movl %edi,%ecx
+ xorl %r13d,%eax
+ bswapl %edx
+ roll $5,%ecx
+ leal 1518500249(%rbp,%rsi,1),%esi
+ andl %r11d,%eax
+ movl %edx,40(%rsp)
+ addl %ecx,%esi
+ xorl %r13d,%eax
+ roll $30,%r11d
+ addl %eax,%esi
+ movl %r11d,%eax
+ movl 44(%r9),%ebp
+ movl %esi,%ecx
+ xorl %r12d,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%r13,1),%r13d
+ andl %edi,%eax
+ movl %ebp,44(%rsp)
+ addl %ecx,%r13d
+ xorl %r12d,%eax
+ roll $30,%edi
+ addl %eax,%r13d
+ movl %edi,%eax
+ movl 48(%r9),%edx
+ movl %r13d,%ecx
+ xorl %r11d,%eax
+ bswapl %edx
+ roll $5,%ecx
+ leal 1518500249(%rbp,%r12,1),%r12d
+ andl %esi,%eax
+ movl %edx,48(%rsp)
+ addl %ecx,%r12d
+ xorl %r11d,%eax
+ roll $30,%esi
+ addl %eax,%r12d
+ movl %esi,%eax
+ movl 52(%r9),%ebp
+ movl %r12d,%ecx
+ xorl %edi,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%r11,1),%r11d
+ andl %r13d,%eax
+ movl %ebp,52(%rsp)
+ addl %ecx,%r11d
+ xorl %edi,%eax
+ roll $30,%r13d
+ addl %eax,%r11d
+ movl %r13d,%eax
+ movl 56(%r9),%edx
+ movl %r11d,%ecx
+ xorl %esi,%eax
+ bswapl %edx
+ roll $5,%ecx
+ leal 1518500249(%rbp,%rdi,1),%edi
+ andl %r12d,%eax
+ movl %edx,56(%rsp)
+ addl %ecx,%edi
+ xorl %esi,%eax
+ roll $30,%r12d
+ addl %eax,%edi
+ movl %r12d,%eax
+ movl 60(%r9),%ebp
+ movl %edi,%ecx
+ xorl %r13d,%eax
+ bswapl %ebp
+ roll $5,%ecx
+ leal 1518500249(%rdx,%rsi,1),%esi
+ andl %r11d,%eax
+ movl %ebp,60(%rsp)
+ addl %ecx,%esi
+ xorl %r13d,%eax
+ roll $30,%r11d
+ addl %eax,%esi
+ movl 0(%rsp),%edx
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 8(%rsp),%edx
+ xorl %r12d,%eax
+ roll $5,%ecx
+ xorl 32(%rsp),%edx
+ andl %edi,%eax
+ leal 1518500249(%rbp,%r13,1),%r13d
+ xorl 52(%rsp),%edx
+ xorl %r12d,%eax
+ roll $1,%edx
+ addl %ecx,%r13d
+ roll $30,%edi
+ movl %edx,0(%rsp)
+ addl %eax,%r13d
+ movl 4(%rsp),%ebp
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 12(%rsp),%ebp
+ xorl %r11d,%eax
+ roll $5,%ecx
+ xorl 36(%rsp),%ebp
+ andl %esi,%eax
+ leal 1518500249(%rdx,%r12,1),%r12d
+ xorl 56(%rsp),%ebp
+ xorl %r11d,%eax
+ roll $1,%ebp
+ addl %ecx,%r12d
+ roll $30,%esi
+ movl %ebp,4(%rsp)
+ addl %eax,%r12d
+ movl 8(%rsp),%edx
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 16(%rsp),%edx
+ xorl %edi,%eax
+ roll $5,%ecx
+ xorl 40(%rsp),%edx
+ andl %r13d,%eax
+ leal 1518500249(%rbp,%r11,1),%r11d
+ xorl 60(%rsp),%edx
+ xorl %edi,%eax
+ roll $1,%edx
+ addl %ecx,%r11d
+ roll $30,%r13d
+ movl %edx,8(%rsp)
+ addl %eax,%r11d
+ movl 12(%rsp),%ebp
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 20(%rsp),%ebp
+ xorl %esi,%eax
+ roll $5,%ecx
+ xorl 44(%rsp),%ebp
+ andl %r12d,%eax
+ leal 1518500249(%rdx,%rdi,1),%edi
+ xorl 0(%rsp),%ebp
+ xorl %esi,%eax
+ roll $1,%ebp
+ addl %ecx,%edi
+ roll $30,%r12d
+ movl %ebp,12(%rsp)
+ addl %eax,%edi
+ movl 16(%rsp),%edx
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 24(%rsp),%edx
+ xorl %r13d,%eax
+ roll $5,%ecx
+ xorl 48(%rsp),%edx
+ andl %r11d,%eax
+ leal 1518500249(%rbp,%rsi,1),%esi
+ xorl 4(%rsp),%edx
+ xorl %r13d,%eax
+ roll $1,%edx
+ addl %ecx,%esi
+ roll $30,%r11d
+ movl %edx,16(%rsp)
+ addl %eax,%esi
+ movl 20(%rsp),%ebp
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 28(%rsp),%ebp
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%r13,1),%r13d
+ xorl 52(%rsp),%ebp
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 8(%rsp),%ebp
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%ebp
+ movl %ebp,20(%rsp)
+ movl 24(%rsp),%edx
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 32(%rsp),%edx
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%r12,1),%r12d
+ xorl 56(%rsp),%edx
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 12(%rsp),%edx
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%edx
+ movl %edx,24(%rsp)
+ movl 28(%rsp),%ebp
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 36(%rsp),%ebp
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%r11,1),%r11d
+ xorl 60(%rsp),%ebp
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 16(%rsp),%ebp
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%ebp
+ movl %ebp,28(%rsp)
+ movl 32(%rsp),%edx
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 40(%rsp),%edx
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%rdi,1),%edi
+ xorl 0(%rsp),%edx
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 20(%rsp),%edx
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%edx
+ movl %edx,32(%rsp)
+ movl 36(%rsp),%ebp
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 44(%rsp),%ebp
+ xorl %r11d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%rsi,1),%esi
+ xorl 4(%rsp),%ebp
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ xorl 24(%rsp),%ebp
+ roll $30,%r11d
+ addl %eax,%esi
+ roll $1,%ebp
+ movl %ebp,36(%rsp)
+ movl 40(%rsp),%edx
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 48(%rsp),%edx
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%r13,1),%r13d
+ xorl 8(%rsp),%edx
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 28(%rsp),%edx
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%edx
+ movl %edx,40(%rsp)
+ movl 44(%rsp),%ebp
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 52(%rsp),%ebp
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%r12,1),%r12d
+ xorl 12(%rsp),%ebp
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 32(%rsp),%ebp
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%ebp
+ movl %ebp,44(%rsp)
+ movl 48(%rsp),%edx
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 56(%rsp),%edx
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%r11,1),%r11d
+ xorl 16(%rsp),%edx
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 36(%rsp),%edx
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%edx
+ movl %edx,48(%rsp)
+ movl 52(%rsp),%ebp
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 60(%rsp),%ebp
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%rdi,1),%edi
+ xorl 20(%rsp),%ebp
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 40(%rsp),%ebp
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%ebp
+ movl %ebp,52(%rsp)
+ movl 56(%rsp),%edx
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 0(%rsp),%edx
+ xorl %r11d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%rsi,1),%esi
+ xorl 24(%rsp),%edx
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ xorl 44(%rsp),%edx
+ roll $30,%r11d
+ addl %eax,%esi
+ roll $1,%edx
+ movl %edx,56(%rsp)
+ movl 60(%rsp),%ebp
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 4(%rsp),%ebp
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%r13,1),%r13d
+ xorl 28(%rsp),%ebp
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 48(%rsp),%ebp
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%ebp
+ movl %ebp,60(%rsp)
+ movl 0(%rsp),%edx
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 8(%rsp),%edx
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%r12,1),%r12d
+ xorl 32(%rsp),%edx
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 52(%rsp),%edx
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%edx
+ movl %edx,0(%rsp)
+ movl 4(%rsp),%ebp
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 12(%rsp),%ebp
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%r11,1),%r11d
+ xorl 36(%rsp),%ebp
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 56(%rsp),%ebp
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%ebp
+ movl %ebp,4(%rsp)
+ movl 8(%rsp),%edx
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 16(%rsp),%edx
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%rdi,1),%edi
+ xorl 40(%rsp),%edx
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 60(%rsp),%edx
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%edx
+ movl %edx,8(%rsp)
+ movl 12(%rsp),%ebp
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 20(%rsp),%ebp
+ xorl %r11d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%rsi,1),%esi
+ xorl 44(%rsp),%ebp
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ xorl 0(%rsp),%ebp
+ roll $30,%r11d
+ addl %eax,%esi
+ roll $1,%ebp
+ movl %ebp,12(%rsp)
+ movl 16(%rsp),%edx
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 24(%rsp),%edx
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%r13,1),%r13d
+ xorl 48(%rsp),%edx
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 4(%rsp),%edx
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%edx
+ movl %edx,16(%rsp)
+ movl 20(%rsp),%ebp
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 28(%rsp),%ebp
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%r12,1),%r12d
+ xorl 52(%rsp),%ebp
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 8(%rsp),%ebp
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%ebp
+ movl %ebp,20(%rsp)
+ movl 24(%rsp),%edx
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 32(%rsp),%edx
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%r11,1),%r11d
+ xorl 56(%rsp),%edx
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 12(%rsp),%edx
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%edx
+ movl %edx,24(%rsp)
+ movl 28(%rsp),%ebp
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 36(%rsp),%ebp
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rdx,%rdi,1),%edi
+ xorl 60(%rsp),%ebp
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 16(%rsp),%ebp
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%ebp
+ movl %ebp,28(%rsp)
+ movl 32(%rsp),%edx
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 40(%rsp),%edx
+ xorl %r11d,%eax
+ roll $5,%ecx
+ leal 1859775393(%rbp,%rsi,1),%esi
+ xorl 0(%rsp),%edx
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ xorl 20(%rsp),%edx
+ roll $30,%r11d
+ addl %eax,%esi
+ roll $1,%edx
+ movl %edx,32(%rsp)
+ movl 36(%rsp),%ebp
+ movl %r11d,%eax
+ movl %r11d,%ebx
+ xorl 44(%rsp),%ebp
+ andl %r12d,%eax
+ movl %esi,%ecx
+ xorl 4(%rsp),%ebp
+ xorl %r12d,%ebx
+ leal -1894007588(%rdx,%r13,1),%r13d
+ roll $5,%ecx
+ xorl 24(%rsp),%ebp
+ addl %eax,%r13d
+ andl %edi,%ebx
+ roll $1,%ebp
+ addl %ebx,%r13d
+ roll $30,%edi
+ movl %ebp,36(%rsp)
+ addl %ecx,%r13d
+ movl 40(%rsp),%edx
+ movl %edi,%eax
+ movl %edi,%ebx
+ xorl 48(%rsp),%edx
+ andl %r11d,%eax
+ movl %r13d,%ecx
+ xorl 8(%rsp),%edx
+ xorl %r11d,%ebx
+ leal -1894007588(%rbp,%r12,1),%r12d
+ roll $5,%ecx
+ xorl 28(%rsp),%edx
+ addl %eax,%r12d
+ andl %esi,%ebx
+ roll $1,%edx
+ addl %ebx,%r12d
+ roll $30,%esi
+ movl %edx,40(%rsp)
+ addl %ecx,%r12d
+ movl 44(%rsp),%ebp
+ movl %esi,%eax
+ movl %esi,%ebx
+ xorl 52(%rsp),%ebp
+ andl %edi,%eax
+ movl %r12d,%ecx
+ xorl 12(%rsp),%ebp
+ xorl %edi,%ebx
+ leal -1894007588(%rdx,%r11,1),%r11d
+ roll $5,%ecx
+ xorl 32(%rsp),%ebp
+ addl %eax,%r11d
+ andl %r13d,%ebx
+ roll $1,%ebp
+ addl %ebx,%r11d
+ roll $30,%r13d
+ movl %ebp,44(%rsp)
+ addl %ecx,%r11d
+ movl 48(%rsp),%edx
+ movl %r13d,%eax
+ movl %r13d,%ebx
+ xorl 56(%rsp),%edx
+ andl %esi,%eax
+ movl %r11d,%ecx
+ xorl 16(%rsp),%edx
+ xorl %esi,%ebx
+ leal -1894007588(%rbp,%rdi,1),%edi
+ roll $5,%ecx
+ xorl 36(%rsp),%edx
+ addl %eax,%edi
+ andl %r12d,%ebx
+ roll $1,%edx
+ addl %ebx,%edi
+ roll $30,%r12d
+ movl %edx,48(%rsp)
+ addl %ecx,%edi
+ movl 52(%rsp),%ebp
+ movl %r12d,%eax
+ movl %r12d,%ebx
+ xorl 60(%rsp),%ebp
+ andl %r13d,%eax
+ movl %edi,%ecx
+ xorl 20(%rsp),%ebp
+ xorl %r13d,%ebx
+ leal -1894007588(%rdx,%rsi,1),%esi
+ roll $5,%ecx
+ xorl 40(%rsp),%ebp
+ addl %eax,%esi
+ andl %r11d,%ebx
+ roll $1,%ebp
+ addl %ebx,%esi
+ roll $30,%r11d
+ movl %ebp,52(%rsp)
+ addl %ecx,%esi
+ movl 56(%rsp),%edx
+ movl %r11d,%eax
+ movl %r11d,%ebx
+ xorl 0(%rsp),%edx
+ andl %r12d,%eax
+ movl %esi,%ecx
+ xorl 24(%rsp),%edx
+ xorl %r12d,%ebx
+ leal -1894007588(%rbp,%r13,1),%r13d
+ roll $5,%ecx
+ xorl 44(%rsp),%edx
+ addl %eax,%r13d
+ andl %edi,%ebx
+ roll $1,%edx
+ addl %ebx,%r13d
+ roll $30,%edi
+ movl %edx,56(%rsp)
+ addl %ecx,%r13d
+ movl 60(%rsp),%ebp
+ movl %edi,%eax
+ movl %edi,%ebx
+ xorl 4(%rsp),%ebp
+ andl %r11d,%eax
+ movl %r13d,%ecx
+ xorl 28(%rsp),%ebp
+ xorl %r11d,%ebx
+ leal -1894007588(%rdx,%r12,1),%r12d
+ roll $5,%ecx
+ xorl 48(%rsp),%ebp
+ addl %eax,%r12d
+ andl %esi,%ebx
+ roll $1,%ebp
+ addl %ebx,%r12d
+ roll $30,%esi
+ movl %ebp,60(%rsp)
+ addl %ecx,%r12d
+ movl 0(%rsp),%edx
+ movl %esi,%eax
+ movl %esi,%ebx
+ xorl 8(%rsp),%edx
+ andl %edi,%eax
+ movl %r12d,%ecx
+ xorl 32(%rsp),%edx
+ xorl %edi,%ebx
+ leal -1894007588(%rbp,%r11,1),%r11d
+ roll $5,%ecx
+ xorl 52(%rsp),%edx
+ addl %eax,%r11d
+ andl %r13d,%ebx
+ roll $1,%edx
+ addl %ebx,%r11d
+ roll $30,%r13d
+ movl %edx,0(%rsp)
+ addl %ecx,%r11d
+ movl 4(%rsp),%ebp
+ movl %r13d,%eax
+ movl %r13d,%ebx
+ xorl 12(%rsp),%ebp
+ andl %esi,%eax
+ movl %r11d,%ecx
+ xorl 36(%rsp),%ebp
+ xorl %esi,%ebx
+ leal -1894007588(%rdx,%rdi,1),%edi
+ roll $5,%ecx
+ xorl 56(%rsp),%ebp
+ addl %eax,%edi
+ andl %r12d,%ebx
+ roll $1,%ebp
+ addl %ebx,%edi
+ roll $30,%r12d
+ movl %ebp,4(%rsp)
+ addl %ecx,%edi
+ movl 8(%rsp),%edx
+ movl %r12d,%eax
+ movl %r12d,%ebx
+ xorl 16(%rsp),%edx
+ andl %r13d,%eax
+ movl %edi,%ecx
+ xorl 40(%rsp),%edx
+ xorl %r13d,%ebx
+ leal -1894007588(%rbp,%rsi,1),%esi
+ roll $5,%ecx
+ xorl 60(%rsp),%edx
+ addl %eax,%esi
+ andl %r11d,%ebx
+ roll $1,%edx
+ addl %ebx,%esi
+ roll $30,%r11d
+ movl %edx,8(%rsp)
+ addl %ecx,%esi
+ movl 12(%rsp),%ebp
+ movl %r11d,%eax
+ movl %r11d,%ebx
+ xorl 20(%rsp),%ebp
+ andl %r12d,%eax
+ movl %esi,%ecx
+ xorl 44(%rsp),%ebp
+ xorl %r12d,%ebx
+ leal -1894007588(%rdx,%r13,1),%r13d
+ roll $5,%ecx
+ xorl 0(%rsp),%ebp
+ addl %eax,%r13d
+ andl %edi,%ebx
+ roll $1,%ebp
+ addl %ebx,%r13d
+ roll $30,%edi
+ movl %ebp,12(%rsp)
+ addl %ecx,%r13d
+ movl 16(%rsp),%edx
+ movl %edi,%eax
+ movl %edi,%ebx
+ xorl 24(%rsp),%edx
+ andl %r11d,%eax
+ movl %r13d,%ecx
+ xorl 48(%rsp),%edx
+ xorl %r11d,%ebx
+ leal -1894007588(%rbp,%r12,1),%r12d
+ roll $5,%ecx
+ xorl 4(%rsp),%edx
+ addl %eax,%r12d
+ andl %esi,%ebx
+ roll $1,%edx
+ addl %ebx,%r12d
+ roll $30,%esi
+ movl %edx,16(%rsp)
+ addl %ecx,%r12d
+ movl 20(%rsp),%ebp
+ movl %esi,%eax
+ movl %esi,%ebx
+ xorl 28(%rsp),%ebp
+ andl %edi,%eax
+ movl %r12d,%ecx
+ xorl 52(%rsp),%ebp
+ xorl %edi,%ebx
+ leal -1894007588(%rdx,%r11,1),%r11d
+ roll $5,%ecx
+ xorl 8(%rsp),%ebp
+ addl %eax,%r11d
+ andl %r13d,%ebx
+ roll $1,%ebp
+ addl %ebx,%r11d
+ roll $30,%r13d
+ movl %ebp,20(%rsp)
+ addl %ecx,%r11d
+ movl 24(%rsp),%edx
+ movl %r13d,%eax
+ movl %r13d,%ebx
+ xorl 32(%rsp),%edx
+ andl %esi,%eax
+ movl %r11d,%ecx
+ xorl 56(%rsp),%edx
+ xorl %esi,%ebx
+ leal -1894007588(%rbp,%rdi,1),%edi
+ roll $5,%ecx
+ xorl 12(%rsp),%edx
+ addl %eax,%edi
+ andl %r12d,%ebx
+ roll $1,%edx
+ addl %ebx,%edi
+ roll $30,%r12d
+ movl %edx,24(%rsp)
+ addl %ecx,%edi
+ movl 28(%rsp),%ebp
+ movl %r12d,%eax
+ movl %r12d,%ebx
+ xorl 36(%rsp),%ebp
+ andl %r13d,%eax
+ movl %edi,%ecx
+ xorl 60(%rsp),%ebp
+ xorl %r13d,%ebx
+ leal -1894007588(%rdx,%rsi,1),%esi
+ roll $5,%ecx
+ xorl 16(%rsp),%ebp
+ addl %eax,%esi
+ andl %r11d,%ebx
+ roll $1,%ebp
+ addl %ebx,%esi
+ roll $30,%r11d
+ movl %ebp,28(%rsp)
+ addl %ecx,%esi
+ movl 32(%rsp),%edx
+ movl %r11d,%eax
+ movl %r11d,%ebx
+ xorl 40(%rsp),%edx
+ andl %r12d,%eax
+ movl %esi,%ecx
+ xorl 0(%rsp),%edx
+ xorl %r12d,%ebx
+ leal -1894007588(%rbp,%r13,1),%r13d
+ roll $5,%ecx
+ xorl 20(%rsp),%edx
+ addl %eax,%r13d
+ andl %edi,%ebx
+ roll $1,%edx
+ addl %ebx,%r13d
+ roll $30,%edi
+ movl %edx,32(%rsp)
+ addl %ecx,%r13d
+ movl 36(%rsp),%ebp
+ movl %edi,%eax
+ movl %edi,%ebx
+ xorl 44(%rsp),%ebp
+ andl %r11d,%eax
+ movl %r13d,%ecx
+ xorl 4(%rsp),%ebp
+ xorl %r11d,%ebx
+ leal -1894007588(%rdx,%r12,1),%r12d
+ roll $5,%ecx
+ xorl 24(%rsp),%ebp
+ addl %eax,%r12d
+ andl %esi,%ebx
+ roll $1,%ebp
+ addl %ebx,%r12d
+ roll $30,%esi
+ movl %ebp,36(%rsp)
+ addl %ecx,%r12d
+ movl 40(%rsp),%edx
+ movl %esi,%eax
+ movl %esi,%ebx
+ xorl 48(%rsp),%edx
+ andl %edi,%eax
+ movl %r12d,%ecx
+ xorl 8(%rsp),%edx
+ xorl %edi,%ebx
+ leal -1894007588(%rbp,%r11,1),%r11d
+ roll $5,%ecx
+ xorl 28(%rsp),%edx
+ addl %eax,%r11d
+ andl %r13d,%ebx
+ roll $1,%edx
+ addl %ebx,%r11d
+ roll $30,%r13d
+ movl %edx,40(%rsp)
+ addl %ecx,%r11d
+ movl 44(%rsp),%ebp
+ movl %r13d,%eax
+ movl %r13d,%ebx
+ xorl 52(%rsp),%ebp
+ andl %esi,%eax
+ movl %r11d,%ecx
+ xorl 12(%rsp),%ebp
+ xorl %esi,%ebx
+ leal -1894007588(%rdx,%rdi,1),%edi
+ roll $5,%ecx
+ xorl 32(%rsp),%ebp
+ addl %eax,%edi
+ andl %r12d,%ebx
+ roll $1,%ebp
+ addl %ebx,%edi
+ roll $30,%r12d
+ movl %ebp,44(%rsp)
+ addl %ecx,%edi
+ movl 48(%rsp),%edx
+ movl %r12d,%eax
+ movl %r12d,%ebx
+ xorl 56(%rsp),%edx
+ andl %r13d,%eax
+ movl %edi,%ecx
+ xorl 16(%rsp),%edx
+ xorl %r13d,%ebx
+ leal -1894007588(%rbp,%rsi,1),%esi
+ roll $5,%ecx
+ xorl 36(%rsp),%edx
+ addl %eax,%esi
+ andl %r11d,%ebx
+ roll $1,%edx
+ addl %ebx,%esi
+ roll $30,%r11d
+ movl %edx,48(%rsp)
+ addl %ecx,%esi
+ movl 52(%rsp),%ebp
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 60(%rsp),%ebp
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%r13,1),%r13d
+ xorl 20(%rsp),%ebp
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 40(%rsp),%ebp
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%ebp
+ movl %ebp,52(%rsp)
+ movl 56(%rsp),%edx
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 0(%rsp),%edx
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%r12,1),%r12d
+ xorl 24(%rsp),%edx
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 44(%rsp),%edx
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%edx
+ movl %edx,56(%rsp)
+ movl 60(%rsp),%ebp
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 4(%rsp),%ebp
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%r11,1),%r11d
+ xorl 28(%rsp),%ebp
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 48(%rsp),%ebp
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%ebp
+ movl %ebp,60(%rsp)
+ movl 0(%rsp),%edx
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 8(%rsp),%edx
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%rdi,1),%edi
+ xorl 32(%rsp),%edx
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 52(%rsp),%edx
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%edx
+ movl %edx,0(%rsp)
+ movl 4(%rsp),%ebp
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 12(%rsp),%ebp
+ xorl %r11d,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%rsi,1),%esi
+ xorl 36(%rsp),%ebp
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ xorl 56(%rsp),%ebp
+ roll $30,%r11d
+ addl %eax,%esi
+ roll $1,%ebp
+ movl %ebp,4(%rsp)
+ movl 8(%rsp),%edx
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 16(%rsp),%edx
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%r13,1),%r13d
+ xorl 40(%rsp),%edx
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 60(%rsp),%edx
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%edx
+ movl %edx,8(%rsp)
+ movl 12(%rsp),%ebp
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 20(%rsp),%ebp
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%r12,1),%r12d
+ xorl 44(%rsp),%ebp
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 0(%rsp),%ebp
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%ebp
+ movl %ebp,12(%rsp)
+ movl 16(%rsp),%edx
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 24(%rsp),%edx
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%r11,1),%r11d
+ xorl 48(%rsp),%edx
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 4(%rsp),%edx
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%edx
+ movl %edx,16(%rsp)
+ movl 20(%rsp),%ebp
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 28(%rsp),%ebp
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%rdi,1),%edi
+ xorl 52(%rsp),%ebp
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 8(%rsp),%ebp
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%ebp
+ movl %ebp,20(%rsp)
+ movl 24(%rsp),%edx
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 32(%rsp),%edx
+ xorl %r11d,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%rsi,1),%esi
+ xorl 56(%rsp),%edx
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ xorl 12(%rsp),%edx
+ roll $30,%r11d
+ addl %eax,%esi
+ roll $1,%edx
+ movl %edx,24(%rsp)
+ movl 28(%rsp),%ebp
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 36(%rsp),%ebp
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%r13,1),%r13d
+ xorl 60(%rsp),%ebp
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 16(%rsp),%ebp
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%ebp
+ movl %ebp,28(%rsp)
+ movl 32(%rsp),%edx
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 40(%rsp),%edx
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%r12,1),%r12d
+ xorl 0(%rsp),%edx
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 20(%rsp),%edx
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%edx
+ movl %edx,32(%rsp)
+ movl 36(%rsp),%ebp
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 44(%rsp),%ebp
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%r11,1),%r11d
+ xorl 4(%rsp),%ebp
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 24(%rsp),%ebp
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%ebp
+ movl %ebp,36(%rsp)
+ movl 40(%rsp),%edx
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 48(%rsp),%edx
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%rdi,1),%edi
+ xorl 8(%rsp),%edx
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 28(%rsp),%edx
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%edx
+ movl %edx,40(%rsp)
+ movl 44(%rsp),%ebp
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl 52(%rsp),%ebp
+ xorl %r11d,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%rsi,1),%esi
+ xorl 12(%rsp),%ebp
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ xorl 32(%rsp),%ebp
+ roll $30,%r11d
+ addl %eax,%esi
+ roll $1,%ebp
+ movl %ebp,44(%rsp)
+ movl 48(%rsp),%edx
+ movl %r11d,%eax
+ movl %esi,%ecx
+ xorl 56(%rsp),%edx
+ xorl %edi,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%r13,1),%r13d
+ xorl 16(%rsp),%edx
+ xorl %r12d,%eax
+ addl %ecx,%r13d
+ xorl 36(%rsp),%edx
+ roll $30,%edi
+ addl %eax,%r13d
+ roll $1,%edx
+ movl %edx,48(%rsp)
+ movl 52(%rsp),%ebp
+ movl %edi,%eax
+ movl %r13d,%ecx
+ xorl 60(%rsp),%ebp
+ xorl %esi,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%r12,1),%r12d
+ xorl 20(%rsp),%ebp
+ xorl %r11d,%eax
+ addl %ecx,%r12d
+ xorl 40(%rsp),%ebp
+ roll $30,%esi
+ addl %eax,%r12d
+ roll $1,%ebp
+ movl 56(%rsp),%edx
+ movl %esi,%eax
+ movl %r12d,%ecx
+ xorl 0(%rsp),%edx
+ xorl %r13d,%eax
+ roll $5,%ecx
+ leal -899497514(%rbp,%r11,1),%r11d
+ xorl 24(%rsp),%edx
+ xorl %edi,%eax
+ addl %ecx,%r11d
+ xorl 44(%rsp),%edx
+ roll $30,%r13d
+ addl %eax,%r11d
+ roll $1,%edx
+ movl 60(%rsp),%ebp
+ movl %r13d,%eax
+ movl %r11d,%ecx
+ xorl 4(%rsp),%ebp
+ xorl %r12d,%eax
+ roll $5,%ecx
+ leal -899497514(%rdx,%rdi,1),%edi
+ xorl 28(%rsp),%ebp
+ xorl %esi,%eax
+ addl %ecx,%edi
+ xorl 48(%rsp),%ebp
+ roll $30,%r12d
+ addl %eax,%edi
+ roll $1,%ebp
+ movl %r12d,%eax
+ movl %edi,%ecx
+ xorl %r11d,%eax
+ leal -899497514(%rbp,%rsi,1),%esi
+ roll $5,%ecx
+ xorl %r13d,%eax
+ addl %ecx,%esi
+ roll $30,%r11d
+ addl %eax,%esi
+ addl 0(%r8),%esi
+ addl 4(%r8),%edi
+ addl 8(%r8),%r11d
+ addl 12(%r8),%r12d
+ addl 16(%r8),%r13d
+ movl %esi,0(%r8)
+ movl %edi,4(%r8)
+ movl %r11d,8(%r8)
+ movl %r12d,12(%r8)
+ movl %r13d,16(%r8)
+
+ subq $1,%r10
+ leaq 64(%r9),%r9
+ jnz .Lloop
+
+ movq 64(%rsp),%rsi
+ movq (%rsi),%r13
+ movq 8(%rsi),%r12
+ movq 16(%rsi),%rbp
+ movq 24(%rsi),%rbx
+ leaq 32(%rsi),%rsp
+.Lepilogue:
+ movq 8(%rsp),%rdi
+ movq 16(%rsp),%rsi
+ retq
+.LSEH_end_sha1_block_data_order:
+.def sha1_block_data_order_ssse3; .scl 3; .type 32; .endef
+.p2align 4
+sha1_block_data_order_ssse3:
+ movq %rdi,8(%rsp)
+ movq %rsi,16(%rsp)
+ movq %rsp,%rax
+.LSEH_begin_sha1_block_data_order_ssse3:
+ movq %rcx,%rdi
+ movq %rdx,%rsi
+ movq %r8,%rdx
+
+_ssse3_shortcut:
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ leaq -144(%rsp),%rsp
+ movaps %xmm6,64+0(%rsp)
+ movaps %xmm7,64+16(%rsp)
+ movaps %xmm8,64+32(%rsp)
+ movaps %xmm9,64+48(%rsp)
+ movaps %xmm10,64+64(%rsp)
+.Lprologue_ssse3:
+ movq %rdi,%r8
+ movq %rsi,%r9
+ movq %rdx,%r10
+
+ shlq $6,%r10
+ addq %r9,%r10
+ leaq K_XX_XX(%rip),%r11
+
+ movl 0(%r8),%eax
+ movl 4(%r8),%ebx
+ movl 8(%r8),%ecx
+ movl 12(%r8),%edx
+ movl %ebx,%esi
+ movl 16(%r8),%ebp
+
+ movdqa 64(%r11),%xmm6
+ movdqa 0(%r11),%xmm9
+ movdqu 0(%r9),%xmm0
+ movdqu 16(%r9),%xmm1
+ movdqu 32(%r9),%xmm2
+ movdqu 48(%r9),%xmm3
+.byte 102,15,56,0,198
+ addq $64,%r9
+.byte 102,15,56,0,206
+.byte 102,15,56,0,214
+.byte 102,15,56,0,222
+ paddd %xmm9,%xmm0
+ paddd %xmm9,%xmm1
+ paddd %xmm9,%xmm2
+ movdqa %xmm0,0(%rsp)
+ psubd %xmm9,%xmm0
+ movdqa %xmm1,16(%rsp)
+ psubd %xmm9,%xmm1
+ movdqa %xmm2,32(%rsp)
+ psubd %xmm9,%xmm2
+ jmp .Loop_ssse3
+.p2align 4
+.Loop_ssse3:
+ movdqa %xmm1,%xmm4
+ addl 0(%rsp),%ebp
+ xorl %edx,%ecx
+ movdqa %xmm3,%xmm8
+.byte 102,15,58,15,224,8
+ movl %eax,%edi
+ roll $5,%eax
+ paddd %xmm3,%xmm9
+ andl %ecx,%esi
+ xorl %edx,%ecx
+ psrldq $4,%xmm8
+ xorl %edx,%esi
+ addl %eax,%ebp
+ pxor %xmm0,%xmm4
+ rorl $2,%ebx
+ addl %esi,%ebp
+ pxor %xmm2,%xmm8
+ addl 4(%rsp),%edx
+ xorl %ecx,%ebx
+ movl %ebp,%esi
+ roll $5,%ebp
+ pxor %xmm8,%xmm4
+ andl %ebx,%edi
+ xorl %ecx,%ebx
+ movdqa %xmm9,48(%rsp)
+ xorl %ecx,%edi
+ addl %ebp,%edx
+ movdqa %xmm4,%xmm10
+ movdqa %xmm4,%xmm8
+ rorl $7,%eax
+ addl %edi,%edx
+ addl 8(%rsp),%ecx
+ xorl %ebx,%eax
+ pslldq $12,%xmm10
+ paddd %xmm4,%xmm4
+ movl %edx,%edi
+ roll $5,%edx
+ andl %eax,%esi
+ xorl %ebx,%eax
+ psrld $31,%xmm8
+ xorl %ebx,%esi
+ addl %edx,%ecx
+ movdqa %xmm10,%xmm9
+ rorl $7,%ebp
+ addl %esi,%ecx
+ psrld $30,%xmm10
+ por %xmm8,%xmm4
+ addl 12(%rsp),%ebx
+ xorl %eax,%ebp
+ movl %ecx,%esi
+ roll $5,%ecx
+ pslld $2,%xmm9
+ pxor %xmm10,%xmm4
+ andl %ebp,%edi
+ xorl %eax,%ebp
+ movdqa 0(%r11),%xmm10
+ xorl %eax,%edi
+ addl %ecx,%ebx
+ pxor %xmm9,%xmm4
+ rorl $7,%edx
+ addl %edi,%ebx
+ movdqa %xmm2,%xmm5
+ addl 16(%rsp),%eax
+ xorl %ebp,%edx
+ movdqa %xmm4,%xmm9
+.byte 102,15,58,15,233,8
+ movl %ebx,%edi
+ roll $5,%ebx
+ paddd %xmm4,%xmm10
+ andl %edx,%esi
+ xorl %ebp,%edx
+ psrldq $4,%xmm9
+ xorl %ebp,%esi
+ addl %ebx,%eax
+ pxor %xmm1,%xmm5
+ rorl $7,%ecx
+ addl %esi,%eax
+ pxor %xmm3,%xmm9
+ addl 20(%rsp),%ebp
+ xorl %edx,%ecx
+ movl %eax,%esi
+ roll $5,%eax
+ pxor %xmm9,%xmm5
+ andl %ecx,%edi
+ xorl %edx,%ecx
+ movdqa %xmm10,0(%rsp)
+ xorl %edx,%edi
+ addl %eax,%ebp
+ movdqa %xmm5,%xmm8
+ movdqa %xmm5,%xmm9
+ rorl $7,%ebx
+ addl %edi,%ebp
+ addl 24(%rsp),%edx
+ xorl %ecx,%ebx
+ pslldq $12,%xmm8
+ paddd %xmm5,%xmm5
+ movl %ebp,%edi
+ roll $5,%ebp
+ andl %ebx,%esi
+ xorl %ecx,%ebx
+ psrld $31,%xmm9
+ xorl %ecx,%esi
+ addl %ebp,%edx
+ movdqa %xmm8,%xmm10
+ rorl $7,%eax
+ addl %esi,%edx
+ psrld $30,%xmm8
+ por %xmm9,%xmm5
+ addl 28(%rsp),%ecx
+ xorl %ebx,%eax
+ movl %edx,%esi
+ roll $5,%edx
+ pslld $2,%xmm10
+ pxor %xmm8,%xmm5
+ andl %eax,%edi
+ xorl %ebx,%eax
+ movdqa 16(%r11),%xmm8
+ xorl %ebx,%edi
+ addl %edx,%ecx
+ pxor %xmm10,%xmm5
+ rorl $7,%ebp
+ addl %edi,%ecx
+ movdqa %xmm3,%xmm6
+ addl 32(%rsp),%ebx
+ xorl %eax,%ebp
+ movdqa %xmm5,%xmm10
+.byte 102,15,58,15,242,8
+ movl %ecx,%edi
+ roll $5,%ecx
+ paddd %xmm5,%xmm8
+ andl %ebp,%esi
+ xorl %eax,%ebp
+ psrldq $4,%xmm10
+ xorl %eax,%esi
+ addl %ecx,%ebx
+ pxor %xmm2,%xmm6
+ rorl $7,%edx
+ addl %esi,%ebx
+ pxor %xmm4,%xmm10
+ addl 36(%rsp),%eax
+ xorl %ebp,%edx
+ movl %ebx,%esi
+ roll $5,%ebx
+ pxor %xmm10,%xmm6
+ andl %edx,%edi
+ xorl %ebp,%edx
+ movdqa %xmm8,16(%rsp)
+ xorl %ebp,%edi
+ addl %ebx,%eax
+ movdqa %xmm6,%xmm9
+ movdqa %xmm6,%xmm10
+ rorl $7,%ecx
+ addl %edi,%eax
+ addl 40(%rsp),%ebp
+ xorl %edx,%ecx
+ pslldq $12,%xmm9
+ paddd %xmm6,%xmm6
+ movl %eax,%edi
+ roll $5,%eax
+ andl %ecx,%esi
+ xorl %edx,%ecx
+ psrld $31,%xmm10
+ xorl %edx,%esi
+ addl %eax,%ebp
+ movdqa %xmm9,%xmm8
+ rorl $7,%ebx
+ addl %esi,%ebp
+ psrld $30,%xmm9
+ por %xmm10,%xmm6
+ addl 44(%rsp),%edx
+ xorl %ecx,%ebx
+ movl %ebp,%esi
+ roll $5,%ebp
+ pslld $2,%xmm8
+ pxor %xmm9,%xmm6
+ andl %ebx,%edi
+ xorl %ecx,%ebx
+ movdqa 16(%r11),%xmm9
+ xorl %ecx,%edi
+ addl %ebp,%edx
+ pxor %xmm8,%xmm6
+ rorl $7,%eax
+ addl %edi,%edx
+ movdqa %xmm4,%xmm7
+ addl 48(%rsp),%ecx
+ xorl %ebx,%eax
+ movdqa %xmm6,%xmm8
+.byte 102,15,58,15,251,8
+ movl %edx,%edi
+ roll $5,%edx
+ paddd %xmm6,%xmm9
+ andl %eax,%esi
+ xorl %ebx,%eax
+ psrldq $4,%xmm8
+ xorl %ebx,%esi
+ addl %edx,%ecx
+ pxor %xmm3,%xmm7
+ rorl $7,%ebp
+ addl %esi,%ecx
+ pxor %xmm5,%xmm8
+ addl 52(%rsp),%ebx
+ xorl %eax,%ebp
+ movl %ecx,%esi
+ roll $5,%ecx
+ pxor %xmm8,%xmm7
+ andl %ebp,%edi
+ xorl %eax,%ebp
+ movdqa %xmm9,32(%rsp)
+ xorl %eax,%edi
+ addl %ecx,%ebx
+ movdqa %xmm7,%xmm10
+ movdqa %xmm7,%xmm8
+ rorl $7,%edx
+ addl %edi,%ebx
+ addl 56(%rsp),%eax
+ xorl %ebp,%edx
+ pslldq $12,%xmm10
+ paddd %xmm7,%xmm7
+ movl %ebx,%edi
+ roll $5,%ebx
+ andl %edx,%esi
+ xorl %ebp,%edx
+ psrld $31,%xmm8
+ xorl %ebp,%esi
+ addl %ebx,%eax
+ movdqa %xmm10,%xmm9
+ rorl $7,%ecx
+ addl %esi,%eax
+ psrld $30,%xmm10
+ por %xmm8,%xmm7
+ addl 60(%rsp),%ebp
+ xorl %edx,%ecx
+ movl %eax,%esi
+ roll $5,%eax
+ pslld $2,%xmm9
+ pxor %xmm10,%xmm7
+ andl %ecx,%edi
+ xorl %edx,%ecx
+ movdqa 16(%r11),%xmm10
+ xorl %edx,%edi
+ addl %eax,%ebp
+ pxor %xmm9,%xmm7
+ rorl $7,%ebx
+ addl %edi,%ebp
+ movdqa %xmm7,%xmm9
+ addl 0(%rsp),%edx
+ pxor %xmm4,%xmm0
+.byte 102,68,15,58,15,206,8
+ xorl %ecx,%ebx
+ movl %ebp,%edi
+ roll $5,%ebp
+ pxor %xmm1,%xmm0
+ andl %ebx,%esi
+ xorl %ecx,%ebx
+ movdqa %xmm10,%xmm8
+ paddd %xmm7,%xmm10
+ xorl %ecx,%esi
+ addl %ebp,%edx
+ pxor %xmm9,%xmm0
+ rorl $7,%eax
+ addl %esi,%edx
+ addl 4(%rsp),%ecx
+ xorl %ebx,%eax
+ movdqa %xmm0,%xmm9
+ movdqa %xmm10,48(%rsp)
+ movl %edx,%esi
+ roll $5,%edx
+ andl %eax,%edi
+ xorl %ebx,%eax
+ pslld $2,%xmm0
+ xorl %ebx,%edi
+ addl %edx,%ecx
+ psrld $30,%xmm9
+ rorl $7,%ebp
+ addl %edi,%ecx
+ addl 8(%rsp),%ebx
+ xorl %eax,%ebp
+ movl %ecx,%edi
+ roll $5,%ecx
+ por %xmm9,%xmm0
+ andl %ebp,%esi
+ xorl %eax,%ebp
+ movdqa %xmm0,%xmm10
+ xorl %eax,%esi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %esi,%ebx
+ addl 12(%rsp),%eax
+ xorl %ebp,%edx
+ movl %ebx,%esi
+ roll $5,%ebx
+ andl %edx,%edi
+ xorl %ebp,%edx
+ xorl %ebp,%edi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %edi,%eax
+ addl 16(%rsp),%ebp
+ pxor %xmm5,%xmm1
+.byte 102,68,15,58,15,215,8
+ xorl %edx,%esi
+ movl %eax,%edi
+ roll $5,%eax
+ pxor %xmm2,%xmm1
+ xorl %ecx,%esi
+ addl %eax,%ebp
+ movdqa %xmm8,%xmm9
+ paddd %xmm0,%xmm8
+ rorl $7,%ebx
+ addl %esi,%ebp
+ pxor %xmm10,%xmm1
+ addl 20(%rsp),%edx
+ xorl %ecx,%edi
+ movl %ebp,%esi
+ roll $5,%ebp
+ movdqa %xmm1,%xmm10
+ movdqa %xmm8,0(%rsp)
+ xorl %ebx,%edi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %edi,%edx
+ pslld $2,%xmm1
+ addl 24(%rsp),%ecx
+ xorl %ebx,%esi
+ psrld $30,%xmm10
+ movl %edx,%edi
+ roll $5,%edx
+ xorl %eax,%esi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %esi,%ecx
+ por %xmm10,%xmm1
+ addl 28(%rsp),%ebx
+ xorl %eax,%edi
+ movdqa %xmm1,%xmm8
+ movl %ecx,%esi
+ roll $5,%ecx
+ xorl %ebp,%edi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %edi,%ebx
+ addl 32(%rsp),%eax
+ pxor %xmm6,%xmm2
+.byte 102,68,15,58,15,192,8
+ xorl %ebp,%esi
+ movl %ebx,%edi
+ roll $5,%ebx
+ pxor %xmm3,%xmm2
+ xorl %edx,%esi
+ addl %ebx,%eax
+ movdqa 32(%r11),%xmm10
+ paddd %xmm1,%xmm9
+ rorl $7,%ecx
+ addl %esi,%eax
+ pxor %xmm8,%xmm2
+ addl 36(%rsp),%ebp
+ xorl %edx,%edi
+ movl %eax,%esi
+ roll $5,%eax
+ movdqa %xmm2,%xmm8
+ movdqa %xmm9,16(%rsp)
+ xorl %ecx,%edi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %edi,%ebp
+ pslld $2,%xmm2
+ addl 40(%rsp),%edx
+ xorl %ecx,%esi
+ psrld $30,%xmm8
+ movl %ebp,%edi
+ roll $5,%ebp
+ xorl %ebx,%esi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %esi,%edx
+ por %xmm8,%xmm2
+ addl 44(%rsp),%ecx
+ xorl %ebx,%edi
+ movdqa %xmm2,%xmm9
+ movl %edx,%esi
+ roll $5,%edx
+ xorl %eax,%edi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %edi,%ecx
+ addl 48(%rsp),%ebx
+ pxor %xmm7,%xmm3
+.byte 102,68,15,58,15,201,8
+ xorl %eax,%esi
+ movl %ecx,%edi
+ roll $5,%ecx
+ pxor %xmm4,%xmm3
+ xorl %ebp,%esi
+ addl %ecx,%ebx
+ movdqa %xmm10,%xmm8
+ paddd %xmm2,%xmm10
+ rorl $7,%edx
+ addl %esi,%ebx
+ pxor %xmm9,%xmm3
+ addl 52(%rsp),%eax
+ xorl %ebp,%edi
+ movl %ebx,%esi
+ roll $5,%ebx
+ movdqa %xmm3,%xmm9
+ movdqa %xmm10,32(%rsp)
+ xorl %edx,%edi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %edi,%eax
+ pslld $2,%xmm3
+ addl 56(%rsp),%ebp
+ xorl %edx,%esi
+ psrld $30,%xmm9
+ movl %eax,%edi
+ roll $5,%eax
+ xorl %ecx,%esi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %esi,%ebp
+ por %xmm9,%xmm3
+ addl 60(%rsp),%edx
+ xorl %ecx,%edi
+ movdqa %xmm3,%xmm10
+ movl %ebp,%esi
+ roll $5,%ebp
+ xorl %ebx,%edi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %edi,%edx
+ addl 0(%rsp),%ecx
+ pxor %xmm0,%xmm4
+.byte 102,68,15,58,15,210,8
+ xorl %ebx,%esi
+ movl %edx,%edi
+ roll $5,%edx
+ pxor %xmm5,%xmm4
+ xorl %eax,%esi
+ addl %edx,%ecx
+ movdqa %xmm8,%xmm9
+ paddd %xmm3,%xmm8
+ rorl $7,%ebp
+ addl %esi,%ecx
+ pxor %xmm10,%xmm4
+ addl 4(%rsp),%ebx
+ xorl %eax,%edi
+ movl %ecx,%esi
+ roll $5,%ecx
+ movdqa %xmm4,%xmm10
+ movdqa %xmm8,48(%rsp)
+ xorl %ebp,%edi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %edi,%ebx
+ pslld $2,%xmm4
+ addl 8(%rsp),%eax
+ xorl %ebp,%esi
+ psrld $30,%xmm10
+ movl %ebx,%edi
+ roll $5,%ebx
+ xorl %edx,%esi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %esi,%eax
+ por %xmm10,%xmm4
+ addl 12(%rsp),%ebp
+ xorl %edx,%edi
+ movdqa %xmm4,%xmm8
+ movl %eax,%esi
+ roll $5,%eax
+ xorl %ecx,%edi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %edi,%ebp
+ addl 16(%rsp),%edx
+ pxor %xmm1,%xmm5
+.byte 102,68,15,58,15,195,8
+ xorl %ecx,%esi
+ movl %ebp,%edi
+ roll $5,%ebp
+ pxor %xmm6,%xmm5
+ xorl %ebx,%esi
+ addl %ebp,%edx
+ movdqa %xmm9,%xmm10
+ paddd %xmm4,%xmm9
+ rorl $7,%eax
+ addl %esi,%edx
+ pxor %xmm8,%xmm5
+ addl 20(%rsp),%ecx
+ xorl %ebx,%edi
+ movl %edx,%esi
+ roll $5,%edx
+ movdqa %xmm5,%xmm8
+ movdqa %xmm9,0(%rsp)
+ xorl %eax,%edi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %edi,%ecx
+ pslld $2,%xmm5
+ addl 24(%rsp),%ebx
+ xorl %eax,%esi
+ psrld $30,%xmm8
+ movl %ecx,%edi
+ roll $5,%ecx
+ xorl %ebp,%esi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %esi,%ebx
+ por %xmm8,%xmm5
+ addl 28(%rsp),%eax
+ xorl %ebp,%edi
+ movdqa %xmm5,%xmm9
+ movl %ebx,%esi
+ roll $5,%ebx
+ xorl %edx,%edi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %edi,%eax
+ movl %ecx,%edi
+ pxor %xmm2,%xmm6
+.byte 102,68,15,58,15,204,8
+ xorl %edx,%ecx
+ addl 32(%rsp),%ebp
+ andl %edx,%edi
+ pxor %xmm7,%xmm6
+ andl %ecx,%esi
+ rorl $7,%ebx
+ movdqa %xmm10,%xmm8
+ paddd %xmm5,%xmm10
+ addl %edi,%ebp
+ movl %eax,%edi
+ pxor %xmm9,%xmm6
+ roll $5,%eax
+ addl %esi,%ebp
+ xorl %edx,%ecx
+ addl %eax,%ebp
+ movdqa %xmm6,%xmm9
+ movdqa %xmm10,16(%rsp)
+ movl %ebx,%esi
+ xorl %ecx,%ebx
+ addl 36(%rsp),%edx
+ andl %ecx,%esi
+ pslld $2,%xmm6
+ andl %ebx,%edi
+ rorl $7,%eax
+ psrld $30,%xmm9
+ addl %esi,%edx
+ movl %ebp,%esi
+ roll $5,%ebp
+ addl %edi,%edx
+ xorl %ecx,%ebx
+ addl %ebp,%edx
+ por %xmm9,%xmm6
+ movl %eax,%edi
+ xorl %ebx,%eax
+ movdqa %xmm6,%xmm10
+ addl 40(%rsp),%ecx
+ andl %ebx,%edi
+ andl %eax,%esi
+ rorl $7,%ebp
+ addl %edi,%ecx
+ movl %edx,%edi
+ roll $5,%edx
+ addl %esi,%ecx
+ xorl %ebx,%eax
+ addl %edx,%ecx
+ movl %ebp,%esi
+ xorl %eax,%ebp
+ addl 44(%rsp),%ebx
+ andl %eax,%esi
+ andl %ebp,%edi
+ rorl $7,%edx
+ addl %esi,%ebx
+ movl %ecx,%esi
+ roll $5,%ecx
+ addl %edi,%ebx
+ xorl %eax,%ebp
+ addl %ecx,%ebx
+ movl %edx,%edi
+ pxor %xmm3,%xmm7
+.byte 102,68,15,58,15,213,8
+ xorl %ebp,%edx
+ addl 48(%rsp),%eax
+ andl %ebp,%edi
+ pxor %xmm0,%xmm7
+ andl %edx,%esi
+ rorl $7,%ecx
+ movdqa 48(%r11),%xmm9
+ paddd %xmm6,%xmm8
+ addl %edi,%eax
+ movl %ebx,%edi
+ pxor %xmm10,%xmm7
+ roll $5,%ebx
+ addl %esi,%eax
+ xorl %ebp,%edx
+ addl %ebx,%eax
+ movdqa %xmm7,%xmm10
+ movdqa %xmm8,32(%rsp)
+ movl %ecx,%esi
+ xorl %edx,%ecx
+ addl 52(%rsp),%ebp
+ andl %edx,%esi
+ pslld $2,%xmm7
+ andl %ecx,%edi
+ rorl $7,%ebx
+ psrld $30,%xmm10
+ addl %esi,%ebp
+ movl %eax,%esi
+ roll $5,%eax
+ addl %edi,%ebp
+ xorl %edx,%ecx
+ addl %eax,%ebp
+ por %xmm10,%xmm7
+ movl %ebx,%edi
+ xorl %ecx,%ebx
+ movdqa %xmm7,%xmm8
+ addl 56(%rsp),%edx
+ andl %ecx,%edi
+ andl %ebx,%esi
+ rorl $7,%eax
+ addl %edi,%edx
+ movl %ebp,%edi
+ roll $5,%ebp
+ addl %esi,%edx
+ xorl %ecx,%ebx
+ addl %ebp,%edx
+ movl %eax,%esi
+ xorl %ebx,%eax
+ addl 60(%rsp),%ecx
+ andl %ebx,%esi
+ andl %eax,%edi
+ rorl $7,%ebp
+ addl %esi,%ecx
+ movl %edx,%esi
+ roll $5,%edx
+ addl %edi,%ecx
+ xorl %ebx,%eax
+ addl %edx,%ecx
+ movl %ebp,%edi
+ pxor %xmm4,%xmm0
+.byte 102,68,15,58,15,198,8
+ xorl %eax,%ebp
+ addl 0(%rsp),%ebx
+ andl %eax,%edi
+ pxor %xmm1,%xmm0
+ andl %ebp,%esi
+ rorl $7,%edx
+ movdqa %xmm9,%xmm10
+ paddd %xmm7,%xmm9
+ addl %edi,%ebx
+ movl %ecx,%edi
+ pxor %xmm8,%xmm0
+ roll $5,%ecx
+ addl %esi,%ebx
+ xorl %eax,%ebp
+ addl %ecx,%ebx
+ movdqa %xmm0,%xmm8
+ movdqa %xmm9,48(%rsp)
+ movl %edx,%esi
+ xorl %ebp,%edx
+ addl 4(%rsp),%eax
+ andl %ebp,%esi
+ pslld $2,%xmm0
+ andl %edx,%edi
+ rorl $7,%ecx
+ psrld $30,%xmm8
+ addl %esi,%eax
+ movl %ebx,%esi
+ roll $5,%ebx
+ addl %edi,%eax
+ xorl %ebp,%edx
+ addl %ebx,%eax
+ por %xmm8,%xmm0
+ movl %ecx,%edi
+ xorl %edx,%ecx
+ movdqa %xmm0,%xmm9
+ addl 8(%rsp),%ebp
+ andl %edx,%edi
+ andl %ecx,%esi
+ rorl $7,%ebx
+ addl %edi,%ebp
+ movl %eax,%edi
+ roll $5,%eax
+ addl %esi,%ebp
+ xorl %edx,%ecx
+ addl %eax,%ebp
+ movl %ebx,%esi
+ xorl %ecx,%ebx
+ addl 12(%rsp),%edx
+ andl %ecx,%esi
+ andl %ebx,%edi
+ rorl $7,%eax
+ addl %esi,%edx
+ movl %ebp,%esi
+ roll $5,%ebp
+ addl %edi,%edx
+ xorl %ecx,%ebx
+ addl %ebp,%edx
+ movl %eax,%edi
+ pxor %xmm5,%xmm1
+.byte 102,68,15,58,15,207,8
+ xorl %ebx,%eax
+ addl 16(%rsp),%ecx
+ andl %ebx,%edi
+ pxor %xmm2,%xmm1
+ andl %eax,%esi
+ rorl $7,%ebp
+ movdqa %xmm10,%xmm8
+ paddd %xmm0,%xmm10
+ addl %edi,%ecx
+ movl %edx,%edi
+ pxor %xmm9,%xmm1
+ roll $5,%edx
+ addl %esi,%ecx
+ xorl %ebx,%eax
+ addl %edx,%ecx
+ movdqa %xmm1,%xmm9
+ movdqa %xmm10,0(%rsp)
+ movl %ebp,%esi
+ xorl %eax,%ebp
+ addl 20(%rsp),%ebx
+ andl %eax,%esi
+ pslld $2,%xmm1
+ andl %ebp,%edi
+ rorl $7,%edx
+ psrld $30,%xmm9
+ addl %esi,%ebx
+ movl %ecx,%esi
+ roll $5,%ecx
+ addl %edi,%ebx
+ xorl %eax,%ebp
+ addl %ecx,%ebx
+ por %xmm9,%xmm1
+ movl %edx,%edi
+ xorl %ebp,%edx
+ movdqa %xmm1,%xmm10
+ addl 24(%rsp),%eax
+ andl %ebp,%edi
+ andl %edx,%esi
+ rorl $7,%ecx
+ addl %edi,%eax
+ movl %ebx,%edi
+ roll $5,%ebx
+ addl %esi,%eax
+ xorl %ebp,%edx
+ addl %ebx,%eax
+ movl %ecx,%esi
+ xorl %edx,%ecx
+ addl 28(%rsp),%ebp
+ andl %edx,%esi
+ andl %ecx,%edi
+ rorl $7,%ebx
+ addl %esi,%ebp
+ movl %eax,%esi
+ roll $5,%eax
+ addl %edi,%ebp
+ xorl %edx,%ecx
+ addl %eax,%ebp
+ movl %ebx,%edi
+ pxor %xmm6,%xmm2
+.byte 102,68,15,58,15,208,8
+ xorl %ecx,%ebx
+ addl 32(%rsp),%edx
+ andl %ecx,%edi
+ pxor %xmm3,%xmm2
+ andl %ebx,%esi
+ rorl $7,%eax
+ movdqa %xmm8,%xmm9
+ paddd %xmm1,%xmm8
+ addl %edi,%edx
+ movl %ebp,%edi
+ pxor %xmm10,%xmm2
+ roll $5,%ebp
+ addl %esi,%edx
+ xorl %ecx,%ebx
+ addl %ebp,%edx
+ movdqa %xmm2,%xmm10
+ movdqa %xmm8,16(%rsp)
+ movl %eax,%esi
+ xorl %ebx,%eax
+ addl 36(%rsp),%ecx
+ andl %ebx,%esi
+ pslld $2,%xmm2
+ andl %eax,%edi
+ rorl $7,%ebp
+ psrld $30,%xmm10
+ addl %esi,%ecx
+ movl %edx,%esi
+ roll $5,%edx
+ addl %edi,%ecx
+ xorl %ebx,%eax
+ addl %edx,%ecx
+ por %xmm10,%xmm2
+ movl %ebp,%edi
+ xorl %eax,%ebp
+ movdqa %xmm2,%xmm8
+ addl 40(%rsp),%ebx
+ andl %eax,%edi
+ andl %ebp,%esi
+ rorl $7,%edx
+ addl %edi,%ebx
+ movl %ecx,%edi
+ roll $5,%ecx
+ addl %esi,%ebx
+ xorl %eax,%ebp
+ addl %ecx,%ebx
+ movl %edx,%esi
+ xorl %ebp,%edx
+ addl 44(%rsp),%eax
+ andl %ebp,%esi
+ andl %edx,%edi
+ rorl $7,%ecx
+ addl %esi,%eax
+ movl %ebx,%esi
+ roll $5,%ebx
+ addl %edi,%eax
+ xorl %ebp,%edx
+ addl %ebx,%eax
+ addl 48(%rsp),%ebp
+ pxor %xmm7,%xmm3
+.byte 102,68,15,58,15,193,8
+ xorl %edx,%esi
+ movl %eax,%edi
+ roll $5,%eax
+ pxor %xmm4,%xmm3
+ xorl %ecx,%esi
+ addl %eax,%ebp
+ movdqa %xmm9,%xmm10
+ paddd %xmm2,%xmm9
+ rorl $7,%ebx
+ addl %esi,%ebp
+ pxor %xmm8,%xmm3
+ addl 52(%rsp),%edx
+ xorl %ecx,%edi
+ movl %ebp,%esi
+ roll $5,%ebp
+ movdqa %xmm3,%xmm8
+ movdqa %xmm9,32(%rsp)
+ xorl %ebx,%edi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %edi,%edx
+ pslld $2,%xmm3
+ addl 56(%rsp),%ecx
+ xorl %ebx,%esi
+ psrld $30,%xmm8
+ movl %edx,%edi
+ roll $5,%edx
+ xorl %eax,%esi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %esi,%ecx
+ por %xmm8,%xmm3
+ addl 60(%rsp),%ebx
+ xorl %eax,%edi
+ movl %ecx,%esi
+ roll $5,%ecx
+ xorl %ebp,%edi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %edi,%ebx
+ addl 0(%rsp),%eax
+ paddd %xmm3,%xmm10
+ xorl %ebp,%esi
+ movl %ebx,%edi
+ roll $5,%ebx
+ xorl %edx,%esi
+ movdqa %xmm10,48(%rsp)
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %esi,%eax
+ addl 4(%rsp),%ebp
+ xorl %edx,%edi
+ movl %eax,%esi
+ roll $5,%eax
+ xorl %ecx,%edi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %edi,%ebp
+ addl 8(%rsp),%edx
+ xorl %ecx,%esi
+ movl %ebp,%edi
+ roll $5,%ebp
+ xorl %ebx,%esi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %esi,%edx
+ addl 12(%rsp),%ecx
+ xorl %ebx,%edi
+ movl %edx,%esi
+ roll $5,%edx
+ xorl %eax,%edi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %edi,%ecx
+ cmpq %r10,%r9
+ je .Ldone_ssse3
+ movdqa 64(%r11),%xmm6
+ movdqa 0(%r11),%xmm9
+ movdqu 0(%r9),%xmm0
+ movdqu 16(%r9),%xmm1
+ movdqu 32(%r9),%xmm2
+ movdqu 48(%r9),%xmm3
+.byte 102,15,56,0,198
+ addq $64,%r9
+ addl 16(%rsp),%ebx
+ xorl %eax,%esi
+.byte 102,15,56,0,206
+ movl %ecx,%edi
+ roll $5,%ecx
+ paddd %xmm9,%xmm0
+ xorl %ebp,%esi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %esi,%ebx
+ movdqa %xmm0,0(%rsp)
+ addl 20(%rsp),%eax
+ xorl %ebp,%edi
+ psubd %xmm9,%xmm0
+ movl %ebx,%esi
+ roll $5,%ebx
+ xorl %edx,%edi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %edi,%eax
+ addl 24(%rsp),%ebp
+ xorl %edx,%esi
+ movl %eax,%edi
+ roll $5,%eax
+ xorl %ecx,%esi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %esi,%ebp
+ addl 28(%rsp),%edx
+ xorl %ecx,%edi
+ movl %ebp,%esi
+ roll $5,%ebp
+ xorl %ebx,%edi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %edi,%edx
+ addl 32(%rsp),%ecx
+ xorl %ebx,%esi
+.byte 102,15,56,0,214
+ movl %edx,%edi
+ roll $5,%edx
+ paddd %xmm9,%xmm1
+ xorl %eax,%esi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %esi,%ecx
+ movdqa %xmm1,16(%rsp)
+ addl 36(%rsp),%ebx
+ xorl %eax,%edi
+ psubd %xmm9,%xmm1
+ movl %ecx,%esi
+ roll $5,%ecx
+ xorl %ebp,%edi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %edi,%ebx
+ addl 40(%rsp),%eax
+ xorl %ebp,%esi
+ movl %ebx,%edi
+ roll $5,%ebx
+ xorl %edx,%esi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %esi,%eax
+ addl 44(%rsp),%ebp
+ xorl %edx,%edi
+ movl %eax,%esi
+ roll $5,%eax
+ xorl %ecx,%edi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %edi,%ebp
+ addl 48(%rsp),%edx
+ xorl %ecx,%esi
+.byte 102,15,56,0,222
+ movl %ebp,%edi
+ roll $5,%ebp
+ paddd %xmm9,%xmm2
+ xorl %ebx,%esi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %esi,%edx
+ movdqa %xmm2,32(%rsp)
+ addl 52(%rsp),%ecx
+ xorl %ebx,%edi
+ psubd %xmm9,%xmm2
+ movl %edx,%esi
+ roll $5,%edx
+ xorl %eax,%edi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %edi,%ecx
+ addl 56(%rsp),%ebx
+ xorl %eax,%esi
+ movl %ecx,%edi
+ roll $5,%ecx
+ xorl %ebp,%esi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %esi,%ebx
+ addl 60(%rsp),%eax
+ xorl %ebp,%edi
+ movl %ebx,%esi
+ roll $5,%ebx
+ xorl %edx,%edi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %edi,%eax
+ addl 0(%r8),%eax
+ addl 4(%r8),%esi
+ addl 8(%r8),%ecx
+ addl 12(%r8),%edx
+ movl %eax,0(%r8)
+ addl 16(%r8),%ebp
+ movl %esi,4(%r8)
+ movl %esi,%ebx
+ movl %ecx,8(%r8)
+ movl %edx,12(%r8)
+ movl %ebp,16(%r8)
+ jmp .Loop_ssse3
+
+.p2align 4
+.Ldone_ssse3:
+ addl 16(%rsp),%ebx
+ xorl %eax,%esi
+ movl %ecx,%edi
+ roll $5,%ecx
+ xorl %ebp,%esi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %esi,%ebx
+ addl 20(%rsp),%eax
+ xorl %ebp,%edi
+ movl %ebx,%esi
+ roll $5,%ebx
+ xorl %edx,%edi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %edi,%eax
+ addl 24(%rsp),%ebp
+ xorl %edx,%esi
+ movl %eax,%edi
+ roll $5,%eax
+ xorl %ecx,%esi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %esi,%ebp
+ addl 28(%rsp),%edx
+ xorl %ecx,%edi
+ movl %ebp,%esi
+ roll $5,%ebp
+ xorl %ebx,%edi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %edi,%edx
+ addl 32(%rsp),%ecx
+ xorl %ebx,%esi
+ movl %edx,%edi
+ roll $5,%edx
+ xorl %eax,%esi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %esi,%ecx
+ addl 36(%rsp),%ebx
+ xorl %eax,%edi
+ movl %ecx,%esi
+ roll $5,%ecx
+ xorl %ebp,%edi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %edi,%ebx
+ addl 40(%rsp),%eax
+ xorl %ebp,%esi
+ movl %ebx,%edi
+ roll $5,%ebx
+ xorl %edx,%esi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %esi,%eax
+ addl 44(%rsp),%ebp
+ xorl %edx,%edi
+ movl %eax,%esi
+ roll $5,%eax
+ xorl %ecx,%edi
+ addl %eax,%ebp
+ rorl $7,%ebx
+ addl %edi,%ebp
+ addl 48(%rsp),%edx
+ xorl %ecx,%esi
+ movl %ebp,%edi
+ roll $5,%ebp
+ xorl %ebx,%esi
+ addl %ebp,%edx
+ rorl $7,%eax
+ addl %esi,%edx
+ addl 52(%rsp),%ecx
+ xorl %ebx,%edi
+ movl %edx,%esi
+ roll $5,%edx
+ xorl %eax,%edi
+ addl %edx,%ecx
+ rorl $7,%ebp
+ addl %edi,%ecx
+ addl 56(%rsp),%ebx
+ xorl %eax,%esi
+ movl %ecx,%edi
+ roll $5,%ecx
+ xorl %ebp,%esi
+ addl %ecx,%ebx
+ rorl $7,%edx
+ addl %esi,%ebx
+ addl 60(%rsp),%eax
+ xorl %ebp,%edi
+ movl %ebx,%esi
+ roll $5,%ebx
+ xorl %edx,%edi
+ addl %ebx,%eax
+ rorl $7,%ecx
+ addl %edi,%eax
+ addl 0(%r8),%eax
+ addl 4(%r8),%esi
+ addl 8(%r8),%ecx
+ movl %eax,0(%r8)
+ addl 12(%r8),%edx
+ movl %esi,4(%r8)
+ addl 16(%r8),%ebp
+ movl %ecx,8(%r8)
+ movl %edx,12(%r8)
+ movl %ebp,16(%r8)
+ movaps 64+0(%rsp),%xmm6
+ movaps 64+16(%rsp),%xmm7
+ movaps 64+32(%rsp),%xmm8
+ movaps 64+48(%rsp),%xmm9
+ movaps 64+64(%rsp),%xmm10
+ leaq 144(%rsp),%rsi
+ movq 0(%rsi),%r12
+ movq 8(%rsi),%rbp
+ movq 16(%rsi),%rbx
+ leaq 24(%rsi),%rsp
+.Lepilogue_ssse3:
+ movq 8(%rsp),%rdi
+ movq 16(%rsp),%rsi
+ retq
+.LSEH_end_sha1_block_data_order_ssse3:
+.p2align 6
+K_XX_XX:
+.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999
+.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1
+.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc
+.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6
+.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
+.byte 83,72,65,49,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
+.p2align 6
+
+.def se_handler; .scl 3; .type 32; .endef
+.p2align 4
+se_handler:
+ pushq %rsi
+ pushq %rdi
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+ pushfq
+ subq $64,%rsp
+
+ movq 120(%r8),%rax
+ movq 248(%r8),%rbx
+
+ leaq .Lprologue(%rip),%r10
+ cmpq %r10,%rbx
+ jb .Lcommon_seh_tail
+
+ movq 152(%r8),%rax
+
+ leaq .Lepilogue(%rip),%r10
+ cmpq %r10,%rbx
+ jae .Lcommon_seh_tail
+
+ movq 64(%rax),%rax
+ leaq 32(%rax),%rax
+
+ movq -8(%rax),%rbx
+ movq -16(%rax),%rbp
+ movq -24(%rax),%r12
+ movq -32(%rax),%r13
+ movq %rbx,144(%r8)
+ movq %rbp,160(%r8)
+ movq %r12,216(%r8)
+ movq %r13,224(%r8)
+
+ jmp .Lcommon_seh_tail
+
+
+.def ssse3_handler; .scl 3; .type 32; .endef
+.p2align 4
+ssse3_handler:
+ pushq %rsi
+ pushq %rdi
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+ pushfq
+ subq $64,%rsp
+
+ movq 120(%r8),%rax
+ movq 248(%r8),%rbx
+
+ movq 8(%r9),%rsi
+ movq 56(%r9),%r11
+
+ movl 0(%r11),%r10d
+ leaq (%rsi,%r10,1),%r10
+ cmpq %r10,%rbx
+ jb .Lcommon_seh_tail
+
+ movq 152(%r8),%rax
+
+ movl 4(%r11),%r10d
+ leaq (%rsi,%r10,1),%r10
+ cmpq %r10,%rbx
+ jae .Lcommon_seh_tail
+
+ leaq 64(%rax),%rsi
+ leaq 512(%r8),%rdi
+ movl $10,%ecx
+.long 0xa548f3fc
+ leaq 168(%rax),%rax
+
+ movq -8(%rax),%rbx
+ movq -16(%rax),%rbp
+ movq -24(%rax),%r12
+ movq %rbx,144(%r8)
+ movq %rbp,160(%r8)
+ movq %r12,216(%r8)
+
+.Lcommon_seh_tail:
+ movq 8(%rax),%rdi
+ movq 16(%rax),%rsi
+ movq %rax,152(%r8)
+ movq %rsi,168(%r8)
+ movq %rdi,176(%r8)
+
+ movq 40(%r9),%rdi
+ movq %r8,%rsi
+ movl $154,%ecx
+.long 0xa548f3fc
+
+ movq %r9,%rsi
+ xorq %rcx,%rcx
+ movq 8(%rsi),%rdx
+ movq 0(%rsi),%r8
+ movq 16(%rsi),%r9
+ movq 40(%rsi),%r10
+ leaq 56(%rsi),%r11
+ leaq 24(%rsi),%r12
+ movq %r10,32(%rsp)
+ movq %r11,40(%rsp)
+ movq %r12,48(%rsp)
+ movq %rcx,56(%rsp)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ movl $1,%eax
+ addq $64,%rsp
+ popfq
+ popq %r15
+ popq %r14
+ popq %r13
+ popq %r12
+ popq %rbp
+ popq %rbx
+ popq %rdi
+ popq %rsi
+ retq
+
+
+.section .pdata
+.p2align 2
+.rva .LSEH_begin_sha1_block_data_order
+.rva .LSEH_end_sha1_block_data_order
+.rva .LSEH_info_sha1_block_data_order
+.rva .LSEH_begin_sha1_block_data_order_ssse3
+.rva .LSEH_end_sha1_block_data_order_ssse3
+.rva .LSEH_info_sha1_block_data_order_ssse3
+.section .xdata
+.p2align 3
+.LSEH_info_sha1_block_data_order:
+.byte 9,0,0,0
+.rva se_handler
+.LSEH_info_sha1_block_data_order_ssse3:
+.byte 9,0,0,0
+.rva ssse3_handler
+.rva .Lprologue_ssse3,.Lepilogue_ssse3
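/*
 * Reference sketch (not part of the patch above): the K_XX_XX table in the
 * preceding SSSE3 code holds the four SHA-1 round constants plus a byte-order
 * mask, and each 20-round group switches the boolean function accordingly
 * (FIPS 180-4).  A minimal portable form of that round logic, assuming the
 * caller has already expanded the message schedule into w[0..79]; the helper
 * names (rotl32, sha1_rounds_sketch) are illustrative only.
 */
#include <stdint.h>

static inline uint32_t rotl32(uint32_t x, int n) { return (x << n) | (x >> (32 - n)); }

static void sha1_rounds_sketch(uint32_t s[5], const uint32_t w[80])
{
	uint32_t a = s[0], b = s[1], c = s[2], d = s[3], e = s[4];
	int i;

	for (i = 0; i < 80; i++) {
		uint32_t f, k, t;

		if (i < 20)      { f = (b & c) | (~b & d);          k = 0x5a827999; }
		else if (i < 40) { f = b ^ c ^ d;                   k = 0x6ed9eba1; }
		else if (i < 60) { f = (b & c) | (b & d) | (c & d); k = 0x8f1bbcdc; }
		else             { f = b ^ c ^ d;                   k = 0xca62c1d6; }

		t = rotl32(a, 5) + f + e + k + w[i];
		e = d; d = c; c = rotl32(b, 30); b = a; a = t;
	}
	s[0] += a; s[1] += b; s[2] += c; s[3] += d; s[4] += e;
}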
diff --git a/crypto/libressl/crypto/sha/sha1_one.c b/crypto/libressl/crypto/sha/sha1_one.c
new file mode 100644
index 0000000..8c8a0e9
--- /dev/null
+++ b/crypto/libressl/crypto/sha/sha1_one.c
@@ -0,0 +1,81 @@
+/* $OpenBSD: sha1_one.c,v 1.12 2015/09/10 15:56:26 jsing Exp $ */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include <openssl/opensslconf.h>
+
+//#include <openssl/crypto.h>
+#include <openssl/sha.h>
+
+#ifndef OPENSSL_NO_SHA1
+unsigned char *SHA1(const unsigned char *d, size_t n, unsigned char *md)
+ {
+ SHA_CTX c;
+ static unsigned char m[SHA_DIGEST_LENGTH];
+
+ if (md == NULL) md=m;
+ if (!SHA1_Init(&c))
+ return NULL;
+ SHA1_Update(&c,d,n);
+ SHA1_Final(md,&c);
+ explicit_bzero(&c,sizeof(c));
+ return(md);
+ }
+#endif
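/*
 * Usage sketch for the one-shot SHA1() wrapper defined in sha1_one.c above:
 * it returns NULL if SHA1_Init() fails, otherwise the digest pointer.  This
 * is a hypothetical caller, assuming only <openssl/sha.h> from this tree;
 * it is not part of the imported sources.
 */
#include <stdio.h>
#include <openssl/sha.h>

int main(void)
{
	const unsigned char msg[] = "abc";
	unsigned char digest[SHA_DIGEST_LENGTH];
	size_t i;

	if (SHA1(msg, sizeof(msg) - 1, digest) == NULL)
		return 1;
	for (i = 0; i < SHA_DIGEST_LENGTH; i++)
		printf("%02x", digest[i]);
	printf("\n");
	return 0;
}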
diff --git a/crypto/libressl/crypto/sha/sha1dgst.c b/crypto/libressl/crypto/sha/sha1dgst.c
new file mode 100644
index 0000000..b741f3b
--- /dev/null
+++ b/crypto/libressl/crypto/sha/sha1dgst.c
@@ -0,0 +1,72 @@
+/* $OpenBSD: sha1dgst.c,v 1.14 2015/09/13 21:09:56 doug Exp $ */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <openssl/opensslconf.h>
+
+//#include <openssl/crypto.h>
+
+#if !defined(OPENSSL_NO_SHA1) && !defined(OPENSSL_NO_SHA)
+
+#include <openssl/opensslv.h>
+
+/* The implementation is in ../md32_common.h */
+
+#include "sha_locl.h"
+
+#endif
+
diff --git a/crypto/libressl/crypto/sha/sha256-elf-armv4.S b/crypto/libressl/crypto/sha/sha256-elf-armv4.S
new file mode 100644
index 0000000..9b155c7
--- /dev/null
+++ b/crypto/libressl/crypto/sha/sha256-elf-armv4.S
@@ -0,0 +1,1520 @@
+#include "arm_arch.h"
+
+.text
+.code 32
+
+.type K256,%object
+.align 5
+K256:
+.word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+.word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+.word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+.word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+.word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+.word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+.word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+.word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+.word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+.word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+.word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+.word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+.word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+.word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+.size K256,.-K256
+
+.global sha256_block_data_order
+.type sha256_block_data_order,%function
+sha256_block_data_order:
+ sub r3,pc,#8 @ sha256_block_data_order
+ add r2,r1,r2,lsl#6 @ len to point at the end of inp
+ stmdb sp!,{r0,r1,r2,r4-r11,lr}
+ ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
+ sub r14,r3,#256 @ K256
+ sub sp,sp,#16*4 @ alloca(X[16])
+.Loop:
+#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
+ ldr r3,[r1],#4
+#else
+ ldrb r3,[r1,#3] @ 0
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+#endif
+ mov r0,r8,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r8,ror#11
+ eor r2,r9,r10
+#if 0>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 0==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r8,ror#25 @ Sigma1(e)
+ and r2,r2,r8
+ str r3,[sp,#0*4]
+ add r3,r3,r0
+ eor r2,r2,r10 @ Ch(e,f,g)
+ add r3,r3,r11
+ mov r11,r4,ror#2
+ add r3,r3,r2
+ eor r11,r11,r4,ror#13
+ add r3,r3,r12
+ eor r11,r11,r4,ror#22 @ Sigma0(a)
+#if 0>=15
+ ldr r1,[sp,#2*4] @ from BODY_16_xx
+#endif
+ orr r0,r4,r5
+ and r2,r4,r5
+ and r0,r0,r6
+ add r11,r11,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r7,r7,r3
+ add r11,r11,r0
+#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
+ ldr r3,[r1],#4
+#else
+ ldrb r3,[r1,#3] @ 1
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+#endif
+ mov r0,r7,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r7,ror#11
+ eor r2,r8,r9
+#if 1>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 1==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r7,ror#25 @ Sigma1(e)
+ and r2,r2,r7
+ str r3,[sp,#1*4]
+ add r3,r3,r0
+ eor r2,r2,r9 @ Ch(e,f,g)
+ add r3,r3,r10
+ mov r10,r11,ror#2
+ add r3,r3,r2
+ eor r10,r10,r11,ror#13
+ add r3,r3,r12
+ eor r10,r10,r11,ror#22 @ Sigma0(a)
+#if 1>=15
+ ldr r1,[sp,#3*4] @ from BODY_16_xx
+#endif
+ orr r0,r11,r4
+ and r2,r11,r4
+ and r0,r0,r5
+ add r10,r10,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r6,r6,r3
+ add r10,r10,r0
+#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
+ ldr r3,[r1],#4
+#else
+ ldrb r3,[r1,#3] @ 2
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+#endif
+ mov r0,r6,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r6,ror#11
+ eor r2,r7,r8
+#if 2>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 2==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r6,ror#25 @ Sigma1(e)
+ and r2,r2,r6
+ str r3,[sp,#2*4]
+ add r3,r3,r0
+ eor r2,r2,r8 @ Ch(e,f,g)
+ add r3,r3,r9
+ mov r9,r10,ror#2
+ add r3,r3,r2
+ eor r9,r9,r10,ror#13
+ add r3,r3,r12
+ eor r9,r9,r10,ror#22 @ Sigma0(a)
+#if 2>=15
+ ldr r1,[sp,#4*4] @ from BODY_16_xx
+#endif
+ orr r0,r10,r11
+ and r2,r10,r11
+ and r0,r0,r4
+ add r9,r9,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r5,r5,r3
+ add r9,r9,r0
+#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
+ ldr r3,[r1],#4
+#else
+ ldrb r3,[r1,#3] @ 3
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+#endif
+ mov r0,r5,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r5,ror#11
+ eor r2,r6,r7
+#if 3>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 3==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r5,ror#25 @ Sigma1(e)
+ and r2,r2,r5
+ str r3,[sp,#3*4]
+ add r3,r3,r0
+ eor r2,r2,r7 @ Ch(e,f,g)
+ add r3,r3,r8
+ mov r8,r9,ror#2
+ add r3,r3,r2
+ eor r8,r8,r9,ror#13
+ add r3,r3,r12
+ eor r8,r8,r9,ror#22 @ Sigma0(a)
+#if 3>=15
+ ldr r1,[sp,#5*4] @ from BODY_16_xx
+#endif
+ orr r0,r9,r10
+ and r2,r9,r10
+ and r0,r0,r11
+ add r8,r8,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r4,r4,r3
+ add r8,r8,r0
+#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
+ ldr r3,[r1],#4
+#else
+ ldrb r3,[r1,#3] @ 4
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+#endif
+ mov r0,r4,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r4,ror#11
+ eor r2,r5,r6
+#if 4>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 4==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r4,ror#25 @ Sigma1(e)
+ and r2,r2,r4
+ str r3,[sp,#4*4]
+ add r3,r3,r0
+ eor r2,r2,r6 @ Ch(e,f,g)
+ add r3,r3,r7
+ mov r7,r8,ror#2
+ add r3,r3,r2
+ eor r7,r7,r8,ror#13
+ add r3,r3,r12
+ eor r7,r7,r8,ror#22 @ Sigma0(a)
+#if 4>=15
+ ldr r1,[sp,#6*4] @ from BODY_16_xx
+#endif
+ orr r0,r8,r9
+ and r2,r8,r9
+ and r0,r0,r10
+ add r7,r7,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r11,r11,r3
+ add r7,r7,r0
+#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
+ ldr r3,[r1],#4
+#else
+ ldrb r3,[r1,#3] @ 5
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+#endif
+ mov r0,r11,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r11,ror#11
+ eor r2,r4,r5
+#if 5>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 5==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r11,ror#25 @ Sigma1(e)
+ and r2,r2,r11
+ str r3,[sp,#5*4]
+ add r3,r3,r0
+ eor r2,r2,r5 @ Ch(e,f,g)
+ add r3,r3,r6
+ mov r6,r7,ror#2
+ add r3,r3,r2
+ eor r6,r6,r7,ror#13
+ add r3,r3,r12
+ eor r6,r6,r7,ror#22 @ Sigma0(a)
+#if 5>=15
+ ldr r1,[sp,#7*4] @ from BODY_16_xx
+#endif
+ orr r0,r7,r8
+ and r2,r7,r8
+ and r0,r0,r9
+ add r6,r6,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r10,r10,r3
+ add r6,r6,r0
+#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
+ ldr r3,[r1],#4
+#else
+ ldrb r3,[r1,#3] @ 6
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+#endif
+ mov r0,r10,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r10,ror#11
+ eor r2,r11,r4
+#if 6>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 6==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r10,ror#25 @ Sigma1(e)
+ and r2,r2,r10
+ str r3,[sp,#6*4]
+ add r3,r3,r0
+ eor r2,r2,r4 @ Ch(e,f,g)
+ add r3,r3,r5
+ mov r5,r6,ror#2
+ add r3,r3,r2
+ eor r5,r5,r6,ror#13
+ add r3,r3,r12
+ eor r5,r5,r6,ror#22 @ Sigma0(a)
+#if 6>=15
+ ldr r1,[sp,#8*4] @ from BODY_16_xx
+#endif
+ orr r0,r6,r7
+ and r2,r6,r7
+ and r0,r0,r8
+ add r5,r5,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r9,r9,r3
+ add r5,r5,r0
+#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
+ ldr r3,[r1],#4
+#else
+ ldrb r3,[r1,#3] @ 7
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+#endif
+ mov r0,r9,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r9,ror#11
+ eor r2,r10,r11
+#if 7>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 7==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r9,ror#25 @ Sigma1(e)
+ and r2,r2,r9
+ str r3,[sp,#7*4]
+ add r3,r3,r0
+ eor r2,r2,r11 @ Ch(e,f,g)
+ add r3,r3,r4
+ mov r4,r5,ror#2
+ add r3,r3,r2
+ eor r4,r4,r5,ror#13
+ add r3,r3,r12
+ eor r4,r4,r5,ror#22 @ Sigma0(a)
+#if 7>=15
+ ldr r1,[sp,#9*4] @ from BODY_16_xx
+#endif
+ orr r0,r5,r6
+ and r2,r5,r6
+ and r0,r0,r7
+ add r4,r4,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r8,r8,r3
+ add r4,r4,r0
+#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
+ ldr r3,[r1],#4
+#else
+ ldrb r3,[r1,#3] @ 8
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+#endif
+ mov r0,r8,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r8,ror#11
+ eor r2,r9,r10
+#if 8>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 8==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r8,ror#25 @ Sigma1(e)
+ and r2,r2,r8
+ str r3,[sp,#8*4]
+ add r3,r3,r0
+ eor r2,r2,r10 @ Ch(e,f,g)
+ add r3,r3,r11
+ mov r11,r4,ror#2
+ add r3,r3,r2
+ eor r11,r11,r4,ror#13
+ add r3,r3,r12
+ eor r11,r11,r4,ror#22 @ Sigma0(a)
+#if 8>=15
+ ldr r1,[sp,#10*4] @ from BODY_16_xx
+#endif
+ orr r0,r4,r5
+ and r2,r4,r5
+ and r0,r0,r6
+ add r11,r11,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r7,r7,r3
+ add r11,r11,r0
+#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
+ ldr r3,[r1],#4
+#else
+ ldrb r3,[r1,#3] @ 9
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+#endif
+ mov r0,r7,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r7,ror#11
+ eor r2,r8,r9
+#if 9>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 9==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r7,ror#25 @ Sigma1(e)
+ and r2,r2,r7
+ str r3,[sp,#9*4]
+ add r3,r3,r0
+ eor r2,r2,r9 @ Ch(e,f,g)
+ add r3,r3,r10
+ mov r10,r11,ror#2
+ add r3,r3,r2
+ eor r10,r10,r11,ror#13
+ add r3,r3,r12
+ eor r10,r10,r11,ror#22 @ Sigma0(a)
+#if 9>=15
+ ldr r1,[sp,#11*4] @ from BODY_16_xx
+#endif
+ orr r0,r11,r4
+ and r2,r11,r4
+ and r0,r0,r5
+ add r10,r10,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r6,r6,r3
+ add r10,r10,r0
+#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
+ ldr r3,[r1],#4
+#else
+ ldrb r3,[r1,#3] @ 10
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+#endif
+ mov r0,r6,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r6,ror#11
+ eor r2,r7,r8
+#if 10>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 10==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r6,ror#25 @ Sigma1(e)
+ and r2,r2,r6
+ str r3,[sp,#10*4]
+ add r3,r3,r0
+ eor r2,r2,r8 @ Ch(e,f,g)
+ add r3,r3,r9
+ mov r9,r10,ror#2
+ add r3,r3,r2
+ eor r9,r9,r10,ror#13
+ add r3,r3,r12
+ eor r9,r9,r10,ror#22 @ Sigma0(a)
+#if 10>=15
+ ldr r1,[sp,#12*4] @ from BODY_16_xx
+#endif
+ orr r0,r10,r11
+ and r2,r10,r11
+ and r0,r0,r4
+ add r9,r9,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r5,r5,r3
+ add r9,r9,r0
+#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
+ ldr r3,[r1],#4
+#else
+ ldrb r3,[r1,#3] @ 11
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+#endif
+ mov r0,r5,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r5,ror#11
+ eor r2,r6,r7
+#if 11>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 11==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r5,ror#25 @ Sigma1(e)
+ and r2,r2,r5
+ str r3,[sp,#11*4]
+ add r3,r3,r0
+ eor r2,r2,r7 @ Ch(e,f,g)
+ add r3,r3,r8
+ mov r8,r9,ror#2
+ add r3,r3,r2
+ eor r8,r8,r9,ror#13
+ add r3,r3,r12
+ eor r8,r8,r9,ror#22 @ Sigma0(a)
+#if 11>=15
+ ldr r1,[sp,#13*4] @ from BODY_16_xx
+#endif
+ orr r0,r9,r10
+ and r2,r9,r10
+ and r0,r0,r11
+ add r8,r8,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r4,r4,r3
+ add r8,r8,r0
+#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
+ ldr r3,[r1],#4
+#else
+ ldrb r3,[r1,#3] @ 12
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+#endif
+ mov r0,r4,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r4,ror#11
+ eor r2,r5,r6
+#if 12>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 12==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r4,ror#25 @ Sigma1(e)
+ and r2,r2,r4
+ str r3,[sp,#12*4]
+ add r3,r3,r0
+ eor r2,r2,r6 @ Ch(e,f,g)
+ add r3,r3,r7
+ mov r7,r8,ror#2
+ add r3,r3,r2
+ eor r7,r7,r8,ror#13
+ add r3,r3,r12
+ eor r7,r7,r8,ror#22 @ Sigma0(a)
+#if 12>=15
+ ldr r1,[sp,#14*4] @ from BODY_16_xx
+#endif
+ orr r0,r8,r9
+ and r2,r8,r9
+ and r0,r0,r10
+ add r7,r7,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r11,r11,r3
+ add r7,r7,r0
+#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
+ ldr r3,[r1],#4
+#else
+ ldrb r3,[r1,#3] @ 13
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+#endif
+ mov r0,r11,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r11,ror#11
+ eor r2,r4,r5
+#if 13>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 13==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r11,ror#25 @ Sigma1(e)
+ and r2,r2,r11
+ str r3,[sp,#13*4]
+ add r3,r3,r0
+ eor r2,r2,r5 @ Ch(e,f,g)
+ add r3,r3,r6
+ mov r6,r7,ror#2
+ add r3,r3,r2
+ eor r6,r6,r7,ror#13
+ add r3,r3,r12
+ eor r6,r6,r7,ror#22 @ Sigma0(a)
+#if 13>=15
+ ldr r1,[sp,#15*4] @ from BODY_16_xx
+#endif
+ orr r0,r7,r8
+ and r2,r7,r8
+ and r0,r0,r9
+ add r6,r6,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r10,r10,r3
+ add r6,r6,r0
+#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
+ ldr r3,[r1],#4
+#else
+ ldrb r3,[r1,#3] @ 14
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+#endif
+ mov r0,r10,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r10,ror#11
+ eor r2,r11,r4
+#if 14>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 14==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r10,ror#25 @ Sigma1(e)
+ and r2,r2,r10
+ str r3,[sp,#14*4]
+ add r3,r3,r0
+ eor r2,r2,r4 @ Ch(e,f,g)
+ add r3,r3,r5
+ mov r5,r6,ror#2
+ add r3,r3,r2
+ eor r5,r5,r6,ror#13
+ add r3,r3,r12
+ eor r5,r5,r6,ror#22 @ Sigma0(a)
+#if 14>=15
+ ldr r1,[sp,#0*4] @ from BODY_16_xx
+#endif
+ orr r0,r6,r7
+ and r2,r6,r7
+ and r0,r0,r8
+ add r5,r5,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r9,r9,r3
+ add r5,r5,r0
+#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
+ ldr r3,[r1],#4
+#else
+ ldrb r3,[r1,#3] @ 15
+ ldrb r12,[r1,#2]
+ ldrb r2,[r1,#1]
+ ldrb r0,[r1],#4
+ orr r3,r3,r12,lsl#8
+ orr r3,r3,r2,lsl#16
+ orr r3,r3,r0,lsl#24
+#endif
+ mov r0,r9,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r9,ror#11
+ eor r2,r10,r11
+#if 15>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 15==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r9,ror#25 @ Sigma1(e)
+ and r2,r2,r9
+ str r3,[sp,#15*4]
+ add r3,r3,r0
+ eor r2,r2,r11 @ Ch(e,f,g)
+ add r3,r3,r4
+ mov r4,r5,ror#2
+ add r3,r3,r2
+ eor r4,r4,r5,ror#13
+ add r3,r3,r12
+ eor r4,r4,r5,ror#22 @ Sigma0(a)
+#if 15>=15
+ ldr r1,[sp,#1*4] @ from BODY_16_xx
+#endif
+ orr r0,r5,r6
+ and r2,r5,r6
+ and r0,r0,r7
+ add r4,r4,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r8,r8,r3
+ add r4,r4,r0
+.Lrounds_16_xx:
+ @ ldr r1,[sp,#1*4] @ 16
+ ldr r12,[sp,#14*4]
+ mov r0,r1,ror#7
+ ldr r3,[sp,#0*4]
+ eor r0,r0,r1,ror#18
+ ldr r2,[sp,#9*4]
+ eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+ mov r1,r12,ror#17
+ add r3,r3,r0
+ eor r1,r1,r12,ror#19
+ add r3,r3,r2
+ eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+ @ add r3,r3,r1
+ mov r0,r8,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r8,ror#11
+ eor r2,r9,r10
+#if 16>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 16==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r8,ror#25 @ Sigma1(e)
+ and r2,r2,r8
+ str r3,[sp,#0*4]
+ add r3,r3,r0
+ eor r2,r2,r10 @ Ch(e,f,g)
+ add r3,r3,r11
+ mov r11,r4,ror#2
+ add r3,r3,r2
+ eor r11,r11,r4,ror#13
+ add r3,r3,r12
+ eor r11,r11,r4,ror#22 @ Sigma0(a)
+#if 16>=15
+ ldr r1,[sp,#2*4] @ from BODY_16_xx
+#endif
+ orr r0,r4,r5
+ and r2,r4,r5
+ and r0,r0,r6
+ add r11,r11,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r7,r7,r3
+ add r11,r11,r0
+ @ ldr r1,[sp,#2*4] @ 17
+ ldr r12,[sp,#15*4]
+ mov r0,r1,ror#7
+ ldr r3,[sp,#1*4]
+ eor r0,r0,r1,ror#18
+ ldr r2,[sp,#10*4]
+ eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+ mov r1,r12,ror#17
+ add r3,r3,r0
+ eor r1,r1,r12,ror#19
+ add r3,r3,r2
+ eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+ @ add r3,r3,r1
+ mov r0,r7,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r7,ror#11
+ eor r2,r8,r9
+#if 17>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 17==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r7,ror#25 @ Sigma1(e)
+ and r2,r2,r7
+ str r3,[sp,#1*4]
+ add r3,r3,r0
+ eor r2,r2,r9 @ Ch(e,f,g)
+ add r3,r3,r10
+ mov r10,r11,ror#2
+ add r3,r3,r2
+ eor r10,r10,r11,ror#13
+ add r3,r3,r12
+ eor r10,r10,r11,ror#22 @ Sigma0(a)
+#if 17>=15
+ ldr r1,[sp,#3*4] @ from BODY_16_xx
+#endif
+ orr r0,r11,r4
+ and r2,r11,r4
+ and r0,r0,r5
+ add r10,r10,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r6,r6,r3
+ add r10,r10,r0
+ @ ldr r1,[sp,#3*4] @ 18
+ ldr r12,[sp,#0*4]
+ mov r0,r1,ror#7
+ ldr r3,[sp,#2*4]
+ eor r0,r0,r1,ror#18
+ ldr r2,[sp,#11*4]
+ eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+ mov r1,r12,ror#17
+ add r3,r3,r0
+ eor r1,r1,r12,ror#19
+ add r3,r3,r2
+ eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+ @ add r3,r3,r1
+ mov r0,r6,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r6,ror#11
+ eor r2,r7,r8
+#if 18>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 18==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r6,ror#25 @ Sigma1(e)
+ and r2,r2,r6
+ str r3,[sp,#2*4]
+ add r3,r3,r0
+ eor r2,r2,r8 @ Ch(e,f,g)
+ add r3,r3,r9
+ mov r9,r10,ror#2
+ add r3,r3,r2
+ eor r9,r9,r10,ror#13
+ add r3,r3,r12
+ eor r9,r9,r10,ror#22 @ Sigma0(a)
+#if 18>=15
+ ldr r1,[sp,#4*4] @ from BODY_16_xx
+#endif
+ orr r0,r10,r11
+ and r2,r10,r11
+ and r0,r0,r4
+ add r9,r9,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r5,r5,r3
+ add r9,r9,r0
+ @ ldr r1,[sp,#4*4] @ 19
+ ldr r12,[sp,#1*4]
+ mov r0,r1,ror#7
+ ldr r3,[sp,#3*4]
+ eor r0,r0,r1,ror#18
+ ldr r2,[sp,#12*4]
+ eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+ mov r1,r12,ror#17
+ add r3,r3,r0
+ eor r1,r1,r12,ror#19
+ add r3,r3,r2
+ eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+ @ add r3,r3,r1
+ mov r0,r5,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r5,ror#11
+ eor r2,r6,r7
+#if 19>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 19==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r5,ror#25 @ Sigma1(e)
+ and r2,r2,r5
+ str r3,[sp,#3*4]
+ add r3,r3,r0
+ eor r2,r2,r7 @ Ch(e,f,g)
+ add r3,r3,r8
+ mov r8,r9,ror#2
+ add r3,r3,r2
+ eor r8,r8,r9,ror#13
+ add r3,r3,r12
+ eor r8,r8,r9,ror#22 @ Sigma0(a)
+#if 19>=15
+ ldr r1,[sp,#5*4] @ from BODY_16_xx
+#endif
+ orr r0,r9,r10
+ and r2,r9,r10
+ and r0,r0,r11
+ add r8,r8,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r4,r4,r3
+ add r8,r8,r0
+ @ ldr r1,[sp,#5*4] @ 20
+ ldr r12,[sp,#2*4]
+ mov r0,r1,ror#7
+ ldr r3,[sp,#4*4]
+ eor r0,r0,r1,ror#18
+ ldr r2,[sp,#13*4]
+ eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+ mov r1,r12,ror#17
+ add r3,r3,r0
+ eor r1,r1,r12,ror#19
+ add r3,r3,r2
+ eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+ @ add r3,r3,r1
+ mov r0,r4,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r4,ror#11
+ eor r2,r5,r6
+#if 20>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 20==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r4,ror#25 @ Sigma1(e)
+ and r2,r2,r4
+ str r3,[sp,#4*4]
+ add r3,r3,r0
+ eor r2,r2,r6 @ Ch(e,f,g)
+ add r3,r3,r7
+ mov r7,r8,ror#2
+ add r3,r3,r2
+ eor r7,r7,r8,ror#13
+ add r3,r3,r12
+ eor r7,r7,r8,ror#22 @ Sigma0(a)
+#if 20>=15
+ ldr r1,[sp,#6*4] @ from BODY_16_xx
+#endif
+ orr r0,r8,r9
+ and r2,r8,r9
+ and r0,r0,r10
+ add r7,r7,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r11,r11,r3
+ add r7,r7,r0
+ @ ldr r1,[sp,#6*4] @ 21
+ ldr r12,[sp,#3*4]
+ mov r0,r1,ror#7
+ ldr r3,[sp,#5*4]
+ eor r0,r0,r1,ror#18
+ ldr r2,[sp,#14*4]
+ eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+ mov r1,r12,ror#17
+ add r3,r3,r0
+ eor r1,r1,r12,ror#19
+ add r3,r3,r2
+ eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+ @ add r3,r3,r1
+ mov r0,r11,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r11,ror#11
+ eor r2,r4,r5
+#if 21>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 21==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r11,ror#25 @ Sigma1(e)
+ and r2,r2,r11
+ str r3,[sp,#5*4]
+ add r3,r3,r0
+ eor r2,r2,r5 @ Ch(e,f,g)
+ add r3,r3,r6
+ mov r6,r7,ror#2
+ add r3,r3,r2
+ eor r6,r6,r7,ror#13
+ add r3,r3,r12
+ eor r6,r6,r7,ror#22 @ Sigma0(a)
+#if 21>=15
+ ldr r1,[sp,#7*4] @ from BODY_16_xx
+#endif
+ orr r0,r7,r8
+ and r2,r7,r8
+ and r0,r0,r9
+ add r6,r6,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r10,r10,r3
+ add r6,r6,r0
+ @ ldr r1,[sp,#7*4] @ 22
+ ldr r12,[sp,#4*4]
+ mov r0,r1,ror#7
+ ldr r3,[sp,#6*4]
+ eor r0,r0,r1,ror#18
+ ldr r2,[sp,#15*4]
+ eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+ mov r1,r12,ror#17
+ add r3,r3,r0
+ eor r1,r1,r12,ror#19
+ add r3,r3,r2
+ eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+ @ add r3,r3,r1
+ mov r0,r10,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r10,ror#11
+ eor r2,r11,r4
+#if 22>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 22==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r10,ror#25 @ Sigma1(e)
+ and r2,r2,r10
+ str r3,[sp,#6*4]
+ add r3,r3,r0
+ eor r2,r2,r4 @ Ch(e,f,g)
+ add r3,r3,r5
+ mov r5,r6,ror#2
+ add r3,r3,r2
+ eor r5,r5,r6,ror#13
+ add r3,r3,r12
+ eor r5,r5,r6,ror#22 @ Sigma0(a)
+#if 22>=15
+ ldr r1,[sp,#8*4] @ from BODY_16_xx
+#endif
+ orr r0,r6,r7
+ and r2,r6,r7
+ and r0,r0,r8
+ add r5,r5,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r9,r9,r3
+ add r5,r5,r0
+ @ ldr r1,[sp,#8*4] @ 23
+ ldr r12,[sp,#5*4]
+ mov r0,r1,ror#7
+ ldr r3,[sp,#7*4]
+ eor r0,r0,r1,ror#18
+ ldr r2,[sp,#0*4]
+ eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+ mov r1,r12,ror#17
+ add r3,r3,r0
+ eor r1,r1,r12,ror#19
+ add r3,r3,r2
+ eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+ @ add r3,r3,r1
+ mov r0,r9,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r9,ror#11
+ eor r2,r10,r11
+#if 23>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 23==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r9,ror#25 @ Sigma1(e)
+ and r2,r2,r9
+ str r3,[sp,#7*4]
+ add r3,r3,r0
+ eor r2,r2,r11 @ Ch(e,f,g)
+ add r3,r3,r4
+ mov r4,r5,ror#2
+ add r3,r3,r2
+ eor r4,r4,r5,ror#13
+ add r3,r3,r12
+ eor r4,r4,r5,ror#22 @ Sigma0(a)
+#if 23>=15
+ ldr r1,[sp,#9*4] @ from BODY_16_xx
+#endif
+ orr r0,r5,r6
+ and r2,r5,r6
+ and r0,r0,r7
+ add r4,r4,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r8,r8,r3
+ add r4,r4,r0
+ @ ldr r1,[sp,#9*4] @ 24
+ ldr r12,[sp,#6*4]
+ mov r0,r1,ror#7
+ ldr r3,[sp,#8*4]
+ eor r0,r0,r1,ror#18
+ ldr r2,[sp,#1*4]
+ eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+ mov r1,r12,ror#17
+ add r3,r3,r0
+ eor r1,r1,r12,ror#19
+ add r3,r3,r2
+ eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+ @ add r3,r3,r1
+ mov r0,r8,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r8,ror#11
+ eor r2,r9,r10
+#if 24>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 24==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r8,ror#25 @ Sigma1(e)
+ and r2,r2,r8
+ str r3,[sp,#8*4]
+ add r3,r3,r0
+ eor r2,r2,r10 @ Ch(e,f,g)
+ add r3,r3,r11
+ mov r11,r4,ror#2
+ add r3,r3,r2
+ eor r11,r11,r4,ror#13
+ add r3,r3,r12
+ eor r11,r11,r4,ror#22 @ Sigma0(a)
+#if 24>=15
+ ldr r1,[sp,#10*4] @ from BODY_16_xx
+#endif
+ orr r0,r4,r5
+ and r2,r4,r5
+ and r0,r0,r6
+ add r11,r11,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r7,r7,r3
+ add r11,r11,r0
+ @ ldr r1,[sp,#10*4] @ 25
+ ldr r12,[sp,#7*4]
+ mov r0,r1,ror#7
+ ldr r3,[sp,#9*4]
+ eor r0,r0,r1,ror#18
+ ldr r2,[sp,#2*4]
+ eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+ mov r1,r12,ror#17
+ add r3,r3,r0
+ eor r1,r1,r12,ror#19
+ add r3,r3,r2
+ eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+ @ add r3,r3,r1
+ mov r0,r7,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r7,ror#11
+ eor r2,r8,r9
+#if 25>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 25==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r7,ror#25 @ Sigma1(e)
+ and r2,r2,r7
+ str r3,[sp,#9*4]
+ add r3,r3,r0
+ eor r2,r2,r9 @ Ch(e,f,g)
+ add r3,r3,r10
+ mov r10,r11,ror#2
+ add r3,r3,r2
+ eor r10,r10,r11,ror#13
+ add r3,r3,r12
+ eor r10,r10,r11,ror#22 @ Sigma0(a)
+#if 25>=15
+ ldr r1,[sp,#11*4] @ from BODY_16_xx
+#endif
+ orr r0,r11,r4
+ and r2,r11,r4
+ and r0,r0,r5
+ add r10,r10,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r6,r6,r3
+ add r10,r10,r0
+ @ ldr r1,[sp,#11*4] @ 26
+ ldr r12,[sp,#8*4]
+ mov r0,r1,ror#7
+ ldr r3,[sp,#10*4]
+ eor r0,r0,r1,ror#18
+ ldr r2,[sp,#3*4]
+ eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+ mov r1,r12,ror#17
+ add r3,r3,r0
+ eor r1,r1,r12,ror#19
+ add r3,r3,r2
+ eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+ @ add r3,r3,r1
+ mov r0,r6,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r6,ror#11
+ eor r2,r7,r8
+#if 26>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 26==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r6,ror#25 @ Sigma1(e)
+ and r2,r2,r6
+ str r3,[sp,#10*4]
+ add r3,r3,r0
+ eor r2,r2,r8 @ Ch(e,f,g)
+ add r3,r3,r9
+ mov r9,r10,ror#2
+ add r3,r3,r2
+ eor r9,r9,r10,ror#13
+ add r3,r3,r12
+ eor r9,r9,r10,ror#22 @ Sigma0(a)
+#if 26>=15
+ ldr r1,[sp,#12*4] @ from BODY_16_xx
+#endif
+ orr r0,r10,r11
+ and r2,r10,r11
+ and r0,r0,r4
+ add r9,r9,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r5,r5,r3
+ add r9,r9,r0
+ @ ldr r1,[sp,#12*4] @ 27
+ ldr r12,[sp,#9*4]
+ mov r0,r1,ror#7
+ ldr r3,[sp,#11*4]
+ eor r0,r0,r1,ror#18
+ ldr r2,[sp,#4*4]
+ eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+ mov r1,r12,ror#17
+ add r3,r3,r0
+ eor r1,r1,r12,ror#19
+ add r3,r3,r2
+ eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+ @ add r3,r3,r1
+ mov r0,r5,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r5,ror#11
+ eor r2,r6,r7
+#if 27>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 27==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r5,ror#25 @ Sigma1(e)
+ and r2,r2,r5
+ str r3,[sp,#11*4]
+ add r3,r3,r0
+ eor r2,r2,r7 @ Ch(e,f,g)
+ add r3,r3,r8
+ mov r8,r9,ror#2
+ add r3,r3,r2
+ eor r8,r8,r9,ror#13
+ add r3,r3,r12
+ eor r8,r8,r9,ror#22 @ Sigma0(a)
+#if 27>=15
+ ldr r1,[sp,#13*4] @ from BODY_16_xx
+#endif
+ orr r0,r9,r10
+ and r2,r9,r10
+ and r0,r0,r11
+ add r8,r8,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r4,r4,r3
+ add r8,r8,r0
+ @ ldr r1,[sp,#13*4] @ 28
+ ldr r12,[sp,#10*4]
+ mov r0,r1,ror#7
+ ldr r3,[sp,#12*4]
+ eor r0,r0,r1,ror#18
+ ldr r2,[sp,#5*4]
+ eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+ mov r1,r12,ror#17
+ add r3,r3,r0
+ eor r1,r1,r12,ror#19
+ add r3,r3,r2
+ eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+ @ add r3,r3,r1
+ mov r0,r4,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r4,ror#11
+ eor r2,r5,r6
+#if 28>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 28==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r4,ror#25 @ Sigma1(e)
+ and r2,r2,r4
+ str r3,[sp,#12*4]
+ add r3,r3,r0
+ eor r2,r2,r6 @ Ch(e,f,g)
+ add r3,r3,r7
+ mov r7,r8,ror#2
+ add r3,r3,r2
+ eor r7,r7,r8,ror#13
+ add r3,r3,r12
+ eor r7,r7,r8,ror#22 @ Sigma0(a)
+#if 28>=15
+ ldr r1,[sp,#14*4] @ from BODY_16_xx
+#endif
+ orr r0,r8,r9
+ and r2,r8,r9
+ and r0,r0,r10
+ add r7,r7,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r11,r11,r3
+ add r7,r7,r0
+ @ ldr r1,[sp,#14*4] @ 29
+ ldr r12,[sp,#11*4]
+ mov r0,r1,ror#7
+ ldr r3,[sp,#13*4]
+ eor r0,r0,r1,ror#18
+ ldr r2,[sp,#6*4]
+ eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+ mov r1,r12,ror#17
+ add r3,r3,r0
+ eor r1,r1,r12,ror#19
+ add r3,r3,r2
+ eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+ @ add r3,r3,r1
+ mov r0,r11,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r11,ror#11
+ eor r2,r4,r5
+#if 29>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 29==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r11,ror#25 @ Sigma1(e)
+ and r2,r2,r11
+ str r3,[sp,#13*4]
+ add r3,r3,r0
+ eor r2,r2,r5 @ Ch(e,f,g)
+ add r3,r3,r6
+ mov r6,r7,ror#2
+ add r3,r3,r2
+ eor r6,r6,r7,ror#13
+ add r3,r3,r12
+ eor r6,r6,r7,ror#22 @ Sigma0(a)
+#if 29>=15
+ ldr r1,[sp,#15*4] @ from BODY_16_xx
+#endif
+ orr r0,r7,r8
+ and r2,r7,r8
+ and r0,r0,r9
+ add r6,r6,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r10,r10,r3
+ add r6,r6,r0
+ @ ldr r1,[sp,#15*4] @ 30
+ ldr r12,[sp,#12*4]
+ mov r0,r1,ror#7
+ ldr r3,[sp,#14*4]
+ eor r0,r0,r1,ror#18
+ ldr r2,[sp,#7*4]
+ eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+ mov r1,r12,ror#17
+ add r3,r3,r0
+ eor r1,r1,r12,ror#19
+ add r3,r3,r2
+ eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+ @ add r3,r3,r1
+ mov r0,r10,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r10,ror#11
+ eor r2,r11,r4
+#if 30>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 30==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r10,ror#25 @ Sigma1(e)
+ and r2,r2,r10
+ str r3,[sp,#14*4]
+ add r3,r3,r0
+ eor r2,r2,r4 @ Ch(e,f,g)
+ add r3,r3,r5
+ mov r5,r6,ror#2
+ add r3,r3,r2
+ eor r5,r5,r6,ror#13
+ add r3,r3,r12
+ eor r5,r5,r6,ror#22 @ Sigma0(a)
+#if 30>=15
+ ldr r1,[sp,#0*4] @ from BODY_16_xx
+#endif
+ orr r0,r6,r7
+ and r2,r6,r7
+ and r0,r0,r8
+ add r5,r5,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r9,r9,r3
+ add r5,r5,r0
+ @ ldr r1,[sp,#0*4] @ 31
+ ldr r12,[sp,#13*4]
+ mov r0,r1,ror#7
+ ldr r3,[sp,#15*4]
+ eor r0,r0,r1,ror#18
+ ldr r2,[sp,#8*4]
+ eor r0,r0,r1,lsr#3 @ sigma0(X[i+1])
+ mov r1,r12,ror#17
+ add r3,r3,r0
+ eor r1,r1,r12,ror#19
+ add r3,r3,r2
+ eor r1,r1,r12,lsr#10 @ sigma1(X[i+14])
+ @ add r3,r3,r1
+ mov r0,r9,ror#6
+ ldr r12,[r14],#4 @ *K256++
+ eor r0,r0,r9,ror#11
+ eor r2,r10,r11
+#if 31>=16
+ add r3,r3,r1 @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__) && !defined(__STRICT_ALIGNMENT)
+ rev r3,r3
+#endif
+#if 31==15
+ str r1,[sp,#17*4] @ leave room for r1
+#endif
+ eor r0,r0,r9,ror#25 @ Sigma1(e)
+ and r2,r2,r9
+ str r3,[sp,#15*4]
+ add r3,r3,r0
+ eor r2,r2,r11 @ Ch(e,f,g)
+ add r3,r3,r4
+ mov r4,r5,ror#2
+ add r3,r3,r2
+ eor r4,r4,r5,ror#13
+ add r3,r3,r12
+ eor r4,r4,r5,ror#22 @ Sigma0(a)
+#if 31>=15
+ ldr r1,[sp,#1*4] @ from BODY_16_xx
+#endif
+ orr r0,r5,r6
+ and r2,r5,r6
+ and r0,r0,r7
+ add r4,r4,r3
+ orr r0,r0,r2 @ Maj(a,b,c)
+ add r8,r8,r3
+ add r4,r4,r0
+ and r12,r12,#0xff
+ cmp r12,#0xf2
+ bne .Lrounds_16_xx
+
+ ldr r3,[sp,#16*4] @ pull ctx
+ ldr r0,[r3,#0]
+ ldr r2,[r3,#4]
+ ldr r12,[r3,#8]
+ add r4,r4,r0
+ ldr r0,[r3,#12]
+ add r5,r5,r2
+ ldr r2,[r3,#16]
+ add r6,r6,r12
+ ldr r12,[r3,#20]
+ add r7,r7,r0
+ ldr r0,[r3,#24]
+ add r8,r8,r2
+ ldr r2,[r3,#28]
+ add r9,r9,r12
+ ldr r1,[sp,#17*4] @ pull inp
+ ldr r12,[sp,#18*4] @ pull inp+len
+ add r10,r10,r0
+ add r11,r11,r2
+ stmia r3,{r4,r5,r6,r7,r8,r9,r10,r11}
+ cmp r1,r12
+ sub r14,r14,#256 @ rewind Ktbl
+ bne .Loop
+
+ add sp,sp,#19*4 @ destroy frame
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r4-r11,pc}
+#else
+ ldmia sp!,{r4-r11,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+#endif
+.size sha256_block_data_order,.-sha256_block_data_order
+.asciz "SHA256 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
+#if defined(HAVE_GNU_STACK)
+.section .note.GNU-stack,"",%progbits
+#endif
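/*
 * Reference sketch (not part of the patch above): the rotate/shift counts in
 * the ARMv4 code's Sigma/sigma/Ch/Maj comments are the standard FIPS 180-4
 * SHA-256 primitives, written out below in portable C.  Each round then
 * computes T1 = h + Sigma1(e) + Ch(e,f,g) + K256[i] + W[i] and
 * T2 = Sigma0(a) + Maj(a,b,c).  Helper names are illustrative only.
 */
#include <stdint.h>

static inline uint32_t ror32(uint32_t x, int n) { return (x >> n) | (x << (32 - n)); }

static inline uint32_t Ch(uint32_t e, uint32_t f, uint32_t g)  { return (e & f) ^ (~e & g); }
static inline uint32_t Maj(uint32_t a, uint32_t b, uint32_t c) { return (a & b) ^ (a & c) ^ (b & c); }
static inline uint32_t Sigma0(uint32_t a) { return ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22); }
static inline uint32_t Sigma1(uint32_t e) { return ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25); }
static inline uint32_t sigma0(uint32_t x) { return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3); }  /* message schedule */
static inline uint32_t sigma1(uint32_t x) { return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10); } /* message schedule */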
diff --git a/crypto/libressl/crypto/sha/sha256-elf-x86_64.S b/crypto/libressl/crypto/sha/sha256-elf-x86_64.S
new file mode 100644
index 0000000..9eea6a7
--- /dev/null
+++ b/crypto/libressl/crypto/sha/sha256-elf-x86_64.S
@@ -0,0 +1,1782 @@
+#include "x86_arch.h"
+.text
+
+.globl sha256_block_data_order
+.type sha256_block_data_order,@function
+.align 16
+sha256_block_data_order:
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+ movq %rsp,%r11
+ shlq $4,%rdx
+ subq $64+32,%rsp
+ leaq (%rsi,%rdx,4),%rdx
+ andq $-64,%rsp
+ movq %rdi,64+0(%rsp)
+ movq %rsi,64+8(%rsp)
+ movq %rdx,64+16(%rsp)
+ movq %r11,64+24(%rsp)
+.Lprologue:
+
+ leaq K256(%rip),%rbp
+
+ movl 0(%rdi),%eax
+ movl 4(%rdi),%ebx
+ movl 8(%rdi),%ecx
+ movl 12(%rdi),%edx
+ movl 16(%rdi),%r8d
+ movl 20(%rdi),%r9d
+ movl 24(%rdi),%r10d
+ movl 28(%rdi),%r11d
+ jmp .Lloop
+
+.align 16
+.Lloop:
+ xorq %rdi,%rdi
+ movl 0(%rsi),%r12d
+ movl %r8d,%r13d
+ movl %eax,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r9d,%r15d
+ movl %r12d,0(%rsp)
+
+ rorl $9,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ rorl $5,%r13d
+ addl %r11d,%r12d
+ xorl %eax,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r8d,%r15d
+ movl %ebx,%r11d
+
+ rorl $11,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ xorl %ecx,%r11d
+ xorl %eax,%r14d
+ addl %r15d,%r12d
+ movl %ebx,%r15d
+
+ rorl $6,%r13d
+ andl %eax,%r11d
+ andl %ecx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r11d
+
+ addl %r12d,%edx
+ addl %r12d,%r11d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r11d
+
+ movl 4(%rsi),%r12d
+ movl %edx,%r13d
+ movl %r11d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r8d,%r15d
+ movl %r12d,4(%rsp)
+
+ rorl $9,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ rorl $5,%r13d
+ addl %r10d,%r12d
+ xorl %r11d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %edx,%r15d
+ movl %eax,%r10d
+
+ rorl $11,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ xorl %ebx,%r10d
+ xorl %r11d,%r14d
+ addl %r15d,%r12d
+ movl %eax,%r15d
+
+ rorl $6,%r13d
+ andl %r11d,%r10d
+ andl %ebx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r10d
+
+ addl %r12d,%ecx
+ addl %r12d,%r10d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r10d
+
+ movl 8(%rsi),%r12d
+ movl %ecx,%r13d
+ movl %r10d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %edx,%r15d
+ movl %r12d,8(%rsp)
+
+ rorl $9,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ rorl $5,%r13d
+ addl %r9d,%r12d
+ xorl %r10d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ecx,%r15d
+ movl %r11d,%r9d
+
+ rorl $11,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ xorl %eax,%r9d
+ xorl %r10d,%r14d
+ addl %r15d,%r12d
+ movl %r11d,%r15d
+
+ rorl $6,%r13d
+ andl %r10d,%r9d
+ andl %eax,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r9d
+
+ addl %r12d,%ebx
+ addl %r12d,%r9d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r9d
+
+ movl 12(%rsi),%r12d
+ movl %ebx,%r13d
+ movl %r9d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %ecx,%r15d
+ movl %r12d,12(%rsp)
+
+ rorl $9,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ rorl $5,%r13d
+ addl %r8d,%r12d
+ xorl %r9d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ebx,%r15d
+ movl %r10d,%r8d
+
+ rorl $11,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ xorl %r11d,%r8d
+ xorl %r9d,%r14d
+ addl %r15d,%r12d
+ movl %r10d,%r15d
+
+ rorl $6,%r13d
+ andl %r9d,%r8d
+ andl %r11d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r8d
+
+ addl %r12d,%eax
+ addl %r12d,%r8d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r8d
+
+ movl 16(%rsi),%r12d
+ movl %eax,%r13d
+ movl %r8d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %ebx,%r15d
+ movl %r12d,16(%rsp)
+
+ rorl $9,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ rorl $5,%r13d
+ addl %edx,%r12d
+ xorl %r8d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %eax,%r15d
+ movl %r9d,%edx
+
+ rorl $11,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ xorl %r10d,%edx
+ xorl %r8d,%r14d
+ addl %r15d,%r12d
+ movl %r9d,%r15d
+
+ rorl $6,%r13d
+ andl %r8d,%edx
+ andl %r10d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%edx
+
+ addl %r12d,%r11d
+ addl %r12d,%edx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%edx
+
+ movl 20(%rsi),%r12d
+ movl %r11d,%r13d
+ movl %edx,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %eax,%r15d
+ movl %r12d,20(%rsp)
+
+ rorl $9,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ rorl $5,%r13d
+ addl %ecx,%r12d
+ xorl %edx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r11d,%r15d
+ movl %r8d,%ecx
+
+ rorl $11,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ xorl %r9d,%ecx
+ xorl %edx,%r14d
+ addl %r15d,%r12d
+ movl %r8d,%r15d
+
+ rorl $6,%r13d
+ andl %edx,%ecx
+ andl %r9d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ecx
+
+ addl %r12d,%r10d
+ addl %r12d,%ecx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ecx
+
+ movl 24(%rsi),%r12d
+ movl %r10d,%r13d
+ movl %ecx,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r11d,%r15d
+ movl %r12d,24(%rsp)
+
+ rorl $9,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ rorl $5,%r13d
+ addl %ebx,%r12d
+ xorl %ecx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r10d,%r15d
+ movl %edx,%ebx
+
+ rorl $11,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ xorl %r8d,%ebx
+ xorl %ecx,%r14d
+ addl %r15d,%r12d
+ movl %edx,%r15d
+
+ rorl $6,%r13d
+ andl %ecx,%ebx
+ andl %r8d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ebx
+
+ addl %r12d,%r9d
+ addl %r12d,%ebx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ebx
+
+ movl 28(%rsi),%r12d
+ movl %r9d,%r13d
+ movl %ebx,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r10d,%r15d
+ movl %r12d,28(%rsp)
+
+ rorl $9,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ rorl $5,%r13d
+ addl %eax,%r12d
+ xorl %ebx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r9d,%r15d
+ movl %ecx,%eax
+
+ rorl $11,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ xorl %edx,%eax
+ xorl %ebx,%r14d
+ addl %r15d,%r12d
+ movl %ecx,%r15d
+
+ rorl $6,%r13d
+ andl %ebx,%eax
+ andl %edx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%eax
+
+ addl %r12d,%r8d
+ addl %r12d,%eax
+ leaq 1(%rdi),%rdi
+ addl %r14d,%eax
+
+ movl 32(%rsi),%r12d
+ movl %r8d,%r13d
+ movl %eax,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r9d,%r15d
+ movl %r12d,32(%rsp)
+
+ rorl $9,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ rorl $5,%r13d
+ addl %r11d,%r12d
+ xorl %eax,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r8d,%r15d
+ movl %ebx,%r11d
+
+ rorl $11,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ xorl %ecx,%r11d
+ xorl %eax,%r14d
+ addl %r15d,%r12d
+ movl %ebx,%r15d
+
+ rorl $6,%r13d
+ andl %eax,%r11d
+ andl %ecx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r11d
+
+ addl %r12d,%edx
+ addl %r12d,%r11d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r11d
+
+ movl 36(%rsi),%r12d
+ movl %edx,%r13d
+ movl %r11d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r8d,%r15d
+ movl %r12d,36(%rsp)
+
+ rorl $9,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ rorl $5,%r13d
+ addl %r10d,%r12d
+ xorl %r11d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %edx,%r15d
+ movl %eax,%r10d
+
+ rorl $11,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ xorl %ebx,%r10d
+ xorl %r11d,%r14d
+ addl %r15d,%r12d
+ movl %eax,%r15d
+
+ rorl $6,%r13d
+ andl %r11d,%r10d
+ andl %ebx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r10d
+
+ addl %r12d,%ecx
+ addl %r12d,%r10d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r10d
+
+ movl 40(%rsi),%r12d
+ movl %ecx,%r13d
+ movl %r10d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %edx,%r15d
+ movl %r12d,40(%rsp)
+
+ rorl $9,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ rorl $5,%r13d
+ addl %r9d,%r12d
+ xorl %r10d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ecx,%r15d
+ movl %r11d,%r9d
+
+ rorl $11,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ xorl %eax,%r9d
+ xorl %r10d,%r14d
+ addl %r15d,%r12d
+ movl %r11d,%r15d
+
+ rorl $6,%r13d
+ andl %r10d,%r9d
+ andl %eax,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r9d
+
+ addl %r12d,%ebx
+ addl %r12d,%r9d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r9d
+
+ movl 44(%rsi),%r12d
+ movl %ebx,%r13d
+ movl %r9d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %ecx,%r15d
+ movl %r12d,44(%rsp)
+
+ rorl $9,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ rorl $5,%r13d
+ addl %r8d,%r12d
+ xorl %r9d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ebx,%r15d
+ movl %r10d,%r8d
+
+ rorl $11,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ xorl %r11d,%r8d
+ xorl %r9d,%r14d
+ addl %r15d,%r12d
+ movl %r10d,%r15d
+
+ rorl $6,%r13d
+ andl %r9d,%r8d
+ andl %r11d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r8d
+
+ addl %r12d,%eax
+ addl %r12d,%r8d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r8d
+
+ movl 48(%rsi),%r12d
+ movl %eax,%r13d
+ movl %r8d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %ebx,%r15d
+ movl %r12d,48(%rsp)
+
+ rorl $9,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ rorl $5,%r13d
+ addl %edx,%r12d
+ xorl %r8d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %eax,%r15d
+ movl %r9d,%edx
+
+ rorl $11,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ xorl %r10d,%edx
+ xorl %r8d,%r14d
+ addl %r15d,%r12d
+ movl %r9d,%r15d
+
+ rorl $6,%r13d
+ andl %r8d,%edx
+ andl %r10d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%edx
+
+ addl %r12d,%r11d
+ addl %r12d,%edx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%edx
+
+ movl 52(%rsi),%r12d
+ movl %r11d,%r13d
+ movl %edx,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %eax,%r15d
+ movl %r12d,52(%rsp)
+
+ rorl $9,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ rorl $5,%r13d
+ addl %ecx,%r12d
+ xorl %edx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r11d,%r15d
+ movl %r8d,%ecx
+
+ rorl $11,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ xorl %r9d,%ecx
+ xorl %edx,%r14d
+ addl %r15d,%r12d
+ movl %r8d,%r15d
+
+ rorl $6,%r13d
+ andl %edx,%ecx
+ andl %r9d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ecx
+
+ addl %r12d,%r10d
+ addl %r12d,%ecx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ecx
+
+ movl 56(%rsi),%r12d
+ movl %r10d,%r13d
+ movl %ecx,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r11d,%r15d
+ movl %r12d,56(%rsp)
+
+ rorl $9,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ rorl $5,%r13d
+ addl %ebx,%r12d
+ xorl %ecx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r10d,%r15d
+ movl %edx,%ebx
+
+ rorl $11,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ xorl %r8d,%ebx
+ xorl %ecx,%r14d
+ addl %r15d,%r12d
+ movl %edx,%r15d
+
+ rorl $6,%r13d
+ andl %ecx,%ebx
+ andl %r8d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ebx
+
+ addl %r12d,%r9d
+ addl %r12d,%ebx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ebx
+
+ movl 60(%rsi),%r12d
+ movl %r9d,%r13d
+ movl %ebx,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r10d,%r15d
+ movl %r12d,60(%rsp)
+
+ rorl $9,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ rorl $5,%r13d
+ addl %eax,%r12d
+ xorl %ebx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r9d,%r15d
+ movl %ecx,%eax
+
+ rorl $11,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ xorl %edx,%eax
+ xorl %ebx,%r14d
+ addl %r15d,%r12d
+ movl %ecx,%r15d
+
+ rorl $6,%r13d
+ andl %ebx,%eax
+ andl %edx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%eax
+
+ addl %r12d,%r8d
+ addl %r12d,%eax
+ leaq 1(%rdi),%rdi
+ addl %r14d,%eax
+
+ jmp .Lrounds_16_xx
+.align 16
+.Lrounds_16_xx:
+ movl 4(%rsp),%r13d
+ movl 56(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 36(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 0(%rsp),%r12d
+ movl %r8d,%r13d
+ addl %r14d,%r12d
+ movl %eax,%r14d
+ rorl $14,%r13d
+ movl %r9d,%r15d
+ movl %r12d,0(%rsp)
+
+ rorl $9,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ rorl $5,%r13d
+ addl %r11d,%r12d
+ xorl %eax,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r8d,%r15d
+ movl %ebx,%r11d
+
+ rorl $11,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ xorl %ecx,%r11d
+ xorl %eax,%r14d
+ addl %r15d,%r12d
+ movl %ebx,%r15d
+
+ rorl $6,%r13d
+ andl %eax,%r11d
+ andl %ecx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r11d
+
+ addl %r12d,%edx
+ addl %r12d,%r11d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r11d
+
+ movl 8(%rsp),%r13d
+ movl 60(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 40(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 4(%rsp),%r12d
+ movl %edx,%r13d
+ addl %r14d,%r12d
+ movl %r11d,%r14d
+ rorl $14,%r13d
+ movl %r8d,%r15d
+ movl %r12d,4(%rsp)
+
+ rorl $9,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ rorl $5,%r13d
+ addl %r10d,%r12d
+ xorl %r11d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %edx,%r15d
+ movl %eax,%r10d
+
+ rorl $11,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ xorl %ebx,%r10d
+ xorl %r11d,%r14d
+ addl %r15d,%r12d
+ movl %eax,%r15d
+
+ rorl $6,%r13d
+ andl %r11d,%r10d
+ andl %ebx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r10d
+
+ addl %r12d,%ecx
+ addl %r12d,%r10d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r10d
+
+ movl 12(%rsp),%r13d
+ movl 0(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 44(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 8(%rsp),%r12d
+ movl %ecx,%r13d
+ addl %r14d,%r12d
+ movl %r10d,%r14d
+ rorl $14,%r13d
+ movl %edx,%r15d
+ movl %r12d,8(%rsp)
+
+ rorl $9,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ rorl $5,%r13d
+ addl %r9d,%r12d
+ xorl %r10d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ecx,%r15d
+ movl %r11d,%r9d
+
+ rorl $11,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ xorl %eax,%r9d
+ xorl %r10d,%r14d
+ addl %r15d,%r12d
+ movl %r11d,%r15d
+
+ rorl $6,%r13d
+ andl %r10d,%r9d
+ andl %eax,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r9d
+
+ addl %r12d,%ebx
+ addl %r12d,%r9d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r9d
+
+ movl 16(%rsp),%r13d
+ movl 4(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 48(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 12(%rsp),%r12d
+ movl %ebx,%r13d
+ addl %r14d,%r12d
+ movl %r9d,%r14d
+ rorl $14,%r13d
+ movl %ecx,%r15d
+ movl %r12d,12(%rsp)
+
+ rorl $9,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ rorl $5,%r13d
+ addl %r8d,%r12d
+ xorl %r9d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ebx,%r15d
+ movl %r10d,%r8d
+
+ rorl $11,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ xorl %r11d,%r8d
+ xorl %r9d,%r14d
+ addl %r15d,%r12d
+ movl %r10d,%r15d
+
+ rorl $6,%r13d
+ andl %r9d,%r8d
+ andl %r11d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r8d
+
+ addl %r12d,%eax
+ addl %r12d,%r8d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r8d
+
+ movl 20(%rsp),%r13d
+ movl 8(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 52(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 16(%rsp),%r12d
+ movl %eax,%r13d
+ addl %r14d,%r12d
+ movl %r8d,%r14d
+ rorl $14,%r13d
+ movl %ebx,%r15d
+ movl %r12d,16(%rsp)
+
+ rorl $9,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ rorl $5,%r13d
+ addl %edx,%r12d
+ xorl %r8d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %eax,%r15d
+ movl %r9d,%edx
+
+ rorl $11,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ xorl %r10d,%edx
+ xorl %r8d,%r14d
+ addl %r15d,%r12d
+ movl %r9d,%r15d
+
+ rorl $6,%r13d
+ andl %r8d,%edx
+ andl %r10d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%edx
+
+ addl %r12d,%r11d
+ addl %r12d,%edx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%edx
+
+ movl 24(%rsp),%r13d
+ movl 12(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 56(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 20(%rsp),%r12d
+ movl %r11d,%r13d
+ addl %r14d,%r12d
+ movl %edx,%r14d
+ rorl $14,%r13d
+ movl %eax,%r15d
+ movl %r12d,20(%rsp)
+
+ rorl $9,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ rorl $5,%r13d
+ addl %ecx,%r12d
+ xorl %edx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r11d,%r15d
+ movl %r8d,%ecx
+
+ rorl $11,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ xorl %r9d,%ecx
+ xorl %edx,%r14d
+ addl %r15d,%r12d
+ movl %r8d,%r15d
+
+ rorl $6,%r13d
+ andl %edx,%ecx
+ andl %r9d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ecx
+
+ addl %r12d,%r10d
+ addl %r12d,%ecx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ecx
+
+ movl 28(%rsp),%r13d
+ movl 16(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 60(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 24(%rsp),%r12d
+ movl %r10d,%r13d
+ addl %r14d,%r12d
+ movl %ecx,%r14d
+ rorl $14,%r13d
+ movl %r11d,%r15d
+ movl %r12d,24(%rsp)
+
+ rorl $9,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ rorl $5,%r13d
+ addl %ebx,%r12d
+ xorl %ecx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r10d,%r15d
+ movl %edx,%ebx
+
+ rorl $11,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ xorl %r8d,%ebx
+ xorl %ecx,%r14d
+ addl %r15d,%r12d
+ movl %edx,%r15d
+
+ rorl $6,%r13d
+ andl %ecx,%ebx
+ andl %r8d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ebx
+
+ addl %r12d,%r9d
+ addl %r12d,%ebx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ebx
+
+ movl 32(%rsp),%r13d
+ movl 20(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 0(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 28(%rsp),%r12d
+ movl %r9d,%r13d
+ addl %r14d,%r12d
+ movl %ebx,%r14d
+ rorl $14,%r13d
+ movl %r10d,%r15d
+ movl %r12d,28(%rsp)
+
+ rorl $9,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ rorl $5,%r13d
+ addl %eax,%r12d
+ xorl %ebx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r9d,%r15d
+ movl %ecx,%eax
+
+ rorl $11,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ xorl %edx,%eax
+ xorl %ebx,%r14d
+ addl %r15d,%r12d
+ movl %ecx,%r15d
+
+ rorl $6,%r13d
+ andl %ebx,%eax
+ andl %edx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%eax
+
+ addl %r12d,%r8d
+ addl %r12d,%eax
+ leaq 1(%rdi),%rdi
+ addl %r14d,%eax
+
+ movl 36(%rsp),%r13d
+ movl 24(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 4(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 32(%rsp),%r12d
+ movl %r8d,%r13d
+ addl %r14d,%r12d
+ movl %eax,%r14d
+ rorl $14,%r13d
+ movl %r9d,%r15d
+ movl %r12d,32(%rsp)
+
+ rorl $9,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ rorl $5,%r13d
+ addl %r11d,%r12d
+ xorl %eax,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r8d,%r15d
+ movl %ebx,%r11d
+
+ rorl $11,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ xorl %ecx,%r11d
+ xorl %eax,%r14d
+ addl %r15d,%r12d
+ movl %ebx,%r15d
+
+ rorl $6,%r13d
+ andl %eax,%r11d
+ andl %ecx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r11d
+
+ addl %r12d,%edx
+ addl %r12d,%r11d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r11d
+
+ movl 40(%rsp),%r13d
+ movl 28(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 8(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 36(%rsp),%r12d
+ movl %edx,%r13d
+ addl %r14d,%r12d
+ movl %r11d,%r14d
+ rorl $14,%r13d
+ movl %r8d,%r15d
+ movl %r12d,36(%rsp)
+
+ rorl $9,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ rorl $5,%r13d
+ addl %r10d,%r12d
+ xorl %r11d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %edx,%r15d
+ movl %eax,%r10d
+
+ rorl $11,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ xorl %ebx,%r10d
+ xorl %r11d,%r14d
+ addl %r15d,%r12d
+ movl %eax,%r15d
+
+ rorl $6,%r13d
+ andl %r11d,%r10d
+ andl %ebx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r10d
+
+ addl %r12d,%ecx
+ addl %r12d,%r10d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r10d
+
+ movl 44(%rsp),%r13d
+ movl 32(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 12(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 40(%rsp),%r12d
+ movl %ecx,%r13d
+ addl %r14d,%r12d
+ movl %r10d,%r14d
+ rorl $14,%r13d
+ movl %edx,%r15d
+ movl %r12d,40(%rsp)
+
+ rorl $9,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ rorl $5,%r13d
+ addl %r9d,%r12d
+ xorl %r10d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ecx,%r15d
+ movl %r11d,%r9d
+
+ rorl $11,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ xorl %eax,%r9d
+ xorl %r10d,%r14d
+ addl %r15d,%r12d
+ movl %r11d,%r15d
+
+ rorl $6,%r13d
+ andl %r10d,%r9d
+ andl %eax,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r9d
+
+ addl %r12d,%ebx
+ addl %r12d,%r9d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r9d
+
+ movl 48(%rsp),%r13d
+ movl 36(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 16(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 44(%rsp),%r12d
+ movl %ebx,%r13d
+ addl %r14d,%r12d
+ movl %r9d,%r14d
+ rorl $14,%r13d
+ movl %ecx,%r15d
+ movl %r12d,44(%rsp)
+
+ rorl $9,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ rorl $5,%r13d
+ addl %r8d,%r12d
+ xorl %r9d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ebx,%r15d
+ movl %r10d,%r8d
+
+ rorl $11,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ xorl %r11d,%r8d
+ xorl %r9d,%r14d
+ addl %r15d,%r12d
+ movl %r10d,%r15d
+
+ rorl $6,%r13d
+ andl %r9d,%r8d
+ andl %r11d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r8d
+
+ addl %r12d,%eax
+ addl %r12d,%r8d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r8d
+
+ movl 52(%rsp),%r13d
+ movl 40(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 20(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 48(%rsp),%r12d
+ movl %eax,%r13d
+ addl %r14d,%r12d
+ movl %r8d,%r14d
+ rorl $14,%r13d
+ movl %ebx,%r15d
+ movl %r12d,48(%rsp)
+
+ rorl $9,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ rorl $5,%r13d
+ addl %edx,%r12d
+ xorl %r8d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %eax,%r15d
+ movl %r9d,%edx
+
+ rorl $11,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ xorl %r10d,%edx
+ xorl %r8d,%r14d
+ addl %r15d,%r12d
+ movl %r9d,%r15d
+
+ rorl $6,%r13d
+ andl %r8d,%edx
+ andl %r10d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%edx
+
+ addl %r12d,%r11d
+ addl %r12d,%edx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%edx
+
+ movl 56(%rsp),%r13d
+ movl 44(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 24(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 52(%rsp),%r12d
+ movl %r11d,%r13d
+ addl %r14d,%r12d
+ movl %edx,%r14d
+ rorl $14,%r13d
+ movl %eax,%r15d
+ movl %r12d,52(%rsp)
+
+ rorl $9,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ rorl $5,%r13d
+ addl %ecx,%r12d
+ xorl %edx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r11d,%r15d
+ movl %r8d,%ecx
+
+ rorl $11,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ xorl %r9d,%ecx
+ xorl %edx,%r14d
+ addl %r15d,%r12d
+ movl %r8d,%r15d
+
+ rorl $6,%r13d
+ andl %edx,%ecx
+ andl %r9d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ecx
+
+ addl %r12d,%r10d
+ addl %r12d,%ecx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ecx
+
+ movl 60(%rsp),%r13d
+ movl 48(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 28(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 56(%rsp),%r12d
+ movl %r10d,%r13d
+ addl %r14d,%r12d
+ movl %ecx,%r14d
+ rorl $14,%r13d
+ movl %r11d,%r15d
+ movl %r12d,56(%rsp)
+
+ rorl $9,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ rorl $5,%r13d
+ addl %ebx,%r12d
+ xorl %ecx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r10d,%r15d
+ movl %edx,%ebx
+
+ rorl $11,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ xorl %r8d,%ebx
+ xorl %ecx,%r14d
+ addl %r15d,%r12d
+ movl %edx,%r15d
+
+ rorl $6,%r13d
+ andl %ecx,%ebx
+ andl %r8d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ebx
+
+ addl %r12d,%r9d
+ addl %r12d,%ebx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ebx
+
+ movl 0(%rsp),%r13d
+ movl 52(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 32(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 60(%rsp),%r12d
+ movl %r9d,%r13d
+ addl %r14d,%r12d
+ movl %ebx,%r14d
+ rorl $14,%r13d
+ movl %r10d,%r15d
+ movl %r12d,60(%rsp)
+
+ rorl $9,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ rorl $5,%r13d
+ addl %eax,%r12d
+ xorl %ebx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r9d,%r15d
+ movl %ecx,%eax
+
+ rorl $11,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ xorl %edx,%eax
+ xorl %ebx,%r14d
+ addl %r15d,%r12d
+ movl %ecx,%r15d
+
+ rorl $6,%r13d
+ andl %ebx,%eax
+ andl %edx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%eax
+
+ addl %r12d,%r8d
+ addl %r12d,%eax
+ leaq 1(%rdi),%rdi
+ addl %r14d,%eax
+
+ cmpq $64,%rdi
+ jb .Lrounds_16_xx
+
+ movq 64+0(%rsp),%rdi
+ leaq 64(%rsi),%rsi
+
+ addl 0(%rdi),%eax
+ addl 4(%rdi),%ebx
+ addl 8(%rdi),%ecx
+ addl 12(%rdi),%edx
+ addl 16(%rdi),%r8d
+ addl 20(%rdi),%r9d
+ addl 24(%rdi),%r10d
+ addl 28(%rdi),%r11d
+
+ cmpq 64+16(%rsp),%rsi
+
+ movl %eax,0(%rdi)
+ movl %ebx,4(%rdi)
+ movl %ecx,8(%rdi)
+ movl %edx,12(%rdi)
+ movl %r8d,16(%rdi)
+ movl %r9d,20(%rdi)
+ movl %r10d,24(%rdi)
+ movl %r11d,28(%rdi)
+ jb .Lloop
+
+ movq 64+24(%rsp),%rsi
+ movq (%rsi),%r15
+ movq 8(%rsi),%r14
+ movq 16(%rsi),%r13
+ movq 24(%rsi),%r12
+ movq 32(%rsi),%rbp
+ movq 40(%rsi),%rbx
+ leaq 48(%rsi),%rsp
+.Lepilogue:
+ retq
+.size sha256_block_data_order,.-sha256_block_data_order
+.align 64
+.type K256,@object
+K256:
+.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+#if defined(HAVE_GNU_STACK)
+.section .note.GNU-stack,"",%progbits
+#endif
diff --git a/crypto/libressl/crypto/sha/sha256-macosx-x86_64.S b/crypto/libressl/crypto/sha/sha256-macosx-x86_64.S
new file mode 100644
index 0000000..4b468b7
--- /dev/null
+++ b/crypto/libressl/crypto/sha/sha256-macosx-x86_64.S
@@ -0,0 +1,1779 @@
+#include "x86_arch.h"
+.text
+
+.globl _sha256_block_data_order
+
+.p2align 4
+_sha256_block_data_order:
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+ movq %rsp,%r11
+ shlq $4,%rdx
+ subq $64+32,%rsp
+ leaq (%rsi,%rdx,4),%rdx
+ andq $-64,%rsp
+ movq %rdi,64+0(%rsp)
+ movq %rsi,64+8(%rsp)
+ movq %rdx,64+16(%rsp)
+ movq %r11,64+24(%rsp)
+L$prologue:
+
+ leaq K256(%rip),%rbp
+
+ movl 0(%rdi),%eax
+ movl 4(%rdi),%ebx
+ movl 8(%rdi),%ecx
+ movl 12(%rdi),%edx
+ movl 16(%rdi),%r8d
+ movl 20(%rdi),%r9d
+ movl 24(%rdi),%r10d
+ movl 28(%rdi),%r11d
+ jmp L$loop
+
+.p2align 4
+L$loop:
+ xorq %rdi,%rdi
+ movl 0(%rsi),%r12d
+ movl %r8d,%r13d
+ movl %eax,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r9d,%r15d
+ movl %r12d,0(%rsp)
+
+ rorl $9,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ rorl $5,%r13d
+ addl %r11d,%r12d
+ xorl %eax,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r8d,%r15d
+ movl %ebx,%r11d
+
+ rorl $11,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ xorl %ecx,%r11d
+ xorl %eax,%r14d
+ addl %r15d,%r12d
+ movl %ebx,%r15d
+
+ rorl $6,%r13d
+ andl %eax,%r11d
+ andl %ecx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r11d
+
+ addl %r12d,%edx
+ addl %r12d,%r11d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r11d
+
+ movl 4(%rsi),%r12d
+ movl %edx,%r13d
+ movl %r11d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r8d,%r15d
+ movl %r12d,4(%rsp)
+
+ rorl $9,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ rorl $5,%r13d
+ addl %r10d,%r12d
+ xorl %r11d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %edx,%r15d
+ movl %eax,%r10d
+
+ rorl $11,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ xorl %ebx,%r10d
+ xorl %r11d,%r14d
+ addl %r15d,%r12d
+ movl %eax,%r15d
+
+ rorl $6,%r13d
+ andl %r11d,%r10d
+ andl %ebx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r10d
+
+ addl %r12d,%ecx
+ addl %r12d,%r10d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r10d
+
+ movl 8(%rsi),%r12d
+ movl %ecx,%r13d
+ movl %r10d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %edx,%r15d
+ movl %r12d,8(%rsp)
+
+ rorl $9,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ rorl $5,%r13d
+ addl %r9d,%r12d
+ xorl %r10d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ecx,%r15d
+ movl %r11d,%r9d
+
+ rorl $11,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ xorl %eax,%r9d
+ xorl %r10d,%r14d
+ addl %r15d,%r12d
+ movl %r11d,%r15d
+
+ rorl $6,%r13d
+ andl %r10d,%r9d
+ andl %eax,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r9d
+
+ addl %r12d,%ebx
+ addl %r12d,%r9d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r9d
+
+ movl 12(%rsi),%r12d
+ movl %ebx,%r13d
+ movl %r9d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %ecx,%r15d
+ movl %r12d,12(%rsp)
+
+ rorl $9,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ rorl $5,%r13d
+ addl %r8d,%r12d
+ xorl %r9d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ebx,%r15d
+ movl %r10d,%r8d
+
+ rorl $11,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ xorl %r11d,%r8d
+ xorl %r9d,%r14d
+ addl %r15d,%r12d
+ movl %r10d,%r15d
+
+ rorl $6,%r13d
+ andl %r9d,%r8d
+ andl %r11d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r8d
+
+ addl %r12d,%eax
+ addl %r12d,%r8d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r8d
+
+ movl 16(%rsi),%r12d
+ movl %eax,%r13d
+ movl %r8d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %ebx,%r15d
+ movl %r12d,16(%rsp)
+
+ rorl $9,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ rorl $5,%r13d
+ addl %edx,%r12d
+ xorl %r8d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %eax,%r15d
+ movl %r9d,%edx
+
+ rorl $11,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ xorl %r10d,%edx
+ xorl %r8d,%r14d
+ addl %r15d,%r12d
+ movl %r9d,%r15d
+
+ rorl $6,%r13d
+ andl %r8d,%edx
+ andl %r10d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%edx
+
+ addl %r12d,%r11d
+ addl %r12d,%edx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%edx
+
+ movl 20(%rsi),%r12d
+ movl %r11d,%r13d
+ movl %edx,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %eax,%r15d
+ movl %r12d,20(%rsp)
+
+ rorl $9,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ rorl $5,%r13d
+ addl %ecx,%r12d
+ xorl %edx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r11d,%r15d
+ movl %r8d,%ecx
+
+ rorl $11,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ xorl %r9d,%ecx
+ xorl %edx,%r14d
+ addl %r15d,%r12d
+ movl %r8d,%r15d
+
+ rorl $6,%r13d
+ andl %edx,%ecx
+ andl %r9d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ecx
+
+ addl %r12d,%r10d
+ addl %r12d,%ecx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ecx
+
+ movl 24(%rsi),%r12d
+ movl %r10d,%r13d
+ movl %ecx,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r11d,%r15d
+ movl %r12d,24(%rsp)
+
+ rorl $9,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ rorl $5,%r13d
+ addl %ebx,%r12d
+ xorl %ecx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r10d,%r15d
+ movl %edx,%ebx
+
+ rorl $11,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ xorl %r8d,%ebx
+ xorl %ecx,%r14d
+ addl %r15d,%r12d
+ movl %edx,%r15d
+
+ rorl $6,%r13d
+ andl %ecx,%ebx
+ andl %r8d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ebx
+
+ addl %r12d,%r9d
+ addl %r12d,%ebx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ebx
+
+ movl 28(%rsi),%r12d
+ movl %r9d,%r13d
+ movl %ebx,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r10d,%r15d
+ movl %r12d,28(%rsp)
+
+ rorl $9,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ rorl $5,%r13d
+ addl %eax,%r12d
+ xorl %ebx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r9d,%r15d
+ movl %ecx,%eax
+
+ rorl $11,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ xorl %edx,%eax
+ xorl %ebx,%r14d
+ addl %r15d,%r12d
+ movl %ecx,%r15d
+
+ rorl $6,%r13d
+ andl %ebx,%eax
+ andl %edx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%eax
+
+ addl %r12d,%r8d
+ addl %r12d,%eax
+ leaq 1(%rdi),%rdi
+ addl %r14d,%eax
+
+ movl 32(%rsi),%r12d
+ movl %r8d,%r13d
+ movl %eax,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r9d,%r15d
+ movl %r12d,32(%rsp)
+
+ rorl $9,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ rorl $5,%r13d
+ addl %r11d,%r12d
+ xorl %eax,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r8d,%r15d
+ movl %ebx,%r11d
+
+ rorl $11,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ xorl %ecx,%r11d
+ xorl %eax,%r14d
+ addl %r15d,%r12d
+ movl %ebx,%r15d
+
+ rorl $6,%r13d
+ andl %eax,%r11d
+ andl %ecx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r11d
+
+ addl %r12d,%edx
+ addl %r12d,%r11d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r11d
+
+ movl 36(%rsi),%r12d
+ movl %edx,%r13d
+ movl %r11d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r8d,%r15d
+ movl %r12d,36(%rsp)
+
+ rorl $9,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ rorl $5,%r13d
+ addl %r10d,%r12d
+ xorl %r11d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %edx,%r15d
+ movl %eax,%r10d
+
+ rorl $11,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ xorl %ebx,%r10d
+ xorl %r11d,%r14d
+ addl %r15d,%r12d
+ movl %eax,%r15d
+
+ rorl $6,%r13d
+ andl %r11d,%r10d
+ andl %ebx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r10d
+
+ addl %r12d,%ecx
+ addl %r12d,%r10d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r10d
+
+ movl 40(%rsi),%r12d
+ movl %ecx,%r13d
+ movl %r10d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %edx,%r15d
+ movl %r12d,40(%rsp)
+
+ rorl $9,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ rorl $5,%r13d
+ addl %r9d,%r12d
+ xorl %r10d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ecx,%r15d
+ movl %r11d,%r9d
+
+ rorl $11,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ xorl %eax,%r9d
+ xorl %r10d,%r14d
+ addl %r15d,%r12d
+ movl %r11d,%r15d
+
+ rorl $6,%r13d
+ andl %r10d,%r9d
+ andl %eax,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r9d
+
+ addl %r12d,%ebx
+ addl %r12d,%r9d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r9d
+
+ movl 44(%rsi),%r12d
+ movl %ebx,%r13d
+ movl %r9d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %ecx,%r15d
+ movl %r12d,44(%rsp)
+
+ rorl $9,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ rorl $5,%r13d
+ addl %r8d,%r12d
+ xorl %r9d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ebx,%r15d
+ movl %r10d,%r8d
+
+ rorl $11,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ xorl %r11d,%r8d
+ xorl %r9d,%r14d
+ addl %r15d,%r12d
+ movl %r10d,%r15d
+
+ rorl $6,%r13d
+ andl %r9d,%r8d
+ andl %r11d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r8d
+
+ addl %r12d,%eax
+ addl %r12d,%r8d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r8d
+
+ movl 48(%rsi),%r12d
+ movl %eax,%r13d
+ movl %r8d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %ebx,%r15d
+ movl %r12d,48(%rsp)
+
+ rorl $9,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ rorl $5,%r13d
+ addl %edx,%r12d
+ xorl %r8d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %eax,%r15d
+ movl %r9d,%edx
+
+ rorl $11,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ xorl %r10d,%edx
+ xorl %r8d,%r14d
+ addl %r15d,%r12d
+ movl %r9d,%r15d
+
+ rorl $6,%r13d
+ andl %r8d,%edx
+ andl %r10d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%edx
+
+ addl %r12d,%r11d
+ addl %r12d,%edx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%edx
+
+ movl 52(%rsi),%r12d
+ movl %r11d,%r13d
+ movl %edx,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %eax,%r15d
+ movl %r12d,52(%rsp)
+
+ rorl $9,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ rorl $5,%r13d
+ addl %ecx,%r12d
+ xorl %edx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r11d,%r15d
+ movl %r8d,%ecx
+
+ rorl $11,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ xorl %r9d,%ecx
+ xorl %edx,%r14d
+ addl %r15d,%r12d
+ movl %r8d,%r15d
+
+ rorl $6,%r13d
+ andl %edx,%ecx
+ andl %r9d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ecx
+
+ addl %r12d,%r10d
+ addl %r12d,%ecx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ecx
+
+ movl 56(%rsi),%r12d
+ movl %r10d,%r13d
+ movl %ecx,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r11d,%r15d
+ movl %r12d,56(%rsp)
+
+ rorl $9,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ rorl $5,%r13d
+ addl %ebx,%r12d
+ xorl %ecx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r10d,%r15d
+ movl %edx,%ebx
+
+ rorl $11,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ xorl %r8d,%ebx
+ xorl %ecx,%r14d
+ addl %r15d,%r12d
+ movl %edx,%r15d
+
+ rorl $6,%r13d
+ andl %ecx,%ebx
+ andl %r8d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ebx
+
+ addl %r12d,%r9d
+ addl %r12d,%ebx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ebx
+
+ movl 60(%rsi),%r12d
+ movl %r9d,%r13d
+ movl %ebx,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r10d,%r15d
+ movl %r12d,60(%rsp)
+
+ rorl $9,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ rorl $5,%r13d
+ addl %eax,%r12d
+ xorl %ebx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r9d,%r15d
+ movl %ecx,%eax
+
+ rorl $11,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ xorl %edx,%eax
+ xorl %ebx,%r14d
+ addl %r15d,%r12d
+ movl %ecx,%r15d
+
+ rorl $6,%r13d
+ andl %ebx,%eax
+ andl %edx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%eax
+
+ addl %r12d,%r8d
+ addl %r12d,%eax
+ leaq 1(%rdi),%rdi
+ addl %r14d,%eax
+
+ jmp L$rounds_16_xx
+.p2align 4
+L$rounds_16_xx:
+ movl 4(%rsp),%r13d
+ movl 56(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 36(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 0(%rsp),%r12d
+ movl %r8d,%r13d
+ addl %r14d,%r12d
+ movl %eax,%r14d
+ rorl $14,%r13d
+ movl %r9d,%r15d
+ movl %r12d,0(%rsp)
+
+ rorl $9,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ rorl $5,%r13d
+ addl %r11d,%r12d
+ xorl %eax,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r8d,%r15d
+ movl %ebx,%r11d
+
+ rorl $11,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ xorl %ecx,%r11d
+ xorl %eax,%r14d
+ addl %r15d,%r12d
+ movl %ebx,%r15d
+
+ rorl $6,%r13d
+ andl %eax,%r11d
+ andl %ecx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r11d
+
+ addl %r12d,%edx
+ addl %r12d,%r11d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r11d
+
+ movl 8(%rsp),%r13d
+ movl 60(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 40(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 4(%rsp),%r12d
+ movl %edx,%r13d
+ addl %r14d,%r12d
+ movl %r11d,%r14d
+ rorl $14,%r13d
+ movl %r8d,%r15d
+ movl %r12d,4(%rsp)
+
+ rorl $9,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ rorl $5,%r13d
+ addl %r10d,%r12d
+ xorl %r11d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %edx,%r15d
+ movl %eax,%r10d
+
+ rorl $11,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ xorl %ebx,%r10d
+ xorl %r11d,%r14d
+ addl %r15d,%r12d
+ movl %eax,%r15d
+
+ rorl $6,%r13d
+ andl %r11d,%r10d
+ andl %ebx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r10d
+
+ addl %r12d,%ecx
+ addl %r12d,%r10d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r10d
+
+ movl 12(%rsp),%r13d
+ movl 0(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 44(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 8(%rsp),%r12d
+ movl %ecx,%r13d
+ addl %r14d,%r12d
+ movl %r10d,%r14d
+ rorl $14,%r13d
+ movl %edx,%r15d
+ movl %r12d,8(%rsp)
+
+ rorl $9,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ rorl $5,%r13d
+ addl %r9d,%r12d
+ xorl %r10d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ecx,%r15d
+ movl %r11d,%r9d
+
+ rorl $11,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ xorl %eax,%r9d
+ xorl %r10d,%r14d
+ addl %r15d,%r12d
+ movl %r11d,%r15d
+
+ rorl $6,%r13d
+ andl %r10d,%r9d
+ andl %eax,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r9d
+
+ addl %r12d,%ebx
+ addl %r12d,%r9d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r9d
+
+ movl 16(%rsp),%r13d
+ movl 4(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 48(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 12(%rsp),%r12d
+ movl %ebx,%r13d
+ addl %r14d,%r12d
+ movl %r9d,%r14d
+ rorl $14,%r13d
+ movl %ecx,%r15d
+ movl %r12d,12(%rsp)
+
+ rorl $9,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ rorl $5,%r13d
+ addl %r8d,%r12d
+ xorl %r9d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ebx,%r15d
+ movl %r10d,%r8d
+
+ rorl $11,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ xorl %r11d,%r8d
+ xorl %r9d,%r14d
+ addl %r15d,%r12d
+ movl %r10d,%r15d
+
+ rorl $6,%r13d
+ andl %r9d,%r8d
+ andl %r11d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r8d
+
+ addl %r12d,%eax
+ addl %r12d,%r8d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r8d
+
+ movl 20(%rsp),%r13d
+ movl 8(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 52(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 16(%rsp),%r12d
+ movl %eax,%r13d
+ addl %r14d,%r12d
+ movl %r8d,%r14d
+ rorl $14,%r13d
+ movl %ebx,%r15d
+ movl %r12d,16(%rsp)
+
+ rorl $9,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ rorl $5,%r13d
+ addl %edx,%r12d
+ xorl %r8d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %eax,%r15d
+ movl %r9d,%edx
+
+ rorl $11,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ xorl %r10d,%edx
+ xorl %r8d,%r14d
+ addl %r15d,%r12d
+ movl %r9d,%r15d
+
+ rorl $6,%r13d
+ andl %r8d,%edx
+ andl %r10d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%edx
+
+ addl %r12d,%r11d
+ addl %r12d,%edx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%edx
+
+ movl 24(%rsp),%r13d
+ movl 12(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 56(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 20(%rsp),%r12d
+ movl %r11d,%r13d
+ addl %r14d,%r12d
+ movl %edx,%r14d
+ rorl $14,%r13d
+ movl %eax,%r15d
+ movl %r12d,20(%rsp)
+
+ rorl $9,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ rorl $5,%r13d
+ addl %ecx,%r12d
+ xorl %edx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r11d,%r15d
+ movl %r8d,%ecx
+
+ rorl $11,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ xorl %r9d,%ecx
+ xorl %edx,%r14d
+ addl %r15d,%r12d
+ movl %r8d,%r15d
+
+ rorl $6,%r13d
+ andl %edx,%ecx
+ andl %r9d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ecx
+
+ addl %r12d,%r10d
+ addl %r12d,%ecx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ecx
+
+ movl 28(%rsp),%r13d
+ movl 16(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 60(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 24(%rsp),%r12d
+ movl %r10d,%r13d
+ addl %r14d,%r12d
+ movl %ecx,%r14d
+ rorl $14,%r13d
+ movl %r11d,%r15d
+ movl %r12d,24(%rsp)
+
+ rorl $9,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ rorl $5,%r13d
+ addl %ebx,%r12d
+ xorl %ecx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r10d,%r15d
+ movl %edx,%ebx
+
+ rorl $11,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ xorl %r8d,%ebx
+ xorl %ecx,%r14d
+ addl %r15d,%r12d
+ movl %edx,%r15d
+
+ rorl $6,%r13d
+ andl %ecx,%ebx
+ andl %r8d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ebx
+
+ addl %r12d,%r9d
+ addl %r12d,%ebx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ebx
+
+ movl 32(%rsp),%r13d
+ movl 20(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 0(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 28(%rsp),%r12d
+ movl %r9d,%r13d
+ addl %r14d,%r12d
+ movl %ebx,%r14d
+ rorl $14,%r13d
+ movl %r10d,%r15d
+ movl %r12d,28(%rsp)
+
+ rorl $9,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ rorl $5,%r13d
+ addl %eax,%r12d
+ xorl %ebx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r9d,%r15d
+ movl %ecx,%eax
+
+ rorl $11,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ xorl %edx,%eax
+ xorl %ebx,%r14d
+ addl %r15d,%r12d
+ movl %ecx,%r15d
+
+ rorl $6,%r13d
+ andl %ebx,%eax
+ andl %edx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%eax
+
+ addl %r12d,%r8d
+ addl %r12d,%eax
+ leaq 1(%rdi),%rdi
+ addl %r14d,%eax
+
+ movl 36(%rsp),%r13d
+ movl 24(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 4(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 32(%rsp),%r12d
+ movl %r8d,%r13d
+ addl %r14d,%r12d
+ movl %eax,%r14d
+ rorl $14,%r13d
+ movl %r9d,%r15d
+ movl %r12d,32(%rsp)
+
+ rorl $9,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ rorl $5,%r13d
+ addl %r11d,%r12d
+ xorl %eax,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r8d,%r15d
+ movl %ebx,%r11d
+
+ rorl $11,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ xorl %ecx,%r11d
+ xorl %eax,%r14d
+ addl %r15d,%r12d
+ movl %ebx,%r15d
+
+ rorl $6,%r13d
+ andl %eax,%r11d
+ andl %ecx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r11d
+
+ addl %r12d,%edx
+ addl %r12d,%r11d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r11d
+
+ movl 40(%rsp),%r13d
+ movl 28(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 8(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 36(%rsp),%r12d
+ movl %edx,%r13d
+ addl %r14d,%r12d
+ movl %r11d,%r14d
+ rorl $14,%r13d
+ movl %r8d,%r15d
+ movl %r12d,36(%rsp)
+
+ rorl $9,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ rorl $5,%r13d
+ addl %r10d,%r12d
+ xorl %r11d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %edx,%r15d
+ movl %eax,%r10d
+
+ rorl $11,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ xorl %ebx,%r10d
+ xorl %r11d,%r14d
+ addl %r15d,%r12d
+ movl %eax,%r15d
+
+ rorl $6,%r13d
+ andl %r11d,%r10d
+ andl %ebx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r10d
+
+ addl %r12d,%ecx
+ addl %r12d,%r10d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r10d
+
+ movl 44(%rsp),%r13d
+ movl 32(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 12(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 40(%rsp),%r12d
+ movl %ecx,%r13d
+ addl %r14d,%r12d
+ movl %r10d,%r14d
+ rorl $14,%r13d
+ movl %edx,%r15d
+ movl %r12d,40(%rsp)
+
+ rorl $9,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ rorl $5,%r13d
+ addl %r9d,%r12d
+ xorl %r10d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ecx,%r15d
+ movl %r11d,%r9d
+
+ rorl $11,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ xorl %eax,%r9d
+ xorl %r10d,%r14d
+ addl %r15d,%r12d
+ movl %r11d,%r15d
+
+ rorl $6,%r13d
+ andl %r10d,%r9d
+ andl %eax,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r9d
+
+ addl %r12d,%ebx
+ addl %r12d,%r9d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r9d
+
+ movl 48(%rsp),%r13d
+ movl 36(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 16(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 44(%rsp),%r12d
+ movl %ebx,%r13d
+ addl %r14d,%r12d
+ movl %r9d,%r14d
+ rorl $14,%r13d
+ movl %ecx,%r15d
+ movl %r12d,44(%rsp)
+
+ rorl $9,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ rorl $5,%r13d
+ addl %r8d,%r12d
+ xorl %r9d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ebx,%r15d
+ movl %r10d,%r8d
+
+ rorl $11,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ xorl %r11d,%r8d
+ xorl %r9d,%r14d
+ addl %r15d,%r12d
+ movl %r10d,%r15d
+
+ rorl $6,%r13d
+ andl %r9d,%r8d
+ andl %r11d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r8d
+
+ addl %r12d,%eax
+ addl %r12d,%r8d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r8d
+
+ movl 52(%rsp),%r13d
+ movl 40(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 20(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 48(%rsp),%r12d
+ movl %eax,%r13d
+ addl %r14d,%r12d
+ movl %r8d,%r14d
+ rorl $14,%r13d
+ movl %ebx,%r15d
+ movl %r12d,48(%rsp)
+
+ rorl $9,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ rorl $5,%r13d
+ addl %edx,%r12d
+ xorl %r8d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %eax,%r15d
+ movl %r9d,%edx
+
+ rorl $11,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ xorl %r10d,%edx
+ xorl %r8d,%r14d
+ addl %r15d,%r12d
+ movl %r9d,%r15d
+
+ rorl $6,%r13d
+ andl %r8d,%edx
+ andl %r10d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%edx
+
+ addl %r12d,%r11d
+ addl %r12d,%edx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%edx
+
+ movl 56(%rsp),%r13d
+ movl 44(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 24(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 52(%rsp),%r12d
+ movl %r11d,%r13d
+ addl %r14d,%r12d
+ movl %edx,%r14d
+ rorl $14,%r13d
+ movl %eax,%r15d
+ movl %r12d,52(%rsp)
+
+ rorl $9,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ rorl $5,%r13d
+ addl %ecx,%r12d
+ xorl %edx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r11d,%r15d
+ movl %r8d,%ecx
+
+ rorl $11,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ xorl %r9d,%ecx
+ xorl %edx,%r14d
+ addl %r15d,%r12d
+ movl %r8d,%r15d
+
+ rorl $6,%r13d
+ andl %edx,%ecx
+ andl %r9d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ecx
+
+ addl %r12d,%r10d
+ addl %r12d,%ecx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ecx
+
+ movl 60(%rsp),%r13d
+ movl 48(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 28(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 56(%rsp),%r12d
+ movl %r10d,%r13d
+ addl %r14d,%r12d
+ movl %ecx,%r14d
+ rorl $14,%r13d
+ movl %r11d,%r15d
+ movl %r12d,56(%rsp)
+
+ rorl $9,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ rorl $5,%r13d
+ addl %ebx,%r12d
+ xorl %ecx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r10d,%r15d
+ movl %edx,%ebx
+
+ rorl $11,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ xorl %r8d,%ebx
+ xorl %ecx,%r14d
+ addl %r15d,%r12d
+ movl %edx,%r15d
+
+ rorl $6,%r13d
+ andl %ecx,%ebx
+ andl %r8d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ebx
+
+ addl %r12d,%r9d
+ addl %r12d,%ebx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ebx
+
+ movl 0(%rsp),%r13d
+ movl 52(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 32(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 60(%rsp),%r12d
+ movl %r9d,%r13d
+ addl %r14d,%r12d
+ movl %ebx,%r14d
+ rorl $14,%r13d
+ movl %r10d,%r15d
+ movl %r12d,60(%rsp)
+
+ rorl $9,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ rorl $5,%r13d
+ addl %eax,%r12d
+ xorl %ebx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r9d,%r15d
+ movl %ecx,%eax
+
+ rorl $11,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ xorl %edx,%eax
+ xorl %ebx,%r14d
+ addl %r15d,%r12d
+ movl %ecx,%r15d
+
+ rorl $6,%r13d
+ andl %ebx,%eax
+ andl %edx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%eax
+
+ addl %r12d,%r8d
+ addl %r12d,%eax
+ leaq 1(%rdi),%rdi
+ addl %r14d,%eax
+
+ cmpq $64,%rdi
+ jb L$rounds_16_xx
+
+ movq 64+0(%rsp),%rdi
+ leaq 64(%rsi),%rsi
+
+ addl 0(%rdi),%eax
+ addl 4(%rdi),%ebx
+ addl 8(%rdi),%ecx
+ addl 12(%rdi),%edx
+ addl 16(%rdi),%r8d
+ addl 20(%rdi),%r9d
+ addl 24(%rdi),%r10d
+ addl 28(%rdi),%r11d
+
+ cmpq 64+16(%rsp),%rsi
+
+ movl %eax,0(%rdi)
+ movl %ebx,4(%rdi)
+ movl %ecx,8(%rdi)
+ movl %edx,12(%rdi)
+ movl %r8d,16(%rdi)
+ movl %r9d,20(%rdi)
+ movl %r10d,24(%rdi)
+ movl %r11d,28(%rdi)
+ jb L$loop
+
+ movq 64+24(%rsp),%rsi
+ movq (%rsi),%r15
+ movq 8(%rsi),%r14
+ movq 16(%rsi),%r13
+ movq 24(%rsi),%r12
+ movq 32(%rsi),%rbp
+ movq 40(%rsi),%rbx
+ leaq 48(%rsi),%rsp
+L$epilogue:
+ retq
+
+.p2align 6
+
+K256:
+.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
diff --git a/crypto/libressl/crypto/sha/sha256-masm-x86_64.S b/crypto/libressl/crypto/sha/sha256-masm-x86_64.S
new file mode 100644
index 0000000..33c705d
--- /dev/null
+++ b/crypto/libressl/crypto/sha/sha256-masm-x86_64.S
@@ -0,0 +1,1864 @@
+; 1 "crypto/sha/sha256-masm-x86_64.S.tmp"
+; 1 "<built-in>" 1
+; 1 "<built-in>" 3
+; 340 "<built-in>" 3
+; 1 "<command line>" 1
+; 1 "<built-in>" 2
+; 1 "crypto/sha/sha256-masm-x86_64.S.tmp" 2
+OPTION DOTNAME
+
+; 1 "./crypto/x86_arch.h" 1
+
+
+; 16 "./crypto/x86_arch.h"
+
+
+
+
+
+
+
+
+
+; 40 "./crypto/x86_arch.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+; 3 "crypto/sha/sha256-masm-x86_64.S.tmp" 2
+.text$ SEGMENT ALIGN(64) 'CODE'
+
+PUBLIC sha256_block_data_order
+
+ALIGN 16
+sha256_block_data_order PROC PUBLIC
+ mov QWORD PTR[8+rsp],rdi ;WIN64 prologue
+ mov QWORD PTR[16+rsp],rsi
+ mov rax,rsp
+$L$SEH_begin_sha256_block_data_order::
+ mov rdi,rcx
+ mov rsi,rdx
+ mov rdx,r8
+ mov rcx,r9
+
+
+ push rbx
+ push rbp
+ push r12
+ push r13
+ push r14
+ push r15
+ mov r11,rsp
+ shl rdx,4
+ sub rsp,16*4+4*8
+ lea rdx,QWORD PTR[rdx*4+rsi]
+ and rsp,-64
+ mov QWORD PTR[((64+0))+rsp],rdi
+ mov QWORD PTR[((64+8))+rsp],rsi
+ mov QWORD PTR[((64+16))+rsp],rdx
+ mov QWORD PTR[((64+24))+rsp],r11
+$L$prologue::
+
+ lea rbp,QWORD PTR[K256]
+
+ mov eax,DWORD PTR[rdi]
+ mov ebx,DWORD PTR[4+rdi]
+ mov ecx,DWORD PTR[8+rdi]
+ mov edx,DWORD PTR[12+rdi]
+ mov r8d,DWORD PTR[16+rdi]
+ mov r9d,DWORD PTR[20+rdi]
+ mov r10d,DWORD PTR[24+rdi]
+ mov r11d,DWORD PTR[28+rdi]
+ jmp $L$loop
+
+ALIGN 16
+$L$loop::
+ xor rdi,rdi
+ mov r12d,DWORD PTR[rsi]
+ mov r13d,r8d
+ mov r14d,eax
+ bswap r12d
+ ror r13d,14
+ mov r15d,r9d
+ mov DWORD PTR[rsp],r12d
+
+ ror r14d,9
+ xor r13d,r8d
+ xor r15d,r10d
+
+ ror r13d,5
+ add r12d,r11d
+ xor r14d,eax
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,r8d
+ mov r11d,ebx
+
+ ror r14d,11
+ xor r13d,r8d
+ xor r15d,r10d
+
+ xor r11d,ecx
+ xor r14d,eax
+ add r12d,r15d
+ mov r15d,ebx
+
+ ror r13d,6
+ and r11d,eax
+ and r15d,ecx
+
+ ror r14d,2
+ add r12d,r13d
+ add r11d,r15d
+
+ add edx,r12d
+ add r11d,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add r11d,r14d
+
+ mov r12d,DWORD PTR[4+rsi]
+ mov r13d,edx
+ mov r14d,r11d
+ bswap r12d
+ ror r13d,14
+ mov r15d,r8d
+ mov DWORD PTR[4+rsp],r12d
+
+ ror r14d,9
+ xor r13d,edx
+ xor r15d,r9d
+
+ ror r13d,5
+ add r12d,r10d
+ xor r14d,r11d
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,edx
+ mov r10d,eax
+
+ ror r14d,11
+ xor r13d,edx
+ xor r15d,r9d
+
+ xor r10d,ebx
+ xor r14d,r11d
+ add r12d,r15d
+ mov r15d,eax
+
+ ror r13d,6
+ and r10d,r11d
+ and r15d,ebx
+
+ ror r14d,2
+ add r12d,r13d
+ add r10d,r15d
+
+ add ecx,r12d
+ add r10d,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add r10d,r14d
+
+ mov r12d,DWORD PTR[8+rsi]
+ mov r13d,ecx
+ mov r14d,r10d
+ bswap r12d
+ ror r13d,14
+ mov r15d,edx
+ mov DWORD PTR[8+rsp],r12d
+
+ ror r14d,9
+ xor r13d,ecx
+ xor r15d,r8d
+
+ ror r13d,5
+ add r12d,r9d
+ xor r14d,r10d
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,ecx
+ mov r9d,r11d
+
+ ror r14d,11
+ xor r13d,ecx
+ xor r15d,r8d
+
+ xor r9d,eax
+ xor r14d,r10d
+ add r12d,r15d
+ mov r15d,r11d
+
+ ror r13d,6
+ and r9d,r10d
+ and r15d,eax
+
+ ror r14d,2
+ add r12d,r13d
+ add r9d,r15d
+
+ add ebx,r12d
+ add r9d,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add r9d,r14d
+
+ mov r12d,DWORD PTR[12+rsi]
+ mov r13d,ebx
+ mov r14d,r9d
+ bswap r12d
+ ror r13d,14
+ mov r15d,ecx
+ mov DWORD PTR[12+rsp],r12d
+
+ ror r14d,9
+ xor r13d,ebx
+ xor r15d,edx
+
+ ror r13d,5
+ add r12d,r8d
+ xor r14d,r9d
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,ebx
+ mov r8d,r10d
+
+ ror r14d,11
+ xor r13d,ebx
+ xor r15d,edx
+
+ xor r8d,r11d
+ xor r14d,r9d
+ add r12d,r15d
+ mov r15d,r10d
+
+ ror r13d,6
+ and r8d,r9d
+ and r15d,r11d
+
+ ror r14d,2
+ add r12d,r13d
+ add r8d,r15d
+
+ add eax,r12d
+ add r8d,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add r8d,r14d
+
+ mov r12d,DWORD PTR[16+rsi]
+ mov r13d,eax
+ mov r14d,r8d
+ bswap r12d
+ ror r13d,14
+ mov r15d,ebx
+ mov DWORD PTR[16+rsp],r12d
+
+ ror r14d,9
+ xor r13d,eax
+ xor r15d,ecx
+
+ ror r13d,5
+ add r12d,edx
+ xor r14d,r8d
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,eax
+ mov edx,r9d
+
+ ror r14d,11
+ xor r13d,eax
+ xor r15d,ecx
+
+ xor edx,r10d
+ xor r14d,r8d
+ add r12d,r15d
+ mov r15d,r9d
+
+ ror r13d,6
+ and edx,r8d
+ and r15d,r10d
+
+ ror r14d,2
+ add r12d,r13d
+ add edx,r15d
+
+ add r11d,r12d
+ add edx,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add edx,r14d
+
+ mov r12d,DWORD PTR[20+rsi]
+ mov r13d,r11d
+ mov r14d,edx
+ bswap r12d
+ ror r13d,14
+ mov r15d,eax
+ mov DWORD PTR[20+rsp],r12d
+
+ ror r14d,9
+ xor r13d,r11d
+ xor r15d,ebx
+
+ ror r13d,5
+ add r12d,ecx
+ xor r14d,edx
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,r11d
+ mov ecx,r8d
+
+ ror r14d,11
+ xor r13d,r11d
+ xor r15d,ebx
+
+ xor ecx,r9d
+ xor r14d,edx
+ add r12d,r15d
+ mov r15d,r8d
+
+ ror r13d,6
+ and ecx,edx
+ and r15d,r9d
+
+ ror r14d,2
+ add r12d,r13d
+ add ecx,r15d
+
+ add r10d,r12d
+ add ecx,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add ecx,r14d
+
+ mov r12d,DWORD PTR[24+rsi]
+ mov r13d,r10d
+ mov r14d,ecx
+ bswap r12d
+ ror r13d,14
+ mov r15d,r11d
+ mov DWORD PTR[24+rsp],r12d
+
+ ror r14d,9
+ xor r13d,r10d
+ xor r15d,eax
+
+ ror r13d,5
+ add r12d,ebx
+ xor r14d,ecx
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,r10d
+ mov ebx,edx
+
+ ror r14d,11
+ xor r13d,r10d
+ xor r15d,eax
+
+ xor ebx,r8d
+ xor r14d,ecx
+ add r12d,r15d
+ mov r15d,edx
+
+ ror r13d,6
+ and ebx,ecx
+ and r15d,r8d
+
+ ror r14d,2
+ add r12d,r13d
+ add ebx,r15d
+
+ add r9d,r12d
+ add ebx,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add ebx,r14d
+
+ mov r12d,DWORD PTR[28+rsi]
+ mov r13d,r9d
+ mov r14d,ebx
+ bswap r12d
+ ror r13d,14
+ mov r15d,r10d
+ mov DWORD PTR[28+rsp],r12d
+
+ ror r14d,9
+ xor r13d,r9d
+ xor r15d,r11d
+
+ ror r13d,5
+ add r12d,eax
+ xor r14d,ebx
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,r9d
+ mov eax,ecx
+
+ ror r14d,11
+ xor r13d,r9d
+ xor r15d,r11d
+
+ xor eax,edx
+ xor r14d,ebx
+ add r12d,r15d
+ mov r15d,ecx
+
+ ror r13d,6
+ and eax,ebx
+ and r15d,edx
+
+ ror r14d,2
+ add r12d,r13d
+ add eax,r15d
+
+ add r8d,r12d
+ add eax,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add eax,r14d
+
+ mov r12d,DWORD PTR[32+rsi]
+ mov r13d,r8d
+ mov r14d,eax
+ bswap r12d
+ ror r13d,14
+ mov r15d,r9d
+ mov DWORD PTR[32+rsp],r12d
+
+ ror r14d,9
+ xor r13d,r8d
+ xor r15d,r10d
+
+ ror r13d,5
+ add r12d,r11d
+ xor r14d,eax
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,r8d
+ mov r11d,ebx
+
+ ror r14d,11
+ xor r13d,r8d
+ xor r15d,r10d
+
+ xor r11d,ecx
+ xor r14d,eax
+ add r12d,r15d
+ mov r15d,ebx
+
+ ror r13d,6
+ and r11d,eax
+ and r15d,ecx
+
+ ror r14d,2
+ add r12d,r13d
+ add r11d,r15d
+
+ add edx,r12d
+ add r11d,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add r11d,r14d
+
+ mov r12d,DWORD PTR[36+rsi]
+ mov r13d,edx
+ mov r14d,r11d
+ bswap r12d
+ ror r13d,14
+ mov r15d,r8d
+ mov DWORD PTR[36+rsp],r12d
+
+ ror r14d,9
+ xor r13d,edx
+ xor r15d,r9d
+
+ ror r13d,5
+ add r12d,r10d
+ xor r14d,r11d
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,edx
+ mov r10d,eax
+
+ ror r14d,11
+ xor r13d,edx
+ xor r15d,r9d
+
+ xor r10d,ebx
+ xor r14d,r11d
+ add r12d,r15d
+ mov r15d,eax
+
+ ror r13d,6
+ and r10d,r11d
+ and r15d,ebx
+
+ ror r14d,2
+ add r12d,r13d
+ add r10d,r15d
+
+ add ecx,r12d
+ add r10d,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add r10d,r14d
+
+ mov r12d,DWORD PTR[40+rsi]
+ mov r13d,ecx
+ mov r14d,r10d
+ bswap r12d
+ ror r13d,14
+ mov r15d,edx
+ mov DWORD PTR[40+rsp],r12d
+
+ ror r14d,9
+ xor r13d,ecx
+ xor r15d,r8d
+
+ ror r13d,5
+ add r12d,r9d
+ xor r14d,r10d
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,ecx
+ mov r9d,r11d
+
+ ror r14d,11
+ xor r13d,ecx
+ xor r15d,r8d
+
+ xor r9d,eax
+ xor r14d,r10d
+ add r12d,r15d
+ mov r15d,r11d
+
+ ror r13d,6
+ and r9d,r10d
+ and r15d,eax
+
+ ror r14d,2
+ add r12d,r13d
+ add r9d,r15d
+
+ add ebx,r12d
+ add r9d,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add r9d,r14d
+
+ mov r12d,DWORD PTR[44+rsi]
+ mov r13d,ebx
+ mov r14d,r9d
+ bswap r12d
+ ror r13d,14
+ mov r15d,ecx
+ mov DWORD PTR[44+rsp],r12d
+
+ ror r14d,9
+ xor r13d,ebx
+ xor r15d,edx
+
+ ror r13d,5
+ add r12d,r8d
+ xor r14d,r9d
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,ebx
+ mov r8d,r10d
+
+ ror r14d,11
+ xor r13d,ebx
+ xor r15d,edx
+
+ xor r8d,r11d
+ xor r14d,r9d
+ add r12d,r15d
+ mov r15d,r10d
+
+ ror r13d,6
+ and r8d,r9d
+ and r15d,r11d
+
+ ror r14d,2
+ add r12d,r13d
+ add r8d,r15d
+
+ add eax,r12d
+ add r8d,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add r8d,r14d
+
+ mov r12d,DWORD PTR[48+rsi]
+ mov r13d,eax
+ mov r14d,r8d
+ bswap r12d
+ ror r13d,14
+ mov r15d,ebx
+ mov DWORD PTR[48+rsp],r12d
+
+ ror r14d,9
+ xor r13d,eax
+ xor r15d,ecx
+
+ ror r13d,5
+ add r12d,edx
+ xor r14d,r8d
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,eax
+ mov edx,r9d
+
+ ror r14d,11
+ xor r13d,eax
+ xor r15d,ecx
+
+ xor edx,r10d
+ xor r14d,r8d
+ add r12d,r15d
+ mov r15d,r9d
+
+ ror r13d,6
+ and edx,r8d
+ and r15d,r10d
+
+ ror r14d,2
+ add r12d,r13d
+ add edx,r15d
+
+ add r11d,r12d
+ add edx,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add edx,r14d
+
+ mov r12d,DWORD PTR[52+rsi]
+ mov r13d,r11d
+ mov r14d,edx
+ bswap r12d
+ ror r13d,14
+ mov r15d,eax
+ mov DWORD PTR[52+rsp],r12d
+
+ ror r14d,9
+ xor r13d,r11d
+ xor r15d,ebx
+
+ ror r13d,5
+ add r12d,ecx
+ xor r14d,edx
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,r11d
+ mov ecx,r8d
+
+ ror r14d,11
+ xor r13d,r11d
+ xor r15d,ebx
+
+ xor ecx,r9d
+ xor r14d,edx
+ add r12d,r15d
+ mov r15d,r8d
+
+ ror r13d,6
+ and ecx,edx
+ and r15d,r9d
+
+ ror r14d,2
+ add r12d,r13d
+ add ecx,r15d
+
+ add r10d,r12d
+ add ecx,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add ecx,r14d
+
+ mov r12d,DWORD PTR[56+rsi]
+ mov r13d,r10d
+ mov r14d,ecx
+ bswap r12d
+ ror r13d,14
+ mov r15d,r11d
+ mov DWORD PTR[56+rsp],r12d
+
+ ror r14d,9
+ xor r13d,r10d
+ xor r15d,eax
+
+ ror r13d,5
+ add r12d,ebx
+ xor r14d,ecx
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,r10d
+ mov ebx,edx
+
+ ror r14d,11
+ xor r13d,r10d
+ xor r15d,eax
+
+ xor ebx,r8d
+ xor r14d,ecx
+ add r12d,r15d
+ mov r15d,edx
+
+ ror r13d,6
+ and ebx,ecx
+ and r15d,r8d
+
+ ror r14d,2
+ add r12d,r13d
+ add ebx,r15d
+
+ add r9d,r12d
+ add ebx,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add ebx,r14d
+
+ mov r12d,DWORD PTR[60+rsi]
+ mov r13d,r9d
+ mov r14d,ebx
+ bswap r12d
+ ror r13d,14
+ mov r15d,r10d
+ mov DWORD PTR[60+rsp],r12d
+
+ ror r14d,9
+ xor r13d,r9d
+ xor r15d,r11d
+
+ ror r13d,5
+ add r12d,eax
+ xor r14d,ebx
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,r9d
+ mov eax,ecx
+
+ ror r14d,11
+ xor r13d,r9d
+ xor r15d,r11d
+
+ xor eax,edx
+ xor r14d,ebx
+ add r12d,r15d
+ mov r15d,ecx
+
+ ror r13d,6
+ and eax,ebx
+ and r15d,edx
+
+ ror r14d,2
+ add r12d,r13d
+ add eax,r15d
+
+ add r8d,r12d
+ add eax,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add eax,r14d
+
+ jmp $L$rounds_16_xx
+ALIGN 16
+$L$rounds_16_xx::
+ mov r13d,DWORD PTR[4+rsp]
+ mov r14d,DWORD PTR[56+rsp]
+ mov r12d,r13d
+ mov r15d,r14d
+
+ ror r12d,11
+ xor r12d,r13d
+ shr r13d,3
+
+ ror r12d,7
+ xor r13d,r12d
+ mov r12d,DWORD PTR[36+rsp]
+
+ ror r15d,2
+ xor r15d,r14d
+ shr r14d,10
+
+ ror r15d,17
+ add r12d,r13d
+ xor r14d,r15d
+
+ add r12d,DWORD PTR[rsp]
+ mov r13d,r8d
+ add r12d,r14d
+ mov r14d,eax
+ ror r13d,14
+ mov r15d,r9d
+ mov DWORD PTR[rsp],r12d
+
+ ror r14d,9
+ xor r13d,r8d
+ xor r15d,r10d
+
+ ror r13d,5
+ add r12d,r11d
+ xor r14d,eax
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,r8d
+ mov r11d,ebx
+
+ ror r14d,11
+ xor r13d,r8d
+ xor r15d,r10d
+
+ xor r11d,ecx
+ xor r14d,eax
+ add r12d,r15d
+ mov r15d,ebx
+
+ ror r13d,6
+ and r11d,eax
+ and r15d,ecx
+
+ ror r14d,2
+ add r12d,r13d
+ add r11d,r15d
+
+ add edx,r12d
+ add r11d,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add r11d,r14d
+
+ mov r13d,DWORD PTR[8+rsp]
+ mov r14d,DWORD PTR[60+rsp]
+ mov r12d,r13d
+ mov r15d,r14d
+
+ ror r12d,11
+ xor r12d,r13d
+ shr r13d,3
+
+ ror r12d,7
+ xor r13d,r12d
+ mov r12d,DWORD PTR[40+rsp]
+
+ ror r15d,2
+ xor r15d,r14d
+ shr r14d,10
+
+ ror r15d,17
+ add r12d,r13d
+ xor r14d,r15d
+
+ add r12d,DWORD PTR[4+rsp]
+ mov r13d,edx
+ add r12d,r14d
+ mov r14d,r11d
+ ror r13d,14
+ mov r15d,r8d
+ mov DWORD PTR[4+rsp],r12d
+
+ ror r14d,9
+ xor r13d,edx
+ xor r15d,r9d
+
+ ror r13d,5
+ add r12d,r10d
+ xor r14d,r11d
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,edx
+ mov r10d,eax
+
+ ror r14d,11
+ xor r13d,edx
+ xor r15d,r9d
+
+ xor r10d,ebx
+ xor r14d,r11d
+ add r12d,r15d
+ mov r15d,eax
+
+ ror r13d,6
+ and r10d,r11d
+ and r15d,ebx
+
+ ror r14d,2
+ add r12d,r13d
+ add r10d,r15d
+
+ add ecx,r12d
+ add r10d,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add r10d,r14d
+
+ mov r13d,DWORD PTR[12+rsp]
+ mov r14d,DWORD PTR[rsp]
+ mov r12d,r13d
+ mov r15d,r14d
+
+ ror r12d,11
+ xor r12d,r13d
+ shr r13d,3
+
+ ror r12d,7
+ xor r13d,r12d
+ mov r12d,DWORD PTR[44+rsp]
+
+ ror r15d,2
+ xor r15d,r14d
+ shr r14d,10
+
+ ror r15d,17
+ add r12d,r13d
+ xor r14d,r15d
+
+ add r12d,DWORD PTR[8+rsp]
+ mov r13d,ecx
+ add r12d,r14d
+ mov r14d,r10d
+ ror r13d,14
+ mov r15d,edx
+ mov DWORD PTR[8+rsp],r12d
+
+ ror r14d,9
+ xor r13d,ecx
+ xor r15d,r8d
+
+ ror r13d,5
+ add r12d,r9d
+ xor r14d,r10d
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,ecx
+ mov r9d,r11d
+
+ ror r14d,11
+ xor r13d,ecx
+ xor r15d,r8d
+
+ xor r9d,eax
+ xor r14d,r10d
+ add r12d,r15d
+ mov r15d,r11d
+
+ ror r13d,6
+ and r9d,r10d
+ and r15d,eax
+
+ ror r14d,2
+ add r12d,r13d
+ add r9d,r15d
+
+ add ebx,r12d
+ add r9d,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add r9d,r14d
+
+ mov r13d,DWORD PTR[16+rsp]
+ mov r14d,DWORD PTR[4+rsp]
+ mov r12d,r13d
+ mov r15d,r14d
+
+ ror r12d,11
+ xor r12d,r13d
+ shr r13d,3
+
+ ror r12d,7
+ xor r13d,r12d
+ mov r12d,DWORD PTR[48+rsp]
+
+ ror r15d,2
+ xor r15d,r14d
+ shr r14d,10
+
+ ror r15d,17
+ add r12d,r13d
+ xor r14d,r15d
+
+ add r12d,DWORD PTR[12+rsp]
+ mov r13d,ebx
+ add r12d,r14d
+ mov r14d,r9d
+ ror r13d,14
+ mov r15d,ecx
+ mov DWORD PTR[12+rsp],r12d
+
+ ror r14d,9
+ xor r13d,ebx
+ xor r15d,edx
+
+ ror r13d,5
+ add r12d,r8d
+ xor r14d,r9d
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,ebx
+ mov r8d,r10d
+
+ ror r14d,11
+ xor r13d,ebx
+ xor r15d,edx
+
+ xor r8d,r11d
+ xor r14d,r9d
+ add r12d,r15d
+ mov r15d,r10d
+
+ ror r13d,6
+ and r8d,r9d
+ and r15d,r11d
+
+ ror r14d,2
+ add r12d,r13d
+ add r8d,r15d
+
+ add eax,r12d
+ add r8d,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add r8d,r14d
+
+ mov r13d,DWORD PTR[20+rsp]
+ mov r14d,DWORD PTR[8+rsp]
+ mov r12d,r13d
+ mov r15d,r14d
+
+ ror r12d,11
+ xor r12d,r13d
+ shr r13d,3
+
+ ror r12d,7
+ xor r13d,r12d
+ mov r12d,DWORD PTR[52+rsp]
+
+ ror r15d,2
+ xor r15d,r14d
+ shr r14d,10
+
+ ror r15d,17
+ add r12d,r13d
+ xor r14d,r15d
+
+ add r12d,DWORD PTR[16+rsp]
+ mov r13d,eax
+ add r12d,r14d
+ mov r14d,r8d
+ ror r13d,14
+ mov r15d,ebx
+ mov DWORD PTR[16+rsp],r12d
+
+ ror r14d,9
+ xor r13d,eax
+ xor r15d,ecx
+
+ ror r13d,5
+ add r12d,edx
+ xor r14d,r8d
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,eax
+ mov edx,r9d
+
+ ror r14d,11
+ xor r13d,eax
+ xor r15d,ecx
+
+ xor edx,r10d
+ xor r14d,r8d
+ add r12d,r15d
+ mov r15d,r9d
+
+ ror r13d,6
+ and edx,r8d
+ and r15d,r10d
+
+ ror r14d,2
+ add r12d,r13d
+ add edx,r15d
+
+ add r11d,r12d
+ add edx,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add edx,r14d
+
+ mov r13d,DWORD PTR[24+rsp]
+ mov r14d,DWORD PTR[12+rsp]
+ mov r12d,r13d
+ mov r15d,r14d
+
+ ror r12d,11
+ xor r12d,r13d
+ shr r13d,3
+
+ ror r12d,7
+ xor r13d,r12d
+ mov r12d,DWORD PTR[56+rsp]
+
+ ror r15d,2
+ xor r15d,r14d
+ shr r14d,10
+
+ ror r15d,17
+ add r12d,r13d
+ xor r14d,r15d
+
+ add r12d,DWORD PTR[20+rsp]
+ mov r13d,r11d
+ add r12d,r14d
+ mov r14d,edx
+ ror r13d,14
+ mov r15d,eax
+ mov DWORD PTR[20+rsp],r12d
+
+ ror r14d,9
+ xor r13d,r11d
+ xor r15d,ebx
+
+ ror r13d,5
+ add r12d,ecx
+ xor r14d,edx
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,r11d
+ mov ecx,r8d
+
+ ror r14d,11
+ xor r13d,r11d
+ xor r15d,ebx
+
+ xor ecx,r9d
+ xor r14d,edx
+ add r12d,r15d
+ mov r15d,r8d
+
+ ror r13d,6
+ and ecx,edx
+ and r15d,r9d
+
+ ror r14d,2
+ add r12d,r13d
+ add ecx,r15d
+
+ add r10d,r12d
+ add ecx,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add ecx,r14d
+
+ mov r13d,DWORD PTR[28+rsp]
+ mov r14d,DWORD PTR[16+rsp]
+ mov r12d,r13d
+ mov r15d,r14d
+
+ ror r12d,11
+ xor r12d,r13d
+ shr r13d,3
+
+ ror r12d,7
+ xor r13d,r12d
+ mov r12d,DWORD PTR[60+rsp]
+
+ ror r15d,2
+ xor r15d,r14d
+ shr r14d,10
+
+ ror r15d,17
+ add r12d,r13d
+ xor r14d,r15d
+
+ add r12d,DWORD PTR[24+rsp]
+ mov r13d,r10d
+ add r12d,r14d
+ mov r14d,ecx
+ ror r13d,14
+ mov r15d,r11d
+ mov DWORD PTR[24+rsp],r12d
+
+ ror r14d,9
+ xor r13d,r10d
+ xor r15d,eax
+
+ ror r13d,5
+ add r12d,ebx
+ xor r14d,ecx
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,r10d
+ mov ebx,edx
+
+ ror r14d,11
+ xor r13d,r10d
+ xor r15d,eax
+
+ xor ebx,r8d
+ xor r14d,ecx
+ add r12d,r15d
+ mov r15d,edx
+
+ ror r13d,6
+ and ebx,ecx
+ and r15d,r8d
+
+ ror r14d,2
+ add r12d,r13d
+ add ebx,r15d
+
+ add r9d,r12d
+ add ebx,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add ebx,r14d
+
+ mov r13d,DWORD PTR[32+rsp]
+ mov r14d,DWORD PTR[20+rsp]
+ mov r12d,r13d
+ mov r15d,r14d
+
+ ror r12d,11
+ xor r12d,r13d
+ shr r13d,3
+
+ ror r12d,7
+ xor r13d,r12d
+ mov r12d,DWORD PTR[rsp]
+
+ ror r15d,2
+ xor r15d,r14d
+ shr r14d,10
+
+ ror r15d,17
+ add r12d,r13d
+ xor r14d,r15d
+
+ add r12d,DWORD PTR[28+rsp]
+ mov r13d,r9d
+ add r12d,r14d
+ mov r14d,ebx
+ ror r13d,14
+ mov r15d,r10d
+ mov DWORD PTR[28+rsp],r12d
+
+ ror r14d,9
+ xor r13d,r9d
+ xor r15d,r11d
+
+ ror r13d,5
+ add r12d,eax
+ xor r14d,ebx
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,r9d
+ mov eax,ecx
+
+ ror r14d,11
+ xor r13d,r9d
+ xor r15d,r11d
+
+ xor eax,edx
+ xor r14d,ebx
+ add r12d,r15d
+ mov r15d,ecx
+
+ ror r13d,6
+ and eax,ebx
+ and r15d,edx
+
+ ror r14d,2
+ add r12d,r13d
+ add eax,r15d
+
+ add r8d,r12d
+ add eax,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add eax,r14d
+
+ mov r13d,DWORD PTR[36+rsp]
+ mov r14d,DWORD PTR[24+rsp]
+ mov r12d,r13d
+ mov r15d,r14d
+
+ ror r12d,11
+ xor r12d,r13d
+ shr r13d,3
+
+ ror r12d,7
+ xor r13d,r12d
+ mov r12d,DWORD PTR[4+rsp]
+
+ ror r15d,2
+ xor r15d,r14d
+ shr r14d,10
+
+ ror r15d,17
+ add r12d,r13d
+ xor r14d,r15d
+
+ add r12d,DWORD PTR[32+rsp]
+ mov r13d,r8d
+ add r12d,r14d
+ mov r14d,eax
+ ror r13d,14
+ mov r15d,r9d
+ mov DWORD PTR[32+rsp],r12d
+
+ ror r14d,9
+ xor r13d,r8d
+ xor r15d,r10d
+
+ ror r13d,5
+ add r12d,r11d
+ xor r14d,eax
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,r8d
+ mov r11d,ebx
+
+ ror r14d,11
+ xor r13d,r8d
+ xor r15d,r10d
+
+ xor r11d,ecx
+ xor r14d,eax
+ add r12d,r15d
+ mov r15d,ebx
+
+ ror r13d,6
+ and r11d,eax
+ and r15d,ecx
+
+ ror r14d,2
+ add r12d,r13d
+ add r11d,r15d
+
+ add edx,r12d
+ add r11d,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add r11d,r14d
+
+ mov r13d,DWORD PTR[40+rsp]
+ mov r14d,DWORD PTR[28+rsp]
+ mov r12d,r13d
+ mov r15d,r14d
+
+ ror r12d,11
+ xor r12d,r13d
+ shr r13d,3
+
+ ror r12d,7
+ xor r13d,r12d
+ mov r12d,DWORD PTR[8+rsp]
+
+ ror r15d,2
+ xor r15d,r14d
+ shr r14d,10
+
+ ror r15d,17
+ add r12d,r13d
+ xor r14d,r15d
+
+ add r12d,DWORD PTR[36+rsp]
+ mov r13d,edx
+ add r12d,r14d
+ mov r14d,r11d
+ ror r13d,14
+ mov r15d,r8d
+ mov DWORD PTR[36+rsp],r12d
+
+ ror r14d,9
+ xor r13d,edx
+ xor r15d,r9d
+
+ ror r13d,5
+ add r12d,r10d
+ xor r14d,r11d
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,edx
+ mov r10d,eax
+
+ ror r14d,11
+ xor r13d,edx
+ xor r15d,r9d
+
+ xor r10d,ebx
+ xor r14d,r11d
+ add r12d,r15d
+ mov r15d,eax
+
+ ror r13d,6
+ and r10d,r11d
+ and r15d,ebx
+
+ ror r14d,2
+ add r12d,r13d
+ add r10d,r15d
+
+ add ecx,r12d
+ add r10d,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add r10d,r14d
+
+ mov r13d,DWORD PTR[44+rsp]
+ mov r14d,DWORD PTR[32+rsp]
+ mov r12d,r13d
+ mov r15d,r14d
+
+ ror r12d,11
+ xor r12d,r13d
+ shr r13d,3
+
+ ror r12d,7
+ xor r13d,r12d
+ mov r12d,DWORD PTR[12+rsp]
+
+ ror r15d,2
+ xor r15d,r14d
+ shr r14d,10
+
+ ror r15d,17
+ add r12d,r13d
+ xor r14d,r15d
+
+ add r12d,DWORD PTR[40+rsp]
+ mov r13d,ecx
+ add r12d,r14d
+ mov r14d,r10d
+ ror r13d,14
+ mov r15d,edx
+ mov DWORD PTR[40+rsp],r12d
+
+ ror r14d,9
+ xor r13d,ecx
+ xor r15d,r8d
+
+ ror r13d,5
+ add r12d,r9d
+ xor r14d,r10d
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,ecx
+ mov r9d,r11d
+
+ ror r14d,11
+ xor r13d,ecx
+ xor r15d,r8d
+
+ xor r9d,eax
+ xor r14d,r10d
+ add r12d,r15d
+ mov r15d,r11d
+
+ ror r13d,6
+ and r9d,r10d
+ and r15d,eax
+
+ ror r14d,2
+ add r12d,r13d
+ add r9d,r15d
+
+ add ebx,r12d
+ add r9d,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add r9d,r14d
+
+ mov r13d,DWORD PTR[48+rsp]
+ mov r14d,DWORD PTR[36+rsp]
+ mov r12d,r13d
+ mov r15d,r14d
+
+ ror r12d,11
+ xor r12d,r13d
+ shr r13d,3
+
+ ror r12d,7
+ xor r13d,r12d
+ mov r12d,DWORD PTR[16+rsp]
+
+ ror r15d,2
+ xor r15d,r14d
+ shr r14d,10
+
+ ror r15d,17
+ add r12d,r13d
+ xor r14d,r15d
+
+ add r12d,DWORD PTR[44+rsp]
+ mov r13d,ebx
+ add r12d,r14d
+ mov r14d,r9d
+ ror r13d,14
+ mov r15d,ecx
+ mov DWORD PTR[44+rsp],r12d
+
+ ror r14d,9
+ xor r13d,ebx
+ xor r15d,edx
+
+ ror r13d,5
+ add r12d,r8d
+ xor r14d,r9d
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,ebx
+ mov r8d,r10d
+
+ ror r14d,11
+ xor r13d,ebx
+ xor r15d,edx
+
+ xor r8d,r11d
+ xor r14d,r9d
+ add r12d,r15d
+ mov r15d,r10d
+
+ ror r13d,6
+ and r8d,r9d
+ and r15d,r11d
+
+ ror r14d,2
+ add r12d,r13d
+ add r8d,r15d
+
+ add eax,r12d
+ add r8d,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add r8d,r14d
+
+ mov r13d,DWORD PTR[52+rsp]
+ mov r14d,DWORD PTR[40+rsp]
+ mov r12d,r13d
+ mov r15d,r14d
+
+ ror r12d,11
+ xor r12d,r13d
+ shr r13d,3
+
+ ror r12d,7
+ xor r13d,r12d
+ mov r12d,DWORD PTR[20+rsp]
+
+ ror r15d,2
+ xor r15d,r14d
+ shr r14d,10
+
+ ror r15d,17
+ add r12d,r13d
+ xor r14d,r15d
+
+ add r12d,DWORD PTR[48+rsp]
+ mov r13d,eax
+ add r12d,r14d
+ mov r14d,r8d
+ ror r13d,14
+ mov r15d,ebx
+ mov DWORD PTR[48+rsp],r12d
+
+ ror r14d,9
+ xor r13d,eax
+ xor r15d,ecx
+
+ ror r13d,5
+ add r12d,edx
+ xor r14d,r8d
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,eax
+ mov edx,r9d
+
+ ror r14d,11
+ xor r13d,eax
+ xor r15d,ecx
+
+ xor edx,r10d
+ xor r14d,r8d
+ add r12d,r15d
+ mov r15d,r9d
+
+ ror r13d,6
+ and edx,r8d
+ and r15d,r10d
+
+ ror r14d,2
+ add r12d,r13d
+ add edx,r15d
+
+ add r11d,r12d
+ add edx,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add edx,r14d
+
+ mov r13d,DWORD PTR[56+rsp]
+ mov r14d,DWORD PTR[44+rsp]
+ mov r12d,r13d
+ mov r15d,r14d
+
+ ror r12d,11
+ xor r12d,r13d
+ shr r13d,3
+
+ ror r12d,7
+ xor r13d,r12d
+ mov r12d,DWORD PTR[24+rsp]
+
+ ror r15d,2
+ xor r15d,r14d
+ shr r14d,10
+
+ ror r15d,17
+ add r12d,r13d
+ xor r14d,r15d
+
+ add r12d,DWORD PTR[52+rsp]
+ mov r13d,r11d
+ add r12d,r14d
+ mov r14d,edx
+ ror r13d,14
+ mov r15d,eax
+ mov DWORD PTR[52+rsp],r12d
+
+ ror r14d,9
+ xor r13d,r11d
+ xor r15d,ebx
+
+ ror r13d,5
+ add r12d,ecx
+ xor r14d,edx
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,r11d
+ mov ecx,r8d
+
+ ror r14d,11
+ xor r13d,r11d
+ xor r15d,ebx
+
+ xor ecx,r9d
+ xor r14d,edx
+ add r12d,r15d
+ mov r15d,r8d
+
+ ror r13d,6
+ and ecx,edx
+ and r15d,r9d
+
+ ror r14d,2
+ add r12d,r13d
+ add ecx,r15d
+
+ add r10d,r12d
+ add ecx,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add ecx,r14d
+
+ mov r13d,DWORD PTR[60+rsp]
+ mov r14d,DWORD PTR[48+rsp]
+ mov r12d,r13d
+ mov r15d,r14d
+
+ ror r12d,11
+ xor r12d,r13d
+ shr r13d,3
+
+ ror r12d,7
+ xor r13d,r12d
+ mov r12d,DWORD PTR[28+rsp]
+
+ ror r15d,2
+ xor r15d,r14d
+ shr r14d,10
+
+ ror r15d,17
+ add r12d,r13d
+ xor r14d,r15d
+
+ add r12d,DWORD PTR[56+rsp]
+ mov r13d,r10d
+ add r12d,r14d
+ mov r14d,ecx
+ ror r13d,14
+ mov r15d,r11d
+ mov DWORD PTR[56+rsp],r12d
+
+ ror r14d,9
+ xor r13d,r10d
+ xor r15d,eax
+
+ ror r13d,5
+ add r12d,ebx
+ xor r14d,ecx
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,r10d
+ mov ebx,edx
+
+ ror r14d,11
+ xor r13d,r10d
+ xor r15d,eax
+
+ xor ebx,r8d
+ xor r14d,ecx
+ add r12d,r15d
+ mov r15d,edx
+
+ ror r13d,6
+ and ebx,ecx
+ and r15d,r8d
+
+ ror r14d,2
+ add r12d,r13d
+ add ebx,r15d
+
+ add r9d,r12d
+ add ebx,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add ebx,r14d
+
+ mov r13d,DWORD PTR[rsp]
+ mov r14d,DWORD PTR[52+rsp]
+ mov r12d,r13d
+ mov r15d,r14d
+
+ ror r12d,11
+ xor r12d,r13d
+ shr r13d,3
+
+ ror r12d,7
+ xor r13d,r12d
+ mov r12d,DWORD PTR[32+rsp]
+
+ ror r15d,2
+ xor r15d,r14d
+ shr r14d,10
+
+ ror r15d,17
+ add r12d,r13d
+ xor r14d,r15d
+
+ add r12d,DWORD PTR[60+rsp]
+ mov r13d,r9d
+ add r12d,r14d
+ mov r14d,ebx
+ ror r13d,14
+ mov r15d,r10d
+ mov DWORD PTR[60+rsp],r12d
+
+ ror r14d,9
+ xor r13d,r9d
+ xor r15d,r11d
+
+ ror r13d,5
+ add r12d,eax
+ xor r14d,ebx
+
+ add r12d,DWORD PTR[rdi*4+rbp]
+ and r15d,r9d
+ mov eax,ecx
+
+ ror r14d,11
+ xor r13d,r9d
+ xor r15d,r11d
+
+ xor eax,edx
+ xor r14d,ebx
+ add r12d,r15d
+ mov r15d,ecx
+
+ ror r13d,6
+ and eax,ebx
+ and r15d,edx
+
+ ror r14d,2
+ add r12d,r13d
+ add eax,r15d
+
+ add r8d,r12d
+ add eax,r12d
+ lea rdi,QWORD PTR[1+rdi]
+ add eax,r14d
+
+ cmp rdi,64
+ jb $L$rounds_16_xx
+
+ mov rdi,QWORD PTR[((64+0))+rsp]
+ lea rsi,QWORD PTR[64+rsi]
+
+ add eax,DWORD PTR[rdi]
+ add ebx,DWORD PTR[4+rdi]
+ add ecx,DWORD PTR[8+rdi]
+ add edx,DWORD PTR[12+rdi]
+ add r8d,DWORD PTR[16+rdi]
+ add r9d,DWORD PTR[20+rdi]
+ add r10d,DWORD PTR[24+rdi]
+ add r11d,DWORD PTR[28+rdi]
+
+ cmp rsi,QWORD PTR[((64+16))+rsp]
+
+ mov DWORD PTR[rdi],eax
+ mov DWORD PTR[4+rdi],ebx
+ mov DWORD PTR[8+rdi],ecx
+ mov DWORD PTR[12+rdi],edx
+ mov DWORD PTR[16+rdi],r8d
+ mov DWORD PTR[20+rdi],r9d
+ mov DWORD PTR[24+rdi],r10d
+ mov DWORD PTR[28+rdi],r11d
+ jb $L$loop
+
+ mov rsi,QWORD PTR[((64+24))+rsp]
+ mov r15,QWORD PTR[rsi]
+ mov r14,QWORD PTR[8+rsi]
+ mov r13,QWORD PTR[16+rsi]
+ mov r12,QWORD PTR[24+rsi]
+ mov rbp,QWORD PTR[32+rsi]
+ mov rbx,QWORD PTR[40+rsi]
+ lea rsp,QWORD PTR[48+rsi]
+$L$epilogue::
+ mov rdi,QWORD PTR[8+rsp] ;WIN64 epilogue
+ mov rsi,QWORD PTR[16+rsp]
+ DB 0F3h,0C3h ;repret
+$L$SEH_end_sha256_block_data_order::
+sha256_block_data_order ENDP
+ALIGN 64
+
+K256::
+ DD 0428a2f98h,071374491h,0b5c0fbcfh,0e9b5dba5h
+ DD 03956c25bh,059f111f1h,0923f82a4h,0ab1c5ed5h
+ DD 0d807aa98h,012835b01h,0243185beh,0550c7dc3h
+ DD 072be5d74h,080deb1feh,09bdc06a7h,0c19bf174h
+ DD 0e49b69c1h,0efbe4786h,00fc19dc6h,0240ca1cch
+ DD 02de92c6fh,04a7484aah,05cb0a9dch,076f988dah
+ DD 0983e5152h,0a831c66dh,0b00327c8h,0bf597fc7h
+ DD 0c6e00bf3h,0d5a79147h,006ca6351h,014292967h
+ DD 027b70a85h,02e1b2138h,04d2c6dfch,053380d13h
+ DD 0650a7354h,0766a0abbh,081c2c92eh,092722c85h
+ DD 0a2bfe8a1h,0a81a664bh,0c24b8b70h,0c76c51a3h
+ DD 0d192e819h,0d6990624h,0f40e3585h,0106aa070h
+ DD 019a4c116h,01e376c08h,02748774ch,034b0bcb5h
+ DD 0391c0cb3h,04ed8aa4ah,05b9cca4fh,0682e6ff3h
+ DD 0748f82eeh,078a5636fh,084c87814h,08cc70208h
+ DD 090befffah,0a4506cebh,0bef9a3f7h,0c67178f2h
+
+.text$ ENDS
+END
+
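The unrolled round bodies in the MASM flavor above (and in the MinGW flavor that follows) do not evaluate the FIPS 180-2 Sigma/sigma functions as three separate rotations; each value is built with a composed rotate-then-xor chain, for example Sigma1(e) as (((e ror 14) ^ e) ror 5 ^ e) ror 6, so it is accumulated in a single temporary register. A minimal standalone C sketch, not part of the commit (rotr32 is a local helper introduced here), that checks those chains against the textbook definitions:

/*
 * Illustrative only -- not part of the generated assembler.  Each round
 * above builds the FIPS 180-2 functions from composed rotations:
 *   Sigma1(e) = (((e ror 14) ^ e) ror  5 ^ e) ror 6
 *   Sigma0(a) = (((a ror  9) ^ a) ror 11 ^ a) ror 2
 *   sigma0(w) =  ((w ror 11) ^ w) ror  7 ^ (w >> 3)
 *   sigma1(w) =  ((w ror  2) ^ w) ror 17 ^ (w >> 10)
 * The asserts below confirm these equal the usual three-rotation forms.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t rotr32(uint32_t x, unsigned n)
{
	return (x >> n) | (x << (32 - n));
}

int main(void)
{
	uint32_t x;

	for (x = 1; x != 0; x <<= 1) {
		/* Sigma1: ror 14 / xor / ror 5 / xor / ror 6 */
		assert(rotr32(rotr32(rotr32(x, 14) ^ x, 5) ^ x, 6) ==
		    (rotr32(x, 6) ^ rotr32(x, 11) ^ rotr32(x, 25)));
		/* Sigma0: ror 9 / xor / ror 11 / xor / ror 2 */
		assert(rotr32(rotr32(rotr32(x, 9) ^ x, 11) ^ x, 2) ==
		    (rotr32(x, 2) ^ rotr32(x, 13) ^ rotr32(x, 22)));
		/* sigma0: ror 11 / xor / ror 7, then xor with w >> 3 */
		assert((rotr32(rotr32(x, 11) ^ x, 7) ^ (x >> 3)) ==
		    (rotr32(x, 7) ^ rotr32(x, 18) ^ (x >> 3)));
		/* sigma1: ror 2 / xor / ror 17, then xor with w >> 10 */
		assert((rotr32(rotr32(x, 2) ^ x, 17) ^ (x >> 10)) ==
		    (rotr32(x, 17) ^ rotr32(x, 19) ^ (x >> 10)));
	}
	return 0;
}
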
diff --git a/crypto/libressl/crypto/sha/sha256-mingw64-x86_64.S b/crypto/libressl/crypto/sha/sha256-mingw64-x86_64.S
new file mode 100644
index 0000000..3de981b
--- /dev/null
+++ b/crypto/libressl/crypto/sha/sha256-mingw64-x86_64.S
@@ -0,0 +1,1790 @@
+#include "x86_arch.h"
+.text
+
+.globl sha256_block_data_order
+.def sha256_block_data_order; .scl 2; .type 32; .endef
+.p2align 4
+sha256_block_data_order:
+ movq %rdi,8(%rsp)
+ movq %rsi,16(%rsp)
+ movq %rsp,%rax
+.LSEH_begin_sha256_block_data_order:
+ movq %rcx,%rdi
+ movq %rdx,%rsi
+ movq %r8,%rdx
+ movq %r9,%rcx
+
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+ movq %rsp,%r11
+ shlq $4,%rdx
+ subq $64+32,%rsp
+ leaq (%rsi,%rdx,4),%rdx
+ andq $-64,%rsp
+ movq %rdi,64+0(%rsp)
+ movq %rsi,64+8(%rsp)
+ movq %rdx,64+16(%rsp)
+ movq %r11,64+24(%rsp)
+.Lprologue:
+
+ leaq K256(%rip),%rbp
+
+ movl 0(%rdi),%eax
+ movl 4(%rdi),%ebx
+ movl 8(%rdi),%ecx
+ movl 12(%rdi),%edx
+ movl 16(%rdi),%r8d
+ movl 20(%rdi),%r9d
+ movl 24(%rdi),%r10d
+ movl 28(%rdi),%r11d
+ jmp .Lloop
+
+.p2align 4
+.Lloop:
+ xorq %rdi,%rdi
+ movl 0(%rsi),%r12d
+ movl %r8d,%r13d
+ movl %eax,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r9d,%r15d
+ movl %r12d,0(%rsp)
+
+ rorl $9,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ rorl $5,%r13d
+ addl %r11d,%r12d
+ xorl %eax,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r8d,%r15d
+ movl %ebx,%r11d
+
+ rorl $11,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ xorl %ecx,%r11d
+ xorl %eax,%r14d
+ addl %r15d,%r12d
+ movl %ebx,%r15d
+
+ rorl $6,%r13d
+ andl %eax,%r11d
+ andl %ecx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r11d
+
+ addl %r12d,%edx
+ addl %r12d,%r11d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r11d
+
+ movl 4(%rsi),%r12d
+ movl %edx,%r13d
+ movl %r11d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r8d,%r15d
+ movl %r12d,4(%rsp)
+
+ rorl $9,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ rorl $5,%r13d
+ addl %r10d,%r12d
+ xorl %r11d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %edx,%r15d
+ movl %eax,%r10d
+
+ rorl $11,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ xorl %ebx,%r10d
+ xorl %r11d,%r14d
+ addl %r15d,%r12d
+ movl %eax,%r15d
+
+ rorl $6,%r13d
+ andl %r11d,%r10d
+ andl %ebx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r10d
+
+ addl %r12d,%ecx
+ addl %r12d,%r10d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r10d
+
+ movl 8(%rsi),%r12d
+ movl %ecx,%r13d
+ movl %r10d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %edx,%r15d
+ movl %r12d,8(%rsp)
+
+ rorl $9,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ rorl $5,%r13d
+ addl %r9d,%r12d
+ xorl %r10d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ecx,%r15d
+ movl %r11d,%r9d
+
+ rorl $11,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ xorl %eax,%r9d
+ xorl %r10d,%r14d
+ addl %r15d,%r12d
+ movl %r11d,%r15d
+
+ rorl $6,%r13d
+ andl %r10d,%r9d
+ andl %eax,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r9d
+
+ addl %r12d,%ebx
+ addl %r12d,%r9d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r9d
+
+ movl 12(%rsi),%r12d
+ movl %ebx,%r13d
+ movl %r9d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %ecx,%r15d
+ movl %r12d,12(%rsp)
+
+ rorl $9,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ rorl $5,%r13d
+ addl %r8d,%r12d
+ xorl %r9d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ebx,%r15d
+ movl %r10d,%r8d
+
+ rorl $11,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ xorl %r11d,%r8d
+ xorl %r9d,%r14d
+ addl %r15d,%r12d
+ movl %r10d,%r15d
+
+ rorl $6,%r13d
+ andl %r9d,%r8d
+ andl %r11d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r8d
+
+ addl %r12d,%eax
+ addl %r12d,%r8d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r8d
+
+ movl 16(%rsi),%r12d
+ movl %eax,%r13d
+ movl %r8d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %ebx,%r15d
+ movl %r12d,16(%rsp)
+
+ rorl $9,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ rorl $5,%r13d
+ addl %edx,%r12d
+ xorl %r8d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %eax,%r15d
+ movl %r9d,%edx
+
+ rorl $11,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ xorl %r10d,%edx
+ xorl %r8d,%r14d
+ addl %r15d,%r12d
+ movl %r9d,%r15d
+
+ rorl $6,%r13d
+ andl %r8d,%edx
+ andl %r10d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%edx
+
+ addl %r12d,%r11d
+ addl %r12d,%edx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%edx
+
+ movl 20(%rsi),%r12d
+ movl %r11d,%r13d
+ movl %edx,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %eax,%r15d
+ movl %r12d,20(%rsp)
+
+ rorl $9,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ rorl $5,%r13d
+ addl %ecx,%r12d
+ xorl %edx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r11d,%r15d
+ movl %r8d,%ecx
+
+ rorl $11,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ xorl %r9d,%ecx
+ xorl %edx,%r14d
+ addl %r15d,%r12d
+ movl %r8d,%r15d
+
+ rorl $6,%r13d
+ andl %edx,%ecx
+ andl %r9d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ecx
+
+ addl %r12d,%r10d
+ addl %r12d,%ecx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ecx
+
+ movl 24(%rsi),%r12d
+ movl %r10d,%r13d
+ movl %ecx,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r11d,%r15d
+ movl %r12d,24(%rsp)
+
+ rorl $9,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ rorl $5,%r13d
+ addl %ebx,%r12d
+ xorl %ecx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r10d,%r15d
+ movl %edx,%ebx
+
+ rorl $11,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ xorl %r8d,%ebx
+ xorl %ecx,%r14d
+ addl %r15d,%r12d
+ movl %edx,%r15d
+
+ rorl $6,%r13d
+ andl %ecx,%ebx
+ andl %r8d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ebx
+
+ addl %r12d,%r9d
+ addl %r12d,%ebx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ebx
+
+ movl 28(%rsi),%r12d
+ movl %r9d,%r13d
+ movl %ebx,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r10d,%r15d
+ movl %r12d,28(%rsp)
+
+ rorl $9,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ rorl $5,%r13d
+ addl %eax,%r12d
+ xorl %ebx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r9d,%r15d
+ movl %ecx,%eax
+
+ rorl $11,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ xorl %edx,%eax
+ xorl %ebx,%r14d
+ addl %r15d,%r12d
+ movl %ecx,%r15d
+
+ rorl $6,%r13d
+ andl %ebx,%eax
+ andl %edx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%eax
+
+ addl %r12d,%r8d
+ addl %r12d,%eax
+ leaq 1(%rdi),%rdi
+ addl %r14d,%eax
+
+ movl 32(%rsi),%r12d
+ movl %r8d,%r13d
+ movl %eax,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r9d,%r15d
+ movl %r12d,32(%rsp)
+
+ rorl $9,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ rorl $5,%r13d
+ addl %r11d,%r12d
+ xorl %eax,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r8d,%r15d
+ movl %ebx,%r11d
+
+ rorl $11,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ xorl %ecx,%r11d
+ xorl %eax,%r14d
+ addl %r15d,%r12d
+ movl %ebx,%r15d
+
+ rorl $6,%r13d
+ andl %eax,%r11d
+ andl %ecx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r11d
+
+ addl %r12d,%edx
+ addl %r12d,%r11d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r11d
+
+ movl 36(%rsi),%r12d
+ movl %edx,%r13d
+ movl %r11d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r8d,%r15d
+ movl %r12d,36(%rsp)
+
+ rorl $9,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ rorl $5,%r13d
+ addl %r10d,%r12d
+ xorl %r11d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %edx,%r15d
+ movl %eax,%r10d
+
+ rorl $11,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ xorl %ebx,%r10d
+ xorl %r11d,%r14d
+ addl %r15d,%r12d
+ movl %eax,%r15d
+
+ rorl $6,%r13d
+ andl %r11d,%r10d
+ andl %ebx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r10d
+
+ addl %r12d,%ecx
+ addl %r12d,%r10d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r10d
+
+ movl 40(%rsi),%r12d
+ movl %ecx,%r13d
+ movl %r10d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %edx,%r15d
+ movl %r12d,40(%rsp)
+
+ rorl $9,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ rorl $5,%r13d
+ addl %r9d,%r12d
+ xorl %r10d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ecx,%r15d
+ movl %r11d,%r9d
+
+ rorl $11,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ xorl %eax,%r9d
+ xorl %r10d,%r14d
+ addl %r15d,%r12d
+ movl %r11d,%r15d
+
+ rorl $6,%r13d
+ andl %r10d,%r9d
+ andl %eax,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r9d
+
+ addl %r12d,%ebx
+ addl %r12d,%r9d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r9d
+
+ movl 44(%rsi),%r12d
+ movl %ebx,%r13d
+ movl %r9d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %ecx,%r15d
+ movl %r12d,44(%rsp)
+
+ rorl $9,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ rorl $5,%r13d
+ addl %r8d,%r12d
+ xorl %r9d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ebx,%r15d
+ movl %r10d,%r8d
+
+ rorl $11,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ xorl %r11d,%r8d
+ xorl %r9d,%r14d
+ addl %r15d,%r12d
+ movl %r10d,%r15d
+
+ rorl $6,%r13d
+ andl %r9d,%r8d
+ andl %r11d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r8d
+
+ addl %r12d,%eax
+ addl %r12d,%r8d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r8d
+
+ movl 48(%rsi),%r12d
+ movl %eax,%r13d
+ movl %r8d,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %ebx,%r15d
+ movl %r12d,48(%rsp)
+
+ rorl $9,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ rorl $5,%r13d
+ addl %edx,%r12d
+ xorl %r8d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %eax,%r15d
+ movl %r9d,%edx
+
+ rorl $11,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ xorl %r10d,%edx
+ xorl %r8d,%r14d
+ addl %r15d,%r12d
+ movl %r9d,%r15d
+
+ rorl $6,%r13d
+ andl %r8d,%edx
+ andl %r10d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%edx
+
+ addl %r12d,%r11d
+ addl %r12d,%edx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%edx
+
+ movl 52(%rsi),%r12d
+ movl %r11d,%r13d
+ movl %edx,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %eax,%r15d
+ movl %r12d,52(%rsp)
+
+ rorl $9,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ rorl $5,%r13d
+ addl %ecx,%r12d
+ xorl %edx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r11d,%r15d
+ movl %r8d,%ecx
+
+ rorl $11,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ xorl %r9d,%ecx
+ xorl %edx,%r14d
+ addl %r15d,%r12d
+ movl %r8d,%r15d
+
+ rorl $6,%r13d
+ andl %edx,%ecx
+ andl %r9d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ecx
+
+ addl %r12d,%r10d
+ addl %r12d,%ecx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ecx
+
+ movl 56(%rsi),%r12d
+ movl %r10d,%r13d
+ movl %ecx,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r11d,%r15d
+ movl %r12d,56(%rsp)
+
+ rorl $9,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ rorl $5,%r13d
+ addl %ebx,%r12d
+ xorl %ecx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r10d,%r15d
+ movl %edx,%ebx
+
+ rorl $11,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ xorl %r8d,%ebx
+ xorl %ecx,%r14d
+ addl %r15d,%r12d
+ movl %edx,%r15d
+
+ rorl $6,%r13d
+ andl %ecx,%ebx
+ andl %r8d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ebx
+
+ addl %r12d,%r9d
+ addl %r12d,%ebx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ebx
+
+ movl 60(%rsi),%r12d
+ movl %r9d,%r13d
+ movl %ebx,%r14d
+ bswapl %r12d
+ rorl $14,%r13d
+ movl %r10d,%r15d
+ movl %r12d,60(%rsp)
+
+ rorl $9,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ rorl $5,%r13d
+ addl %eax,%r12d
+ xorl %ebx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r9d,%r15d
+ movl %ecx,%eax
+
+ rorl $11,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ xorl %edx,%eax
+ xorl %ebx,%r14d
+ addl %r15d,%r12d
+ movl %ecx,%r15d
+
+ rorl $6,%r13d
+ andl %ebx,%eax
+ andl %edx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%eax
+
+ addl %r12d,%r8d
+ addl %r12d,%eax
+ leaq 1(%rdi),%rdi
+ addl %r14d,%eax
+
+ jmp .Lrounds_16_xx
+.p2align 4
+.Lrounds_16_xx:
+ movl 4(%rsp),%r13d
+ movl 56(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 36(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 0(%rsp),%r12d
+ movl %r8d,%r13d
+ addl %r14d,%r12d
+ movl %eax,%r14d
+ rorl $14,%r13d
+ movl %r9d,%r15d
+ movl %r12d,0(%rsp)
+
+ rorl $9,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ rorl $5,%r13d
+ addl %r11d,%r12d
+ xorl %eax,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r8d,%r15d
+ movl %ebx,%r11d
+
+ rorl $11,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ xorl %ecx,%r11d
+ xorl %eax,%r14d
+ addl %r15d,%r12d
+ movl %ebx,%r15d
+
+ rorl $6,%r13d
+ andl %eax,%r11d
+ andl %ecx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r11d
+
+ addl %r12d,%edx
+ addl %r12d,%r11d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r11d
+
+ movl 8(%rsp),%r13d
+ movl 60(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 40(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 4(%rsp),%r12d
+ movl %edx,%r13d
+ addl %r14d,%r12d
+ movl %r11d,%r14d
+ rorl $14,%r13d
+ movl %r8d,%r15d
+ movl %r12d,4(%rsp)
+
+ rorl $9,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ rorl $5,%r13d
+ addl %r10d,%r12d
+ xorl %r11d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %edx,%r15d
+ movl %eax,%r10d
+
+ rorl $11,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ xorl %ebx,%r10d
+ xorl %r11d,%r14d
+ addl %r15d,%r12d
+ movl %eax,%r15d
+
+ rorl $6,%r13d
+ andl %r11d,%r10d
+ andl %ebx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r10d
+
+ addl %r12d,%ecx
+ addl %r12d,%r10d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r10d
+
+ movl 12(%rsp),%r13d
+ movl 0(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 44(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 8(%rsp),%r12d
+ movl %ecx,%r13d
+ addl %r14d,%r12d
+ movl %r10d,%r14d
+ rorl $14,%r13d
+ movl %edx,%r15d
+ movl %r12d,8(%rsp)
+
+ rorl $9,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ rorl $5,%r13d
+ addl %r9d,%r12d
+ xorl %r10d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ecx,%r15d
+ movl %r11d,%r9d
+
+ rorl $11,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ xorl %eax,%r9d
+ xorl %r10d,%r14d
+ addl %r15d,%r12d
+ movl %r11d,%r15d
+
+ rorl $6,%r13d
+ andl %r10d,%r9d
+ andl %eax,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r9d
+
+ addl %r12d,%ebx
+ addl %r12d,%r9d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r9d
+
+ movl 16(%rsp),%r13d
+ movl 4(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 48(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 12(%rsp),%r12d
+ movl %ebx,%r13d
+ addl %r14d,%r12d
+ movl %r9d,%r14d
+ rorl $14,%r13d
+ movl %ecx,%r15d
+ movl %r12d,12(%rsp)
+
+ rorl $9,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ rorl $5,%r13d
+ addl %r8d,%r12d
+ xorl %r9d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ebx,%r15d
+ movl %r10d,%r8d
+
+ rorl $11,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ xorl %r11d,%r8d
+ xorl %r9d,%r14d
+ addl %r15d,%r12d
+ movl %r10d,%r15d
+
+ rorl $6,%r13d
+ andl %r9d,%r8d
+ andl %r11d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r8d
+
+ addl %r12d,%eax
+ addl %r12d,%r8d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r8d
+
+ movl 20(%rsp),%r13d
+ movl 8(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 52(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 16(%rsp),%r12d
+ movl %eax,%r13d
+ addl %r14d,%r12d
+ movl %r8d,%r14d
+ rorl $14,%r13d
+ movl %ebx,%r15d
+ movl %r12d,16(%rsp)
+
+ rorl $9,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ rorl $5,%r13d
+ addl %edx,%r12d
+ xorl %r8d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %eax,%r15d
+ movl %r9d,%edx
+
+ rorl $11,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ xorl %r10d,%edx
+ xorl %r8d,%r14d
+ addl %r15d,%r12d
+ movl %r9d,%r15d
+
+ rorl $6,%r13d
+ andl %r8d,%edx
+ andl %r10d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%edx
+
+ addl %r12d,%r11d
+ addl %r12d,%edx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%edx
+
+ movl 24(%rsp),%r13d
+ movl 12(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 56(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 20(%rsp),%r12d
+ movl %r11d,%r13d
+ addl %r14d,%r12d
+ movl %edx,%r14d
+ rorl $14,%r13d
+ movl %eax,%r15d
+ movl %r12d,20(%rsp)
+
+ rorl $9,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ rorl $5,%r13d
+ addl %ecx,%r12d
+ xorl %edx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r11d,%r15d
+ movl %r8d,%ecx
+
+ rorl $11,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ xorl %r9d,%ecx
+ xorl %edx,%r14d
+ addl %r15d,%r12d
+ movl %r8d,%r15d
+
+ rorl $6,%r13d
+ andl %edx,%ecx
+ andl %r9d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ecx
+
+ addl %r12d,%r10d
+ addl %r12d,%ecx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ecx
+
+ movl 28(%rsp),%r13d
+ movl 16(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 60(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 24(%rsp),%r12d
+ movl %r10d,%r13d
+ addl %r14d,%r12d
+ movl %ecx,%r14d
+ rorl $14,%r13d
+ movl %r11d,%r15d
+ movl %r12d,24(%rsp)
+
+ rorl $9,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ rorl $5,%r13d
+ addl %ebx,%r12d
+ xorl %ecx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r10d,%r15d
+ movl %edx,%ebx
+
+ rorl $11,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ xorl %r8d,%ebx
+ xorl %ecx,%r14d
+ addl %r15d,%r12d
+ movl %edx,%r15d
+
+ rorl $6,%r13d
+ andl %ecx,%ebx
+ andl %r8d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ebx
+
+ addl %r12d,%r9d
+ addl %r12d,%ebx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ebx
+
+ movl 32(%rsp),%r13d
+ movl 20(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 0(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 28(%rsp),%r12d
+ movl %r9d,%r13d
+ addl %r14d,%r12d
+ movl %ebx,%r14d
+ rorl $14,%r13d
+ movl %r10d,%r15d
+ movl %r12d,28(%rsp)
+
+ rorl $9,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ rorl $5,%r13d
+ addl %eax,%r12d
+ xorl %ebx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r9d,%r15d
+ movl %ecx,%eax
+
+ rorl $11,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ xorl %edx,%eax
+ xorl %ebx,%r14d
+ addl %r15d,%r12d
+ movl %ecx,%r15d
+
+ rorl $6,%r13d
+ andl %ebx,%eax
+ andl %edx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%eax
+
+ addl %r12d,%r8d
+ addl %r12d,%eax
+ leaq 1(%rdi),%rdi
+ addl %r14d,%eax
+
+ movl 36(%rsp),%r13d
+ movl 24(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 4(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 32(%rsp),%r12d
+ movl %r8d,%r13d
+ addl %r14d,%r12d
+ movl %eax,%r14d
+ rorl $14,%r13d
+ movl %r9d,%r15d
+ movl %r12d,32(%rsp)
+
+ rorl $9,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ rorl $5,%r13d
+ addl %r11d,%r12d
+ xorl %eax,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r8d,%r15d
+ movl %ebx,%r11d
+
+ rorl $11,%r14d
+ xorl %r8d,%r13d
+ xorl %r10d,%r15d
+
+ xorl %ecx,%r11d
+ xorl %eax,%r14d
+ addl %r15d,%r12d
+ movl %ebx,%r15d
+
+ rorl $6,%r13d
+ andl %eax,%r11d
+ andl %ecx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r11d
+
+ addl %r12d,%edx
+ addl %r12d,%r11d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r11d
+
+ movl 40(%rsp),%r13d
+ movl 28(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 8(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 36(%rsp),%r12d
+ movl %edx,%r13d
+ addl %r14d,%r12d
+ movl %r11d,%r14d
+ rorl $14,%r13d
+ movl %r8d,%r15d
+ movl %r12d,36(%rsp)
+
+ rorl $9,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ rorl $5,%r13d
+ addl %r10d,%r12d
+ xorl %r11d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %edx,%r15d
+ movl %eax,%r10d
+
+ rorl $11,%r14d
+ xorl %edx,%r13d
+ xorl %r9d,%r15d
+
+ xorl %ebx,%r10d
+ xorl %r11d,%r14d
+ addl %r15d,%r12d
+ movl %eax,%r15d
+
+ rorl $6,%r13d
+ andl %r11d,%r10d
+ andl %ebx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r10d
+
+ addl %r12d,%ecx
+ addl %r12d,%r10d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r10d
+
+ movl 44(%rsp),%r13d
+ movl 32(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 12(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 40(%rsp),%r12d
+ movl %ecx,%r13d
+ addl %r14d,%r12d
+ movl %r10d,%r14d
+ rorl $14,%r13d
+ movl %edx,%r15d
+ movl %r12d,40(%rsp)
+
+ rorl $9,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ rorl $5,%r13d
+ addl %r9d,%r12d
+ xorl %r10d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ecx,%r15d
+ movl %r11d,%r9d
+
+ rorl $11,%r14d
+ xorl %ecx,%r13d
+ xorl %r8d,%r15d
+
+ xorl %eax,%r9d
+ xorl %r10d,%r14d
+ addl %r15d,%r12d
+ movl %r11d,%r15d
+
+ rorl $6,%r13d
+ andl %r10d,%r9d
+ andl %eax,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r9d
+
+ addl %r12d,%ebx
+ addl %r12d,%r9d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r9d
+
+ movl 48(%rsp),%r13d
+ movl 36(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 16(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 44(%rsp),%r12d
+ movl %ebx,%r13d
+ addl %r14d,%r12d
+ movl %r9d,%r14d
+ rorl $14,%r13d
+ movl %ecx,%r15d
+ movl %r12d,44(%rsp)
+
+ rorl $9,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ rorl $5,%r13d
+ addl %r8d,%r12d
+ xorl %r9d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %ebx,%r15d
+ movl %r10d,%r8d
+
+ rorl $11,%r14d
+ xorl %ebx,%r13d
+ xorl %edx,%r15d
+
+ xorl %r11d,%r8d
+ xorl %r9d,%r14d
+ addl %r15d,%r12d
+ movl %r10d,%r15d
+
+ rorl $6,%r13d
+ andl %r9d,%r8d
+ andl %r11d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%r8d
+
+ addl %r12d,%eax
+ addl %r12d,%r8d
+ leaq 1(%rdi),%rdi
+ addl %r14d,%r8d
+
+ movl 52(%rsp),%r13d
+ movl 40(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 20(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 48(%rsp),%r12d
+ movl %eax,%r13d
+ addl %r14d,%r12d
+ movl %r8d,%r14d
+ rorl $14,%r13d
+ movl %ebx,%r15d
+ movl %r12d,48(%rsp)
+
+ rorl $9,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ rorl $5,%r13d
+ addl %edx,%r12d
+ xorl %r8d,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %eax,%r15d
+ movl %r9d,%edx
+
+ rorl $11,%r14d
+ xorl %eax,%r13d
+ xorl %ecx,%r15d
+
+ xorl %r10d,%edx
+ xorl %r8d,%r14d
+ addl %r15d,%r12d
+ movl %r9d,%r15d
+
+ rorl $6,%r13d
+ andl %r8d,%edx
+ andl %r10d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%edx
+
+ addl %r12d,%r11d
+ addl %r12d,%edx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%edx
+
+ movl 56(%rsp),%r13d
+ movl 44(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 24(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 52(%rsp),%r12d
+ movl %r11d,%r13d
+ addl %r14d,%r12d
+ movl %edx,%r14d
+ rorl $14,%r13d
+ movl %eax,%r15d
+ movl %r12d,52(%rsp)
+
+ rorl $9,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ rorl $5,%r13d
+ addl %ecx,%r12d
+ xorl %edx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r11d,%r15d
+ movl %r8d,%ecx
+
+ rorl $11,%r14d
+ xorl %r11d,%r13d
+ xorl %ebx,%r15d
+
+ xorl %r9d,%ecx
+ xorl %edx,%r14d
+ addl %r15d,%r12d
+ movl %r8d,%r15d
+
+ rorl $6,%r13d
+ andl %edx,%ecx
+ andl %r9d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ecx
+
+ addl %r12d,%r10d
+ addl %r12d,%ecx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ecx
+
+ movl 60(%rsp),%r13d
+ movl 48(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 28(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 56(%rsp),%r12d
+ movl %r10d,%r13d
+ addl %r14d,%r12d
+ movl %ecx,%r14d
+ rorl $14,%r13d
+ movl %r11d,%r15d
+ movl %r12d,56(%rsp)
+
+ rorl $9,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ rorl $5,%r13d
+ addl %ebx,%r12d
+ xorl %ecx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r10d,%r15d
+ movl %edx,%ebx
+
+ rorl $11,%r14d
+ xorl %r10d,%r13d
+ xorl %eax,%r15d
+
+ xorl %r8d,%ebx
+ xorl %ecx,%r14d
+ addl %r15d,%r12d
+ movl %edx,%r15d
+
+ rorl $6,%r13d
+ andl %ecx,%ebx
+ andl %r8d,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%ebx
+
+ addl %r12d,%r9d
+ addl %r12d,%ebx
+ leaq 1(%rdi),%rdi
+ addl %r14d,%ebx
+
+ movl 0(%rsp),%r13d
+ movl 52(%rsp),%r14d
+ movl %r13d,%r12d
+ movl %r14d,%r15d
+
+ rorl $11,%r12d
+ xorl %r13d,%r12d
+ shrl $3,%r13d
+
+ rorl $7,%r12d
+ xorl %r12d,%r13d
+ movl 32(%rsp),%r12d
+
+ rorl $2,%r15d
+ xorl %r14d,%r15d
+ shrl $10,%r14d
+
+ rorl $17,%r15d
+ addl %r13d,%r12d
+ xorl %r15d,%r14d
+
+ addl 60(%rsp),%r12d
+ movl %r9d,%r13d
+ addl %r14d,%r12d
+ movl %ebx,%r14d
+ rorl $14,%r13d
+ movl %r10d,%r15d
+ movl %r12d,60(%rsp)
+
+ rorl $9,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ rorl $5,%r13d
+ addl %eax,%r12d
+ xorl %ebx,%r14d
+
+ addl (%rbp,%rdi,4),%r12d
+ andl %r9d,%r15d
+ movl %ecx,%eax
+
+ rorl $11,%r14d
+ xorl %r9d,%r13d
+ xorl %r11d,%r15d
+
+ xorl %edx,%eax
+ xorl %ebx,%r14d
+ addl %r15d,%r12d
+ movl %ecx,%r15d
+
+ rorl $6,%r13d
+ andl %ebx,%eax
+ andl %edx,%r15d
+
+ rorl $2,%r14d
+ addl %r13d,%r12d
+ addl %r15d,%eax
+
+ addl %r12d,%r8d
+ addl %r12d,%eax
+ leaq 1(%rdi),%rdi
+ addl %r14d,%eax
+
+ cmpq $64,%rdi
+ jb .Lrounds_16_xx
+
+ movq 64+0(%rsp),%rdi
+ leaq 64(%rsi),%rsi
+
+ addl 0(%rdi),%eax
+ addl 4(%rdi),%ebx
+ addl 8(%rdi),%ecx
+ addl 12(%rdi),%edx
+ addl 16(%rdi),%r8d
+ addl 20(%rdi),%r9d
+ addl 24(%rdi),%r10d
+ addl 28(%rdi),%r11d
+
+ cmpq 64+16(%rsp),%rsi
+
+ movl %eax,0(%rdi)
+ movl %ebx,4(%rdi)
+ movl %ecx,8(%rdi)
+ movl %edx,12(%rdi)
+ movl %r8d,16(%rdi)
+ movl %r9d,20(%rdi)
+ movl %r10d,24(%rdi)
+ movl %r11d,28(%rdi)
+ jb .Lloop
+
+ movq 64+24(%rsp),%rsi
+ movq (%rsi),%r15
+ movq 8(%rsi),%r14
+ movq 16(%rsi),%r13
+ movq 24(%rsi),%r12
+ movq 32(%rsi),%rbp
+ movq 40(%rsi),%rbx
+ leaq 48(%rsi),%rsp
+.Lepilogue:
+ movq 8(%rsp),%rdi
+ movq 16(%rsp),%rsi
+ retq
+.LSEH_end_sha256_block_data_order:
+.p2align 6
+
+K256:
+.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+.long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+.long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+.long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+.long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+.long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+.long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+.long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+.long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+.long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+.long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+.long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+.long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+.long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+.long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+.long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
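The K256 table that closes both x86_64 flavors (and reappears as the C array in sha256.c below) is the standard FIPS 180-2 constant schedule: entry i is the first 32 bits of the fractional part of the cube root of the i-th prime. A short standalone check, not part of the commit (build with -lm), that recomputes the first eight entries:

/*
 * Illustrative only.  Recomputing the first eight constants should
 * reproduce the first two rows of the K256 tables above:
 *   428a2f98 71374491 b5c0fbcf e9b5dba5 3956c25b 59f111f1 923f82a4 ab1c5ed5
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static const unsigned primes[8] = { 2, 3, 5, 7, 11, 13, 17, 19 };
	int i;

	for (i = 0; i < 8; i++) {
		long double r = cbrtl((long double)primes[i]);
		uint32_t k = (uint32_t)((r - floorl(r)) * 4294967296.0L);
		printf("%08x ", k);
	}
	printf("\n");
	return 0;
}
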
diff --git a/crypto/libressl/crypto/sha/sha256.c b/crypto/libressl/crypto/sha/sha256.c
new file mode 100644
index 0000000..86e5070
--- /dev/null
+++ b/crypto/libressl/crypto/sha/sha256.c
@@ -0,0 +1,284 @@
+/* $OpenBSD: sha256.c,v 1.10 2019/01/21 23:20:31 jsg Exp $ */
+/* ====================================================================
+ * Copyright (c) 2004 The OpenSSL Project. All rights reserved
+ * according to the OpenSSL license [found in ../../LICENSE].
+ * ====================================================================
+ */
+
+#include <openssl/opensslconf.h>
+
+#if !defined(OPENSSL_NO_SHA) && !defined(OPENSSL_NO_SHA256)
+
+#include <machine/endian.h>
+
+#include <stdlib.h>
+#include <string.h>
+
+//#include <openssl/crypto.h>
+#include <openssl/sha.h>
+#include <openssl/opensslv.h>
+
+int SHA224_Init(SHA256_CTX *c)
+ {
+ memset (c,0,sizeof(*c));
+ c->h[0]=0xc1059ed8UL; c->h[1]=0x367cd507UL;
+ c->h[2]=0x3070dd17UL; c->h[3]=0xf70e5939UL;
+ c->h[4]=0xffc00b31UL; c->h[5]=0x68581511UL;
+ c->h[6]=0x64f98fa7UL; c->h[7]=0xbefa4fa4UL;
+ c->md_len=SHA224_DIGEST_LENGTH;
+ return 1;
+ }
+
+int SHA256_Init(SHA256_CTX *c)
+ {
+ memset (c,0,sizeof(*c));
+ c->h[0]=0x6a09e667UL; c->h[1]=0xbb67ae85UL;
+ c->h[2]=0x3c6ef372UL; c->h[3]=0xa54ff53aUL;
+ c->h[4]=0x510e527fUL; c->h[5]=0x9b05688cUL;
+ c->h[6]=0x1f83d9abUL; c->h[7]=0x5be0cd19UL;
+ c->md_len=SHA256_DIGEST_LENGTH;
+ return 1;
+ }
+
+unsigned char *SHA224(const unsigned char *d, size_t n, unsigned char *md)
+ {
+ SHA256_CTX c;
+ static unsigned char m[SHA224_DIGEST_LENGTH];
+
+ if (md == NULL) md=m;
+ SHA224_Init(&c);
+ SHA256_Update(&c,d,n);
+ SHA256_Final(md,&c);
+ explicit_bzero(&c,sizeof(c));
+ return(md);
+ }
+
+unsigned char *SHA256(const unsigned char *d, size_t n, unsigned char *md)
+ {
+ SHA256_CTX c;
+ static unsigned char m[SHA256_DIGEST_LENGTH];
+
+ if (md == NULL) md=m;
+ SHA256_Init(&c);
+ SHA256_Update(&c,d,n);
+ SHA256_Final(md,&c);
+ explicit_bzero(&c,sizeof(c));
+ return(md);
+ }
+
+int SHA224_Update(SHA256_CTX *c, const void *data, size_t len)
+{ return SHA256_Update (c,data,len); }
+int SHA224_Final (unsigned char *md, SHA256_CTX *c)
+{ return SHA256_Final (md,c); }
+
+#define DATA_ORDER_IS_BIG_ENDIAN
+
+#define HASH_LONG SHA_LONG
+#define HASH_CTX SHA256_CTX
+#define HASH_CBLOCK SHA_CBLOCK
+/*
+ * Note that FIPS 180-2 discusses "Truncation of the Hash Function Output";
+ * the default: case below covers it.  It is not clear, however, whether
+ * truncation to a byte count not divisible by 4 is permitted; most likely
+ * not, but if it is, the default: case would have to be extended.
+ * The idea behind separate cases for the pre-defined lengths is to let the
+ * compiler decide whether it is appropriate to unroll the small loops.
+ */
+#define HASH_MAKE_STRING(c,s) do { \
+ unsigned long ll; \
+ unsigned int nn; \
+ switch ((c)->md_len) \
+ { case SHA224_DIGEST_LENGTH: \
+ for (nn=0;nn<SHA224_DIGEST_LENGTH/4;nn++) \
+ { ll=(c)->h[nn]; HOST_l2c(ll,(s)); } \
+ break; \
+ case SHA256_DIGEST_LENGTH: \
+ for (nn=0;nn<SHA256_DIGEST_LENGTH/4;nn++) \
+ { ll=(c)->h[nn]; HOST_l2c(ll,(s)); } \
+ break; \
+ default: \
+ if ((c)->md_len > SHA256_DIGEST_LENGTH) \
+ return 0; \
+ for (nn=0;nn<(c)->md_len/4;nn++) \
+ { ll=(c)->h[nn]; HOST_l2c(ll,(s)); } \
+ break; \
+ } \
+ } while (0)
+
+#define HASH_UPDATE SHA256_Update
+#define HASH_TRANSFORM SHA256_Transform
+#define HASH_FINAL SHA256_Final
+#define HASH_BLOCK_DATA_ORDER sha256_block_data_order
+#ifndef SHA256_ASM
+static
+#endif
+void sha256_block_data_order (SHA256_CTX *ctx, const void *in, size_t num);
+
+#include "md32_common.h"
+
+#ifndef SHA256_ASM
+static const SHA_LONG K256[64] = {
+ 0x428a2f98UL,0x71374491UL,0xb5c0fbcfUL,0xe9b5dba5UL,
+ 0x3956c25bUL,0x59f111f1UL,0x923f82a4UL,0xab1c5ed5UL,
+ 0xd807aa98UL,0x12835b01UL,0x243185beUL,0x550c7dc3UL,
+ 0x72be5d74UL,0x80deb1feUL,0x9bdc06a7UL,0xc19bf174UL,
+ 0xe49b69c1UL,0xefbe4786UL,0x0fc19dc6UL,0x240ca1ccUL,
+ 0x2de92c6fUL,0x4a7484aaUL,0x5cb0a9dcUL,0x76f988daUL,
+ 0x983e5152UL,0xa831c66dUL,0xb00327c8UL,0xbf597fc7UL,
+ 0xc6e00bf3UL,0xd5a79147UL,0x06ca6351UL,0x14292967UL,
+ 0x27b70a85UL,0x2e1b2138UL,0x4d2c6dfcUL,0x53380d13UL,
+ 0x650a7354UL,0x766a0abbUL,0x81c2c92eUL,0x92722c85UL,
+ 0xa2bfe8a1UL,0xa81a664bUL,0xc24b8b70UL,0xc76c51a3UL,
+ 0xd192e819UL,0xd6990624UL,0xf40e3585UL,0x106aa070UL,
+ 0x19a4c116UL,0x1e376c08UL,0x2748774cUL,0x34b0bcb5UL,
+ 0x391c0cb3UL,0x4ed8aa4aUL,0x5b9cca4fUL,0x682e6ff3UL,
+ 0x748f82eeUL,0x78a5636fUL,0x84c87814UL,0x8cc70208UL,
+ 0x90befffaUL,0xa4506cebUL,0xbef9a3f7UL,0xc67178f2UL };
+
+/*
+ * The FIPS specification refers to right rotations, while our ROTATE macro
+ * is a left rotation.  This is why the rotation counts below differ from
+ * those in the FIPS document: a right rotation by N on a 32-bit word is a
+ * left rotation by 32-N.
+ */
+#define Sigma0(x) (ROTATE((x),30) ^ ROTATE((x),19) ^ ROTATE((x),10))
+#define Sigma1(x) (ROTATE((x),26) ^ ROTATE((x),21) ^ ROTATE((x),7))
+#define sigma0(x) (ROTATE((x),25) ^ ROTATE((x),14) ^ ((x)>>3))
+#define sigma1(x) (ROTATE((x),15) ^ ROTATE((x),13) ^ ((x)>>10))
+
+#define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z)))
+#define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
+
+#ifdef OPENSSL_SMALL_FOOTPRINT
+
+static void sha256_block_data_order (SHA256_CTX *ctx, const void *in, size_t num)
+ {
+ unsigned MD32_REG_T a,b,c,d,e,f,g,h,s0,s1,T1,T2;
+ SHA_LONG X[16],l;
+ int i;
+ const unsigned char *data=in;
+
+ while (num--) {
+
+ a = ctx->h[0]; b = ctx->h[1]; c = ctx->h[2]; d = ctx->h[3];
+ e = ctx->h[4]; f = ctx->h[5]; g = ctx->h[6]; h = ctx->h[7];
+
+ for (i=0;i<16;i++)
+ {
+ HOST_c2l(data,l); T1 = X[i] = l;
+ T1 += h + Sigma1(e) + Ch(e,f,g) + K256[i];
+ T2 = Sigma0(a) + Maj(a,b,c);
+ h = g; g = f; f = e; e = d + T1;
+ d = c; c = b; b = a; a = T1 + T2;
+ }
+
+ for (;i<64;i++)
+ {
+ s0 = X[(i+1)&0x0f]; s0 = sigma0(s0);
+ s1 = X[(i+14)&0x0f]; s1 = sigma1(s1);
+
+ T1 = X[i&0xf] += s0 + s1 + X[(i+9)&0xf];
+ T1 += h + Sigma1(e) + Ch(e,f,g) + K256[i];
+ T2 = Sigma0(a) + Maj(a,b,c);
+ h = g; g = f; f = e; e = d + T1;
+ d = c; c = b; b = a; a = T1 + T2;
+ }
+
+ ctx->h[0] += a; ctx->h[1] += b; ctx->h[2] += c; ctx->h[3] += d;
+ ctx->h[4] += e; ctx->h[5] += f; ctx->h[6] += g; ctx->h[7] += h;
+
+ }
+}
+
+#else
+
+#define ROUND_00_15(i,a,b,c,d,e,f,g,h) do { \
+ T1 += h + Sigma1(e) + Ch(e,f,g) + K256[i]; \
+ h = Sigma0(a) + Maj(a,b,c); \
+ d += T1; h += T1; } while (0)
+
+#define ROUND_16_63(i,a,b,c,d,e,f,g,h,X) do { \
+ s0 = X[(i+1)&0x0f]; s0 = sigma0(s0); \
+ s1 = X[(i+14)&0x0f]; s1 = sigma1(s1); \
+ T1 = X[(i)&0x0f] += s0 + s1 + X[(i+9)&0x0f]; \
+ ROUND_00_15(i,a,b,c,d,e,f,g,h); } while (0)
+
+static void sha256_block_data_order (SHA256_CTX *ctx, const void *in, size_t num)
+ {
+ unsigned MD32_REG_T a,b,c,d,e,f,g,h,s0,s1,T1;
+ SHA_LONG X[16];
+ int i;
+ const unsigned char *data=in;
+
+ while (num--) {
+
+ a = ctx->h[0]; b = ctx->h[1]; c = ctx->h[2]; d = ctx->h[3];
+ e = ctx->h[4]; f = ctx->h[5]; g = ctx->h[6]; h = ctx->h[7];
+
+ if (BYTE_ORDER != LITTLE_ENDIAN &&
+ sizeof(SHA_LONG)==4 && ((size_t)in%4)==0)
+ {
+ const SHA_LONG *W=(const SHA_LONG *)data;
+
+ T1 = X[0] = W[0]; ROUND_00_15(0,a,b,c,d,e,f,g,h);
+ T1 = X[1] = W[1]; ROUND_00_15(1,h,a,b,c,d,e,f,g);
+ T1 = X[2] = W[2]; ROUND_00_15(2,g,h,a,b,c,d,e,f);
+ T1 = X[3] = W[3]; ROUND_00_15(3,f,g,h,a,b,c,d,e);
+ T1 = X[4] = W[4]; ROUND_00_15(4,e,f,g,h,a,b,c,d);
+ T1 = X[5] = W[5]; ROUND_00_15(5,d,e,f,g,h,a,b,c);
+ T1 = X[6] = W[6]; ROUND_00_15(6,c,d,e,f,g,h,a,b);
+ T1 = X[7] = W[7]; ROUND_00_15(7,b,c,d,e,f,g,h,a);
+ T1 = X[8] = W[8]; ROUND_00_15(8,a,b,c,d,e,f,g,h);
+ T1 = X[9] = W[9]; ROUND_00_15(9,h,a,b,c,d,e,f,g);
+ T1 = X[10] = W[10]; ROUND_00_15(10,g,h,a,b,c,d,e,f);
+ T1 = X[11] = W[11]; ROUND_00_15(11,f,g,h,a,b,c,d,e);
+ T1 = X[12] = W[12]; ROUND_00_15(12,e,f,g,h,a,b,c,d);
+ T1 = X[13] = W[13]; ROUND_00_15(13,d,e,f,g,h,a,b,c);
+ T1 = X[14] = W[14]; ROUND_00_15(14,c,d,e,f,g,h,a,b);
+ T1 = X[15] = W[15]; ROUND_00_15(15,b,c,d,e,f,g,h,a);
+
+ data += SHA256_CBLOCK;
+ }
+ else
+ {
+ SHA_LONG l;
+
+ HOST_c2l(data,l); T1 = X[0] = l; ROUND_00_15(0,a,b,c,d,e,f,g,h);
+ HOST_c2l(data,l); T1 = X[1] = l; ROUND_00_15(1,h,a,b,c,d,e,f,g);
+ HOST_c2l(data,l); T1 = X[2] = l; ROUND_00_15(2,g,h,a,b,c,d,e,f);
+ HOST_c2l(data,l); T1 = X[3] = l; ROUND_00_15(3,f,g,h,a,b,c,d,e);
+ HOST_c2l(data,l); T1 = X[4] = l; ROUND_00_15(4,e,f,g,h,a,b,c,d);
+ HOST_c2l(data,l); T1 = X[5] = l; ROUND_00_15(5,d,e,f,g,h,a,b,c);
+ HOST_c2l(data,l); T1 = X[6] = l; ROUND_00_15(6,c,d,e,f,g,h,a,b);
+ HOST_c2l(data,l); T1 = X[7] = l; ROUND_00_15(7,b,c,d,e,f,g,h,a);
+ HOST_c2l(data,l); T1 = X[8] = l; ROUND_00_15(8,a,b,c,d,e,f,g,h);
+ HOST_c2l(data,l); T1 = X[9] = l; ROUND_00_15(9,h,a,b,c,d,e,f,g);
+ HOST_c2l(data,l); T1 = X[10] = l; ROUND_00_15(10,g,h,a,b,c,d,e,f);
+ HOST_c2l(data,l); T1 = X[11] = l; ROUND_00_15(11,f,g,h,a,b,c,d,e);
+ HOST_c2l(data,l); T1 = X[12] = l; ROUND_00_15(12,e,f,g,h,a,b,c,d);
+ HOST_c2l(data,l); T1 = X[13] = l; ROUND_00_15(13,d,e,f,g,h,a,b,c);
+ HOST_c2l(data,l); T1 = X[14] = l; ROUND_00_15(14,c,d,e,f,g,h,a,b);
+ HOST_c2l(data,l); T1 = X[15] = l; ROUND_00_15(15,b,c,d,e,f,g,h,a);
+ }
+
+ for (i=16;i<64;i+=8)
+ {
+ ROUND_16_63(i+0,a,b,c,d,e,f,g,h,X);
+ ROUND_16_63(i+1,h,a,b,c,d,e,f,g,X);
+ ROUND_16_63(i+2,g,h,a,b,c,d,e,f,X);
+ ROUND_16_63(i+3,f,g,h,a,b,c,d,e,X);
+ ROUND_16_63(i+4,e,f,g,h,a,b,c,d,X);
+ ROUND_16_63(i+5,d,e,f,g,h,a,b,c,X);
+ ROUND_16_63(i+6,c,d,e,f,g,h,a,b,X);
+ ROUND_16_63(i+7,b,c,d,e,f,g,h,a,X);
+ }
+
+ ctx->h[0] += a; ctx->h[1] += b; ctx->h[2] += c; ctx->h[3] += d;
+ ctx->h[4] += e; ctx->h[5] += f; ctx->h[6] += g; ctx->h[7] += h;
+
+ }
+ }
+
+#endif
+#endif /* SHA256_ASM */
+
+#endif /* OPENSSL_NO_SHA256 */
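sha256.c provides both the one-shot SHA256()/SHA224() entry points and the streaming interface; SHA256_Update() and SHA256_Final() are generated from md32_common.h through the HASH_UPDATE/HASH_FINAL macros, and SHA-224 reuses the same compression function with a different initial state and a 28-byte digest. A minimal caller sketch, not part of the commit, assuming only the <openssl/sha.h> declarations that this file implements:

#include <stdio.h>
#include <string.h>

#include <openssl/sha.h>

int main(void)
{
	const char *msg = "abc";
	unsigned char md[SHA256_DIGEST_LENGTH];
	SHA256_CTX ctx;
	int i;

	/* One-shot interface. */
	SHA256((const unsigned char *)msg, strlen(msg), md);

	/* Streaming interface -- same digest for the same input. */
	SHA256_Init(&ctx);
	SHA256_Update(&ctx, msg, strlen(msg));
	SHA256_Final(md, &ctx);

	/* FIPS 180-2 test vector for "abc":
	 * ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad */
	for (i = 0; i < SHA256_DIGEST_LENGTH; i++)
		printf("%02x", md[i]);
	printf("\n");
	return 0;
}
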
diff --git a/crypto/libressl/crypto/sha/sha512-elf-armv4.S b/crypto/libressl/crypto/sha/sha512-elf-armv4.S
new file mode 100644
index 0000000..8abf8d5
--- /dev/null
+++ b/crypto/libressl/crypto/sha/sha512-elf-armv4.S
@@ -0,0 +1,1786 @@
+#include "arm_arch.h"
+#ifdef __ARMEL__
+# define LO 0
+# define HI 4
+# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1
+#else
+# define HI 0
+# define LO 4
+# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1
+#endif
+
+.text
+.code 32
+.type K512,%object
+.align 5
+K512:
+WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
+WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
+WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
+WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
+WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
+WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
+WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
+WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
+WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
+WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
+WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
+WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
+WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
+WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
+WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
+WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
+WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
+WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
+WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
+WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
+WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
+WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
+WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
+WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
+WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
+WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
+WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
+WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
+WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
+WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
+WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
+WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
+WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
+WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
+WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
+WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
+WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
+WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
+WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
+WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
+.size K512,.-K512
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-sha512_block_data_order
+.skip 32-4
+
+.global sha512_block_data_order
+.type sha512_block_data_order,%function
+sha512_block_data_order:
+ sub r3,pc,#8 @ sha512_block_data_order
+ add r2,r1,r2,lsl#7 @ len to point at the end of inp
+#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
+ ldr r12,.LOPENSSL_armcap
+ ldr r12,[r3,r12] @ OPENSSL_armcap_P
+ tst r12,#1
+ bne .LNEON
+#endif
+ stmdb sp!,{r4-r12,lr}
+ sub r14,r3,#672 @ K512
+ sub sp,sp,#9*8
+
+ ldr r7,[r0,#32+LO]
+ ldr r8,[r0,#32+HI]
+ ldr r9, [r0,#48+LO]
+ ldr r10, [r0,#48+HI]
+ ldr r11, [r0,#56+LO]
+ ldr r12, [r0,#56+HI]
+.Loop:
+ str r9, [sp,#48+0]
+ str r10, [sp,#48+4]
+ str r11, [sp,#56+0]
+ str r12, [sp,#56+4]
+ ldr r5,[r0,#0+LO]
+ ldr r6,[r0,#0+HI]
+ ldr r3,[r0,#8+LO]
+ ldr r4,[r0,#8+HI]
+ ldr r9, [r0,#16+LO]
+ ldr r10, [r0,#16+HI]
+ ldr r11, [r0,#24+LO]
+ ldr r12, [r0,#24+HI]
+ str r3,[sp,#8+0]
+ str r4,[sp,#8+4]
+ str r9, [sp,#16+0]
+ str r10, [sp,#16+4]
+ str r11, [sp,#24+0]
+ str r12, [sp,#24+4]
+ ldr r3,[r0,#40+LO]
+ ldr r4,[r0,#40+HI]
+ str r3,[sp,#40+0]
+ str r4,[sp,#40+4]
+
+.L00_15:
+#if __ARM_ARCH__<7 || defined(__STRICT_ALIGNMENT)
+ ldrb r3,[r1,#7]
+ ldrb r9, [r1,#6]
+ ldrb r10, [r1,#5]
+ ldrb r11, [r1,#4]
+ ldrb r4,[r1,#3]
+ ldrb r12, [r1,#2]
+ orr r3,r3,r9,lsl#8
+ ldrb r9, [r1,#1]
+ orr r3,r3,r10,lsl#16
+ ldrb r10, [r1],#8
+ orr r3,r3,r11,lsl#24
+ orr r4,r4,r12,lsl#8
+ orr r4,r4,r9,lsl#16
+ orr r4,r4,r10,lsl#24
+#else
+ ldr r3,[r1,#4]
+ ldr r4,[r1],#8
+#ifdef __ARMEL__
+ rev r3,r3
+ rev r4,r4
+#endif
+#endif
+ @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
+ @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
+ @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
+ mov r9,r7,lsr#14
+ str r3,[sp,#64+0]
+ mov r10,r8,lsr#14
+ str r4,[sp,#64+4]
+ eor r9,r9,r8,lsl#18
+ ldr r11,[sp,#56+0] @ h.lo
+ eor r10,r10,r7,lsl#18
+ ldr r12,[sp,#56+4] @ h.hi
+ eor r9,r9,r7,lsr#18
+ eor r10,r10,r8,lsr#18
+ eor r9,r9,r8,lsl#14
+ eor r10,r10,r7,lsl#14
+ eor r9,r9,r8,lsr#9
+ eor r10,r10,r7,lsr#9
+ eor r9,r9,r7,lsl#23
+ eor r10,r10,r8,lsl#23 @ Sigma1(e)
+ adds r3,r3,r9
+ ldr r9,[sp,#40+0] @ f.lo
+ adc r4,r4,r10 @ T += Sigma1(e)
+ ldr r10,[sp,#40+4] @ f.hi
+ adds r3,r3,r11
+ ldr r11,[sp,#48+0] @ g.lo
+ adc r4,r4,r12 @ T += h
+ ldr r12,[sp,#48+4] @ g.hi
+
+ eor r9,r9,r11
+ str r7,[sp,#32+0]
+ eor r10,r10,r12
+ str r8,[sp,#32+4]
+ and r9,r9,r7
+ str r5,[sp,#0+0]
+ and r10,r10,r8
+ str r6,[sp,#0+4]
+ eor r9,r9,r11
+ ldr r11,[r14,#LO] @ K[i].lo
+ eor r10,r10,r12 @ Ch(e,f,g)
+ ldr r12,[r14,#HI] @ K[i].hi
+
+ adds r3,r3,r9
+ ldr r7,[sp,#24+0] @ d.lo
+ adc r4,r4,r10 @ T += Ch(e,f,g)
+ ldr r8,[sp,#24+4] @ d.hi
+ adds r3,r3,r11
+ and r9,r11,#0xff
+ adc r4,r4,r12 @ T += K[i]
+ adds r7,r7,r3
+ ldr r11,[sp,#8+0] @ b.lo
+ adc r8,r8,r4 @ d += T
+ teq r9,#148
+
+ ldr r12,[sp,#16+0] @ c.lo
+ orreq r14,r14,#1
+ @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
+ @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
+ @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
+ mov r9,r5,lsr#28
+ mov r10,r6,lsr#28
+ eor r9,r9,r6,lsl#4
+ eor r10,r10,r5,lsl#4
+ eor r9,r9,r6,lsr#2
+ eor r10,r10,r5,lsr#2
+ eor r9,r9,r5,lsl#30
+ eor r10,r10,r6,lsl#30
+ eor r9,r9,r6,lsr#7
+ eor r10,r10,r5,lsr#7
+ eor r9,r9,r5,lsl#25
+ eor r10,r10,r6,lsl#25 @ Sigma0(a)
+ adds r3,r3,r9
+ and r9,r5,r11
+ adc r4,r4,r10 @ T += Sigma0(a)
+
+ ldr r10,[sp,#8+4] @ b.hi
+ orr r5,r5,r11
+ ldr r11,[sp,#16+4] @ c.hi
+ and r5,r5,r12
+ and r12,r6,r10
+ orr r6,r6,r10
+ orr r5,r5,r9 @ Maj(a,b,c).lo
+ and r6,r6,r11
+ adds r5,r5,r3
+ orr r6,r6,r12 @ Maj(a,b,c).hi
+ sub sp,sp,#8
+ adc r6,r6,r4 @ h += T
+ tst r14,#1
+ add r14,r14,#8
+ tst r14,#1
+ beq .L00_15
+ ldr r9,[sp,#184+0]
+ ldr r10,[sp,#184+4]
+ bic r14,r14,#1
+.L16_79:
+ @ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
+ @ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
+ @ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7
+ mov r3,r9,lsr#1
+ ldr r11,[sp,#80+0]
+ mov r4,r10,lsr#1
+ ldr r12,[sp,#80+4]
+ eor r3,r3,r10,lsl#31
+ eor r4,r4,r9,lsl#31
+ eor r3,r3,r9,lsr#8
+ eor r4,r4,r10,lsr#8
+ eor r3,r3,r10,lsl#24
+ eor r4,r4,r9,lsl#24
+ eor r3,r3,r9,lsr#7
+ eor r4,r4,r10,lsr#7
+ eor r3,r3,r10,lsl#25
+
+ @ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
+ @ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
+ @ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
+ mov r9,r11,lsr#19
+ mov r10,r12,lsr#19
+ eor r9,r9,r12,lsl#13
+ eor r10,r10,r11,lsl#13
+ eor r9,r9,r12,lsr#29
+ eor r10,r10,r11,lsr#29
+ eor r9,r9,r11,lsl#3
+ eor r10,r10,r12,lsl#3
+ eor r9,r9,r11,lsr#6
+ eor r10,r10,r12,lsr#6
+ ldr r11,[sp,#120+0]
+ eor r9,r9,r12,lsl#26
+
+ ldr r12,[sp,#120+4]
+ adds r3,r3,r9
+ ldr r9,[sp,#192+0]
+ adc r4,r4,r10
+
+ ldr r10,[sp,#192+4]
+ adds r3,r3,r11
+ adc r4,r4,r12
+ adds r3,r3,r9
+ adc r4,r4,r10
+ @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
+ @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
+ @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
+ mov r9,r7,lsr#14
+ str r3,[sp,#64+0]
+ mov r10,r8,lsr#14
+ str r4,[sp,#64+4]
+ eor r9,r9,r8,lsl#18
+ ldr r11,[sp,#56+0] @ h.lo
+ eor r10,r10,r7,lsl#18
+ ldr r12,[sp,#56+4] @ h.hi
+ eor r9,r9,r7,lsr#18
+ eor r10,r10,r8,lsr#18
+ eor r9,r9,r8,lsl#14
+ eor r10,r10,r7,lsl#14
+ eor r9,r9,r8,lsr#9
+ eor r10,r10,r7,lsr#9
+ eor r9,r9,r7,lsl#23
+ eor r10,r10,r8,lsl#23 @ Sigma1(e)
+ adds r3,r3,r9
+ ldr r9,[sp,#40+0] @ f.lo
+ adc r4,r4,r10 @ T += Sigma1(e)
+ ldr r10,[sp,#40+4] @ f.hi
+ adds r3,r3,r11
+ ldr r11,[sp,#48+0] @ g.lo
+ adc r4,r4,r12 @ T += h
+ ldr r12,[sp,#48+4] @ g.hi
+
+ eor r9,r9,r11
+ str r7,[sp,#32+0]
+ eor r10,r10,r12
+ str r8,[sp,#32+4]
+ and r9,r9,r7
+ str r5,[sp,#0+0]
+ and r10,r10,r8
+ str r6,[sp,#0+4]
+ eor r9,r9,r11
+ ldr r11,[r14,#LO] @ K[i].lo
+ eor r10,r10,r12 @ Ch(e,f,g)
+ ldr r12,[r14,#HI] @ K[i].hi
+
+ adds r3,r3,r9
+ ldr r7,[sp,#24+0] @ d.lo
+ adc r4,r4,r10 @ T += Ch(e,f,g)
+ ldr r8,[sp,#24+4] @ d.hi
+ adds r3,r3,r11
+ and r9,r11,#0xff
+ adc r4,r4,r12 @ T += K[i]
+ adds r7,r7,r3
+ ldr r11,[sp,#8+0] @ b.lo
+ adc r8,r8,r4 @ d += T
+ teq r9,#23
+
+ ldr r12,[sp,#16+0] @ c.lo
+ orreq r14,r14,#1
+ @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
+ @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
+ @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
+ mov r9,r5,lsr#28
+ mov r10,r6,lsr#28
+ eor r9,r9,r6,lsl#4
+ eor r10,r10,r5,lsl#4
+ eor r9,r9,r6,lsr#2
+ eor r10,r10,r5,lsr#2
+ eor r9,r9,r5,lsl#30
+ eor r10,r10,r6,lsl#30
+ eor r9,r9,r6,lsr#7
+ eor r10,r10,r5,lsr#7
+ eor r9,r9,r5,lsl#25
+ eor r10,r10,r6,lsl#25 @ Sigma0(a)
+ adds r3,r3,r9
+ and r9,r5,r11
+ adc r4,r4,r10 @ T += Sigma0(a)
+
+ ldr r10,[sp,#8+4] @ b.hi
+ orr r5,r5,r11
+ ldr r11,[sp,#16+4] @ c.hi
+ and r5,r5,r12
+ and r12,r6,r10
+ orr r6,r6,r10
+ orr r5,r5,r9 @ Maj(a,b,c).lo
+ and r6,r6,r11
+ adds r5,r5,r3
+ orr r6,r6,r12 @ Maj(a,b,c).hi
+ sub sp,sp,#8
+ adc r6,r6,r4 @ h += T
+ tst r14,#1
+ add r14,r14,#8
+ ldreq r9,[sp,#184+0]
+ ldreq r10,[sp,#184+4]
+ beq .L16_79
+ bic r14,r14,#1
+
+ ldr r3,[sp,#8+0]
+ ldr r4,[sp,#8+4]
+ ldr r9, [r0,#0+LO]
+ ldr r10, [r0,#0+HI]
+ ldr r11, [r0,#8+LO]
+ ldr r12, [r0,#8+HI]
+ adds r9,r5,r9
+ str r9, [r0,#0+LO]
+ adc r10,r6,r10
+ str r10, [r0,#0+HI]
+ adds r11,r3,r11
+ str r11, [r0,#8+LO]
+ adc r12,r4,r12
+ str r12, [r0,#8+HI]
+
+ ldr r5,[sp,#16+0]
+ ldr r6,[sp,#16+4]
+ ldr r3,[sp,#24+0]
+ ldr r4,[sp,#24+4]
+ ldr r9, [r0,#16+LO]
+ ldr r10, [r0,#16+HI]
+ ldr r11, [r0,#24+LO]
+ ldr r12, [r0,#24+HI]
+ adds r9,r5,r9
+ str r9, [r0,#16+LO]
+ adc r10,r6,r10
+ str r10, [r0,#16+HI]
+ adds r11,r3,r11
+ str r11, [r0,#24+LO]
+ adc r12,r4,r12
+ str r12, [r0,#24+HI]
+
+ ldr r3,[sp,#40+0]
+ ldr r4,[sp,#40+4]
+ ldr r9, [r0,#32+LO]
+ ldr r10, [r0,#32+HI]
+ ldr r11, [r0,#40+LO]
+ ldr r12, [r0,#40+HI]
+ adds r7,r7,r9
+ str r7,[r0,#32+LO]
+ adc r8,r8,r10
+ str r8,[r0,#32+HI]
+ adds r11,r3,r11
+ str r11, [r0,#40+LO]
+ adc r12,r4,r12
+ str r12, [r0,#40+HI]
+
+ ldr r5,[sp,#48+0]
+ ldr r6,[sp,#48+4]
+ ldr r3,[sp,#56+0]
+ ldr r4,[sp,#56+4]
+ ldr r9, [r0,#48+LO]
+ ldr r10, [r0,#48+HI]
+ ldr r11, [r0,#56+LO]
+ ldr r12, [r0,#56+HI]
+ adds r9,r5,r9
+ str r9, [r0,#48+LO]
+ adc r10,r6,r10
+ str r10, [r0,#48+HI]
+ adds r11,r3,r11
+ str r11, [r0,#56+LO]
+ adc r12,r4,r12
+ str r12, [r0,#56+HI]
+
+ add sp,sp,#640
+ sub r14,r14,#640
+
+ teq r1,r2
+ bne .Loop
+
+ add sp,sp,#8*9 @ destroy frame
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r4-r12,pc}
+#else
+ ldmia sp!,{r4-r12,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ .word 0xe12fff1e @ interoperable with Thumb ISA:-)
+#endif
+#if __ARM_ARCH__>=7 && !defined(__STRICT_ALIGNMENT)
+.fpu neon
+
+.align 4
+.LNEON:
+ dmb @ errata #451034 on early Cortex A8
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+ sub r3,r3,#672 @ K512
+ vldmia r0,{d16-d23} @ load context
+.Loop_neon:
+ vshr.u64 d24,d20,#14 @ 0
+#if 0<16
+ vld1.64 {d0},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d20,#18
+ vshr.u64 d26,d20,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d20,#50
+ vsli.64 d25,d20,#46
+ vsli.64 d26,d20,#23
+#if 0<16 && defined(__ARMEL__)
+ vrev64.8 d0,d0
+#endif
+ vadd.i64 d27,d28,d23
+ veor d29,d21,d22
+ veor d24,d25
+ vand d29,d20
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d22 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d16,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d16,#34
+ vshr.u64 d26,d16,#39
+ vsli.64 d24,d16,#36
+ vsli.64 d25,d16,#30
+ vsli.64 d26,d16,#25
+ vadd.i64 d27,d0
+ vorr d30,d16,d18
+ vand d29,d16,d18
+ veor d23,d24,d25
+ vand d30,d17
+ veor d23,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d23,d27
+ vadd.i64 d19,d27
+ vadd.i64 d23,d30
+ vshr.u64 d24,d19,#14 @ 1
+#if 1<16
+ vld1.64 {d1},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d19,#18
+ vshr.u64 d26,d19,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d19,#50
+ vsli.64 d25,d19,#46
+ vsli.64 d26,d19,#23
+#if 1<16 && defined(__ARMEL__)
+ vrev64.8 d1,d1
+#endif
+ vadd.i64 d27,d28,d22
+ veor d29,d20,d21
+ veor d24,d25
+ vand d29,d19
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d21 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d23,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d23,#34
+ vshr.u64 d26,d23,#39
+ vsli.64 d24,d23,#36
+ vsli.64 d25,d23,#30
+ vsli.64 d26,d23,#25
+ vadd.i64 d27,d1
+ vorr d30,d23,d17
+ vand d29,d23,d17
+ veor d22,d24,d25
+ vand d30,d16
+ veor d22,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d22,d27
+ vadd.i64 d18,d27
+ vadd.i64 d22,d30
+ vshr.u64 d24,d18,#14 @ 2
+#if 2<16
+ vld1.64 {d2},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d18,#18
+ vshr.u64 d26,d18,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d18,#50
+ vsli.64 d25,d18,#46
+ vsli.64 d26,d18,#23
+#if 2<16 && defined(__ARMEL__)
+ vrev64.8 d2,d2
+#endif
+ vadd.i64 d27,d28,d21
+ veor d29,d19,d20
+ veor d24,d25
+ vand d29,d18
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d20 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d22,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d22,#34
+ vshr.u64 d26,d22,#39
+ vsli.64 d24,d22,#36
+ vsli.64 d25,d22,#30
+ vsli.64 d26,d22,#25
+ vadd.i64 d27,d2
+ vorr d30,d22,d16
+ vand d29,d22,d16
+ veor d21,d24,d25
+ vand d30,d23
+ veor d21,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d21,d27
+ vadd.i64 d17,d27
+ vadd.i64 d21,d30
+ vshr.u64 d24,d17,#14 @ 3
+#if 3<16
+ vld1.64 {d3},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d17,#18
+ vshr.u64 d26,d17,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d17,#50
+ vsli.64 d25,d17,#46
+ vsli.64 d26,d17,#23
+#if 3<16 && defined(__ARMEL__)
+ vrev64.8 d3,d3
+#endif
+ vadd.i64 d27,d28,d20
+ veor d29,d18,d19
+ veor d24,d25
+ vand d29,d17
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d19 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d21,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d21,#34
+ vshr.u64 d26,d21,#39
+ vsli.64 d24,d21,#36
+ vsli.64 d25,d21,#30
+ vsli.64 d26,d21,#25
+ vadd.i64 d27,d3
+ vorr d30,d21,d23
+ vand d29,d21,d23
+ veor d20,d24,d25
+ vand d30,d22
+ veor d20,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d20,d27
+ vadd.i64 d16,d27
+ vadd.i64 d20,d30
+ vshr.u64 d24,d16,#14 @ 4
+#if 4<16
+ vld1.64 {d4},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d16,#18
+ vshr.u64 d26,d16,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d16,#50
+ vsli.64 d25,d16,#46
+ vsli.64 d26,d16,#23
+#if 4<16 && defined(__ARMEL__)
+ vrev64.8 d4,d4
+#endif
+ vadd.i64 d27,d28,d19
+ veor d29,d17,d18
+ veor d24,d25
+ vand d29,d16
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d18 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d20,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d20,#34
+ vshr.u64 d26,d20,#39
+ vsli.64 d24,d20,#36
+ vsli.64 d25,d20,#30
+ vsli.64 d26,d20,#25
+ vadd.i64 d27,d4
+ vorr d30,d20,d22
+ vand d29,d20,d22
+ veor d19,d24,d25
+ vand d30,d21
+ veor d19,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d19,d27
+ vadd.i64 d23,d27
+ vadd.i64 d19,d30
+ vshr.u64 d24,d23,#14 @ 5
+#if 5<16
+ vld1.64 {d5},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d23,#18
+ vshr.u64 d26,d23,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d23,#50
+ vsli.64 d25,d23,#46
+ vsli.64 d26,d23,#23
+#if 5<16 && defined(__ARMEL__)
+ vrev64.8 d5,d5
+#endif
+ vadd.i64 d27,d28,d18
+ veor d29,d16,d17
+ veor d24,d25
+ vand d29,d23
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d17 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d19,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d19,#34
+ vshr.u64 d26,d19,#39
+ vsli.64 d24,d19,#36
+ vsli.64 d25,d19,#30
+ vsli.64 d26,d19,#25
+ vadd.i64 d27,d5
+ vorr d30,d19,d21
+ vand d29,d19,d21
+ veor d18,d24,d25
+ vand d30,d20
+ veor d18,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d18,d27
+ vadd.i64 d22,d27
+ vadd.i64 d18,d30
+ vshr.u64 d24,d22,#14 @ 6
+#if 6<16
+ vld1.64 {d6},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d22,#18
+ vshr.u64 d26,d22,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d22,#50
+ vsli.64 d25,d22,#46
+ vsli.64 d26,d22,#23
+#if 6<16 && defined(__ARMEL__)
+ vrev64.8 d6,d6
+#endif
+ vadd.i64 d27,d28,d17
+ veor d29,d23,d16
+ veor d24,d25
+ vand d29,d22
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d16 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d18,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d18,#34
+ vshr.u64 d26,d18,#39
+ vsli.64 d24,d18,#36
+ vsli.64 d25,d18,#30
+ vsli.64 d26,d18,#25
+ vadd.i64 d27,d6
+ vorr d30,d18,d20
+ vand d29,d18,d20
+ veor d17,d24,d25
+ vand d30,d19
+ veor d17,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d17,d27
+ vadd.i64 d21,d27
+ vadd.i64 d17,d30
+ vshr.u64 d24,d21,#14 @ 7
+#if 7<16
+ vld1.64 {d7},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d21,#18
+ vshr.u64 d26,d21,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d21,#50
+ vsli.64 d25,d21,#46
+ vsli.64 d26,d21,#23
+#if 7<16 && defined(__ARMEL__)
+ vrev64.8 d7,d7
+#endif
+ vadd.i64 d27,d28,d16
+ veor d29,d22,d23
+ veor d24,d25
+ vand d29,d21
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d23 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d17,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d17,#34
+ vshr.u64 d26,d17,#39
+ vsli.64 d24,d17,#36
+ vsli.64 d25,d17,#30
+ vsli.64 d26,d17,#25
+ vadd.i64 d27,d7
+ vorr d30,d17,d19
+ vand d29,d17,d19
+ veor d16,d24,d25
+ vand d30,d18
+ veor d16,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d16,d27
+ vadd.i64 d20,d27
+ vadd.i64 d16,d30
+ vshr.u64 d24,d20,#14 @ 8
+#if 8<16
+ vld1.64 {d8},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d20,#18
+ vshr.u64 d26,d20,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d20,#50
+ vsli.64 d25,d20,#46
+ vsli.64 d26,d20,#23
+#if 8<16 && defined(__ARMEL__)
+ vrev64.8 d8,d8
+#endif
+ vadd.i64 d27,d28,d23
+ veor d29,d21,d22
+ veor d24,d25
+ vand d29,d20
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d22 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d16,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d16,#34
+ vshr.u64 d26,d16,#39
+ vsli.64 d24,d16,#36
+ vsli.64 d25,d16,#30
+ vsli.64 d26,d16,#25
+ vadd.i64 d27,d8
+ vorr d30,d16,d18
+ vand d29,d16,d18
+ veor d23,d24,d25
+ vand d30,d17
+ veor d23,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d23,d27
+ vadd.i64 d19,d27
+ vadd.i64 d23,d30
+ vshr.u64 d24,d19,#14 @ 9
+#if 9<16
+ vld1.64 {d9},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d19,#18
+ vshr.u64 d26,d19,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d19,#50
+ vsli.64 d25,d19,#46
+ vsli.64 d26,d19,#23
+#if 9<16 && defined(__ARMEL__)
+ vrev64.8 d9,d9
+#endif
+ vadd.i64 d27,d28,d22
+ veor d29,d20,d21
+ veor d24,d25
+ vand d29,d19
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d21 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d23,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d23,#34
+ vshr.u64 d26,d23,#39
+ vsli.64 d24,d23,#36
+ vsli.64 d25,d23,#30
+ vsli.64 d26,d23,#25
+ vadd.i64 d27,d9
+ vorr d30,d23,d17
+ vand d29,d23,d17
+ veor d22,d24,d25
+ vand d30,d16
+ veor d22,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d22,d27
+ vadd.i64 d18,d27
+ vadd.i64 d22,d30
+ vshr.u64 d24,d18,#14 @ 10
+#if 10<16
+ vld1.64 {d10},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d18,#18
+ vshr.u64 d26,d18,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d18,#50
+ vsli.64 d25,d18,#46
+ vsli.64 d26,d18,#23
+#if 10<16 && defined(__ARMEL__)
+ vrev64.8 d10,d10
+#endif
+ vadd.i64 d27,d28,d21
+ veor d29,d19,d20
+ veor d24,d25
+ vand d29,d18
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d20 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d22,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d22,#34
+ vshr.u64 d26,d22,#39
+ vsli.64 d24,d22,#36
+ vsli.64 d25,d22,#30
+ vsli.64 d26,d22,#25
+ vadd.i64 d27,d10
+ vorr d30,d22,d16
+ vand d29,d22,d16
+ veor d21,d24,d25
+ vand d30,d23
+ veor d21,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d21,d27
+ vadd.i64 d17,d27
+ vadd.i64 d21,d30
+ vshr.u64 d24,d17,#14 @ 11
+#if 11<16
+ vld1.64 {d11},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d17,#18
+ vshr.u64 d26,d17,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d17,#50
+ vsli.64 d25,d17,#46
+ vsli.64 d26,d17,#23
+#if 11<16 && defined(__ARMEL__)
+ vrev64.8 d11,d11
+#endif
+ vadd.i64 d27,d28,d20
+ veor d29,d18,d19
+ veor d24,d25
+ vand d29,d17
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d19 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d21,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d21,#34
+ vshr.u64 d26,d21,#39
+ vsli.64 d24,d21,#36
+ vsli.64 d25,d21,#30
+ vsli.64 d26,d21,#25
+ vadd.i64 d27,d11
+ vorr d30,d21,d23
+ vand d29,d21,d23
+ veor d20,d24,d25
+ vand d30,d22
+ veor d20,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d20,d27
+ vadd.i64 d16,d27
+ vadd.i64 d20,d30
+ vshr.u64 d24,d16,#14 @ 12
+#if 12<16
+ vld1.64 {d12},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d16,#18
+ vshr.u64 d26,d16,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d16,#50
+ vsli.64 d25,d16,#46
+ vsli.64 d26,d16,#23
+#if 12<16 && defined(__ARMEL__)
+ vrev64.8 d12,d12
+#endif
+ vadd.i64 d27,d28,d19
+ veor d29,d17,d18
+ veor d24,d25
+ vand d29,d16
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d18 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d20,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d20,#34
+ vshr.u64 d26,d20,#39
+ vsli.64 d24,d20,#36
+ vsli.64 d25,d20,#30
+ vsli.64 d26,d20,#25
+ vadd.i64 d27,d12
+ vorr d30,d20,d22
+ vand d29,d20,d22
+ veor d19,d24,d25
+ vand d30,d21
+ veor d19,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d19,d27
+ vadd.i64 d23,d27
+ vadd.i64 d19,d30
+ vshr.u64 d24,d23,#14 @ 13
+#if 13<16
+ vld1.64 {d13},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d23,#18
+ vshr.u64 d26,d23,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d23,#50
+ vsli.64 d25,d23,#46
+ vsli.64 d26,d23,#23
+#if 13<16 && defined(__ARMEL__)
+ vrev64.8 d13,d13
+#endif
+ vadd.i64 d27,d28,d18
+ veor d29,d16,d17
+ veor d24,d25
+ vand d29,d23
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d17 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d19,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d19,#34
+ vshr.u64 d26,d19,#39
+ vsli.64 d24,d19,#36
+ vsli.64 d25,d19,#30
+ vsli.64 d26,d19,#25
+ vadd.i64 d27,d13
+ vorr d30,d19,d21
+ vand d29,d19,d21
+ veor d18,d24,d25
+ vand d30,d20
+ veor d18,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d18,d27
+ vadd.i64 d22,d27
+ vadd.i64 d18,d30
+ vshr.u64 d24,d22,#14 @ 14
+#if 14<16
+ vld1.64 {d14},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d22,#18
+ vshr.u64 d26,d22,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d22,#50
+ vsli.64 d25,d22,#46
+ vsli.64 d26,d22,#23
+#if 14<16 && defined(__ARMEL__)
+ vrev64.8 d14,d14
+#endif
+ vadd.i64 d27,d28,d17
+ veor d29,d23,d16
+ veor d24,d25
+ vand d29,d22
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d16 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d18,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d18,#34
+ vshr.u64 d26,d18,#39
+ vsli.64 d24,d18,#36
+ vsli.64 d25,d18,#30
+ vsli.64 d26,d18,#25
+ vadd.i64 d27,d14
+ vorr d30,d18,d20
+ vand d29,d18,d20
+ veor d17,d24,d25
+ vand d30,d19
+ veor d17,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d17,d27
+ vadd.i64 d21,d27
+ vadd.i64 d17,d30
+ vshr.u64 d24,d21,#14 @ 15
+#if 15<16
+ vld1.64 {d15},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d21,#18
+ vshr.u64 d26,d21,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d21,#50
+ vsli.64 d25,d21,#46
+ vsli.64 d26,d21,#23
+#if 15<16 && defined(__ARMEL__)
+ vrev64.8 d15,d15
+#endif
+ vadd.i64 d27,d28,d16
+ veor d29,d22,d23
+ veor d24,d25
+ vand d29,d21
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d23 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d17,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d17,#34
+ vshr.u64 d26,d17,#39
+ vsli.64 d24,d17,#36
+ vsli.64 d25,d17,#30
+ vsli.64 d26,d17,#25
+ vadd.i64 d27,d15
+ vorr d30,d17,d19
+ vand d29,d17,d19
+ veor d16,d24,d25
+ vand d30,d18
+ veor d16,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d16,d27
+ vadd.i64 d20,d27
+ vadd.i64 d16,d30
+ mov r12,#4
+.L16_79_neon:
+ subs r12,#1
+ vshr.u64 q12,q7,#19
+ vshr.u64 q13,q7,#61
+ vshr.u64 q15,q7,#6
+ vsli.64 q12,q7,#45
+ vext.8 q14,q0,q1,#8 @ X[i+1]
+ vsli.64 q13,q7,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q0,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q4,q5,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d20,#14 @ from NEON_00_15
+ vadd.i64 q0,q14
+ vshr.u64 d25,d20,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d20,#41 @ from NEON_00_15
+ vadd.i64 q0,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d20,#50
+ vsli.64 d25,d20,#46
+ vsli.64 d26,d20,#23
+#if 16<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d23
+ veor d29,d21,d22
+ veor d24,d25
+ vand d29,d20
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d22 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d16,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d16,#34
+ vshr.u64 d26,d16,#39
+ vsli.64 d24,d16,#36
+ vsli.64 d25,d16,#30
+ vsli.64 d26,d16,#25
+ vadd.i64 d27,d0
+ vorr d30,d16,d18
+ vand d29,d16,d18
+ veor d23,d24,d25
+ vand d30,d17
+ veor d23,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d23,d27
+ vadd.i64 d19,d27
+ vadd.i64 d23,d30
+ vshr.u64 d24,d19,#14 @ 17
+#if 17<16
+ vld1.64 {d1},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d19,#18
+ vshr.u64 d26,d19,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d19,#50
+ vsli.64 d25,d19,#46
+ vsli.64 d26,d19,#23
+#if 17<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d22
+ veor d29,d20,d21
+ veor d24,d25
+ vand d29,d19
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d21 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d23,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d23,#34
+ vshr.u64 d26,d23,#39
+ vsli.64 d24,d23,#36
+ vsli.64 d25,d23,#30
+ vsli.64 d26,d23,#25
+ vadd.i64 d27,d1
+ vorr d30,d23,d17
+ vand d29,d23,d17
+ veor d22,d24,d25
+ vand d30,d16
+ veor d22,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d22,d27
+ vadd.i64 d18,d27
+ vadd.i64 d22,d30
+ vshr.u64 q12,q0,#19
+ vshr.u64 q13,q0,#61
+ vshr.u64 q15,q0,#6
+ vsli.64 q12,q0,#45
+ vext.8 q14,q1,q2,#8 @ X[i+1]
+ vsli.64 q13,q0,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q1,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q5,q6,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d18,#14 @ from NEON_00_15
+ vadd.i64 q1,q14
+ vshr.u64 d25,d18,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d18,#41 @ from NEON_00_15
+ vadd.i64 q1,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d18,#50
+ vsli.64 d25,d18,#46
+ vsli.64 d26,d18,#23
+#if 18<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d21
+ veor d29,d19,d20
+ veor d24,d25
+ vand d29,d18
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d20 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d22,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d22,#34
+ vshr.u64 d26,d22,#39
+ vsli.64 d24,d22,#36
+ vsli.64 d25,d22,#30
+ vsli.64 d26,d22,#25
+ vadd.i64 d27,d2
+ vorr d30,d22,d16
+ vand d29,d22,d16
+ veor d21,d24,d25
+ vand d30,d23
+ veor d21,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d21,d27
+ vadd.i64 d17,d27
+ vadd.i64 d21,d30
+ vshr.u64 d24,d17,#14 @ 19
+#if 19<16
+ vld1.64 {d3},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d17,#18
+ vshr.u64 d26,d17,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d17,#50
+ vsli.64 d25,d17,#46
+ vsli.64 d26,d17,#23
+#if 19<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d20
+ veor d29,d18,d19
+ veor d24,d25
+ vand d29,d17
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d19 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d21,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d21,#34
+ vshr.u64 d26,d21,#39
+ vsli.64 d24,d21,#36
+ vsli.64 d25,d21,#30
+ vsli.64 d26,d21,#25
+ vadd.i64 d27,d3
+ vorr d30,d21,d23
+ vand d29,d21,d23
+ veor d20,d24,d25
+ vand d30,d22
+ veor d20,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d20,d27
+ vadd.i64 d16,d27
+ vadd.i64 d20,d30
+ vshr.u64 q12,q1,#19
+ vshr.u64 q13,q1,#61
+ vshr.u64 q15,q1,#6
+ vsli.64 q12,q1,#45
+ vext.8 q14,q2,q3,#8 @ X[i+1]
+ vsli.64 q13,q1,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q2,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q6,q7,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d16,#14 @ from NEON_00_15
+ vadd.i64 q2,q14
+ vshr.u64 d25,d16,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d16,#41 @ from NEON_00_15
+ vadd.i64 q2,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d16,#50
+ vsli.64 d25,d16,#46
+ vsli.64 d26,d16,#23
+#if 20<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d19
+ veor d29,d17,d18
+ veor d24,d25
+ vand d29,d16
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d18 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d20,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d20,#34
+ vshr.u64 d26,d20,#39
+ vsli.64 d24,d20,#36
+ vsli.64 d25,d20,#30
+ vsli.64 d26,d20,#25
+ vadd.i64 d27,d4
+ vorr d30,d20,d22
+ vand d29,d20,d22
+ veor d19,d24,d25
+ vand d30,d21
+ veor d19,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d19,d27
+ vadd.i64 d23,d27
+ vadd.i64 d19,d30
+ vshr.u64 d24,d23,#14 @ 21
+#if 21<16
+ vld1.64 {d5},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d23,#18
+ vshr.u64 d26,d23,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d23,#50
+ vsli.64 d25,d23,#46
+ vsli.64 d26,d23,#23
+#if 21<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d18
+ veor d29,d16,d17
+ veor d24,d25
+ vand d29,d23
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d17 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d19,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d19,#34
+ vshr.u64 d26,d19,#39
+ vsli.64 d24,d19,#36
+ vsli.64 d25,d19,#30
+ vsli.64 d26,d19,#25
+ vadd.i64 d27,d5
+ vorr d30,d19,d21
+ vand d29,d19,d21
+ veor d18,d24,d25
+ vand d30,d20
+ veor d18,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d18,d27
+ vadd.i64 d22,d27
+ vadd.i64 d18,d30
+ vshr.u64 q12,q2,#19
+ vshr.u64 q13,q2,#61
+ vshr.u64 q15,q2,#6
+ vsli.64 q12,q2,#45
+ vext.8 q14,q3,q4,#8 @ X[i+1]
+ vsli.64 q13,q2,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q3,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q7,q0,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d22,#14 @ from NEON_00_15
+ vadd.i64 q3,q14
+ vshr.u64 d25,d22,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d22,#41 @ from NEON_00_15
+ vadd.i64 q3,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d22,#50
+ vsli.64 d25,d22,#46
+ vsli.64 d26,d22,#23
+#if 22<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d17
+ veor d29,d23,d16
+ veor d24,d25
+ vand d29,d22
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d16 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d18,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d18,#34
+ vshr.u64 d26,d18,#39
+ vsli.64 d24,d18,#36
+ vsli.64 d25,d18,#30
+ vsli.64 d26,d18,#25
+ vadd.i64 d27,d6
+ vorr d30,d18,d20
+ vand d29,d18,d20
+ veor d17,d24,d25
+ vand d30,d19
+ veor d17,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d17,d27
+ vadd.i64 d21,d27
+ vadd.i64 d17,d30
+ vshr.u64 d24,d21,#14 @ 23
+#if 23<16
+ vld1.64 {d7},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d21,#18
+ vshr.u64 d26,d21,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d21,#50
+ vsli.64 d25,d21,#46
+ vsli.64 d26,d21,#23
+#if 23<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d16
+ veor d29,d22,d23
+ veor d24,d25
+ vand d29,d21
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d23 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d17,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d17,#34
+ vshr.u64 d26,d17,#39
+ vsli.64 d24,d17,#36
+ vsli.64 d25,d17,#30
+ vsli.64 d26,d17,#25
+ vadd.i64 d27,d7
+ vorr d30,d17,d19
+ vand d29,d17,d19
+ veor d16,d24,d25
+ vand d30,d18
+ veor d16,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d16,d27
+ vadd.i64 d20,d27
+ vadd.i64 d16,d30
+ vshr.u64 q12,q3,#19
+ vshr.u64 q13,q3,#61
+ vshr.u64 q15,q3,#6
+ vsli.64 q12,q3,#45
+ vext.8 q14,q4,q5,#8 @ X[i+1]
+ vsli.64 q13,q3,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q4,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q0,q1,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d20,#14 @ from NEON_00_15
+ vadd.i64 q4,q14
+ vshr.u64 d25,d20,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d20,#41 @ from NEON_00_15
+ vadd.i64 q4,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d20,#50
+ vsli.64 d25,d20,#46
+ vsli.64 d26,d20,#23
+#if 24<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d23
+ veor d29,d21,d22
+ veor d24,d25
+ vand d29,d20
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d22 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d16,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d16,#34
+ vshr.u64 d26,d16,#39
+ vsli.64 d24,d16,#36
+ vsli.64 d25,d16,#30
+ vsli.64 d26,d16,#25
+ vadd.i64 d27,d8
+ vorr d30,d16,d18
+ vand d29,d16,d18
+ veor d23,d24,d25
+ vand d30,d17
+ veor d23,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d23,d27
+ vadd.i64 d19,d27
+ vadd.i64 d23,d30
+ vshr.u64 d24,d19,#14 @ 25
+#if 25<16
+ vld1.64 {d9},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d19,#18
+ vshr.u64 d26,d19,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d19,#50
+ vsli.64 d25,d19,#46
+ vsli.64 d26,d19,#23
+#if 25<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d22
+ veor d29,d20,d21
+ veor d24,d25
+ vand d29,d19
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d21 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d23,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d23,#34
+ vshr.u64 d26,d23,#39
+ vsli.64 d24,d23,#36
+ vsli.64 d25,d23,#30
+ vsli.64 d26,d23,#25
+ vadd.i64 d27,d9
+ vorr d30,d23,d17
+ vand d29,d23,d17
+ veor d22,d24,d25
+ vand d30,d16
+ veor d22,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d22,d27
+ vadd.i64 d18,d27
+ vadd.i64 d22,d30
+ vshr.u64 q12,q4,#19
+ vshr.u64 q13,q4,#61
+ vshr.u64 q15,q4,#6
+ vsli.64 q12,q4,#45
+ vext.8 q14,q5,q6,#8 @ X[i+1]
+ vsli.64 q13,q4,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q5,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q1,q2,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d18,#14 @ from NEON_00_15
+ vadd.i64 q5,q14
+ vshr.u64 d25,d18,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d18,#41 @ from NEON_00_15
+ vadd.i64 q5,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d18,#50
+ vsli.64 d25,d18,#46
+ vsli.64 d26,d18,#23
+#if 26<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d21
+ veor d29,d19,d20
+ veor d24,d25
+ vand d29,d18
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d20 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d22,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d22,#34
+ vshr.u64 d26,d22,#39
+ vsli.64 d24,d22,#36
+ vsli.64 d25,d22,#30
+ vsli.64 d26,d22,#25
+ vadd.i64 d27,d10
+ vorr d30,d22,d16
+ vand d29,d22,d16
+ veor d21,d24,d25
+ vand d30,d23
+ veor d21,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d21,d27
+ vadd.i64 d17,d27
+ vadd.i64 d21,d30
+ vshr.u64 d24,d17,#14 @ 27
+#if 27<16
+ vld1.64 {d11},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d17,#18
+ vshr.u64 d26,d17,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d17,#50
+ vsli.64 d25,d17,#46
+ vsli.64 d26,d17,#23
+#if 27<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d20
+ veor d29,d18,d19
+ veor d24,d25
+ vand d29,d17
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d19 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d21,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d21,#34
+ vshr.u64 d26,d21,#39
+ vsli.64 d24,d21,#36
+ vsli.64 d25,d21,#30
+ vsli.64 d26,d21,#25
+ vadd.i64 d27,d11
+ vorr d30,d21,d23
+ vand d29,d21,d23
+ veor d20,d24,d25
+ vand d30,d22
+ veor d20,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d20,d27
+ vadd.i64 d16,d27
+ vadd.i64 d20,d30
+ vshr.u64 q12,q5,#19
+ vshr.u64 q13,q5,#61
+ vshr.u64 q15,q5,#6
+ vsli.64 q12,q5,#45
+ vext.8 q14,q6,q7,#8 @ X[i+1]
+ vsli.64 q13,q5,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q6,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q2,q3,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d16,#14 @ from NEON_00_15
+ vadd.i64 q6,q14
+ vshr.u64 d25,d16,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d16,#41 @ from NEON_00_15
+ vadd.i64 q6,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d16,#50
+ vsli.64 d25,d16,#46
+ vsli.64 d26,d16,#23
+#if 28<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d19
+ veor d29,d17,d18
+ veor d24,d25
+ vand d29,d16
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d18 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d20,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d20,#34
+ vshr.u64 d26,d20,#39
+ vsli.64 d24,d20,#36
+ vsli.64 d25,d20,#30
+ vsli.64 d26,d20,#25
+ vadd.i64 d27,d12
+ vorr d30,d20,d22
+ vand d29,d20,d22
+ veor d19,d24,d25
+ vand d30,d21
+ veor d19,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d19,d27
+ vadd.i64 d23,d27
+ vadd.i64 d19,d30
+ vshr.u64 d24,d23,#14 @ 29
+#if 29<16
+ vld1.64 {d13},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d23,#18
+ vshr.u64 d26,d23,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d23,#50
+ vsli.64 d25,d23,#46
+ vsli.64 d26,d23,#23
+#if 29<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d18
+ veor d29,d16,d17
+ veor d24,d25
+ vand d29,d23
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d17 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d19,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d19,#34
+ vshr.u64 d26,d19,#39
+ vsli.64 d24,d19,#36
+ vsli.64 d25,d19,#30
+ vsli.64 d26,d19,#25
+ vadd.i64 d27,d13
+ vorr d30,d19,d21
+ vand d29,d19,d21
+ veor d18,d24,d25
+ vand d30,d20
+ veor d18,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d18,d27
+ vadd.i64 d22,d27
+ vadd.i64 d18,d30
+ vshr.u64 q12,q6,#19
+ vshr.u64 q13,q6,#61
+ vshr.u64 q15,q6,#6
+ vsli.64 q12,q6,#45
+ vext.8 q14,q7,q0,#8 @ X[i+1]
+ vsli.64 q13,q6,#3
+ veor q15,q12
+ vshr.u64 q12,q14,#1
+ veor q15,q13 @ sigma1(X[i+14])
+ vshr.u64 q13,q14,#8
+ vadd.i64 q7,q15
+ vshr.u64 q15,q14,#7
+ vsli.64 q12,q14,#63
+ vsli.64 q13,q14,#56
+ vext.8 q14,q3,q4,#8 @ X[i+9]
+ veor q15,q12
+ vshr.u64 d24,d22,#14 @ from NEON_00_15
+ vadd.i64 q7,q14
+ vshr.u64 d25,d22,#18 @ from NEON_00_15
+ veor q15,q13 @ sigma0(X[i+1])
+ vshr.u64 d26,d22,#41 @ from NEON_00_15
+ vadd.i64 q7,q15
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d22,#50
+ vsli.64 d25,d22,#46
+ vsli.64 d26,d22,#23
+#if 30<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d17
+ veor d29,d23,d16
+ veor d24,d25
+ vand d29,d22
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d16 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d18,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d18,#34
+ vshr.u64 d26,d18,#39
+ vsli.64 d24,d18,#36
+ vsli.64 d25,d18,#30
+ vsli.64 d26,d18,#25
+ vadd.i64 d27,d14
+ vorr d30,d18,d20
+ vand d29,d18,d20
+ veor d17,d24,d25
+ vand d30,d19
+ veor d17,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d17,d27
+ vadd.i64 d21,d27
+ vadd.i64 d17,d30
+ vshr.u64 d24,d21,#14 @ 31
+#if 31<16
+ vld1.64 {d15},[r1]! @ handles unaligned
+#endif
+ vshr.u64 d25,d21,#18
+ vshr.u64 d26,d21,#41
+ vld1.64 {d28},[r3,:64]! @ K[i++]
+ vsli.64 d24,d21,#50
+ vsli.64 d25,d21,#46
+ vsli.64 d26,d21,#23
+#if 31<16 && defined(__ARMEL__)
+ vrev64.8 ,
+#endif
+ vadd.i64 d27,d28,d16
+ veor d29,d22,d23
+ veor d24,d25
+ vand d29,d21
+ veor d24,d26 @ Sigma1(e)
+ veor d29,d23 @ Ch(e,f,g)
+ vadd.i64 d27,d24
+ vshr.u64 d24,d17,#28
+ vadd.i64 d27,d29
+ vshr.u64 d25,d17,#34
+ vshr.u64 d26,d17,#39
+ vsli.64 d24,d17,#36
+ vsli.64 d25,d17,#30
+ vsli.64 d26,d17,#25
+ vadd.i64 d27,d15
+ vorr d30,d17,d19
+ vand d29,d17,d19
+ veor d16,d24,d25
+ vand d30,d18
+ veor d16,d26 @ Sigma0(a)
+ vorr d30,d29 @ Maj(a,b,c)
+ vadd.i64 d16,d27
+ vadd.i64 d20,d27
+ vadd.i64 d16,d30
+ bne .L16_79_neon
+
+ vldmia r0,{d24-d31} @ load context to temp
+ vadd.i64 q8,q12 @ vectorized accumulate
+ vadd.i64 q9,q13
+ vadd.i64 q10,q14
+ vadd.i64 q11,q15
+ vstmia r0,{d16-d23} @ save context
+ teq r1,r2
+ sub r3,#640 @ rewind K512
+ bne .Loop_neon
+
+ vldmia sp!,{d8-d15} @ epilogue
+ .word 0xe12fff1e
+#endif
+.size sha512_block_data_order,.-sha512_block_data_order
+.asciz "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
+.comm OPENSSL_armcap_P,4,4
+#if defined(HAVE_GNU_STACK)
+.section .note.GNU-stack,"",%progbits
+#endif
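
The ARMv4 file above implements exactly the round functions its "@" comments name: Sigma0/Sigma1 and sigma0/sigma1 with rotate amounts 28/34/39, 14/18/41, 1/8/(>>7) and 19/61/(>>6), plus Ch and Maj. In the integer path each 64-bit rotate is split into paired shifts on the lo/hi 32-bit halves; in the NEON path it becomes a vshr.u64/vsli.64 pair (shift right by n, then shift-left-insert by 64-n), and the "#if N<16" guards restrict the input loads and little-endian byte swaps to the first 16 message words. A minimal C sketch of those functions follows; the helper names are illustrative only and are not identifiers from LibreSSL.

#include <stdint.h>

/* Plain-C equivalents of the functions named in the "@ Sigma/sigma" comments. */
static inline uint64_t rotr64(uint64_t x, unsigned n) { return (x >> n) | (x << (64 - n)); }

static inline uint64_t Sigma0(uint64_t a) { return rotr64(a, 28) ^ rotr64(a, 34) ^ rotr64(a, 39); }
static inline uint64_t Sigma1(uint64_t e) { return rotr64(e, 14) ^ rotr64(e, 18) ^ rotr64(e, 41); }
static inline uint64_t sigma0(uint64_t x) { return rotr64(x, 1)  ^ rotr64(x, 8)  ^ (x >> 7); }
static inline uint64_t sigma1(uint64_t x) { return rotr64(x, 19) ^ rotr64(x, 61) ^ (x >> 6); }

/* The assembly uses algebraically equivalent forms that avoid a bitwise NOT:
 *   Ch(e,f,g)  = g ^ (e & (f ^ g))        == (e & f) ^ (~e & g)
 *   Maj(a,b,c) = (a & b) | (c & (a | b))  == (a & b) ^ (a & c) ^ (b & c)   */
static inline uint64_t Ch(uint64_t e, uint64_t f, uint64_t g)  { return g ^ (e & (f ^ g)); }
static inline uint64_t Maj(uint64_t a, uint64_t b, uint64_t c) { return (a & b) | (c & (a | b)); }

For example, ROTR(e,14) in the NEON section is "vshr.u64 d24,d20,#14" followed by "vsli.64 d24,d20,#50": the shift-left-insert fills the 50 high bits that the right shift left empty, producing the rotate without a dedicated rotate instruction.
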
diff --git a/crypto/libressl/crypto/sha/sha512-elf-x86_64.S b/crypto/libressl/crypto/sha/sha512-elf-x86_64.S
new file mode 100644
index 0000000..1173407
--- /dev/null
+++ b/crypto/libressl/crypto/sha/sha512-elf-x86_64.S
@@ -0,0 +1,1806 @@
+#include "x86_arch.h"
+.text
+
+.globl sha512_block_data_order
+.type sha512_block_data_order,@function
+.align 16
+sha512_block_data_order:
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+ movq %rsp,%r11
+ shlq $4,%rdx
+ subq $128+32,%rsp
+ leaq (%rsi,%rdx,8),%rdx
+ andq $-64,%rsp
+ movq %rdi,128+0(%rsp)
+ movq %rsi,128+8(%rsp)
+ movq %rdx,128+16(%rsp)
+ movq %r11,128+24(%rsp)
+.Lprologue:
+
+ leaq K512(%rip),%rbp
+
+ movq 0(%rdi),%rax
+ movq 8(%rdi),%rbx
+ movq 16(%rdi),%rcx
+ movq 24(%rdi),%rdx
+ movq 32(%rdi),%r8
+ movq 40(%rdi),%r9
+ movq 48(%rdi),%r10
+ movq 56(%rdi),%r11
+ jmp .Lloop
+
+.align 16
+.Lloop:
+ xorq %rdi,%rdi
+ movq 0(%rsi),%r12
+ movq %r8,%r13
+ movq %rax,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r9,%r15
+ movq %r12,0(%rsp)
+
+ rorq $5,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ rorq $4,%r13
+ addq %r11,%r12
+ xorq %rax,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r8,%r15
+ movq %rbx,%r11
+
+ rorq $6,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ xorq %rcx,%r11
+ xorq %rax,%r14
+ addq %r15,%r12
+ movq %rbx,%r15
+
+ rorq $14,%r13
+ andq %rax,%r11
+ andq %rcx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r11
+
+ addq %r12,%rdx
+ addq %r12,%r11
+ leaq 1(%rdi),%rdi
+ addq %r14,%r11
+
+ movq 8(%rsi),%r12
+ movq %rdx,%r13
+ movq %r11,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r8,%r15
+ movq %r12,8(%rsp)
+
+ rorq $5,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ rorq $4,%r13
+ addq %r10,%r12
+ xorq %r11,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rdx,%r15
+ movq %rax,%r10
+
+ rorq $6,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ xorq %rbx,%r10
+ xorq %r11,%r14
+ addq %r15,%r12
+ movq %rax,%r15
+
+ rorq $14,%r13
+ andq %r11,%r10
+ andq %rbx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r10
+
+ addq %r12,%rcx
+ addq %r12,%r10
+ leaq 1(%rdi),%rdi
+ addq %r14,%r10
+
+ movq 16(%rsi),%r12
+ movq %rcx,%r13
+ movq %r10,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rdx,%r15
+ movq %r12,16(%rsp)
+
+ rorq $5,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ rorq $4,%r13
+ addq %r9,%r12
+ xorq %r10,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rcx,%r15
+ movq %r11,%r9
+
+ rorq $6,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ xorq %rax,%r9
+ xorq %r10,%r14
+ addq %r15,%r12
+ movq %r11,%r15
+
+ rorq $14,%r13
+ andq %r10,%r9
+ andq %rax,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r9
+
+ addq %r12,%rbx
+ addq %r12,%r9
+ leaq 1(%rdi),%rdi
+ addq %r14,%r9
+
+ movq 24(%rsi),%r12
+ movq %rbx,%r13
+ movq %r9,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rcx,%r15
+ movq %r12,24(%rsp)
+
+ rorq $5,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ rorq $4,%r13
+ addq %r8,%r12
+ xorq %r9,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rbx,%r15
+ movq %r10,%r8
+
+ rorq $6,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ xorq %r11,%r8
+ xorq %r9,%r14
+ addq %r15,%r12
+ movq %r10,%r15
+
+ rorq $14,%r13
+ andq %r9,%r8
+ andq %r11,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r8
+
+ addq %r12,%rax
+ addq %r12,%r8
+ leaq 1(%rdi),%rdi
+ addq %r14,%r8
+
+ movq 32(%rsi),%r12
+ movq %rax,%r13
+ movq %r8,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rbx,%r15
+ movq %r12,32(%rsp)
+
+ rorq $5,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ rorq $4,%r13
+ addq %rdx,%r12
+ xorq %r8,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rax,%r15
+ movq %r9,%rdx
+
+ rorq $6,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ xorq %r10,%rdx
+ xorq %r8,%r14
+ addq %r15,%r12
+ movq %r9,%r15
+
+ rorq $14,%r13
+ andq %r8,%rdx
+ andq %r10,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rdx
+
+ addq %r12,%r11
+ addq %r12,%rdx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rdx
+
+ movq 40(%rsi),%r12
+ movq %r11,%r13
+ movq %rdx,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rax,%r15
+ movq %r12,40(%rsp)
+
+ rorq $5,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ rorq $4,%r13
+ addq %rcx,%r12
+ xorq %rdx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r11,%r15
+ movq %r8,%rcx
+
+ rorq $6,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ xorq %r9,%rcx
+ xorq %rdx,%r14
+ addq %r15,%r12
+ movq %r8,%r15
+
+ rorq $14,%r13
+ andq %rdx,%rcx
+ andq %r9,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rcx
+
+ addq %r12,%r10
+ addq %r12,%rcx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rcx
+
+ movq 48(%rsi),%r12
+ movq %r10,%r13
+ movq %rcx,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r11,%r15
+ movq %r12,48(%rsp)
+
+ rorq $5,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ rorq $4,%r13
+ addq %rbx,%r12
+ xorq %rcx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r10,%r15
+ movq %rdx,%rbx
+
+ rorq $6,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ xorq %r8,%rbx
+ xorq %rcx,%r14
+ addq %r15,%r12
+ movq %rdx,%r15
+
+ rorq $14,%r13
+ andq %rcx,%rbx
+ andq %r8,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rbx
+
+ addq %r12,%r9
+ addq %r12,%rbx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rbx
+
+ movq 56(%rsi),%r12
+ movq %r9,%r13
+ movq %rbx,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r10,%r15
+ movq %r12,56(%rsp)
+
+ rorq $5,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ rorq $4,%r13
+ addq %rax,%r12
+ xorq %rbx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r9,%r15
+ movq %rcx,%rax
+
+ rorq $6,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ xorq %rdx,%rax
+ xorq %rbx,%r14
+ addq %r15,%r12
+ movq %rcx,%r15
+
+ rorq $14,%r13
+ andq %rbx,%rax
+ andq %rdx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rax
+
+ addq %r12,%r8
+ addq %r12,%rax
+ leaq 1(%rdi),%rdi
+ addq %r14,%rax
+
+ movq 64(%rsi),%r12
+ movq %r8,%r13
+ movq %rax,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r9,%r15
+ movq %r12,64(%rsp)
+
+ rorq $5,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ rorq $4,%r13
+ addq %r11,%r12
+ xorq %rax,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r8,%r15
+ movq %rbx,%r11
+
+ rorq $6,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ xorq %rcx,%r11
+ xorq %rax,%r14
+ addq %r15,%r12
+ movq %rbx,%r15
+
+ rorq $14,%r13
+ andq %rax,%r11
+ andq %rcx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r11
+
+ addq %r12,%rdx
+ addq %r12,%r11
+ leaq 1(%rdi),%rdi
+ addq %r14,%r11
+
+ movq 72(%rsi),%r12
+ movq %rdx,%r13
+ movq %r11,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r8,%r15
+ movq %r12,72(%rsp)
+
+ rorq $5,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ rorq $4,%r13
+ addq %r10,%r12
+ xorq %r11,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rdx,%r15
+ movq %rax,%r10
+
+ rorq $6,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ xorq %rbx,%r10
+ xorq %r11,%r14
+ addq %r15,%r12
+ movq %rax,%r15
+
+ rorq $14,%r13
+ andq %r11,%r10
+ andq %rbx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r10
+
+ addq %r12,%rcx
+ addq %r12,%r10
+ leaq 1(%rdi),%rdi
+ addq %r14,%r10
+
+ movq 80(%rsi),%r12
+ movq %rcx,%r13
+ movq %r10,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rdx,%r15
+ movq %r12,80(%rsp)
+
+ rorq $5,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ rorq $4,%r13
+ addq %r9,%r12
+ xorq %r10,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rcx,%r15
+ movq %r11,%r9
+
+ rorq $6,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ xorq %rax,%r9
+ xorq %r10,%r14
+ addq %r15,%r12
+ movq %r11,%r15
+
+ rorq $14,%r13
+ andq %r10,%r9
+ andq %rax,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r9
+
+ addq %r12,%rbx
+ addq %r12,%r9
+ leaq 1(%rdi),%rdi
+ addq %r14,%r9
+
+ movq 88(%rsi),%r12
+ movq %rbx,%r13
+ movq %r9,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rcx,%r15
+ movq %r12,88(%rsp)
+
+ rorq $5,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ rorq $4,%r13
+ addq %r8,%r12
+ xorq %r9,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rbx,%r15
+ movq %r10,%r8
+
+ rorq $6,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ xorq %r11,%r8
+ xorq %r9,%r14
+ addq %r15,%r12
+ movq %r10,%r15
+
+ rorq $14,%r13
+ andq %r9,%r8
+ andq %r11,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r8
+
+ addq %r12,%rax
+ addq %r12,%r8
+ leaq 1(%rdi),%rdi
+ addq %r14,%r8
+
+ movq 96(%rsi),%r12
+ movq %rax,%r13
+ movq %r8,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rbx,%r15
+ movq %r12,96(%rsp)
+
+ rorq $5,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ rorq $4,%r13
+ addq %rdx,%r12
+ xorq %r8,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rax,%r15
+ movq %r9,%rdx
+
+ rorq $6,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ xorq %r10,%rdx
+ xorq %r8,%r14
+ addq %r15,%r12
+ movq %r9,%r15
+
+ rorq $14,%r13
+ andq %r8,%rdx
+ andq %r10,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rdx
+
+ addq %r12,%r11
+ addq %r12,%rdx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rdx
+
+ movq 104(%rsi),%r12
+ movq %r11,%r13
+ movq %rdx,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rax,%r15
+ movq %r12,104(%rsp)
+
+ rorq $5,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ rorq $4,%r13
+ addq %rcx,%r12
+ xorq %rdx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r11,%r15
+ movq %r8,%rcx
+
+ rorq $6,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ xorq %r9,%rcx
+ xorq %rdx,%r14
+ addq %r15,%r12
+ movq %r8,%r15
+
+ rorq $14,%r13
+ andq %rdx,%rcx
+ andq %r9,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rcx
+
+ addq %r12,%r10
+ addq %r12,%rcx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rcx
+
+ movq 112(%rsi),%r12
+ movq %r10,%r13
+ movq %rcx,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r11,%r15
+ movq %r12,112(%rsp)
+
+ rorq $5,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ rorq $4,%r13
+ addq %rbx,%r12
+ xorq %rcx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r10,%r15
+ movq %rdx,%rbx
+
+ rorq $6,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ xorq %r8,%rbx
+ xorq %rcx,%r14
+ addq %r15,%r12
+ movq %rdx,%r15
+
+ rorq $14,%r13
+ andq %rcx,%rbx
+ andq %r8,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rbx
+
+ addq %r12,%r9
+ addq %r12,%rbx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rbx
+
+ movq 120(%rsi),%r12
+ movq %r9,%r13
+ movq %rbx,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r10,%r15
+ movq %r12,120(%rsp)
+
+ rorq $5,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ rorq $4,%r13
+ addq %rax,%r12
+ xorq %rbx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r9,%r15
+ movq %rcx,%rax
+
+ rorq $6,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ xorq %rdx,%rax
+ xorq %rbx,%r14
+ addq %r15,%r12
+ movq %rcx,%r15
+
+ rorq $14,%r13
+ andq %rbx,%rax
+ andq %rdx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rax
+
+ addq %r12,%r8
+ addq %r12,%rax
+ leaq 1(%rdi),%rdi
+ addq %r14,%rax
+
+ jmp .Lrounds_16_xx
+.align 16
+.Lrounds_16_xx:
+ movq 8(%rsp),%r13
+ movq 112(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 72(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 0(%rsp),%r12
+ movq %r8,%r13
+ addq %r14,%r12
+ movq %rax,%r14
+ rorq $23,%r13
+ movq %r9,%r15
+ movq %r12,0(%rsp)
+
+ rorq $5,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ rorq $4,%r13
+ addq %r11,%r12
+ xorq %rax,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r8,%r15
+ movq %rbx,%r11
+
+ rorq $6,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ xorq %rcx,%r11
+ xorq %rax,%r14
+ addq %r15,%r12
+ movq %rbx,%r15
+
+ rorq $14,%r13
+ andq %rax,%r11
+ andq %rcx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r11
+
+ addq %r12,%rdx
+ addq %r12,%r11
+ leaq 1(%rdi),%rdi
+ addq %r14,%r11
+
+ movq 16(%rsp),%r13
+ movq 120(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 80(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 8(%rsp),%r12
+ movq %rdx,%r13
+ addq %r14,%r12
+ movq %r11,%r14
+ rorq $23,%r13
+ movq %r8,%r15
+ movq %r12,8(%rsp)
+
+ rorq $5,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ rorq $4,%r13
+ addq %r10,%r12
+ xorq %r11,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rdx,%r15
+ movq %rax,%r10
+
+ rorq $6,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ xorq %rbx,%r10
+ xorq %r11,%r14
+ addq %r15,%r12
+ movq %rax,%r15
+
+ rorq $14,%r13
+ andq %r11,%r10
+ andq %rbx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r10
+
+ addq %r12,%rcx
+ addq %r12,%r10
+ leaq 1(%rdi),%rdi
+ addq %r14,%r10
+
+ movq 24(%rsp),%r13
+ movq 0(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 88(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 16(%rsp),%r12
+ movq %rcx,%r13
+ addq %r14,%r12
+ movq %r10,%r14
+ rorq $23,%r13
+ movq %rdx,%r15
+ movq %r12,16(%rsp)
+
+ rorq $5,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ rorq $4,%r13
+ addq %r9,%r12
+ xorq %r10,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rcx,%r15
+ movq %r11,%r9
+
+ rorq $6,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ xorq %rax,%r9
+ xorq %r10,%r14
+ addq %r15,%r12
+ movq %r11,%r15
+
+ rorq $14,%r13
+ andq %r10,%r9
+ andq %rax,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r9
+
+ addq %r12,%rbx
+ addq %r12,%r9
+ leaq 1(%rdi),%rdi
+ addq %r14,%r9
+
+ movq 32(%rsp),%r13
+ movq 8(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 96(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 24(%rsp),%r12
+ movq %rbx,%r13
+ addq %r14,%r12
+ movq %r9,%r14
+ rorq $23,%r13
+ movq %rcx,%r15
+ movq %r12,24(%rsp)
+
+ rorq $5,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ rorq $4,%r13
+ addq %r8,%r12
+ xorq %r9,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rbx,%r15
+ movq %r10,%r8
+
+ rorq $6,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ xorq %r11,%r8
+ xorq %r9,%r14
+ addq %r15,%r12
+ movq %r10,%r15
+
+ rorq $14,%r13
+ andq %r9,%r8
+ andq %r11,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r8
+
+ addq %r12,%rax
+ addq %r12,%r8
+ leaq 1(%rdi),%rdi
+ addq %r14,%r8
+
+ movq 40(%rsp),%r13
+ movq 16(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 104(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 32(%rsp),%r12
+ movq %rax,%r13
+ addq %r14,%r12
+ movq %r8,%r14
+ rorq $23,%r13
+ movq %rbx,%r15
+ movq %r12,32(%rsp)
+
+ rorq $5,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ rorq $4,%r13
+ addq %rdx,%r12
+ xorq %r8,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rax,%r15
+ movq %r9,%rdx
+
+ rorq $6,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ xorq %r10,%rdx
+ xorq %r8,%r14
+ addq %r15,%r12
+ movq %r9,%r15
+
+ rorq $14,%r13
+ andq %r8,%rdx
+ andq %r10,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rdx
+
+ addq %r12,%r11
+ addq %r12,%rdx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rdx
+
+ movq 48(%rsp),%r13
+ movq 24(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 112(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 40(%rsp),%r12
+ movq %r11,%r13
+ addq %r14,%r12
+ movq %rdx,%r14
+ rorq $23,%r13
+ movq %rax,%r15
+ movq %r12,40(%rsp)
+
+ rorq $5,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ rorq $4,%r13
+ addq %rcx,%r12
+ xorq %rdx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r11,%r15
+ movq %r8,%rcx
+
+ rorq $6,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ xorq %r9,%rcx
+ xorq %rdx,%r14
+ addq %r15,%r12
+ movq %r8,%r15
+
+ rorq $14,%r13
+ andq %rdx,%rcx
+ andq %r9,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rcx
+
+ addq %r12,%r10
+ addq %r12,%rcx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rcx
+
+ movq 56(%rsp),%r13
+ movq 32(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 120(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 48(%rsp),%r12
+ movq %r10,%r13
+ addq %r14,%r12
+ movq %rcx,%r14
+ rorq $23,%r13
+ movq %r11,%r15
+ movq %r12,48(%rsp)
+
+ rorq $5,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ rorq $4,%r13
+ addq %rbx,%r12
+ xorq %rcx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r10,%r15
+ movq %rdx,%rbx
+
+ rorq $6,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ xorq %r8,%rbx
+ xorq %rcx,%r14
+ addq %r15,%r12
+ movq %rdx,%r15
+
+ rorq $14,%r13
+ andq %rcx,%rbx
+ andq %r8,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rbx
+
+ addq %r12,%r9
+ addq %r12,%rbx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rbx
+
+ movq 64(%rsp),%r13
+ movq 40(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 0(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 56(%rsp),%r12
+ movq %r9,%r13
+ addq %r14,%r12
+ movq %rbx,%r14
+ rorq $23,%r13
+ movq %r10,%r15
+ movq %r12,56(%rsp)
+
+ rorq $5,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ rorq $4,%r13
+ addq %rax,%r12
+ xorq %rbx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r9,%r15
+ movq %rcx,%rax
+
+ rorq $6,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ xorq %rdx,%rax
+ xorq %rbx,%r14
+ addq %r15,%r12
+ movq %rcx,%r15
+
+ rorq $14,%r13
+ andq %rbx,%rax
+ andq %rdx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rax
+
+ addq %r12,%r8
+ addq %r12,%rax
+ leaq 1(%rdi),%rdi
+ addq %r14,%rax
+
+ movq 72(%rsp),%r13
+ movq 48(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 8(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 64(%rsp),%r12
+ movq %r8,%r13
+ addq %r14,%r12
+ movq %rax,%r14
+ rorq $23,%r13
+ movq %r9,%r15
+ movq %r12,64(%rsp)
+
+ rorq $5,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ rorq $4,%r13
+ addq %r11,%r12
+ xorq %rax,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r8,%r15
+ movq %rbx,%r11
+
+ rorq $6,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ xorq %rcx,%r11
+ xorq %rax,%r14
+ addq %r15,%r12
+ movq %rbx,%r15
+
+ rorq $14,%r13
+ andq %rax,%r11
+ andq %rcx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r11
+
+ addq %r12,%rdx
+ addq %r12,%r11
+ leaq 1(%rdi),%rdi
+ addq %r14,%r11
+
+ movq 80(%rsp),%r13
+ movq 56(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 16(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 72(%rsp),%r12
+ movq %rdx,%r13
+ addq %r14,%r12
+ movq %r11,%r14
+ rorq $23,%r13
+ movq %r8,%r15
+ movq %r12,72(%rsp)
+
+ rorq $5,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ rorq $4,%r13
+ addq %r10,%r12
+ xorq %r11,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rdx,%r15
+ movq %rax,%r10
+
+ rorq $6,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ xorq %rbx,%r10
+ xorq %r11,%r14
+ addq %r15,%r12
+ movq %rax,%r15
+
+ rorq $14,%r13
+ andq %r11,%r10
+ andq %rbx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r10
+
+ addq %r12,%rcx
+ addq %r12,%r10
+ leaq 1(%rdi),%rdi
+ addq %r14,%r10
+
+ movq 88(%rsp),%r13
+ movq 64(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 24(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 80(%rsp),%r12
+ movq %rcx,%r13
+ addq %r14,%r12
+ movq %r10,%r14
+ rorq $23,%r13
+ movq %rdx,%r15
+ movq %r12,80(%rsp)
+
+ rorq $5,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ rorq $4,%r13
+ addq %r9,%r12
+ xorq %r10,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rcx,%r15
+ movq %r11,%r9
+
+ rorq $6,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ xorq %rax,%r9
+ xorq %r10,%r14
+ addq %r15,%r12
+ movq %r11,%r15
+
+ rorq $14,%r13
+ andq %r10,%r9
+ andq %rax,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r9
+
+ addq %r12,%rbx
+ addq %r12,%r9
+ leaq 1(%rdi),%rdi
+ addq %r14,%r9
+
+ movq 96(%rsp),%r13
+ movq 72(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 32(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 88(%rsp),%r12
+ movq %rbx,%r13
+ addq %r14,%r12
+ movq %r9,%r14
+ rorq $23,%r13
+ movq %rcx,%r15
+ movq %r12,88(%rsp)
+
+ rorq $5,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ rorq $4,%r13
+ addq %r8,%r12
+ xorq %r9,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rbx,%r15
+ movq %r10,%r8
+
+ rorq $6,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ xorq %r11,%r8
+ xorq %r9,%r14
+ addq %r15,%r12
+ movq %r10,%r15
+
+ rorq $14,%r13
+ andq %r9,%r8
+ andq %r11,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r8
+
+ addq %r12,%rax
+ addq %r12,%r8
+ leaq 1(%rdi),%rdi
+ addq %r14,%r8
+
+ movq 104(%rsp),%r13
+ movq 80(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 40(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 96(%rsp),%r12
+ movq %rax,%r13
+ addq %r14,%r12
+ movq %r8,%r14
+ rorq $23,%r13
+ movq %rbx,%r15
+ movq %r12,96(%rsp)
+
+ rorq $5,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ rorq $4,%r13
+ addq %rdx,%r12
+ xorq %r8,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rax,%r15
+ movq %r9,%rdx
+
+ rorq $6,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ xorq %r10,%rdx
+ xorq %r8,%r14
+ addq %r15,%r12
+ movq %r9,%r15
+
+ rorq $14,%r13
+ andq %r8,%rdx
+ andq %r10,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rdx
+
+ addq %r12,%r11
+ addq %r12,%rdx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rdx
+
+ movq 112(%rsp),%r13
+ movq 88(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 48(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 104(%rsp),%r12
+ movq %r11,%r13
+ addq %r14,%r12
+ movq %rdx,%r14
+ rorq $23,%r13
+ movq %rax,%r15
+ movq %r12,104(%rsp)
+
+ rorq $5,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ rorq $4,%r13
+ addq %rcx,%r12
+ xorq %rdx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r11,%r15
+ movq %r8,%rcx
+
+ rorq $6,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ xorq %r9,%rcx
+ xorq %rdx,%r14
+ addq %r15,%r12
+ movq %r8,%r15
+
+ rorq $14,%r13
+ andq %rdx,%rcx
+ andq %r9,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rcx
+
+ addq %r12,%r10
+ addq %r12,%rcx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rcx
+
+ movq 120(%rsp),%r13
+ movq 96(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 56(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 112(%rsp),%r12
+ movq %r10,%r13
+ addq %r14,%r12
+ movq %rcx,%r14
+ rorq $23,%r13
+ movq %r11,%r15
+ movq %r12,112(%rsp)
+
+ rorq $5,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ rorq $4,%r13
+ addq %rbx,%r12
+ xorq %rcx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r10,%r15
+ movq %rdx,%rbx
+
+ rorq $6,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ xorq %r8,%rbx
+ xorq %rcx,%r14
+ addq %r15,%r12
+ movq %rdx,%r15
+
+ rorq $14,%r13
+ andq %rcx,%rbx
+ andq %r8,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rbx
+
+ addq %r12,%r9
+ addq %r12,%rbx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rbx
+
+ movq 0(%rsp),%r13
+ movq 104(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 64(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 120(%rsp),%r12
+ movq %r9,%r13
+ addq %r14,%r12
+ movq %rbx,%r14
+ rorq $23,%r13
+ movq %r10,%r15
+ movq %r12,120(%rsp)
+
+ rorq $5,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ rorq $4,%r13
+ addq %rax,%r12
+ xorq %rbx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r9,%r15
+ movq %rcx,%rax
+
+ rorq $6,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ xorq %rdx,%rax
+ xorq %rbx,%r14
+ addq %r15,%r12
+ movq %rcx,%r15
+
+ rorq $14,%r13
+ andq %rbx,%rax
+ andq %rdx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rax
+
+ addq %r12,%r8
+ addq %r12,%rax
+ leaq 1(%rdi),%rdi
+ addq %r14,%rax
+
+ cmpq $80,%rdi
+ jb .Lrounds_16_xx
+
+ movq 128+0(%rsp),%rdi
+ leaq 128(%rsi),%rsi
+
+ addq 0(%rdi),%rax
+ addq 8(%rdi),%rbx
+ addq 16(%rdi),%rcx
+ addq 24(%rdi),%rdx
+ addq 32(%rdi),%r8
+ addq 40(%rdi),%r9
+ addq 48(%rdi),%r10
+ addq 56(%rdi),%r11
+
+ cmpq 128+16(%rsp),%rsi
+
+ movq %rax,0(%rdi)
+ movq %rbx,8(%rdi)
+ movq %rcx,16(%rdi)
+ movq %rdx,24(%rdi)
+ movq %r8,32(%rdi)
+ movq %r9,40(%rdi)
+ movq %r10,48(%rdi)
+ movq %r11,56(%rdi)
+ jb .Lloop
+
+ movq 128+24(%rsp),%rsi
+ movq (%rsi),%r15
+ movq 8(%rsi),%r14
+ movq 16(%rsi),%r13
+ movq 24(%rsi),%r12
+ movq 32(%rsi),%rbp
+ movq 40(%rsi),%rbx
+ leaq 48(%rsi),%rsp
+.Lepilogue:
+ retq
+.size sha512_block_data_order,.-sha512_block_data_order
+.align 64
+.type K512,@object
+K512:
+.quad 0x428a2f98d728ae22,0x7137449123ef65cd
+.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+.quad 0x3956c25bf348b538,0x59f111f1b605d019
+.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
+.quad 0xd807aa98a3030242,0x12835b0145706fbe
+.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
+.quad 0x9bdc06a725c71235,0xc19bf174cf692694
+.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
+.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
+.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+.quad 0x983e5152ee66dfab,0xa831c66d2db43210
+.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
+.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
+.quad 0x06ca6351e003826f,0x142929670a0e6e70
+.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
+.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
+.quad 0x81c2c92e47edaee6,0x92722c851482353b
+.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
+.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
+.quad 0xd192e819d6ef5218,0xd69906245565a910
+.quad 0xf40e35855771202a,0x106aa07032bbd1b8
+.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
+.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
+.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
+.quad 0x90befffa23631e28,0xa4506cebde82bde9
+.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
+.quad 0xca273eceea26619c,0xd186b8c721c0c207
+.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
+.quad 0x113f9804bef90dae,0x1b710b35131c471b
+.quad 0x28db77f523047d84,0x32caab7b40c72493
+.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
+#if defined(HAVE_GNU_STACK)
+.section .note.GNU-stack,"",%progbits
+#endif
diff --git a/crypto/libressl/crypto/sha/sha512-macosx-x86_64.S b/crypto/libressl/crypto/sha/sha512-macosx-x86_64.S
new file mode 100644
index 0000000..7581da4
--- /dev/null
+++ b/crypto/libressl/crypto/sha/sha512-macosx-x86_64.S
@@ -0,0 +1,1803 @@
+#include "x86_arch.h"
+.text
+
+.globl _sha512_block_data_order
+
+.p2align 4
+_sha512_block_data_order:
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+ movq %rsp,%r11
+ shlq $4,%rdx
+ subq $128+32,%rsp
+ leaq (%rsi,%rdx,8),%rdx
+ andq $-64,%rsp
+ movq %rdi,128+0(%rsp)
+ movq %rsi,128+8(%rsp)
+ movq %rdx,128+16(%rsp)
+ movq %r11,128+24(%rsp)
+L$prologue:
+
+ leaq K512(%rip),%rbp
+
+ movq 0(%rdi),%rax
+ movq 8(%rdi),%rbx
+ movq 16(%rdi),%rcx
+ movq 24(%rdi),%rdx
+ movq 32(%rdi),%r8
+ movq 40(%rdi),%r9
+ movq 48(%rdi),%r10
+ movq 56(%rdi),%r11
+ jmp L$loop
+
+.p2align 4
+L$loop:
+ xorq %rdi,%rdi
+ movq 0(%rsi),%r12
+ movq %r8,%r13
+ movq %rax,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r9,%r15
+ movq %r12,0(%rsp)
+
+ rorq $5,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ rorq $4,%r13
+ addq %r11,%r12
+ xorq %rax,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r8,%r15
+ movq %rbx,%r11
+
+ rorq $6,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ xorq %rcx,%r11
+ xorq %rax,%r14
+ addq %r15,%r12
+ movq %rbx,%r15
+
+ rorq $14,%r13
+ andq %rax,%r11
+ andq %rcx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r11
+
+ addq %r12,%rdx
+ addq %r12,%r11
+ leaq 1(%rdi),%rdi
+ addq %r14,%r11
+
+ movq 8(%rsi),%r12
+ movq %rdx,%r13
+ movq %r11,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r8,%r15
+ movq %r12,8(%rsp)
+
+ rorq $5,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ rorq $4,%r13
+ addq %r10,%r12
+ xorq %r11,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rdx,%r15
+ movq %rax,%r10
+
+ rorq $6,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ xorq %rbx,%r10
+ xorq %r11,%r14
+ addq %r15,%r12
+ movq %rax,%r15
+
+ rorq $14,%r13
+ andq %r11,%r10
+ andq %rbx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r10
+
+ addq %r12,%rcx
+ addq %r12,%r10
+ leaq 1(%rdi),%rdi
+ addq %r14,%r10
+
+ movq 16(%rsi),%r12
+ movq %rcx,%r13
+ movq %r10,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rdx,%r15
+ movq %r12,16(%rsp)
+
+ rorq $5,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ rorq $4,%r13
+ addq %r9,%r12
+ xorq %r10,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rcx,%r15
+ movq %r11,%r9
+
+ rorq $6,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ xorq %rax,%r9
+ xorq %r10,%r14
+ addq %r15,%r12
+ movq %r11,%r15
+
+ rorq $14,%r13
+ andq %r10,%r9
+ andq %rax,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r9
+
+ addq %r12,%rbx
+ addq %r12,%r9
+ leaq 1(%rdi),%rdi
+ addq %r14,%r9
+
+ movq 24(%rsi),%r12
+ movq %rbx,%r13
+ movq %r9,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rcx,%r15
+ movq %r12,24(%rsp)
+
+ rorq $5,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ rorq $4,%r13
+ addq %r8,%r12
+ xorq %r9,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rbx,%r15
+ movq %r10,%r8
+
+ rorq $6,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ xorq %r11,%r8
+ xorq %r9,%r14
+ addq %r15,%r12
+ movq %r10,%r15
+
+ rorq $14,%r13
+ andq %r9,%r8
+ andq %r11,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r8
+
+ addq %r12,%rax
+ addq %r12,%r8
+ leaq 1(%rdi),%rdi
+ addq %r14,%r8
+
+ movq 32(%rsi),%r12
+ movq %rax,%r13
+ movq %r8,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rbx,%r15
+ movq %r12,32(%rsp)
+
+ rorq $5,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ rorq $4,%r13
+ addq %rdx,%r12
+ xorq %r8,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rax,%r15
+ movq %r9,%rdx
+
+ rorq $6,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ xorq %r10,%rdx
+ xorq %r8,%r14
+ addq %r15,%r12
+ movq %r9,%r15
+
+ rorq $14,%r13
+ andq %r8,%rdx
+ andq %r10,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rdx
+
+ addq %r12,%r11
+ addq %r12,%rdx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rdx
+
+ movq 40(%rsi),%r12
+ movq %r11,%r13
+ movq %rdx,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rax,%r15
+ movq %r12,40(%rsp)
+
+ rorq $5,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ rorq $4,%r13
+ addq %rcx,%r12
+ xorq %rdx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r11,%r15
+ movq %r8,%rcx
+
+ rorq $6,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ xorq %r9,%rcx
+ xorq %rdx,%r14
+ addq %r15,%r12
+ movq %r8,%r15
+
+ rorq $14,%r13
+ andq %rdx,%rcx
+ andq %r9,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rcx
+
+ addq %r12,%r10
+ addq %r12,%rcx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rcx
+
+ movq 48(%rsi),%r12
+ movq %r10,%r13
+ movq %rcx,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r11,%r15
+ movq %r12,48(%rsp)
+
+ rorq $5,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ rorq $4,%r13
+ addq %rbx,%r12
+ xorq %rcx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r10,%r15
+ movq %rdx,%rbx
+
+ rorq $6,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ xorq %r8,%rbx
+ xorq %rcx,%r14
+ addq %r15,%r12
+ movq %rdx,%r15
+
+ rorq $14,%r13
+ andq %rcx,%rbx
+ andq %r8,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rbx
+
+ addq %r12,%r9
+ addq %r12,%rbx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rbx
+
+ movq 56(%rsi),%r12
+ movq %r9,%r13
+ movq %rbx,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r10,%r15
+ movq %r12,56(%rsp)
+
+ rorq $5,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ rorq $4,%r13
+ addq %rax,%r12
+ xorq %rbx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r9,%r15
+ movq %rcx,%rax
+
+ rorq $6,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ xorq %rdx,%rax
+ xorq %rbx,%r14
+ addq %r15,%r12
+ movq %rcx,%r15
+
+ rorq $14,%r13
+ andq %rbx,%rax
+ andq %rdx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rax
+
+ addq %r12,%r8
+ addq %r12,%rax
+ leaq 1(%rdi),%rdi
+ addq %r14,%rax
+
+ movq 64(%rsi),%r12
+ movq %r8,%r13
+ movq %rax,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r9,%r15
+ movq %r12,64(%rsp)
+
+ rorq $5,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ rorq $4,%r13
+ addq %r11,%r12
+ xorq %rax,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r8,%r15
+ movq %rbx,%r11
+
+ rorq $6,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ xorq %rcx,%r11
+ xorq %rax,%r14
+ addq %r15,%r12
+ movq %rbx,%r15
+
+ rorq $14,%r13
+ andq %rax,%r11
+ andq %rcx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r11
+
+ addq %r12,%rdx
+ addq %r12,%r11
+ leaq 1(%rdi),%rdi
+ addq %r14,%r11
+
+ movq 72(%rsi),%r12
+ movq %rdx,%r13
+ movq %r11,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r8,%r15
+ movq %r12,72(%rsp)
+
+ rorq $5,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ rorq $4,%r13
+ addq %r10,%r12
+ xorq %r11,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rdx,%r15
+ movq %rax,%r10
+
+ rorq $6,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ xorq %rbx,%r10
+ xorq %r11,%r14
+ addq %r15,%r12
+ movq %rax,%r15
+
+ rorq $14,%r13
+ andq %r11,%r10
+ andq %rbx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r10
+
+ addq %r12,%rcx
+ addq %r12,%r10
+ leaq 1(%rdi),%rdi
+ addq %r14,%r10
+
+ movq 80(%rsi),%r12
+ movq %rcx,%r13
+ movq %r10,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rdx,%r15
+ movq %r12,80(%rsp)
+
+ rorq $5,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ rorq $4,%r13
+ addq %r9,%r12
+ xorq %r10,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rcx,%r15
+ movq %r11,%r9
+
+ rorq $6,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ xorq %rax,%r9
+ xorq %r10,%r14
+ addq %r15,%r12
+ movq %r11,%r15
+
+ rorq $14,%r13
+ andq %r10,%r9
+ andq %rax,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r9
+
+ addq %r12,%rbx
+ addq %r12,%r9
+ leaq 1(%rdi),%rdi
+ addq %r14,%r9
+
+ movq 88(%rsi),%r12
+ movq %rbx,%r13
+ movq %r9,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rcx,%r15
+ movq %r12,88(%rsp)
+
+ rorq $5,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ rorq $4,%r13
+ addq %r8,%r12
+ xorq %r9,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rbx,%r15
+ movq %r10,%r8
+
+ rorq $6,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ xorq %r11,%r8
+ xorq %r9,%r14
+ addq %r15,%r12
+ movq %r10,%r15
+
+ rorq $14,%r13
+ andq %r9,%r8
+ andq %r11,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r8
+
+ addq %r12,%rax
+ addq %r12,%r8
+ leaq 1(%rdi),%rdi
+ addq %r14,%r8
+
+ movq 96(%rsi),%r12
+ movq %rax,%r13
+ movq %r8,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rbx,%r15
+ movq %r12,96(%rsp)
+
+ rorq $5,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ rorq $4,%r13
+ addq %rdx,%r12
+ xorq %r8,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rax,%r15
+ movq %r9,%rdx
+
+ rorq $6,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ xorq %r10,%rdx
+ xorq %r8,%r14
+ addq %r15,%r12
+ movq %r9,%r15
+
+ rorq $14,%r13
+ andq %r8,%rdx
+ andq %r10,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rdx
+
+ addq %r12,%r11
+ addq %r12,%rdx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rdx
+
+ movq 104(%rsi),%r12
+ movq %r11,%r13
+ movq %rdx,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rax,%r15
+ movq %r12,104(%rsp)
+
+ rorq $5,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ rorq $4,%r13
+ addq %rcx,%r12
+ xorq %rdx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r11,%r15
+ movq %r8,%rcx
+
+ rorq $6,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ xorq %r9,%rcx
+ xorq %rdx,%r14
+ addq %r15,%r12
+ movq %r8,%r15
+
+ rorq $14,%r13
+ andq %rdx,%rcx
+ andq %r9,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rcx
+
+ addq %r12,%r10
+ addq %r12,%rcx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rcx
+
+ movq 112(%rsi),%r12
+ movq %r10,%r13
+ movq %rcx,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r11,%r15
+ movq %r12,112(%rsp)
+
+ rorq $5,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ rorq $4,%r13
+ addq %rbx,%r12
+ xorq %rcx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r10,%r15
+ movq %rdx,%rbx
+
+ rorq $6,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ xorq %r8,%rbx
+ xorq %rcx,%r14
+ addq %r15,%r12
+ movq %rdx,%r15
+
+ rorq $14,%r13
+ andq %rcx,%rbx
+ andq %r8,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rbx
+
+ addq %r12,%r9
+ addq %r12,%rbx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rbx
+
+ movq 120(%rsi),%r12
+ movq %r9,%r13
+ movq %rbx,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r10,%r15
+ movq %r12,120(%rsp)
+
+ rorq $5,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ rorq $4,%r13
+ addq %rax,%r12
+ xorq %rbx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r9,%r15
+ movq %rcx,%rax
+
+ rorq $6,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ xorq %rdx,%rax
+ xorq %rbx,%r14
+ addq %r15,%r12
+ movq %rcx,%r15
+
+ rorq $14,%r13
+ andq %rbx,%rax
+ andq %rdx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rax
+
+ addq %r12,%r8
+ addq %r12,%rax
+ leaq 1(%rdi),%rdi
+ addq %r14,%rax
+
+ jmp L$rounds_16_xx
+.p2align 4
+L$rounds_16_xx:
+ movq 8(%rsp),%r13
+ movq 112(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 72(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 0(%rsp),%r12
+ movq %r8,%r13
+ addq %r14,%r12
+ movq %rax,%r14
+ rorq $23,%r13
+ movq %r9,%r15
+ movq %r12,0(%rsp)
+
+ rorq $5,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ rorq $4,%r13
+ addq %r11,%r12
+ xorq %rax,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r8,%r15
+ movq %rbx,%r11
+
+ rorq $6,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ xorq %rcx,%r11
+ xorq %rax,%r14
+ addq %r15,%r12
+ movq %rbx,%r15
+
+ rorq $14,%r13
+ andq %rax,%r11
+ andq %rcx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r11
+
+ addq %r12,%rdx
+ addq %r12,%r11
+ leaq 1(%rdi),%rdi
+ addq %r14,%r11
+
+ movq 16(%rsp),%r13
+ movq 120(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 80(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 8(%rsp),%r12
+ movq %rdx,%r13
+ addq %r14,%r12
+ movq %r11,%r14
+ rorq $23,%r13
+ movq %r8,%r15
+ movq %r12,8(%rsp)
+
+ rorq $5,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ rorq $4,%r13
+ addq %r10,%r12
+ xorq %r11,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rdx,%r15
+ movq %rax,%r10
+
+ rorq $6,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ xorq %rbx,%r10
+ xorq %r11,%r14
+ addq %r15,%r12
+ movq %rax,%r15
+
+ rorq $14,%r13
+ andq %r11,%r10
+ andq %rbx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r10
+
+ addq %r12,%rcx
+ addq %r12,%r10
+ leaq 1(%rdi),%rdi
+ addq %r14,%r10
+
+ movq 24(%rsp),%r13
+ movq 0(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 88(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 16(%rsp),%r12
+ movq %rcx,%r13
+ addq %r14,%r12
+ movq %r10,%r14
+ rorq $23,%r13
+ movq %rdx,%r15
+ movq %r12,16(%rsp)
+
+ rorq $5,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ rorq $4,%r13
+ addq %r9,%r12
+ xorq %r10,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rcx,%r15
+ movq %r11,%r9
+
+ rorq $6,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ xorq %rax,%r9
+ xorq %r10,%r14
+ addq %r15,%r12
+ movq %r11,%r15
+
+ rorq $14,%r13
+ andq %r10,%r9
+ andq %rax,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r9
+
+ addq %r12,%rbx
+ addq %r12,%r9
+ leaq 1(%rdi),%rdi
+ addq %r14,%r9
+
+ movq 32(%rsp),%r13
+ movq 8(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 96(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 24(%rsp),%r12
+ movq %rbx,%r13
+ addq %r14,%r12
+ movq %r9,%r14
+ rorq $23,%r13
+ movq %rcx,%r15
+ movq %r12,24(%rsp)
+
+ rorq $5,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ rorq $4,%r13
+ addq %r8,%r12
+ xorq %r9,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rbx,%r15
+ movq %r10,%r8
+
+ rorq $6,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ xorq %r11,%r8
+ xorq %r9,%r14
+ addq %r15,%r12
+ movq %r10,%r15
+
+ rorq $14,%r13
+ andq %r9,%r8
+ andq %r11,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r8
+
+ addq %r12,%rax
+ addq %r12,%r8
+ leaq 1(%rdi),%rdi
+ addq %r14,%r8
+
+ movq 40(%rsp),%r13
+ movq 16(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 104(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 32(%rsp),%r12
+ movq %rax,%r13
+ addq %r14,%r12
+ movq %r8,%r14
+ rorq $23,%r13
+ movq %rbx,%r15
+ movq %r12,32(%rsp)
+
+ rorq $5,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ rorq $4,%r13
+ addq %rdx,%r12
+ xorq %r8,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rax,%r15
+ movq %r9,%rdx
+
+ rorq $6,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ xorq %r10,%rdx
+ xorq %r8,%r14
+ addq %r15,%r12
+ movq %r9,%r15
+
+ rorq $14,%r13
+ andq %r8,%rdx
+ andq %r10,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rdx
+
+ addq %r12,%r11
+ addq %r12,%rdx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rdx
+
+ movq 48(%rsp),%r13
+ movq 24(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 112(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 40(%rsp),%r12
+ movq %r11,%r13
+ addq %r14,%r12
+ movq %rdx,%r14
+ rorq $23,%r13
+ movq %rax,%r15
+ movq %r12,40(%rsp)
+
+ rorq $5,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ rorq $4,%r13
+ addq %rcx,%r12
+ xorq %rdx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r11,%r15
+ movq %r8,%rcx
+
+ rorq $6,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ xorq %r9,%rcx
+ xorq %rdx,%r14
+ addq %r15,%r12
+ movq %r8,%r15
+
+ rorq $14,%r13
+ andq %rdx,%rcx
+ andq %r9,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rcx
+
+ addq %r12,%r10
+ addq %r12,%rcx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rcx
+
+ movq 56(%rsp),%r13
+ movq 32(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 120(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 48(%rsp),%r12
+ movq %r10,%r13
+ addq %r14,%r12
+ movq %rcx,%r14
+ rorq $23,%r13
+ movq %r11,%r15
+ movq %r12,48(%rsp)
+
+ rorq $5,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ rorq $4,%r13
+ addq %rbx,%r12
+ xorq %rcx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r10,%r15
+ movq %rdx,%rbx
+
+ rorq $6,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ xorq %r8,%rbx
+ xorq %rcx,%r14
+ addq %r15,%r12
+ movq %rdx,%r15
+
+ rorq $14,%r13
+ andq %rcx,%rbx
+ andq %r8,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rbx
+
+ addq %r12,%r9
+ addq %r12,%rbx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rbx
+
+ movq 64(%rsp),%r13
+ movq 40(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 0(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 56(%rsp),%r12
+ movq %r9,%r13
+ addq %r14,%r12
+ movq %rbx,%r14
+ rorq $23,%r13
+ movq %r10,%r15
+ movq %r12,56(%rsp)
+
+ rorq $5,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ rorq $4,%r13
+ addq %rax,%r12
+ xorq %rbx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r9,%r15
+ movq %rcx,%rax
+
+ rorq $6,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ xorq %rdx,%rax
+ xorq %rbx,%r14
+ addq %r15,%r12
+ movq %rcx,%r15
+
+ rorq $14,%r13
+ andq %rbx,%rax
+ andq %rdx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rax
+
+ addq %r12,%r8
+ addq %r12,%rax
+ leaq 1(%rdi),%rdi
+ addq %r14,%rax
+
+ movq 72(%rsp),%r13
+ movq 48(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 8(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 64(%rsp),%r12
+ movq %r8,%r13
+ addq %r14,%r12
+ movq %rax,%r14
+ rorq $23,%r13
+ movq %r9,%r15
+ movq %r12,64(%rsp)
+
+ rorq $5,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ rorq $4,%r13
+ addq %r11,%r12
+ xorq %rax,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r8,%r15
+ movq %rbx,%r11
+
+ rorq $6,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ xorq %rcx,%r11
+ xorq %rax,%r14
+ addq %r15,%r12
+ movq %rbx,%r15
+
+ rorq $14,%r13
+ andq %rax,%r11
+ andq %rcx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r11
+
+ addq %r12,%rdx
+ addq %r12,%r11
+ leaq 1(%rdi),%rdi
+ addq %r14,%r11
+
+ movq 80(%rsp),%r13
+ movq 56(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 16(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 72(%rsp),%r12
+ movq %rdx,%r13
+ addq %r14,%r12
+ movq %r11,%r14
+ rorq $23,%r13
+ movq %r8,%r15
+ movq %r12,72(%rsp)
+
+ rorq $5,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ rorq $4,%r13
+ addq %r10,%r12
+ xorq %r11,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rdx,%r15
+ movq %rax,%r10
+
+ rorq $6,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ xorq %rbx,%r10
+ xorq %r11,%r14
+ addq %r15,%r12
+ movq %rax,%r15
+
+ rorq $14,%r13
+ andq %r11,%r10
+ andq %rbx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r10
+
+ addq %r12,%rcx
+ addq %r12,%r10
+ leaq 1(%rdi),%rdi
+ addq %r14,%r10
+
+ movq 88(%rsp),%r13
+ movq 64(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 24(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 80(%rsp),%r12
+ movq %rcx,%r13
+ addq %r14,%r12
+ movq %r10,%r14
+ rorq $23,%r13
+ movq %rdx,%r15
+ movq %r12,80(%rsp)
+
+ rorq $5,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ rorq $4,%r13
+ addq %r9,%r12
+ xorq %r10,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rcx,%r15
+ movq %r11,%r9
+
+ rorq $6,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ xorq %rax,%r9
+ xorq %r10,%r14
+ addq %r15,%r12
+ movq %r11,%r15
+
+ rorq $14,%r13
+ andq %r10,%r9
+ andq %rax,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r9
+
+ addq %r12,%rbx
+ addq %r12,%r9
+ leaq 1(%rdi),%rdi
+ addq %r14,%r9
+
+ movq 96(%rsp),%r13
+ movq 72(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 32(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 88(%rsp),%r12
+ movq %rbx,%r13
+ addq %r14,%r12
+ movq %r9,%r14
+ rorq $23,%r13
+ movq %rcx,%r15
+ movq %r12,88(%rsp)
+
+ rorq $5,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ rorq $4,%r13
+ addq %r8,%r12
+ xorq %r9,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rbx,%r15
+ movq %r10,%r8
+
+ rorq $6,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ xorq %r11,%r8
+ xorq %r9,%r14
+ addq %r15,%r12
+ movq %r10,%r15
+
+ rorq $14,%r13
+ andq %r9,%r8
+ andq %r11,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r8
+
+ addq %r12,%rax
+ addq %r12,%r8
+ leaq 1(%rdi),%rdi
+ addq %r14,%r8
+
+ movq 104(%rsp),%r13
+ movq 80(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 40(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 96(%rsp),%r12
+ movq %rax,%r13
+ addq %r14,%r12
+ movq %r8,%r14
+ rorq $23,%r13
+ movq %rbx,%r15
+ movq %r12,96(%rsp)
+
+ rorq $5,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ rorq $4,%r13
+ addq %rdx,%r12
+ xorq %r8,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rax,%r15
+ movq %r9,%rdx
+
+ rorq $6,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ xorq %r10,%rdx
+ xorq %r8,%r14
+ addq %r15,%r12
+ movq %r9,%r15
+
+ rorq $14,%r13
+ andq %r8,%rdx
+ andq %r10,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rdx
+
+ addq %r12,%r11
+ addq %r12,%rdx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rdx
+
+ movq 112(%rsp),%r13
+ movq 88(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 48(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 104(%rsp),%r12
+ movq %r11,%r13
+ addq %r14,%r12
+ movq %rdx,%r14
+ rorq $23,%r13
+ movq %rax,%r15
+ movq %r12,104(%rsp)
+
+ rorq $5,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ rorq $4,%r13
+ addq %rcx,%r12
+ xorq %rdx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r11,%r15
+ movq %r8,%rcx
+
+ rorq $6,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ xorq %r9,%rcx
+ xorq %rdx,%r14
+ addq %r15,%r12
+ movq %r8,%r15
+
+ rorq $14,%r13
+ andq %rdx,%rcx
+ andq %r9,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rcx
+
+ addq %r12,%r10
+ addq %r12,%rcx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rcx
+
+ movq 120(%rsp),%r13
+ movq 96(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 56(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 112(%rsp),%r12
+ movq %r10,%r13
+ addq %r14,%r12
+ movq %rcx,%r14
+ rorq $23,%r13
+ movq %r11,%r15
+ movq %r12,112(%rsp)
+
+ rorq $5,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ rorq $4,%r13
+ addq %rbx,%r12
+ xorq %rcx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r10,%r15
+ movq %rdx,%rbx
+
+ rorq $6,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ xorq %r8,%rbx
+ xorq %rcx,%r14
+ addq %r15,%r12
+ movq %rdx,%r15
+
+ rorq $14,%r13
+ andq %rcx,%rbx
+ andq %r8,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rbx
+
+ addq %r12,%r9
+ addq %r12,%rbx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rbx
+
+ movq 0(%rsp),%r13
+ movq 104(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 64(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 120(%rsp),%r12
+ movq %r9,%r13
+ addq %r14,%r12
+ movq %rbx,%r14
+ rorq $23,%r13
+ movq %r10,%r15
+ movq %r12,120(%rsp)
+
+ rorq $5,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ rorq $4,%r13
+ addq %rax,%r12
+ xorq %rbx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r9,%r15
+ movq %rcx,%rax
+
+ rorq $6,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ xorq %rdx,%rax
+ xorq %rbx,%r14
+ addq %r15,%r12
+ movq %rcx,%r15
+
+ rorq $14,%r13
+ andq %rbx,%rax
+ andq %rdx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rax
+
+ addq %r12,%r8
+ addq %r12,%rax
+ leaq 1(%rdi),%rdi
+ addq %r14,%rax
+
+ cmpq $80,%rdi
+ jb L$rounds_16_xx
+
+ movq 128+0(%rsp),%rdi
+ leaq 128(%rsi),%rsi
+
+ addq 0(%rdi),%rax
+ addq 8(%rdi),%rbx
+ addq 16(%rdi),%rcx
+ addq 24(%rdi),%rdx
+ addq 32(%rdi),%r8
+ addq 40(%rdi),%r9
+ addq 48(%rdi),%r10
+ addq 56(%rdi),%r11
+
+ cmpq 128+16(%rsp),%rsi
+
+ movq %rax,0(%rdi)
+ movq %rbx,8(%rdi)
+ movq %rcx,16(%rdi)
+ movq %rdx,24(%rdi)
+ movq %r8,32(%rdi)
+ movq %r9,40(%rdi)
+ movq %r10,48(%rdi)
+ movq %r11,56(%rdi)
+ jb L$loop
+
+ movq 128+24(%rsp),%rsi
+ movq (%rsi),%r15
+ movq 8(%rsi),%r14
+ movq 16(%rsi),%r13
+ movq 24(%rsi),%r12
+ movq 32(%rsi),%rbp
+ movq 40(%rsi),%rbx
+ leaq 48(%rsi),%rsp
+L$epilogue:
+ retq
+
+.p2align 6
+
+K512:
+.quad 0x428a2f98d728ae22,0x7137449123ef65cd
+.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+.quad 0x3956c25bf348b538,0x59f111f1b605d019
+.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
+.quad 0xd807aa98a3030242,0x12835b0145706fbe
+.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
+.quad 0x9bdc06a725c71235,0xc19bf174cf692694
+.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
+.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
+.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+.quad 0x983e5152ee66dfab,0xa831c66d2db43210
+.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
+.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
+.quad 0x06ca6351e003826f,0x142929670a0e6e70
+.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
+.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
+.quad 0x81c2c92e47edaee6,0x92722c851482353b
+.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
+.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
+.quad 0xd192e819d6ef5218,0xd69906245565a910
+.quad 0xf40e35855771202a,0x106aa07032bbd1b8
+.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
+.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
+.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
+.quad 0x90befffa23631e28,0xa4506cebde82bde9
+.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
+.quad 0xca273eceea26619c,0xd186b8c721c0c207
+.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
+.quad 0x113f9804bef90dae,0x1b710b35131c471b
+.quad 0x28db77f523047d84,0x32caab7b40c72493
+.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
diff --git a/crypto/libressl/crypto/sha/sha512-masm-x86_64.S b/crypto/libressl/crypto/sha/sha512-masm-x86_64.S
new file mode 100644
index 0000000..4a2b9af
--- /dev/null
+++ b/crypto/libressl/crypto/sha/sha512-masm-x86_64.S
@@ -0,0 +1,1888 @@
+; 1 "crypto/sha/sha512-masm-x86_64.S.tmp"
+; 1 "<built-in>" 1
+; 1 "<built-in>" 3
+; 340 "<built-in>" 3
+; 1 "<command line>" 1
+; 1 "<built-in>" 2
+; 1 "crypto/sha/sha512-masm-x86_64.S.tmp" 2
+OPTION DOTNAME
+
+; 1 "./crypto/x86_arch.h" 1
+
+
+; 16 "./crypto/x86_arch.h"
+
+
+
+
+
+
+
+
+
+; 40 "./crypto/x86_arch.h"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+; 3 "crypto/sha/sha512-masm-x86_64.S.tmp" 2
+.text$ SEGMENT ALIGN(64) 'CODE'
+
+PUBLIC sha512_block_data_order
+
+ALIGN 16
+sha512_block_data_order PROC PUBLIC
+ mov QWORD PTR[8+rsp],rdi ;WIN64 prologue
+ mov QWORD PTR[16+rsp],rsi
+ mov rax,rsp
+$L$SEH_begin_sha512_block_data_order::
+ mov rdi,rcx
+ mov rsi,rdx
+ mov rdx,r8
+ mov rcx,r9
+
+
+ push rbx
+ push rbp
+ push r12
+ push r13
+ push r14
+ push r15
+ mov r11,rsp
+ shl rdx,4
+ sub rsp,16*8+4*8
+ lea rdx,QWORD PTR[rdx*8+rsi]
+ and rsp,-64
+ mov QWORD PTR[((128+0))+rsp],rdi
+ mov QWORD PTR[((128+8))+rsp],rsi
+ mov QWORD PTR[((128+16))+rsp],rdx
+ mov QWORD PTR[((128+24))+rsp],r11
+$L$prologue::
+
+ lea rbp,QWORD PTR[K512]
+
+ mov rax,QWORD PTR[rdi]
+ mov rbx,QWORD PTR[8+rdi]
+ mov rcx,QWORD PTR[16+rdi]
+ mov rdx,QWORD PTR[24+rdi]
+ mov r8,QWORD PTR[32+rdi]
+ mov r9,QWORD PTR[40+rdi]
+ mov r10,QWORD PTR[48+rdi]
+ mov r11,QWORD PTR[56+rdi]
+ jmp $L$loop
+
+ALIGN 16
+$L$loop::
+ xor rdi,rdi
+ mov r12,QWORD PTR[rsi]
+ mov r13,r8
+ mov r14,rax
+ bswap r12
+ ror r13,23
+ mov r15,r9
+ mov QWORD PTR[rsp],r12
+
+ ror r14,5
+ xor r13,r8
+ xor r15,r10
+
+ ror r13,4
+ add r12,r11
+ xor r14,rax
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,r8
+ mov r11,rbx
+
+ ror r14,6
+ xor r13,r8
+ xor r15,r10
+
+ xor r11,rcx
+ xor r14,rax
+ add r12,r15
+ mov r15,rbx
+
+ ror r13,14
+ and r11,rax
+ and r15,rcx
+
+ ror r14,28
+ add r12,r13
+ add r11,r15
+
+ add rdx,r12
+ add r11,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add r11,r14
+
+ mov r12,QWORD PTR[8+rsi]
+ mov r13,rdx
+ mov r14,r11
+ bswap r12
+ ror r13,23
+ mov r15,r8
+ mov QWORD PTR[8+rsp],r12
+
+ ror r14,5
+ xor r13,rdx
+ xor r15,r9
+
+ ror r13,4
+ add r12,r10
+ xor r14,r11
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,rdx
+ mov r10,rax
+
+ ror r14,6
+ xor r13,rdx
+ xor r15,r9
+
+ xor r10,rbx
+ xor r14,r11
+ add r12,r15
+ mov r15,rax
+
+ ror r13,14
+ and r10,r11
+ and r15,rbx
+
+ ror r14,28
+ add r12,r13
+ add r10,r15
+
+ add rcx,r12
+ add r10,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add r10,r14
+
+ mov r12,QWORD PTR[16+rsi]
+ mov r13,rcx
+ mov r14,r10
+ bswap r12
+ ror r13,23
+ mov r15,rdx
+ mov QWORD PTR[16+rsp],r12
+
+ ror r14,5
+ xor r13,rcx
+ xor r15,r8
+
+ ror r13,4
+ add r12,r9
+ xor r14,r10
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,rcx
+ mov r9,r11
+
+ ror r14,6
+ xor r13,rcx
+ xor r15,r8
+
+ xor r9,rax
+ xor r14,r10
+ add r12,r15
+ mov r15,r11
+
+ ror r13,14
+ and r9,r10
+ and r15,rax
+
+ ror r14,28
+ add r12,r13
+ add r9,r15
+
+ add rbx,r12
+ add r9,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add r9,r14
+
+ mov r12,QWORD PTR[24+rsi]
+ mov r13,rbx
+ mov r14,r9
+ bswap r12
+ ror r13,23
+ mov r15,rcx
+ mov QWORD PTR[24+rsp],r12
+
+ ror r14,5
+ xor r13,rbx
+ xor r15,rdx
+
+ ror r13,4
+ add r12,r8
+ xor r14,r9
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,rbx
+ mov r8,r10
+
+ ror r14,6
+ xor r13,rbx
+ xor r15,rdx
+
+ xor r8,r11
+ xor r14,r9
+ add r12,r15
+ mov r15,r10
+
+ ror r13,14
+ and r8,r9
+ and r15,r11
+
+ ror r14,28
+ add r12,r13
+ add r8,r15
+
+ add rax,r12
+ add r8,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add r8,r14
+
+ mov r12,QWORD PTR[32+rsi]
+ mov r13,rax
+ mov r14,r8
+ bswap r12
+ ror r13,23
+ mov r15,rbx
+ mov QWORD PTR[32+rsp],r12
+
+ ror r14,5
+ xor r13,rax
+ xor r15,rcx
+
+ ror r13,4
+ add r12,rdx
+ xor r14,r8
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,rax
+ mov rdx,r9
+
+ ror r14,6
+ xor r13,rax
+ xor r15,rcx
+
+ xor rdx,r10
+ xor r14,r8
+ add r12,r15
+ mov r15,r9
+
+ ror r13,14
+ and rdx,r8
+ and r15,r10
+
+ ror r14,28
+ add r12,r13
+ add rdx,r15
+
+ add r11,r12
+ add rdx,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add rdx,r14
+
+ mov r12,QWORD PTR[40+rsi]
+ mov r13,r11
+ mov r14,rdx
+ bswap r12
+ ror r13,23
+ mov r15,rax
+ mov QWORD PTR[40+rsp],r12
+
+ ror r14,5
+ xor r13,r11
+ xor r15,rbx
+
+ ror r13,4
+ add r12,rcx
+ xor r14,rdx
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,r11
+ mov rcx,r8
+
+ ror r14,6
+ xor r13,r11
+ xor r15,rbx
+
+ xor rcx,r9
+ xor r14,rdx
+ add r12,r15
+ mov r15,r8
+
+ ror r13,14
+ and rcx,rdx
+ and r15,r9
+
+ ror r14,28
+ add r12,r13
+ add rcx,r15
+
+ add r10,r12
+ add rcx,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add rcx,r14
+
+ mov r12,QWORD PTR[48+rsi]
+ mov r13,r10
+ mov r14,rcx
+ bswap r12
+ ror r13,23
+ mov r15,r11
+ mov QWORD PTR[48+rsp],r12
+
+ ror r14,5
+ xor r13,r10
+ xor r15,rax
+
+ ror r13,4
+ add r12,rbx
+ xor r14,rcx
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,r10
+ mov rbx,rdx
+
+ ror r14,6
+ xor r13,r10
+ xor r15,rax
+
+ xor rbx,r8
+ xor r14,rcx
+ add r12,r15
+ mov r15,rdx
+
+ ror r13,14
+ and rbx,rcx
+ and r15,r8
+
+ ror r14,28
+ add r12,r13
+ add rbx,r15
+
+ add r9,r12
+ add rbx,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add rbx,r14
+
+ mov r12,QWORD PTR[56+rsi]
+ mov r13,r9
+ mov r14,rbx
+ bswap r12
+ ror r13,23
+ mov r15,r10
+ mov QWORD PTR[56+rsp],r12
+
+ ror r14,5
+ xor r13,r9
+ xor r15,r11
+
+ ror r13,4
+ add r12,rax
+ xor r14,rbx
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,r9
+ mov rax,rcx
+
+ ror r14,6
+ xor r13,r9
+ xor r15,r11
+
+ xor rax,rdx
+ xor r14,rbx
+ add r12,r15
+ mov r15,rcx
+
+ ror r13,14
+ and rax,rbx
+ and r15,rdx
+
+ ror r14,28
+ add r12,r13
+ add rax,r15
+
+ add r8,r12
+ add rax,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add rax,r14
+
+ mov r12,QWORD PTR[64+rsi]
+ mov r13,r8
+ mov r14,rax
+ bswap r12
+ ror r13,23
+ mov r15,r9
+ mov QWORD PTR[64+rsp],r12
+
+ ror r14,5
+ xor r13,r8
+ xor r15,r10
+
+ ror r13,4
+ add r12,r11
+ xor r14,rax
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,r8
+ mov r11,rbx
+
+ ror r14,6
+ xor r13,r8
+ xor r15,r10
+
+ xor r11,rcx
+ xor r14,rax
+ add r12,r15
+ mov r15,rbx
+
+ ror r13,14
+ and r11,rax
+ and r15,rcx
+
+ ror r14,28
+ add r12,r13
+ add r11,r15
+
+ add rdx,r12
+ add r11,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add r11,r14
+
+ mov r12,QWORD PTR[72+rsi]
+ mov r13,rdx
+ mov r14,r11
+ bswap r12
+ ror r13,23
+ mov r15,r8
+ mov QWORD PTR[72+rsp],r12
+
+ ror r14,5
+ xor r13,rdx
+ xor r15,r9
+
+ ror r13,4
+ add r12,r10
+ xor r14,r11
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,rdx
+ mov r10,rax
+
+ ror r14,6
+ xor r13,rdx
+ xor r15,r9
+
+ xor r10,rbx
+ xor r14,r11
+ add r12,r15
+ mov r15,rax
+
+ ror r13,14
+ and r10,r11
+ and r15,rbx
+
+ ror r14,28
+ add r12,r13
+ add r10,r15
+
+ add rcx,r12
+ add r10,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add r10,r14
+
+ mov r12,QWORD PTR[80+rsi]
+ mov r13,rcx
+ mov r14,r10
+ bswap r12
+ ror r13,23
+ mov r15,rdx
+ mov QWORD PTR[80+rsp],r12
+
+ ror r14,5
+ xor r13,rcx
+ xor r15,r8
+
+ ror r13,4
+ add r12,r9
+ xor r14,r10
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,rcx
+ mov r9,r11
+
+ ror r14,6
+ xor r13,rcx
+ xor r15,r8
+
+ xor r9,rax
+ xor r14,r10
+ add r12,r15
+ mov r15,r11
+
+ ror r13,14
+ and r9,r10
+ and r15,rax
+
+ ror r14,28
+ add r12,r13
+ add r9,r15
+
+ add rbx,r12
+ add r9,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add r9,r14
+
+ mov r12,QWORD PTR[88+rsi]
+ mov r13,rbx
+ mov r14,r9
+ bswap r12
+ ror r13,23
+ mov r15,rcx
+ mov QWORD PTR[88+rsp],r12
+
+ ror r14,5
+ xor r13,rbx
+ xor r15,rdx
+
+ ror r13,4
+ add r12,r8
+ xor r14,r9
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,rbx
+ mov r8,r10
+
+ ror r14,6
+ xor r13,rbx
+ xor r15,rdx
+
+ xor r8,r11
+ xor r14,r9
+ add r12,r15
+ mov r15,r10
+
+ ror r13,14
+ and r8,r9
+ and r15,r11
+
+ ror r14,28
+ add r12,r13
+ add r8,r15
+
+ add rax,r12
+ add r8,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add r8,r14
+
+ mov r12,QWORD PTR[96+rsi]
+ mov r13,rax
+ mov r14,r8
+ bswap r12
+ ror r13,23
+ mov r15,rbx
+ mov QWORD PTR[96+rsp],r12
+
+ ror r14,5
+ xor r13,rax
+ xor r15,rcx
+
+ ror r13,4
+ add r12,rdx
+ xor r14,r8
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,rax
+ mov rdx,r9
+
+ ror r14,6
+ xor r13,rax
+ xor r15,rcx
+
+ xor rdx,r10
+ xor r14,r8
+ add r12,r15
+ mov r15,r9
+
+ ror r13,14
+ and rdx,r8
+ and r15,r10
+
+ ror r14,28
+ add r12,r13
+ add rdx,r15
+
+ add r11,r12
+ add rdx,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add rdx,r14
+
+ mov r12,QWORD PTR[104+rsi]
+ mov r13,r11
+ mov r14,rdx
+ bswap r12
+ ror r13,23
+ mov r15,rax
+ mov QWORD PTR[104+rsp],r12
+
+ ror r14,5
+ xor r13,r11
+ xor r15,rbx
+
+ ror r13,4
+ add r12,rcx
+ xor r14,rdx
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,r11
+ mov rcx,r8
+
+ ror r14,6
+ xor r13,r11
+ xor r15,rbx
+
+ xor rcx,r9
+ xor r14,rdx
+ add r12,r15
+ mov r15,r8
+
+ ror r13,14
+ and rcx,rdx
+ and r15,r9
+
+ ror r14,28
+ add r12,r13
+ add rcx,r15
+
+ add r10,r12
+ add rcx,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add rcx,r14
+
+ mov r12,QWORD PTR[112+rsi]
+ mov r13,r10
+ mov r14,rcx
+ bswap r12
+ ror r13,23
+ mov r15,r11
+ mov QWORD PTR[112+rsp],r12
+
+ ror r14,5
+ xor r13,r10
+ xor r15,rax
+
+ ror r13,4
+ add r12,rbx
+ xor r14,rcx
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,r10
+ mov rbx,rdx
+
+ ror r14,6
+ xor r13,r10
+ xor r15,rax
+
+ xor rbx,r8
+ xor r14,rcx
+ add r12,r15
+ mov r15,rdx
+
+ ror r13,14
+ and rbx,rcx
+ and r15,r8
+
+ ror r14,28
+ add r12,r13
+ add rbx,r15
+
+ add r9,r12
+ add rbx,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add rbx,r14
+
+ mov r12,QWORD PTR[120+rsi]
+ mov r13,r9
+ mov r14,rbx
+ bswap r12
+ ror r13,23
+ mov r15,r10
+ mov QWORD PTR[120+rsp],r12
+
+ ror r14,5
+ xor r13,r9
+ xor r15,r11
+
+ ror r13,4
+ add r12,rax
+ xor r14,rbx
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,r9
+ mov rax,rcx
+
+ ror r14,6
+ xor r13,r9
+ xor r15,r11
+
+ xor rax,rdx
+ xor r14,rbx
+ add r12,r15
+ mov r15,rcx
+
+ ror r13,14
+ and rax,rbx
+ and r15,rdx
+
+ ror r14,28
+ add r12,r13
+ add rax,r15
+
+ add r8,r12
+ add rax,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add rax,r14
+
+ jmp $L$rounds_16_xx
+ALIGN 16
+$L$rounds_16_xx::
+ mov r13,QWORD PTR[8+rsp]
+ mov r14,QWORD PTR[112+rsp]
+ mov r12,r13
+ mov r15,r14
+
+ ror r12,7
+ xor r12,r13
+ shr r13,7
+
+ ror r12,1
+ xor r13,r12
+ mov r12,QWORD PTR[72+rsp]
+
+ ror r15,42
+ xor r15,r14
+ shr r14,6
+
+ ror r15,19
+ add r12,r13
+ xor r14,r15
+
+ add r12,QWORD PTR[rsp]
+ mov r13,r8
+ add r12,r14
+ mov r14,rax
+ ror r13,23
+ mov r15,r9
+ mov QWORD PTR[rsp],r12
+
+ ror r14,5
+ xor r13,r8
+ xor r15,r10
+
+ ror r13,4
+ add r12,r11
+ xor r14,rax
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,r8
+ mov r11,rbx
+
+ ror r14,6
+ xor r13,r8
+ xor r15,r10
+
+ xor r11,rcx
+ xor r14,rax
+ add r12,r15
+ mov r15,rbx
+
+ ror r13,14
+ and r11,rax
+ and r15,rcx
+
+ ror r14,28
+ add r12,r13
+ add r11,r15
+
+ add rdx,r12
+ add r11,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add r11,r14
+
+ mov r13,QWORD PTR[16+rsp]
+ mov r14,QWORD PTR[120+rsp]
+ mov r12,r13
+ mov r15,r14
+
+ ror r12,7
+ xor r12,r13
+ shr r13,7
+
+ ror r12,1
+ xor r13,r12
+ mov r12,QWORD PTR[80+rsp]
+
+ ror r15,42
+ xor r15,r14
+ shr r14,6
+
+ ror r15,19
+ add r12,r13
+ xor r14,r15
+
+ add r12,QWORD PTR[8+rsp]
+ mov r13,rdx
+ add r12,r14
+ mov r14,r11
+ ror r13,23
+ mov r15,r8
+ mov QWORD PTR[8+rsp],r12
+
+ ror r14,5
+ xor r13,rdx
+ xor r15,r9
+
+ ror r13,4
+ add r12,r10
+ xor r14,r11
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,rdx
+ mov r10,rax
+
+ ror r14,6
+ xor r13,rdx
+ xor r15,r9
+
+ xor r10,rbx
+ xor r14,r11
+ add r12,r15
+ mov r15,rax
+
+ ror r13,14
+ and r10,r11
+ and r15,rbx
+
+ ror r14,28
+ add r12,r13
+ add r10,r15
+
+ add rcx,r12
+ add r10,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add r10,r14
+
+ mov r13,QWORD PTR[24+rsp]
+ mov r14,QWORD PTR[rsp]
+ mov r12,r13
+ mov r15,r14
+
+ ror r12,7
+ xor r12,r13
+ shr r13,7
+
+ ror r12,1
+ xor r13,r12
+ mov r12,QWORD PTR[88+rsp]
+
+ ror r15,42
+ xor r15,r14
+ shr r14,6
+
+ ror r15,19
+ add r12,r13
+ xor r14,r15
+
+ add r12,QWORD PTR[16+rsp]
+ mov r13,rcx
+ add r12,r14
+ mov r14,r10
+ ror r13,23
+ mov r15,rdx
+ mov QWORD PTR[16+rsp],r12
+
+ ror r14,5
+ xor r13,rcx
+ xor r15,r8
+
+ ror r13,4
+ add r12,r9
+ xor r14,r10
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,rcx
+ mov r9,r11
+
+ ror r14,6
+ xor r13,rcx
+ xor r15,r8
+
+ xor r9,rax
+ xor r14,r10
+ add r12,r15
+ mov r15,r11
+
+ ror r13,14
+ and r9,r10
+ and r15,rax
+
+ ror r14,28
+ add r12,r13
+ add r9,r15
+
+ add rbx,r12
+ add r9,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add r9,r14
+
+ mov r13,QWORD PTR[32+rsp]
+ mov r14,QWORD PTR[8+rsp]
+ mov r12,r13
+ mov r15,r14
+
+ ror r12,7
+ xor r12,r13
+ shr r13,7
+
+ ror r12,1
+ xor r13,r12
+ mov r12,QWORD PTR[96+rsp]
+
+ ror r15,42
+ xor r15,r14
+ shr r14,6
+
+ ror r15,19
+ add r12,r13
+ xor r14,r15
+
+ add r12,QWORD PTR[24+rsp]
+ mov r13,rbx
+ add r12,r14
+ mov r14,r9
+ ror r13,23
+ mov r15,rcx
+ mov QWORD PTR[24+rsp],r12
+
+ ror r14,5
+ xor r13,rbx
+ xor r15,rdx
+
+ ror r13,4
+ add r12,r8
+ xor r14,r9
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,rbx
+ mov r8,r10
+
+ ror r14,6
+ xor r13,rbx
+ xor r15,rdx
+
+ xor r8,r11
+ xor r14,r9
+ add r12,r15
+ mov r15,r10
+
+ ror r13,14
+ and r8,r9
+ and r15,r11
+
+ ror r14,28
+ add r12,r13
+ add r8,r15
+
+ add rax,r12
+ add r8,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add r8,r14
+
+ mov r13,QWORD PTR[40+rsp]
+ mov r14,QWORD PTR[16+rsp]
+ mov r12,r13
+ mov r15,r14
+
+ ror r12,7
+ xor r12,r13
+ shr r13,7
+
+ ror r12,1
+ xor r13,r12
+ mov r12,QWORD PTR[104+rsp]
+
+ ror r15,42
+ xor r15,r14
+ shr r14,6
+
+ ror r15,19
+ add r12,r13
+ xor r14,r15
+
+ add r12,QWORD PTR[32+rsp]
+ mov r13,rax
+ add r12,r14
+ mov r14,r8
+ ror r13,23
+ mov r15,rbx
+ mov QWORD PTR[32+rsp],r12
+
+ ror r14,5
+ xor r13,rax
+ xor r15,rcx
+
+ ror r13,4
+ add r12,rdx
+ xor r14,r8
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,rax
+ mov rdx,r9
+
+ ror r14,6
+ xor r13,rax
+ xor r15,rcx
+
+ xor rdx,r10
+ xor r14,r8
+ add r12,r15
+ mov r15,r9
+
+ ror r13,14
+ and rdx,r8
+ and r15,r10
+
+ ror r14,28
+ add r12,r13
+ add rdx,r15
+
+ add r11,r12
+ add rdx,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add rdx,r14
+
+ mov r13,QWORD PTR[48+rsp]
+ mov r14,QWORD PTR[24+rsp]
+ mov r12,r13
+ mov r15,r14
+
+ ror r12,7
+ xor r12,r13
+ shr r13,7
+
+ ror r12,1
+ xor r13,r12
+ mov r12,QWORD PTR[112+rsp]
+
+ ror r15,42
+ xor r15,r14
+ shr r14,6
+
+ ror r15,19
+ add r12,r13
+ xor r14,r15
+
+ add r12,QWORD PTR[40+rsp]
+ mov r13,r11
+ add r12,r14
+ mov r14,rdx
+ ror r13,23
+ mov r15,rax
+ mov QWORD PTR[40+rsp],r12
+
+ ror r14,5
+ xor r13,r11
+ xor r15,rbx
+
+ ror r13,4
+ add r12,rcx
+ xor r14,rdx
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,r11
+ mov rcx,r8
+
+ ror r14,6
+ xor r13,r11
+ xor r15,rbx
+
+ xor rcx,r9
+ xor r14,rdx
+ add r12,r15
+ mov r15,r8
+
+ ror r13,14
+ and rcx,rdx
+ and r15,r9
+
+ ror r14,28
+ add r12,r13
+ add rcx,r15
+
+ add r10,r12
+ add rcx,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add rcx,r14
+
+ mov r13,QWORD PTR[56+rsp]
+ mov r14,QWORD PTR[32+rsp]
+ mov r12,r13
+ mov r15,r14
+
+ ror r12,7
+ xor r12,r13
+ shr r13,7
+
+ ror r12,1
+ xor r13,r12
+ mov r12,QWORD PTR[120+rsp]
+
+ ror r15,42
+ xor r15,r14
+ shr r14,6
+
+ ror r15,19
+ add r12,r13
+ xor r14,r15
+
+ add r12,QWORD PTR[48+rsp]
+ mov r13,r10
+ add r12,r14
+ mov r14,rcx
+ ror r13,23
+ mov r15,r11
+ mov QWORD PTR[48+rsp],r12
+
+ ror r14,5
+ xor r13,r10
+ xor r15,rax
+
+ ror r13,4
+ add r12,rbx
+ xor r14,rcx
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,r10
+ mov rbx,rdx
+
+ ror r14,6
+ xor r13,r10
+ xor r15,rax
+
+ xor rbx,r8
+ xor r14,rcx
+ add r12,r15
+ mov r15,rdx
+
+ ror r13,14
+ and rbx,rcx
+ and r15,r8
+
+ ror r14,28
+ add r12,r13
+ add rbx,r15
+
+ add r9,r12
+ add rbx,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add rbx,r14
+
+ mov r13,QWORD PTR[64+rsp]
+ mov r14,QWORD PTR[40+rsp]
+ mov r12,r13
+ mov r15,r14
+
+ ror r12,7
+ xor r12,r13
+ shr r13,7
+
+ ror r12,1
+ xor r13,r12
+ mov r12,QWORD PTR[rsp]
+
+ ror r15,42
+ xor r15,r14
+ shr r14,6
+
+ ror r15,19
+ add r12,r13
+ xor r14,r15
+
+ add r12,QWORD PTR[56+rsp]
+ mov r13,r9
+ add r12,r14
+ mov r14,rbx
+ ror r13,23
+ mov r15,r10
+ mov QWORD PTR[56+rsp],r12
+
+ ror r14,5
+ xor r13,r9
+ xor r15,r11
+
+ ror r13,4
+ add r12,rax
+ xor r14,rbx
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,r9
+ mov rax,rcx
+
+ ror r14,6
+ xor r13,r9
+ xor r15,r11
+
+ xor rax,rdx
+ xor r14,rbx
+ add r12,r15
+ mov r15,rcx
+
+ ror r13,14
+ and rax,rbx
+ and r15,rdx
+
+ ror r14,28
+ add r12,r13
+ add rax,r15
+
+ add r8,r12
+ add rax,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add rax,r14
+
+ mov r13,QWORD PTR[72+rsp]
+ mov r14,QWORD PTR[48+rsp]
+ mov r12,r13
+ mov r15,r14
+
+ ror r12,7
+ xor r12,r13
+ shr r13,7
+
+ ror r12,1
+ xor r13,r12
+ mov r12,QWORD PTR[8+rsp]
+
+ ror r15,42
+ xor r15,r14
+ shr r14,6
+
+ ror r15,19
+ add r12,r13
+ xor r14,r15
+
+ add r12,QWORD PTR[64+rsp]
+ mov r13,r8
+ add r12,r14
+ mov r14,rax
+ ror r13,23
+ mov r15,r9
+ mov QWORD PTR[64+rsp],r12
+
+ ror r14,5
+ xor r13,r8
+ xor r15,r10
+
+ ror r13,4
+ add r12,r11
+ xor r14,rax
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,r8
+ mov r11,rbx
+
+ ror r14,6
+ xor r13,r8
+ xor r15,r10
+
+ xor r11,rcx
+ xor r14,rax
+ add r12,r15
+ mov r15,rbx
+
+ ror r13,14
+ and r11,rax
+ and r15,rcx
+
+ ror r14,28
+ add r12,r13
+ add r11,r15
+
+ add rdx,r12
+ add r11,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add r11,r14
+
+ mov r13,QWORD PTR[80+rsp]
+ mov r14,QWORD PTR[56+rsp]
+ mov r12,r13
+ mov r15,r14
+
+ ror r12,7
+ xor r12,r13
+ shr r13,7
+
+ ror r12,1
+ xor r13,r12
+ mov r12,QWORD PTR[16+rsp]
+
+ ror r15,42
+ xor r15,r14
+ shr r14,6
+
+ ror r15,19
+ add r12,r13
+ xor r14,r15
+
+ add r12,QWORD PTR[72+rsp]
+ mov r13,rdx
+ add r12,r14
+ mov r14,r11
+ ror r13,23
+ mov r15,r8
+ mov QWORD PTR[72+rsp],r12
+
+ ror r14,5
+ xor r13,rdx
+ xor r15,r9
+
+ ror r13,4
+ add r12,r10
+ xor r14,r11
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,rdx
+ mov r10,rax
+
+ ror r14,6
+ xor r13,rdx
+ xor r15,r9
+
+ xor r10,rbx
+ xor r14,r11
+ add r12,r15
+ mov r15,rax
+
+ ror r13,14
+ and r10,r11
+ and r15,rbx
+
+ ror r14,28
+ add r12,r13
+ add r10,r15
+
+ add rcx,r12
+ add r10,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add r10,r14
+
+ mov r13,QWORD PTR[88+rsp]
+ mov r14,QWORD PTR[64+rsp]
+ mov r12,r13
+ mov r15,r14
+
+ ror r12,7
+ xor r12,r13
+ shr r13,7
+
+ ror r12,1
+ xor r13,r12
+ mov r12,QWORD PTR[24+rsp]
+
+ ror r15,42
+ xor r15,r14
+ shr r14,6
+
+ ror r15,19
+ add r12,r13
+ xor r14,r15
+
+ add r12,QWORD PTR[80+rsp]
+ mov r13,rcx
+ add r12,r14
+ mov r14,r10
+ ror r13,23
+ mov r15,rdx
+ mov QWORD PTR[80+rsp],r12
+
+ ror r14,5
+ xor r13,rcx
+ xor r15,r8
+
+ ror r13,4
+ add r12,r9
+ xor r14,r10
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,rcx
+ mov r9,r11
+
+ ror r14,6
+ xor r13,rcx
+ xor r15,r8
+
+ xor r9,rax
+ xor r14,r10
+ add r12,r15
+ mov r15,r11
+
+ ror r13,14
+ and r9,r10
+ and r15,rax
+
+ ror r14,28
+ add r12,r13
+ add r9,r15
+
+ add rbx,r12
+ add r9,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add r9,r14
+
+ mov r13,QWORD PTR[96+rsp]
+ mov r14,QWORD PTR[72+rsp]
+ mov r12,r13
+ mov r15,r14
+
+ ror r12,7
+ xor r12,r13
+ shr r13,7
+
+ ror r12,1
+ xor r13,r12
+ mov r12,QWORD PTR[32+rsp]
+
+ ror r15,42
+ xor r15,r14
+ shr r14,6
+
+ ror r15,19
+ add r12,r13
+ xor r14,r15
+
+ add r12,QWORD PTR[88+rsp]
+ mov r13,rbx
+ add r12,r14
+ mov r14,r9
+ ror r13,23
+ mov r15,rcx
+ mov QWORD PTR[88+rsp],r12
+
+ ror r14,5
+ xor r13,rbx
+ xor r15,rdx
+
+ ror r13,4
+ add r12,r8
+ xor r14,r9
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,rbx
+ mov r8,r10
+
+ ror r14,6
+ xor r13,rbx
+ xor r15,rdx
+
+ xor r8,r11
+ xor r14,r9
+ add r12,r15
+ mov r15,r10
+
+ ror r13,14
+ and r8,r9
+ and r15,r11
+
+ ror r14,28
+ add r12,r13
+ add r8,r15
+
+ add rax,r12
+ add r8,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add r8,r14
+
+ mov r13,QWORD PTR[104+rsp]
+ mov r14,QWORD PTR[80+rsp]
+ mov r12,r13
+ mov r15,r14
+
+ ror r12,7
+ xor r12,r13
+ shr r13,7
+
+ ror r12,1
+ xor r13,r12
+ mov r12,QWORD PTR[40+rsp]
+
+ ror r15,42
+ xor r15,r14
+ shr r14,6
+
+ ror r15,19
+ add r12,r13
+ xor r14,r15
+
+ add r12,QWORD PTR[96+rsp]
+ mov r13,rax
+ add r12,r14
+ mov r14,r8
+ ror r13,23
+ mov r15,rbx
+ mov QWORD PTR[96+rsp],r12
+
+ ror r14,5
+ xor r13,rax
+ xor r15,rcx
+
+ ror r13,4
+ add r12,rdx
+ xor r14,r8
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,rax
+ mov rdx,r9
+
+ ror r14,6
+ xor r13,rax
+ xor r15,rcx
+
+ xor rdx,r10
+ xor r14,r8
+ add r12,r15
+ mov r15,r9
+
+ ror r13,14
+ and rdx,r8
+ and r15,r10
+
+ ror r14,28
+ add r12,r13
+ add rdx,r15
+
+ add r11,r12
+ add rdx,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add rdx,r14
+
+ mov r13,QWORD PTR[112+rsp]
+ mov r14,QWORD PTR[88+rsp]
+ mov r12,r13
+ mov r15,r14
+
+ ror r12,7
+ xor r12,r13
+ shr r13,7
+
+ ror r12,1
+ xor r13,r12
+ mov r12,QWORD PTR[48+rsp]
+
+ ror r15,42
+ xor r15,r14
+ shr r14,6
+
+ ror r15,19
+ add r12,r13
+ xor r14,r15
+
+ add r12,QWORD PTR[104+rsp]
+ mov r13,r11
+ add r12,r14
+ mov r14,rdx
+ ror r13,23
+ mov r15,rax
+ mov QWORD PTR[104+rsp],r12
+
+ ror r14,5
+ xor r13,r11
+ xor r15,rbx
+
+ ror r13,4
+ add r12,rcx
+ xor r14,rdx
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,r11
+ mov rcx,r8
+
+ ror r14,6
+ xor r13,r11
+ xor r15,rbx
+
+ xor rcx,r9
+ xor r14,rdx
+ add r12,r15
+ mov r15,r8
+
+ ror r13,14
+ and rcx,rdx
+ and r15,r9
+
+ ror r14,28
+ add r12,r13
+ add rcx,r15
+
+ add r10,r12
+ add rcx,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add rcx,r14
+
+ mov r13,QWORD PTR[120+rsp]
+ mov r14,QWORD PTR[96+rsp]
+ mov r12,r13
+ mov r15,r14
+
+ ror r12,7
+ xor r12,r13
+ shr r13,7
+
+ ror r12,1
+ xor r13,r12
+ mov r12,QWORD PTR[56+rsp]
+
+ ror r15,42
+ xor r15,r14
+ shr r14,6
+
+ ror r15,19
+ add r12,r13
+ xor r14,r15
+
+ add r12,QWORD PTR[112+rsp]
+ mov r13,r10
+ add r12,r14
+ mov r14,rcx
+ ror r13,23
+ mov r15,r11
+ mov QWORD PTR[112+rsp],r12
+
+ ror r14,5
+ xor r13,r10
+ xor r15,rax
+
+ ror r13,4
+ add r12,rbx
+ xor r14,rcx
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,r10
+ mov rbx,rdx
+
+ ror r14,6
+ xor r13,r10
+ xor r15,rax
+
+ xor rbx,r8
+ xor r14,rcx
+ add r12,r15
+ mov r15,rdx
+
+ ror r13,14
+ and rbx,rcx
+ and r15,r8
+
+ ror r14,28
+ add r12,r13
+ add rbx,r15
+
+ add r9,r12
+ add rbx,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add rbx,r14
+
+ mov r13,QWORD PTR[rsp]
+ mov r14,QWORD PTR[104+rsp]
+ mov r12,r13
+ mov r15,r14
+
+ ror r12,7
+ xor r12,r13
+ shr r13,7
+
+ ror r12,1
+ xor r13,r12
+ mov r12,QWORD PTR[64+rsp]
+
+ ror r15,42
+ xor r15,r14
+ shr r14,6
+
+ ror r15,19
+ add r12,r13
+ xor r14,r15
+
+ add r12,QWORD PTR[120+rsp]
+ mov r13,r9
+ add r12,r14
+ mov r14,rbx
+ ror r13,23
+ mov r15,r10
+ mov QWORD PTR[120+rsp],r12
+
+ ror r14,5
+ xor r13,r9
+ xor r15,r11
+
+ ror r13,4
+ add r12,rax
+ xor r14,rbx
+
+ add r12,QWORD PTR[rdi*8+rbp]
+ and r15,r9
+ mov rax,rcx
+
+ ror r14,6
+ xor r13,r9
+ xor r15,r11
+
+ xor rax,rdx
+ xor r14,rbx
+ add r12,r15
+ mov r15,rcx
+
+ ror r13,14
+ and rax,rbx
+ and r15,rdx
+
+ ror r14,28
+ add r12,r13
+ add rax,r15
+
+ add r8,r12
+ add rax,r12
+ lea rdi,QWORD PTR[1+rdi]
+ add rax,r14
+
+ cmp rdi,80
+ jb $L$rounds_16_xx
+
+ mov rdi,QWORD PTR[((128+0))+rsp]
+ lea rsi,QWORD PTR[128+rsi]
+
+ add rax,QWORD PTR[rdi]
+ add rbx,QWORD PTR[8+rdi]
+ add rcx,QWORD PTR[16+rdi]
+ add rdx,QWORD PTR[24+rdi]
+ add r8,QWORD PTR[32+rdi]
+ add r9,QWORD PTR[40+rdi]
+ add r10,QWORD PTR[48+rdi]
+ add r11,QWORD PTR[56+rdi]
+
+ cmp rsi,QWORD PTR[((128+16))+rsp]
+
+ mov QWORD PTR[rdi],rax
+ mov QWORD PTR[8+rdi],rbx
+ mov QWORD PTR[16+rdi],rcx
+ mov QWORD PTR[24+rdi],rdx
+ mov QWORD PTR[32+rdi],r8
+ mov QWORD PTR[40+rdi],r9
+ mov QWORD PTR[48+rdi],r10
+ mov QWORD PTR[56+rdi],r11
+ jb $L$loop
+
+ mov rsi,QWORD PTR[((128+24))+rsp]
+ mov r15,QWORD PTR[rsi]
+ mov r14,QWORD PTR[8+rsi]
+ mov r13,QWORD PTR[16+rsi]
+ mov r12,QWORD PTR[24+rsi]
+ mov rbp,QWORD PTR[32+rsi]
+ mov rbx,QWORD PTR[40+rsi]
+ lea rsp,QWORD PTR[48+rsi]
+$L$epilogue::
+ mov rdi,QWORD PTR[8+rsp] ;WIN64 epilogue
+ mov rsi,QWORD PTR[16+rsp]
+ DB 0F3h,0C3h ;repret
+$L$SEH_end_sha512_block_data_order::
+sha512_block_data_order ENDP
+ALIGN 64
+
+K512::
+ DQ 0428a2f98d728ae22h,07137449123ef65cdh
+ DQ 0b5c0fbcfec4d3b2fh,0e9b5dba58189dbbch
+ DQ 03956c25bf348b538h,059f111f1b605d019h
+ DQ 0923f82a4af194f9bh,0ab1c5ed5da6d8118h
+ DQ 0d807aa98a3030242h,012835b0145706fbeh
+ DQ 0243185be4ee4b28ch,0550c7dc3d5ffb4e2h
+ DQ 072be5d74f27b896fh,080deb1fe3b1696b1h
+ DQ 09bdc06a725c71235h,0c19bf174cf692694h
+ DQ 0e49b69c19ef14ad2h,0efbe4786384f25e3h
+ DQ 00fc19dc68b8cd5b5h,0240ca1cc77ac9c65h
+ DQ 02de92c6f592b0275h,04a7484aa6ea6e483h
+ DQ 05cb0a9dcbd41fbd4h,076f988da831153b5h
+ DQ 0983e5152ee66dfabh,0a831c66d2db43210h
+ DQ 0b00327c898fb213fh,0bf597fc7beef0ee4h
+ DQ 0c6e00bf33da88fc2h,0d5a79147930aa725h
+ DQ 006ca6351e003826fh,0142929670a0e6e70h
+ DQ 027b70a8546d22ffch,02e1b21385c26c926h
+ DQ 04d2c6dfc5ac42aedh,053380d139d95b3dfh
+ DQ 0650a73548baf63deh,0766a0abb3c77b2a8h
+ DQ 081c2c92e47edaee6h,092722c851482353bh
+ DQ 0a2bfe8a14cf10364h,0a81a664bbc423001h
+ DQ 0c24b8b70d0f89791h,0c76c51a30654be30h
+ DQ 0d192e819d6ef5218h,0d69906245565a910h
+ DQ 0f40e35855771202ah,0106aa07032bbd1b8h
+ DQ 019a4c116b8d2d0c8h,01e376c085141ab53h
+ DQ 02748774cdf8eeb99h,034b0bcb5e19b48a8h
+ DQ 0391c0cb3c5c95a63h,04ed8aa4ae3418acbh
+ DQ 05b9cca4f7763e373h,0682e6ff3d6b2b8a3h
+ DQ 0748f82ee5defb2fch,078a5636f43172f60h
+ DQ 084c87814a1f0ab72h,08cc702081a6439ech
+ DQ 090befffa23631e28h,0a4506cebde82bde9h
+ DQ 0bef9a3f7b2c67915h,0c67178f2e372532bh
+ DQ 0ca273eceea26619ch,0d186b8c721c0c207h
+ DQ 0eada7dd6cde0eb1eh,0f57d4f7fee6ed178h
+ DQ 006f067aa72176fbah,00a637dc5a2c898a6h
+ DQ 0113f9804bef90daeh,01b710b35131c471bh
+ DQ 028db77f523047d84h,032caab7b40c72493h
+ DQ 03c9ebe0a15c9bebch,0431d67c49c100d4ch
+ DQ 04cc5d4becb3e42b6h,0597f299cfc657e2ah
+ DQ 05fcb6fab3ad6faech,06c44198c4a475817h
+
+.text$ ENDS
+END
+
diff --git a/crypto/libressl/crypto/sha/sha512-mingw64-x86_64.S b/crypto/libressl/crypto/sha/sha512-mingw64-x86_64.S
new file mode 100644
index 0000000..5153952
--- /dev/null
+++ b/crypto/libressl/crypto/sha/sha512-mingw64-x86_64.S
@@ -0,0 +1,1814 @@
+#include "x86_arch.h"
+.text
+
+.globl sha512_block_data_order
+.def sha512_block_data_order; .scl 2; .type 32; .endef
+.p2align 4
+sha512_block_data_order:
+ movq %rdi,8(%rsp)
+ movq %rsi,16(%rsp)
+ movq %rsp,%rax
+.LSEH_begin_sha512_block_data_order:
+ movq %rcx,%rdi
+ movq %rdx,%rsi
+ movq %r8,%rdx
+ movq %r9,%rcx
+
+ pushq %rbx
+ pushq %rbp
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+ movq %rsp,%r11
+ shlq $4,%rdx
+ subq $128+32,%rsp
+ leaq (%rsi,%rdx,8),%rdx
+ andq $-64,%rsp
+ movq %rdi,128+0(%rsp)
+ movq %rsi,128+8(%rsp)
+ movq %rdx,128+16(%rsp)
+ movq %r11,128+24(%rsp)
+.Lprologue:
+
+ leaq K512(%rip),%rbp
+
+ movq 0(%rdi),%rax
+ movq 8(%rdi),%rbx
+ movq 16(%rdi),%rcx
+ movq 24(%rdi),%rdx
+ movq 32(%rdi),%r8
+ movq 40(%rdi),%r9
+ movq 48(%rdi),%r10
+ movq 56(%rdi),%r11
+ jmp .Lloop
+
+.p2align 4
+.Lloop:
+ xorq %rdi,%rdi
+ movq 0(%rsi),%r12
+ movq %r8,%r13
+ movq %rax,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r9,%r15
+ movq %r12,0(%rsp)
+
+ rorq $5,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ rorq $4,%r13
+ addq %r11,%r12
+ xorq %rax,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r8,%r15
+ movq %rbx,%r11
+
+ rorq $6,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ xorq %rcx,%r11
+ xorq %rax,%r14
+ addq %r15,%r12
+ movq %rbx,%r15
+
+ rorq $14,%r13
+ andq %rax,%r11
+ andq %rcx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r11
+
+ addq %r12,%rdx
+ addq %r12,%r11
+ leaq 1(%rdi),%rdi
+ addq %r14,%r11
+
+ movq 8(%rsi),%r12
+ movq %rdx,%r13
+ movq %r11,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r8,%r15
+ movq %r12,8(%rsp)
+
+ rorq $5,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ rorq $4,%r13
+ addq %r10,%r12
+ xorq %r11,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rdx,%r15
+ movq %rax,%r10
+
+ rorq $6,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ xorq %rbx,%r10
+ xorq %r11,%r14
+ addq %r15,%r12
+ movq %rax,%r15
+
+ rorq $14,%r13
+ andq %r11,%r10
+ andq %rbx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r10
+
+ addq %r12,%rcx
+ addq %r12,%r10
+ leaq 1(%rdi),%rdi
+ addq %r14,%r10
+
+ movq 16(%rsi),%r12
+ movq %rcx,%r13
+ movq %r10,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rdx,%r15
+ movq %r12,16(%rsp)
+
+ rorq $5,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ rorq $4,%r13
+ addq %r9,%r12
+ xorq %r10,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rcx,%r15
+ movq %r11,%r9
+
+ rorq $6,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ xorq %rax,%r9
+ xorq %r10,%r14
+ addq %r15,%r12
+ movq %r11,%r15
+
+ rorq $14,%r13
+ andq %r10,%r9
+ andq %rax,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r9
+
+ addq %r12,%rbx
+ addq %r12,%r9
+ leaq 1(%rdi),%rdi
+ addq %r14,%r9
+
+ movq 24(%rsi),%r12
+ movq %rbx,%r13
+ movq %r9,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rcx,%r15
+ movq %r12,24(%rsp)
+
+ rorq $5,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ rorq $4,%r13
+ addq %r8,%r12
+ xorq %r9,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rbx,%r15
+ movq %r10,%r8
+
+ rorq $6,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ xorq %r11,%r8
+ xorq %r9,%r14
+ addq %r15,%r12
+ movq %r10,%r15
+
+ rorq $14,%r13
+ andq %r9,%r8
+ andq %r11,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r8
+
+ addq %r12,%rax
+ addq %r12,%r8
+ leaq 1(%rdi),%rdi
+ addq %r14,%r8
+
+ movq 32(%rsi),%r12
+ movq %rax,%r13
+ movq %r8,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rbx,%r15
+ movq %r12,32(%rsp)
+
+ rorq $5,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ rorq $4,%r13
+ addq %rdx,%r12
+ xorq %r8,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rax,%r15
+ movq %r9,%rdx
+
+ rorq $6,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ xorq %r10,%rdx
+ xorq %r8,%r14
+ addq %r15,%r12
+ movq %r9,%r15
+
+ rorq $14,%r13
+ andq %r8,%rdx
+ andq %r10,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rdx
+
+ addq %r12,%r11
+ addq %r12,%rdx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rdx
+
+ movq 40(%rsi),%r12
+ movq %r11,%r13
+ movq %rdx,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rax,%r15
+ movq %r12,40(%rsp)
+
+ rorq $5,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ rorq $4,%r13
+ addq %rcx,%r12
+ xorq %rdx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r11,%r15
+ movq %r8,%rcx
+
+ rorq $6,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ xorq %r9,%rcx
+ xorq %rdx,%r14
+ addq %r15,%r12
+ movq %r8,%r15
+
+ rorq $14,%r13
+ andq %rdx,%rcx
+ andq %r9,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rcx
+
+ addq %r12,%r10
+ addq %r12,%rcx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rcx
+
+ movq 48(%rsi),%r12
+ movq %r10,%r13
+ movq %rcx,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r11,%r15
+ movq %r12,48(%rsp)
+
+ rorq $5,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ rorq $4,%r13
+ addq %rbx,%r12
+ xorq %rcx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r10,%r15
+ movq %rdx,%rbx
+
+ rorq $6,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ xorq %r8,%rbx
+ xorq %rcx,%r14
+ addq %r15,%r12
+ movq %rdx,%r15
+
+ rorq $14,%r13
+ andq %rcx,%rbx
+ andq %r8,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rbx
+
+ addq %r12,%r9
+ addq %r12,%rbx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rbx
+
+ movq 56(%rsi),%r12
+ movq %r9,%r13
+ movq %rbx,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r10,%r15
+ movq %r12,56(%rsp)
+
+ rorq $5,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ rorq $4,%r13
+ addq %rax,%r12
+ xorq %rbx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r9,%r15
+ movq %rcx,%rax
+
+ rorq $6,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ xorq %rdx,%rax
+ xorq %rbx,%r14
+ addq %r15,%r12
+ movq %rcx,%r15
+
+ rorq $14,%r13
+ andq %rbx,%rax
+ andq %rdx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rax
+
+ addq %r12,%r8
+ addq %r12,%rax
+ leaq 1(%rdi),%rdi
+ addq %r14,%rax
+
+ movq 64(%rsi),%r12
+ movq %r8,%r13
+ movq %rax,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r9,%r15
+ movq %r12,64(%rsp)
+
+ rorq $5,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ rorq $4,%r13
+ addq %r11,%r12
+ xorq %rax,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r8,%r15
+ movq %rbx,%r11
+
+ rorq $6,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ xorq %rcx,%r11
+ xorq %rax,%r14
+ addq %r15,%r12
+ movq %rbx,%r15
+
+ rorq $14,%r13
+ andq %rax,%r11
+ andq %rcx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r11
+
+ addq %r12,%rdx
+ addq %r12,%r11
+ leaq 1(%rdi),%rdi
+ addq %r14,%r11
+
+ movq 72(%rsi),%r12
+ movq %rdx,%r13
+ movq %r11,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r8,%r15
+ movq %r12,72(%rsp)
+
+ rorq $5,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ rorq $4,%r13
+ addq %r10,%r12
+ xorq %r11,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rdx,%r15
+ movq %rax,%r10
+
+ rorq $6,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ xorq %rbx,%r10
+ xorq %r11,%r14
+ addq %r15,%r12
+ movq %rax,%r15
+
+ rorq $14,%r13
+ andq %r11,%r10
+ andq %rbx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r10
+
+ addq %r12,%rcx
+ addq %r12,%r10
+ leaq 1(%rdi),%rdi
+ addq %r14,%r10
+
+ movq 80(%rsi),%r12
+ movq %rcx,%r13
+ movq %r10,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rdx,%r15
+ movq %r12,80(%rsp)
+
+ rorq $5,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ rorq $4,%r13
+ addq %r9,%r12
+ xorq %r10,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rcx,%r15
+ movq %r11,%r9
+
+ rorq $6,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ xorq %rax,%r9
+ xorq %r10,%r14
+ addq %r15,%r12
+ movq %r11,%r15
+
+ rorq $14,%r13
+ andq %r10,%r9
+ andq %rax,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r9
+
+ addq %r12,%rbx
+ addq %r12,%r9
+ leaq 1(%rdi),%rdi
+ addq %r14,%r9
+
+ movq 88(%rsi),%r12
+ movq %rbx,%r13
+ movq %r9,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rcx,%r15
+ movq %r12,88(%rsp)
+
+ rorq $5,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ rorq $4,%r13
+ addq %r8,%r12
+ xorq %r9,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rbx,%r15
+ movq %r10,%r8
+
+ rorq $6,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ xorq %r11,%r8
+ xorq %r9,%r14
+ addq %r15,%r12
+ movq %r10,%r15
+
+ rorq $14,%r13
+ andq %r9,%r8
+ andq %r11,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r8
+
+ addq %r12,%rax
+ addq %r12,%r8
+ leaq 1(%rdi),%rdi
+ addq %r14,%r8
+
+ movq 96(%rsi),%r12
+ movq %rax,%r13
+ movq %r8,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rbx,%r15
+ movq %r12,96(%rsp)
+
+ rorq $5,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ rorq $4,%r13
+ addq %rdx,%r12
+ xorq %r8,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rax,%r15
+ movq %r9,%rdx
+
+ rorq $6,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ xorq %r10,%rdx
+ xorq %r8,%r14
+ addq %r15,%r12
+ movq %r9,%r15
+
+ rorq $14,%r13
+ andq %r8,%rdx
+ andq %r10,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rdx
+
+ addq %r12,%r11
+ addq %r12,%rdx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rdx
+
+ movq 104(%rsi),%r12
+ movq %r11,%r13
+ movq %rdx,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %rax,%r15
+ movq %r12,104(%rsp)
+
+ rorq $5,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ rorq $4,%r13
+ addq %rcx,%r12
+ xorq %rdx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r11,%r15
+ movq %r8,%rcx
+
+ rorq $6,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ xorq %r9,%rcx
+ xorq %rdx,%r14
+ addq %r15,%r12
+ movq %r8,%r15
+
+ rorq $14,%r13
+ andq %rdx,%rcx
+ andq %r9,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rcx
+
+ addq %r12,%r10
+ addq %r12,%rcx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rcx
+
+ movq 112(%rsi),%r12
+ movq %r10,%r13
+ movq %rcx,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r11,%r15
+ movq %r12,112(%rsp)
+
+ rorq $5,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ rorq $4,%r13
+ addq %rbx,%r12
+ xorq %rcx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r10,%r15
+ movq %rdx,%rbx
+
+ rorq $6,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ xorq %r8,%rbx
+ xorq %rcx,%r14
+ addq %r15,%r12
+ movq %rdx,%r15
+
+ rorq $14,%r13
+ andq %rcx,%rbx
+ andq %r8,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rbx
+
+ addq %r12,%r9
+ addq %r12,%rbx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rbx
+
+ movq 120(%rsi),%r12
+ movq %r9,%r13
+ movq %rbx,%r14
+ bswapq %r12
+ rorq $23,%r13
+ movq %r10,%r15
+ movq %r12,120(%rsp)
+
+ rorq $5,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ rorq $4,%r13
+ addq %rax,%r12
+ xorq %rbx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r9,%r15
+ movq %rcx,%rax
+
+ rorq $6,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ xorq %rdx,%rax
+ xorq %rbx,%r14
+ addq %r15,%r12
+ movq %rcx,%r15
+
+ rorq $14,%r13
+ andq %rbx,%rax
+ andq %rdx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rax
+
+ addq %r12,%r8
+ addq %r12,%rax
+ leaq 1(%rdi),%rdi
+ addq %r14,%rax
+
+ jmp .Lrounds_16_xx
+.p2align 4
+.Lrounds_16_xx:
+ movq 8(%rsp),%r13
+ movq 112(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 72(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 0(%rsp),%r12
+ movq %r8,%r13
+ addq %r14,%r12
+ movq %rax,%r14
+ rorq $23,%r13
+ movq %r9,%r15
+ movq %r12,0(%rsp)
+
+ rorq $5,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ rorq $4,%r13
+ addq %r11,%r12
+ xorq %rax,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r8,%r15
+ movq %rbx,%r11
+
+ rorq $6,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ xorq %rcx,%r11
+ xorq %rax,%r14
+ addq %r15,%r12
+ movq %rbx,%r15
+
+ rorq $14,%r13
+ andq %rax,%r11
+ andq %rcx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r11
+
+ addq %r12,%rdx
+ addq %r12,%r11
+ leaq 1(%rdi),%rdi
+ addq %r14,%r11
+
+ movq 16(%rsp),%r13
+ movq 120(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 80(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 8(%rsp),%r12
+ movq %rdx,%r13
+ addq %r14,%r12
+ movq %r11,%r14
+ rorq $23,%r13
+ movq %r8,%r15
+ movq %r12,8(%rsp)
+
+ rorq $5,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ rorq $4,%r13
+ addq %r10,%r12
+ xorq %r11,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rdx,%r15
+ movq %rax,%r10
+
+ rorq $6,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ xorq %rbx,%r10
+ xorq %r11,%r14
+ addq %r15,%r12
+ movq %rax,%r15
+
+ rorq $14,%r13
+ andq %r11,%r10
+ andq %rbx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r10
+
+ addq %r12,%rcx
+ addq %r12,%r10
+ leaq 1(%rdi),%rdi
+ addq %r14,%r10
+
+ movq 24(%rsp),%r13
+ movq 0(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 88(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 16(%rsp),%r12
+ movq %rcx,%r13
+ addq %r14,%r12
+ movq %r10,%r14
+ rorq $23,%r13
+ movq %rdx,%r15
+ movq %r12,16(%rsp)
+
+ rorq $5,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ rorq $4,%r13
+ addq %r9,%r12
+ xorq %r10,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rcx,%r15
+ movq %r11,%r9
+
+ rorq $6,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ xorq %rax,%r9
+ xorq %r10,%r14
+ addq %r15,%r12
+ movq %r11,%r15
+
+ rorq $14,%r13
+ andq %r10,%r9
+ andq %rax,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r9
+
+ addq %r12,%rbx
+ addq %r12,%r9
+ leaq 1(%rdi),%rdi
+ addq %r14,%r9
+
+ movq 32(%rsp),%r13
+ movq 8(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 96(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 24(%rsp),%r12
+ movq %rbx,%r13
+ addq %r14,%r12
+ movq %r9,%r14
+ rorq $23,%r13
+ movq %rcx,%r15
+ movq %r12,24(%rsp)
+
+ rorq $5,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ rorq $4,%r13
+ addq %r8,%r12
+ xorq %r9,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rbx,%r15
+ movq %r10,%r8
+
+ rorq $6,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ xorq %r11,%r8
+ xorq %r9,%r14
+ addq %r15,%r12
+ movq %r10,%r15
+
+ rorq $14,%r13
+ andq %r9,%r8
+ andq %r11,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r8
+
+ addq %r12,%rax
+ addq %r12,%r8
+ leaq 1(%rdi),%rdi
+ addq %r14,%r8
+
+ movq 40(%rsp),%r13
+ movq 16(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 104(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 32(%rsp),%r12
+ movq %rax,%r13
+ addq %r14,%r12
+ movq %r8,%r14
+ rorq $23,%r13
+ movq %rbx,%r15
+ movq %r12,32(%rsp)
+
+ rorq $5,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ rorq $4,%r13
+ addq %rdx,%r12
+ xorq %r8,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rax,%r15
+ movq %r9,%rdx
+
+ rorq $6,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ xorq %r10,%rdx
+ xorq %r8,%r14
+ addq %r15,%r12
+ movq %r9,%r15
+
+ rorq $14,%r13
+ andq %r8,%rdx
+ andq %r10,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rdx
+
+ addq %r12,%r11
+ addq %r12,%rdx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rdx
+
+ movq 48(%rsp),%r13
+ movq 24(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 112(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 40(%rsp),%r12
+ movq %r11,%r13
+ addq %r14,%r12
+ movq %rdx,%r14
+ rorq $23,%r13
+ movq %rax,%r15
+ movq %r12,40(%rsp)
+
+ rorq $5,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ rorq $4,%r13
+ addq %rcx,%r12
+ xorq %rdx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r11,%r15
+ movq %r8,%rcx
+
+ rorq $6,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ xorq %r9,%rcx
+ xorq %rdx,%r14
+ addq %r15,%r12
+ movq %r8,%r15
+
+ rorq $14,%r13
+ andq %rdx,%rcx
+ andq %r9,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rcx
+
+ addq %r12,%r10
+ addq %r12,%rcx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rcx
+
+ movq 56(%rsp),%r13
+ movq 32(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 120(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 48(%rsp),%r12
+ movq %r10,%r13
+ addq %r14,%r12
+ movq %rcx,%r14
+ rorq $23,%r13
+ movq %r11,%r15
+ movq %r12,48(%rsp)
+
+ rorq $5,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ rorq $4,%r13
+ addq %rbx,%r12
+ xorq %rcx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r10,%r15
+ movq %rdx,%rbx
+
+ rorq $6,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ xorq %r8,%rbx
+ xorq %rcx,%r14
+ addq %r15,%r12
+ movq %rdx,%r15
+
+ rorq $14,%r13
+ andq %rcx,%rbx
+ andq %r8,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rbx
+
+ addq %r12,%r9
+ addq %r12,%rbx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rbx
+
+ movq 64(%rsp),%r13
+ movq 40(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 0(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 56(%rsp),%r12
+ movq %r9,%r13
+ addq %r14,%r12
+ movq %rbx,%r14
+ rorq $23,%r13
+ movq %r10,%r15
+ movq %r12,56(%rsp)
+
+ rorq $5,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ rorq $4,%r13
+ addq %rax,%r12
+ xorq %rbx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r9,%r15
+ movq %rcx,%rax
+
+ rorq $6,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ xorq %rdx,%rax
+ xorq %rbx,%r14
+ addq %r15,%r12
+ movq %rcx,%r15
+
+ rorq $14,%r13
+ andq %rbx,%rax
+ andq %rdx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rax
+
+ addq %r12,%r8
+ addq %r12,%rax
+ leaq 1(%rdi),%rdi
+ addq %r14,%rax
+
+ movq 72(%rsp),%r13
+ movq 48(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 8(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 64(%rsp),%r12
+ movq %r8,%r13
+ addq %r14,%r12
+ movq %rax,%r14
+ rorq $23,%r13
+ movq %r9,%r15
+ movq %r12,64(%rsp)
+
+ rorq $5,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ rorq $4,%r13
+ addq %r11,%r12
+ xorq %rax,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r8,%r15
+ movq %rbx,%r11
+
+ rorq $6,%r14
+ xorq %r8,%r13
+ xorq %r10,%r15
+
+ xorq %rcx,%r11
+ xorq %rax,%r14
+ addq %r15,%r12
+ movq %rbx,%r15
+
+ rorq $14,%r13
+ andq %rax,%r11
+ andq %rcx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r11
+
+ addq %r12,%rdx
+ addq %r12,%r11
+ leaq 1(%rdi),%rdi
+ addq %r14,%r11
+
+ movq 80(%rsp),%r13
+ movq 56(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 16(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 72(%rsp),%r12
+ movq %rdx,%r13
+ addq %r14,%r12
+ movq %r11,%r14
+ rorq $23,%r13
+ movq %r8,%r15
+ movq %r12,72(%rsp)
+
+ rorq $5,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ rorq $4,%r13
+ addq %r10,%r12
+ xorq %r11,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rdx,%r15
+ movq %rax,%r10
+
+ rorq $6,%r14
+ xorq %rdx,%r13
+ xorq %r9,%r15
+
+ xorq %rbx,%r10
+ xorq %r11,%r14
+ addq %r15,%r12
+ movq %rax,%r15
+
+ rorq $14,%r13
+ andq %r11,%r10
+ andq %rbx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r10
+
+ addq %r12,%rcx
+ addq %r12,%r10
+ leaq 1(%rdi),%rdi
+ addq %r14,%r10
+
+ movq 88(%rsp),%r13
+ movq 64(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 24(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 80(%rsp),%r12
+ movq %rcx,%r13
+ addq %r14,%r12
+ movq %r10,%r14
+ rorq $23,%r13
+ movq %rdx,%r15
+ movq %r12,80(%rsp)
+
+ rorq $5,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ rorq $4,%r13
+ addq %r9,%r12
+ xorq %r10,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rcx,%r15
+ movq %r11,%r9
+
+ rorq $6,%r14
+ xorq %rcx,%r13
+ xorq %r8,%r15
+
+ xorq %rax,%r9
+ xorq %r10,%r14
+ addq %r15,%r12
+ movq %r11,%r15
+
+ rorq $14,%r13
+ andq %r10,%r9
+ andq %rax,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r9
+
+ addq %r12,%rbx
+ addq %r12,%r9
+ leaq 1(%rdi),%rdi
+ addq %r14,%r9
+
+ movq 96(%rsp),%r13
+ movq 72(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 32(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 88(%rsp),%r12
+ movq %rbx,%r13
+ addq %r14,%r12
+ movq %r9,%r14
+ rorq $23,%r13
+ movq %rcx,%r15
+ movq %r12,88(%rsp)
+
+ rorq $5,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ rorq $4,%r13
+ addq %r8,%r12
+ xorq %r9,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rbx,%r15
+ movq %r10,%r8
+
+ rorq $6,%r14
+ xorq %rbx,%r13
+ xorq %rdx,%r15
+
+ xorq %r11,%r8
+ xorq %r9,%r14
+ addq %r15,%r12
+ movq %r10,%r15
+
+ rorq $14,%r13
+ andq %r9,%r8
+ andq %r11,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%r8
+
+ addq %r12,%rax
+ addq %r12,%r8
+ leaq 1(%rdi),%rdi
+ addq %r14,%r8
+
+ movq 104(%rsp),%r13
+ movq 80(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 40(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 96(%rsp),%r12
+ movq %rax,%r13
+ addq %r14,%r12
+ movq %r8,%r14
+ rorq $23,%r13
+ movq %rbx,%r15
+ movq %r12,96(%rsp)
+
+ rorq $5,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ rorq $4,%r13
+ addq %rdx,%r12
+ xorq %r8,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %rax,%r15
+ movq %r9,%rdx
+
+ rorq $6,%r14
+ xorq %rax,%r13
+ xorq %rcx,%r15
+
+ xorq %r10,%rdx
+ xorq %r8,%r14
+ addq %r15,%r12
+ movq %r9,%r15
+
+ rorq $14,%r13
+ andq %r8,%rdx
+ andq %r10,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rdx
+
+ addq %r12,%r11
+ addq %r12,%rdx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rdx
+
+ movq 112(%rsp),%r13
+ movq 88(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 48(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 104(%rsp),%r12
+ movq %r11,%r13
+ addq %r14,%r12
+ movq %rdx,%r14
+ rorq $23,%r13
+ movq %rax,%r15
+ movq %r12,104(%rsp)
+
+ rorq $5,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ rorq $4,%r13
+ addq %rcx,%r12
+ xorq %rdx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r11,%r15
+ movq %r8,%rcx
+
+ rorq $6,%r14
+ xorq %r11,%r13
+ xorq %rbx,%r15
+
+ xorq %r9,%rcx
+ xorq %rdx,%r14
+ addq %r15,%r12
+ movq %r8,%r15
+
+ rorq $14,%r13
+ andq %rdx,%rcx
+ andq %r9,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rcx
+
+ addq %r12,%r10
+ addq %r12,%rcx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rcx
+
+ movq 120(%rsp),%r13
+ movq 96(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 56(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 112(%rsp),%r12
+ movq %r10,%r13
+ addq %r14,%r12
+ movq %rcx,%r14
+ rorq $23,%r13
+ movq %r11,%r15
+ movq %r12,112(%rsp)
+
+ rorq $5,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ rorq $4,%r13
+ addq %rbx,%r12
+ xorq %rcx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r10,%r15
+ movq %rdx,%rbx
+
+ rorq $6,%r14
+ xorq %r10,%r13
+ xorq %rax,%r15
+
+ xorq %r8,%rbx
+ xorq %rcx,%r14
+ addq %r15,%r12
+ movq %rdx,%r15
+
+ rorq $14,%r13
+ andq %rcx,%rbx
+ andq %r8,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rbx
+
+ addq %r12,%r9
+ addq %r12,%rbx
+ leaq 1(%rdi),%rdi
+ addq %r14,%rbx
+
+ movq 0(%rsp),%r13
+ movq 104(%rsp),%r14
+ movq %r13,%r12
+ movq %r14,%r15
+
+ rorq $7,%r12
+ xorq %r13,%r12
+ shrq $7,%r13
+
+ rorq $1,%r12
+ xorq %r12,%r13
+ movq 64(%rsp),%r12
+
+ rorq $42,%r15
+ xorq %r14,%r15
+ shrq $6,%r14
+
+ rorq $19,%r15
+ addq %r13,%r12
+ xorq %r15,%r14
+
+ addq 120(%rsp),%r12
+ movq %r9,%r13
+ addq %r14,%r12
+ movq %rbx,%r14
+ rorq $23,%r13
+ movq %r10,%r15
+ movq %r12,120(%rsp)
+
+ rorq $5,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ rorq $4,%r13
+ addq %rax,%r12
+ xorq %rbx,%r14
+
+ addq (%rbp,%rdi,8),%r12
+ andq %r9,%r15
+ movq %rcx,%rax
+
+ rorq $6,%r14
+ xorq %r9,%r13
+ xorq %r11,%r15
+
+ xorq %rdx,%rax
+ xorq %rbx,%r14
+ addq %r15,%r12
+ movq %rcx,%r15
+
+ rorq $14,%r13
+ andq %rbx,%rax
+ andq %rdx,%r15
+
+ rorq $28,%r14
+ addq %r13,%r12
+ addq %r15,%rax
+
+ addq %r12,%r8
+ addq %r12,%rax
+ leaq 1(%rdi),%rdi
+ addq %r14,%rax
+
+ cmpq $80,%rdi
+ jb .Lrounds_16_xx
+
+ movq 128+0(%rsp),%rdi
+ leaq 128(%rsi),%rsi
+
+ addq 0(%rdi),%rax
+ addq 8(%rdi),%rbx
+ addq 16(%rdi),%rcx
+ addq 24(%rdi),%rdx
+ addq 32(%rdi),%r8
+ addq 40(%rdi),%r9
+ addq 48(%rdi),%r10
+ addq 56(%rdi),%r11
+
+ cmpq 128+16(%rsp),%rsi
+
+ movq %rax,0(%rdi)
+ movq %rbx,8(%rdi)
+ movq %rcx,16(%rdi)
+ movq %rdx,24(%rdi)
+ movq %r8,32(%rdi)
+ movq %r9,40(%rdi)
+ movq %r10,48(%rdi)
+ movq %r11,56(%rdi)
+ jb .Lloop
+
+ movq 128+24(%rsp),%rsi
+ movq (%rsi),%r15
+ movq 8(%rsi),%r14
+ movq 16(%rsi),%r13
+ movq 24(%rsi),%r12
+ movq 32(%rsi),%rbp
+ movq 40(%rsi),%rbx
+ leaq 48(%rsi),%rsp
+.Lepilogue:
+ movq 8(%rsp),%rdi
+ movq 16(%rsp),%rsi
+ retq
+.LSEH_end_sha512_block_data_order:
+.p2align 6
+
+K512:
+.quad 0x428a2f98d728ae22,0x7137449123ef65cd
+.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
+.quad 0x3956c25bf348b538,0x59f111f1b605d019
+.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
+.quad 0xd807aa98a3030242,0x12835b0145706fbe
+.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
+.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
+.quad 0x9bdc06a725c71235,0xc19bf174cf692694
+.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
+.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
+.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
+.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
+.quad 0x983e5152ee66dfab,0xa831c66d2db43210
+.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
+.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
+.quad 0x06ca6351e003826f,0x142929670a0e6e70
+.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
+.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
+.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
+.quad 0x81c2c92e47edaee6,0x92722c851482353b
+.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
+.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
+.quad 0xd192e819d6ef5218,0xd69906245565a910
+.quad 0xf40e35855771202a,0x106aa07032bbd1b8
+.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
+.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
+.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
+.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
+.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
+.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
+.quad 0x90befffa23631e28,0xa4506cebde82bde9
+.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
+.quad 0xca273eceea26619c,0xd186b8c721c0c207
+.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
+.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
+.quad 0x113f9804bef90dae,0x1b710b35131c471b
+.quad 0x28db77f523047d84,0x32caab7b40c72493
+.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
+.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
+.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
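
This file is the MinGW-w64 flavour of the generated SHA-512 block function. Its prologue spills the incoming %rdi/%rsi, moves the Microsoft-ABI arguments (%rcx, %rdx, %r8) into the System V argument registers (%rdi, %rsi, %rdx), and restores everything in the epilogue, so the round body is shared with the other x86-64 variants. The C collector in sha512.c (next file) drives it one or more 128-byte blocks at a time. A minimal sketch of that contract, assuming SHA512_ASM is defined so this assembly provides the symbol, and using the SHA512_CTX layout from <openssl/sha.h>; hash_whole_blocks is a hypothetical helper for illustration only:

    #include <stddef.h>
    #include <openssl/sha.h>        /* SHA512_CTX, SHA512_CBLOCK (128) */

    /* Provided by sha512-*-x86_64.S when SHA512_ASM is defined. */
    void sha512_block_data_order(SHA512_CTX *ctx, const void *in, size_t num);

    /* Feed data that is already a whole number of 128-byte blocks. */
    static void
    hash_whole_blocks(SHA512_CTX *ctx, const unsigned char *data, size_t len)
    {
            /* the third argument is a block count, not a byte count */
            sha512_block_data_order(ctx, data, len / SHA512_CBLOCK);
    }

Partial trailing data is buffered by SHA512_Update() in the C layer and only handed to the block function once a full block has accumulated.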
diff --git a/crypto/libressl/crypto/sha/sha512.c b/crypto/libressl/crypto/sha/sha512.c
new file mode 100644
index 0000000..2d23614
--- /dev/null
+++ b/crypto/libressl/crypto/sha/sha512.c
@@ -0,0 +1,547 @@
+/* $OpenBSD: sha512.c,v 1.15 2016/11/04 13:56:05 miod Exp $ */
+/* ====================================================================
+ * Copyright (c) 2004 The OpenSSL Project. All rights reserved
+ * according to the OpenSSL license [found in ../../LICENSE].
+ * ====================================================================
+ */
+
+#include <machine/endian.h>
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <openssl/opensslconf.h>
+
+#if !defined(OPENSSL_NO_SHA) && !defined(OPENSSL_NO_SHA512)
+/*
+ * IMPLEMENTATION NOTES.
+ *
+ * As you might have noticed, 32-bit hash algorithms:
+ *
+ * - permit SHA_LONG to be wider than 32-bit (case on CRAY);
+ * - optimized versions implement two transform functions: one operating
+ * on [aligned] data in host byte order and one - on data in input
+ * stream byte order;
+ * - share common byte-order neutral collector and padding function
+ * implementations, ../md32_common.h;
+ *
+ * None of the above applies to this SHA-512 implementation. Reasons
+ * [in reverse order] are:
+ *
+ * - it's the only 64-bit hash algorithm at the moment of this writing,
+ * there is no need for common collector/padding implementation [yet];
+ * - by supporting only one transform function [which operates on
+ * *aligned* data in input stream byte order, big-endian in this case]
+ * we minimize burden of maintenance in two ways: a) collector/padding
+ * function is simpler; b) only one transform function to stare at;
+ * - SHA_LONG64 is required to be exactly 64-bit in order to be able to
+ * apply a number of optimizations to mitigate potential performance
+ * penalties caused by previous design decision;
+ *
+ * Caveat lector.
+ *
+ * Implementation relies on the fact that "long long" is 64-bit on
+ * both 32- and 64-bit platforms. If some compiler vendor comes up
+ * with 128-bit long long, adjustment to sha.h would be required.
+ * As this implementation relies on 64-bit integer type, it's totally
+ * inappropriate for platforms which don't support it, most notably
+ * 16-bit platforms.
+ * <appro@fy.chalmers.se>
+ */
+
+//#include <openssl/crypto.h>
+#include <openssl/opensslv.h>
+#include <openssl/sha.h>
+
+#if !defined(__STRICT_ALIGNMENT) || defined(SHA512_ASM)
+#define SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
+#endif
+
+int SHA384_Init(SHA512_CTX *c)
+ {
+ c->h[0]=U64(0xcbbb9d5dc1059ed8);
+ c->h[1]=U64(0x629a292a367cd507);
+ c->h[2]=U64(0x9159015a3070dd17);
+ c->h[3]=U64(0x152fecd8f70e5939);
+ c->h[4]=U64(0x67332667ffc00b31);
+ c->h[5]=U64(0x8eb44a8768581511);
+ c->h[6]=U64(0xdb0c2e0d64f98fa7);
+ c->h[7]=U64(0x47b5481dbefa4fa4);
+
+ c->Nl=0; c->Nh=0;
+ c->num=0; c->md_len=SHA384_DIGEST_LENGTH;
+ return 1;
+ }
+
+int SHA512_Init(SHA512_CTX *c)
+ {
+ c->h[0]=U64(0x6a09e667f3bcc908);
+ c->h[1]=U64(0xbb67ae8584caa73b);
+ c->h[2]=U64(0x3c6ef372fe94f82b);
+ c->h[3]=U64(0xa54ff53a5f1d36f1);
+ c->h[4]=U64(0x510e527fade682d1);
+ c->h[5]=U64(0x9b05688c2b3e6c1f);
+ c->h[6]=U64(0x1f83d9abfb41bd6b);
+ c->h[7]=U64(0x5be0cd19137e2179);
+
+ c->Nl=0; c->Nh=0;
+ c->num=0; c->md_len=SHA512_DIGEST_LENGTH;
+ return 1;
+ }
+
+#ifndef SHA512_ASM
+static
+#endif
+void sha512_block_data_order (SHA512_CTX *ctx, const void *in, size_t num);
+
+int SHA512_Final (unsigned char *md, SHA512_CTX *c)
+ {
+ unsigned char *p=(unsigned char *)c->u.p;
+ size_t n=c->num;
+
+	p[n]=0x80;	/* there is always room for one byte */
+ n++;
+ if (n > (sizeof(c->u)-16))
+ memset (p+n,0,sizeof(c->u)-n), n=0,
+ sha512_block_data_order (c,p,1);
+
+ memset (p+n,0,sizeof(c->u)-16-n);
+#if BYTE_ORDER == BIG_ENDIAN
+ c->u.d[SHA_LBLOCK-2] = c->Nh;
+ c->u.d[SHA_LBLOCK-1] = c->Nl;
+#else
+ p[sizeof(c->u)-1] = (unsigned char)(c->Nl);
+ p[sizeof(c->u)-2] = (unsigned char)(c->Nl>>8);
+ p[sizeof(c->u)-3] = (unsigned char)(c->Nl>>16);
+ p[sizeof(c->u)-4] = (unsigned char)(c->Nl>>24);
+ p[sizeof(c->u)-5] = (unsigned char)(c->Nl>>32);
+ p[sizeof(c->u)-6] = (unsigned char)(c->Nl>>40);
+ p[sizeof(c->u)-7] = (unsigned char)(c->Nl>>48);
+ p[sizeof(c->u)-8] = (unsigned char)(c->Nl>>56);
+ p[sizeof(c->u)-9] = (unsigned char)(c->Nh);
+ p[sizeof(c->u)-10] = (unsigned char)(c->Nh>>8);
+ p[sizeof(c->u)-11] = (unsigned char)(c->Nh>>16);
+ p[sizeof(c->u)-12] = (unsigned char)(c->Nh>>24);
+ p[sizeof(c->u)-13] = (unsigned char)(c->Nh>>32);
+ p[sizeof(c->u)-14] = (unsigned char)(c->Nh>>40);
+ p[sizeof(c->u)-15] = (unsigned char)(c->Nh>>48);
+ p[sizeof(c->u)-16] = (unsigned char)(c->Nh>>56);
+#endif
+
+ sha512_block_data_order (c,p,1);
+
+ if (md==0) return 0;
+
+ switch (c->md_len)
+ {
+ /* Let compiler decide if it's appropriate to unroll... */
+ case SHA384_DIGEST_LENGTH:
+ for (n=0;n<SHA384_DIGEST_LENGTH/8;n++)
+ {
+ SHA_LONG64 t = c->h[n];
+
+ *(md++) = (unsigned char)(t>>56);
+ *(md++) = (unsigned char)(t>>48);
+ *(md++) = (unsigned char)(t>>40);
+ *(md++) = (unsigned char)(t>>32);
+ *(md++) = (unsigned char)(t>>24);
+ *(md++) = (unsigned char)(t>>16);
+ *(md++) = (unsigned char)(t>>8);
+ *(md++) = (unsigned char)(t);
+ }
+ break;
+ case SHA512_DIGEST_LENGTH:
+ for (n=0;n<SHA512_DIGEST_LENGTH/8;n++)
+ {
+ SHA_LONG64 t = c->h[n];
+
+ *(md++) = (unsigned char)(t>>56);
+ *(md++) = (unsigned char)(t>>48);
+ *(md++) = (unsigned char)(t>>40);
+ *(md++) = (unsigned char)(t>>32);
+ *(md++) = (unsigned char)(t>>24);
+ *(md++) = (unsigned char)(t>>16);
+ *(md++) = (unsigned char)(t>>8);
+ *(md++) = (unsigned char)(t);
+ }
+ break;
+ /* ... as well as make sure md_len is not abused. */
+ default: return 0;
+ }
+
+ return 1;
+ }
+
+int SHA384_Final (unsigned char *md,SHA512_CTX *c)
+{ return SHA512_Final (md,c); }
+
+int SHA512_Update (SHA512_CTX *c, const void *_data, size_t len)
+ {
+ SHA_LONG64 l;
+ unsigned char *p=c->u.p;
+ const unsigned char *data=(const unsigned char *)_data;
+
+ if (len==0) return 1;
+
+ l = (c->Nl+(((SHA_LONG64)len)<<3))&U64(0xffffffffffffffff);
+ if (l < c->Nl) c->Nh++;
+ if (sizeof(len)>=8) c->Nh+=(((SHA_LONG64)len)>>61);
+ c->Nl=l;
+
+ if (c->num != 0)
+ {
+ size_t n = sizeof(c->u) - c->num;
+
+ if (len < n)
+ {
+ memcpy (p+c->num,data,len), c->num += (unsigned int)len;
+ return 1;
+ }
+ else {
+ memcpy (p+c->num,data,n), c->num = 0;
+ len-=n, data+=n;
+ sha512_block_data_order (c,p,1);
+ }
+ }
+
+ if (len >= sizeof(c->u))
+ {
+#ifndef SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
+ if ((size_t)data%sizeof(c->u.d[0]) != 0)
+ while (len >= sizeof(c->u))
+ memcpy (p,data,sizeof(c->u)),
+ sha512_block_data_order (c,p,1),
+ len -= sizeof(c->u),
+ data += sizeof(c->u);
+ else
+#endif
+ sha512_block_data_order (c,data,len/sizeof(c->u)),
+ data += len,
+ len %= sizeof(c->u),
+ data -= len;
+ }
+
+ if (len != 0) memcpy (p,data,len), c->num = (int)len;
+
+ return 1;
+ }
+
+int SHA384_Update (SHA512_CTX *c, const void *data, size_t len)
+{ return SHA512_Update (c,data,len); }
+
+void SHA512_Transform (SHA512_CTX *c, const unsigned char *data)
+ {
+#ifndef SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
+ if ((size_t)data%sizeof(c->u.d[0]) != 0)
+ memcpy(c->u.p,data,sizeof(c->u.p)),
+ data = c->u.p;
+#endif
+ sha512_block_data_order (c,data,1);
+ }
+
+unsigned char *SHA384(const unsigned char *d, size_t n, unsigned char *md)
+ {
+ SHA512_CTX c;
+ static unsigned char m[SHA384_DIGEST_LENGTH];
+
+ if (md == NULL) md=m;
+ SHA384_Init(&c);
+ SHA512_Update(&c,d,n);
+ SHA512_Final(md,&c);
+ explicit_bzero(&c,sizeof(c));
+ return(md);
+ }
+
+unsigned char *SHA512(const unsigned char *d, size_t n, unsigned char *md)
+ {
+ SHA512_CTX c;
+ static unsigned char m[SHA512_DIGEST_LENGTH];
+
+ if (md == NULL) md=m;
+ SHA512_Init(&c);
+ SHA512_Update(&c,d,n);
+ SHA512_Final(md,&c);
+ explicit_bzero(&c,sizeof(c));
+ return(md);
+ }
+
+#ifndef SHA512_ASM
+static const SHA_LONG64 K512[80] = {
+ U64(0x428a2f98d728ae22),U64(0x7137449123ef65cd),
+ U64(0xb5c0fbcfec4d3b2f),U64(0xe9b5dba58189dbbc),
+ U64(0x3956c25bf348b538),U64(0x59f111f1b605d019),
+ U64(0x923f82a4af194f9b),U64(0xab1c5ed5da6d8118),
+ U64(0xd807aa98a3030242),U64(0x12835b0145706fbe),
+ U64(0x243185be4ee4b28c),U64(0x550c7dc3d5ffb4e2),
+ U64(0x72be5d74f27b896f),U64(0x80deb1fe3b1696b1),
+ U64(0x9bdc06a725c71235),U64(0xc19bf174cf692694),
+ U64(0xe49b69c19ef14ad2),U64(0xefbe4786384f25e3),
+ U64(0x0fc19dc68b8cd5b5),U64(0x240ca1cc77ac9c65),
+ U64(0x2de92c6f592b0275),U64(0x4a7484aa6ea6e483),
+ U64(0x5cb0a9dcbd41fbd4),U64(0x76f988da831153b5),
+ U64(0x983e5152ee66dfab),U64(0xa831c66d2db43210),
+ U64(0xb00327c898fb213f),U64(0xbf597fc7beef0ee4),
+ U64(0xc6e00bf33da88fc2),U64(0xd5a79147930aa725),
+ U64(0x06ca6351e003826f),U64(0x142929670a0e6e70),
+ U64(0x27b70a8546d22ffc),U64(0x2e1b21385c26c926),
+ U64(0x4d2c6dfc5ac42aed),U64(0x53380d139d95b3df),
+ U64(0x650a73548baf63de),U64(0x766a0abb3c77b2a8),
+ U64(0x81c2c92e47edaee6),U64(0x92722c851482353b),
+ U64(0xa2bfe8a14cf10364),U64(0xa81a664bbc423001),
+ U64(0xc24b8b70d0f89791),U64(0xc76c51a30654be30),
+ U64(0xd192e819d6ef5218),U64(0xd69906245565a910),
+ U64(0xf40e35855771202a),U64(0x106aa07032bbd1b8),
+ U64(0x19a4c116b8d2d0c8),U64(0x1e376c085141ab53),
+ U64(0x2748774cdf8eeb99),U64(0x34b0bcb5e19b48a8),
+ U64(0x391c0cb3c5c95a63),U64(0x4ed8aa4ae3418acb),
+ U64(0x5b9cca4f7763e373),U64(0x682e6ff3d6b2b8a3),
+ U64(0x748f82ee5defb2fc),U64(0x78a5636f43172f60),
+ U64(0x84c87814a1f0ab72),U64(0x8cc702081a6439ec),
+ U64(0x90befffa23631e28),U64(0xa4506cebde82bde9),
+ U64(0xbef9a3f7b2c67915),U64(0xc67178f2e372532b),
+ U64(0xca273eceea26619c),U64(0xd186b8c721c0c207),
+ U64(0xeada7dd6cde0eb1e),U64(0xf57d4f7fee6ed178),
+ U64(0x06f067aa72176fba),U64(0x0a637dc5a2c898a6),
+ U64(0x113f9804bef90dae),U64(0x1b710b35131c471b),
+ U64(0x28db77f523047d84),U64(0x32caab7b40c72493),
+ U64(0x3c9ebe0a15c9bebc),U64(0x431d67c49c100d4c),
+ U64(0x4cc5d4becb3e42b6),U64(0x597f299cfc657e2a),
+ U64(0x5fcb6fab3ad6faec),U64(0x6c44198c4a475817) };
+
+#if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+# if defined(__x86_64) || defined(__x86_64__)
+# define ROTR(a,n) ({ SHA_LONG64 ret; \
+ asm ("rorq %1,%0" \
+ : "=r"(ret) \
+ : "J"(n),"0"(a) \
+ : "cc"); ret; })
+# define PULL64(x) ({ SHA_LONG64 ret=*((const SHA_LONG64 *)(&(x))); \
+ asm ("bswapq %0" \
+ : "=r"(ret) \
+ : "0"(ret)); ret; })
+# elif (defined(__i386) || defined(__i386__))
+# define PULL64(x) ({ const unsigned int *p=(const unsigned int *)(&(x));\
+ unsigned int hi=p[0],lo=p[1]; \
+ asm ("bswapl %0; bswapl %1;" \
+ : "=r"(lo),"=r"(hi) \
+ : "0"(lo),"1"(hi)); \
+ ((SHA_LONG64)hi)<<32|lo; })
+# elif (defined(_ARCH_PPC) && defined(__64BIT__)) || defined(_ARCH_PPC64)
+# define ROTR(a,n) ({ SHA_LONG64 ret; \
+ asm ("rotrdi %0,%1,%2" \
+ : "=r"(ret) \
+ : "r"(a),"K"(n)); ret; })
+# endif
+#endif
+
+#ifndef PULL64
+#define B(x,j) (((SHA_LONG64)(*(((const unsigned char *)(&x))+j)))<<((7-j)*8))
+#define PULL64(x) (B(x,0)|B(x,1)|B(x,2)|B(x,3)|B(x,4)|B(x,5)|B(x,6)|B(x,7))
+#endif
+
+#ifndef ROTR
+#define ROTR(x,s) (((x)>>s) | (x)<<(64-s))
+#endif
+
+#define Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
+#define Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
+#define sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
+#define sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
+
+#define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z)))
+#define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
+
+
+#if defined(__i386) || defined(__i386__) || defined(_M_IX86)
+/*
+ * This code should give better results on 32-bit CPU with less than
+ * ~24 registers, both size and performance wise...
+ */
+static void sha512_block_data_order (SHA512_CTX *ctx, const void *in, size_t num)
+ {
+ const SHA_LONG64 *W=in;
+ SHA_LONG64 A,E,T;
+ SHA_LONG64 X[9+80],*F;
+ int i;
+
+ while (num--) {
+
+ F = X+80;
+ A = ctx->h[0]; F[1] = ctx->h[1];
+ F[2] = ctx->h[2]; F[3] = ctx->h[3];
+ E = ctx->h[4]; F[5] = ctx->h[5];
+ F[6] = ctx->h[6]; F[7] = ctx->h[7];
+
+ for (i=0;i<16;i++,F--)
+ {
+ T = PULL64(W[i]);
+ F[0] = A;
+ F[4] = E;
+ F[8] = T;
+ T += F[7] + Sigma1(E) + Ch(E,F[5],F[6]) + K512[i];
+ E = F[3] + T;
+ A = T + Sigma0(A) + Maj(A,F[1],F[2]);
+ }
+
+ for (;i<80;i++,F--)
+ {
+ T = sigma0(F[8+16-1]);
+ T += sigma1(F[8+16-14]);
+ T += F[8+16] + F[8+16-9];
+
+ F[0] = A;
+ F[4] = E;
+ F[8] = T;
+ T += F[7] + Sigma1(E) + Ch(E,F[5],F[6]) + K512[i];
+ E = F[3] + T;
+ A = T + Sigma0(A) + Maj(A,F[1],F[2]);
+ }
+
+ ctx->h[0] += A; ctx->h[1] += F[1];
+ ctx->h[2] += F[2]; ctx->h[3] += F[3];
+ ctx->h[4] += E; ctx->h[5] += F[5];
+ ctx->h[6] += F[6]; ctx->h[7] += F[7];
+
+ W+=SHA_LBLOCK;
+ }
+ }
+
+#elif defined(OPENSSL_SMALL_FOOTPRINT)
+
+static void sha512_block_data_order (SHA512_CTX *ctx, const void *in, size_t num)
+ {
+ const SHA_LONG64 *W=in;
+ SHA_LONG64 a,b,c,d,e,f,g,h,s0,s1,T1,T2;
+ SHA_LONG64 X[16];
+ int i;
+
+ while (num--) {
+
+ a = ctx->h[0]; b = ctx->h[1]; c = ctx->h[2]; d = ctx->h[3];
+ e = ctx->h[4]; f = ctx->h[5]; g = ctx->h[6]; h = ctx->h[7];
+
+ for (i=0;i<16;i++)
+ {
+#if BYTE_ORDER == BIG_ENDIAN
+ T1 = X[i] = W[i];
+#else
+ T1 = X[i] = PULL64(W[i]);
+#endif
+ T1 += h + Sigma1(e) + Ch(e,f,g) + K512[i];
+ T2 = Sigma0(a) + Maj(a,b,c);
+ h = g; g = f; f = e; e = d + T1;
+ d = c; c = b; b = a; a = T1 + T2;
+ }
+
+ for (;i<80;i++)
+ {
+ s0 = X[(i+1)&0x0f]; s0 = sigma0(s0);
+ s1 = X[(i+14)&0x0f]; s1 = sigma1(s1);
+
+ T1 = X[i&0xf] += s0 + s1 + X[(i+9)&0xf];
+ T1 += h + Sigma1(e) + Ch(e,f,g) + K512[i];
+ T2 = Sigma0(a) + Maj(a,b,c);
+ h = g; g = f; f = e; e = d + T1;
+ d = c; c = b; b = a; a = T1 + T2;
+ }
+
+ ctx->h[0] += a; ctx->h[1] += b; ctx->h[2] += c; ctx->h[3] += d;
+ ctx->h[4] += e; ctx->h[5] += f; ctx->h[6] += g; ctx->h[7] += h;
+
+ W+=SHA_LBLOCK;
+ }
+ }
+
+#else
+
+#define ROUND_00_15(i,a,b,c,d,e,f,g,h) do { \
+ T1 += h + Sigma1(e) + Ch(e,f,g) + K512[i]; \
+ h = Sigma0(a) + Maj(a,b,c); \
+ d += T1; h += T1; } while (0)
+
+#define ROUND_16_80(i,j,a,b,c,d,e,f,g,h,X) do { \
+ s0 = X[(j+1)&0x0f]; s0 = sigma0(s0); \
+ s1 = X[(j+14)&0x0f]; s1 = sigma1(s1); \
+ T1 = X[(j)&0x0f] += s0 + s1 + X[(j+9)&0x0f]; \
+ ROUND_00_15(i+j,a,b,c,d,e,f,g,h); } while (0)
+
+static void sha512_block_data_order (SHA512_CTX *ctx, const void *in, size_t num)
+ {
+ const SHA_LONG64 *W=in;
+ SHA_LONG64 a,b,c,d,e,f,g,h,s0,s1,T1;
+ SHA_LONG64 X[16];
+ int i;
+
+ while (num--) {
+
+ a = ctx->h[0]; b = ctx->h[1]; c = ctx->h[2]; d = ctx->h[3];
+ e = ctx->h[4]; f = ctx->h[5]; g = ctx->h[6]; h = ctx->h[7];
+
+#if BYTE_ORDER == BIG_ENDIAN
+ T1 = X[0] = W[0]; ROUND_00_15(0,a,b,c,d,e,f,g,h);
+ T1 = X[1] = W[1]; ROUND_00_15(1,h,a,b,c,d,e,f,g);
+ T1 = X[2] = W[2]; ROUND_00_15(2,g,h,a,b,c,d,e,f);
+ T1 = X[3] = W[3]; ROUND_00_15(3,f,g,h,a,b,c,d,e);
+ T1 = X[4] = W[4]; ROUND_00_15(4,e,f,g,h,a,b,c,d);
+ T1 = X[5] = W[5]; ROUND_00_15(5,d,e,f,g,h,a,b,c);
+ T1 = X[6] = W[6]; ROUND_00_15(6,c,d,e,f,g,h,a,b);
+ T1 = X[7] = W[7]; ROUND_00_15(7,b,c,d,e,f,g,h,a);
+ T1 = X[8] = W[8]; ROUND_00_15(8,a,b,c,d,e,f,g,h);
+ T1 = X[9] = W[9]; ROUND_00_15(9,h,a,b,c,d,e,f,g);
+ T1 = X[10] = W[10]; ROUND_00_15(10,g,h,a,b,c,d,e,f);
+ T1 = X[11] = W[11]; ROUND_00_15(11,f,g,h,a,b,c,d,e);
+ T1 = X[12] = W[12]; ROUND_00_15(12,e,f,g,h,a,b,c,d);
+ T1 = X[13] = W[13]; ROUND_00_15(13,d,e,f,g,h,a,b,c);
+ T1 = X[14] = W[14]; ROUND_00_15(14,c,d,e,f,g,h,a,b);
+ T1 = X[15] = W[15]; ROUND_00_15(15,b,c,d,e,f,g,h,a);
+#else
+ T1 = X[0] = PULL64(W[0]); ROUND_00_15(0,a,b,c,d,e,f,g,h);
+ T1 = X[1] = PULL64(W[1]); ROUND_00_15(1,h,a,b,c,d,e,f,g);
+ T1 = X[2] = PULL64(W[2]); ROUND_00_15(2,g,h,a,b,c,d,e,f);
+ T1 = X[3] = PULL64(W[3]); ROUND_00_15(3,f,g,h,a,b,c,d,e);
+ T1 = X[4] = PULL64(W[4]); ROUND_00_15(4,e,f,g,h,a,b,c,d);
+ T1 = X[5] = PULL64(W[5]); ROUND_00_15(5,d,e,f,g,h,a,b,c);
+ T1 = X[6] = PULL64(W[6]); ROUND_00_15(6,c,d,e,f,g,h,a,b);
+ T1 = X[7] = PULL64(W[7]); ROUND_00_15(7,b,c,d,e,f,g,h,a);
+ T1 = X[8] = PULL64(W[8]); ROUND_00_15(8,a,b,c,d,e,f,g,h);
+ T1 = X[9] = PULL64(W[9]); ROUND_00_15(9,h,a,b,c,d,e,f,g);
+ T1 = X[10] = PULL64(W[10]); ROUND_00_15(10,g,h,a,b,c,d,e,f);
+ T1 = X[11] = PULL64(W[11]); ROUND_00_15(11,f,g,h,a,b,c,d,e);
+ T1 = X[12] = PULL64(W[12]); ROUND_00_15(12,e,f,g,h,a,b,c,d);
+ T1 = X[13] = PULL64(W[13]); ROUND_00_15(13,d,e,f,g,h,a,b,c);
+ T1 = X[14] = PULL64(W[14]); ROUND_00_15(14,c,d,e,f,g,h,a,b);
+ T1 = X[15] = PULL64(W[15]); ROUND_00_15(15,b,c,d,e,f,g,h,a);
+#endif
+
+ for (i=16;i<80;i+=16)
+ {
+ ROUND_16_80(i, 0,a,b,c,d,e,f,g,h,X);
+ ROUND_16_80(i, 1,h,a,b,c,d,e,f,g,X);
+ ROUND_16_80(i, 2,g,h,a,b,c,d,e,f,X);
+ ROUND_16_80(i, 3,f,g,h,a,b,c,d,e,X);
+ ROUND_16_80(i, 4,e,f,g,h,a,b,c,d,X);
+ ROUND_16_80(i, 5,d,e,f,g,h,a,b,c,X);
+ ROUND_16_80(i, 6,c,d,e,f,g,h,a,b,X);
+ ROUND_16_80(i, 7,b,c,d,e,f,g,h,a,X);
+ ROUND_16_80(i, 8,a,b,c,d,e,f,g,h,X);
+ ROUND_16_80(i, 9,h,a,b,c,d,e,f,g,X);
+ ROUND_16_80(i,10,g,h,a,b,c,d,e,f,X);
+ ROUND_16_80(i,11,f,g,h,a,b,c,d,e,X);
+ ROUND_16_80(i,12,e,f,g,h,a,b,c,d,X);
+ ROUND_16_80(i,13,d,e,f,g,h,a,b,c,X);
+ ROUND_16_80(i,14,c,d,e,f,g,h,a,b,X);
+ ROUND_16_80(i,15,b,c,d,e,f,g,h,a,X);
+ }
+
+ ctx->h[0] += a; ctx->h[1] += b; ctx->h[2] += c; ctx->h[3] += d;
+ ctx->h[4] += e; ctx->h[5] += f; ctx->h[6] += g; ctx->h[7] += h;
+
+ W+=SHA_LBLOCK;
+ }
+ }
+
+#endif
+
+#endif /* SHA512_ASM */
+
+#endif /* !OPENSSL_NO_SHA512 */
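
sha512.c above exposes the usual incremental interface (SHA512_Init / SHA512_Update / SHA512_Final, with the SHA384_* entry points sharing the same SHA512_CTX) plus the one-shot SHA512()/SHA384() helpers. Note that the one-shot functions fall back to a static internal buffer when md is NULL, which is not thread-safe, so callers normally pass their own output buffer. A short usage sketch, assuming a program built against this libressl copy and including <openssl/sha.h>:

    #include <stdio.h>
    #include <string.h>
    #include <openssl/sha.h>

    int main(void)
    {
            static const char msg[] = "abc";
            unsigned char digest[SHA512_DIGEST_LENGTH];
            SHA512_CTX ctx;

            /* incremental API: feed data in as many pieces as convenient */
            SHA512_Init(&ctx);
            SHA512_Update(&ctx, msg, strlen(msg));
            SHA512_Final(digest, &ctx);

            for (size_t i = 0; i < sizeof(digest); i++)
                    printf("%02x", digest[i]);
            printf("\n");

            /* one-shot API: supply an output buffer instead of relying on
             * the static fallback inside SHA512() */
            SHA512((const unsigned char *)msg, strlen(msg), digest);
            return 0;
    }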
diff --git a/crypto/libressl/crypto/sha/sha_locl.h b/crypto/libressl/crypto/sha/sha_locl.h
new file mode 100644
index 0000000..46c9a39
--- /dev/null
+++ b/crypto/libressl/crypto/sha/sha_locl.h
@@ -0,0 +1,419 @@
+/* $OpenBSD: sha_locl.h,v 1.23 2016/12/23 23:22:25 patrick Exp $ */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <openssl/opensslconf.h>
+#include <openssl/sha.h>
+
+#define DATA_ORDER_IS_BIG_ENDIAN
+
+#define HASH_LONG SHA_LONG
+#define HASH_CTX SHA_CTX
+#define HASH_CBLOCK SHA_CBLOCK
+#define HASH_MAKE_STRING(c,s) do { \
+ unsigned long ll; \
+ ll=(c)->h0; HOST_l2c(ll,(s)); \
+ ll=(c)->h1; HOST_l2c(ll,(s)); \
+ ll=(c)->h2; HOST_l2c(ll,(s)); \
+ ll=(c)->h3; HOST_l2c(ll,(s)); \
+ ll=(c)->h4; HOST_l2c(ll,(s)); \
+ } while (0)
+
+# define HASH_UPDATE SHA1_Update
+# define HASH_TRANSFORM SHA1_Transform
+# define HASH_FINAL SHA1_Final
+# define HASH_INIT SHA1_Init
+# define HASH_BLOCK_DATA_ORDER sha1_block_data_order
+# define Xupdate(a,ix,ia,ib,ic,id) ( (a)=(ia^ib^ic^id), \
+ ix=(a)=ROTATE((a),1) \
+ )
+
+__BEGIN_HIDDEN_DECLS
+
+#ifndef SHA1_ASM
+static
+#endif
+
+void sha1_block_data_order (SHA_CTX *c, const void *p,size_t num);
+
+__END_HIDDEN_DECLS
+
+#include "md32_common.h"
+
+#define INIT_DATA_h0 0x67452301UL
+#define INIT_DATA_h1 0xefcdab89UL
+#define INIT_DATA_h2 0x98badcfeUL
+#define INIT_DATA_h3 0x10325476UL
+#define INIT_DATA_h4 0xc3d2e1f0UL
+
+int SHA1_Init(SHA_CTX *c)
+ {
+ memset (c,0,sizeof(*c));
+ c->h0=INIT_DATA_h0;
+ c->h1=INIT_DATA_h1;
+ c->h2=INIT_DATA_h2;
+ c->h3=INIT_DATA_h3;
+ c->h4=INIT_DATA_h4;
+ return 1;
+ }
+
+#define K_00_19 0x5a827999UL
+#define K_20_39 0x6ed9eba1UL
+#define K_40_59 0x8f1bbcdcUL
+#define K_60_79 0xca62c1d6UL
+
+/* As pointed out by Wei Dai <weidai@eskimo.com>, F() below can be
+ * simplified to the code in F_00_19. Wei attributes these optimisations
+ * to Peter Gutmann's SHS code, and he attributes it to Rich Schroeppel.
+ * #define F(x,y,z) (((x) & (y)) | ((~(x)) & (z)))
+ * I've just become aware of another tweak to be made, again from Wei Dai,
+ * in F_40_59, (x&a)|(y&a) -> (x|y)&a
+ */
+#define F_00_19(b,c,d) ((((c) ^ (d)) & (b)) ^ (d))
+#define F_20_39(b,c,d) ((b) ^ (c) ^ (d))
+#define F_40_59(b,c,d) (((b) & (c)) | (((b)|(c)) & (d)))
+#define F_60_79(b,c,d) F_20_39(b,c,d)
+
+#ifndef OPENSSL_SMALL_FOOTPRINT
+
+#define BODY_00_15(i,a,b,c,d,e,f,xi) \
+ (f)=xi+(e)+K_00_19+ROTATE((a),5)+F_00_19((b),(c),(d)); \
+ (b)=ROTATE((b),30);
+
+#define BODY_16_19(i,a,b,c,d,e,f,xi,xa,xb,xc,xd) \
+ Xupdate(f,xi,xa,xb,xc,xd); \
+ (f)+=(e)+K_00_19+ROTATE((a),5)+F_00_19((b),(c),(d)); \
+ (b)=ROTATE((b),30);
+
+#define BODY_20_31(i,a,b,c,d,e,f,xi,xa,xb,xc,xd) \
+ Xupdate(f,xi,xa,xb,xc,xd); \
+ (f)+=(e)+K_20_39+ROTATE((a),5)+F_20_39((b),(c),(d)); \
+ (b)=ROTATE((b),30);
+
+#define BODY_32_39(i,a,b,c,d,e,f,xa,xb,xc,xd) \
+ Xupdate(f,xa,xa,xb,xc,xd); \
+ (f)+=(e)+K_20_39+ROTATE((a),5)+F_20_39((b),(c),(d)); \
+ (b)=ROTATE((b),30);
+
+#define BODY_40_59(i,a,b,c,d,e,f,xa,xb,xc,xd) \
+ Xupdate(f,xa,xa,xb,xc,xd); \
+ (f)+=(e)+K_40_59+ROTATE((a),5)+F_40_59((b),(c),(d)); \
+ (b)=ROTATE((b),30);
+
+#define BODY_60_79(i,a,b,c,d,e,f,xa,xb,xc,xd) \
+ Xupdate(f,xa,xa,xb,xc,xd); \
+ (f)=xa+(e)+K_60_79+ROTATE((a),5)+F_60_79((b),(c),(d)); \
+ (b)=ROTATE((b),30);
+
+#ifdef X
+#undef X
+#endif
+#ifndef MD32_XARRAY
+ /*
+	 * Originally X was an array. As it's automatic, it's natural
+	 * to expect a RISC compiler to accommodate at least part of it in
+	 * the register bank, isn't it? Unfortunately not all compilers
+	 * "find" this expectation reasonable:-( In order to make such
+ * compilers generate better code I replace X[] with a bunch of
+ * X0, X1, etc. See the function body below...
+ * <appro@fy.chalmers.se>
+ */
+# define X(i) XX##i
+#else
+ /*
+ * However! Some compilers (most notably HP C) get overwhelmed by
+ * that many local variables so that we have to have the way to
+ * fall down to the original behavior.
+ */
+# define X(i) XX[i]
+#endif
+
+#if !defined(SHA1_ASM)
+#include <machine/endian.h>
+static void HASH_BLOCK_DATA_ORDER (SHA_CTX *c, const void *p, size_t num)
+ {
+ const unsigned char *data=p;
+ unsigned MD32_REG_T A,B,C,D,E,T,l;
+#ifndef MD32_XARRAY
+ unsigned MD32_REG_T XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
+ XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
+#else
+ SHA_LONG XX[16];
+#endif
+
+ A=c->h0;
+ B=c->h1;
+ C=c->h2;
+ D=c->h3;
+ E=c->h4;
+
+ for (;;)
+ {
+
+ if (BYTE_ORDER != LITTLE_ENDIAN &&
+ sizeof(SHA_LONG)==4 && ((size_t)p%4)==0)
+ {
+ const SHA_LONG *W=(const SHA_LONG *)data;
+
+ X( 0) = W[0]; X( 1) = W[ 1];
+ BODY_00_15( 0,A,B,C,D,E,T,X( 0)); X( 2) = W[ 2];
+ BODY_00_15( 1,T,A,B,C,D,E,X( 1)); X( 3) = W[ 3];
+ BODY_00_15( 2,E,T,A,B,C,D,X( 2)); X( 4) = W[ 4];
+ BODY_00_15( 3,D,E,T,A,B,C,X( 3)); X( 5) = W[ 5];
+ BODY_00_15( 4,C,D,E,T,A,B,X( 4)); X( 6) = W[ 6];
+ BODY_00_15( 5,B,C,D,E,T,A,X( 5)); X( 7) = W[ 7];
+ BODY_00_15( 6,A,B,C,D,E,T,X( 6)); X( 8) = W[ 8];
+ BODY_00_15( 7,T,A,B,C,D,E,X( 7)); X( 9) = W[ 9];
+ BODY_00_15( 8,E,T,A,B,C,D,X( 8)); X(10) = W[10];
+ BODY_00_15( 9,D,E,T,A,B,C,X( 9)); X(11) = W[11];
+ BODY_00_15(10,C,D,E,T,A,B,X(10)); X(12) = W[12];
+ BODY_00_15(11,B,C,D,E,T,A,X(11)); X(13) = W[13];
+ BODY_00_15(12,A,B,C,D,E,T,X(12)); X(14) = W[14];
+ BODY_00_15(13,T,A,B,C,D,E,X(13)); X(15) = W[15];
+ BODY_00_15(14,E,T,A,B,C,D,X(14));
+ BODY_00_15(15,D,E,T,A,B,C,X(15));
+
+ data += SHA_CBLOCK;
+ }
+ else
+ {
+ HOST_c2l(data,l); X( 0)=l; HOST_c2l(data,l); X( 1)=l;
+ BODY_00_15( 0,A,B,C,D,E,T,X( 0)); HOST_c2l(data,l); X( 2)=l;
+ BODY_00_15( 1,T,A,B,C,D,E,X( 1)); HOST_c2l(data,l); X( 3)=l;
+ BODY_00_15( 2,E,T,A,B,C,D,X( 2)); HOST_c2l(data,l); X( 4)=l;
+ BODY_00_15( 3,D,E,T,A,B,C,X( 3)); HOST_c2l(data,l); X( 5)=l;
+ BODY_00_15( 4,C,D,E,T,A,B,X( 4)); HOST_c2l(data,l); X( 6)=l;
+ BODY_00_15( 5,B,C,D,E,T,A,X( 5)); HOST_c2l(data,l); X( 7)=l;
+ BODY_00_15( 6,A,B,C,D,E,T,X( 6)); HOST_c2l(data,l); X( 8)=l;
+ BODY_00_15( 7,T,A,B,C,D,E,X( 7)); HOST_c2l(data,l); X( 9)=l;
+ BODY_00_15( 8,E,T,A,B,C,D,X( 8)); HOST_c2l(data,l); X(10)=l;
+ BODY_00_15( 9,D,E,T,A,B,C,X( 9)); HOST_c2l(data,l); X(11)=l;
+ BODY_00_15(10,C,D,E,T,A,B,X(10)); HOST_c2l(data,l); X(12)=l;
+ BODY_00_15(11,B,C,D,E,T,A,X(11)); HOST_c2l(data,l); X(13)=l;
+ BODY_00_15(12,A,B,C,D,E,T,X(12)); HOST_c2l(data,l); X(14)=l;
+ BODY_00_15(13,T,A,B,C,D,E,X(13)); HOST_c2l(data,l); X(15)=l;
+ BODY_00_15(14,E,T,A,B,C,D,X(14));
+ BODY_00_15(15,D,E,T,A,B,C,X(15));
+ }
+
+ BODY_16_19(16,C,D,E,T,A,B,X( 0),X( 0),X( 2),X( 8),X(13));
+ BODY_16_19(17,B,C,D,E,T,A,X( 1),X( 1),X( 3),X( 9),X(14));
+ BODY_16_19(18,A,B,C,D,E,T,X( 2),X( 2),X( 4),X(10),X(15));
+ BODY_16_19(19,T,A,B,C,D,E,X( 3),X( 3),X( 5),X(11),X( 0));
+
+ BODY_20_31(20,E,T,A,B,C,D,X( 4),X( 4),X( 6),X(12),X( 1));
+ BODY_20_31(21,D,E,T,A,B,C,X( 5),X( 5),X( 7),X(13),X( 2));
+ BODY_20_31(22,C,D,E,T,A,B,X( 6),X( 6),X( 8),X(14),X( 3));
+ BODY_20_31(23,B,C,D,E,T,A,X( 7),X( 7),X( 9),X(15),X( 4));
+ BODY_20_31(24,A,B,C,D,E,T,X( 8),X( 8),X(10),X( 0),X( 5));
+ BODY_20_31(25,T,A,B,C,D,E,X( 9),X( 9),X(11),X( 1),X( 6));
+ BODY_20_31(26,E,T,A,B,C,D,X(10),X(10),X(12),X( 2),X( 7));
+ BODY_20_31(27,D,E,T,A,B,C,X(11),X(11),X(13),X( 3),X( 8));
+ BODY_20_31(28,C,D,E,T,A,B,X(12),X(12),X(14),X( 4),X( 9));
+ BODY_20_31(29,B,C,D,E,T,A,X(13),X(13),X(15),X( 5),X(10));
+ BODY_20_31(30,A,B,C,D,E,T,X(14),X(14),X( 0),X( 6),X(11));
+ BODY_20_31(31,T,A,B,C,D,E,X(15),X(15),X( 1),X( 7),X(12));
+
+ BODY_32_39(32,E,T,A,B,C,D,X( 0),X( 2),X( 8),X(13));
+ BODY_32_39(33,D,E,T,A,B,C,X( 1),X( 3),X( 9),X(14));
+ BODY_32_39(34,C,D,E,T,A,B,X( 2),X( 4),X(10),X(15));
+ BODY_32_39(35,B,C,D,E,T,A,X( 3),X( 5),X(11),X( 0));
+ BODY_32_39(36,A,B,C,D,E,T,X( 4),X( 6),X(12),X( 1));
+ BODY_32_39(37,T,A,B,C,D,E,X( 5),X( 7),X(13),X( 2));
+ BODY_32_39(38,E,T,A,B,C,D,X( 6),X( 8),X(14),X( 3));
+ BODY_32_39(39,D,E,T,A,B,C,X( 7),X( 9),X(15),X( 4));
+
+ BODY_40_59(40,C,D,E,T,A,B,X( 8),X(10),X( 0),X( 5));
+ BODY_40_59(41,B,C,D,E,T,A,X( 9),X(11),X( 1),X( 6));
+ BODY_40_59(42,A,B,C,D,E,T,X(10),X(12),X( 2),X( 7));
+ BODY_40_59(43,T,A,B,C,D,E,X(11),X(13),X( 3),X( 8));
+ BODY_40_59(44,E,T,A,B,C,D,X(12),X(14),X( 4),X( 9));
+ BODY_40_59(45,D,E,T,A,B,C,X(13),X(15),X( 5),X(10));
+ BODY_40_59(46,C,D,E,T,A,B,X(14),X( 0),X( 6),X(11));
+ BODY_40_59(47,B,C,D,E,T,A,X(15),X( 1),X( 7),X(12));
+ BODY_40_59(48,A,B,C,D,E,T,X( 0),X( 2),X( 8),X(13));
+ BODY_40_59(49,T,A,B,C,D,E,X( 1),X( 3),X( 9),X(14));
+ BODY_40_59(50,E,T,A,B,C,D,X( 2),X( 4),X(10),X(15));
+ BODY_40_59(51,D,E,T,A,B,C,X( 3),X( 5),X(11),X( 0));
+ BODY_40_59(52,C,D,E,T,A,B,X( 4),X( 6),X(12),X( 1));
+ BODY_40_59(53,B,C,D,E,T,A,X( 5),X( 7),X(13),X( 2));
+ BODY_40_59(54,A,B,C,D,E,T,X( 6),X( 8),X(14),X( 3));
+ BODY_40_59(55,T,A,B,C,D,E,X( 7),X( 9),X(15),X( 4));
+ BODY_40_59(56,E,T,A,B,C,D,X( 8),X(10),X( 0),X( 5));
+ BODY_40_59(57,D,E,T,A,B,C,X( 9),X(11),X( 1),X( 6));
+ BODY_40_59(58,C,D,E,T,A,B,X(10),X(12),X( 2),X( 7));
+ BODY_40_59(59,B,C,D,E,T,A,X(11),X(13),X( 3),X( 8));
+
+ BODY_60_79(60,A,B,C,D,E,T,X(12),X(14),X( 4),X( 9));
+ BODY_60_79(61,T,A,B,C,D,E,X(13),X(15),X( 5),X(10));
+ BODY_60_79(62,E,T,A,B,C,D,X(14),X( 0),X( 6),X(11));
+ BODY_60_79(63,D,E,T,A,B,C,X(15),X( 1),X( 7),X(12));
+ BODY_60_79(64,C,D,E,T,A,B,X( 0),X( 2),X( 8),X(13));
+ BODY_60_79(65,B,C,D,E,T,A,X( 1),X( 3),X( 9),X(14));
+ BODY_60_79(66,A,B,C,D,E,T,X( 2),X( 4),X(10),X(15));
+ BODY_60_79(67,T,A,B,C,D,E,X( 3),X( 5),X(11),X( 0));
+ BODY_60_79(68,E,T,A,B,C,D,X( 4),X( 6),X(12),X( 1));
+ BODY_60_79(69,D,E,T,A,B,C,X( 5),X( 7),X(13),X( 2));
+ BODY_60_79(70,C,D,E,T,A,B,X( 6),X( 8),X(14),X( 3));
+ BODY_60_79(71,B,C,D,E,T,A,X( 7),X( 9),X(15),X( 4));
+ BODY_60_79(72,A,B,C,D,E,T,X( 8),X(10),X( 0),X( 5));
+ BODY_60_79(73,T,A,B,C,D,E,X( 9),X(11),X( 1),X( 6));
+ BODY_60_79(74,E,T,A,B,C,D,X(10),X(12),X( 2),X( 7));
+ BODY_60_79(75,D,E,T,A,B,C,X(11),X(13),X( 3),X( 8));
+ BODY_60_79(76,C,D,E,T,A,B,X(12),X(14),X( 4),X( 9));
+ BODY_60_79(77,B,C,D,E,T,A,X(13),X(15),X( 5),X(10));
+ BODY_60_79(78,A,B,C,D,E,T,X(14),X( 0),X( 6),X(11));
+ BODY_60_79(79,T,A,B,C,D,E,X(15),X( 1),X( 7),X(12));
+
+ c->h0=(c->h0+E)&0xffffffffL;
+ c->h1=(c->h1+T)&0xffffffffL;
+ c->h2=(c->h2+A)&0xffffffffL;
+ c->h3=(c->h3+B)&0xffffffffL;
+ c->h4=(c->h4+C)&0xffffffffL;
+
+ if (--num == 0) break;
+
+ A=c->h0;
+ B=c->h1;
+ C=c->h2;
+ D=c->h3;
+ E=c->h4;
+
+ }
+ }
+#endif
+
+#else /* OPENSSL_SMALL_FOOTPRINT */
+
+#define BODY_00_15(xi) do { \
+ T=E+K_00_19+F_00_19(B,C,D); \
+ E=D, D=C, C=ROTATE(B,30), B=A; \
+ A=ROTATE(A,5)+T+xi; } while(0)
+
+#define BODY_16_19(xa,xb,xc,xd) do { \
+ Xupdate(T,xa,xa,xb,xc,xd); \
+ T+=E+K_00_19+F_00_19(B,C,D); \
+ E=D, D=C, C=ROTATE(B,30), B=A; \
+ A=ROTATE(A,5)+T; } while(0)
+
+#define BODY_20_39(xa,xb,xc,xd) do { \
+ Xupdate(T,xa,xa,xb,xc,xd); \
+ T+=E+K_20_39+F_20_39(B,C,D); \
+ E=D, D=C, C=ROTATE(B,30), B=A; \
+ A=ROTATE(A,5)+T; } while(0)
+
+#define BODY_40_59(xa,xb,xc,xd) do { \
+ Xupdate(T,xa,xa,xb,xc,xd); \
+ T+=E+K_40_59+F_40_59(B,C,D); \
+ E=D, D=C, C=ROTATE(B,30), B=A; \
+ A=ROTATE(A,5)+T; } while(0)
+
+#define BODY_60_79(xa,xb,xc,xd) do { \
+ Xupdate(T,xa,xa,xb,xc,xd); \
+ T=E+K_60_79+F_60_79(B,C,D); \
+ E=D, D=C, C=ROTATE(B,30), B=A; \
+ A=ROTATE(A,5)+T+xa; } while(0)
+
+#if !defined(SHA1_ASM)
+static void HASH_BLOCK_DATA_ORDER (SHA_CTX *c, const void *p, size_t num)
+ {
+ const unsigned char *data=p;
+ unsigned MD32_REG_T A,B,C,D,E,T,l;
+ int i;
+ SHA_LONG X[16];
+
+ A=c->h0;
+ B=c->h1;
+ C=c->h2;
+ D=c->h3;
+ E=c->h4;
+
+ for (;;)
+ {
+ for (i=0;i<16;i++)
+ { HOST_c2l(data,l); X[i]=l; BODY_00_15(X[i]); }
+ for (i=0;i<4;i++)
+ { BODY_16_19(X[i], X[i+2], X[i+8], X[(i+13)&15]); }
+ for (;i<24;i++)
+ { BODY_20_39(X[i&15], X[(i+2)&15], X[(i+8)&15],X[(i+13)&15]); }
+ for (i=0;i<20;i++)
+ { BODY_40_59(X[(i+8)&15],X[(i+10)&15],X[i&15], X[(i+5)&15]); }
+ for (i=4;i<24;i++)
+ { BODY_60_79(X[(i+8)&15],X[(i+10)&15],X[i&15], X[(i+5)&15]); }
+
+ c->h0=(c->h0+A)&0xffffffffL;
+ c->h1=(c->h1+B)&0xffffffffL;
+ c->h2=(c->h2+C)&0xffffffffL;
+ c->h3=(c->h3+D)&0xffffffffL;
+ c->h4=(c->h4+E)&0xffffffffL;
+
+ if (--num == 0) break;
+
+ A=c->h0;
+ B=c->h1;
+ C=c->h2;
+ D=c->h3;
+ E=c->h4;
+
+ }
+ }
+#endif
+
+#endif
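
The comment above F_00_19/F_40_59 in sha_locl.h credits Wei Dai with two Boolean simplifications of the SHA-1 round functions: the selection function (x & y) | (~x & z) rewritten as ((y ^ z) & x) ^ z, and the two terms (x & a) | (y & a) inside the majority function folded into (x | y) & a. Both identities can be checked exhaustively over single bits, and since all the operators are bitwise the result carries over to whole words. A small self-contained check, written for this note and not part of the library:

    #include <assert.h>
    #include <stdio.h>

    /* textbook forms and the simplified forms used in sha_locl.h */
    #define F_ORIG(x,y,z)   (((x) & (y)) | ((~(x)) & (z)))
    #define F_00_19(b,c,d)  ((((c) ^ (d)) & (b)) ^ (d))

    #define MAJ_ORIG(x,y,z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
    #define F_40_59(b,c,d)  (((b) & (c)) | (((b) | (c)) & (d)))

    int main(void)
    {
            /* exhaustive over single bits; bitwise ops extend this to words */
            for (unsigned b = 0; b <= 1; b++)
                    for (unsigned c = 0; c <= 1; c++)
                            for (unsigned d = 0; d <= 1; d++) {
                                    assert((F_ORIG(b, c, d) & 1) == (F_00_19(b, c, d) & 1));
                                    assert((MAJ_ORIG(b, c, d) & 1) == (F_40_59(b, c, d) & 1));
                            }
            puts("both simplifications agree with the textbook forms");
            return 0;
    }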
diff --git a/crypto/libressl/include/compat/string.h b/crypto/libressl/include/compat/string.h
new file mode 100644
index 0000000..4bf7519
--- /dev/null
+++ b/crypto/libressl/include/compat/string.h
@@ -0,0 +1,87 @@
+/*
+ * Public domain
+ * string.h compatibility shim
+ */
+
+#ifndef LIBCRYPTOCOMPAT_STRING_H
+#define LIBCRYPTOCOMPAT_STRING_H
+
+#ifdef _MSC_VER
+#if _MSC_VER >= 1900
+#include <../ucrt/string.h>
+#else
+#include <../include/string.h>
+#endif
+#else
+#include_next <string.h>
+#endif
+
+#include <sys/types.h>
+
+#if defined(__sun) || defined(_AIX) || defined(__hpux)
+/* Some functions historically defined in string.h were placed in strings.h by
+ * SUS. Use the same hack as OS X and FreeBSD use to work around on AIX,
+ * Solaris, and HPUX.
+ */
+#include <strings.h>
+#endif
+
+#ifndef HAVE_STRCASECMP
+int strcasecmp(const char *s1, const char *s2);
+int strncasecmp(const char *s1, const char *s2, size_t len);
+#endif
+
+#ifndef HAVE_STRLCPY
+size_t strlcpy(char *dst, const char *src, size_t siz);
+#endif
+
+#ifndef HAVE_STRLCAT
+size_t strlcat(char *dst, const char *src, size_t siz);
+#endif
+
+#ifndef HAVE_STRNDUP
+char * strndup(const char *str, size_t maxlen);
+/* the only user of strnlen is strndup, so only build it if needed */
+#ifndef HAVE_STRNLEN
+size_t strnlen(const char *str, size_t maxlen);
+#endif
+#endif
+
+#ifndef HAVE_STRSEP
+char *strsep(char **stringp, const char *delim);
+#endif
+
+#ifndef HAVE_EXPLICIT_BZERO
+void explicit_bzero(void *, size_t);
+#endif
+
+#ifndef HAVE_TIMINGSAFE_BCMP
+int timingsafe_bcmp(const void *b1, const void *b2, size_t n);
+#endif
+
+#ifndef HAVE_TIMINGSAFE_MEMCMP
+int timingsafe_memcmp(const void *b1, const void *b2, size_t len);
+#endif
+
+#ifndef HAVE_MEMMEM
+void * memmem(const void *big, size_t big_len, const void *little,
+ size_t little_len);
+#endif
+
+#ifdef _WIN32
+#include <errno.h>
+
+static inline char *
+posix_strerror(int errnum)
+{
+ if (errnum == ECONNREFUSED) {
+ return "Connection refused";
+ }
+ return strerror(errnum);
+}
+
+#define strerror(errnum) posix_strerror(errnum)
+
+#endif
+
+#endif
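
The prototypes above only paper over platforms that lack these functions. As one example of why timingsafe_bcmp(3) exists, here is a hedged sketch of a constant-time comparison in its style (the shipped implementation may differ in detail):

#include <stddef.h>

/*
 * Compare n bytes without an early exit, so timing does not leak how
 * many leading bytes matched. Returns 0 if equal, nonzero otherwise.
 */
static int
timingsafe_bcmp_sketch(const void *b1, const void *b2, size_t n)
{
	const unsigned char *p1 = b1, *p2 = b2;
	int ret = 0;

	for (; n > 0; n--)
		ret |= *p1++ ^ *p2++;
	return (ret != 0);
}
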
diff --git a/crypto/libressl/include/compat/unistd.h b/crypto/libressl/include/compat/unistd.h
new file mode 100644
index 0000000..5e6ab1d
--- /dev/null
+++ b/crypto/libressl/include/compat/unistd.h
@@ -0,0 +1,78 @@
+/*
+ * Public domain
+ * unistd.h compatibility shim
+ */
+
+#ifndef LIBCRYPTOCOMPAT_UNISTD_H
+#define LIBCRYPTOCOMPAT_UNISTD_H
+
+#ifndef _MSC_VER
+
+#include_next <unistd.h>
+
+#ifdef __MINGW32__
+int ftruncate(int fd, off_t length);
+uid_t getuid(void);
+ssize_t pread(int d, void *buf, size_t nbytes, off_t offset);
+ssize_t pwrite(int d, const void *buf, size_t nbytes, off_t offset);
+#endif
+
+#else
+
+#include <stdlib.h>
+#include <io.h>
+#include <process.h>
+
+#define STDOUT_FILENO 1
+#define STDERR_FILENO 2
+
+#define R_OK 4
+#define W_OK 2
+#define X_OK 0
+#define F_OK 0
+
+#define SEEK_SET 0
+#define SEEK_CUR 1
+#define SEEK_END 2
+
+#define access _access
+
+#ifdef _MSC_VER
+#include <windows.h>
+static inline unsigned int sleep(unsigned int seconds)
+{
+ Sleep(seconds * 1000);
+ return seconds;
+}
+#endif
+
+int ftruncate(int fd, off_t length);
+uid_t getuid(void);
+ssize_t pread(int d, void *buf, size_t nbytes, off_t offset);
+ssize_t pwrite(int d, const void *buf, size_t nbytes, off_t offset);
+
+#endif
+
+#ifndef HAVE_GETENTROPY
+int getentropy(void *buf, size_t buflen);
+#else
+/*
+ * Solaris 11.3 adds getentropy(2), but defines the function in sys/random.h
+ */
+#if defined(__sun)
+#include <sys/random.h>
+#endif
+#endif
+
+#ifndef HAVE_GETPAGESIZE
+int getpagesize(void);
+#endif
+
+#define pledge(request, paths) 0
+#define unveil(path, permissions) 0
+
+#ifndef HAVE_PIPE2
+int pipe2(int fildes[2], int flags);
+#endif
+
+#endif
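
The getentropy() prototype above (native or supplied by the compat code) is the low-level seed source. A hedged caller-side sketch, assuming the usual contract of 0 on success, -1 with errno on failure, and requests capped at 256 bytes per call:

#include <stddef.h>
#include <unistd.h>

/*
 * Fill an arbitrarily sized buffer in chunks, since getentropy()
 * conventionally rejects requests larger than 256 bytes.
 */
static int
fill_random(unsigned char *buf, size_t len)
{
	while (len > 0) {
		size_t n = len > 256 ? 256 : len;

		if (getentropy(buf, n) == -1)
			return -1;	/* errno set by getentropy() */
		buf += n;
		len -= n;
	}
	return 0;
}
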
diff --git a/crypto/libressl/include/md32_common.h b/crypto/libressl/include/md32_common.h
new file mode 100644
index 0000000..0dca617
--- /dev/null
+++ b/crypto/libressl/include/md32_common.h
@@ -0,0 +1,345 @@
+/* $OpenBSD: md32_common.h,v 1.22 2016/11/04 13:56:04 miod Exp $ */
+/* ====================================================================
+ * Copyright (c) 1999-2007 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * licensing@OpenSSL.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ */
+
+/*
+ * This is a generic 32 bit "collector" for message digest algorithms.
+ * Whenever needed it collects input character stream into chunks of
+ * 32 bit values and invokes a block function that performs actual hash
+ * calculations.
+ *
+ * Porting guide.
+ *
+ * Obligatory macros:
+ *
+ * DATA_ORDER_IS_BIG_ENDIAN or DATA_ORDER_IS_LITTLE_ENDIAN
+ * this macro defines byte order of input stream.
+ * HASH_CBLOCK
+ * size of a unit chunk HASH_BLOCK operates on.
+ * HASH_LONG
+ * has to be at least 32 bit wide.
+ * HASH_CTX
+ * context structure that at least contains following
+ * members:
+ * typedef struct {
+ * ...
+ * HASH_LONG Nl,Nh;
+ * either {
+ * HASH_LONG data[HASH_LBLOCK];
+ * unsigned char data[HASH_CBLOCK];
+ * };
+ * unsigned int num;
+ * ...
+ * } HASH_CTX;
+ * data[] vector is expected to be zeroed upon first call to
+ * HASH_UPDATE.
+ * HASH_UPDATE
+ * name of "Update" function, implemented here.
+ * HASH_TRANSFORM
+ * name of "Transform" function, implemented here.
+ * HASH_FINAL
+ * name of "Final" function, implemented here.
+ * HASH_BLOCK_DATA_ORDER
+ * name of "block" function capable of treating *unaligned* input
+ * message in original (data) byte order, implemented externally.
+ * HASH_MAKE_STRING
+ *	macro converting context variables to an ASCII hash string.
+ *
+ * MD5 example:
+ *
+ * #define DATA_ORDER_IS_LITTLE_ENDIAN
+ *
+ * #define HASH_LONG MD5_LONG
+ * #define HASH_CTX MD5_CTX
+ * #define HASH_CBLOCK MD5_CBLOCK
+ * #define HASH_UPDATE MD5_Update
+ * #define HASH_TRANSFORM MD5_Transform
+ * #define HASH_FINAL MD5_Final
+ * #define HASH_BLOCK_DATA_ORDER md5_block_data_order
+ *
+ * <appro@fy.chalmers.se>
+ */
+
+#include <stdint.h>
+
+#include <openssl/opensslconf.h>
+
+#if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
+#error "DATA_ORDER must be defined!"
+#endif
+
+#ifndef HASH_CBLOCK
+#error "HASH_CBLOCK must be defined!"
+#endif
+#ifndef HASH_LONG
+#error "HASH_LONG must be defined!"
+#endif
+#ifndef HASH_CTX
+#error "HASH_CTX must be defined!"
+#endif
+
+#ifndef HASH_UPDATE
+#error "HASH_UPDATE must be defined!"
+#endif
+#ifndef HASH_TRANSFORM
+#error "HASH_TRANSFORM must be defined!"
+#endif
+#if !defined(HASH_FINAL) && !defined(HASH_NO_FINAL)
+#error "HASH_FINAL or HASH_NO_FINAL must be defined!"
+#endif
+
+#ifndef HASH_BLOCK_DATA_ORDER
+#error "HASH_BLOCK_DATA_ORDER must be defined!"
+#endif
+
+/*
+ * This common idiom is recognized by the compiler and turned into a
+ * CPU-specific intrinsic as appropriate.
+ * e.g. GCC optimizes to roll on amd64 at -O0
+ */
+static inline uint32_t ROTATE(uint32_t a, uint32_t n)
+{
+ return (a<<n)|(a>>(32-n));
+}
+
+#if defined(DATA_ORDER_IS_BIG_ENDIAN)
+
+#if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+# if (defined(__i386) || defined(__i386__) || \
+ defined(__x86_64) || defined(__x86_64__))
+ /*
+ * This gives ~30-40% performance improvement in SHA-256 compiled
+ * with gcc [on P4]. Well, first macro to be frank. We can pull
+ * this trick on x86* platforms only, because these CPUs can fetch
+ * unaligned data without raising an exception.
+ */
+# define HOST_c2l(c,l) ({ unsigned int r=*((const unsigned int *)(c)); \
+ asm ("bswapl %0":"=r"(r):"0"(r)); \
+ (c)+=4; (l)=r; })
+# define HOST_l2c(l,c) ({ unsigned int r=(l); \
+ asm ("bswapl %0":"=r"(r):"0"(r)); \
+ *((unsigned int *)(c))=r; (c)+=4; })
+# endif
+#endif
+
+#ifndef HOST_c2l
+#define HOST_c2l(c,l) do {l =(((unsigned long)(*((c)++)))<<24); \
+ l|=(((unsigned long)(*((c)++)))<<16); \
+ l|=(((unsigned long)(*((c)++)))<< 8); \
+ l|=(((unsigned long)(*((c)++))) ); \
+ } while (0)
+#endif
+#ifndef HOST_l2c
+#define HOST_l2c(l,c) do {*((c)++)=(unsigned char)(((l)>>24)&0xff); \
+ *((c)++)=(unsigned char)(((l)>>16)&0xff); \
+ *((c)++)=(unsigned char)(((l)>> 8)&0xff); \
+ *((c)++)=(unsigned char)(((l) )&0xff); \
+ } while (0)
+#endif
+
+#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
+
+#if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
+# define HOST_c2l(c,l) ((l)=*((const unsigned int *)(c)), (c)+=4)
+# define HOST_l2c(l,c) (*((unsigned int *)(c))=(l), (c)+=4)
+#endif
+
+#ifndef HOST_c2l
+#define HOST_c2l(c,l) do {l =(((unsigned long)(*((c)++))) ); \
+ l|=(((unsigned long)(*((c)++)))<< 8); \
+ l|=(((unsigned long)(*((c)++)))<<16); \
+ l|=(((unsigned long)(*((c)++)))<<24); \
+ } while (0)
+#endif
+#ifndef HOST_l2c
+#define HOST_l2c(l,c) do {*((c)++)=(unsigned char)(((l) )&0xff); \
+ *((c)++)=(unsigned char)(((l)>> 8)&0xff); \
+ *((c)++)=(unsigned char)(((l)>>16)&0xff); \
+ *((c)++)=(unsigned char)(((l)>>24)&0xff); \
+ } while (0)
+#endif
+
+#endif
+
+/*
+ * Time for some action:-)
+ */
+
+int
+HASH_UPDATE(HASH_CTX *c, const void *data_, size_t len)
+{
+ const unsigned char *data = data_;
+ unsigned char *p;
+ HASH_LONG l;
+ size_t n;
+
+ if (len == 0)
+ return 1;
+
+ l = (c->Nl + (((HASH_LONG)len) << 3))&0xffffffffUL;
+ /* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
+ * Wei Dai <weidai@eskimo.com> for pointing it out. */
+ if (l < c->Nl) /* overflow */
+ c->Nh++;
+ c->Nh+=(HASH_LONG)(len>>29); /* might cause compiler warning on 16-bit */
+ c->Nl = l;
+
+ n = c->num;
+ if (n != 0) {
+ p = (unsigned char *)c->data;
+
+ if (len >= HASH_CBLOCK || len + n >= HASH_CBLOCK) {
+ memcpy (p + n, data, HASH_CBLOCK - n);
+ HASH_BLOCK_DATA_ORDER (c, p, 1);
+ n = HASH_CBLOCK - n;
+ data += n;
+ len -= n;
+ c->num = 0;
+ memset (p,0,HASH_CBLOCK); /* keep it zeroed */
+ } else {
+ memcpy (p + n, data, len);
+ c->num += (unsigned int)len;
+ return 1;
+ }
+ }
+
+ n = len/HASH_CBLOCK;
+ if (n > 0) {
+ HASH_BLOCK_DATA_ORDER (c, data, n);
+ n *= HASH_CBLOCK;
+ data += n;
+ len -= n;
+ }
+
+ if (len != 0) {
+ p = (unsigned char *)c->data;
+ c->num = (unsigned int)len;
+ memcpy (p, data, len);
+ }
+ return 1;
+}
+
+
+void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data)
+{
+ HASH_BLOCK_DATA_ORDER (c, data, 1);
+}
+
+
+#ifndef HASH_NO_FINAL
+int HASH_FINAL (unsigned char *md, HASH_CTX *c)
+{
+ unsigned char *p = (unsigned char *)c->data;
+ size_t n = c->num;
+
+ p[n] = 0x80; /* there is always room for one */
+ n++;
+
+ if (n > (HASH_CBLOCK - 8)) {
+ memset (p + n, 0, HASH_CBLOCK - n);
+ n = 0;
+ HASH_BLOCK_DATA_ORDER (c, p, 1);
+ }
+ memset (p + n, 0, HASH_CBLOCK - 8 - n);
+
+ p += HASH_CBLOCK - 8;
+#if defined(DATA_ORDER_IS_BIG_ENDIAN)
+ HOST_l2c(c->Nh, p);
+ HOST_l2c(c->Nl, p);
+#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
+ HOST_l2c(c->Nl, p);
+ HOST_l2c(c->Nh, p);
+#endif
+ p -= HASH_CBLOCK;
+ HASH_BLOCK_DATA_ORDER (c, p, 1);
+ c->num = 0;
+ memset (p, 0, HASH_CBLOCK);
+
+#ifndef HASH_MAKE_STRING
+#error "HASH_MAKE_STRING must be defined!"
+#else
+ HASH_MAKE_STRING(c, md);
+#endif
+
+ return 1;
+}
+#endif
+
+#ifndef MD32_REG_T
+#if defined(__alpha) || defined(__sparcv9) || defined(__mips)
+#define MD32_REG_T long
+/*
+ * This comment was originally written for MD5, which is why it
+ * discusses A-D. But it basically applies to all 32-bit digests,
+ * which is why it was moved to a common header file.
+ *
+ * In case you wonder why A-D are declared as long and not
+ * as MD5_LONG. Doing so results in slight performance
+ * boost on LP64 architectures. The catch is we don't
+ * really care if 32 MSBs of a 64-bit register get polluted
+ * with eventual overflows as we *save* only 32 LSBs in
+ * *either* case. Now declaring 'em long excuses the compiler
+ * from keeping 32 MSBs zeroed resulting in 13% performance
+ * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
+ * Well, to be honest it should say that this *prevents*
+ * performance degradation.
+ * <appro@fy.chalmers.se>
+ */
+#else
+/*
+ * Above is not absolute and there are LP64 compilers that
+ * generate better code if MD32_REG_T is defined int. The above
+ * pre-processor condition reflects the circumstances under which
+ * the conclusion was made and is subject to further extension.
+ * <appro@fy.chalmers.se>
+ */
+#define MD32_REG_T int
+#endif
+#endif
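
The porting guide above is consumed by defining the listed macros before including the header; the SHA-1 code earlier in this patch instantiates the template in essentially this way. An illustrative (not verbatim) SHA-1 instantiation sketch:

/* Illustrative glue: configure the md32_common.h template for SHA-1. */
#define DATA_ORDER_IS_BIG_ENDIAN

#define HASH_LONG		SHA_LONG
#define HASH_CTX		SHA_CTX
#define HASH_CBLOCK		SHA_CBLOCK
#define HASH_UPDATE		SHA1_Update
#define HASH_TRANSFORM		SHA1_Transform
#define HASH_FINAL		SHA1_Final
#define HASH_BLOCK_DATA_ORDER	sha1_block_data_order

/* Serialize h0..h4 as big-endian words into the digest buffer. */
#define HASH_MAKE_STRING(c, s) do {		\
	unsigned long ll;			\
	ll = (c)->h0; HOST_l2c(ll, (s));	\
	ll = (c)->h1; HOST_l2c(ll, (s));	\
	ll = (c)->h2; HOST_l2c(ll, (s));	\
	ll = (c)->h3; HOST_l2c(ll, (s));	\
	ll = (c)->h4; HOST_l2c(ll, (s));	\
} while (0)

#include "md32_common.h"
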
diff --git a/crypto/libressl/include/openssl/chacha.h b/crypto/libressl/include/openssl/chacha.h
new file mode 100644
index 0000000..e2345b2
--- /dev/null
+++ b/crypto/libressl/include/openssl/chacha.h
@@ -0,0 +1,58 @@
+/* $OpenBSD: chacha.h,v 1.8 2019/01/22 00:59:21 dlg Exp $ */
+/*
+ * Copyright (c) 2014 Joel Sing <jsing@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HEADER_CHACHA_H
+#define HEADER_CHACHA_H
+
+#include <openssl/opensslconf.h>
+
+#if defined(OPENSSL_NO_CHACHA)
+#error ChaCha is disabled.
+#endif
+
+#include <stddef.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct {
+ unsigned int input[16];
+ unsigned char ks[64];
+ unsigned char unused;
+} ChaCha_ctx;
+
+void ChaCha_set_key(ChaCha_ctx *ctx, const unsigned char *key,
+ unsigned int keybits);
+void ChaCha_set_iv(ChaCha_ctx *ctx, const unsigned char *iv,
+ const unsigned char *counter);
+void ChaCha(ChaCha_ctx *ctx, unsigned char *out, const unsigned char *in,
+ size_t len);
+
+void CRYPTO_chacha_20(unsigned char *out, const unsigned char *in, size_t len,
+ const unsigned char key[32], const unsigned char iv[8], uint64_t counter);
+void CRYPTO_xchacha_20(unsigned char *out, const unsigned char *in, size_t len,
+ const unsigned char key[32], const unsigned char iv[24]);
+void CRYPTO_hchacha_20(unsigned char out[32],
+ const unsigned char key[32], const unsigned char iv[16]);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* HEADER_CHACHA_H */
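
A minimal usage sketch for the one-shot CRYPTO_chacha_20() declared above; encryption and decryption are the same XOR-with-keystream operation. The prototype comes from this header, the wrapper below is illustrative:

#include <stddef.h>
#include <stdint.h>

#include <openssl/chacha.h>

/* XOR `len` bytes of keystream over `in`, starting at keystream block 0. */
static void
chacha20_xor(unsigned char *out, const unsigned char *in, size_t len,
    const unsigned char key[32], const unsigned char iv[8])
{
	CRYPTO_chacha_20(out, in, len, key, iv, 0);
}
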
diff --git a/crypto/libressl/include/openssl/curve25519.h b/crypto/libressl/include/openssl/curve25519.h
new file mode 100644
index 0000000..0470e8e
--- /dev/null
+++ b/crypto/libressl/include/openssl/curve25519.h
@@ -0,0 +1,77 @@
+/* $OpenBSD: curve25519.h,v 1.3 2019/05/11 15:55:52 tb Exp $ */
+/*
+ * Copyright (c) 2015, Google Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HEADER_CURVE25519_H
+#define HEADER_CURVE25519_H
+
+#include <stdint.h>
+
+#include <openssl/opensslconf.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * Curve25519.
+ *
+ * Curve25519 is an elliptic curve. See https://tools.ietf.org/html/rfc7748.
+ */
+
+/*
+ * X25519.
+ *
+ * X25519 is the Diffie-Hellman primitive built from curve25519. It is
+ * sometimes referred to as curve25519, but X25519 is a more precise name.
+ * See http://cr.yp.to/ecdh.html and https://tools.ietf.org/html/rfc7748.
+ */
+
+#define X25519_KEY_LENGTH 32
+
+/*
+ * X25519_keypair sets |out_public_value| and |out_private_key| to a freshly
+ * generated, public/private key pair.
+ */
+void X25519_keypair(uint8_t out_public_value[X25519_KEY_LENGTH],
+ uint8_t out_private_key[X25519_KEY_LENGTH]);
+
+/*
+ * X25519 writes a shared key to |out_shared_key| that is calculated from the
+ * given private key and the peer's public value. It returns one on success and
+ * zero on error.
+ *
+ * Don't use the shared key directly, rather use a KDF and also include the two
+ * public values as inputs.
+ */
+int X25519(uint8_t out_shared_key[X25519_KEY_LENGTH],
+ const uint8_t private_key[X25519_KEY_LENGTH],
+ const uint8_t peers_public_value[X25519_KEY_LENGTH]);
+
+/*
+ * ED25519
+ */
+void ED25519_keypair(uint8_t out_public_key[32], uint8_t out_private_key[64]);
+int ED25519_sign(uint8_t *out_sig, const uint8_t *message, size_t message_len,
+ const uint8_t private_key[64]);
+int ED25519_verify(const uint8_t *message, size_t message_len,
+ const uint8_t signature[64], const uint8_t public_key[32]);
+
+#if defined(__cplusplus)
+} /* extern C */
+#endif
+
+#endif /* HEADER_CURVE25519_H */
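
A hedged usage sketch of the X25519 functions declared above: each side generates a keypair, exchanges public values, and derives the same shared secret, which (per the comment above) should be fed to a KDF rather than used directly. The wrapper name is illustrative:

#include <stdint.h>

#include <openssl/curve25519.h>

/* Returns 1 on success, 0 on error, as X25519() itself does. */
static int
demo_exchange(uint8_t shared_a[X25519_KEY_LENGTH],
    uint8_t shared_b[X25519_KEY_LENGTH])
{
	uint8_t a_pub[X25519_KEY_LENGTH], a_priv[X25519_KEY_LENGTH];
	uint8_t b_pub[X25519_KEY_LENGTH], b_priv[X25519_KEY_LENGTH];

	X25519_keypair(a_pub, a_priv);
	X25519_keypair(b_pub, b_priv);

	/* Both sides compute the same 32-byte secret. */
	return X25519(shared_a, a_priv, b_pub) &&
	    X25519(shared_b, b_priv, a_pub);
}
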
diff --git a/crypto/libressl/include/openssl/opensslconf.h b/crypto/libressl/include/openssl/opensslconf.h
new file mode 100644
index 0000000..bb71768
--- /dev/null
+++ b/crypto/libressl/include/openssl/opensslconf.h
@@ -0,0 +1,153 @@
+#include <openssl/opensslfeatures.h>
+/* crypto/opensslconf.h.in */
+
+#if defined(_MSC_VER) && !defined(__attribute__)
+#define __attribute__(a)
+#endif
+
+#if defined(HEADER_CRYPTLIB_H) && !defined(OPENSSLDIR)
+#define OPENSSLDIR "/etc/ssl"
+#endif
+
+#undef OPENSSL_UNISTD
+#define OPENSSL_UNISTD <unistd.h>
+
+#undef OPENSSL_EXPORT_VAR_AS_FUNCTION
+
+#if defined(HEADER_IDEA_H) && !defined(IDEA_INT)
+#define IDEA_INT unsigned int
+#endif
+
+#if defined(HEADER_MD2_H) && !defined(MD2_INT)
+#define MD2_INT unsigned int
+#endif
+
+#if defined(HEADER_RC2_H) && !defined(RC2_INT)
+/* I need to put in a mod for the alpha - eay */
+#define RC2_INT unsigned int
+#endif
+
+#if defined(HEADER_RC4_H)
+#if !defined(RC4_INT)
+/* using int types makes the structure larger but makes the code faster
+ * on most boxes I have tested - up to 20% faster. */
+/*
+ * I don't know what "most" means here, but declaring "int" is a must on:
+ * - Intel P6 because partial register stalls are very expensive;
+ * - elder Alpha because it lacks byte load/store instructions;
+ */
+#define RC4_INT unsigned int
+#endif
+#if !defined(RC4_CHUNK)
+/*
+ * This enables code handling data aligned at natural CPU word
+ * boundary. See crypto/rc4/rc4_enc.c for further details.
+ */
+#define RC4_CHUNK unsigned long
+#endif
+#endif
+
+#if (defined(HEADER_NEW_DES_H) || defined(HEADER_DES_H)) && !defined(DES_LONG)
+/* If this is set to 'unsigned int' on a DEC Alpha, this gives about a
+ * 20% speed up (longs are 8 bytes, ints are 4). */
+#ifndef DES_LONG
+#define DES_LONG unsigned int
+#endif
+#endif
+
+#if defined(HEADER_BN_H) && !defined(CONFIG_HEADER_BN_H)
+#define CONFIG_HEADER_BN_H
+#undef BN_LLONG
+
+/* Should we define BN_DIV2W here? */
+
+/* Only one for the following should be defined */
+#define SIXTY_FOUR_BIT_LONG
+#undef SIXTY_FOUR_BIT
+#undef THIRTY_TWO_BIT
+#endif
+
+#if defined(HEADER_RC4_LOCL_H) && !defined(CONFIG_HEADER_RC4_LOCL_H)
+#define CONFIG_HEADER_RC4_LOCL_H
+/* if this is defined, data[i] is used instead of *data; this is a 20%
+ * speedup on x86 */
+#undef RC4_INDEX
+#endif
+
+#if defined(HEADER_BF_LOCL_H) && !defined(CONFIG_HEADER_BF_LOCL_H)
+#define CONFIG_HEADER_BF_LOCL_H
+#undef BF_PTR
+#endif /* HEADER_BF_LOCL_H */
+
+#if defined(HEADER_DES_LOCL_H) && !defined(CONFIG_HEADER_DES_LOCL_H)
+#define CONFIG_HEADER_DES_LOCL_H
+#ifndef DES_DEFAULT_OPTIONS
+/* the following is tweaked from a config script, which is why it is a
+ * protected undef/define */
+#ifndef DES_PTR
+#undef DES_PTR
+#endif
+
+/* This helps the C compiler generate the correct code for multiple
+ * functional units. It reduces register dependencies at the expense of
+ * 2 more registers */
+#ifndef DES_RISC1
+#undef DES_RISC1
+#endif
+
+#ifndef DES_RISC2
+#undef DES_RISC2
+#endif
+
+#if defined(DES_RISC1) && defined(DES_RISC2)
+YOU SHOULD NOT HAVE BOTH DES_RISC1 AND DES_RISC2 DEFINED!!!!!
+#endif
+
+/* Unroll the inner loop; this sometimes helps, sometimes hinders.
+ * Very much CPU dependent */
+#ifndef DES_UNROLL
+#define DES_UNROLL
+#endif
+
+/* These default values were supplied by
+ * Peter Gutman <pgut001@cs.auckland.ac.nz>
+ * They are only used if nothing else has been defined */
+#if !defined(DES_PTR) && !defined(DES_RISC1) && !defined(DES_RISC2) && !defined(DES_UNROLL)
+/* Special defines which change the way the code is built depending on the
+ CPU and OS. For SGI machines you can use _MIPS_SZLONG (32 or 64) to find
+ even newer MIPS CPU's, but at the moment one size fits all for
+ optimization options. Older Sparc's work better with only UNROLL, but
+ there's no way to tell at compile time what it is you're running on */
+
+#if defined( sun ) /* Newer Sparc's */
+# define DES_PTR
+# define DES_RISC1
+# define DES_UNROLL
+#elif defined( __ultrix ) /* Older MIPS */
+# define DES_PTR
+# define DES_RISC2
+# define DES_UNROLL
+#elif defined( __osf1__ ) /* Alpha */
+# define DES_PTR
+# define DES_RISC2
+#elif defined ( _AIX ) /* RS6000 */
+ /* Unknown */
+#elif defined( __hpux ) /* HP-PA */
+ /* Unknown */
+#elif defined( __aux ) /* 68K */
+ /* Unknown */
+#elif defined( __dgux ) /* 88K (but P6 in latest boxes) */
+# define DES_UNROLL
+#elif defined( __sgi ) /* Newer MIPS */
+# define DES_PTR
+# define DES_RISC2
+# define DES_UNROLL
+#elif defined(i386) || defined(__i386__) /* x86 boxes, should be gcc */
+# define DES_PTR
+# define DES_RISC1
+# define DES_UNROLL
+#endif /* Systems-specific speed defines */
+#endif
+
+#endif /* DES_DEFAULT_OPTIONS */
+#endif /* HEADER_DES_LOCL_H */
diff --git a/crypto/libressl/include/openssl/opensslfeatures.h b/crypto/libressl/include/openssl/opensslfeatures.h
new file mode 100644
index 0000000..ba80520
--- /dev/null
+++ b/crypto/libressl/include/openssl/opensslfeatures.h
@@ -0,0 +1,120 @@
+/*
+ * Feature flags for LibreSSL... so you can actually tell when things
+ * are enabled, rather than not being able to tell when things are
+ * enabled (or possibly not yet implemented, or removed!).
+ */
+#define LIBRESSL_HAS_TLS1_3
+#define LIBRESSL_HAS_DTLS1_2
+
+#define OPENSSL_THREADS
+
+#define OPENSSL_NO_BUF_FREELISTS
+#define OPENSSL_NO_GMP
+#define OPENSSL_NO_JPAKE
+#define OPENSSL_NO_KRB5
+#define OPENSSL_NO_RSAX
+#define OPENSSL_NO_SHA0
+#define OPENSSL_NO_SSL2
+#define OPENSSL_NO_STORE
+
+/*
+ * OPENSSL_NO_* flags that currently appear in OpenSSL.
+ */
+
+/* #define OPENSSL_NO_AFALGENG */
+/* #define OPENSSL_NO_ALGORITHMS */
+/* #define OPENSSL_NO_ARIA */
+/* #define OPENSSL_NO_ASM */
+#define OPENSSL_NO_ASYNC
+/* #define OPENSSL_NO_AUTOALGINIT */
+/* #define OPENSSL_NO_AUTOERRINIT */
+/* #define OPENSSL_NO_AUTOLOAD_CONFIG */
+/* #define OPENSSL_NO_BF */
+/* #define OPENSSL_NO_BLAKE2 */
+/* #define OPENSSL_NO_CAMELLIA */
+/* #define OPENSSL_NO_CAPIENG */
+/* #define OPENSSL_NO_CAST */
+/* #define OPENSSL_NO_CHACHA */
+/* #define OPENSSL_NO_CMAC */
+/* #define OPENSSL_NO_CMS */
+#define OPENSSL_NO_COMP /* XXX */
+/* #define OPENSSL_NO_CRYPTO_MDEBUG */
+/* #define OPENSSL_NO_CRYPTO_MDEBUG_BACKTRACE */
+/* #define OPENSSL_NO_CT */
+/* #define OPENSSL_NO_DECC_INIT */
+/* #define OPENSSL_NO_DES */
+/* #define OPENSSL_NO_DEVCRYPTOENG */
+/* #define OPENSSL_NO_DGRAM */
+/* #define OPENSSL_NO_DH */
+/* #define OPENSSL_NO_DSA */
+/* #define OPENSSL_NO_DSO */
+/* #define OPENSSL_NO_DTLS */
+/* #define OPENSSL_NO_DTLS1 */
+/* #define OPENSSL_NO_DTLS1_2 */
+/* #define OPENSSL_NO_DTLS1_2_METHOD */
+/* #define OPENSSL_NO_DTLS1_METHOD */
+#define OPENSSL_NO_DYNAMIC_ENGINE
+/* #define OPENSSL_NO_EC */
+/* #define OPENSSL_NO_EC2M */
+#define OPENSSL_NO_EC_NISTP_64_GCC_128
+#define OPENSSL_NO_EGD
+/* #define OPENSSL_NO_ENGINE */
+/* #define OPENSSL_NO_ERR */
+/* #define OPENSSL_NO_FUZZ_AFL */
+/* #define OPENSSL_NO_FUZZ_LIBFUZZER */
+/* #define OPENSSL_NO_GOST */
+#define OPENSSL_NO_HEARTBEATS
+/* #define OPENSSL_NO_HW */
+/* #define OPENSSL_NO_HW_PADLOCK */
+/* #define OPENSSL_NO_IDEA */
+/* #define OPENSSL_NO_INLINE_ASM */
+#define OPENSSL_NO_MD2
+/* #define OPENSSL_NO_MD4 */
+/* #define OPENSSL_NO_MD5 */
+#define OPENSSL_NO_MDC2
+/* #define OPENSSL_NO_MULTIBLOCK */
+/* #define OPENSSL_NO_NEXTPROTONEG */
+/* #define OPENSSL_NO_OCB */
+/* #define OPENSSL_NO_OCSP */
+/* #define OPENSSL_NO_PINSHARED */
+/* #define OPENSSL_NO_POLY1305 */
+/* #define OPENSSL_NO_POSIX_IO */
+#define OPENSSL_NO_PSK
+/* #define OPENSSL_NO_RC2 */
+/* #define OPENSSL_NO_RC4 */
+#define OPENSSL_NO_RC5
+/* #define OPENSSL_NO_RDRAND */
+#define OPENSSL_NO_RFC3779
+/* #define OPENSSL_NO_RMD160 */
+/* #define OPENSSL_NO_RSA */
+/* #define OPENSSL_NO_SCRYPT */
+#define OPENSSL_NO_SCTP
+/* #define OPENSSL_NO_SECURE_MEMORY */
+#define OPENSSL_NO_SEED
+/* #define OPENSSL_NO_SIPHASH */
+/* #define OPENSSL_NO_SM2 */
+/* #define OPENSSL_NO_SM3 */
+/* #define OPENSSL_NO_SM4 */
+/* #define OPENSSL_NO_SOCK */
+#define OPENSSL_NO_SRP
+/* #define OPENSSL_NO_SRTP */
+#define OPENSSL_NO_SSL3
+#define OPENSSL_NO_SSL3_METHOD
+#define OPENSSL_NO_SSL_TRACE
+/* #define OPENSSL_NO_STATIC_ENGINE */
+/* #define OPENSSL_NO_STDIO */
+/* #define OPENSSL_NO_TLS */
+/* #define OPENSSL_NO_TLS1 */
+/* #define OPENSSL_NO_TLS1_1 */
+/* #define OPENSSL_NO_TLS1_1_METHOD */
+/* #define OPENSSL_NO_TLS1_2 */
+/* #define OPENSSL_NO_TLS1_2_METHOD */
+#ifndef LIBRESSL_HAS_TLS1_3
+#define OPENSSL_NO_TLS1_3
+#endif
+/* #define OPENSSL_NO_TLS1_METHOD */
+/* #define OPENSSL_NO_TS */
+/* #define OPENSSL_NO_UI_CONSOLE */
+/* #define OPENSSL_NO_UNIT_TEST */
+/* #define OPENSSL_NO_WEAK_SSL_CIPHERS */
+/* #define OPENSSL_NO_WHIRLPOOL */
diff --git a/crypto/libressl/include/openssl/opensslv.h b/crypto/libressl/include/openssl/opensslv.h
new file mode 100644
index 0000000..e06b97e
--- /dev/null
+++ b/crypto/libressl/include/openssl/opensslv.h
@@ -0,0 +1,18 @@
+/* $OpenBSD: opensslv.h,v 1.66 2021/09/15 17:14:26 tb Exp $ */
+#ifndef HEADER_OPENSSLV_H
+#define HEADER_OPENSSLV_H
+
+/* These will change with each release of LibreSSL-portable */
+#define LIBRESSL_VERSION_NUMBER 0x3040200fL
+/* ^ Patch starts here */
+#define LIBRESSL_VERSION_TEXT "LibreSSL 3.4.2"
+
+/* These will never change */
+#define OPENSSL_VERSION_NUMBER 0x20000000L
+#define OPENSSL_VERSION_TEXT LIBRESSL_VERSION_TEXT
+#define OPENSSL_VERSION_PTEXT " part of " OPENSSL_VERSION_TEXT
+
+#define SHLIB_VERSION_HISTORY ""
+#define SHLIB_VERSION_NUMBER "1.0.0"
+
+#endif /* HEADER_OPENSSLV_H */
diff --git a/crypto/libressl/include/openssl/poly1305.h b/crypto/libressl/include/openssl/poly1305.h
new file mode 100644
index 0000000..00ab0bf
--- /dev/null
+++ b/crypto/libressl/include/openssl/poly1305.h
@@ -0,0 +1,49 @@
+/* $OpenBSD: poly1305.h,v 1.3 2014/07/25 14:04:51 jsing Exp $ */
+/*
+ * Copyright (c) 2014 Joel Sing <jsing@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef HEADER_POLY1305_H
+#define HEADER_POLY1305_H
+
+#include <openssl/opensslconf.h>
+
+#if defined(OPENSSL_NO_POLY1305)
+#error Poly1305 is disabled.
+#endif
+
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct poly1305_context {
+ size_t aligner;
+ unsigned char opaque[136];
+} poly1305_context;
+
+typedef struct poly1305_context poly1305_state;
+
+void CRYPTO_poly1305_init(poly1305_context *ctx, const unsigned char key[32]);
+void CRYPTO_poly1305_update(poly1305_context *ctx, const unsigned char *in,
+ size_t len);
+void CRYPTO_poly1305_finish(poly1305_context *ctx, unsigned char mac[16]);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* HEADER_POLY1305_H */
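
A minimal usage sketch for the one-time authenticator declared above; the 32-byte key must be unique per message (in an AEAD construction it is typically derived from the cipher's keystream). The wrapper name is illustrative:

#include <stddef.h>

#include <openssl/poly1305.h>

/* Compute a 16-byte tag over msg with a single-use 32-byte key. */
static void
poly1305_tag(unsigned char mac[16], const unsigned char *msg, size_t len,
    const unsigned char key[32])
{
	poly1305_context ctx;

	CRYPTO_poly1305_init(&ctx, key);
	CRYPTO_poly1305_update(&ctx, msg, len);
	CRYPTO_poly1305_finish(&ctx, mac);
}
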
diff --git a/crypto/libressl/include/openssl/sha.h b/crypto/libressl/include/openssl/sha.h
new file mode 100644
index 0000000..87fdf8d
--- /dev/null
+++ b/crypto/libressl/include/openssl/sha.h
@@ -0,0 +1,192 @@
+/* $OpenBSD: sha.h,v 1.21 2015/09/13 21:09:56 doug Exp $ */
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <stddef.h>
+
+#ifndef HEADER_SHA_H
+#define HEADER_SHA_H
+#if !defined(HAVE_ATTRIBUTE__BOUNDED__) && !defined(__OpenBSD__)
+#define __bounded__(x, y, z)
+#endif
+
+#include <openssl/opensslconf.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(OPENSSL_NO_SHA) || defined(OPENSSL_NO_SHA1)
+#error SHA is disabled.
+#endif
+
+/*
+ * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ * ! SHA_LONG has to be at least 32 bits wide. !
+ * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ */
+
+#define SHA_LONG unsigned int
+
+#define SHA_LBLOCK 16
+#define SHA_CBLOCK (SHA_LBLOCK*4) /* SHA treats input data as a
+ * contiguous array of 32 bit
+ * wide big-endian values. */
+#define SHA_LAST_BLOCK (SHA_CBLOCK-8)
+#define SHA_DIGEST_LENGTH 20
+
+typedef struct SHAstate_st
+ {
+ SHA_LONG h0,h1,h2,h3,h4;
+ SHA_LONG Nl,Nh;
+ SHA_LONG data[SHA_LBLOCK];
+ unsigned int num;
+ } SHA_CTX;
+
+#ifndef OPENSSL_NO_SHA1
+int SHA1_Init(SHA_CTX *c);
+int SHA1_Update(SHA_CTX *c, const void *data, size_t len)
+ __attribute__ ((__bounded__(__buffer__,2,3)));
+int SHA1_Final(unsigned char *md, SHA_CTX *c);
+unsigned char *SHA1(const unsigned char *d, size_t n, unsigned char *md)
+ __attribute__ ((__bounded__(__buffer__,1,2)));
+void SHA1_Transform(SHA_CTX *c, const unsigned char *data);
+#endif
+
+#define SHA256_CBLOCK (SHA_LBLOCK*4) /* SHA-256 treats input data as a
+ * contiguous array of 32 bit
+ * wide big-endian values. */
+#define SHA224_DIGEST_LENGTH 28
+#define SHA256_DIGEST_LENGTH 32
+
+typedef struct SHA256state_st
+ {
+ SHA_LONG h[8];
+ SHA_LONG Nl,Nh;
+ SHA_LONG data[SHA_LBLOCK];
+ unsigned int num,md_len;
+ } SHA256_CTX;
+
+#ifndef OPENSSL_NO_SHA256
+int SHA224_Init(SHA256_CTX *c);
+int SHA224_Update(SHA256_CTX *c, const void *data, size_t len)
+ __attribute__ ((__bounded__(__buffer__,2,3)));
+int SHA224_Final(unsigned char *md, SHA256_CTX *c);
+unsigned char *SHA224(const unsigned char *d, size_t n,unsigned char *md)
+ __attribute__ ((__bounded__(__buffer__,1,2)));
+int SHA256_Init(SHA256_CTX *c);
+int SHA256_Update(SHA256_CTX *c, const void *data, size_t len)
+ __attribute__ ((__bounded__(__buffer__,2,3)));
+int SHA256_Final(unsigned char *md, SHA256_CTX *c);
+unsigned char *SHA256(const unsigned char *d, size_t n,unsigned char *md)
+ __attribute__ ((__bounded__(__buffer__,1,2)));
+void SHA256_Transform(SHA256_CTX *c, const unsigned char *data);
+#endif
+
+#define SHA384_DIGEST_LENGTH 48
+#define SHA512_DIGEST_LENGTH 64
+
+#ifndef OPENSSL_NO_SHA512
+/*
+ * Unlike 32-bit digest algorithms, SHA-512 *relies* on SHA_LONG64
+ * being exactly 64-bit wide. See Implementation Notes in sha512.c
+ * for further details.
+ */
+#define SHA512_CBLOCK (SHA_LBLOCK*8) /* SHA-512 treats input data as a
+ * contiguous array of 64 bit
+ * wide big-endian values. */
+#if defined(_LP64)
+#define SHA_LONG64 unsigned long
+#define U64(C) C##UL
+#else
+#define SHA_LONG64 unsigned long long
+#define U64(C) C##ULL
+#endif
+
+typedef struct SHA512state_st
+ {
+ SHA_LONG64 h[8];
+ SHA_LONG64 Nl,Nh;
+ union {
+ SHA_LONG64 d[SHA_LBLOCK];
+ unsigned char p[SHA512_CBLOCK];
+ } u;
+ unsigned int num,md_len;
+ } SHA512_CTX;
+#endif
+
+#ifndef OPENSSL_NO_SHA512
+int SHA384_Init(SHA512_CTX *c);
+int SHA384_Update(SHA512_CTX *c, const void *data, size_t len)
+ __attribute__ ((__bounded__(__buffer__,2,3)));
+int SHA384_Final(unsigned char *md, SHA512_CTX *c);
+unsigned char *SHA384(const unsigned char *d, size_t n,unsigned char *md)
+ __attribute__ ((__bounded__(__buffer__,1,2)));
+int SHA512_Init(SHA512_CTX *c);
+int SHA512_Update(SHA512_CTX *c, const void *data, size_t len)
+ __attribute__ ((__bounded__(__buffer__,2,3)));
+int SHA512_Final(unsigned char *md, SHA512_CTX *c);
+unsigned char *SHA512(const unsigned char *d, size_t n,unsigned char *md)
+ __attribute__ ((__bounded__(__buffer__,1,2)));
+void SHA512_Transform(SHA512_CTX *c, const unsigned char *data);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
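
A minimal usage sketch of the streaming SHA-256 interface declared above; the one-shot SHA256() helper would do the same in a single call. The wrapper name is illustrative:

#include <stddef.h>

#include <openssl/sha.h>

/* Hash two buffers as one message into a 32-byte digest. */
static void
sha256_two_parts(unsigned char md[SHA256_DIGEST_LENGTH],
    const void *p1, size_t n1, const void *p2, size_t n2)
{
	SHA256_CTX ctx;

	SHA256_Init(&ctx);
	SHA256_Update(&ctx, p1, n1);
	SHA256_Update(&ctx, p2, n2);
	SHA256_Final(md, &ctx);
}
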
diff --git a/crypto/libressl/ssl_common.mk b/crypto/libressl/ssl_common.mk
new file mode 100644
index 0000000..e004457
--- /dev/null
+++ b/crypto/libressl/ssl_common.mk
@@ -0,0 +1,7 @@
+include common.mk
+
+CFLAGS += -I../../include/compat -I../../include
+# exclude from include/compat/string.h
+CFLAGS += -DHAVE_STRCASECMP -DHAVE_STRLCPY -DHAVE_STRLCAT -DHAVE_STRNDUP -DHAVE_STRNLEN -DHAVE_STRSEP -DHAVE_MEMMEM
+# exclude from include/compat/unistd.h
+CFLAGS += -DHAVE_GETPAGESIZE -DHAVE_PIPE2
diff --git a/crypto/libressl/ssl_obj.mk b/crypto/libressl/ssl_obj.mk
new file mode 100644
index 0000000..8e99414
--- /dev/null
+++ b/crypto/libressl/ssl_obj.mk
@@ -0,0 +1,8 @@
+obj_dep_ = curve25519/curve25519.o curve25519/curve25519-generic.o \
+ chacha/chacha.o poly1305/poly1305.o aead/e_chacha20poly1305.o \
+ sha/sha256.o sha/sha512.o \
+ compat/arc4random.o compat/explicit_bzero.o compat/timingsafe_memcmp.o compat/timingsafe_bcmp.o
+obj_dep = $(addprefix crypto/,$(obj_dep_))
+
+subdirs_ = curve25519 chacha poly1305 aead sha compat
+subdirs = $(addprefix crypto/,$(subdirs_))
diff --git a/crypto/sha/Makefile b/crypto/sha/Makefile
new file mode 100644
index 0000000..ee67f3e
--- /dev/null
+++ b/crypto/sha/Makefile
@@ -0,0 +1,15 @@
+include common.mk
+
+obj = sha1.o
+
+
+%.o: %.c %.h
+ $(CC) $(CFLAGS) -c $<
+
+%.o: %.S
+ $(CC) $(CFLAGS) -c $<
+
+all: $(obj)
+
+clean:
+ rm -f *.o