ChaCha20-Poly1305 support.

This adds the ChaCha20 core: a portable, generic C implementation, a
vectorized implementation (SSE2 on x86/x86-64, NEON on ARM), and a
pre-compiled copy of the vector code for ARM so that NEON support can be
selected at run time.
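
As a minimal usage sketch (the wrapper name is hypothetical; the function
itself is declared in crypto/chacha/chacha.h below):

    #include <openssl/chacha.h>

    /* chacha20_xor encrypts (or, identically, decrypts) |len| bytes from
     * |in| to |out| under a 256-bit key and a 64-bit nonce, starting at
     * block counter zero. |out| may equal |in| for in-place operation. */
    static void chacha20_xor(uint8_t *out, const uint8_t *in, size_t len,
                             const uint8_t key[32], const uint8_t nonce[8]) {
      CRYPTO_chacha_20(out, in, len, key, nonce, 0);
    }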
diff --git a/crypto/chacha/CMakeLists.txt b/crypto/chacha/CMakeLists.txt
new file mode 100644
index 0000000..d23ecb1
--- /dev/null
+++ b/crypto/chacha/CMakeLists.txt
@@ -0,0 +1,20 @@
+include_directories(. .. ../../include)
+
+if (${ARCH} STREQUAL "arm")
+	set(
+		CHACHA_ARCH_SOURCES
+
+		chacha_vec_arm.S
+	)
+endif()
+
+add_library(
+	chacha
+
+	OBJECT
+
+	chacha_generic.c
+	chacha_vec.c
+
+	${CHACHA_ARCH_SOURCES}
+)
diff --git a/crypto/chacha/chacha.h b/crypto/chacha/chacha.h
new file mode 100644
index 0000000..ce53d49
--- /dev/null
+++ b/crypto/chacha/chacha.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2014, Google Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
+
+#ifndef OPENSSL_HEADER_CHACHA_H
+#define OPENSSL_HEADER_CHACHA_H
+
+#include <openssl/base.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+
+/* CRYPTO_chacha_20 encrypts |in_len| bytes from |in| with the given key and
+ * nonce and writes the result to |out|, which may be equal to |in|. The
+ * initial block counter is specified by |counter|. */
+void CRYPTO_chacha_20(uint8_t *out, const uint8_t *in,
+                      size_t in_len, const uint8_t key[32],
+                      const uint8_t nonce[8], size_t counter);
+
+
+#if defined(__cplusplus)
+}  /* extern C */
+#endif
+
+#endif  /* OPENSSL_HEADER_CHACHA_H */
diff --git a/crypto/chacha/chacha_generic.c b/crypto/chacha/chacha_generic.c
new file mode 100644
index 0000000..1e5b70d
--- /dev/null
+++ b/crypto/chacha/chacha_generic.c
@@ -0,0 +1,141 @@
+/* Copyright (c) 2014, Google Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
+
+/* Adapted from the public domain, estream code by D. Bernstein. */
+
+#include <openssl/chacha.h>
+
+#include <string.h>
+
+#include <openssl/cpu.h>
+
+#if defined(OPENSSL_WINDOWS) || (!defined(OPENSSL_X86_64) && !defined(OPENSSL_X86))
+
+/* sigma contains the ChaCha constants, which happen to be an ASCII string. */
+static const char sigma[16] = "expand 32-byte k";
+
+#define ROTATE(v, n) (((v) << (n)) | ((v) >> (32 - (n))))
+#define XOR(v, w) ((v) ^ (w))
+#define PLUS(x, y) ((x) + (y))
+#define PLUSONE(v) (PLUS((v), 1))
+
+#define U32TO8_LITTLE(p, v)    \
+  {                            \
+    (p)[0] = (v >> 0) & 0xff;  \
+    (p)[1] = (v >> 8) & 0xff;  \
+    (p)[2] = (v >> 16) & 0xff; \
+    (p)[3] = (v >> 24) & 0xff; \
+  }
+
+#define U8TO32_LITTLE(p)                              \
+  (((uint32_t)((p)[0])) | ((uint32_t)((p)[1]) << 8) | \
+   ((uint32_t)((p)[2]) << 16) | ((uint32_t)((p)[3]) << 24))
+
+/* QUARTERROUND updates a, b, c, d with a ChaCha "quarter" round: four
+ * add-xor-rotate steps using the ChaCha rotation distances 16, 12, 8 and 7.
+ * A double round applies it to the four columns of the 4x4 state and then
+ * to the four diagonals. */
+#define QUARTERROUND(a,b,c,d) \
+  x[a] = PLUS(x[a],x[b]); x[d] = ROTATE(XOR(x[d],x[a]),16); \
+  x[c] = PLUS(x[c],x[d]); x[b] = ROTATE(XOR(x[b],x[c]),12); \
+  x[a] = PLUS(x[a],x[b]); x[d] = ROTATE(XOR(x[d],x[a]), 8); \
+  x[c] = PLUS(x[c],x[d]); x[b] = ROTATE(XOR(x[b],x[c]), 7);
+
+#if defined(OPENSSL_ARM)
+/* Defined in chacha_vec.c (pre-compiled as chacha_vec_arm.S for ARM). */
+void CRYPTO_chacha_20_neon(uint8_t *out, const uint8_t *in, size_t in_len,
+                           const uint8_t key[32], const uint8_t nonce[8],
+                           size_t counter);
+#endif
+
+/* chacha_core performs |num_rounds| rounds of ChaCha20 on the input words in
+ * |input| and writes the 64 output bytes to |output|. */
+static void chacha_core(uint8_t output[64], const uint32_t input[16],
+                        int num_rounds) {
+  uint32_t x[16];
+  int i;
+
+  memcpy(x, input, sizeof(uint32_t) * 16);
+  for (i = num_rounds; i > 0; i -= 2) {
+    QUARTERROUND(0, 4, 8, 12)
+    QUARTERROUND(1, 5, 9, 13)
+    QUARTERROUND(2, 6, 10, 14)
+    QUARTERROUND(3, 7, 11, 15)
+    QUARTERROUND(0, 5, 10, 15)
+    QUARTERROUND(1, 6, 11, 12)
+    QUARTERROUND(2, 7, 8, 13)
+    QUARTERROUND(3, 4, 9, 14)
+  }
+
+  for (i = 0; i < 16; ++i) {
+    x[i] = PLUS(x[i], input[i]);
+  }
+  for (i = 0; i < 16; ++i) {
+    U32TO8_LITTLE(output + 4 * i, x[i]);
+  }
+}
+
+void CRYPTO_chacha_20(uint8_t *out, const uint8_t *in, size_t in_len,
+                      const uint8_t key[32], const uint8_t nonce[8],
+                      size_t counter) {
+  uint32_t input[16];
+  uint8_t buf[64];
+  size_t todo, i;
+
+#if defined(OPENSSL_ARM)
+  if (CRYPTO_is_NEON_capable() && ((intptr_t)in & 15) == 0 &&
+      ((intptr_t)out & 15) == 0) {
+    CRYPTO_chacha_20_neon(out, in, in_len, key, nonce, counter);
+    return;
+  }
+#endif
+
+  input[0] = U8TO32_LITTLE(sigma + 0);
+  input[1] = U8TO32_LITTLE(sigma + 4);
+  input[2] = U8TO32_LITTLE(sigma + 8);
+  input[3] = U8TO32_LITTLE(sigma + 12);
+
+  input[4] = U8TO32_LITTLE(key + 0);
+  input[5] = U8TO32_LITTLE(key + 4);
+  input[6] = U8TO32_LITTLE(key + 8);
+  input[7] = U8TO32_LITTLE(key + 12);
+
+  input[8] = U8TO32_LITTLE(key + 16);
+  input[9] = U8TO32_LITTLE(key + 20);
+  input[10] = U8TO32_LITTLE(key + 24);
+  input[11] = U8TO32_LITTLE(key + 28);
+
+  input[12] = counter;
+  input[13] = ((uint64_t)counter) >> 32;
+  input[14] = U8TO32_LITTLE(nonce + 0);
+  input[15] = U8TO32_LITTLE(nonce + 4);
+
+  while (in_len > 0) {
+    todo = sizeof(buf);
+    if (in_len < todo) {
+      todo = in_len;
+    }
+
+    chacha_core(buf, input, 20);
+    for (i = 0; i < todo; i++) {
+      out[i] = in[i] ^ buf[i];
+    }
+
+    out += todo;
+    in += todo;
+    in_len -= todo;
+
+    input[12]++;
+    if (input[12] == 0) {
+      input[13]++;
+    }
+  }
+}
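+
+/* Known-answer reference: with an all-zero key, an all-zero nonce and
+ * counter zero, the first 32 keystream bytes (i.e. the output for all-zero
+ * input) are:
+ *   76 b8 e0 ad a0 f1 3d 90 40 5d 6a e5 53 86 bd 28
+ *   bd d2 19 b8 a0 8d ed 1a a8 36 ef cc 8b 77 0d c7 */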
+
+#endif /* OPENSSL_WINDOWS || (!OPENSSL_X86_64 && !OPENSSL_X86) */
diff --git a/crypto/chacha/chacha_vec.c b/crypto/chacha/chacha_vec.c
new file mode 100644
index 0000000..d06d1dd
--- /dev/null
+++ b/crypto/chacha/chacha_vec.c
@@ -0,0 +1,329 @@
+/* Copyright (c) 2014, Google Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
+
+/* ====================================================================
+ *
+ * When updating this file, also update chacha_vec_arm.S
+ *
+ * ==================================================================== */
+
+
+/* This implementation is by Ted Krovetz and was submitted to SUPERCOP and
+ * marked as public domain. It has been altered to allow for non-aligned
+ * inputs and to allow the block counter to be passed in specifically. */
+
+#include <openssl/chacha.h>
+
+#if !defined(OPENSSL_WINDOWS) && (defined(OPENSSL_X86_64) || defined(OPENSSL_X86))
+
+#define CHACHA_RNDS 20 /* 8 (high speed), 20 (conservative), 12 (middle) */
+
+/* Architecture-neutral way to specify 16-byte vector of ints              */
+typedef unsigned vec __attribute__((vector_size(16)));
+
+/* This implementation is designed for Neon, SSE and AltiVec machines. The
+ * following specify how to do certain vector operations efficiently on
+ * each architecture, using intrinsics.
+ * This implementation supports parallel processing of multiple blocks,
+ * including potentially using general-purpose registers. */
+#if __ARM_NEON__
+#include <arm_neon.h>
+#define GPR_TOO 1 /* compute one extra block in general-purpose registers */
+#define VBPI 2    /* vector blocks processed per loop iteration */
+#define ONE (vec) vsetq_lane_u32(1, vdupq_n_u32(0), 0)
+#define LOAD(m) (vec)(*((vec *)(m)))
+#define STORE(m, r) (*((vec *)(m))) = (r)
+#define ROTV1(x) (vec) vextq_u32((uint32x4_t)x, (uint32x4_t)x, 1)
+#define ROTV2(x) (vec) vextq_u32((uint32x4_t)x, (uint32x4_t)x, 2)
+#define ROTV3(x) (vec) vextq_u32((uint32x4_t)x, (uint32x4_t)x, 3)
+#define ROTW16(x) (vec) vrev32q_u16((uint16x8_t)x)
+#if __clang__
+#define ROTW7(x) (x << ((vec) {7, 7, 7, 7})) ^ (x >> ((vec) {25, 25, 25, 25}))
+#define ROTW8(x) (x << ((vec) {8, 8, 8, 8})) ^ (x >> ((vec) {24, 24, 24, 24}))
+#define ROTW12(x) \
+  (x << ((vec) {12, 12, 12, 12})) ^ (x >> ((vec) {20, 20, 20, 20}))
+#else
+#define ROTW7(x) \
+  (vec) vsriq_n_u32(vshlq_n_u32((uint32x4_t)x, 7), (uint32x4_t)x, 25)
+#define ROTW8(x) \
+  (vec) vsriq_n_u32(vshlq_n_u32((uint32x4_t)x, 8), (uint32x4_t)x, 24)
+#define ROTW12(x) \
+  (vec) vsriq_n_u32(vshlq_n_u32((uint32x4_t)x, 12), (uint32x4_t)x, 20)
+#endif
+#elif __SSE2__
+#include <emmintrin.h>
+#define GPR_TOO 0
+#if __clang__
+#define VBPI 4
+#else
+#define VBPI 3
+#endif
+#define ONE (vec) _mm_set_epi32(0, 0, 0, 1)
+#define LOAD(m) (vec) _mm_loadu_si128((__m128i *)(m))
+#define STORE(m, r) _mm_storeu_si128((__m128i *)(m), (__m128i)(r))
+#define ROTV1(x) (vec) _mm_shuffle_epi32((__m128i)x, _MM_SHUFFLE(0, 3, 2, 1))
+#define ROTV2(x) (vec) _mm_shuffle_epi32((__m128i)x, _MM_SHUFFLE(1, 0, 3, 2))
+#define ROTV3(x) (vec) _mm_shuffle_epi32((__m128i)x, _MM_SHUFFLE(2, 1, 0, 3))
+#define ROTW7(x) \
+  (vec)(_mm_slli_epi32((__m128i)x, 7) ^ _mm_srli_epi32((__m128i)x, 25))
+#define ROTW12(x) \
+  (vec)(_mm_slli_epi32((__m128i)x, 12) ^ _mm_srli_epi32((__m128i)x, 20))
+#if __SSSE3__
+#include <tmmintrin.h>
+#define ROTW8(x)                                                            \
+  (vec) _mm_shuffle_epi8((__m128i)x, _mm_set_epi8(14, 13, 12, 15, 10, 9, 8, \
+                                                  11, 6, 5, 4, 7, 2, 1, 0, 3))
+#define ROTW16(x)                                                           \
+  (vec) _mm_shuffle_epi8((__m128i)x, _mm_set_epi8(13, 12, 15, 14, 9, 8, 11, \
+                                                  10, 5, 4, 7, 6, 1, 0, 3, 2))
+#else
+#define ROTW8(x) \
+  (vec)(_mm_slli_epi32((__m128i)x, 8) ^ _mm_srli_epi32((__m128i)x, 24))
+#define ROTW16(x) \
+  (vec)(_mm_slli_epi32((__m128i)x, 16) ^ _mm_srli_epi32((__m128i)x, 16))
+#endif
+#else
+#error -- Implementation supports only machines with NEON or SSE2
+#endif
+
+#ifndef REVV_BE
+#define REVV_BE(x)  (x)
+#endif
+
+#ifndef REVW_BE
+#define REVW_BE(x)  (x)
+#endif
+
+#define BPI      (VBPI + GPR_TOO)  /* Blocks computed per loop iteration   */
+
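+/* DQROUND_VECTORS performs one ChaCha double round (a column round followed
+ * by a diagonal round) on a full 4x4 state held in the four vectors a, b, c
+ * and d. The ROTV1/ROTV2/ROTV3 lane rotations realign b, c and d so that
+ * the diagonal round can reuse the column-round code. */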
+#define DQROUND_VECTORS(a,b,c,d)                \
+    a += b; d ^= a; d = ROTW16(d);              \
+    c += d; b ^= c; b = ROTW12(b);              \
+    a += b; d ^= a; d = ROTW8(d);               \
+    c += d; b ^= c; b = ROTW7(b);               \
+    b = ROTV1(b); c = ROTV2(c);  d = ROTV3(d);  \
+    a += b; d ^= a; d = ROTW16(d);              \
+    c += d; b ^= c; b = ROTW12(b);              \
+    a += b; d ^= a; d = ROTW8(d);               \
+    c += d; b ^= c; b = ROTW7(b);               \
+    b = ROTV3(b); c = ROTV2(c); d = ROTV1(d);
+
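+/* QROUND_WORDS is the same quarter round on scalar words; it processes the
+ * extra block kept in general-purpose registers when GPR_TOO is set. */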
+#define QROUND_WORDS(a,b,c,d) \
+  a = a+b; d ^= a; d = d<<16 | d>>16; \
+  c = c+d; b ^= c; b = b<<12 | b>>20; \
+  a = a+b; d ^= a; d = d<< 8 | d>>24; \
+  c = c+d; b ^= c; b = b<< 7 | b>>25;
+
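+/* WRITE_XOR XORs one 64-byte keystream block (the four vectors v0..v3) into
+ * the input at 32-bit-word offset |d| and stores the result to the output. */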
+#define WRITE_XOR(in, op, d, v0, v1, v2, v3)                   \
+	STORE(op + d + 0, LOAD(in + d + 0) ^ REVV_BE(v0));      \
+	STORE(op + d + 4, LOAD(in + d + 4) ^ REVV_BE(v1));      \
+	STORE(op + d + 8, LOAD(in + d + 8) ^ REVV_BE(v2));      \
+	STORE(op + d +12, LOAD(in + d +12) ^ REVV_BE(v3));
+
+#if __ARM_NEON__
+/* On ARM, NEON support cannot be assumed at compile time, so this function
+ * is built under a different name, alongside the generic code, and is
+ * selected at run time. */
+void CRYPTO_chacha_20_neon(
+#else
+void CRYPTO_chacha_20(
+#endif
+	uint8_t *out,
+	const uint8_t *in,
+	size_t inlen,
+	const uint8_t key[32],
+	const uint8_t nonce[8],
+	size_t counter)
+	{
+	unsigned iters, i, *op=(unsigned *)out, *ip=(unsigned *)in, *kp;
+#if defined(__ARM_NEON__)
+	unsigned *np;
+#endif
+	vec s0, s1, s2, s3;
+#if !defined(__ARM_NEON__) && !defined(__SSE2__)
+	/* Leftover from the AltiVec original; never compiled here because the
+	 * #error above rejects targets without NEON or SSE2. */
+	__attribute__ ((aligned (16))) unsigned key[8], nonce[4];
+#endif
+	__attribute__ ((aligned (16))) unsigned chacha_const[] =
+		{0x61707865,0x3320646E,0x79622D32,0x6B206574};
+#if defined(__ARM_NEON__) || defined(__SSE2__)
+	kp = (unsigned *)key;
+#else
+	((vec *)key)[0] = REVV_BE(((vec *)key)[0]);
+	((vec *)key)[1] = REVV_BE(((vec *)key)[1]);
+	nonce[0] = REVW_BE(((unsigned *)nonce)[0]);
+	nonce[1] = REVW_BE(((unsigned *)nonce)[1]);
+	nonce[2] = REVW_BE(((unsigned *)nonce)[2]);
+	nonce[3] = REVW_BE(((unsigned *)nonce)[3]);
+	kp = (unsigned *)key;
+	np = (unsigned *)nonce;
+#endif
+#if defined(__ARM_NEON__)
+	np = (unsigned*) nonce;
+#endif
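+	/* The ChaCha state: s0 holds the "expand 32-byte k" constants, s1 and
+	 * s2 hold the 256-bit key, and s3 holds the block counter followed by
+	 * the 64-bit nonce. */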
+	s0 = LOAD(chacha_const);
+	s1 = LOAD(&((vec*)kp)[0]);
+	s2 = LOAD(&((vec*)kp)[1]);
+	s3 = (vec){
+		counter & 0xffffffff,
+#if __ARM_NEON__ || defined(OPENSSL_X86)
+		0,  /* can't right-shift 32 bits on a 32-bit system. */
+#else
+		counter >> 32,
+#endif
+		((uint32_t*)nonce)[0],
+		((uint32_t*)nonce)[1]
+	};
+
+	for (iters = 0; iters < inlen/(BPI*64); iters++)
+		{
+#if GPR_TOO
+		register unsigned x0, x1, x2, x3, x4, x5, x6, x7, x8,
+				  x9, x10, x11, x12, x13, x14, x15;
+#endif
+#if VBPI > 2
+		vec v8,v9,v10,v11;
+#endif
+#if VBPI > 3
+		vec v12,v13,v14,v15;
+#endif
+
+		vec v0,v1,v2,v3,v4,v5,v6,v7;
+		v4 = v0 = s0; v5 = v1 = s1; v6 = v2 = s2; v3 = s3;
+		v7 = v3 + ONE;
+#if VBPI > 2
+		v8 = v4; v9 = v5; v10 = v6;
+		v11 =  v7 + ONE;
+#endif
+#if VBPI > 3
+		v12 = v8; v13 = v9; v14 = v10;
+		v15 = v11 + ONE;
+#endif
+#if GPR_TOO
+		x0 = chacha_const[0]; x1 = chacha_const[1];
+		x2 = chacha_const[2]; x3 = chacha_const[3];
+		x4 = kp[0]; x5 = kp[1]; x6  = kp[2]; x7  = kp[3];
+		x8 = kp[4]; x9 = kp[5]; x10 = kp[6]; x11 = kp[7];
+		x12 = counter+BPI*iters+(BPI-1); x13 = 0;
+		x14 = np[0]; x15 = np[1];
+#endif
+		for (i = CHACHA_RNDS/2; i; i--)
+			{
+			DQROUND_VECTORS(v0,v1,v2,v3)
+			DQROUND_VECTORS(v4,v5,v6,v7)
+#if VBPI > 2
+			DQROUND_VECTORS(v8,v9,v10,v11)
+#endif
+#if VBPI > 3
+			DQROUND_VECTORS(v12,v13,v14,v15)
+#endif
+#if GPR_TOO
+			QROUND_WORDS( x0, x4, x8,x12)
+			QROUND_WORDS( x1, x5, x9,x13)
+			QROUND_WORDS( x2, x6,x10,x14)
+			QROUND_WORDS( x3, x7,x11,x15)
+			QROUND_WORDS( x0, x5,x10,x15)
+			QROUND_WORDS( x1, x6,x11,x12)
+			QROUND_WORDS( x2, x7, x8,x13)
+			QROUND_WORDS( x3, x4, x9,x14)
+#endif
+			}
+
+		WRITE_XOR(ip, op, 0, v0+s0, v1+s1, v2+s2, v3+s3)
+		s3 += ONE;
+		WRITE_XOR(ip, op, 16, v4+s0, v5+s1, v6+s2, v7+s3)
+		s3 += ONE;
+#if VBPI > 2
+		WRITE_XOR(ip, op, 32, v8+s0, v9+s1, v10+s2, v11+s3)
+		s3 += ONE;
+#endif
+#if VBPI > 3
+		WRITE_XOR(ip, op, 48, v12+s0, v13+s1, v14+s2, v15+s3)
+		s3 += ONE;
+#endif
+		ip += VBPI*16;
+		op += VBPI*16;
+#if GPR_TOO
+		op[0]  = REVW_BE(REVW_BE(ip[0])  ^ (x0  + chacha_const[0]));
+		op[1]  = REVW_BE(REVW_BE(ip[1])  ^ (x1  + chacha_const[1]));
+		op[2]  = REVW_BE(REVW_BE(ip[2])  ^ (x2  + chacha_const[2]));
+		op[3]  = REVW_BE(REVW_BE(ip[3])  ^ (x3  + chacha_const[3]));
+		op[4]  = REVW_BE(REVW_BE(ip[4])  ^ (x4  + kp[0]));
+		op[5]  = REVW_BE(REVW_BE(ip[5])  ^ (x5  + kp[1]));
+		op[6]  = REVW_BE(REVW_BE(ip[6])  ^ (x6  + kp[2]));
+		op[7]  = REVW_BE(REVW_BE(ip[7])  ^ (x7  + kp[3]));
+		op[8]  = REVW_BE(REVW_BE(ip[8])  ^ (x8  + kp[4]));
+		op[9]  = REVW_BE(REVW_BE(ip[9])  ^ (x9  + kp[5]));
+		op[10] = REVW_BE(REVW_BE(ip[10]) ^ (x10 + kp[6]));
+		op[11] = REVW_BE(REVW_BE(ip[11]) ^ (x11 + kp[7]));
+		op[12] = REVW_BE(REVW_BE(ip[12]) ^ (x12 + counter+BPI*iters+(BPI-1)));
+		op[13] = REVW_BE(REVW_BE(ip[13]) ^ (x13));
+		op[14] = REVW_BE(REVW_BE(ip[14]) ^ (x14 + np[0]));
+		op[15] = REVW_BE(REVW_BE(ip[15]) ^ (x15 + np[1]));
+		s3 += ONE;
+		ip += 16;
+		op += 16;
+#endif
+		}
+
+	for (iters = inlen%(BPI*64)/64; iters != 0; iters--)
+		{
+		vec v0 = s0, v1 = s1, v2 = s2, v3 = s3;
+		for (i = CHACHA_RNDS/2; i; i--)
+			{
+			DQROUND_VECTORS(v0,v1,v2,v3);
+			}
+		WRITE_XOR(ip, op, 0, v0+s0, v1+s1, v2+s2, v3+s3)
+		s3 += ONE;
+		ip += 16;
+		op += 16;
+		}
+
+	inlen = inlen % 64;
+	if (inlen)
+		{
+		__attribute__ ((aligned (16))) vec buf[4];
+		vec v0,v1,v2,v3;
+		v0 = s0; v1 = s1; v2 = s2; v3 = s3;
+		for (i = CHACHA_RNDS/2; i; i--)
+			{
+			DQROUND_VECTORS(v0,v1,v2,v3);
+			}
+
+		if (inlen >= 16)
+			{
+			STORE(op + 0, LOAD(ip + 0) ^ REVV_BE(v0 + s0));
+			if (inlen >= 32)
+				{
+				STORE(op + 4, LOAD(ip + 4) ^ REVV_BE(v1 + s1));
+				if (inlen >= 48)
+					{
+					STORE(op + 8, LOAD(ip +  8) ^
+						      REVV_BE(v2 + s2));
+					buf[3] = REVV_BE(v3 + s3);
+					}
+				else
+					buf[2] = REVV_BE(v2 + s2);
+				}
+			else
+				buf[1] = REVV_BE(v1 + s1);
+			}
+		else
+			buf[0] = REVV_BE(v0 + s0);
+
+		for (i=inlen & ~15; i<inlen; i++)
+			((char *)op)[i] = ((char *)ip)[i] ^ ((char *)buf)[i];
+		}
+	}
+
+#endif /* !OPENSSL_WINDOWS && (OPENSSL_X86_64 || OPENSSL_X86) */
diff --git a/crypto/chacha/chacha_vec_arm.S b/crypto/chacha/chacha_vec_arm.S
new file mode 100644
index 0000000..d82e6ee
--- /dev/null
+++ b/crypto/chacha/chacha_vec_arm.S
@@ -0,0 +1,885 @@
+# Copyright (c) 2014, Google Inc.
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This file contains a pre-compiled version of chacha_vec.c for ARM. This is
+# needed to support switching on NEON code at runtime. If the whole of OpenSSL
+# were to be compiled with the needed flags to build chacha_vec.c, then it
+# wouldn't be possible to run on non-NEON systems.
+#
+# This file was generated by:
+#
+#     /opt/gcc-linaro-arm-linux-gnueabihf-4.7-2012.10-20121022_linux/bin/arm-linux-gnueabihf-gcc -O3 -mcpu=cortex-a8 -mfpu=neon -S chacha_vec.c -I ../../include -fpic -o chacha_vec_arm.S
+
+	.syntax unified
+	.cpu cortex-a8
+	.eabi_attribute 27, 3
+
+# EABI attribute 28 sets whether VFP register arguments were used to build this
+# file. If object files are inconsistent on this point, the linker will refuse
+# to link them. Thus we report whatever the compiler expects since we don't use
+# VFP arguments.
+
+#if defined(__ARM_PCS_VFP)
+	.eabi_attribute 28, 1
+#else
+	.eabi_attribute 28, 0
+#endif
+
+	.fpu neon
+	.eabi_attribute 20, 1
+	.eabi_attribute 21, 1
+	.eabi_attribute 23, 3
+	.eabi_attribute 24, 1
+	.eabi_attribute 25, 1
+	.eabi_attribute 26, 2
+	.eabi_attribute 30, 2
+	.eabi_attribute 34, 1
+	.eabi_attribute 18, 4
+	.thumb
+	.file	"chacha_vec.c"
+	.text
+	.align	2
+	.global	CRYPTO_chacha_20_neon
+	.thumb
+	.thumb_func
+	.type	CRYPTO_chacha_20_neon, %function
+CRYPTO_chacha_20_neon:
+	@ args = 8, pretend = 0, frame = 304
+	@ frame_needed = 1, uses_anonymous_args = 0
+	@ link register save eliminated.
+	push	{r4, r5, r6, r7, r8, r9, sl, fp}
+	fstmfdd	sp!, {d8, d9, d10, d11, d12, d13, d14, d15}
+	sub	sp, sp, #304
+	add	r7, sp, #0
+	movw	ip, #43691
+	movt	ip, 43690
+	str	r2, [r7, #196]
+	sub	sp, sp, #96
+	ldr	r4, [r7, #196]
+	ldr	r6, [r7, #400]
+	ldr	r2, .L38+16
+	umull	r4, ip, ip, r4
+	ldr	r6, [r6, #0]
+	ldr	r8, [r7, #400]
+.LPIC24:
+	add	r2, pc
+	add	r4, sp, #15
+	str	r3, [r7, #244]
+	str	r6, [r7, #176]
+	bic	r4, r4, #15
+	str	r0, [r7, #188]
+	str	r4, [r7, #200]
+	lsrs	ip, ip, #7
+	str	r1, [r7, #184]
+	ldmia	r2, {r0, r1, r2, r3}
+	ldr	r4, [r8, #4]
+	ldr	r5, [r7, #244]
+	vld1.64	{d24-d25}, [r5:64]
+	vldr	d26, [r5, #16]
+	vldr	d27, [r5, #24]
+	ldr	r9, [r7, #200]
+	ldr	r8, [r7, #404]
+	ldr	r5, [r7, #176]
+	add	r6, r9, #64
+	str	r4, [r7, #300]
+	mov	r4, #0
+	str	r8, [r7, #288]
+	str	r5, [r7, #296]
+	str	r4, [r7, #292]
+	stmia	r6, {r0, r1, r2, r3}
+	vldr	d22, [r9, #64]
+	vldr	d23, [r9, #72]
+	vldr	d20, [r7, #288]
+	vldr	d21, [r7, #296]
+	str	ip, [r7, #192]
+	beq	.L20
+	lsl	r6, ip, #1
+	ldr	r1, [r9, #68]
+	add	r3, r6, ip
+	str	r6, [r7, #180]
+	ldr	r2, [r9, #72]
+	add	r8, r8, #2
+	ldr	r5, [r9, #76]
+	vldr	d18, .L38
+	vldr	d19, .L38+8
+	str	r4, [r7, #240]
+	ldr	r6, [r7, #184]
+	ldr	r4, [r7, #188]
+	str	r0, [r7, #224]
+	str	r1, [r7, #220]
+	str	r8, [r7, #208]
+	str	r2, [r7, #216]
+	str	r3, [r7, #204]
+	str	r5, [r7, #212]
+	str	r6, [r7, #252]
+	str	r4, [r7, #248]
+.L4:
+	ldr	r2, [r7, #244]
+	add	r9, r7, #216
+	ldr	r3, [r7, #244]
+	vadd.i32	q8, q10, q9
+	ldr	r6, [r7, #208]
+	vmov	q15, q13  @ v4si
+	ldr	r5, [r7, #240]
+	vmov	q3, q12  @ v4si
+	ldr	r4, [r7, #244]
+	vmov	q2, q11  @ v4si
+	adds	r5, r5, r6
+	ldr	r2, [r2, #8]
+	ldr	r6, [r7, #400]
+	vmov	q5, q10  @ v4si
+	ldr	r3, [r3, #12]
+	vmov	q1, q13  @ v4si
+	ldr	r0, [r7, #244]
+	vmov	q0, q12  @ v4si
+	ldr	r1, [r7, #244]
+	vmov	q4, q11  @ v4si
+	ldmia	r9, {r9, sl, fp}
+	str	r5, [r7, #228]
+	ldr	r5, [r4, #24]
+	ldr	r0, [r0, #0]
+	ldr	r1, [r1, #4]
+	str	r2, [r7, #264]
+	str	r3, [r7, #236]
+	ldr	r2, [r6, #4]
+	ldr	r3, [r4, #28]
+	str	r5, [r7, #280]
+	ldr	r5, [r6, #0]
+	movs	r6, #0
+	ldr	ip, [r7, #228]
+	ldr	r8, [r7, #212]
+	str	r0, [r7, #232]
+	str	r1, [r7, #268]
+	ldr	r0, [r4, #16]
+	ldr	r1, [r4, #20]
+	movs	r4, #10
+	str	r2, [r7, #24]
+	str	r3, [r7, #284]
+	str	r4, [r7, #256]
+	ldr	r2, [r7, #264]
+	str	r9, [r7, #276]
+	mov	r9, r6
+	ldr	r6, [r7, #280]
+	str	r8, [r7, #260]
+	mov	r8, sl
+	str	r1, [r7, #272]
+	mov	sl, ip
+	str	r6, [r7, #264]
+	mov	r6, r5
+	ldr	r3, [r7, #236]
+	mov	r5, r0
+	ldr	ip, [r7, #24]
+	ldr	r1, [r7, #268]
+	ldr	r0, [r7, #232]
+	b	.L39
+.L40:
+	.align	3
+.L38:
+	.word	1
+	.word	0
+	.word	0
+	.word	0
+	.word	.LANCHOR0-(.LPIC24+4)
+.L39:
+.L3:
+	vadd.i32	q4, q4, q0
+	add	r8, r8, r1
+	vadd.i32	q2, q2, q3
+	str	r8, [r7, #268]
+	veor	q5, q5, q4
+	ldr	r8, [r7, #276]
+	veor	q8, q8, q2
+	add	fp, fp, r0
+	str	fp, [r7, #280]
+	add	r8, r8, r2
+	vrev32.16	q5, q5
+	str	r8, [r7, #276]
+	vrev32.16	q8, q8
+	vadd.i32	q1, q1, q5
+	vadd.i32	q15, q15, q8
+	ldr	r8, [r7, #280]
+	veor	q0, q1, q0
+	ldr	r4, [r7, #260]
+	veor	q3, q15, q3
+	eor	sl, sl, r8
+	ldr	r8, [r7, #276]
+	add	fp, r4, r3
+	vshl.i32	q7, q0, #12
+	ldr	r4, [r7, #268]
+	vshl.i32	q6, q3, #12
+	eor	r6, r6, r8
+	eor	r9, r9, r4
+	ldr	r4, [r7, #272]
+	vsri.32	q7, q0, #20
+	ror	r8, r6, #16
+	ldr	r6, [r7, #264]
+	eor	ip, ip, fp
+	vsri.32	q6, q3, #20
+	ror	sl, sl, #16
+	ror	r9, r9, #16
+	add	r5, r5, sl
+	vadd.i32	q4, q4, q7
+	str	r5, [r7, #236]
+	vadd.i32	q2, q2, q6
+	add	r5, r4, r9
+	add	r4, r6, r8
+	ldr	r6, [r7, #284]
+	ror	ip, ip, #16
+	veor	q5, q4, q5
+	veor	q8, q2, q8
+	add	r6, r6, ip
+	str	r6, [r7, #264]
+	eors	r1, r1, r5
+	ldr	r6, [r7, #236]
+	vshl.i32	q3, q5, #8
+	vshl.i32	q14, q8, #8
+	eors	r2, r2, r4
+	eors	r0, r0, r6
+	ldr	r6, [r7, #264]
+	vsri.32	q3, q5, #24
+	ror	r1, r1, #20
+	eors	r3, r3, r6
+	ldr	r6, [r7, #280]
+	ror	r0, r0, #20
+	vsri.32	q14, q8, #24
+	adds	r6, r0, r6
+	str	r6, [r7, #284]
+	ldr	r6, [r7, #268]
+	vadd.i32	q1, q1, q3
+	vadd.i32	q15, q15, q14
+	ror	r2, r2, #20
+	adds	r6, r1, r6
+	str	r6, [r7, #260]
+	ldr	r6, [r7, #276]
+	veor	q6, q15, q6
+	veor	q7, q1, q7
+	ror	r3, r3, #20
+	adds	r6, r2, r6
+	str	r6, [r7, #280]
+	ldr	r6, [r7, #284]
+	vshl.i32	q0, q6, #7
+	vshl.i32	q5, q7, #7
+	add	fp, r3, fp
+	eor	sl, r6, sl
+	ldr	r6, [r7, #260]
+	eor	ip, fp, ip
+	vsri.32	q0, q6, #25
+	eor	r9, r6, r9
+	ldr	r6, [r7, #280]
+	ror	sl, sl, #24
+	vsri.32	q5, q7, #25
+	eor	r8, r6, r8
+	ldr	r6, [r7, #236]
+	ror	r9, r9, #24
+	ror	ip, ip, #24
+	add	r6, sl, r6
+	str	r6, [r7, #276]
+	ldr	r6, [r7, #264]
+	add	r5, r9, r5
+	str	r5, [r7, #272]
+	vext.32	q5, q5, q5, #1
+	add	r5, ip, r6
+	ldr	r6, [r7, #276]
+	vext.32	q0, q0, q0, #1
+	vadd.i32	q4, q4, q5
+	eors	r0, r0, r6
+	ldr	r6, [r7, #272]
+	vadd.i32	q2, q2, q0
+	vext.32	q3, q3, q3, #3
+	ror	r8, r8, #24
+	eors	r1, r1, r6
+	vext.32	q14, q14, q14, #3
+	add	r4, r8, r4
+	ldr	r6, [r7, #284]
+	veor	q3, q4, q3
+	veor	q14, q2, q14
+	eors	r2, r2, r4
+	ror	r1, r1, #25
+	vext.32	q1, q1, q1, #2
+	adds	r6, r1, r6
+	str	r6, [r7, #284]
+	vext.32	q15, q15, q15, #2
+	ldr	r6, [r7, #260]
+	eors	r3, r3, r5
+	ror	r2, r2, #25
+	vrev32.16	q8, q14
+	adds	r6, r2, r6
+	vrev32.16	q3, q3
+	str	r6, [r7, #268]
+	vadd.i32	q1, q1, q3
+	ldr	r6, [r7, #280]
+	vadd.i32	q15, q15, q8
+	ror	r3, r3, #25
+	veor	q5, q1, q5
+	adds	r6, r3, r6
+	veor	q0, q15, q0
+	str	r6, [r7, #264]
+	ldr	r6, [r7, #268]
+	ror	r0, r0, #25
+	add	fp, r0, fp
+	vshl.i32	q6, q5, #12
+	eor	sl, r6, sl
+	ldr	r6, [r7, #284]
+	vshl.i32	q14, q0, #12
+	eor	r8, fp, r8
+	eor	ip, r6, ip
+	ldr	r6, [r7, #264]
+	vsri.32	q6, q5, #20
+	ror	sl, sl, #16
+	eor	r9, r6, r9
+	ror	r6, r8, #16
+	vsri.32	q14, q0, #20
+	ldr	r8, [r7, #272]
+	ror	ip, ip, #16
+	add	r5, sl, r5
+	add	r8, r6, r8
+	add	r4, ip, r4
+	str	r4, [r7, #236]
+	eor	r0, r8, r0
+	str	r5, [r7, #280]
+	vadd.i32	q4, q4, q6
+	ldr	r5, [r7, #236]
+	vadd.i32	q2, q2, q14
+	ldr	r4, [r7, #276]
+	ror	r0, r0, #20
+	veor	q3, q4, q3
+	eors	r1, r1, r5
+	veor	q0, q2, q8
+	str	r8, [r7, #272]
+	str	r0, [r7, #24]
+	add	fp, r0, fp
+	ldr	r8, [r7, #280]
+	ror	r9, r9, #16
+	ldr	r0, [r7, #284]
+	add	r4, r9, r4
+	str	fp, [r7, #260]
+	ror	r1, r1, #20
+	add	fp, r1, r0
+	eor	r2, r8, r2
+	ldr	r0, [r7, #260]
+	eors	r3, r3, r4
+	vshl.i32	q5, q3, #8
+	str	r4, [r7, #232]
+	vshl.i32	q8, q0, #8
+	ldr	r4, [r7, #268]
+	ldr	r5, [r7, #264]
+	ror	r2, r2, #20
+	ror	r3, r3, #20
+	eors	r6, r6, r0
+	adds	r5, r3, r5
+	add	r8, r2, r4
+	vsri.32	q5, q3, #24
+	ldr	r4, [r7, #272]
+	eor	r9, r5, r9
+	eor	ip, fp, ip
+	vsri.32	q8, q0, #24
+	eor	sl, r8, sl
+	ror	r6, r6, #24
+	ldr	r0, [r7, #280]
+	str	r5, [r7, #276]
+	adds	r4, r6, r4
+	ldr	r5, [r7, #236]
+	vadd.i32	q1, q1, q5
+	str	r4, [r7, #272]
+	vadd.i32	q15, q15, q8
+	ldr	r4, [r7, #232]
+	ror	ip, ip, #24
+	ror	sl, sl, #24
+	ror	r9, r9, #24
+	add	r5, ip, r5
+	add	r0, sl, r0
+	str	r5, [r7, #264]
+	add	r5, r9, r4
+	str	r0, [r7, #284]
+	veor	q6, q1, q6
+	ldr	r4, [r7, #24]
+	veor	q14, q15, q14
+	ldr	r0, [r7, #272]
+	eors	r3, r3, r5
+	vshl.i32	q0, q6, #7
+	vext.32	q1, q1, q1, #2
+	eors	r0, r0, r4
+	ldr	r4, [r7, #284]
+	str	r0, [r7, #280]
+	vshl.i32	q3, q14, #7
+	eors	r2, r2, r4
+	ldr	r4, [r7, #280]
+	ldr	r0, [r7, #264]
+	vsri.32	q0, q6, #25
+	ror	r2, r2, #25
+	ror	r3, r3, #25
+	eors	r1, r1, r0
+	vsri.32	q3, q14, #25
+	ror	r0, r4, #25
+	ldr	r4, [r7, #256]
+	ror	r1, r1, #25
+	vext.32	q5, q5, q5, #1
+	subs	r4, r4, #1
+	str	r4, [r7, #256]
+	vext.32	q15, q15, q15, #2
+	vext.32	q8, q8, q8, #1
+	vext.32	q0, q0, q0, #3
+	vext.32	q3, q3, q3, #3
+	bne	.L3
+	ldr	r4, [r7, #264]
+	vadd.i32	q14, q10, q9
+	str	r2, [r7, #264]
+	vadd.i32	q10, q10, q5
+	ldr	r2, [r7, #252]
+	vld1.64	{d12-d13}, [r2:64]
+	ldr	r2, [r7, #220]
+	vadd.i32	q4, q11, q4
+	str	ip, [r7, #24]
+	mov	ip, sl
+	mov	sl, r8
+	ldr	r8, [r7, #260]
+	add	sl, sl, r2
+	ldr	r2, [r7, #212]
+	str	r4, [r7, #280]
+	vadd.i32	q0, q12, q0
+	ldr	r4, [r7, #224]
+	add	r8, r8, r2
+	ldr	r2, [r7, #240]
+	vadd.i32	q1, q13, q1
+	str	r0, [r7, #232]
+	add	fp, fp, r4
+	mov	r0, r5
+	ldr	r4, [r7, #216]
+	mov	r5, r6
+	mov	r6, r9
+	ldr	r9, [r7, #276]
+	adds	r2, r2, #3
+	str	r2, [r7, #240]
+	vadd.i32	q2, q11, q2
+	ldr	r2, [r7, #252]
+	add	r9, r9, r4
+	vadd.i32	q3, q12, q3
+	ldr	r4, [r7, #228]
+	vadd.i32	q15, q13, q15
+	str	r1, [r7, #268]
+	vadd.i32	q8, q14, q8
+	str	r3, [r7, #236]
+	veor	q4, q4, q6
+	ldr	r3, [r7, #284]
+	ldr	r1, [r7, #272]
+	add	ip, r4, ip
+	ldr	r4, [r7, #248]
+	vst1.64	{d8-d9}, [r4:64]
+	vldr	d8, [r2, #16]
+	vldr	d9, [r2, #24]
+	veor	q0, q0, q4
+	vstr	d0, [r4, #16]
+	vstr	d1, [r4, #24]
+	vldr	d0, [r2, #32]
+	vldr	d1, [r2, #40]
+	veor	q1, q1, q0
+	vstr	d2, [r4, #32]
+	vstr	d3, [r4, #40]
+	vldr	d2, [r2, #48]
+	vldr	d3, [r2, #56]
+	veor	q10, q10, q1
+	vstr	d20, [r4, #48]
+	vstr	d21, [r4, #56]
+	vldr	d8, [r2, #64]
+	vldr	d9, [r2, #72]
+	veor	q2, q2, q4
+	vstr	d4, [r4, #64]
+	vstr	d5, [r4, #72]
+	vldr	d10, [r2, #80]
+	vldr	d11, [r2, #88]
+	veor	q3, q3, q5
+	vstr	d6, [r4, #80]
+	vstr	d7, [r4, #88]
+	vldr	d12, [r2, #96]
+	vldr	d13, [r2, #104]
+	veor	q15, q15, q6
+	vstr	d30, [r4, #96]
+	vstr	d31, [r4, #104]
+	vldr	d20, [r2, #112]
+	vldr	d21, [r2, #120]
+	veor	q8, q8, q10
+	vstr	d16, [r4, #112]
+	vstr	d17, [r4, #120]
+	ldr	r4, [r2, #128]
+	ldr	r2, [r7, #248]
+	vadd.i32	q10, q14, q9
+	eor	r4, fp, r4
+	vadd.i32	q10, q10, q9
+	str	r4, [r2, #128]
+	ldr	r4, [r7, #252]
+	ldr	r2, [r4, #132]
+	eor	r2, sl, r2
+	ldr	sl, [r7, #248]
+	str	r2, [sl, #132]
+	ldr	r2, [r4, #136]
+	eor	r2, r9, r2
+	str	r2, [sl, #136]
+	ldr	r2, [r4, #140]
+	eor	r2, r8, r2
+	str	r2, [sl, #140]
+	ldr	r2, [r7, #244]
+	ldr	r4, [r4, #144]
+	ldr	r2, [r2, #0]
+	str	r4, [r7, #44]
+	ldr	r4, [r7, #232]
+	add	r8, r4, r2
+	ldr	r2, [r7, #44]
+	ldr	r4, [r7, #244]
+	eor	r8, r8, r2
+	ldr	r2, [r7, #252]
+	str	r8, [sl, #144]
+	ldr	r4, [r4, #4]
+	ldr	r2, [r2, #148]
+	str	r2, [r7, #40]
+	ldr	r2, [r7, #268]
+	add	r8, r2, r4
+	ldr	r4, [r7, #40]
+	ldr	r2, [r7, #244]
+	eor	r8, r8, r4
+	ldr	r4, [r7, #252]
+	str	r8, [sl, #148]
+	ldr	r2, [r2, #8]
+	ldr	r4, [r4, #152]
+	str	r4, [r7, #36]
+	ldr	r4, [r7, #264]
+	add	r8, r4, r2
+	ldr	r2, [r7, #36]
+	eor	r8, r8, r2
+	str	r8, [sl, #152]
+	ldr	r2, [r7, #252]
+	ldr	r4, [r7, #244]
+	ldr	r2, [r2, #156]
+	ldr	r4, [r4, #12]
+	str	r2, [r7, #32]
+	ldr	r2, [r7, #236]
+	add	r8, r2, r4
+	ldr	r4, [r7, #32]
+	ldr	r2, [r7, #252]
+	eor	r8, r8, r4
+	str	r8, [sl, #156]
+	ldr	r8, [r7, #244]
+	ldr	r2, [r2, #160]
+	ldr	r4, [r8, #16]
+	adds	r0, r0, r4
+	ldr	r4, [r7, #252]
+	eors	r0, r0, r2
+	str	r0, [sl, #160]
+	ldr	r0, [r8, #20]
+	ldr	r2, [r4, #164]
+	adds	r1, r1, r0
+	ldr	r0, [r7, #280]
+	eors	r1, r1, r2
+	str	r1, [sl, #164]
+	ldr	r2, [r8, #24]
+	ldr	r1, [r4, #168]
+	adds	r2, r0, r2
+	eors	r2, r2, r1
+	str	r2, [sl, #168]
+	ldr	r1, [r8, #28]
+	ldr	r2, [r4, #172]
+	adds	r3, r3, r1
+	eors	r3, r3, r2
+	str	r3, [sl, #172]
+	ldr	r3, [r4, #176]
+	eor	r3, ip, r3
+	str	r3, [sl, #176]
+	ldr	r3, [r4, #180]
+	ldr	r4, [r7, #400]
+	eors	r6, r6, r3
+	str	r6, [sl, #180]
+	ldr	r6, [r7, #252]
+	ldr	r2, [r4, #0]
+	ldr	r3, [r6, #184]
+	adds	r5, r5, r2
+	eors	r5, r5, r3
+	str	r5, [sl, #184]
+	ldr	r2, [r6, #188]
+	adds	r6, r6, #192
+	ldr	r3, [r4, #4]
+	str	r6, [r7, #252]
+	ldr	r0, [r7, #24]
+	ldr	r1, [r7, #240]
+	adds	r4, r0, r3
+	eors	r4, r4, r2
+	ldr	r2, [r7, #204]
+	str	r4, [sl, #188]
+	add	sl, sl, #192
+	cmp	r1, r2
+	str	sl, [r7, #248]
+	bne	.L4
+	ldr	r4, [r7, #192]
+	ldr	r3, [r7, #180]
+	ldr	r6, [r7, #188]
+	adds	r5, r3, r4
+	ldr	r8, [r7, #184]
+	lsls	r5, r5, #6
+	adds	r4, r6, r5
+	add	r5, r8, r5
+.L2:
+	ldr	r9, [r7, #196]
+	movw	r3, #43691
+	movt	r3, 43690
+	ldr	sl, [r7, #196]
+	umull	r9, r3, r3, r9
+	lsrs	r3, r3, #7
+	add	r3, r3, r3, lsl #1
+	sub	r3, sl, r3, lsl #6
+	lsrs	r6, r3, #6
+	beq	.L5
+	add	r1, r5, #16
+	add	r2, r4, #16
+	mov	r0, r6
+	vldr	d30, .L41
+	vldr	d31, .L41+8
+.L6:
+	vmov	q8, q10  @ v4si
+	movs	r3, #10
+	vmov	q1, q13  @ v4si
+	vmov	q14, q12  @ v4si
+	vmov	q3, q11  @ v4si
+.L7:
+	vadd.i32	q3, q3, q14
+	subs	r3, r3, #1
+	veor	q2, q8, q3
+	vrev32.16	q2, q2
+	vadd.i32	q8, q1, q2
+	veor	q9, q8, q14
+	vshl.i32	q14, q9, #12
+	vsri.32	q14, q9, #20
+	vadd.i32	q3, q3, q14
+	veor	q2, q3, q2
+	vshl.i32	q9, q2, #8
+	vsri.32	q9, q2, #24
+	vadd.i32	q8, q8, q9
+	vext.32	q9, q9, q9, #3
+	veor	q14, q8, q14
+	vext.32	q1, q8, q8, #2
+	vshl.i32	q8, q14, #7
+	vsri.32	q8, q14, #25
+	vext.32	q8, q8, q8, #1
+	vadd.i32	q3, q3, q8
+	veor	q2, q3, q9
+	vrev32.16	q2, q2
+	vadd.i32	q9, q1, q2
+	veor	q8, q9, q8
+	vshl.i32	q14, q8, #12
+	vsri.32	q14, q8, #20
+	vadd.i32	q3, q3, q14
+	veor	q2, q3, q2
+	vshl.i32	q8, q2, #8
+	vsri.32	q8, q2, #24
+	vadd.i32	q9, q9, q8
+	vext.32	q8, q8, q8, #1
+	veor	q14, q9, q14
+	vext.32	q1, q9, q9, #2
+	vshl.i32	q9, q14, #7
+	vsri.32	q9, q14, #25
+	vext.32	q14, q9, q9, #3
+	bne	.L7
+	vadd.i32	q8, q10, q8
+	subs	r0, r0, #1
+	vadd.i32	q3, q11, q3
+	vldr	d0, [r1, #-16]
+	vldr	d1, [r1, #-8]
+	vadd.i32	q14, q12, q14
+	vadd.i32	q1, q13, q1
+	veor	q3, q3, q0
+	vstr	d6, [r2, #-16]
+	vstr	d7, [r2, #-8]
+	vadd.i32	q10, q10, q15
+	vld1.64	{d8-d9}, [r1:64]
+	veor	q14, q14, q4
+	vst1.64	{d28-d29}, [r2:64]
+	vldr	d10, [r1, #16]
+	vldr	d11, [r1, #24]
+	veor	q1, q1, q5
+	vstr	d2, [r2, #16]
+	vstr	d3, [r2, #24]
+	vldr	d18, [r1, #32]
+	vldr	d19, [r1, #40]
+	add	r1, r1, #64
+	veor	q8, q8, q9
+	vstr	d16, [r2, #32]
+	vstr	d17, [r2, #40]
+	add	r2, r2, #64
+	bne	.L6
+	lsls	r6, r6, #6
+	adds	r4, r4, r6
+	adds	r5, r5, r6
+.L5:
+	ldr	r6, [r7, #196]
+	ands	ip, r6, #63
+	beq	.L1
+	vmov	q8, q10  @ v4si
+	movs	r3, #10
+	vmov	q14, q13  @ v4si
+	vmov	q9, q12  @ v4si
+	vmov	q15, q11  @ v4si
+.L10:
+	vadd.i32	q15, q15, q9
+	subs	r3, r3, #1
+	veor	q8, q8, q15
+	vrev32.16	q8, q8
+	vadd.i32	q3, q14, q8
+	veor	q9, q3, q9
+	vshl.i32	q14, q9, #12
+	vsri.32	q14, q9, #20
+	vadd.i32	q15, q15, q14
+	veor	q9, q15, q8
+	vshl.i32	q8, q9, #8
+	vsri.32	q8, q9, #24
+	vadd.i32	q9, q3, q8
+	vext.32	q8, q8, q8, #3
+	veor	q2, q9, q14
+	vext.32	q14, q9, q9, #2
+	vshl.i32	q9, q2, #7
+	vsri.32	q9, q2, #25
+	vext.32	q9, q9, q9, #1
+	vadd.i32	q15, q15, q9
+	veor	q3, q15, q8
+	vrev32.16	q3, q3
+	vadd.i32	q14, q14, q3
+	veor	q8, q14, q9
+	vshl.i32	q9, q8, #12
+	vsri.32	q9, q8, #20
+	vadd.i32	q15, q15, q9
+	veor	q3, q15, q3
+	vshl.i32	q8, q3, #8
+	vsri.32	q8, q3, #24
+	vadd.i32	q14, q14, q8
+	vext.32	q8, q8, q8, #1
+	veor	q3, q14, q9
+	vext.32	q14, q14, q14, #2
+	vshl.i32	q9, q3, #7
+	vsri.32	q9, q3, #25
+	vext.32	q9, q9, q9, #3
+	bne	.L10
+	cmp	ip, #15
+	vadd.i32	q11, q11, q15
+	bhi	.L37
+	ldr	r9, [r7, #200]
+	vst1.64	{d22-d23}, [r9:128]
+.L14:
+	ldr	sl, [r7, #196]
+	and	r3, sl, #48
+	cmp	ip, r3
+	bls	.L1
+	adds	r0, r5, r3
+	adds	r1, r4, r3
+	add	r2, r0, #16
+	add	r6, r1, #16
+	cmp	r1, r2
+	it	cc
+	cmpcc	r0, r6
+	rsb	r9, r3, ip
+	ite	cc
+	movcc	r2, #0
+	movcs	r2, #1
+	cmp	r9, #15
+	ite	ls
+	movls	r2, #0
+	andhi	r2, r2, #1
+	lsr	r8, r9, #4
+	eor	r2, r2, #1
+	cmp	r8, #0
+	it	eq
+	orreq	r2, r2, #1
+	lsl	sl, r8, #4
+	cbnz	r2, .L35
+	ldr	fp, [r7, #200]
+	add	r6, fp, r3
+.L17:
+	vld1.8	{q8}, [r0]!
+	adds	r2, r2, #1
+	cmp	r8, r2
+	vld1.8	{q9}, [r6]!
+	veor	q8, q9, q8
+	vst1.8	{q8}, [r1]!
+	bhi	.L17
+	cmp	r9, sl
+	add	r3, r3, sl
+	beq	.L1
+.L35:
+	ldr	r0, [r7, #200]
+.L25:
+	ldrb	r2, [r5, r3]	@ zero_extendqisi2
+	ldrb	r1, [r3, r0]	@ zero_extendqisi2
+	eors	r2, r2, r1
+	strb	r2, [r4, r3]
+	adds	r3, r3, #1
+	cmp	ip, r3
+	bhi	.L25
+.L1:
+	add	r7, r7, #304
+	mov	sp, r7
+	fldmfdd	sp!, {d8, d9, d10, d11, d12, d13, d14, d15}
+	pop	{r4, r5, r6, r7, r8, r9, sl, fp}
+	bx	lr
+.L37:
+	cmp	ip, #31
+	vld1.64	{d0-d1}, [r5:64]
+	vadd.i32	q9, q12, q9
+	veor	q11, q11, q0
+	vst1.64	{d22-d23}, [r4:64]
+	bls	.L12
+	cmp	ip, #47
+	vldr	d2, [r5, #16]
+	vldr	d3, [r5, #24]
+	vadd.i32	q13, q13, q14
+	veor	q9, q9, q1
+	vstr	d18, [r4, #16]
+	vstr	d19, [r4, #24]
+	bls	.L13
+	vadd.i32	q8, q8, q10
+	vldr	d0, [r5, #32]
+	vldr	d1, [r5, #40]
+	ldr	r6, [r7, #200]
+	vstr	d16, [r6, #48]
+	vstr	d17, [r6, #56]
+	veor	q8, q13, q0
+	vstr	d16, [r4, #32]
+	vstr	d17, [r4, #40]
+	b	.L14
+.L12:
+	ldr	r8, [r7, #200]
+	vstr	d18, [r8, #16]
+	vstr	d19, [r8, #24]
+	b	.L14
+.L20:
+	ldr	r5, [r7, #184]
+	ldr	r4, [r7, #188]
+	b	.L2
+.L13:
+	ldr	r6, [r7, #200]
+	vstr	d26, [r6, #32]
+	vstr	d27, [r6, #40]
+	b	.L14
+.L42:
+	.align	3
+.L41:
+	.word	1
+	.word	0
+	.word	0
+	.word	0
+	.size	CRYPTO_chacha_20_neon, .-CRYPTO_chacha_20_neon
+	.section	.rodata
+	.align	3
+.LANCHOR0 = . + 0
+.LC0:
+	.word	1634760805
+	.word	857760878
+	.word	2036477234
+	.word	1797285236
+	.ident	"GCC: (crosstool-NG linaro-1.13.1-4.7-2012.10-20121022 - Linaro GCC 2012.10) 4.7.3 20121001 (prerelease)"
+	.section	.note.GNU-stack,"",%progbits