AEAD interface.

This change adds an AEAD interface to EVP and an AES-GCM implementation
suitable for use in TLS.
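
As a rough sketch of the intended calling pattern (illustrative only; the
helper name seal_record and the buffer handling are examples, not part of
this change):

  #include <openssl/aead.h>

  /* seal_record encrypts and authenticates |in_len| bytes from |in| with
   * AES-128-GCM, writing the ciphertext followed by the tag to |out|. */
  static int seal_record(const uint8_t key[16], const uint8_t nonce[12],
                         const uint8_t *in, size_t in_len, uint8_t *out,
                         size_t max_out_len, size_t *out_len) {
    EVP_AEAD_CTX ctx;
    int ok;

    if (!EVP_AEAD_CTX_init(&ctx, EVP_aead_aes_128_gcm(), key, 16,
                           EVP_AEAD_DEFAULT_TAG_LENGTH, NULL)) {
      return 0;
    }
    /* |out| needs room for |in_len| plus the AEAD's overhead (16 bytes here). */
    ok = EVP_AEAD_CTX_seal(&ctx, out, out_len, max_out_len, nonce, 12, in,
                           in_len, NULL, 0);
    EVP_AEAD_CTX_cleanup(&ctx);
    return ok;
  }
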
diff --git a/crypto/base.h b/crypto/base.h
index d4b4028..f9adbb3 100644
--- a/crypto/base.h
+++ b/crypto/base.h
@@ -137,6 +137,7 @@
 typedef struct engine_st ENGINE;
 typedef struct env_md_ctx_st EVP_MD_CTX;
 typedef struct env_md_st EVP_MD;
+typedef struct evp_aead_st EVP_AEAD;
 typedef struct evp_cipher_ctx_st EVP_CIPHER_CTX;
 typedef struct evp_cipher_st EVP_CIPHER;
 typedef struct evp_pkey_asn1_method_st EVP_PKEY_ASN1_METHOD;
diff --git a/crypto/cipher/CMakeLists.txt b/crypto/cipher/CMakeLists.txt
index 55a5f87..82f85bf 100644
--- a/crypto/cipher/CMakeLists.txt
+++ b/crypto/cipher/CMakeLists.txt
@@ -8,6 +8,7 @@
 	cipher.c
 	cipher_error.c
 	derive_key.c
+	aead.c
 
 	e_null.c
 	e_rc4.c
diff --git a/crypto/cipher/aead.c b/crypto/cipher/aead.c
new file mode 100644
index 0000000..263e398
--- /dev/null
+++ b/crypto/cipher/aead.c
@@ -0,0 +1,119 @@
+/* Copyright (c) 2014, Google Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
+
+#include <openssl/aead.h>
+
+#include <string.h>
+
+#include <openssl/cipher.h>
+#include <openssl/err.h>
+
+#include "internal.h"
+
+
+size_t EVP_AEAD_key_length(const EVP_AEAD *aead) { return aead->key_len; }
+
+size_t EVP_AEAD_nonce_length(const EVP_AEAD *aead) { return aead->nonce_len; }
+
+size_t EVP_AEAD_max_overhead(const EVP_AEAD *aead) { return aead->overhead; }
+
+size_t EVP_AEAD_max_tag_len(const EVP_AEAD *aead) { return aead->max_tag_len; }
+
+int EVP_AEAD_CTX_init(EVP_AEAD_CTX *ctx, const EVP_AEAD *aead,
+                      const uint8_t *key, size_t key_len, size_t tag_len,
+                      ENGINE *impl) {
+  ctx->aead = aead;
+  if (key_len != aead->key_len) {
+    OPENSSL_PUT_ERROR(CIPHER, EVP_AEAD_CTX_init, CIPHER_R_UNSUPPORTED_KEY_SIZE);
+    return 0;
+  }
+  return aead->init(ctx, key, key_len, tag_len);
+}
+
+void EVP_AEAD_CTX_cleanup(EVP_AEAD_CTX *ctx) {
+  if (ctx->aead == NULL) {
+    return;
+  }
+  ctx->aead->cleanup(ctx);
+  ctx->aead = NULL;
+}
+
+/* check_alias returns 0 if |out| points within in[:in_len] without being equal
+ * to |in|, and 1 otherwise.
+ *
+ * That is the only problematic case when processing: if |out| points within
+ * the input buffer and isn't equal to |in|, then writing the output will stomp
+ * on input that hasn't been read yet. */
+static int check_alias(const uint8_t *in, size_t in_len, const uint8_t *out) {
+  if (out <= in) {
+    return 1;
+  } else if (in + in_len <= out) {
+    return 1;
+  }
+  return 0;
+}
+
+int EVP_AEAD_CTX_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
+                      size_t max_out_len, const uint8_t *nonce,
+                      size_t nonce_len, const uint8_t *in, size_t in_len,
+                      const uint8_t *ad, size_t ad_len) {
+  size_t possible_out_len = in_len + ctx->aead->overhead;
+
+  if (possible_out_len < in_len /* overflow */) {
+    OPENSSL_PUT_ERROR(CIPHER, EVP_AEAD_CTX_seal, CIPHER_R_TOO_LARGE);
+    goto error;
+  }
+
+  if (!check_alias(in, in_len, out)) {
+    OPENSSL_PUT_ERROR(CIPHER, EVP_AEAD_CTX_seal, CIPHER_R_OUTPUT_ALIASES_INPUT);
+    goto error;
+  }
+
+  if (ctx->aead->seal(ctx, out, out_len, max_out_len, nonce, nonce_len, in,
+                      in_len, ad, ad_len)) {
+    return 1;
+  }
+
+error:
+  /* In the event of an error, clear the output buffer so that a caller
+   * that doesn't check the return value doesn't send raw data. */
+  memset(out, 0, max_out_len);
+  *out_len = 0;
+  return 0;
+}
+
+int EVP_AEAD_CTX_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
+                      size_t max_out_len, const uint8_t *nonce,
+                      size_t nonce_len, const uint8_t *in, size_t in_len,
+                      const uint8_t *ad, size_t ad_len) {
+  if (!check_alias(in, in_len, out)) {
+    OPENSSL_PUT_ERROR(CIPHER, EVP_AEAD_CTX_open, CIPHER_R_OUTPUT_ALIASES_INPUT);
+    goto error;
+  }
+
+  if (ctx->aead->open(ctx, out, out_len, max_out_len, nonce, nonce_len, in,
+                      in_len, ad, ad_len)) {
+    return 1;
+  }
+
+error:
+  /* In the event of an error, clear the output buffer so that a caller
+   * that doesn't check the return value doesn't try and process bad
+   * data. */
+  memset(out, 0, max_out_len);
+  *out_len = 0;
+  return 0;
+}
diff --git a/crypto/cipher/aead.h b/crypto/cipher/aead.h
new file mode 100644
index 0000000..f51fed1
--- /dev/null
+++ b/crypto/cipher/aead.h
@@ -0,0 +1,209 @@
+/* Copyright (c) 2014, Google Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
+
+#ifndef OPENSSL_HEADER_AEAD_H
+#define OPENSSL_HEADER_AEAD_H
+
+#include <openssl/base.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+
+/* Authenticated Encryption with Additional Data.
+ *
+ * AEAD couples confidentiality and integrity in a single primitive. AEAD
+ * algorithms take a key and then can seal and open individual messages. Each
+ * message has a unique, per-message nonce and, optionally, additional data
+ * which is authenticated but not included in the ciphertext.
+ *
+ * The |EVP_AEAD_CTX_init| function initialises an |EVP_AEAD_CTX| structure and
+ * performs any precomputation needed to use |aead| with |key|. The length of
+ * the key, |key_len|, is given in bytes.
+ *
+ * The |tag_len| argument contains the length of the tags, in bytes, and allows
+ * for the processing of truncated authenticators. A zero value indicates that
+ * the default tag length should be used and this is defined as
+ * |EVP_AEAD_DEFAULT_TAG_LENGTH| in order to make the code clear. Using
+ * truncated tags increases an attacker's chance of creating a valid forgery.
+ * Be aware that the attacker's chance may increase more than exponentially as
+ * would naively be expected.
+ *
+ * When no longer needed, the initialised |EVP_AEAD_CTX| structure must be
+ * passed to |EVP_AEAD_CTX_cleanup|, which will deallocate any memory used.
+ *
+ * With an |EVP_AEAD_CTX| in hand, one can seal and open messages. These
+ * operations are intended to meet the standard notions of privacy and
+ * authenticity for authenticated encryption. For formal definitions see
+ * Bellare and Namprempre, "Authenticated encryption: relations among notions
+ * and analysis of the generic composition paradigm," Lecture Notes in Computer
+ * Science 1976 (2000), 531–545,
+ * http://www-cse.ucsd.edu/~mihir/papers/oem.html.
+ *
+ * When sealing messages, a nonce must be given. The length of the nonce is
+ * fixed by the AEAD in use and is returned by |EVP_AEAD_nonce_length|. *The
+ * nonce must be unique for all messages with the same key*. This is critically
+ * important - nonce reuse may completely undermine the security of the AEAD.
+ * Nonces may be predictable and public, so long as they are unique. Uniqueness
+ * may be achieved with a simple counter or, if the nonce is large enough, by
+ * generating it randomly. The nonce must be passed into the "open" operation
+ * by the receiver, so it must either be implicit (e.g. a counter) or be
+ * transmitted along with the sealed message.
+ *
+ * The "seal" and "open" operations are atomic - an entire message must be
+ * encrypted or decrypted in a single call. Large messages may have to be split
+ * up in order to accommodate this. When doing so, be mindful of the need not to
+ * repeat nonces and the possibility that an attacker could duplicate, reorder
+ * or drop message chunks. For example, using a single key for a given (large)
+ * message and sealing chunks with nonces counting from zero would be secure as
+ * long as the number of chunks was securely transmitted. (Otherwise an
+ * attacker could truncate the message by dropping chunks from the end.)
+ *
+ * The number of chunks could be transmitted by prefixing it to the plaintext,
+ * for example. This also assumes that no other message would ever use the same
+ * key; otherwise the rule that nonces must be unique for a given key would be
+ * violated.
+ *
+ * The "seal" and "open" operations also permit additional data to be
+ * authenticated via the |ad| parameter. This data is not included in the
+ * ciphertext and must be identical in both the "seal" and "open" calls. This
+ * permits implicit context to be authenticated but may be empty if not needed.
+ *
+ * The "seal" and "open" operations may work in-place if the |out| and |in|
+ * arguments are equal. They may also be used to shift the data left inside the
+ * same buffer if |out| is less than |in|. However, |out| may not point inside
+ * the input data; otherwise the input may be overwritten before it has been
+ * read. This situation will cause an error.
+ *
+ * The "seal" and "open" operations return one on success and zero on error. */
+
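+/* For illustration (the variable names are only examples), opening a sealed
+ * message with AES-128-GCM might look like:
+ *
+ *   EVP_AEAD_CTX ctx;
+ *   size_t out_len;
+ *
+ *   if (!EVP_AEAD_CTX_init(&ctx, EVP_aead_aes_128_gcm(), key, 16,
+ *                          EVP_AEAD_DEFAULT_TAG_LENGTH, NULL) ||
+ *       !EVP_AEAD_CTX_open(&ctx, out, &out_len, max_out_len, nonce, 12,
+ *                          in, in_len, ad, ad_len)) {
+ *     ...  (the input did not authenticate or a size was wrong)
+ *   }
+ *   EVP_AEAD_CTX_cleanup(&ctx); */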
+
+/* AEAD algorithms. */
+
+/* EVP_aead_aes_128_gcm is AES-128 in Galois/Counter Mode. */
+const EVP_AEAD *EVP_aead_aes_128_gcm(void);
+
+/* EVP_aead_aes_256_gcm is AES-256 in Galois/Counter Mode. */
+const EVP_AEAD *EVP_aead_aes_256_gcm(void);
+
+
+/* Utility functions. */
+
+/* EVP_AEAD_key_length returns the length, in bytes, of the keys used by
+ * |aead|. */
+size_t EVP_AEAD_key_length(const EVP_AEAD *aead);
+
+/* EVP_AEAD_nonce_length returns the length, in bytes, of the per-message nonce
+ * for |aead|. */
+size_t EVP_AEAD_nonce_length(const EVP_AEAD *aead);
+
+/* EVP_AEAD_max_overhead returns the maximum number of additional bytes added
+ * by the act of sealing data with |aead|. */
+size_t EVP_AEAD_max_overhead(const EVP_AEAD *aead);
+
+/* EVP_AEAD_max_tag_len returns the maximum tag length when using |aead|. This
+ * is the largest value that can be passed as |tag_len| to
+ * |EVP_AEAD_CTX_init|. */
+size_t EVP_AEAD_max_tag_len(const EVP_AEAD *aead);
+
+
+/* AEAD operations. */
+
+/* An EVP_AEAD_CTX represents an AEAD algorithm configured with a specific key
+ * and message-independent IV. */
+typedef struct evp_aead_ctx_st {
+  const EVP_AEAD *aead;
+  /* aead_state is an opaque pointer to whatever state the AEAD needs to
+   * maintain. */
+  void *aead_state;
+} EVP_AEAD_CTX;
+
+/* EVP_AEAD_MAX_OVERHEAD contains the maximum overhead used by any AEAD
+ * defined in this header. */
+#define EVP_AEAD_MAX_OVERHEAD 16
+
+/* EVP_AEAD_DEFAULT_TAG_LENGTH is a magic value that can be passed to
+ * EVP_AEAD_CTX_init to indicate that the default tag length for an AEAD should
+ * be used. */
+#define EVP_AEAD_DEFAULT_TAG_LENGTH 0
+
+/* EVP_AEAD_CTX_init initializes |ctx| for the given AEAD algorithm from
+ * |impl|. The |impl| argument may be NULL to choose the default
+ * implementation.
+ * Authentication tags may be truncated by passing a size as |tag_len|. A
+ * |tag_len| of zero indicates the default tag length and this is defined as
+ * EVP_AEAD_DEFAULT_TAG_LENGTH for readability.
+ * Returns 1 on success. Otherwise returns 0 and pushes to the error stack. */
+int EVP_AEAD_CTX_init(EVP_AEAD_CTX *ctx, const EVP_AEAD *aead,
+                      const uint8_t *key, size_t key_len, size_t tag_len,
+                      ENGINE *impl);
+
+/* EVP_AEAD_CTX_cleanup frees any data allocated by |ctx|. */
+void EVP_AEAD_CTX_cleanup(EVP_AEAD_CTX *ctx);
+
+/* EVP_AEAD_CTX_seal encrypts and authenticates |in_len| bytes from |in| and
+ * authenticates |ad_len| bytes from |ad| and writes the result to |out|. It
+ * returns one on success and zero otherwise.
+ *
+ * This function may be called (with the same |EVP_AEAD_CTX|) concurrently with
+ * itself or |EVP_AEAD_CTX_open|.
+ *
+ * At most |max_out_len| bytes are written to |out| and, in order to ensure
+ * success, |max_out_len| should be |in_len| plus the result of
+ * |EVP_AEAD_max_overhead|. On successful return, |*out_len| is set to the
+ * actual number of bytes written.
+ *
+ * The length of |nonce|, |nonce_len|, must be equal to the result of
+ * |EVP_AEAD_nonce_length| for this AEAD.
+ *
+ * |EVP_AEAD_CTX_seal| never results in a partial output. If |max_out_len| is
+ * insufficient, zero will be returned. (In this case, |*out_len| is set to
+ * zero.)
+ *
+ * If |in| and |out| alias then |out| must be <= |in|. */
+int EVP_AEAD_CTX_seal(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
+                      size_t max_out_len, const uint8_t *nonce,
+                      size_t nonce_len, const uint8_t *in, size_t in_len,
+                      const uint8_t *ad, size_t ad_len);
+
+/* EVP_AEAD_CTX_open authenticates |in_len| bytes from |in| and |ad_len| bytes
+ * from |ad| and decrypts at most |in_len| bytes into |out|. It returns one on
+ * success and zero otherwise.
+ *
+ * This function may be called (with the same |EVP_AEAD_CTX|) concurrently with
+ * itself or |EVP_AEAD_CTX_seal|.
+ *
+ * At most |in_len| bytes are written to |out|. In order to ensure success,
+ * |max_out_len| should be at least |in_len|. On successful return, |*out_len|
+ * is set to the actual number of bytes written.
+ *
+ * The length of |nonce|, |nonce_len|, must be equal to the result of
+ * |EVP_AEAD_nonce_length| for this AEAD.
+ *
+ * |EVP_AEAD_CTX_open| never results in a partial output. If |max_out_len| is
+ * insufficient, zero will be returned. (In this case, |*out_len| is set to
+ * zero.)
+ *
+ * If |in| and |out| alias then |out| must be <= |in|. */
+int EVP_AEAD_CTX_open(const EVP_AEAD_CTX *ctx, uint8_t *out, size_t *out_len,
+                      size_t max_out_len, const uint8_t *nonce,
+                      size_t nonce_len, const uint8_t *in, size_t in_len,
+                      const uint8_t *ad, size_t ad_len);
+
+
+#if defined(__cplusplus)
+}  /* extern C */
+#endif
+
+#endif  /* OPENSSL_HEADER_AEAD_H */
diff --git a/crypto/cipher/cipher.h b/crypto/cipher/cipher.h
index 2d105d5..e98a632 100644
--- a/crypto/cipher/cipher.h
+++ b/crypto/cipher/cipher.h
@@ -428,6 +428,12 @@
 #define CIPHER_F_aes_init_key 104
 #define CIPHER_F_aesni_init_key 105
 #define CIPHER_F_EVP_CIPHER_CTX_copy 106
+#define CIPHER_F_EVP_AEAD_CTX_open 107
+#define CIPHER_F_EVP_AEAD_CTX_init 108
+#define CIPHER_F_EVP_AEAD_CTX_seal 109
+#define CIPHER_F_aead_aes_gcm_seal 110
+#define CIPHER_F_aead_aes_gcm_open 111
+#define CIPHER_F_aead_aes_gcm_init 112
 #define CIPHER_R_WRAP_MODE_NOT_ALLOWED 100
 #define CIPHER_R_AES_KEY_SETUP_FAILED 101
 #define CIPHER_R_INPUT_NOT_INITIALIZED 102
@@ -438,5 +444,11 @@
 #define CIPHER_R_BAD_DECRYPT 107
 #define CIPHER_R_WRONG_FINAL_BLOCK_LENGTH 108
 #define CIPHER_R_CTRL_OPERATION_NOT_IMPLEMENTED 109
+#define CIPHER_R_TAG_TOO_LARGE 110
+#define CIPHER_R_BAD_KEY_LENGTH 111
+#define CIPHER_R_BUFFER_TOO_SMALL 112
+#define CIPHER_R_OUTPUT_ALIASES_INPUT 113
+#define CIPHER_R_UNSUPPORTED_KEY_SIZE 114
+#define CIPHER_R_TOO_LARGE 115
 
 #endif  /* OPENSSL_HEADER_CIPHER_H */
diff --git a/crypto/cipher/cipher_error.c b/crypto/cipher/cipher_error.c
index bddecdd..0736404 100644
--- a/crypto/cipher/cipher_error.c
+++ b/crypto/cipher/cipher_error.c
@@ -17,21 +17,33 @@
 #include "cipher.h"
 
 const ERR_STRING_DATA CIPHER_error_string_data[] = {
+  {ERR_PACK(ERR_LIB_CIPHER, CIPHER_F_EVP_AEAD_CTX_init, 0), "EVP_AEAD_CTX_init"},
+  {ERR_PACK(ERR_LIB_CIPHER, CIPHER_F_EVP_AEAD_CTX_open, 0), "EVP_AEAD_CTX_open"},
+  {ERR_PACK(ERR_LIB_CIPHER, CIPHER_F_EVP_AEAD_CTX_seal, 0), "EVP_AEAD_CTX_seal"},
   {ERR_PACK(ERR_LIB_CIPHER, CIPHER_F_EVP_CIPHER_CTX_copy, 0), "EVP_CIPHER_CTX_copy"},
   {ERR_PACK(ERR_LIB_CIPHER, CIPHER_F_EVP_CIPHER_CTX_ctrl, 0), "EVP_CIPHER_CTX_ctrl"},
   {ERR_PACK(ERR_LIB_CIPHER, CIPHER_F_EVP_CipherInit_ex, 0), "EVP_CipherInit_ex"},
   {ERR_PACK(ERR_LIB_CIPHER, CIPHER_F_EVP_DecryptFinal_ex, 0), "EVP_DecryptFinal_ex"},
   {ERR_PACK(ERR_LIB_CIPHER, CIPHER_F_EVP_EncryptFinal_ex, 0), "EVP_EncryptFinal_ex"},
+  {ERR_PACK(ERR_LIB_CIPHER, CIPHER_F_aead_aes_gcm_init, 0), "aead_aes_gcm_init"},
+  {ERR_PACK(ERR_LIB_CIPHER, CIPHER_F_aead_aes_gcm_open, 0), "aead_aes_gcm_open"},
+  {ERR_PACK(ERR_LIB_CIPHER, CIPHER_F_aead_aes_gcm_seal, 0), "aead_aes_gcm_seal"},
   {ERR_PACK(ERR_LIB_CIPHER, CIPHER_F_aes_init_key, 0), "aes_init_key"},
   {ERR_PACK(ERR_LIB_CIPHER, CIPHER_F_aesni_init_key, 0), "aesni_init_key"},
   {ERR_PACK(ERR_LIB_CIPHER, 0, CIPHER_R_AES_KEY_SETUP_FAILED), "AES_KEY_SETUP_FAILED"},
   {ERR_PACK(ERR_LIB_CIPHER, 0, CIPHER_R_BAD_DECRYPT), "BAD_DECRYPT"},
+  {ERR_PACK(ERR_LIB_CIPHER, 0, CIPHER_R_BAD_KEY_LENGTH), "BAD_KEY_LENGTH"},
+  {ERR_PACK(ERR_LIB_CIPHER, 0, CIPHER_R_BUFFER_TOO_SMALL), "BUFFER_TOO_SMALL"},
   {ERR_PACK(ERR_LIB_CIPHER, 0, CIPHER_R_CTRL_NOT_IMPLEMENTED), "CTRL_NOT_IMPLEMENTED"},
   {ERR_PACK(ERR_LIB_CIPHER, 0, CIPHER_R_CTRL_OPERATION_NOT_IMPLEMENTED), "CTRL_OPERATION_NOT_IMPLEMENTED"},
   {ERR_PACK(ERR_LIB_CIPHER, 0, CIPHER_R_DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH), "DATA_NOT_MULTIPLE_OF_BLOCK_LENGTH"},
   {ERR_PACK(ERR_LIB_CIPHER, 0, CIPHER_R_INITIALIZATION_ERROR), "INITIALIZATION_ERROR"},
   {ERR_PACK(ERR_LIB_CIPHER, 0, CIPHER_R_INPUT_NOT_INITIALIZED), "INPUT_NOT_INITIALIZED"},
   {ERR_PACK(ERR_LIB_CIPHER, 0, CIPHER_R_NO_CIPHER_SET), "NO_CIPHER_SET"},
+  {ERR_PACK(ERR_LIB_CIPHER, 0, CIPHER_R_OUTPUT_ALIASES_INPUT), "OUTPUT_ALIASES_INPUT"},
+  {ERR_PACK(ERR_LIB_CIPHER, 0, CIPHER_R_TAG_TOO_LARGE), "TAG_TOO_LARGE"},
+  {ERR_PACK(ERR_LIB_CIPHER, 0, CIPHER_R_TOO_LARGE), "TOO_LARGE"},
+  {ERR_PACK(ERR_LIB_CIPHER, 0, CIPHER_R_UNSUPPORTED_KEY_SIZE), "UNSUPPORTED_KEY_SIZE"},
   {ERR_PACK(ERR_LIB_CIPHER, 0, CIPHER_R_WRAP_MODE_NOT_ALLOWED), "WRAP_MODE_NOT_ALLOWED"},
   {ERR_PACK(ERR_LIB_CIPHER, 0, CIPHER_R_WRONG_FINAL_BLOCK_LENGTH), "WRONG_FINAL_BLOCK_LENGTH"},
   {0, NULL},
diff --git a/crypto/cipher/e_aes.c b/crypto/cipher/e_aes.c
index c22e306..c88eb07 100644
--- a/crypto/cipher/e_aes.c
+++ b/crypto/cipher/e_aes.c
@@ -46,14 +46,15 @@
  * OF THE POSSIBILITY OF SUCH DAMAGE.
  * ==================================================================== */
 
+#include <openssl/aead.h>
 #include <openssl/aes.h>
 #include <openssl/cipher.h>
 #include <openssl/cpu.h>
 #include <openssl/err.h>
 #include <openssl/mem.h>
 #include <openssl/modes.h>
-#include <openssl/rand.h>
 #include <openssl/obj.h>
+#include <openssl/rand.h>
 
 #include "internal.h"
 #include "../modes/internal.h"
@@ -87,11 +88,6 @@
   ctr128_f ctr;
 } EVP_AES_GCM_CTX;
 
-
-void AES_ctr32_encrypt(const uint8_t *in, uint8_t *out, size_t blocks,
-                       const AES_KEY *key, const uint8_t ivec[AES_BLOCK_SIZE]);
-
-
 #if !defined(OPENSSL_NO_ASM) && \
     (defined(OPENSSL_X86_64) || defined(OPENSSL_X86))
 #define VPAES
@@ -144,7 +140,7 @@
 #endif
 
 #if defined(VPAES)
-/* On platforms where BSAES gets defined (just above), then these functions are
+/* On platforms where VPAES gets defined (just above), these functions are
  * provided by asm. */
 int vpaes_set_encrypt_key(const uint8_t *userKey, int bits, AES_KEY *key);
 int vpaes_set_decrypt_key(const uint8_t *userKey, int bits, AES_KEY *key);
@@ -206,7 +202,7 @@
                    size_t len);
 #define AES_GCM_ASM(gctx) \
   (gctx->ctr == aesni_ctr32_encrypt_blocks && gctx->gcm.ghash == gcm_ghash_avx)
-#endif
+#endif  /* OPENSSL_X86_64 */
 
 #else
 
@@ -319,30 +315,34 @@
   return 1;
 }
 
+static ctr128_f aes_gcm_set_key(AES_KEY *aes_key, GCM128_CONTEXT *gcm_ctx,
+                                const uint8_t *key, size_t key_len) {
+  if (bsaes_capable()) {
+    AES_set_encrypt_key(key, key_len * 8, aes_key);
+    CRYPTO_gcm128_init(gcm_ctx, aes_key, (block128_f)AES_encrypt);
+    return (ctr128_f)bsaes_ctr32_encrypt_blocks;
+  }
+
+  if (vpaes_capable()) {
+    vpaes_set_encrypt_key(key, key_len * 8, aes_key);
+    CRYPTO_gcm128_init(gcm_ctx, aes_key, (block128_f)vpaes_encrypt);
+    return NULL;
+  }
+
+  AES_set_encrypt_key(key, key_len * 8, aes_key);
+  CRYPTO_gcm128_init(gcm_ctx, aes_key, (block128_f)AES_encrypt);
+  return NULL;
+}
+
 static int aes_gcm_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
                             const uint8_t *iv, int enc) {
   EVP_AES_GCM_CTX *gctx = ctx->cipher_data;
   if (!iv && !key) {
     return 1;
   }
-
   if (key) {
-    if (bsaes_capable()) {
-      AES_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
-      CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, (block128_f)AES_encrypt);
-      gctx->ctr = (ctr128_f)bsaes_ctr32_encrypt_blocks;
-    } else if (vpaes_capable()) {
-      vpaes_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
-      CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, (block128_f)vpaes_encrypt);
-      gctx->ctr = NULL;
-    } else {
-      AES_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
-      CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, (block128_f)AES_encrypt);
-      gctx->ctr = NULL;
-    }
-
-    /* If we have an iv can set it directly, otherwise use
-     * saved IV. */
+    gctx->ctr = aes_gcm_set_key(&gctx->ks.ks, &gctx->gcm, key, ctx->key_len);
+    /* If we have an IV, set it directly; otherwise use the saved IV. */
     if (iv == NULL && gctx->iv_set) {
       iv = gctx->iv;
     }
@@ -512,7 +512,6 @@
  * followed by the payload and finally the tag. On encrypt generate IV, encrypt
  * payload and write the tag. On verify retrieve IV, decrypt payload and verify
  * tag. */
-
 static int aes_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out,
                               const uint8_t *in, size_t len) {
   EVP_AES_GCM_CTX *gctx = ctx->cipher_data;
@@ -522,15 +521,16 @@
     return -1;
   }
   /* Set IV from start of buffer or generate IV and write to start
-   * of buffer.
-   */
+   * of buffer. */
   if (EVP_CIPHER_CTX_ctrl(
           ctx, ctx->encrypt ? EVP_CTRL_GCM_IV_GEN : EVP_CTRL_GCM_SET_IV_INV,
-          EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0)
+          EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0) {
     goto err;
+  }
   /* Use saved AAD */
-  if (!CRYPTO_gcm128_aad(&gctx->gcm, ctx->buf, gctx->tls_aad_len))
+  if (!CRYPTO_gcm128_aad(&gctx->gcm, ctx->buf, gctx->tls_aad_len)) {
     goto err;
+  }
   /* Fix buffer and length to point to payload */
   in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
   out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
@@ -541,8 +541,9 @@
       size_t bulk = 0;
 #if defined(AES_GCM_ASM)
       if (len >= 32 && AES_GCM_ASM(gctx)) {
-        if (!CRYPTO_gcm128_encrypt(&gctx->gcm, NULL, NULL, 0))
+        if (!CRYPTO_gcm128_encrypt(&gctx->gcm, NULL, NULL, 0)) {
           return -1;
+        }
 
         bulk = AES_gcm_encrypt(in, out, len, gctx->gcm.key, gctx->gcm.Yi.c,
                                gctx->gcm.Xi.u);
@@ -550,12 +551,15 @@
       }
 #endif
       if (!CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm, in + bulk, out + bulk,
-                                      len - bulk, gctx->ctr))
+                                      len - bulk, gctx->ctr)) {
         goto err;
+      }
     } else {
       size_t bulk = 0;
-      if (!CRYPTO_gcm128_encrypt(&gctx->gcm, in + bulk, out + bulk, len - bulk))
+      if (!CRYPTO_gcm128_encrypt(&gctx->gcm, in + bulk, out + bulk,
+                                 len - bulk)) {
         goto err;
+      }
     }
     out += len;
     /* Finally write tag */
@@ -567,8 +571,9 @@
       size_t bulk = 0;
 #if defined(AES_GCM_ASM)
       if (len >= 16 && AES_GCM_ASM(gctx)) {
-        if (!CRYPTO_gcm128_decrypt(&gctx->gcm, NULL, NULL, 0))
+        if (!CRYPTO_gcm128_decrypt(&gctx->gcm, NULL, NULL, 0)) {
           return -1;
+        }
 
         bulk = AES_gcm_decrypt(in, out, len, gctx->gcm.key, gctx->gcm.Yi.c,
                                gctx->gcm.Xi.u);
@@ -576,12 +581,15 @@
       }
 #endif
       if (!CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm, in + bulk, out + bulk,
-                                      len - bulk, gctx->ctr))
+                                      len - bulk, gctx->ctr)) {
         goto err;
+      }
     } else {
       size_t bulk = 0;
-      if (!CRYPTO_gcm128_decrypt(&gctx->gcm, in + bulk, out + bulk, len - bulk))
+      if (!CRYPTO_gcm128_decrypt(&gctx->gcm, in + bulk, out + bulk,
+                                 len - bulk)) {
         goto err;
+      }
     }
     /* Retrieve tag */
     CRYPTO_gcm128_tag(&gctx->gcm, ctx->buf, EVP_GCM_TLS_TAG_LEN);
@@ -602,19 +610,23 @@
 static int aes_gcm_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
                           size_t len) {
   EVP_AES_GCM_CTX *gctx = ctx->cipher_data;
+
   /* If not set up, return error */
-  if (!gctx->key_set)
+  if (!gctx->key_set) {
     return -1;
-
-  if (gctx->tls_aad_len >= 0)
+  }
+  if (gctx->tls_aad_len >= 0) {
     return aes_gcm_tls_cipher(ctx, out, in, len);
-
-  if (!gctx->iv_set)
+  }
+  if (!gctx->iv_set) {
     return -1;
+  }
+
   if (in) {
     if (out == NULL) {
-      if (!CRYPTO_gcm128_aad(&gctx->gcm, in, len))
+      if (!CRYPTO_gcm128_aad(&gctx->gcm, in, len)) {
         return -1;
+      }
     } else if (ctx->encrypt) {
       if (gctx->ctr) {
         size_t bulk = 0;
@@ -622,8 +634,9 @@
         if (len >= 32 && AES_GCM_ASM(gctx)) {
           size_t res = (16 - gctx->gcm.mres) % 16;
 
-          if (!CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, res))
+          if (!CRYPTO_gcm128_encrypt(&gctx->gcm, in, out, res)) {
             return -1;
+          }
 
           bulk = AES_gcm_encrypt(in + res, out + res, len - res, gctx->gcm.key,
                                  gctx->gcm.Yi.c, gctx->gcm.Xi.u);
@@ -632,13 +645,15 @@
         }
 #endif
         if (!CRYPTO_gcm128_encrypt_ctr32(&gctx->gcm, in + bulk, out + bulk,
-                                        len - bulk, gctx->ctr))
+                                        len - bulk, gctx->ctr)) {
           return -1;
+        }
       } else {
         size_t bulk = 0;
         if (!CRYPTO_gcm128_encrypt(&gctx->gcm, in + bulk, out + bulk,
-                                  len - bulk))
+                                  len - bulk)) {
           return -1;
+        }
       }
     } else {
       if (gctx->ctr) {
@@ -647,8 +662,9 @@
         if (len >= 16 && AES_GCM_ASM(gctx)) {
           size_t res = (16 - gctx->gcm.mres) % 16;
 
-          if (!CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, res))
+          if (!CRYPTO_gcm128_decrypt(&gctx->gcm, in, out, res)) {
             return -1;
+          }
 
           bulk = AES_gcm_decrypt(in + res, out + res, len - res, gctx->gcm.key,
                                  gctx->gcm.Yi.c, gctx->gcm.Xi.u);
@@ -657,22 +673,24 @@
         }
 #endif
         if (!CRYPTO_gcm128_decrypt_ctr32(&gctx->gcm, in + bulk, out + bulk,
-                                        len - bulk, gctx->ctr))
+                                        len - bulk, gctx->ctr)) {
           return -1;
+        }
       } else {
         size_t bulk = 0;
         if (!CRYPTO_gcm128_decrypt(&gctx->gcm, in + bulk, out + bulk,
-                                  len - bulk))
+                                  len - bulk)) {
           return -1;
+        }
       }
     }
     return len;
   } else {
     if (!ctx->encrypt) {
-      if (gctx->taglen < 0)
+      if (gctx->taglen < 0 ||
+          !CRYPTO_gcm128_finish(&gctx->gcm, ctx->buf, gctx->taglen)) {
         return -1;
-      if (!CRYPTO_gcm128_finish(&gctx->gcm, ctx->buf, gctx->taglen) != 0)
-        return -1;
+      }
       gctx->iv_set = 0;
       return 0;
     }
@@ -802,8 +820,9 @@
 static int aesni_gcm_init_key(EVP_CIPHER_CTX *ctx, const uint8_t *key,
                               const uint8_t *iv, int enc) {
   EVP_AES_GCM_CTX *gctx = ctx->cipher_data;
-  if (!iv && !key)
+  if (!iv && !key) {
     return 1;
+  }
   if (key) {
     aesni_set_encrypt_key(key, ctx->key_len * 8, &gctx->ks.ks);
     CRYPTO_gcm128_init(&gctx->gcm, &gctx->ks, (block128_f)aesni_encrypt);
@@ -897,6 +916,10 @@
 
 #else  /* ^^^  OPENSSL_X86_64 || OPENSSL_X86 */
 
+static char aesni_capable(void) {
+  return 0;
+}
+
 #define EVP_CIPHER_FUNCTION(keybits, mode)             \
   const EVP_CIPHER *EVP_aes_##keybits##_##mode(void) { \
     return &aes_##keybits##_##mode;                    \
@@ -913,3 +936,178 @@
 EVP_CIPHER_FUNCTION(256, ctr)
 EVP_CIPHER_FUNCTION(256, ecb)
 EVP_CIPHER_FUNCTION(256, gcm)
+
+
+#define EVP_AEAD_AES_GCM_TAG_LEN 16
+
+struct aead_aes_gcm_ctx {
+  union {
+    double align;
+    AES_KEY ks;
+  } ks;
+  GCM128_CONTEXT gcm;
+  ctr128_f ctr;
+  uint8_t tag_len;
+};
+
+static int aead_aes_gcm_init(EVP_AEAD_CTX *ctx, const uint8_t *key,
+                             size_t key_len, size_t tag_len) {
+  struct aead_aes_gcm_ctx *gcm_ctx;
+  const size_t key_bits = key_len * 8;
+
+  if (key_bits != 128 && key_bits != 256) {
+    OPENSSL_PUT_ERROR(CIPHER, aead_aes_gcm_init, CIPHER_R_BAD_KEY_LENGTH);
+    return 0; /* EVP_AEAD_CTX_init should catch this. */
+  }
+
+  if (tag_len == EVP_AEAD_DEFAULT_TAG_LENGTH) {
+    tag_len = EVP_AEAD_AES_GCM_TAG_LEN;
+  }
+
+  if (tag_len > EVP_AEAD_AES_GCM_TAG_LEN) {
+    OPENSSL_PUT_ERROR(CIPHER, aead_aes_gcm_init, CIPHER_R_TAG_TOO_LARGE);
+    return 0;
+  }
+
+  gcm_ctx = OPENSSL_malloc(sizeof(struct aead_aes_gcm_ctx));
+  if (gcm_ctx == NULL) {
+    return 0;
+  }
+
+  if (aesni_capable()) {
+    aesni_set_encrypt_key(key, key_len * 8, &gcm_ctx->ks.ks);
+    CRYPTO_gcm128_init(&gcm_ctx->gcm, &gcm_ctx->ks.ks,
+                       (block128_f)aesni_encrypt);
+    gcm_ctx->ctr = (ctr128_f)aesni_ctr32_encrypt_blocks;
+  } else {
+    gcm_ctx->ctr =
+        aes_gcm_set_key(&gcm_ctx->ks.ks, &gcm_ctx->gcm, key, key_len);
+  }
+  gcm_ctx->tag_len = tag_len;
+  ctx->aead_state = gcm_ctx;
+
+  return 1;
+}
+
+static void aead_aes_gcm_cleanup(EVP_AEAD_CTX *ctx) {
+  struct aead_aes_gcm_ctx *gcm_ctx = ctx->aead_state;
+  OPENSSL_cleanse(gcm_ctx, sizeof(struct aead_aes_gcm_ctx));
+  OPENSSL_free(gcm_ctx);
+}
+
+static int aead_aes_gcm_seal(const EVP_AEAD_CTX *ctx, uint8_t *out,
+                             size_t *out_len, size_t max_out_len,
+                             const uint8_t *nonce, size_t nonce_len,
+                             const uint8_t *in, size_t in_len,
+                             const uint8_t *ad, size_t ad_len) {
+  size_t bulk = 0;
+  const struct aead_aes_gcm_ctx *gcm_ctx = ctx->aead_state;
+  GCM128_CONTEXT gcm;
+
+  if (in_len + gcm_ctx->tag_len < in_len) {
+    OPENSSL_PUT_ERROR(CIPHER, aead_aes_gcm_seal, CIPHER_R_TOO_LARGE);
+    return 0;
+  }
+
+  if (max_out_len < in_len + gcm_ctx->tag_len) {
+    OPENSSL_PUT_ERROR(CIPHER, aead_aes_gcm_seal, CIPHER_R_BUFFER_TOO_SMALL);
+    return 0;
+  }
+
+  memcpy(&gcm, &gcm_ctx->gcm, sizeof(gcm));
+  CRYPTO_gcm128_setiv(&gcm, nonce, nonce_len);
+
+  if (ad_len > 0 && !CRYPTO_gcm128_aad(&gcm, ad, ad_len)) {
+    return 0;
+  }
+
+  if (gcm_ctx->ctr) {
+    if (!CRYPTO_gcm128_encrypt_ctr32(&gcm, in + bulk, out + bulk, in_len - bulk,
+                                     gcm_ctx->ctr)) {
+      return 0;
+    }
+  } else {
+    if (!CRYPTO_gcm128_encrypt(&gcm, in + bulk, out + bulk, in_len - bulk)) {
+      return 0;
+    }
+  }
+
+  CRYPTO_gcm128_tag(&gcm, out + in_len, gcm_ctx->tag_len);
+  *out_len = in_len + gcm_ctx->tag_len;
+  return 1;
+}
+
+static int aead_aes_gcm_open(const EVP_AEAD_CTX *ctx, uint8_t *out,
+                             size_t *out_len, size_t max_out_len,
+                             const uint8_t *nonce, size_t nonce_len,
+                             const uint8_t *in, size_t in_len,
+                             const uint8_t *ad, size_t ad_len) {
+  size_t bulk = 0;
+  const struct aead_aes_gcm_ctx *gcm_ctx = ctx->aead_state;
+  uint8_t tag[EVP_AEAD_AES_GCM_TAG_LEN];
+  size_t plaintext_len;
+  GCM128_CONTEXT gcm;
+
+  if (in_len < gcm_ctx->tag_len) {
+    OPENSSL_PUT_ERROR(CIPHER, aead_aes_gcm_open, CIPHER_R_BAD_DECRYPT);
+    return 0;
+  }
+
+  plaintext_len = in_len - gcm_ctx->tag_len;
+
+  if (max_out_len < plaintext_len) {
+    OPENSSL_PUT_ERROR(CIPHER, aead_aes_gcm_open, CIPHER_R_BUFFER_TOO_SMALL);
+    return 0;
+  }
+
+  memcpy(&gcm, &gcm_ctx->gcm, sizeof(gcm));
+  CRYPTO_gcm128_setiv(&gcm, nonce, nonce_len);
+
+  if (!CRYPTO_gcm128_aad(&gcm, ad, ad_len)) {
+    return 0;
+  }
+
+  if (gcm_ctx->ctr) {
+    if (!CRYPTO_gcm128_decrypt_ctr32(&gcm, in + bulk, out + bulk,
+                                     in_len - bulk - gcm_ctx->tag_len,
+                                     gcm_ctx->ctr)) {
+      return 0;
+    }
+  } else {
+    if (!CRYPTO_gcm128_decrypt(&gcm, in + bulk, out + bulk,
+                               in_len - bulk - gcm_ctx->tag_len)) {
+      return 0;
+    }
+  }
+
+  CRYPTO_gcm128_tag(&gcm, tag, gcm_ctx->tag_len);
+  if (CRYPTO_memcmp(tag, in + plaintext_len, gcm_ctx->tag_len) != 0) {
+    OPENSSL_PUT_ERROR(CIPHER, aead_aes_gcm_open, CIPHER_R_BAD_DECRYPT);
+    return 0;
+  }
+
+  *out_len = plaintext_len;
+  return 1;
+}
+
+static const EVP_AEAD aead_aes_128_gcm = {
+    16,                       /* key len */
+    12,                       /* nonce len */
+    EVP_AEAD_AES_GCM_TAG_LEN, /* overhead */
+    EVP_AEAD_AES_GCM_TAG_LEN, /* max tag length */
+    aead_aes_gcm_init,        aead_aes_gcm_cleanup,
+    aead_aes_gcm_seal,        aead_aes_gcm_open,
+};
+
+static const EVP_AEAD aead_aes_256_gcm = {
+    32,                       /* key len */
+    12,                       /* nonce len */
+    EVP_AEAD_AES_GCM_TAG_LEN, /* overhead */
+    EVP_AEAD_AES_GCM_TAG_LEN, /* max tag length */
+    aead_aes_gcm_init,        aead_aes_gcm_cleanup,
+    aead_aes_gcm_seal,        aead_aes_gcm_open,
+};
+
+const EVP_AEAD *EVP_aead_aes_128_gcm(void) { return &aead_aes_128_gcm; }
+
+const EVP_AEAD *EVP_aead_aes_256_gcm(void) { return &aead_aes_256_gcm; }
diff --git a/crypto/cipher/internal.h b/crypto/cipher/internal.h
index 1edc059..d46a9e6 100644
--- a/crypto/cipher/internal.h
+++ b/crypto/cipher/internal.h
@@ -106,6 +106,30 @@
 #define EVP_CIPH_MODE_MASK 0x3f
 
 
+struct evp_aead_ctx_st;
+
+/* EVP_AEAD represents a specific AEAD algorithm. */
+struct evp_aead_st {
+  uint8_t key_len;
+  uint8_t nonce_len;
+  uint8_t overhead;
+  uint8_t max_tag_len;
+
+  int (*init)(struct evp_aead_ctx_st *, const uint8_t *key,
+              size_t key_len, size_t tag_len);
+  void (*cleanup)(struct evp_aead_ctx_st *);
+
+  int (*seal)(const struct evp_aead_ctx_st *ctx, uint8_t *out,
+              size_t *out_len, size_t max_out_len, const uint8_t *nonce,
+              size_t nonce_len, const uint8_t *in, size_t in_len,
+              const uint8_t *ad, size_t ad_len);
+
+  int (*open)(const struct evp_aead_ctx_st *ctx, uint8_t *out,
+              size_t *out_len, size_t max_out_len, const uint8_t *nonce,
+              size_t nonce_len, const uint8_t *in, size_t in_len,
+              const uint8_t *ad, size_t ad_len);
+};
+
 #if defined(__cplusplus)
 } /* extern C */
 #endif
diff --git a/crypto/evp/evp.h b/crypto/evp/evp.h
index ce40aa5..c6f5ab3 100644
--- a/crypto/evp/evp.h
+++ b/crypto/evp/evp.h
@@ -61,7 +61,10 @@
 #include <openssl/stack.h>
 
 /* OpenSSL included digest and cipher functions in this header so we include
- * them for users that still expect that. */
+ * them for users that still expect that.
+ *
+ * TODO(fork): clean up callers so that they include what they use. */
+#include <openssl/aead.h>
 #include <openssl/cipher.h>
 #include <openssl/digest.h>
 #include <openssl/mem.h>
diff --git a/include/openssl/aead.h b/include/openssl/aead.h
new file mode 120000
index 0000000..d90b92d
--- /dev/null
+++ b/include/openssl/aead.h
@@ -0,0 +1 @@
+../../crypto/cipher/aead.h
\ No newline at end of file