Use C99 for size_t loops.

This was done just by grepping for 'size_t i;' and 'size_t j;'. I left
everything in crypto/x509 and friends alone.

There are some instances in gcm.c that are non-trivial and have been pulled
into a separate CL for ease of review.

Change-Id: I6515804e3097f7e90855f1e7610868ee87117223
Reviewed-on: https://boringssl-review.googlesource.com/10801
Reviewed-by: Adam Langley <agl@google.com>
Commit-Queue: Adam Langley <agl@google.com>
CQ-Verified: CQ bot account: commit-bot@chromium.org <commit-bot@chromium.org>
diff --git a/crypto/base64/base64_test.cc b/crypto/base64/base64_test.cc
index 32b44f6..f8af66c 100644
--- a/crypto/base64/base64_test.cc
+++ b/crypto/base64/base64_test.cc
@@ -107,8 +107,7 @@
   std::string ret;
   const size_t in_len = strlen(in);
 
-  size_t i;
-  for (i = 0; i < in_len; i++) {
+  for (size_t i = 0; i < in_len; i++) {
     if (in[i] != '\n') {
       ret.push_back(in[i]);
     }
diff --git a/crypto/bio/hexdump.c b/crypto/bio/hexdump.c
index 17f5518..8c35114 100644
--- a/crypto/bio/hexdump.c
+++ b/crypto/bio/hexdump.c
@@ -86,7 +86,6 @@
  * |ctx|. */
 static int hexdump_write(struct hexdump_ctx *ctx, const uint8_t *data,
                          size_t len) {
-  size_t i;
   char buf[10];
   unsigned l;
 
@@ -95,7 +94,7 @@
    * ^ offset                          ^ extra space           ^ ASCII of line
    */
 
-  for (i = 0; i < len; i++) {
+  for (size_t i = 0; i < len; i++) {
     if (ctx->used == 0) {
       /* The beginning of a line. */
       BIO_indent(ctx->bio, ctx->indent, UINT_MAX);
diff --git a/crypto/bn/convert.c b/crypto/bn/convert.c
index 05e27bf..8f4b964 100644
--- a/crypto/bn/convert.c
+++ b/crypto/bn/convert.c
@@ -160,9 +160,6 @@
 }
 
 int BN_bn2bin_padded(uint8_t *out, size_t len, const BIGNUM *in) {
-  size_t i;
-  BN_ULONG l;
-
   /* Special case for |in| = 0. Just branch as the probability is negligible. */
   if (BN_is_zero(in)) {
     memset(out, 0, len);
@@ -175,7 +172,7 @@
     return 0;
   }
   if ((len % BN_BYTES) != 0) {
-    l = read_word_padded(in, len / BN_BYTES);
+    BN_ULONG l = read_word_padded(in, len / BN_BYTES);
     if (l >> (8 * (len % BN_BYTES)) != 0) {
       return 0;
     }
@@ -188,9 +185,9 @@
    * leading zero octets is low.
    *
    * See Falko Stenzke, "Manger's Attack revisited", ICICS 2010. */
-  i = len;
+  size_t i = len;
   while (i--) {
-    l = read_word_padded(in, i / BN_BYTES);
+    BN_ULONG l = read_word_padded(in, i / BN_BYTES);
     *(out++) = (uint8_t)(l >> (8 * (i % BN_BYTES))) & 0xff;
   }
   return 1;
diff --git a/crypto/bytestring/cbb.c b/crypto/bytestring/cbb.c
index 0d97c8a..2d65be8 100644
--- a/crypto/bytestring/cbb.c
+++ b/crypto/bytestring/cbb.c
@@ -142,17 +142,16 @@
 
 static int cbb_buffer_add_u(struct cbb_buffer_st *base, uint32_t v,
                             size_t len_len) {
-  uint8_t *buf;
-  size_t i;
-
   if (len_len == 0) {
     return 1;
   }
+
+  uint8_t *buf;
   if (!cbb_buffer_add(base, &buf, len_len)) {
     return 0;
   }
 
-  for (i = len_len - 1; i < len_len; i--) {
+  for (size_t i = len_len - 1; i < len_len; i--) {
     buf[i] = v;
     v >>= 8;
   }
@@ -440,14 +439,13 @@
 
 int CBB_add_asn1_uint64(CBB *cbb, uint64_t value) {
   CBB child;
-  size_t i;
   int started = 0;
 
   if (!CBB_add_asn1(cbb, &child, CBS_ASN1_INTEGER)) {
     return 0;
   }
 
-  for (i = 0; i < 8; i++) {
+  for (size_t i = 0; i < 8; i++) {
     uint8_t byte = (value >> 8*(7-i)) & 0xff;
     if (!started) {
       if (byte == 0) {
diff --git a/crypto/bytestring/cbs.c b/crypto/bytestring/cbs.c
index c86afbd..a7adc43 100644
--- a/crypto/bytestring/cbs.c
+++ b/crypto/bytestring/cbs.c
@@ -88,13 +88,12 @@
 
 static int cbs_get_u(CBS *cbs, uint32_t *out, size_t len) {
   uint32_t result = 0;
-  size_t i;
   const uint8_t *data;
 
   if (!cbs_get(cbs, &data, len)) {
     return 0;
   }
-  for (i = 0; i < len; i++) {
+  for (size_t i = 0; i < len; i++) {
     result <<= 8;
     result |= data[i];
   }
diff --git a/crypto/cipher/e_aes.c b/crypto/cipher/e_aes.c
index 24c4d8a..ddbee84 100644
--- a/crypto/cipher/e_aes.c
+++ b/crypto/cipher/e_aes.c
@@ -353,14 +353,14 @@
 static int aes_ecb_cipher(EVP_CIPHER_CTX *ctx, uint8_t *out, const uint8_t *in,
                           size_t len) {
   size_t bl = ctx->cipher->block_size;
-  size_t i;
   EVP_AES_KEY *dat = (EVP_AES_KEY *)ctx->cipher_data;
 
   if (len < bl) {
     return 1;
   }
 
-  for (i = 0, len -= bl; i <= len; i += bl) {
+  len -= bl;
+  for (size_t i = 0; i <= len; i += bl) {
     (*dat->block)(in + i, out + i, &dat->ks);
   }
 
diff --git a/crypto/cipher/e_des.c b/crypto/cipher/e_des.c
index 2ba2bed..6834a42 100644
--- a/crypto/cipher/e_des.c
+++ b/crypto/cipher/e_des.c
@@ -104,8 +104,7 @@
   in_len -= ctx->cipher->block_size;
 
   EVP_DES_KEY *dat = (EVP_DES_KEY *) ctx->cipher_data;
-  size_t i;
-  for (i = 0; i <= in_len; i += ctx->cipher->block_size) {
+  for (size_t i = 0; i <= in_len; i += ctx->cipher->block_size) {
     DES_ecb_encrypt((DES_cblock *) (in + i), (DES_cblock *) (out + i),
                     &dat->ks.ks, ctx->encrypt);
   }
@@ -189,8 +188,7 @@
   in_len -= ctx->cipher->block_size;
 
   DES_EDE_KEY *dat = (DES_EDE_KEY *) ctx->cipher_data;
-  size_t i;
-  for (i = 0; i <= in_len; i += ctx->cipher->block_size) {
+  for (size_t i = 0; i <= in_len; i += ctx->cipher->block_size) {
     DES_ecb3_encrypt((DES_cblock *) (in + i), (DES_cblock *) (out + i),
                      &dat->ks.ks[0], &dat->ks.ks[1], &dat->ks.ks[2],
                      ctx->encrypt);
diff --git a/crypto/digest/digest_test.cc b/crypto/digest/digest_test.cc
index 70fa483..ecf0308 100644
--- a/crypto/digest/digest_test.cc
+++ b/crypto/digest/digest_test.cc
@@ -142,10 +142,9 @@
                           const uint8_t *digest,
                           size_t digest_len) {
   static const char kHexTable[] = "0123456789abcdef";
-  size_t i;
   char digest_hex[2*EVP_MAX_MD_SIZE + 1];
 
-  for (i = 0; i < digest_len; i++) {
+  for (size_t i = 0; i < digest_len; i++) {
     digest_hex[2*i] = kHexTable[digest[i] >> 4];
     digest_hex[2*i + 1] = kHexTable[digest[i] & 0xf];
   }
diff --git a/crypto/ec/ec.c b/crypto/ec/ec.c
index 271fb50..7e76dfe 100644
--- a/crypto/ec/ec.c
+++ b/crypto/ec/ec.c
@@ -709,9 +709,7 @@
 
 int EC_POINTs_make_affine(const EC_GROUP *group, size_t num, EC_POINT *points[],
                           BN_CTX *ctx) {
-  size_t i;
-
-  for (i = 0; i < num; i++) {
+  for (size_t i = 0; i < num; i++) {
     if (group->meth != points[i]->meth) {
       OPENSSL_PUT_ERROR(EC, EC_R_INCOMPATIBLE_OBJECTS);
       return 0;
diff --git a/crypto/ec/p224-64.c b/crypto/ec/p224-64.c
index be85ad6..825bbc3 100644
--- a/crypto/ec/p224-64.c
+++ b/crypto/ec/p224-64.c
@@ -192,8 +192,7 @@
 }
 
 static void felem_to_bin28(u8 out[28], const felem in) {
-  size_t i;
-  for (i = 0; i < 7; ++i) {
+  for (size_t i = 0; i < 7; ++i) {
     out[i] = in[0] >> (8 * i);
     out[i + 7] = in[1] >> (8 * i);
     out[i + 14] = in[2] >> (8 * i);
@@ -203,8 +202,7 @@
 
 /* To preserve endianness when using BN_bn2bin and BN_bin2bn */
 static void flip_endian(u8 *out, const u8 *in, size_t len) {
-  size_t i;
-  for (i = 0; i < len; ++i) {
+  for (size_t i = 0; i < len; ++i) {
     out[i] = in[len - 1 - i];
   }
 }
@@ -524,7 +522,6 @@
 static void felem_inv(felem out, const felem in) {
   felem ftmp, ftmp2, ftmp3, ftmp4;
   widefelem tmp;
-  size_t i;
 
   felem_square(tmp, in);
   felem_reduce(ftmp, tmp); /* 2 */
@@ -544,7 +541,7 @@
   felem_reduce(ftmp, tmp); /* 2^6 - 1 */
   felem_square(tmp, ftmp);
   felem_reduce(ftmp2, tmp); /* 2^7 - 2 */
-  for (i = 0; i < 5; ++i) { /* 2^12 - 2^6 */
+  for (size_t i = 0; i < 5; ++i) { /* 2^12 - 2^6 */
     felem_square(tmp, ftmp2);
     felem_reduce(ftmp2, tmp);
   }
@@ -552,7 +549,7 @@
   felem_reduce(ftmp2, tmp); /* 2^12 - 1 */
   felem_square(tmp, ftmp2);
   felem_reduce(ftmp3, tmp); /* 2^13 - 2 */
-  for (i = 0; i < 11; ++i) {/* 2^24 - 2^12 */
+  for (size_t i = 0; i < 11; ++i) {/* 2^24 - 2^12 */
     felem_square(tmp, ftmp3);
     felem_reduce(ftmp3, tmp);
   }
@@ -560,7 +557,7 @@
   felem_reduce(ftmp2, tmp); /* 2^24 - 1 */
   felem_square(tmp, ftmp2);
   felem_reduce(ftmp3, tmp); /* 2^25 - 2 */
-  for (i = 0; i < 23; ++i) {/* 2^48 - 2^24 */
+  for (size_t i = 0; i < 23; ++i) {/* 2^48 - 2^24 */
     felem_square(tmp, ftmp3);
     felem_reduce(ftmp3, tmp);
   }
@@ -568,7 +565,7 @@
   felem_reduce(ftmp3, tmp); /* 2^48 - 1 */
   felem_square(tmp, ftmp3);
   felem_reduce(ftmp4, tmp); /* 2^49 - 2 */
-  for (i = 0; i < 47; ++i) {/* 2^96 - 2^48 */
+  for (size_t i = 0; i < 47; ++i) {/* 2^96 - 2^48 */
     felem_square(tmp, ftmp4);
     felem_reduce(ftmp4, tmp);
   }
@@ -576,13 +573,13 @@
   felem_reduce(ftmp3, tmp); /* 2^96 - 1 */
   felem_square(tmp, ftmp3);
   felem_reduce(ftmp4, tmp); /* 2^97 - 2 */
-  for (i = 0; i < 23; ++i) {/* 2^120 - 2^24 */
+  for (size_t i = 0; i < 23; ++i) {/* 2^120 - 2^24 */
     felem_square(tmp, ftmp4);
     felem_reduce(ftmp4, tmp);
   }
   felem_mul(tmp, ftmp2, ftmp4);
   felem_reduce(ftmp2, tmp); /* 2^120 - 1 */
-  for (i = 0; i < 6; ++i) { /* 2^126 - 2^6 */
+  for (size_t i = 0; i < 6; ++i) { /* 2^126 - 2^6 */
     felem_square(tmp, ftmp2);
     felem_reduce(ftmp2, tmp);
   }
@@ -592,7 +589,7 @@
   felem_reduce(ftmp, tmp); /* 2^127 - 2 */
   felem_mul(tmp, ftmp, in);
   felem_reduce(ftmp, tmp); /* 2^127 - 1 */
-  for (i = 0; i < 97; ++i) {/* 2^224 - 2^97 */
+  for (size_t i = 0; i < 97; ++i) {/* 2^224 - 2^97 */
     felem_square(tmp, ftmp);
     felem_reduce(ftmp, tmp);
   }
@@ -604,10 +601,9 @@
  * if icopy == 1, copy in to out,
  * if icopy == 0, copy out to itself. */
 static void copy_conditional(felem out, const felem in, limb icopy) {
-  size_t i;
   /* icopy is a (64-bit) 0 or 1, so copy is either all-zero or all-one */
   const limb copy = -icopy;
-  for (i = 0; i < 4; ++i) {
+  for (size_t i = 0; i < 4; ++i) {
     const limb tmp = copy & (in[i] ^ out[i]);
     out[i] ^= tmp;
   }
@@ -866,8 +862,7 @@
   limb *outlimbs = &out[0][0];
   memset(outlimbs, 0, 3 * sizeof(felem));
 
-  size_t i;
-  for (i = 0; i < size; i++) {
+  for (size_t i = 0; i < size; i++) {
     const limb *inlimbs = &pre_comp[i][0][0];
     u64 mask = i ^ idx;
     mask |= mask >> 4;
@@ -875,8 +870,7 @@
     mask |= mask >> 1;
     mask &= 1;
     mask--;
-    size_t j;
-    for (j = 0; j < 4 * 3; j++) {
+    for (size_t j = 0; j < 4 * 3; j++) {
       outlimbs[j] |= inlimbs[j] & mask;
     }
   }
@@ -1082,8 +1076,7 @@
      * i.e., they contribute nothing to the linear combination */
     memset(secrets, 0, num_points * sizeof(felem_bytearray));
     memset(pre_comp, 0, num_points * 17 * 3 * sizeof(felem));
-    size_t i;
-    for (i = 0; i < num_points; ++i) {
+    for (size_t i = 0; i < num_points; ++i) {
       if (i == num) {
         /* the generator */
         p = EC_GROUP_get0_generator(group);
@@ -1121,8 +1114,7 @@
         felem_assign(pre_comp[i][1][1], y_out);
         felem_assign(pre_comp[i][1][2], z_out);
 
-        size_t j;
-        for (j = 2; j <= 16; ++j) {
+        for (size_t j = 2; j <= 16; ++j) {
           if (j & 1) {
             point_add(pre_comp[i][j][0], pre_comp[i][j][1], pre_comp[i][j][2],
                       pre_comp[i][1][0], pre_comp[i][1][1], pre_comp[i][1][2],
diff --git a/crypto/ec/p256-64.c b/crypto/ec/p256-64.c
index 6a57a73..a0e4df5 100644
--- a/crypto/ec/p256-64.c
+++ b/crypto/ec/p256-64.c
@@ -94,8 +94,7 @@
 
 /* To preserve endianness when using BN_bn2bin and BN_bin2bn. */
 static void flip_endian(u8 *out, const u8 *in, size_t len) {
-  size_t i;
-  for (i = 0; i < len; ++i) {
+  for (size_t i = 0; i < len; ++i) {
     out[i] = in[len - 1 - i];
   }
 }
@@ -719,8 +718,7 @@
    * each u64, from most-significant to least significant. For each one, if
    * all words so far have been equal (m is all ones) then a non-equal
    * result is the answer. Otherwise we continue. */
-  size_t i;
-  for (i = 3; i < 4; i--) {
+  for (size_t i = 3; i < 4; i--) {
     u64 equal;
     uint128_t a = ((uint128_t)kPrime[i]) - out[i];
     /* if out[i] > kPrime[i] then a will underflow and the high 64-bits
@@ -810,7 +808,6 @@
   /* each e_I will hold |in|^{2^I - 1} */
   felem e2, e4, e8, e16, e32, e64;
   longfelem tmp;
-  size_t i;
 
   felem_square(tmp, in);
   felem_reduce(ftmp, tmp); /* 2^1 */
@@ -835,47 +832,47 @@
   felem_mul(tmp, ftmp, e4);
   felem_reduce(ftmp, tmp); /* 2^8 - 2^0 */
   felem_assign(e8, ftmp);
-  for (i = 0; i < 8; i++) {
+  for (size_t i = 0; i < 8; i++) {
     felem_square(tmp, ftmp);
     felem_reduce(ftmp, tmp);
   } /* 2^16 - 2^8 */
   felem_mul(tmp, ftmp, e8);
   felem_reduce(ftmp, tmp); /* 2^16 - 2^0 */
   felem_assign(e16, ftmp);
-  for (i = 0; i < 16; i++) {
+  for (size_t i = 0; i < 16; i++) {
     felem_square(tmp, ftmp);
     felem_reduce(ftmp, tmp);
   } /* 2^32 - 2^16 */
   felem_mul(tmp, ftmp, e16);
   felem_reduce(ftmp, tmp); /* 2^32 - 2^0 */
   felem_assign(e32, ftmp);
-  for (i = 0; i < 32; i++) {
+  for (size_t i = 0; i < 32; i++) {
     felem_square(tmp, ftmp);
     felem_reduce(ftmp, tmp);
   } /* 2^64 - 2^32 */
   felem_assign(e64, ftmp);
   felem_mul(tmp, ftmp, in);
   felem_reduce(ftmp, tmp); /* 2^64 - 2^32 + 2^0 */
-  for (i = 0; i < 192; i++) {
+  for (size_t i = 0; i < 192; i++) {
     felem_square(tmp, ftmp);
     felem_reduce(ftmp, tmp);
   } /* 2^256 - 2^224 + 2^192 */
 
   felem_mul(tmp, e64, e32);
   felem_reduce(ftmp2, tmp); /* 2^64 - 2^0 */
-  for (i = 0; i < 16; i++) {
+  for (size_t i = 0; i < 16; i++) {
     felem_square(tmp, ftmp2);
     felem_reduce(ftmp2, tmp);
   } /* 2^80 - 2^16 */
   felem_mul(tmp, ftmp2, e16);
   felem_reduce(ftmp2, tmp); /* 2^80 - 2^0 */
-  for (i = 0; i < 8; i++) {
+  for (size_t i = 0; i < 8; i++) {
     felem_square(tmp, ftmp2);
     felem_reduce(ftmp2, tmp);
   } /* 2^88 - 2^8 */
   felem_mul(tmp, ftmp2, e8);
   felem_reduce(ftmp2, tmp); /* 2^88 - 2^0 */
-  for (i = 0; i < 4; i++) {
+  for (size_t i = 0; i < 4; i++) {
     felem_square(tmp, ftmp2);
     felem_reduce(ftmp2, tmp);
   } /* 2^92 - 2^4 */
@@ -1008,8 +1005,7 @@
 
 /* copy_conditional copies in to out iff mask is all ones. */
 static void copy_conditional(felem out, const felem in, limb mask) {
-  size_t i;
-  for (i = 0; i < NLIMBS; ++i) {
+  for (size_t i = 0; i < NLIMBS; ++i) {
     const limb tmp = mask & (in[i] ^ out[i]);
     out[i] ^= tmp;
   }
@@ -1017,9 +1013,8 @@
 
 /* copy_small_conditional copies in to out iff mask is all ones. */
 static void copy_small_conditional(felem out, const smallfelem in, limb mask) {
-  size_t i;
   const u64 mask64 = mask;
-  for (i = 0; i < NLIMBS; ++i) {
+  for (size_t i = 0; i < NLIMBS; ++i) {
     out[i] = ((limb)(in[i] & mask64)) | (out[i] & ~mask);
   }
 }
@@ -1407,8 +1402,7 @@
   u64 *outlimbs = &out[0][0];
   memset(outlimbs, 0, 3 * sizeof(smallfelem));
 
-  size_t i;
-  for (i = 0; i < size; i++) {
+  for (size_t i = 0; i < size; i++) {
     const u64 *inlimbs = (const u64 *)&pre_comp[i][0][0];
     u64 mask = i ^ idx;
     mask |= mask >> 4;
@@ -1416,8 +1410,7 @@
     mask |= mask >> 1;
     mask &= 1;
     mask--;
-    size_t j;
-    for (j = 0; j < NLIMBS * 3; j++) {
+    for (size_t j = 0; j < NLIMBS * 3; j++) {
       outlimbs[j] |= inlimbs[j] & mask;
     }
   }
@@ -1639,8 +1632,7 @@
      * i.e., they contribute nothing to the linear combination. */
     memset(secrets, 0, num_points * sizeof(felem_bytearray));
     memset(pre_comp, 0, num_points * 17 * 3 * sizeof(smallfelem));
-    size_t i;
-    for (i = 0; i < num_points; ++i) {
+    for (size_t i = 0; i < num_points; ++i) {
       if (i == num) {
         /* we didn't have a valid precomputation, so we pick the generator. */
         p = EC_GROUP_get0_generator(group);
@@ -1674,8 +1666,7 @@
         felem_shrink(pre_comp[i][1][0], x_out);
         felem_shrink(pre_comp[i][1][1], y_out);
         felem_shrink(pre_comp[i][1][2], z_out);
-        size_t j;
-        for (j = 2; j <= 16; ++j) {
+        for (size_t j = 2; j <= 16; ++j) {
           if (j & 1) {
             point_add_small(pre_comp[i][j][0], pre_comp[i][j][1],
                             pre_comp[i][j][2], pre_comp[i][1][0],
diff --git a/crypto/ec/simple.c b/crypto/ec/simple.c
index 67c4602..a1e6229 100644
--- a/crypto/ec/simple.c
+++ b/crypto/ec/simple.c
@@ -964,7 +964,6 @@
   BN_CTX *new_ctx = NULL;
   BIGNUM *tmp, *tmp_Z;
   BIGNUM **prod_Z = NULL;
-  size_t i;
   int ret = 0;
 
   if (num == 0) {
@@ -990,7 +989,7 @@
     goto err;
   }
   memset(prod_Z, 0, num * sizeof(prod_Z[0]));
-  for (i = 0; i < num; i++) {
+  for (size_t i = 0; i < num; i++) {
     prod_Z[i] = BN_new();
     if (prod_Z[i] == NULL) {
       goto err;
@@ -1010,7 +1009,7 @@
     }
   }
 
-  for (i = 1; i < num; i++) {
+  for (size_t i = 1; i < num; i++) {
     if (!BN_is_zero(&points[i]->Z)) {
       if (!group->meth->field_mul(group, prod_Z[i], prod_Z[i - 1],
                                   &points[i]->Z, ctx)) {
@@ -1047,7 +1046,7 @@
     }
   }
 
-  for (i = num - 1; i > 0; --i) {
+  for (size_t i = num - 1; i > 0; --i) {
     /* Loop invariant: tmp is the product of the inverses of
      * points[0]->Z .. points[i]->Z (zero-valued inputs skipped). */
     if (BN_is_zero(&points[i]->Z)) {
@@ -1071,7 +1070,7 @@
   }
 
   /* Finally, fix up the X and Y coordinates for all points. */
-  for (i = 0; i < num; i++) {
+  for (size_t i = 0; i < num; i++) {
     EC_POINT *p = points[i];
 
     if (!BN_is_zero(&p->Z)) {
@@ -1095,7 +1094,7 @@
   BN_CTX_end(ctx);
   BN_CTX_free(new_ctx);
   if (prod_Z != NULL) {
-    for (i = 0; i < num; i++) {
+    for (size_t i = 0; i < num; i++) {
       if (prod_Z[i] == NULL) {
         break;
       }
diff --git a/crypto/evp/print.c b/crypto/evp/print.c
index 53527b4..b2e3509 100644
--- a/crypto/evp/print.c
+++ b/crypto/evp/print.c
@@ -121,15 +121,13 @@
 }
 
 static void update_buflen(const BIGNUM *b, size_t *pbuflen) {
-  size_t i;
-
   if (!b) {
     return;
   }
 
-  i = BN_num_bytes(b);
-  if (*pbuflen < i) {
-    *pbuflen = i;
+  size_t len = BN_num_bytes(b);
+  if (*pbuflen < len) {
+    *pbuflen = len;
   }
 }
 
@@ -154,10 +152,8 @@
     update_buflen(rsa->iqmp, &buf_len);
 
     if (rsa->additional_primes != NULL) {
-      size_t i;
-
-      for (i = 0; i < sk_RSA_additional_prime_num(rsa->additional_primes);
-           i++) {
+      for (size_t i = 0;
+           i < sk_RSA_additional_prime_num(rsa->additional_primes); i++) {
         const RSA_additional_prime *ap =
             sk_RSA_additional_prime_value(rsa->additional_primes, i);
         update_buflen(ap->prime, &buf_len);
@@ -211,13 +207,11 @@
 
     if (rsa->additional_primes != NULL &&
         sk_RSA_additional_prime_num(rsa->additional_primes) > 0) {
-      size_t i;
-
       if (BIO_printf(out, "otherPrimeInfos:\n") <= 0) {
         goto err;
       }
-      for (i = 0; i < sk_RSA_additional_prime_num(rsa->additional_primes);
-           i++) {
+      for (size_t i = 0;
+           i < sk_RSA_additional_prime_num(rsa->additional_primes); i++) {
         const RSA_additional_prime *ap =
             sk_RSA_additional_prime_value(rsa->additional_primes, i);
 
@@ -483,8 +477,7 @@
 static size_t kPrintMethodsLen = OPENSSL_ARRAY_SIZE(kPrintMethods);
 
 static EVP_PKEY_PRINT_METHOD *find_method(int type) {
-  size_t i;
-  for (i = 0; i < kPrintMethodsLen; i++) {
+  for (size_t i = 0; i < kPrintMethodsLen; i++) {
     if (kPrintMethods[i].type == type) {
       return &kPrintMethods[i];
     }
diff --git a/crypto/ex_data.c b/crypto/ex_data.c
index d67abba..5286513 100644
--- a/crypto/ex_data.c
+++ b/crypto/ex_data.c
@@ -244,8 +244,7 @@
     return 0;
   }
 
-  size_t i;
-  for (i = 0; i < sk_CRYPTO_EX_DATA_FUNCS_num(func_pointers); i++) {
+  for (size_t i = 0; i < sk_CRYPTO_EX_DATA_FUNCS_num(func_pointers); i++) {
     CRYPTO_EX_DATA_FUNCS *func_pointer =
         sk_CRYPTO_EX_DATA_FUNCS_value(func_pointers, i);
     void *ptr = CRYPTO_get_ex_data(from, i + ex_data_class->num_reserved);
@@ -274,8 +273,7 @@
     return;
   }
 
-  size_t i;
-  for (i = 0; i < sk_CRYPTO_EX_DATA_FUNCS_num(func_pointers); i++) {
+  for (size_t i = 0; i < sk_CRYPTO_EX_DATA_FUNCS_num(func_pointers); i++) {
     CRYPTO_EX_DATA_FUNCS *func_pointer =
         sk_CRYPTO_EX_DATA_FUNCS_value(func_pointers, i);
     if (func_pointer->free_func) {
diff --git a/crypto/hmac/hmac.c b/crypto/hmac/hmac.c
index bccc5c0..2eae9e8 100644
--- a/crypto/hmac/hmac.c
+++ b/crypto/hmac/hmac.c
@@ -115,7 +115,6 @@
    * exist callers which intend the latter, but the former is an awkward edge
    * case. Fix to API to avoid this. */
   if (md != ctx->md || key != NULL) {
-    size_t i;
     uint8_t pad[EVP_MAX_MD_BLOCK_SIZE];
     uint8_t key_block[EVP_MAX_MD_BLOCK_SIZE];
     unsigned key_block_len;
@@ -139,7 +138,7 @@
       memset(&key_block[key_block_len], 0, sizeof(key_block) - key_block_len);
     }
 
-    for (i = 0; i < EVP_MAX_MD_BLOCK_SIZE; i++) {
+    for (size_t i = 0; i < EVP_MAX_MD_BLOCK_SIZE; i++) {
       pad[i] = 0x36 ^ key_block[i];
     }
     if (!EVP_DigestInit_ex(&ctx->i_ctx, md, impl) ||
@@ -147,7 +146,7 @@
       return 0;
     }
 
-    for (i = 0; i < EVP_MAX_MD_BLOCK_SIZE; i++) {
+    for (size_t i = 0; i < EVP_MAX_MD_BLOCK_SIZE; i++) {
       pad[i] = 0x5c ^ key_block[i];
     }
     if (!EVP_DigestInit_ex(&ctx->o_ctx, md, impl) ||
diff --git a/crypto/lhash/lhash.c b/crypto/lhash/lhash.c
index 257900e..233f34f 100644
--- a/crypto/lhash/lhash.c
+++ b/crypto/lhash/lhash.c
@@ -100,15 +100,13 @@
 }
 
 void lh_free(_LHASH *lh) {
-  size_t i;
-  LHASH_ITEM *n, *next;
-
   if (lh == NULL) {
     return;
   }
 
-  for (i = 0; i < lh->num_buckets; i++) {
-    for (n = lh->buckets[i]; n != NULL; n = next) {
+  for (size_t i = 0; i < lh->num_buckets; i++) {
+    LHASH_ITEM *next;
+    for (LHASH_ITEM *n = lh->buckets[i]; n != NULL; n = next) {
       next = n->next;
       OPENSSL_free(n);
     }
@@ -277,9 +275,6 @@
 
 static void lh_doall_internal(_LHASH *lh, void (*no_arg_func)(void *),
                               void (*arg_func)(void *, void *), void *arg) {
-  size_t i;
-  LHASH_ITEM *cur, *next;
-
   if (lh == NULL) {
     return;
   }
@@ -289,8 +284,9 @@
     lh->callback_depth++;
   }
 
-  for (i = 0; i < lh->num_buckets; i++) {
-    for (cur = lh->buckets[i]; cur != NULL; cur = next) {
+  for (size_t i = 0; i < lh->num_buckets; i++) {
+    LHASH_ITEM *next;
+    for (LHASH_ITEM *cur = lh->buckets[i]; cur != NULL; cur = next) {
       next = cur->next;
       if (arg_func) {
         arg_func(cur->data, arg);
diff --git a/crypto/modes/gcm.c b/crypto/modes/gcm.c
index b857131..04c3211 100644
--- a/crypto/modes/gcm.c
+++ b/crypto/modes/gcm.c
@@ -516,11 +516,10 @@
     ctx->Yi.c[15] = 1;
     ctr = 1;
   } else {
-    size_t i;
     uint64_t len0 = len;
 
     while (len >= 16) {
-      for (i = 0; i < 16; ++i) {
+      for (size_t i = 0; i < 16; ++i) {
         ctx->Yi.c[i] ^= iv[i];
       }
       GCM_MUL(ctx, Yi);
@@ -528,7 +527,7 @@
       len -= 16;
     }
     if (len) {
-      for (i = 0; i < len; ++i) {
+      for (size_t i = 0; i < len; ++i) {
         ctx->Yi.c[i] ^= iv[i];
       }
       GCM_MUL(ctx, Yi);
diff --git a/crypto/modes/gcm_test.c b/crypto/modes/gcm_test.c
index 19c295b..6f9d528 100644
--- a/crypto/modes/gcm_test.c
+++ b/crypto/modes/gcm_test.c
@@ -257,9 +257,6 @@
 
 static int decode_hex(uint8_t **out, size_t *out_len, const char *in,
                       unsigned test_num, const char *description) {
-  uint8_t *buf = NULL;
-  size_t i;
-
   if (in == NULL) {
     *out = NULL;
     *out_len = 0;
@@ -269,16 +266,16 @@
   size_t len = strlen(in);
   if (len & 1) {
     fprintf(stderr, "%u: Odd-length %s input.\n", test_num, description);
-    goto err;
+    return 0;
   }
 
-  buf = OPENSSL_malloc(len / 2);
+  uint8_t *buf = OPENSSL_malloc(len / 2);
   if (buf == NULL) {
     fprintf(stderr, "%u: malloc failure.\n", test_num);
     goto err;
   }
 
-  for (i = 0; i < len; i += 2) {
+  for (size_t i = 0; i < len; i += 2) {
     uint8_t v, v2;
     if (!from_hex(&v, in[i]) ||
         !from_hex(&v2, in[i+1])) {
diff --git a/crypto/newhope/poly.c b/crypto/newhope/poly.c
index 44cd383..ca37bc2 100644
--- a/crypto/newhope/poly.c
+++ b/crypto/newhope/poly.c
@@ -28,8 +28,7 @@
 extern uint16_t newhope_psis_inv_montgomery[];
 
 void NEWHOPE_POLY_frombytes(NEWHOPE_POLY* r, const uint8_t* a) {
-  int i;
-  for (i = 0; i < PARAM_N / 4; i++) {
+  for (int i = 0; i < PARAM_N / 4; i++) {
     r->coeffs[4 * i + 0] =
         a[7 * i + 0] | (((uint16_t)a[7 * i + 1] & 0x3f) << 8);
     r->coeffs[4 * i + 1] = (a[7 * i + 1] >> 6) |
@@ -44,10 +43,9 @@
 }
 
 void NEWHOPE_POLY_tobytes(uint8_t* r, const NEWHOPE_POLY* p) {
-  int i;
   uint16_t t0, t1, t2, t3, m;
   int16_t c;
-  for (i = 0; i < PARAM_N / 4; i++) {
+  for (int i = 0; i < PARAM_N / 4; i++) {
     t0 = newhope_barrett_reduce(
         p->coeffs[4 * i + 0]); /* Make sure that coefficients
                           have only 14 bits */
@@ -136,13 +134,11 @@
   /* The reference implementation calls ChaCha20 here. */
   RAND_bytes((uint8_t *) tp, sizeof(tp));
 
-  size_t i;
-  for (i = 0; i < PARAM_N; i++) {
+  for (size_t i = 0; i < PARAM_N; i++) {
     const uint32_t t = tp[i];
 
-    size_t j;
     uint32_t d = 0;
-    for (j = 0; j < 8; j++) {
+    for (size_t j = 0; j < 8; j++) {
       d += (t >> j) & 0x01010101;
     }
 
@@ -154,8 +150,7 @@
 
 void newhope_poly_pointwise(NEWHOPE_POLY* r, const NEWHOPE_POLY* a,
                             const NEWHOPE_POLY* b) {
-  size_t i;
-  for (i = 0; i < PARAM_N; i++) {
+  for (size_t i = 0; i < PARAM_N; i++) {
     uint16_t t = newhope_montgomery_reduce(3186 * b->coeffs[i]);
     /* t is now in Montgomery domain */
     r->coeffs[i] = newhope_montgomery_reduce(a->coeffs[i] * t);
@@ -165,8 +160,7 @@
 
 void newhope_poly_add(NEWHOPE_POLY* r, const NEWHOPE_POLY* a,
                       const NEWHOPE_POLY* b) {
-  size_t i;
-  for (i = 0; i < PARAM_N; i++) {
+  for (size_t i = 0; i < PARAM_N; i++) {
     r->coeffs[i] = newhope_barrett_reduce(a->coeffs[i] + b->coeffs[i]);
   }
 }
diff --git a/crypto/pkcs8/pkcs8.c b/crypto/pkcs8/pkcs8.c
index 2363aa8..7b34705 100644
--- a/crypto/pkcs8/pkcs8.c
+++ b/crypto/pkcs8/pkcs8.c
@@ -155,11 +155,10 @@
     return 0;
   }
 
-  size_t i;
-  for (i = 0; i < S_len; i++) {
+  for (size_t i = 0; i < S_len; i++) {
     I[i] = salt[i % salt_len];
   }
-  for (i = 0; i < P_len; i++) {
+  for (size_t i = 0; i < P_len; i++) {
     I[i + S_len] = pass_raw[i % pass_raw_len];
   }
 
@@ -178,8 +177,7 @@
         !EVP_DigestFinal_ex(&ctx, A, &A_len)) {
       goto err;
     }
-    int iter;
-    for (iter = 1; iter < iterations; iter++) {
+    for (int iter = 1; iter < iterations; iter++) {
       if (!EVP_DigestInit_ex(&ctx, md, NULL) ||
           !EVP_DigestUpdate(&ctx, A, A_len) ||
           !EVP_DigestFinal_ex(&ctx, A, &A_len)) {
@@ -198,7 +196,7 @@
     /* B. Concatenate copies of A_i to create a string B of length v bits (the
      * final copy of A_i may be truncated to create B). */
     uint8_t B[EVP_MAX_MD_BLOCK_SIZE];
-    for (i = 0; i < block_size; i++) {
+    for (size_t i = 0; i < block_size; i++) {
       B[i] = A[i % A_len];
     }
 
@@ -206,10 +204,9 @@
      * where k=ceiling(s/v)+ceiling(p/v), modify I by setting I_j=(I_j+B+1) mod
      * 2^v for each j. */
     assert(I_len % block_size == 0);
-    for (i = 0; i < I_len; i += block_size) {
+    for (size_t i = 0; i < I_len; i += block_size) {
       unsigned carry = 1;
-      size_t j;
-      for (j = block_size - 1; j < block_size; j--) {
+      for (size_t j = block_size - 1; j < block_size; j--) {
         carry += I[i + j] + B[j];
         I[i + j] = (uint8_t)carry;
         carry >>= 8;
diff --git a/crypto/rsa/rsa.c b/crypto/rsa/rsa.c
index e0a2b5a..17f0a8a 100644
--- a/crypto/rsa/rsa.c
+++ b/crypto/rsa/rsa.c
@@ -609,8 +609,7 @@
     num_additional_primes = sk_RSA_additional_prime_num(key->additional_primes);
   }
 
-  size_t i;
-  for (i = 0; i < num_additional_primes; i++) {
+  for (size_t i = 0; i < num_additional_primes; i++) {
     const RSA_additional_prime *ap =
         sk_RSA_additional_prime_value(key->additional_primes, i);
     if (!BN_mul(&n, &n, ap->prime, ctx) ||
diff --git a/crypto/rsa/rsa_asn1.c b/crypto/rsa/rsa_asn1.c
index 599a0c3..509f1aa 100644
--- a/crypto/rsa/rsa_asn1.c
+++ b/crypto/rsa/rsa_asn1.c
@@ -329,10 +329,10 @@
       OPENSSL_PUT_ERROR(RSA, RSA_R_ENCODE_ERROR);
       return 0;
     }
-    size_t i;
-    for (i = 0; i < sk_RSA_additional_prime_num(rsa->additional_primes); i++) {
+    for (size_t i = 0; i < sk_RSA_additional_prime_num(rsa->additional_primes);
+         i++) {
       RSA_additional_prime *ap =
-              sk_RSA_additional_prime_value(rsa->additional_primes, i);
+          sk_RSA_additional_prime_value(rsa->additional_primes, i);
       CBB other_prime_info;
       if (!CBB_add_asn1(&other_prime_infos, &other_prime_info,
                         CBS_ASN1_SEQUENCE) ||
diff --git a/crypto/stack/stack.c b/crypto/stack/stack.c
index e893221..2d5744a 100644
--- a/crypto/stack/stack.c
+++ b/crypto/stack/stack.c
@@ -131,13 +131,11 @@
 }
 
 void sk_pop_free(_STACK *sk, void (*func)(void *)) {
-  size_t i;
-
   if (sk == NULL) {
     return;
   }
 
-  for (i = 0; i < sk->num; i++) {
+  for (size_t i = 0; i < sk->num; i++) {
     if (sk->data[i] != NULL) {
       func(sk->data[i]);
     }
@@ -209,13 +207,11 @@
 }
 
 void *sk_delete_ptr(_STACK *sk, void *p) {
-  size_t i;
-
   if (sk == NULL) {
     return NULL;
   }
 
-  for (i = 0; i < sk->num; i++) {
+  for (size_t i = 0; i < sk->num; i++) {
     if (sk->data[i] == p) {
       return sk_delete(sk, i);
     }
@@ -225,17 +221,13 @@
 }
 
 int sk_find(_STACK *sk, size_t *out_index, void *p) {
-  const void *const *r;
-  size_t i;
-  int (*comp_func)(const void *,const void *);
-
   if (sk == NULL) {
     return 0;
   }
 
   if (sk->comp == NULL) {
     /* Use pointer equality when no comparison function has been set. */
-    for (i = 0; i < sk->num; i++) {
+    for (size_t i = 0; i < sk->num; i++) {
       if (sk->data[i] == p) {
         if (out_index) {
           *out_index = i;
@@ -257,18 +249,19 @@
    * elements. However, since we're passing an array of pointers to
    * qsort/bsearch, we can just cast the comparison function and everything
    * works. */
-  comp_func=(int (*)(const void *,const void *))(sk->comp);
-  r = bsearch(&p, sk->data, sk->num, sizeof(void *), comp_func);
+  const void *const *r = bsearch(&p, sk->data, sk->num, sizeof(void *),
+                                 (int (*)(const void *, const void *))sk->comp);
   if (r == NULL) {
     return 0;
   }
-  i = ((void **)r) - sk->data;
+  size_t idx = ((void **)r) - sk->data;
   /* This function always returns the first result. */
-  while (i > 0 && sk->comp((const void**) &p, (const void**) &sk->data[i-1]) == 0) {
-    i--;
+  while (idx > 0 &&
+         sk->comp((const void **)&p, (const void **)&sk->data[idx - 1]) == 0) {
+    idx--;
   }
   if (out_index) {
-    *out_index = i;
+    *out_index = idx;
   }
   return 1;
 }
@@ -364,15 +357,13 @@
     return NULL;
   }
 
-  size_t i;
-  for (i = 0; i < ret->num; i++) {
+  for (size_t i = 0; i < ret->num; i++) {
     if (ret->data[i] == NULL) {
       continue;
     }
     ret->data[i] = copy_func(ret->data[i]);
     if (ret->data[i] == NULL) {
-      size_t j;
-      for (j = 0; j < i; j++) {
+      for (size_t j = 0; j < i; j++) {
         if (ret->data[j] != NULL) {
           free_func(ret->data[j]);
         }
diff --git a/crypto/test/test_util.cc b/crypto/test/test_util.cc
index 8021aaa..928972a 100644
--- a/crypto/test/test_util.cc
+++ b/crypto/test/test_util.cc
@@ -20,10 +20,9 @@
 
 void hexdump(FILE *fp, const char *msg, const void *in, size_t len) {
   const uint8_t *data = reinterpret_cast<const uint8_t*>(in);
-  size_t i;
 
   fputs(msg, fp);
-  for (i = 0; i < len; i++) {
+  for (size_t i = 0; i < len; i++) {
     fprintf(fp, "%02x", data[i]);
   }
   fputs("\n", fp);