pw_kvs: Track the correct number of sectors

Previously, the entire sector map array was iterated, which caused
out-of-range flash reads whenever the array had more entries than the
flash partition has sectors. This change records the partition's actual
sector count (sector_map_size_) and iterates only that many entries,
fixing the out-of-range memory reads; further work is needed to get all
tests passing.

Change-Id: Ic8e3edc45b8b6146c1c3217b3b1ffd6f127358df
diff --git a/pw_kvs/key_value_store.cc b/pw_kvs/key_value_store.cc
index c4c154c..249425c 100644
--- a/pw_kvs/key_value_store.cc
+++ b/pw_kvs/key_value_store.cc
@@ -28,7 +28,31 @@
 using std::byte;
 using std::string_view;
 
+KeyValueStore::KeyValueStore(FlashPartition* partition,
+                             const EntryHeaderFormat& format,
+                             const Options& options)
+    : partition_(*partition),
+      entry_header_format_(format),
+      options_(options),
+      key_descriptor_list_{},
+      key_descriptor_list_size_(0),
+      sector_map_{},
+      sector_map_size_(partition_.sector_count()),
+      last_new_sector_(sector_map_.data()),
+      working_buffer_{} {}
+
 Status KeyValueStore::Init() {
+  if (kMaxUsableSectors < sector_map_size_) {
+    CRT("KeyValueStore::kMaxUsableSectors must be at least as large as the "
+        "number of sectors in the flash partition");
+    return Status::FAILED_PRECONDITION;
+  }
+
+  if (kMaxUsableSectors > sector_map_size_) {
+    DBG("KeyValueStore::kMaxUsableSectors is %zu sectors larger than needed",
+        kMaxUsableSectors - sector_map_size_);
+  }
+
   // Reset the number of occupied key descriptors; we will fill them later.
   key_descriptor_list_size_ = 0;
 
@@ -37,7 +61,6 @@
   // clean start, random is a good second choice.
 
   const size_t sector_size_bytes = partition_.sector_size_bytes();
-  const size_t sector_count = partition_.sector_count();
 
   if (working_buffer_.size() < sector_size_bytes) {
     CRT("ERROR: working_buffer_ (%zu bytes) is smaller than sector "
@@ -48,7 +71,7 @@
   }
 
   DBG("First pass: Read all entries from all sectors");
-  for (size_t sector_id = 0; sector_id < sector_count; ++sector_id) {
+  for (size_t sector_id = 0; sector_id < sector_map_size_; ++sector_id) {
     // Track writable bytes in this sector. Updated after reading each entry.
     sector_map_[sector_id].tail_free_bytes = sector_size_bytes;
 
@@ -98,7 +121,7 @@
 
   DBG("Second pass: Count valid bytes in each sector");
   // Initialize the sector sizes.
-  for (size_t sector_id = 0; sector_id < sector_count; ++sector_id) {
+  for (size_t sector_id = 0; sector_id < sector_map_size_; ++sector_id) {
     sector_map_[sector_id].valid_bytes = 0;
   }
   // For every valid key, increment the valid bytes for that sector.
@@ -528,8 +551,6 @@
                                           size_t size,
                                           SectorDescriptor* sector_to_skip,
                                           bool bypass_empty_sector_rule) {
-  const size_t sector_count = partition_.sector_count();
-
   // The last_new_sector_ is the sector that was last selected as the "new empty
   // sector" to write to. This last new sector is used as the starting point for
   // the next "find a new empty sector to write to" operation. By using the last
@@ -542,7 +563,7 @@
   // the persistent storage use SectorDescriptor* rather than sector index
   // because SectorDescriptor* is the standard way to identify a sector.
   size_t last_new_sector_index_ = SectorIndex(last_new_sector_);
-  size_t start = (last_new_sector_index_ + 1) % sector_count;
+  size_t start = (last_new_sector_index_ + 1) % sector_map_size_;
   SectorDescriptor* first_empty_sector = nullptr;
   bool at_least_two_empty_sectors = bypass_empty_sector_rule;
 
@@ -550,7 +571,7 @@
   // first one of those that is found. While scanning for a partial sector, keep
   // track of the first empty sector and if a second sector was seen.
   for (size_t i = start; i != last_new_sector_index_;
-       i = (i + 1) % sector_count) {
+       i = (i + 1) % sector_map_size_) {
     DBG("Examining sector %zu", i);
     SectorDescriptor& sector = sector_map_[i];
 
@@ -610,7 +631,7 @@
   // Step 1: Try to find a sectors with stale keys and no valid keys (no
   // relocation needed). If any such sectors are found, use the sector with the
   // most reclaimable bytes.
-  for (auto& sector : sector_map_) {
+  for (auto& sector : sectors()) {
     if ((sector.valid_bytes == 0) &&
         (RecoverableBytes(sector) > candidate_bytes)) {
       sector_candidate = &sector;
@@ -621,7 +642,7 @@
   // Step 2: If step 1 yields no sectors, just find the sector with the most
   // reclaimable bytes.
   if (sector_candidate == nullptr) {
-    for (auto& sector : sector_map_) {
+    for (auto& sector : sectors()) {
       if (RecoverableBytes(sector) > candidate_bytes) {
         sector_candidate = &sector;
         candidate_bytes = RecoverableBytes(sector);
@@ -706,13 +727,13 @@
 }
 
 void KeyValueStore::LogDebugInfo() {
-  const size_t sector_count = partition_.sector_count();
   const size_t sector_size_bytes = partition_.sector_size_bytes();
   DBG("====================== KEY VALUE STORE DUMP =========================");
   DBG(" ");
   DBG("Flash partition:");
-  DBG("  Sector count     = %zu", sector_count);
-  DBG("  Sector max count = %zu", kUsableSectors);
+  DBG("  Sector count     = %zu", partition_.sector_count());
+  DBG("  Sector max count = %zu", kMaxUsableSectors);
+  DBG("  Sectors in use   = %zu", sector_map_size_);
   DBG("  Sector size      = %zu", sector_size_bytes);
   DBG("  Total size       = %zu", partition_.size_bytes());
   DBG("  Alignment        = %zu", partition_.alignment_bytes());
@@ -735,7 +756,7 @@
 
   DBG("Sector descriptors:");
   DBG("      #     tail free  valid    has_space");
-  for (size_t sector_id = 0; sector_id < sector_count; ++sector_id) {
+  for (size_t sector_id = 0; sector_id < sector_map_size_; ++sector_id) {
     const SectorDescriptor& sd = sector_map_[sector_id];
     DBG("   |%3zu: | %8zu  |%8zu  | %s",
         sector_id,
@@ -748,7 +769,7 @@
   // TODO: This should stop logging after some threshold.
   // size_t dumped_bytes = 0;
   DBG("Sector raw data:");
-  for (size_t sector_id = 0; sector_id < sector_count; ++sector_id) {
+  for (size_t sector_id = 0; sector_id < sector_map_size_; ++sector_id) {
     // Read sector data. Yes, this will blow the stack on embedded.
     std::array<byte, 500> raw_sector_data;  // TODO
     StatusWithSize sws =
diff --git a/pw_kvs/key_value_store_test.cc b/pw_kvs/key_value_store_test.cc
index a373382..27ce09a 100644
--- a/pw_kvs/key_value_store_test.cc
+++ b/pw_kvs/key_value_store_test.cc
@@ -282,6 +282,30 @@
 
 }  // namespace
 
+TEST_F(KeyValueStoreTest,
+       DISABLED_Put_SameKeySameValueRepeatedly_AlignedEntries) {
+  std::array<char, 8> value{'v', 'a', 'l', 'u', 'e', '6', '7', '\0'};
+
+  for (int i = 0; i < 1000; ++i) {
+    ASSERT_EQ(Status::OK, kvs_.Put("The Key!", as_bytes(span(value))));
+  }
+}
+
+TEST_F(KeyValueStoreTest,
+       DISABLED_Put_SameKeySameValueRepeatedly_UnalignedEntries) {
+  std::array<char, 7> value{'v', 'a', 'l', 'u', 'e', '6', '\0'};
+
+  for (int i = 0; i < 1000; ++i) {
+    ASSERT_EQ(Status::OK, kvs_.Put("The Key!", as_bytes(span(value))));
+  }
+}
+
+TEST_F(KeyValueStoreTest, DISABLED_Put_SameKeyDifferentValueRepeatedly) {
+  for (uint64_t i = 0; i < 1000u; ++i) {
+    ASSERT_EQ(Status::OK, kvs_.Put("The Key!", i));
+  }
+}
+
 TEST_F(KeyValueStoreTest, Delete_GetDeletedKey_ReturnsNotFound) {
   ASSERT_EQ(Status::OK, kvs_.Put("kEy", as_bytes(span("123"))));
   ASSERT_EQ(Status::OK, kvs_.Delete("kEy"));
diff --git a/pw_kvs/public/pw_kvs/key_value_store.h b/pw_kvs/public/pw_kvs/key_value_store.h
index 4bd9d24..d7ee5fb 100644
--- a/pw_kvs/public/pw_kvs/key_value_store.h
+++ b/pw_kvs/public/pw_kvs/key_value_store.h
@@ -24,8 +24,6 @@
 #include "pw_status/status.h"
 #include "pw_status/status_with_size.h"
 
-// TODO: Resolve uses of partition_.sector_count() vs kMaxUsableSectors.
-
 namespace pw::kvs {
 namespace internal {
 
@@ -71,7 +69,7 @@
   // TODO: Make these configurable
   static constexpr size_t kMaxKeyLength = 64;
   static constexpr size_t kMaxEntries = 64;
-  static constexpr size_t kUsableSectors = 64;
+  static constexpr size_t kMaxUsableSectors = 64;
   static constexpr size_t kWorkingBufferSizeBytes = (4 * 1024);
 
   // +1 for null-terminator.
@@ -79,17 +77,9 @@
 
   // In the future, will be able to provide additional EntryHeaderFormats for
   // backwards compatibility.
-  constexpr KeyValueStore(FlashPartition* partition,
-                          const EntryHeaderFormat& format,
-                          const Options& options = {})
-      : partition_(*partition),
-        entry_header_format_(format),
-        options_(options),
-        key_descriptor_list_{},
-        key_descriptor_list_size_(0),
-        sector_map_{},
-        last_new_sector_(sector_map_.data()),
-        working_buffer_{} {}
+  KeyValueStore(FlashPartition* partition,
+                const EntryHeaderFormat& format,
+                const Options& options = {});
 
   Status Init();
 
@@ -350,6 +340,10 @@
     return span(key_descriptor_list_.data(), key_descriptor_list_size_);
   }
 
+  span<SectorDescriptor> sectors() {
+    return {sector_map_.data(), sector_map_size_};
+  }
+
   FlashPartition& partition_;
   EntryHeaderFormat entry_header_format_;
   Options options_;
@@ -361,7 +355,10 @@
                                      // key_descriptor_list_
 
   // This is dense, so sector_id == indexof(SectorDescriptor) in sector_map
-  std::array<SectorDescriptor, kUsableSectors> sector_map_;
+  // TODO: This may need to be a span that points to an externally allocated
+  // array. This could be handled by a templated KVS derived class.
+  std::array<SectorDescriptor, kMaxUsableSectors> sector_map_;
+  const size_t sector_map_size_;
 
   // The last sector that was selected as the "new empty sector" to write to.
   // This last new sector is used as the starting point for the next "find a new