pw_kvs: Sectors abstraction

- Create the Sectors abstraction, which wraps the list of
  SectorDescriptors (usage sketched below).
- Move sector-finding functionality to the Sectors class.
- Start tests for the Sectors class.
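
A minimal sketch of how the new class is used (the variable names and
sizes below are illustrative assumptions, not part of this change):

  // Sketch: "partition" is an existing FlashPartition. The caller owns
  // the descriptor storage and the skip-list buffer, which needs room
  // for redundancy * 2 - 1 sector pointers.
  Vector<SectorDescriptor, 32> descriptors;
  const SectorDescriptor* to_skip[3];  // assumes redundancy == 2
  Sectors sectors(descriptors, partition, to_skip);
  sectors.Reset();  // Must be called before the object is used.

  SectorDescriptor* sector = nullptr;
  const size_t entry_size = 32;  // hypothetical size of a pending entry
  if (sectors.FindSpace(&sector, entry_size, {}).ok()) {
    const Sectors::Address address = sectors.NextWritableAddress(*sector);
    // Write the new entry starting at this address.
  }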

Change-Id: I2d804d14bba7c04880e578ade4da3488caec348c
diff --git a/pw_kvs/BUILD b/pw_kvs/BUILD
index f469cce..73a1b19 100644
--- a/pw_kvs/BUILD
+++ b/pw_kvs/BUILD
@@ -39,6 +39,7 @@
         "public/pw_kvs/internal/sectors.h",
         "public/pw_kvs/internal/span_traits.h",
         "pw_kvs_private/macros.h",
+        "sectors.cc",
     ],
     hdrs = [
         "public/pw_kvs/alignment.h",
@@ -174,6 +175,15 @@
     ],
 )
 
+pw_cc_test(
+    name = "sectors_test",
+    srcs = ["sectors_test.cc"],
+    deps = [
+        ":pw_kvs",
+        ":test_utils",
+    ],
+)
+
 # TODO: This binary is not building due to a linker error. The error does not occur in GN Builds.
 # A filegroup is used below so that the file is included in the Bazel build.
 # cc_binary(
diff --git a/pw_kvs/BUILD.gn b/pw_kvs/BUILD.gn
index 5c89533..4f4a9af 100644
--- a/pw_kvs/BUILD.gn
+++ b/pw_kvs/BUILD.gn
@@ -44,6 +44,7 @@
     "public/pw_kvs/internal/sectors.h",
     "public/pw_kvs/internal/span_traits.h",
     "pw_kvs_private/macros.h",
+    "sectors.cc",
   ]
   sources += public
   public_deps = [
@@ -61,6 +62,7 @@
     ":key_value_store_test",
     ":key_value_store_binary_format_test",
     ":key_value_store_map_test",
+    ":sectors_test",
   ]
 }
 
@@ -106,6 +108,7 @@
     ":key_value_store_binary_format_test",
     ":key_value_store_fuzz_test",
     ":key_value_store_map_test",
+    ":sectors_test",
   ]
 }
 
@@ -180,6 +183,14 @@
   sources = [ "key_value_store_map_test.cc" ]
 }
 
+pw_test("sectors_test") {
+  deps = [
+    ":pw_kvs",
+    ":test_utils",
+  ]
+  sources = [ "sectors_test.cc" ]
+}
+
 pw_doc_group("docs") {
   sources = [ "docs.rst" ]
 }
diff --git a/pw_kvs/key_value_store.cc b/pw_kvs/key_value_store.cc
index 50af414..13708ae 100644
--- a/pw_kvs/key_value_store.cc
+++ b/pw_kvs/key_value_store.cc
@@ -33,14 +33,6 @@
   return key.empty() || (key.size() > internal::Entry::kMaxKeyLength);
 }
 
-// Returns true if the container conatins the value.
-// TODO: At some point move this to pw_containers, along with adding tests.
-template <typename Container, typename T>
-bool Contains(const Container& container, const T& value) {
-  return std::find(std::begin(container), std::end(container), value) !=
-         std::end(container);
-}
-
 }  // namespace
 
 KeyValueStore::KeyValueStore(FlashPartition* partition,
@@ -53,20 +45,18 @@
                              Address* addresses)
     : partition_(*partition),
       formats_(formats),
+      sectors_(sector_descriptor_list, *partition, temp_sectors_to_skip),
       entry_cache_(key_descriptor_list, addresses, redundancy),
-      sectors_(sector_descriptor_list),
-      temp_sectors_to_skip_(temp_sectors_to_skip),
       options_(options),
       initialized_(false),
       error_detected_(false),
-      last_new_sector_(nullptr),
       last_transaction_id_(0) {}
 
 Status KeyValueStore::Init() {
   initialized_ = false;
   error_detected_ = false;
-  last_new_sector_ = nullptr;
   last_transaction_id_ = 0;
+  sectors_.Reset();
   entry_cache_.Reset();
 
   INF("Initializing key value store");
@@ -92,9 +82,6 @@
   DBG("First pass: Read all entries from all sectors");
   Address sector_address = 0;
 
-  sectors_.assign(partition_.sector_count(),
-                  SectorDescriptor(sector_size_bytes));
-
   size_t total_corrupt_bytes = 0;
   int corrupt_entries = 0;
   bool empty_sector_found = false;
@@ -110,7 +97,7 @@
           num_entries_in_sector,
           entry_address);
 
-      if (!AddressInSector(sector, entry_address)) {
+      if (!sectors_.AddressInSector(sector, entry_address)) {
         DBG("Fell off end of sector; moving to the next sector");
         break;
       }
@@ -125,7 +112,7 @@
         // The entry could not be read, indicating data corruption within the
         // sector. Try to scan the remainder of the sector for other entries.
         WRN("KVS init: data loss detected in sector %u at address %zu",
-            SectorIndex(&sector),
+            sectors_.Index(sector),
             size_t(entry_address));
 
         error_detected_ = true;
@@ -171,7 +158,7 @@
       error_detected_ = true;
 
       WRN("Sector %u contains %zuB of corrupt data",
-          SectorIndex(&sector),
+          sectors_.Index(sector),
           sector_corrupt_bytes);
     }
 
@@ -194,7 +181,7 @@
     for (Address address : metadata.addresses()) {
       Entry entry;
       TRY(Entry::Read(partition_, address, formats_, &entry));
-      SectorFromAddress(address)->AddValidBytes(entry.size());
+      sectors_.FromAddress(address).AddValidBytes(entry.size());
     }
     if (metadata.IsNewerThan(last_transaction_id_)) {
       last_transaction_id_ = metadata.transaction_id();
@@ -202,7 +189,7 @@
     }
   }
 
-  last_new_sector_ = SectorFromAddress(newest_key);
+  sectors_.set_last_new_sector(newest_key);
 
   if (error_detected_) {
     Status recovery_status = Repair();
@@ -292,7 +279,7 @@
                                    Address start_address,
                                    Address* next_entry_address) {
   DBG("Scanning sector %u for entries starting from address %zx",
-      SectorIndex(&sector),
+      sectors_.Index(sector),
       size_t(start_address));
 
   // Entries must start at addresses which are aligned on a multiple of
@@ -300,7 +287,7 @@
   // When scanning, we don't have an entry to tell us what the current alignment
   // is, so the minimum alignment is used to be exhaustive.
   for (Address address = AlignUp(start_address, Entry::kMinAlignmentBytes);
-       AddressInSector(sector, address);
+       sectors_.AddressInSector(sector, address);
        address += Entry::kMinAlignmentBytes) {
     uint32_t magic;
     TRY(partition_.Read(address, as_writable_bytes(span(&magic, 1))));
@@ -347,7 +334,7 @@
     DBG("Overwriting entry for key 0x%08" PRIx32 " in %zu sectors including %u",
         metadata.hash(),
         metadata.addresses().size(),
-        SectorIndex(SectorFromAddress(metadata.first_address())));
+        sectors_.Index(metadata.first_address()));
     return WriteEntryForExistingKey(metadata, EntryState::kValid, key, value);
   }
 
@@ -368,7 +355,7 @@
   DBG("Writing tombstone for key 0x%08" PRIx32 " in %zu sectors including %u",
       metadata.hash(),
       metadata.addresses().size(),
-      SectorIndex(SectorFromAddress(metadata.first_address())));
+      sectors_.Index(metadata.first_address()));
   return WriteEntryForExistingKey(metadata, EntryState::kDeleted, key, {});
 }
 
@@ -526,8 +513,8 @@
     SectorDescriptor* sector;
     TRY(GetSectorForWrite(&sector, entry_size, span(reserved_addresses, i)));
 
-    DBG("Found space for entry in sector %u", SectorIndex(sector));
-    reserved_addresses[i] = NextWritableAddress(sector);
+    DBG("Found space for entry in sector %u", sectors_.Index(sector));
+    reserved_addresses[i] = sectors_.NextWritableAddress(*sector);
   }
 
   // Write the entry at the first address that was found.
@@ -560,7 +547,7 @@
 
   // Remove valid bytes for the old entry and its copies, which are now stale.
   for (Address address : prior_metadata->addresses()) {
-    SectorFromAddress(address)->RemoveValidBytes(prior_size);
+    sectors_.FromAddress(address).RemoveValidBytes(prior_size);
   }
 
   prior_metadata->Reset(entry.descriptor(key), entry.address());
@@ -575,8 +562,7 @@
 Status KeyValueStore::GetSectorForWrite(SectorDescriptor** sector,
                                         size_t entry_size,
                                         span<const Address> reserved) {
-  Status result =
-      FindSectorWithSpace(sector, entry_size, kAppendEntry, {}, reserved);
+  Status result = sectors_.FindSpace(sector, entry_size, reserved);
 
   size_t gc_sector_count = 0;
   bool do_auto_gc = options_.gc_on_write != GargbageCollectOnWrite::kDisabled;
@@ -598,8 +584,7 @@
       return gc_status;
     }
 
-    result =
-        FindSectorWithSpace(sector, entry_size, kAppendEntry, {}, reserved);
+    result = sectors_.FindSpace(sector, entry_size, reserved);
 
     gc_sector_count++;
     // Allow total sectors + 2 number of GC cycles so that once reclaimable
@@ -625,8 +610,8 @@
 
   // Remove any bytes that were written, even if the write was not successful.
   // This is important to retain the writable space invariant on the sectors.
-  SectorDescriptor* const sector = SectorFromAddress(entry.address());
-  sector->RemoveWritableBytes(result.size());
+  SectorDescriptor& sector = sectors_.FromAddress(entry.address());
+  sector.RemoveWritableBytes(result.size());
 
   if (!result.ok()) {
     ERR("Failed to write %zu bytes at %#zx. %zu actually written",
@@ -640,7 +625,7 @@
     TRY(entry.VerifyChecksumInFlash());
   }
 
-  sector->AddValidBytes(result.size());
+  sector.AddValidBytes(result.size());
   return Status::OK;
 }
 
@@ -658,225 +643,27 @@
   // an immediate extra relocation).
   SectorDescriptor* new_sector;
 
-  TRY(FindSectorWithSpace(&new_sector,
-                          entry.size(),
-                          kGarbageCollect,
-                          metadata.addresses(),
-                          reserved_addresses));
+  TRY(sectors_.FindSpaceDuringGarbageCollection(
+      &new_sector, entry.size(), metadata.addresses(), reserved_addresses));
 
-  const Address new_address = NextWritableAddress(new_sector);
+  const Address new_address = sectors_.NextWritableAddress(*new_sector);
   const StatusWithSize result = entry.Copy(new_address);
   new_sector->RemoveWritableBytes(result.size());
   TRY(result);
 
   // Entry was written successfully; update descriptor's address and the sector
   // descriptors to reflect the new entry.
-  SectorFromAddress(address)->RemoveValidBytes(result.size());
+  sectors_.FromAddress(address).RemoveValidBytes(result.size());
   new_sector->AddValidBytes(result.size());
   address = new_address;
 
   return Status::OK;
 }
 
-// Find either an existing sector with enough space that is not the sector to
-// skip, or an empty sector. Maintains the invariant that there is always at
-// least 1 empty sector except during GC. On GC, skip sectors that have
-// reclaimable bytes.
-Status KeyValueStore::FindSectorWithSpace(
-    SectorDescriptor** found_sector,
-    size_t size,
-    FindSectorMode find_mode,
-    span<const Address> addresses_to_skip,
-    span<const Address> reserved_addresses) {
-  SectorDescriptor* first_empty_sector = nullptr;
-  bool at_least_two_empty_sectors = (find_mode == kGarbageCollect);
-
-  // Used for the GC reclaimable bytes check
-  SectorDescriptor* non_empty_least_reclaimable_sector = nullptr;
-  const size_t sector_size_bytes = partition_.sector_size_bytes();
-
-  // Build a list of sectors to avoid.
-  //
-  // This is overly strict. reserved_addresses is populated when there are
-  // sectors reserved for a new entry. It is safe to garbage collect into
-  // these sectors, as long as there remains room for the pending entry. These
-  // reserved sectors could also be garbage collected if they have recoverable
-  // space. For simplicitly, avoid both the relocating key's redundant entries
-  // (addresses_to_skip) and the sectors reserved for pending writes
-  // (reserved_addresses).
-  // TODO(hepler): Look into improving garbage collection.
-  size_t sectors_to_skip = 0;
-  for (Address address : addresses_to_skip) {
-    temp_sectors_to_skip_[sectors_to_skip++] = SectorFromAddress(address);
-  }
-  for (Address address : reserved_addresses) {
-    temp_sectors_to_skip_[sectors_to_skip++] = SectorFromAddress(address);
-  }
-
-  DBG("Find sector with %zu bytes available, starting with sector %u, %s",
-      size,
-      SectorIndex(last_new_sector_),
-      (find_mode == kAppendEntry) ? "Append" : "GC");
-  for (size_t i = 0; i < sectors_to_skip; ++i) {
-    DBG("  Skip sector %u", SectorIndex(temp_sectors_to_skip_[i]));
-  }
-
-  // The last_new_sector_ is the sector that was last selected as the "new empty
-  // sector" to write to. This last new sector is used as the starting point for
-  // the next "find a new empty sector to write to" operation. By using the last
-  // new sector as the start point we will cycle which empty sector is selected
-  // next, spreading the wear across all the empty sectors and get a wear
-  // leveling benefit, rather than putting more wear on the lower number
-  // sectors.
-  SectorDescriptor* sector = last_new_sector_;
-
-  // Look for a sector to use with enough space. The search uses a 3 priority
-  // tier process.
-  //
-  // Tier 1 is sector that already has valid data. During GC only select a
-  // sector that has no reclaimable bytes. Immediately use the first matching
-  // sector that is found.
-  //
-  // Tier 2 is find sectors that are empty/erased. While scanning for a partial
-  // sector, keep track of the first empty sector and if a second empty sector
-  // was seen. If during GC then count the second empty sector as always seen.
-  //
-  // Tier 3 is during garbage collection, find sectors with enough space that
-  // are not empty but have recoverable bytes. Pick the sector with the least
-  // recoverable bytes to minimize the likelyhood of this sector needing to be
-  // garbage collected soon.
-  for (size_t j = 0; j < sectors_.size(); j++) {
-    sector += 1;
-    if (sector == sectors_.end()) {
-      sector = sectors_.begin();
-    }
-
-    // Skip sectors in the skip list.
-    if (Contains(span(temp_sectors_to_skip_, sectors_to_skip), sector)) {
-      continue;
-    }
-
-    if (!sector->Empty(sector_size_bytes) && sector->HasSpace(size)) {
-      if ((find_mode == kAppendEntry) ||
-          (sector->RecoverableBytes(sector_size_bytes) == 0)) {
-        *found_sector = sector;
-        return Status::OK;
-      } else {
-        if ((non_empty_least_reclaimable_sector == nullptr) ||
-            (non_empty_least_reclaimable_sector->RecoverableBytes(
-                 sector_size_bytes) <
-             sector->RecoverableBytes(sector_size_bytes))) {
-          non_empty_least_reclaimable_sector = sector;
-        }
-      }
-    }
-
-    if (sector->Empty(sector_size_bytes)) {
-      if (first_empty_sector == nullptr) {
-        first_empty_sector = sector;
-      } else {
-        at_least_two_empty_sectors = true;
-      }
-    }
-  }
-
-  // Tier 2 check: If the scan for a partial sector does not find a suitable
-  // sector, use the first empty sector that was found. Normally it is required
-  // to keep 1 empty sector after the sector found here, but that rule does not
-  // apply during GC.
-  if (first_empty_sector != nullptr && at_least_two_empty_sectors) {
-    DBG("  Found a usable empty sector; returning the first found (%u)",
-        SectorIndex(first_empty_sector));
-    last_new_sector_ = first_empty_sector;
-    *found_sector = first_empty_sector;
-    return Status::OK;
-  }
-
-  // Tier 3 check: If we got this far, use the sector with least recoverable
-  // bytes
-  if (non_empty_least_reclaimable_sector != nullptr) {
-    *found_sector = non_empty_least_reclaimable_sector;
-    DBG("  Found a usable sector %u, with %zu B recoverable, in GC",
-        SectorIndex(*found_sector),
-        (*found_sector)->RecoverableBytes(sector_size_bytes));
-    return Status::OK;
-  }
-
-  // No sector was found.
-  DBG("  Unable to find a usable sector");
-  *found_sector = nullptr;
-  return Status::RESOURCE_EXHAUSTED;
-}
-
-// TODO: Break up this function in to smaller sub-chunks including create an
-// abstraction for the sector list. Look in to being able to unit test this as
-// its own thing
-KeyValueStore::SectorDescriptor* KeyValueStore::FindSectorToGarbageCollect(
-    span<const Address> reserved_addresses) {
-  const size_t sector_size_bytes = partition_.sector_size_bytes();
-  SectorDescriptor* sector_candidate = nullptr;
-  size_t candidate_bytes = 0;
-
-  // Build a vector of sectors to avoid.
-  for (size_t i = 0; i < reserved_addresses.size(); ++i) {
-    temp_sectors_to_skip_[i] = SectorFromAddress(reserved_addresses[i]);
-    DBG("    Skip sector %u",
-        SectorIndex(SectorFromAddress(reserved_addresses[i])));
-  }
-  const span sectors_to_skip(temp_sectors_to_skip_, reserved_addresses.size());
-
-  // Step 1: Try to find a sectors with stale keys and no valid keys (no
-  // relocation needed). If any such sectors are found, use the sector with the
-  // most reclaimable bytes.
-  for (auto& sector : sectors_) {
-    if ((sector.valid_bytes() == 0) &&
-        (sector.RecoverableBytes(sector_size_bytes) > candidate_bytes) &&
-        !Contains(sectors_to_skip, &sector)) {
-      sector_candidate = &sector;
-      candidate_bytes = sector.RecoverableBytes(sector_size_bytes);
-    }
-  }
-
-  // Step 2: If step 1 yields no sectors, just find the sector with the most
-  // reclaimable bytes but no addresses to avoid.
-  if (sector_candidate == nullptr) {
-    for (auto& sector : sectors_) {
-      if ((sector.RecoverableBytes(sector_size_bytes) > candidate_bytes) &&
-          !Contains(sectors_to_skip, &sector)) {
-        sector_candidate = &sector;
-        candidate_bytes = sector.RecoverableBytes(sector_size_bytes);
-      }
-    }
-  }
-
-  // Step 3: If no sectors with reclaimable bytes, select the sector with the
-  // most free bytes. This at least will allow entries of existing keys to get
-  // spread to other sectors, including sectors that already have copies of the
-  // current key being written.
-  if (sector_candidate == nullptr) {
-    for (auto& sector : sectors_) {
-      if ((sector.valid_bytes() > candidate_bytes) &&
-          !Contains(sectors_to_skip, &sector)) {
-        sector_candidate = &sector;
-        candidate_bytes = sector.valid_bytes();
-        DBG("    Doing GC on sector with no reclaimable bytes!");
-      }
-    }
-  }
-
-  if (sector_candidate != nullptr) {
-    DBG("Found sector %u to Garbage Collect, %zu recoverable bytes",
-        SectorIndex(sector_candidate),
-        sector_candidate->RecoverableBytes(sector_size_bytes));
-  } else {
-    DBG("Unable to find sector to garbage collect!");
-  }
-  return sector_candidate;
-}
-
 Status KeyValueStore::GarbageCollectFull() {
   DBG("Garbage Collect all sectors");
-  SectorDescriptor* sector = last_new_sector_;
+
+  SectorDescriptor* sector = sectors_.last_new();
 
   // TODO: look in to making an iterator method for cycling through sectors
   // starting from last_new_sector_.
@@ -887,7 +674,7 @@
     }
 
     if (sector->RecoverableBytes(partition_.sector_size_bytes()) > 0) {
-      TRY(GarbageCollectSector(sector, {}));
+      TRY(GarbageCollectSector(*sector, {}));
     }
   }
 
@@ -904,7 +691,7 @@
 
   // Step 1: Find the sector to garbage collect
   SectorDescriptor* sector_to_gc =
-      FindSectorToGarbageCollect(reserved_addresses);
+      sectors_.FindSectorToGarbageCollect(reserved_addresses);
 
   if (sector_to_gc == nullptr) {
     // Nothing to GC.
@@ -912,7 +699,7 @@
   }
 
   // Step 2: Garbage collect the selected sector.
-  return GarbageCollectSector(sector_to_gc, reserved_addresses);
+  return GarbageCollectSector(*sector_to_gc, reserved_addresses);
 }
 
 Status KeyValueStore::RelocateKeyAddressesInSector(
@@ -920,10 +707,10 @@
     const EntryMetadata& metadata,
     span<const Address> reserved_addresses) {
   for (FlashPartition::Address& address : metadata.addresses()) {
-    if (AddressInSector(sector_to_gc, address)) {
+    if (sectors_.AddressInSector(sector_to_gc, address)) {
       DBG("  Relocate entry for Key 0x%08" PRIx32 ", sector %u",
           metadata.hash(),
-          SectorIndex(SectorFromAddress(address)));
+          sectors_.Index(sectors_.FromAddress(address)));
       TRY(RelocateEntry(metadata, address, reserved_addresses));
     }
   }
@@ -932,28 +719,28 @@
 };
 
 Status KeyValueStore::GarbageCollectSector(
-    SectorDescriptor* sector_to_gc, span<const Address> reserved_addresses) {
+    SectorDescriptor& sector_to_gc, span<const Address> reserved_addresses) {
   // Step 1: Move any valid entries in the GC sector to other sectors
-  if (sector_to_gc->valid_bytes() != 0) {
+  if (sector_to_gc.valid_bytes() != 0) {
     for (const EntryMetadata& metadata : entry_cache_) {
       TRY(RelocateKeyAddressesInSector(
-          *sector_to_gc, metadata, reserved_addresses));
+          sector_to_gc, metadata, reserved_addresses));
     }
   }
 
-  if (sector_to_gc->valid_bytes() != 0) {
+  if (sector_to_gc.valid_bytes() != 0) {
     ERR("  Failed to relocate valid entries from sector being garbage "
         "collected, %zu valid bytes remain",
-        sector_to_gc->valid_bytes());
+        sector_to_gc.valid_bytes());
     return Status::INTERNAL;
   }
 
   // Step 2: Reinitialize the sector
-  sector_to_gc->set_writable_bytes(0);
-  TRY(partition_.Erase(SectorBaseAddress(sector_to_gc), 1));
-  sector_to_gc->set_writable_bytes(partition_.sector_size_bytes());
+  sector_to_gc.set_writable_bytes(0);
+  TRY(partition_.Erase(sectors_.BaseAddress(sector_to_gc), 1));
+  sector_to_gc.set_writable_bytes(partition_.sector_size_bytes());
 
-  DBG("  Garbage Collect sector %u complete", SectorIndex(sector_to_gc));
+  DBG("  Garbage Collect sector %u complete", sectors_.Index(sector_to_gc));
   return Status::OK;
 }
 
@@ -1017,10 +804,9 @@
 
   DBG("Sector descriptors:");
   DBG("      #     tail free  valid    has_space");
-  for (size_t sector_id = 0; sector_id < sectors_.size(); ++sector_id) {
-    const SectorDescriptor& sd = sectors_[sector_id];
-    DBG("   |%3zu: | %8zu  |%8zu  | %s",
-        sector_id,
+  for (const SectorDescriptor& sd : sectors_) {
+    DBG("   |%3u: | %8zu  |%8zu  | %s",
+        sectors_.Index(sd),
         size_t(sd.writable_bytes()),
         sd.valid_bytes(),
         sd.writable_bytes() ? "YES" : "");
@@ -1067,7 +853,7 @@
   DBG("Sector descriptors: count %zu", sectors_.size());
   for (auto& sector : sectors_) {
     DBG("  - Sector %u: valid %zu, recoverable %zu, free %zu",
-        SectorIndex(&sector),
+        sectors_.Index(sector),
         sector.valid_bytes(),
         sector.RecoverableBytes(partition_.sector_size_bytes()),
         sector.writable_bytes());
diff --git a/pw_kvs/public/pw_kvs/internal/sectors.h b/pw_kvs/public/pw_kvs/internal/sectors.h
index 58576c7..2be4dfe 100644
--- a/pw_kvs/public/pw_kvs/internal/sectors.h
+++ b/pw_kvs/public/pw_kvs/internal/sectors.h
@@ -17,17 +17,15 @@
 #include <cstddef>
 #include <cstdint>
 
+#include "pw_containers/vector.h"
+#include "pw_kvs/flash_memory.h"
+#include "pw_span/span.h"
+
 namespace pw::kvs::internal {
 
 // Tracks the available and used space in each sector used by the KVS.
 class SectorDescriptor {
  public:
-  explicit constexpr SectorDescriptor(uint16_t sector_size_bytes)
-      : tail_free_bytes_(sector_size_bytes), valid_bytes_(0) {}
-
-  SectorDescriptor(const SectorDescriptor&) = default;
-  SectorDescriptor& operator=(const SectorDescriptor&) = default;
-
   // The number of bytes available to be written in this sector. It the sector
   // is marked as corrupt, no bytes are available.
   size_t writable_bytes() const {
@@ -84,10 +82,151 @@
   static constexpr size_t max_sector_size() { return kMaxSectorSize; }
 
  private:
+  friend class Sectors;
+
   static constexpr uint16_t kCorruptSector = UINT16_MAX;
   static constexpr size_t kMaxSectorSize = UINT16_MAX - 1;
+
+  explicit constexpr SectorDescriptor(uint16_t sector_size_bytes)
+      : tail_free_bytes_(sector_size_bytes), valid_bytes_(0) {}
+
   uint16_t tail_free_bytes_;  // writable bytes at the end of the sector
   uint16_t valid_bytes_;      // sum of sizes of valid entries
 };
 
+// Represents a list of sectors usable by the KVS.
+class Sectors {
+ public:
+  using Address = FlashPartition::Address;
+
+  constexpr Sectors(Vector<SectorDescriptor>& sectors,
+                    FlashPartition& partition,
+                    const SectorDescriptor** temp_sectors_to_skip)
+      : descriptors_(sectors),
+        partition_(partition),
+        last_new_(nullptr),
+        temp_sectors_to_skip_(temp_sectors_to_skip) {}
+
+  // Resets the Sectors list. Must be called before using the object.
+  void Reset() {
+    last_new_ = descriptors_.begin();
+    descriptors_.assign(partition_.sector_count(),
+                        SectorDescriptor(partition_.sector_size_bytes()));
+  }
+
+  // The last sector that was selected as the "new empty sector" to write to.
+  // This last new sector is used as the starting point for the next "find a new
+  // empty sector to write to" operation. By using the last new sector as the
+  // starting point, we cycle which empty sector is selected next, spreading
+  // the wear across all the empty sectors, rather than putting more wear on
+  // the lower-numbered sectors.
+  //
+  // Use SectorDescriptor* for the persistent storage rather than sector index
+  // because SectorDescriptor* is the standard way to identify a sector.
+  SectorDescriptor* last_new() const { return last_new_; }
+
+  // Sets the last new sector from the provided address.
+  void set_last_new_sector(Address address) {
+    last_new_ = &FromAddress(address);
+  }
+
+  // Checks if an address is in the particular sector.
+  bool AddressInSector(const SectorDescriptor& sector, Address address) const {
+    const Address sector_base = BaseAddress(sector);
+    const Address sector_end = sector_base + partition_.sector_size_bytes();
+
+    return ((address >= sector_base) && (address < sector_end));
+  }
+
+  // Returns the first address in the provided sector.
+  Address BaseAddress(const SectorDescriptor& sector) const {
+    return Index(sector) * partition_.sector_size_bytes();
+  }
+
+  // Returns the sector that contains this address. The address must be valid.
+  SectorDescriptor& FromAddress(Address address) {
+    const size_t index = address / partition_.sector_size_bytes();
+    // TODO: Add boundary checking once asserts are supported.
+    // DCHECK_LT(index, descriptors_.size());
+    return descriptors_[index];
+  }
+
+  const SectorDescriptor& FromAddress(Address address) const {
+    return descriptors_[address / partition_.sector_size_bytes()];
+  }
+
+  Address NextWritableAddress(const SectorDescriptor& sector) const {
+    return BaseAddress(sector) + partition_.sector_size_bytes() -
+           sector.writable_bytes();
+  }
+
+  // Finds either an existing sector with enough space that is not the sector to
+  // skip, or an empty sector. Maintains the invariant that there is always at
+  // least 1 empty sector. Addresses in reserved_addresses are avoided.
+  Status FindSpace(SectorDescriptor** found_sector,
+                   size_t size,
+                   span<const Address> reserved_addresses) {
+    return Find(kAppendEntry, found_sector, size, {}, reserved_addresses);
+  }
+
+  // Same as FindSpace, except that the 1 empty sector invariant is ignored.
+  // Both addresses_to_skip and reserved_addresses are avoided.
+  Status FindSpaceDuringGarbageCollection(
+      SectorDescriptor** found_sector,
+      size_t size,
+      span<const Address> addresses_to_skip,
+      span<const Address> reserved_addresses) {
+    return Find(kGarbageCollect,
+                found_sector,
+                size,
+                addresses_to_skip,
+                reserved_addresses);
+  }
+
+  // Finds a sector that is ready to be garbage collected. Returns nullptr if no
+  // sector can or needs to be garbage collected.
+  SectorDescriptor* FindSectorToGarbageCollect(
+      span<const Address> addresses_to_avoid);
+
+  // The number of sectors in use.
+  size_t size() const { return descriptors_.size(); }
+
+  // The maximum number of sectors supported.
+  size_t max_size() const { return descriptors_.max_size(); }
+
+  // Returns the index of the provided sector. Used for logging.
+  unsigned Index(const SectorDescriptor& sector) const {
+    return &sector - descriptors_.begin();
+  }
+  unsigned Index(const SectorDescriptor* s) const { return Index(*s); }
+  unsigned Index(Address address) const { return Index(FromAddress(address)); }
+
+  // Iterators for iterating over all sectors.
+  using iterator = Vector<SectorDescriptor>::iterator;
+  using const_iterator = Vector<SectorDescriptor>::const_iterator;
+
+  iterator begin() { return descriptors_.begin(); }
+  const_iterator begin() const { return descriptors_.begin(); }
+  iterator end() { return descriptors_.end(); }
+  const_iterator end() const { return descriptors_.end(); }
+
+ private:
+  enum FindMode { kAppendEntry, kGarbageCollect };
+
+  Status Find(FindMode find_mode,
+              SectorDescriptor** found_sector,
+              size_t size,
+              span<const Address> addresses_to_skip,
+              span<const Address> reserved_addresses);
+
+  Vector<SectorDescriptor>& descriptors_;
+  FlashPartition& partition_;
+
+  SectorDescriptor* last_new_;
+
+  // Temp buffer with space for redundancy * 2 - 1 sector pointers. This list is
+  // used to track sectors that should be excluded from Find functions.
+  const SectorDescriptor** const temp_sectors_to_skip_;
+};
+
 }  // namespace pw::kvs::internal
diff --git a/pw_kvs/public/pw_kvs/key_value_store.h b/pw_kvs/public/pw_kvs/key_value_store.h
index 16e5fb6..fe705b6 100644
--- a/pw_kvs/public/pw_kvs/key_value_store.h
+++ b/pw_kvs/public/pw_kvs/key_value_store.h
@@ -372,59 +372,17 @@
                        KeyValueStore::Address& address,
                        span<const Address> addresses_to_skip);
 
-  enum FindSectorMode { kAppendEntry, kGarbageCollect };
-
-  Status FindSectorWithSpace(SectorDescriptor** found_sector,
-                             size_t size,
-                             FindSectorMode find_mode,
-                             span<const Address> addresses_to_skip,
-                             span<const Address> reserved_addresses);
-
-  SectorDescriptor* FindSectorToGarbageCollect(
-      span<const Address> addresses_to_avoid);
-
   Status GarbageCollectPartial(span<const Address> addresses_to_skip);
 
   Status RelocateKeyAddressesInSector(SectorDescriptor& sector_to_gc,
                                       const EntryMetadata& descriptor,
                                       span<const Address> addresses_to_skip);
 
-  Status GarbageCollectSector(SectorDescriptor* sector_to_gc,
+  Status GarbageCollectSector(SectorDescriptor& sector_to_gc,
                               span<const Address> addresses_to_skip);
 
   Status Repair() { return Status::UNIMPLEMENTED; }
 
-  bool AddressInSector(const SectorDescriptor& sector, Address address) const {
-    const Address sector_base = SectorBaseAddress(&sector);
-    const Address sector_end = sector_base + partition_.sector_size_bytes();
-
-    return ((address >= sector_base) && (address < sector_end));
-  }
-
-  unsigned SectorIndex(const SectorDescriptor* sector) const {
-    return sector - sectors_.begin();
-  }
-
-  Address SectorBaseAddress(const SectorDescriptor* sector) const {
-    return SectorIndex(sector) * partition_.sector_size_bytes();
-  }
-
-  SectorDescriptor* SectorFromAddress(Address address) {
-    const size_t index = address / partition_.sector_size_bytes();
-    // TODO: Add boundary checking once asserts are supported.
-    // DCHECK_LT(index, sector_map_size_);`
-    return &sectors_[index];
-  }
-
-  const SectorDescriptor* SectorFromAddress(Address address) const {
-    return &sectors_[address / partition_.sector_size_bytes()];
-  }
-
-  Address NextWritableAddress(const SectorDescriptor* sector) const {
-    return SectorBaseAddress(sector) + partition_.sector_size_bytes() -
-           sector->writable_bytes();
-  }
-
   internal::Entry CreateEntry(Address address,
                               std::string_view key,
                               span<const std::byte> value,
@@ -436,33 +394,19 @@
   FlashPartition& partition_;
   const internal::EntryFormats formats_;
 
+  // List of sectors used by this KVS.
+  internal::Sectors sectors_;
+
   // Unordered list of KeyDescriptors. Finding a key requires scanning and
   // verifying a match by reading the actual entry.
   internal::EntryCache entry_cache_;
 
-  // List of sectors used by this KVS.
-  Vector<SectorDescriptor>& sectors_;
-
-  // Temp buffer with redundancy() * 2 - 1 entries for use in RelocateEntry.
-  // Used in FindSectorWithSpace and FindSectorToGarbageCollect.
-  const SectorDescriptor** const temp_sectors_to_skip_;
-
   Options options_;
 
   bool initialized_;
 
   bool error_detected_;
 
-  // The last sector that was selected as the "new empty sector" to write to.
-  // This last new sector is used as the starting point for the next "find a new
-  // empty sector to write to" operation. By using the last new sector as the
-  // start point we will cycle which empty sector is selected next, spreading
-  // the wear across all the empty sectors, rather than putting more wear on the
-  // lower number sectors.
-  //
-  // Use SectorDescriptor* for the persistent storage rather than sector index
-  // because SectorDescriptor* is the standard way to identify a sector.
-  SectorDescriptor* last_new_sector_;
   uint32_t last_transaction_id_;
 };
 
diff --git a/pw_kvs/sectors.cc b/pw_kvs/sectors.cc
new file mode 100644
index 0000000..0972d69
--- /dev/null
+++ b/pw_kvs/sectors.cc
@@ -0,0 +1,221 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_kvs/internal/sectors.h"
+
+#define PW_LOG_USE_ULTRA_SHORT_NAMES 1
+#include "pw_log/log.h"
+
+namespace pw::kvs::internal {
+namespace {
+
+// Returns true if the container contains the value.
+// TODO: At some point move this to pw_containers, along with adding tests.
+template <typename Container, typename T>
+bool Contains(const Container& container, const T& value) {
+  return std::find(std::begin(container), std::end(container), value) !=
+         std::end(container);
+}
+
+}  // namespace
+
+Status Sectors::Find(FindMode find_mode,
+                     SectorDescriptor** found_sector,
+                     size_t size,
+                     span<const Address> addresses_to_skip,
+                     span<const Address> reserved_addresses) {
+  SectorDescriptor* first_empty_sector = nullptr;
+  bool at_least_two_empty_sectors = (find_mode == kGarbageCollect);
+
+  // Used for the GC reclaimable bytes check
+  SectorDescriptor* non_empty_least_reclaimable_sector = nullptr;
+  const size_t sector_size_bytes = partition_.sector_size_bytes();
+
+  // Build a list of sectors to avoid.
+  //
+  // This is overly strict. reserved_addresses is populated when there are
+  // sectors reserved for a new entry. It is safe to garbage collect into
+  // these sectors, as long as there remains room for the pending entry. These
+  // reserved sectors could also be garbage collected if they have recoverable
+  // space. For simplicity, avoid both the relocating key's redundant entries
+  // (addresses_to_skip) and the sectors reserved for pending writes
+  // (reserved_addresses).
+  // TODO(hepler): Look into improving garbage collection.
+  size_t sectors_to_skip = 0;
+  for (Address address : addresses_to_skip) {
+    temp_sectors_to_skip_[sectors_to_skip++] = &FromAddress(address);
+  }
+  for (Address address : reserved_addresses) {
+    temp_sectors_to_skip_[sectors_to_skip++] = &FromAddress(address);
+  }
+
+  DBG("Find sector with %zu bytes available, starting with sector %u, %s",
+      size,
+      Index(last_new_),
+      (find_mode == kAppendEntry) ? "Append" : "GC");
+  for (size_t i = 0; i < sectors_to_skip; ++i) {
+    DBG("  Skip sector %u", Index(temp_sectors_to_skip_[i]));
+  }
+
+  // last_new_ is the sector that was last selected as the "new empty sector" to
+  // write to. This last new sector is used as the starting point for the next
+  // "find a new empty sector to write to" operation. By using the last new
+  // sector as the starting point, we cycle which empty sector is selected
+  // next, spreading the wear across all the empty sectors and getting a
+  // wear-leveling benefit, rather than putting more wear on the lower-numbered
+  // sectors.
+  SectorDescriptor* sector = last_new_;
+
+  // Look for a sector to use with enough space. The search uses a three-tier
+  // priority process.
+  //
+  // Tier 1 is a sector that already has valid data. During GC, only select a
+  // sector that has no reclaimable bytes. Immediately use the first matching
+  // sector that is found.
+  //
+  // Tier 2 is finding sectors that are empty/erased. While scanning for a
+  // partial sector, track the first empty sector and whether a second empty
+  // sector was seen. During GC, count the second empty sector as always seen.
+  //
+  // Tier 3, used during garbage collection, is finding sectors with enough
+  // space that are not empty but have recoverable bytes. Pick the sector with
+  // the least recoverable bytes to minimize the likelihood of this sector
+  // needing to be garbage collected soon.
+  for (size_t j = 0; j < descriptors_.size(); j++) {
+    sector += 1;
+    if (sector == descriptors_.end()) {
+      sector = descriptors_.begin();
+    }
+
+    // Skip sectors in the skip list.
+    if (Contains(span(temp_sectors_to_skip_, sectors_to_skip), sector)) {
+      continue;
+    }
+
+    if (!sector->Empty(sector_size_bytes) && sector->HasSpace(size)) {
+      if ((find_mode == kAppendEntry) ||
+          (sector->RecoverableBytes(sector_size_bytes) == 0)) {
+        *found_sector = sector;
+        return Status::OK;
+      } else {
+        if ((non_empty_least_reclaimable_sector == nullptr) ||
+            (non_empty_least_reclaimable_sector->RecoverableBytes(
+                 sector_size_bytes) <
+             sector->RecoverableBytes(sector_size_bytes))) {
+          non_empty_least_reclaimable_sector = sector;
+        }
+      }
+    }
+
+    if (sector->Empty(sector_size_bytes)) {
+      if (first_empty_sector == nullptr) {
+        first_empty_sector = sector;
+      } else {
+        at_least_two_empty_sectors = true;
+      }
+    }
+  }
+
+  // Tier 2 check: If the scan for a partial sector does not find a suitable
+  // sector, use the first empty sector that was found. Normally it is required
+  // to keep 1 empty sector after the sector found here, but that rule does not
+  // apply during GC.
+  if (first_empty_sector != nullptr && at_least_two_empty_sectors) {
+    DBG("  Found a usable empty sector; returning the first found (%u)",
+        Index(first_empty_sector));
+    last_new_ = first_empty_sector;
+    *found_sector = first_empty_sector;
+    return Status::OK;
+  }
+
+  // Tier 3 check: If we got this far, use the sector with least recoverable
+  // bytes
+  if (non_empty_least_reclaimable_sector != nullptr) {
+    *found_sector = non_empty_least_reclaimable_sector;
+    DBG("  Found a usable sector %u, with %zu B recoverable, in GC",
+        Index(*found_sector),
+        (*found_sector)->RecoverableBytes(sector_size_bytes));
+    return Status::OK;
+  }
+
+  // No sector was found.
+  DBG("  Unable to find a usable sector");
+  *found_sector = nullptr;
+  return Status::RESOURCE_EXHAUSTED;
+}
+
+// TODO: Consider breaking this function into smaller sub-chunks.
+SectorDescriptor* Sectors::FindSectorToGarbageCollect(
+    span<const Address> reserved_addresses) {
+  const size_t sector_size_bytes = partition_.sector_size_bytes();
+  SectorDescriptor* sector_candidate = nullptr;
+  size_t candidate_bytes = 0;
+
+  // Build a vector of sectors to avoid.
+  for (size_t i = 0; i < reserved_addresses.size(); ++i) {
+    temp_sectors_to_skip_[i] = &FromAddress(reserved_addresses[i]);
+    DBG("    Skip sector %u", Index(reserved_addresses[i]));
+  }
+  const span sectors_to_skip(temp_sectors_to_skip_, reserved_addresses.size());
+
+  // Step 1: Try to find a sector with stale keys and no valid keys (no
+  // relocation needed). If any such sectors are found, use the sector with the
+  // most reclaimable bytes.
+  for (auto& sector : descriptors_) {
+    if ((sector.valid_bytes() == 0) &&
+        (sector.RecoverableBytes(sector_size_bytes) > candidate_bytes) &&
+        !Contains(sectors_to_skip, &sector)) {
+      sector_candidate = &sector;
+      candidate_bytes = sector.RecoverableBytes(sector_size_bytes);
+    }
+  }
+
+  // Step 2: If step 1 yields no sectors, just find the sector with the most
+  // reclaimable bytes that is not in the skip list.
+  if (sector_candidate == nullptr) {
+    for (auto& sector : descriptors_) {
+      if ((sector.RecoverableBytes(sector_size_bytes) > candidate_bytes) &&
+          !Contains(sectors_to_skip, &sector)) {
+        sector_candidate = &sector;
+        candidate_bytes = sector.RecoverableBytes(sector_size_bytes);
+      }
+    }
+  }
+
+  // Step 3: If there are no sectors with reclaimable bytes, select the sector
+  // with the most free bytes. This at least allows entries of existing keys to
+  // spread to other sectors, including sectors that already have copies of the
+  // current key being written.
+  if (sector_candidate == nullptr) {
+    for (auto& sector : descriptors_) {
+      if ((sector.valid_bytes() > candidate_bytes) &&
+          !Contains(sectors_to_skip, &sector)) {
+        sector_candidate = &sector;
+        candidate_bytes = sector.valid_bytes();
+        DBG("    Doing GC on sector with no reclaimable bytes!");
+      }
+    }
+  }
+
+  if (sector_candidate != nullptr) {
+    DBG("Found sector %u to Garbage Collect, %zu recoverable bytes",
+        Index(sector_candidate),
+        sector_candidate->RecoverableBytes(sector_size_bytes));
+  } else {
+    DBG("Unable to find sector to garbage collect!");
+  }
+  return sector_candidate;
+}
+
+}  // namespace pw::kvs::internal
diff --git a/pw_kvs/sectors_test.cc b/pw_kvs/sectors_test.cc
new file mode 100644
index 0000000..edf56c2
--- /dev/null
+++ b/pw_kvs/sectors_test.cc
@@ -0,0 +1,86 @@
+// Copyright 2020 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_kvs/internal/sectors.h"
+
+#include "gtest/gtest.h"
+#include "pw_kvs/in_memory_fake_flash.h"
+
+namespace pw::kvs::internal {
+namespace {
+
+class SectorsTest : public ::testing::Test {
+ protected:
+  SectorsTest()
+      : partition_(&flash_),
+        sectors_(sector_descriptors_, partition_, nullptr) {}
+
+  FakeFlashBuffer<128, 16> flash_;
+  FlashPartition partition_;
+  Vector<SectorDescriptor, 32> sector_descriptors_;
+  Sectors sectors_;
+};
+
+TEST_F(SectorsTest, Reset) {
+  EXPECT_EQ(0u, sectors_.size());
+
+  sectors_.Reset();
+
+  EXPECT_EQ(partition_.sector_count(), sectors_.size());
+  EXPECT_EQ(32u, sectors_.max_size());
+  EXPECT_EQ(sectors_.begin(), sectors_.last_new());
+}
+
+TEST_F(SectorsTest, LastNew) {
+  sectors_.set_last_new_sector(130);
+  EXPECT_EQ(128u, sectors_.BaseAddress(*sectors_.last_new()));
+}
+
+TEST_F(SectorsTest, AddressInSector) {
+  SectorDescriptor& sector = sectors_.FromAddress(128);
+
+  EXPECT_FALSE(sectors_.AddressInSector(sector, 127));
+  for (size_t address = 128; address < 256; ++address) {
+    EXPECT_TRUE(sectors_.AddressInSector(sector, address));
+  }
+  EXPECT_FALSE(sectors_.AddressInSector(sector, 256));
+  EXPECT_FALSE(sectors_.AddressInSector(sector, 1025));
+}
+
+TEST_F(SectorsTest, BaseAddressAndFromAddress) {
+  for (size_t address = 0; address < 128; ++address) {
+    EXPECT_EQ(0u, sectors_.BaseAddress(sectors_.FromAddress(address)));
+  }
+  for (size_t address = 128; address < 256; ++address) {
+    EXPECT_EQ(128u, sectors_.BaseAddress(sectors_.FromAddress(address)));
+  }
+  for (size_t address = 256; address < 384; ++address) {
+    EXPECT_EQ(256u, sectors_.BaseAddress(sectors_.FromAddress(address)));
+  }
+}
+
+TEST_F(SectorsTest, NextWritableAddress_EmptySector) {
+  EXPECT_EQ(0u, sectors_.NextWritableAddress(*sectors_.begin()));
+}
+
+TEST_F(SectorsTest, NextWritableAddress_PartiallyWrittenSector) {
+  sectors_.begin()->RemoveWritableBytes(123);
+  EXPECT_EQ(123u, sectors_.NextWritableAddress(*sectors_.begin()));
+}
+
+// TODO: Add tests for FindSpace, FindSpaceDuringGarbageCollection, and
+// FindSectorToGarbageCollect.
+
+}  // namespace
+}  // namespace pw::kvs::internal