pw_kvs: Don't garbage collect sectors with copies of an in-progress key

Don't garbage collect sectors that contain addresses from the
addresses-to-avoid list. These are the addresses of redundant copies
that have already been written for the key currently being written.

This can result in key writes failing sooner for some workloads with
near-full flash. The change simplifies how GC works, which makes some
KVS restructuring easier to do.
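
For illustration only, below is a minimal standalone sketch of the
selection rule this change relies on: when picking a garbage collection
candidate, any sector holding one of the addresses to skip is
ineligible, and if no other sector has reclaimable bytes the KVS
reports it is full. The Sector struct, SectorContains(), kSectorSize,
and the simplified FindSectorToGarbageCollect() signature here are
placeholders, not the actual pw_kvs types.

  // Sketch only; real pw_kvs uses SectorDescriptor and span<const Address>.
  // Everything here is reduced to plain std types for readability.
  #include <algorithm>
  #include <cstddef>
  #include <cstdint>
  #include <vector>

  using Address = uint32_t;

  constexpr size_t kSectorSize = 4096;  // illustrative sector size

  struct Sector {
    Address start;             // first address covered by this sector
    size_t recoverable_bytes;  // bytes GC could reclaim from this sector
  };

  bool SectorContains(const Sector& sector, Address address) {
    return address >= sector.start && address < sector.start + kSectorSize;
  }

  // Pick the sector with the most reclaimable bytes, skipping any sector
  // that holds a copy of the in-progress key. Returns nullptr if nothing
  // qualifies, which the caller treats as "KVS full".
  Sector* FindSectorToGarbageCollect(
      std::vector<Sector>& sectors,
      const std::vector<Address>& addresses_to_skip) {
    Sector* candidate = nullptr;
    size_t candidate_bytes = 0;
    for (Sector& sector : sectors) {
      const bool holds_in_progress_copy = std::any_of(
          addresses_to_skip.begin(), addresses_to_skip.end(),
          [&sector](Address address) { return SectorContains(sector, address); });
      if (!holds_in_progress_copy &&
          sector.recoverable_bytes > candidate_bytes) {
        candidate = &sector;
        candidate_bytes = sector.recoverable_bytes;
      }
    }
    return candidate;
  }

In this sketch, a caller in the position of GetSectorForWrite() would
pass the already-written copies of the in-progress key as
addresses_to_skip, so those copies are never relocated or erased
mid-write.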

Change-Id: I1bc52728fa29006253dbff529447e7385376d3a7
diff --git a/pw_kvs/key_value_store.cc b/pw_kvs/key_value_store.cc
index 0109ce6..93c077d 100644
--- a/pw_kvs/key_value_store.cc
+++ b/pw_kvs/key_value_store.cc
@@ -656,7 +656,7 @@
   // - Repeat for redundancy number of total entries.
   for (size_t i = 0; i < redundancy_; i++) {
     SectorDescriptor* sector;
-    TRY(GetSectorForWrite(&sector, entry_size, key_descriptor));
+    TRY(GetSectorForWrite(&sector, entry_size, key_descriptor->addresses()));
 
     DBG("Writing entry %zu; found sector: %u", i, SectorIndex(sector));
     const Address write_address = NextWritableAddress(sector);
@@ -683,9 +683,9 @@
 // RESOURCE_EXHAUSTED: No sector available with the needed space.
 Status KeyValueStore::GetSectorForWrite(SectorDescriptor** sector,
                                         size_t entry_size,
-                                        KeyDescriptor* key_descriptor) {
-  Status result = FindSectorWithSpace(
-      sector, entry_size, kAppendEntry, key_descriptor->addresses());
+                                        span<const Address> addresses_to_skip) {
+  Status result =
+      FindSectorWithSpace(sector, entry_size, kAppendEntry, addresses_to_skip);
 
   size_t gc_sector_count = 0;
   bool do_auto_gc = options_.gc_on_write != GargbageCollectOnWrite::kDisabled;
@@ -698,7 +698,7 @@
       do_auto_gc = false;
     }
     // Garbage collect and then try again to find the best sector.
-    Status gc_status = GarbageCollectPartial(key_descriptor);
+    Status gc_status = GarbageCollectPartial(addresses_to_skip);
     if (!gc_status.ok()) {
       if (gc_status == Status::NOT_FOUND) {
         // Not enough space, and no reclaimable bytes, this KVS is full!
@@ -708,7 +708,7 @@
     }
 
     result = FindSectorWithSpace(
-        sector, entry_size, kAppendEntry, key_descriptor->addresses());
+        sector, entry_size, kAppendEntry, addresses_to_skip);
 
     gc_sector_count++;
     // Allow total sectors + 2 number of GC cycles so that once reclaimable
@@ -997,7 +997,7 @@
     }
   }
 
-  // Step 2a: If step 1 yields no sectors, just find the sector with the most
+  // Step 2: If step 1 yields no sectors, just find the sector with the most
   // reclaimable bytes but no addresses to avoid.
   if (sector_candidate == nullptr) {
     for (auto& sector : sectors_) {
@@ -1009,17 +1009,6 @@
     }
   }
 
-  // Step 2b: If step 1 yields no sectors, just find the sector with the most
-  // reclaimable bytes.
-  if (sector_candidate == nullptr) {
-    for (auto& sector : sectors_) {
-      if (sector.RecoverableBytes(sector_size_bytes) > candidate_bytes) {
-        sector_candidate = &sector;
-        candidate_bytes = sector.RecoverableBytes(sector_size_bytes);
-      }
-    }
-  }
-
   // Step 3: If no sectors with reclaimable bytes, select the sector with the
   // most free bytes. This at least will allow entries of existing keys to get
   // spread to other sectors, including sectors that already have copies of the
@@ -1066,24 +1055,24 @@
   return Status::OK;
 }
 
-Status KeyValueStore::GarbageCollectPartial(KeyDescriptor* key_in_progress) {
-  DBG("Garbage Collect a single sector%s",
-      (key_in_progress == nullptr) ? "" : ", with key in progress");
+Status KeyValueStore::GarbageCollectPartial(
+    span<const Address> addresses_to_skip) {
+  DBG("Garbage Collect a single sector");
+  for (auto address : addresses_to_skip) {
+    DBG("   Avoid address %u", unsigned(address));
+  }
 
   // Step 1: Find the sector to garbage collect
-  auto addresses = span<const Address>();
-  if (key_in_progress != nullptr) {
-    DBG("  Use addresses to avoid");
-    addresses = key_in_progress->addresses();
-  }
-  SectorDescriptor* sector_to_gc = FindSectorToGarbageCollect(addresses);
+  SectorDescriptor* sector_to_gc =
+      FindSectorToGarbageCollect(addresses_to_skip);
 
   if (sector_to_gc == nullptr) {
     // Nothing to GC.
     return Status::NOT_FOUND;
   }
 
-  TRY(GarbageCollectSector(sector_to_gc, key_in_progress));
+  // Step 2: Garbage collect the selected sector.
+  TRY(GarbageCollectSector(sector_to_gc, nullptr));
   return Status::OK;
 }
 
diff --git a/pw_kvs/public/pw_kvs/key_value_store.h b/pw_kvs/public/pw_kvs/key_value_store.h
index 33a2bc6..53614ed 100644
--- a/pw_kvs/public/pw_kvs/key_value_store.h
+++ b/pw_kvs/public/pw_kvs/key_value_store.h
@@ -179,7 +179,9 @@
 
   // Perform garbage collection of part of the KVS, typically a single sector or
   // similar unit that makes sense for the KVS implementation.
-  Status GarbageCollectPartial() { return GarbageCollectPartial(nullptr); }
+  Status GarbageCollectPartial() {
+    return GarbageCollectPartial(span<const Address>());
+  }
 
   void LogDebugInfo();
 
@@ -374,7 +376,7 @@
 
   Status GetSectorForWrite(SectorDescriptor** sector,
                            size_t entry_size,
-                           KeyDescriptor* key_descriptor);
+                           span<const Address> addresses_to_skip);
 
   Status AppendEntry(Address write_address,
                      Entry& entry,
@@ -396,7 +398,7 @@
   SectorDescriptor* FindSectorToGarbageCollect(
       span<const Address> addresses_to_avoid);
 
-  Status GarbageCollectPartial(KeyDescriptor* key_in_progress);
+  Status GarbageCollectPartial(span<const Address> addresses_to_skip);
 
   Status RelocateKeyAddressesInSector(internal::SectorDescriptor* sector_to_gc,
                                       internal::KeyDescriptor* descriptor);