pw_kvs: Update FullMaintenance garbage collection algorithm

During FullMaintenance, only garbage collect a sector that contains valid
data when the valid data bytes in the KVS exceed a 70% usage threshold.
This avoids performing entry relocations when the space is not needed.

Change-Id: I409c685b4649dda79b69e6dbf034b1c41df61242
diff --git a/pw_kvs/key_value_store.cc b/pw_kvs/key_value_store.cc
index 170ddf0..f3cb4a6 100644
--- a/pw_kvs/key_value_store.cc
+++ b/pw_kvs/key_value_store.cc
@@ -852,7 +852,9 @@
   if (error_detected_) {
     TRY(Repair());
   }
-  Status overall_status = UpdateEntriesToPrimaryFormat();
+  StatusWithSize update_status = UpdateEntriesToPrimaryFormat();
+  Status overall_status = update_status.status();
+
   // Make sure all the entries are on the primary format.
   if (!overall_status.ok()) {
     ERR("Failed to update all entries to the primary format");
@@ -860,6 +862,15 @@
 
   SectorDescriptor* sector = sectors_.last_new();
 
+  // Calculate number of bytes for the threshold.
+  size_t threshold_bytes =
+      (partition_.size_bytes() * kGcUsageThresholdPercentage) / 100;
+
+  // Check whether the bytes in use exceed the threshold.
+  StorageStats stats = GetStorageStats();
+  bool over_usage_threshold = stats.in_use_bytes > threshold_bytes;
+  bool force_gc = over_usage_threshold || (update_status.size() > 0);
+
   // TODO: look in to making an iterator method for cycling through sectors
   // starting from last_new_sector_.
   Status gc_status;
@@ -869,7 +880,8 @@
       sector = sectors_.begin();
     }
 
-    if (sector->RecoverableBytes(partition_.sector_size_bytes()) > 0) {
+    if (sector->RecoverableBytes(partition_.sector_size_bytes()) > 0 &&
+        (force_gc || sector->valid_bytes() == 0)) {
       gc_status = GarbageCollectSector(*sector, {});
       if (!gc_status.ok()) {
         ERR("Failed to garbage collect all sectors");
@@ -964,10 +976,11 @@
   return Status::OK;
 }
 
-Status KeyValueStore::UpdateEntriesToPrimaryFormat() {
+StatusWithSize KeyValueStore::UpdateEntriesToPrimaryFormat() {
+  size_t entries_updated = 0;
   for (EntryMetadata& prior_metadata : entry_cache_) {
     Entry entry;
-    TRY(ReadEntry(prior_metadata, entry));
+    TRY_WITH_SIZE(ReadEntry(prior_metadata, entry));
     if (formats_.primary().magic == entry.magic()) {
       // Ignore entries that are already on the primary format.
       continue;
@@ -979,17 +992,20 @@
         unsigned(entry.magic()),
         unsigned(formats_.primary().magic));
 
+    entries_updated++;
+
     last_transaction_id_ += 1;
-    TRY(entry.Update(formats_.primary(), last_transaction_id_));
+    TRY_WITH_SIZE(entry.Update(formats_.primary(), last_transaction_id_));
 
     // List of addresses for sectors with space for this entry.
     Address* reserved_addresses = entry_cache_.TempReservedAddressesForWrite();
 
     // Find addresses to write the entry to. This may involve garbage collecting
     // one or more sectors.
-    TRY(GetAddressesForWrite(reserved_addresses, entry.size()));
+    TRY_WITH_SIZE(GetAddressesForWrite(reserved_addresses, entry.size()));
 
-    TRY(CopyEntryToSector(entry,
+    TRY_WITH_SIZE(
+        CopyEntryToSector(entry,
                           &sectors_.FromAddress(reserved_addresses[0]),
                           reserved_addresses[0]));
 
@@ -1001,13 +1017,15 @@
     // Write the additional copies of the entry, if redundancy is greater
     // than 1.
     for (size_t i = 1; i < redundancy(); ++i) {
-      TRY(CopyEntryToSector(entry,
+      TRY_WITH_SIZE(
+          CopyEntryToSector(entry,
                             &sectors_.FromAddress(reserved_addresses[i]),
                             reserved_addresses[i]));
       new_metadata.AddNewAddress(reserved_addresses[i]);
     }
   }
-  return Status::OK;
+
+  return StatusWithSize(entries_updated);
 }
 
 // Add any missing redundant entries/copies for a key.
diff --git a/pw_kvs/key_value_store_map_test.cc b/pw_kvs/key_value_store_map_test.cc
index e2d8f68..5bd8535 100644
--- a/pw_kvs/key_value_store_map_test.cc
+++ b/pw_kvs/key_value_store_map_test.cc
@@ -321,8 +321,12 @@
     StartOperation("GCFull");
     Status status = kvs_.FullMaintenance();
     EXPECT_EQ(Status::OK, status);
+
     KeyValueStore::StorageStats post_stats = kvs_.GetStorageStats();
-    EXPECT_EQ(post_stats.reclaimable_bytes, 0U);
+    if (post_stats.in_use_bytes > ((partition_.size_bytes() * 70) / 100)) {
+      EXPECT_EQ(post_stats.reclaimable_bytes, 0U);
+    }
+
     FinishOperation("GCFull", status);
   }
 
diff --git a/pw_kvs/public/pw_kvs/key_value_store.h b/pw_kvs/public/pw_kvs/key_value_store.h
index 4ca5102..061b512 100644
--- a/pw_kvs/public/pw_kvs/key_value_store.h
+++ b/pw_kvs/public/pw_kvs/key_value_store.h
@@ -443,7 +443,9 @@
 
   // Ensure that all entries are on the primary (first) format. Entries that are
   // not on the primary format are rewritten.
-  Status UpdateEntriesToPrimaryFormat();
+  //
+  // Return: status + number of entries updated.
+  StatusWithSize UpdateEntriesToPrimaryFormat();
 
   Status AddRedundantEntries(EntryMetadata& metadata);
 
@@ -477,6 +479,13 @@
 
   Options options_;
 
+  // Threshold value for when to garbage collect all stale data. Above the
+  // threshold, GC all reclaimable bytes regardless of whether valid data is
+  // in the sector. Below the threshold, only GC sectors with reclaimable
+  // bytes and no valid bytes. The threshold is based on the portion of KVS
+  // capacity used by valid bytes.
+  static constexpr size_t kGcUsageThresholdPercentage = 70;
+
   enum class InitializationState {
     // KVS Init() has not been called and KVS is not usable.
     kNotInitialized,