Add iterator invalidation checking for the case where the hashtable is moved.

Motivation: once we enable small object optimization in swisstable, elements of small tables will be stored inline in the table object itself, so iterators can be invalidated when the table is moved.
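
For example (a minimal sketch, assuming a build where
SwisstableGenerationsEnabled() is true; absl::flat_hash_set stands in for
any swisstable-backed container):

    #include "absl/container/flat_hash_set.h"

    absl::flat_hash_set<int> t1 = {1};
    auto it = t1.begin();
    absl::flat_hash_set<int> t2 = std::move(t1);
    // With this change, dereferencing `it` now dies with "... called on
    // invalid iterator. The table could have rehashed or moved since this
    // iterator was initialized." instead of silently reading from a
    // moved-from table.
    *it;
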
PiperOrigin-RevId: 573884505
Change-Id: I4278129829143d3747dfd0ef0ff92f321c2633dc
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 9eee4fc..a8fd80c 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -846,9 +846,10 @@
     if (reserved_growth_ > 0) {
       if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut;
     } else {
-      *generation_ = NextGeneration(*generation_);
+      increment_generation();
     }
   }
+  void increment_generation() { *generation_ = NextGeneration(*generation_); }
   void reset_reserved_growth(size_t reservation, size_t size) {
     reserved_growth_ = reservation - size;
   }
@@ -895,6 +896,7 @@
     return false;
   }
   void maybe_increment_generation_on_insert() {}
+  void increment_generation() {}
   void reset_reserved_growth(size_t, size_t) {}
   size_t reserved_growth() const { return 0; }
   void set_reserved_growth(size_t) {}
@@ -1066,6 +1068,10 @@
     return CommonFieldsGenerationInfo::
         should_rehash_for_bug_detection_on_insert(control(), capacity());
   }
+  void maybe_increment_generation_on_move() {
+    if (capacity() == 0) return;
+    increment_generation();
+  }
   void reset_reserved_growth(size_t reservation) {
     CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
   }
@@ -1219,7 +1225,7 @@
     if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) {
       ABSL_RAW_LOG(FATAL,
                    "%s called on invalid iterator. The table could have "
-                   "rehashed since this iterator was initialized.",
+                   "rehashed or moved since this iterator was initialized.",
                    operation);
     }
     if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) {
@@ -1250,8 +1256,8 @@
   if (SwisstableGenerationsEnabled()) {
     if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) {
       ABSL_RAW_LOG(FATAL,
-                        "Invalid iterator comparison. The table could have "
-                        "rehashed since this iterator was initialized.");
+                   "Invalid iterator comparison. The table could have rehashed "
+                   "or moved since this iterator was initialized.");
     }
     if (ABSL_PREDICT_FALSE(!ctrl_is_valid_for_comparison)) {
       ABSL_RAW_LOG(
@@ -1338,8 +1344,8 @@
     ABSL_HARDENING_ASSERT(
         AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
         "Invalid iterator comparison. The iterators may be from different "
-        "containers or the container might have rehashed. Consider running "
-        "with --config=asan to diagnose rehashing issues.");
+        "containers or the container might have rehashed or moved. Consider "
+        "running with --config=asan to diagnose issues.");
   }
 }
 
@@ -1926,6 +1932,7 @@
         settings_(std::move(that.common()), that.hash_ref(), that.eq_ref(),
                   that.alloc_ref()) {
     that.common() = CommonFields{};
+    common().maybe_increment_generation_on_move();
   }
 
   raw_hash_set(raw_hash_set&& that, const allocator_type& a)
@@ -1935,6 +1942,7 @@
     } else {
       move_elements_allocs_unequal(std::move(that));
     }
+    common().maybe_increment_generation_on_move();
   }
 
   raw_hash_set& operator=(const raw_hash_set& that) {
@@ -2707,6 +2715,7 @@
     CopyAlloc(alloc_ref(), that.alloc_ref(),
               std::integral_constant<bool, propagate_alloc>());
     that.common() = CommonFields{};
+    common().maybe_increment_generation_on_move();
     return *this;
   }
 
@@ -2741,6 +2750,7 @@
     // TODO(b/296061262): move instead of copying hash/eq.
     hash_ref() = that.hash_ref();
     eq_ref() = that.eq_ref();
+    common().maybe_increment_generation_on_move();
     return move_elements_allocs_unequal(std::move(that));
   }
 
diff --git a/absl/container/internal/raw_hash_set_allocator_test.cc b/absl/container/internal/raw_hash_set_allocator_test.cc
index 63448f4..0bb61cf 100644
--- a/absl/container/internal/raw_hash_set_allocator_test.cc
+++ b/absl/container/internal/raw_hash_set_allocator_test.cc
@@ -284,32 +284,36 @@
 }
 
 TEST_F(PropagateOnAll, MoveConstructor) {
-  auto it = t1.insert(0).first;
+  t1.insert(0);
   Table u(std::move(t1));
+  auto it = u.begin();
   EXPECT_EQ(1, a1.num_allocs());
   EXPECT_EQ(0, it->num_moves());
   EXPECT_EQ(0, it->num_copies());
 }
 
 TEST_F(NoPropagateOnMove, MoveConstructor) {
-  auto it = t1.insert(0).first;
+  t1.insert(0);
   Table u(std::move(t1));
+  auto it = u.begin();
   EXPECT_EQ(1, a1.num_allocs());
   EXPECT_EQ(0, it->num_moves());
   EXPECT_EQ(0, it->num_copies());
 }
 
 TEST_F(PropagateOnAll, MoveConstructorWithSameAlloc) {
-  auto it = t1.insert(0).first;
+  t1.insert(0);
   Table u(std::move(t1), a1);
+  auto it = u.begin();
   EXPECT_EQ(1, a1.num_allocs());
   EXPECT_EQ(0, it->num_moves());
   EXPECT_EQ(0, it->num_copies());
 }
 
 TEST_F(NoPropagateOnMove, MoveConstructorWithSameAlloc) {
-  auto it = t1.insert(0).first;
+  t1.insert(0);
   Table u(std::move(t1), a1);
+  auto it = u.begin();
   EXPECT_EQ(1, a1.num_allocs());
   EXPECT_EQ(0, it->num_moves());
   EXPECT_EQ(0, it->num_copies());
@@ -378,9 +382,10 @@
 }
 
 TEST_F(PropagateOnAll, MoveAssignmentWithSameAlloc) {
-  auto it = t1.insert(0).first;
+  t1.insert(0);
   Table u(0, a1);
   u = std::move(t1);
+  auto it = u.begin();
   EXPECT_EQ(a1, u.get_allocator());
   EXPECT_EQ(1, a1.num_allocs());
   EXPECT_EQ(0, it->num_moves());
@@ -388,9 +393,10 @@
 }
 
 TEST_F(NoPropagateOnMove, MoveAssignmentWithSameAlloc) {
-  auto it = t1.insert(0).first;
+  t1.insert(0);
   Table u(0, a1);
   u = std::move(t1);
+  auto it = u.begin();
   EXPECT_EQ(a1, u.get_allocator());
   EXPECT_EQ(1, a1.num_allocs());
   EXPECT_EQ(0, it->num_moves());
@@ -398,9 +404,10 @@
 }
 
 TEST_F(PropagateOnAll, MoveAssignmentWithDifferentAlloc) {
-  auto it = t1.insert(0).first;
+  t1.insert(0);
   Table u(0, a2);
   u = std::move(t1);
+  auto it = u.begin();
   EXPECT_EQ(a1, u.get_allocator());
   EXPECT_EQ(1, a1.num_allocs());
   EXPECT_EQ(0, a2.num_allocs());
@@ -409,10 +416,10 @@
 }
 
 TEST_F(NoPropagateOnMove, MoveAssignmentWithDifferentAlloc) {
-  auto it = t1.insert(0).first;
+  t1.insert(0);
   Table u(0, a2);
   u = std::move(t1);
-  it = u.find(0);
+  auto it = u.find(0);
   EXPECT_EQ(a2, u.get_allocator());
   EXPECT_EQ(1, a1.num_allocs());
   EXPECT_EQ(1, a2.num_allocs());
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 61eb6d9..d194ca1 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -2370,6 +2370,19 @@
 #endif
 }
 
+TEST(Iterator, InvalidUseWithMoveCrashesWithSanitizers) {
+  if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
+  if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
+
+  IntTable t1, t2;
+  t1.insert(1);
+  auto it = t1.begin();
+  t2 = std::move(t1);
+  EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
+  EXPECT_DEATH_IF_SUPPORTED(void(it == t2.begin()),
+                            kInvalidIteratorDeathMessage);
+}
+
 TEST(Table, ReservedGrowthUpdatesWhenTableDoesntGrow) {
   IntTable t;
   for (int i = 0; i < 8; ++i) t.insert(i);