Streamline Arena Cleanup Allocations

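Rework the arena cleanup allocation paths around two small template
parameters instead of a raw destructor pointer:

  - `TagOrCleanup`: either a `cleanup::Tag` (std::string / absl::Cord, when
    `cleanup::EnableTags()` holds) or a `void (*)(void*)` destructor.
  - `Align`: `ArenaAlignDefault` for alignments <= 8 (all alignment math
    becomes a no-op) or `ArenaAlign` for over-aligned types.

SerialArena and ThreadSafeArena move out of arena_impl.h into the new
headers serial_arena.h and thread_safe_arena.h, and arena_align.h gains
`Padded()` plus the `ArenaAlignAs<align>()` / `ArenaAlignOf<T>()` factories.

A rough sketch of the interface change (the exact overloads and explicit
instantiations are in the diff below):

  // Before:
  void* SerialArena::AllocateAlignedWithCleanup(size_t n, size_t align,
                                                void (*destructor)(void*));
  void ThreadSafeArena::AddCleanup(void* elem, void (*cleanup)(void*));

  // After:
  template <typename TagOrCleanup, typename Align>
  void* SerialArena::AllocateWithCleanup(size_t size, Align align,
                                         TagOrCleanup cleanup);
  template <typename TagOrCleanup>
  void ThreadSafeArena::AddCleanup(void* elem, TagOrCleanup cleanup);
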
PiperOrigin-RevId: 493743618
diff --git a/src/file_lists.cmake b/src/file_lists.cmake
index 1e326bc..b7367ef 100644
--- a/src/file_lists.cmake
+++ b/src/file_lists.cmake
@@ -103,7 +103,6 @@
   ${protobuf_SOURCE_DIR}/src/google/protobuf/arena_allocation_policy.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/arena_cleanup.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/arena_config.h
-  ${protobuf_SOURCE_DIR}/src/google/protobuf/arena_impl.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/arenastring.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/arenaz_sampler.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/compiler/importer.h
@@ -170,6 +169,7 @@
   ${protobuf_SOURCE_DIR}/src/google/protobuf/repeated_field.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/repeated_ptr_field.h
+  ${protobuf_SOURCE_DIR}/src/google/protobuf/serial_arena.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/service.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/stubs/callback.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/stubs/common.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/stubs/logging.h
@@ -178,6 +178,7 @@
   ${protobuf_SOURCE_DIR}/src/google/protobuf/stubs/port.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/stubs/status_macros.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/text_format.h
+  ${protobuf_SOURCE_DIR}/src/google/protobuf/thread_safe_arena.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/unknown_field_set.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/util/delimited_message_util.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/util/field_comparator.h
@@ -227,7 +228,6 @@
   ${protobuf_SOURCE_DIR}/src/google/protobuf/arena_allocation_policy.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/arena_cleanup.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/arena_config.h
-  ${protobuf_SOURCE_DIR}/src/google/protobuf/arena_impl.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/arenastring.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/arenaz_sampler.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/endian.h
@@ -258,6 +258,7 @@
   ${protobuf_SOURCE_DIR}/src/google/protobuf/port_undef.inc
   ${protobuf_SOURCE_DIR}/src/google/protobuf/repeated_field.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/repeated_ptr_field.h
+  ${protobuf_SOURCE_DIR}/src/google/protobuf/serial_arena.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/stubs/callback.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/stubs/common.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/stubs/logging.h
@@ -265,6 +266,7 @@
   ${protobuf_SOURCE_DIR}/src/google/protobuf/stubs/platform_macros.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/stubs/port.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/stubs/status_macros.h
+  ${protobuf_SOURCE_DIR}/src/google/protobuf/thread_safe_arena.h
   ${protobuf_SOURCE_DIR}/src/google/protobuf/wire_format_lite.h
 )
 
diff --git a/src/google/protobuf/BUILD.bazel b/src/google/protobuf/BUILD.bazel
index 48a8115..946802f 100644
--- a/src/google/protobuf/BUILD.bazel
+++ b/src/google/protobuf/BUILD.bazel
@@ -255,6 +255,7 @@
     deps = [
         ":arena_allocation_policy",
         ":arena_cleanup",
+        ":arena_align",
         ":arena_config",
         "//src/google/protobuf/stubs:lite",
         "@com_google_absl//absl/synchronization",
diff --git a/src/google/protobuf/arena.cc b/src/google/protobuf/arena.cc
index 8479f5d..3f30c84 100644
--- a/src/google/protobuf/arena.cc
+++ b/src/google/protobuf/arena.cc
@@ -39,10 +39,12 @@
 
 #include "absl/base/attributes.h"
 #include "absl/synchronization/mutex.h"
+#include "google/protobuf/arena_align.h"
 #include "google/protobuf/arena_allocation_policy.h"
-#include "google/protobuf/arena_impl.h"
 #include "google/protobuf/arenaz_sampler.h"
 #include "google/protobuf/port.h"
+#include "google/protobuf/serial_arena.h"
+#include "google/protobuf/thread_safe_arena.h"
 
 
 #ifdef ADDRESS_SANITIZER
@@ -194,21 +196,31 @@
   return AllocateFromExisting(n);
 }
 
-PROTOBUF_NOINLINE
-void* SerialArena::AllocateAlignedWithCleanupFallback(
-    size_t n, size_t align, void (*destructor)(void*)) {
-  size_t required = AlignUpTo(n, align) + cleanup::Size(destructor);
-  AllocateNewBlock(required);
-  return AllocateFromExistingWithCleanupFallback(n, align, destructor);
+template <typename TagOrCleanup, typename Align>
+void* SerialArena::AllocateWithCleanupFallback(size_t size, Align align,
+                                               TagOrCleanup cleanup) {
+  AllocateNewBlock(align.Padded(size) + cleanup::CleanupSize(cleanup));
+  return BlindlyAllocateWithCleanup(size, align, cleanup);
 }
 
-PROTOBUF_NOINLINE
-void SerialArena::AddCleanupFallback(void* elem, void (*destructor)(void*)) {
-  size_t required = cleanup::Size(destructor);
-  AllocateNewBlock(required);
-  AddCleanupFromExisting(elem, destructor);
+template void* SerialArena::AllocateWithCleanupFallback(size_t,
+                                                        ArenaAlignDefault,
+                                                        cleanup::Tag);
+template void* SerialArena::AllocateWithCleanupFallback(size_t,
+                                                        ArenaAlignDefault,
+                                                        void (*)(void*));
+template void* SerialArena::AllocateWithCleanupFallback(size_t, ArenaAlign,
+                                                        void (*)(void*));
+
+template <typename TagOrCleanup>
+void SerialArena::AddCleanupFallback(void* elem, TagOrCleanup cleanup) {
+  AllocateNewBlock(cleanup::CleanupSize(cleanup));
+  BlindlyAddCleanup(elem, cleanup);
 }
 
+template void SerialArena::AddCleanupFallback(void*, cleanup::Tag);
+template void SerialArena::AddCleanupFallback(void*, void (*)(void*));
+
 void SerialArena::AllocateNewBlock(size_t n) {
   size_t used = 0;
   size_t wasted = 0;
@@ -694,30 +706,22 @@
   return space_allocated;
 }
 
-void* ThreadSafeArena::AllocateAlignedWithCleanup(size_t n, size_t align,
-                                                  void (*destructor)(void*)) {
-  SerialArena* arena;
-  if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
-    return arena->AllocateAlignedWithCleanup(n, align, destructor);
-  } else {
-    return AllocateAlignedWithCleanupFallback(n, align, destructor);
-  }
+template <typename TagOrCleanup, typename Align>
+void* ThreadSafeArena::AllocateWithCleanupFallback(size_t size, Align align,
+                                                   TagOrCleanup cleanup) {
+  const size_t n = align.Padded(size) + cleanup::CleanupSize(cleanup);
+  SerialArena* arena = GetSerialArenaFallback(n);
+  return arena->AllocateWithCleanup(size, align, cleanup);
 }
 
-void ThreadSafeArena::AddCleanup(void* elem, void (*cleanup)(void*)) {
-  SerialArena* arena;
-  if (PROTOBUF_PREDICT_FALSE(!GetSerialArenaFast(&arena))) {
-    arena = GetSerialArenaFallback(kMaxCleanupNodeSize);
-  }
-  arena->AddCleanup(elem, cleanup);
-}
-
-PROTOBUF_NOINLINE
-void* ThreadSafeArena::AllocateAlignedWithCleanupFallback(
-    size_t n, size_t align, void (*destructor)(void*)) {
-  return GetSerialArenaFallback(n + kMaxCleanupNodeSize)
-      ->AllocateAlignedWithCleanup(n, align, destructor);
-}
+template void* ThreadSafeArena::AllocateWithCleanupFallback(size_t,
+                                                            ArenaAlignDefault,
+                                                            cleanup::Tag);
+template void* ThreadSafeArena::AllocateWithCleanupFallback(size_t,
+                                                            ArenaAlignDefault,
+                                                            void (*)(void*));
+template void* ThreadSafeArena::AllocateWithCleanupFallback(size_t, ArenaAlign,
+                                                            void (*)(void*));
 
 template <typename Functor>
 void ThreadSafeArena::WalkConstSerialArenaChunk(Functor fn) const {
@@ -845,11 +849,27 @@
   return impl_.AllocateAligned<internal::AllocationClient::kArray>(n);
 }
 
-void* Arena::AllocateAlignedWithCleanup(size_t n, size_t align,
-                                        void (*destructor)(void*)) {
-  return impl_.AllocateAlignedWithCleanup(n, align, destructor);
+template <typename TagOrCleanup>
+void Arena::AddCleanup(void* object, TagOrCleanup cleanup) {
+  impl_.AddCleanup(object, cleanup);
 }
 
+template void Arena::AddCleanup(void*, internal::cleanup::Tag);
+template void Arena::AddCleanup(void*, void (*)(void*));
+
+template <typename TagOrCleanup, typename Align>
+void* Arena::AllocateWithCleanup(size_t size, Align align,
+                                 TagOrCleanup cleanup) {
+  return impl_.AllocateWithCleanup(size, align, cleanup);
+}
+
+template void* Arena::AllocateWithCleanup(size_t, internal::ArenaAlignDefault,
+                                          internal::cleanup::Tag);
+template void* Arena::AllocateWithCleanup(size_t, internal::ArenaAlignDefault,
+                                          void (*)(void*));
+template void* Arena::AllocateWithCleanup(size_t, internal::ArenaAlign,
+                                          void (*)(void*));
+
 }  // namespace protobuf
 }  // namespace google
 
diff --git a/src/google/protobuf/arena.h b/src/google/protobuf/arena.h
index d17f63f..f9f39f1 100644
--- a/src/google/protobuf/arena.h
+++ b/src/google/protobuf/arena.h
@@ -49,9 +49,11 @@
 #endif
 
 #include <type_traits>
+#include "google/protobuf/arena_align.h"
 #include "google/protobuf/arena_config.h"
-#include "google/protobuf/arena_impl.h"
 #include "google/protobuf/port.h"
+#include "google/protobuf/serial_arena.h"
+#include "google/protobuf/thread_safe_arena.h"
 
 // Must be included last.
 #include "google/protobuf/port_def.inc"
@@ -273,11 +275,10 @@
     if (arena == nullptr) {
       return new T(std::forward<Args>(args)...);
     }
-    auto destructor =
-        internal::ObjectDestructor<std::is_trivially_destructible<T>::value,
-                                   T>::destructor;
-    return new (arena->AllocateInternal(sizeof(T), alignof(T), destructor))
-        T(std::forward<Args>(args)...);
+    void* mem = std::is_trivially_destructible<T>::value
+                    ? arena->AllocateAligned(sizeof(T), alignof(T))
+                    : arena->AllocateWithCleanup<T>();
+    return new (mem) T(std::forward<Args>(args)...);
   }
 
   // API to delete any objects not on an arena.  This can be used to safely
@@ -291,15 +292,16 @@
 
   // Allocates memory with the specific size and alignment.
   void* AllocateAligned(size_t size, size_t align = 8) {
-    if (align <= 8) {
-      return Allocate(internal::AlignUpTo8(size));
+    if (align <= internal::ArenaAlignDefault::align) {
+      return Allocate(internal::ArenaAlignDefault::Ceil(size));
     } else {
       // We are wasting space by over allocating align - 8 bytes. Compared
       // to a dedicated function that takes current alignment in consideration.
       // Such a scheme would only waste (align - 8)/2 bytes on average, but
       // requires a dedicated function in the outline arena allocation
       // functions. Possibly re-evaluate tradeoffs later.
-      return internal::AlignTo(Allocate(size + align - 8), align);
+      auto align_as = internal::ArenaAlignAs(align);
+      return align_as.Ceil(Allocate(align_as.Padded(size)));
     }
   }
 
@@ -370,9 +372,7 @@
   // arena-allocated memory.
   template <typename T>
   PROTOBUF_ALWAYS_INLINE void OwnDestructor(T* object) {
-    if (object != nullptr) {
-      impl_.AddCleanup(object, &internal::cleanup::arena_destruct_object<T>);
-    }
+    if (object != nullptr) AddCleanup(object);
   }
 
   // Adds a custom member function on an object to the list of destructors that
@@ -560,15 +560,8 @@
     }
   }
 
-  PROTOBUF_NDEBUG_INLINE void* AllocateInternal(size_t size, size_t align,
-                                                void (*destructor)(void*)) {
-    // Monitor allocation if needed.
-    if (destructor == nullptr) {
-      return AllocateAligned(size, align);
-    } else {
-      return AllocateAlignedWithCleanup(size, align, destructor);
-    }
-  }
+  template <typename T, bool enable_tags = internal::cleanup::EnableTags()>
+  void* AllocateWithCleanup();
 
   // CreateMessage<T> requires that T supports arenas, but this private method
   // works whether or not T supports arenas. These are not exposed to user code
@@ -610,12 +603,10 @@
 
   template <typename T, typename... Args>
   PROTOBUF_NDEBUG_INLINE T* DoCreateMessage(Args&&... args) {
-    return InternalHelper<T>::Construct(
-        AllocateInternal(sizeof(T), alignof(T),
-                         internal::ObjectDestructor<
-                             InternalHelper<T>::is_destructor_skippable::value,
-                             T>::destructor),
-        this, std::forward<Args>(args)...);
+    void* mem = InternalHelper<T>::is_destructor_skippable::value
+                    ? AllocateAligned(sizeof(T), alignof(T))
+                    : AllocateWithCleanup<T>();
+    return InternalHelper<T>::Construct(mem, this, std::forward<Args>(args)...);
   }
 
   // CreateInArenaStorage is used to implement map field. Without it,
@@ -662,22 +653,30 @@
   }
 
   void* AllocateAlignedForArray(size_t n, size_t align) {
-    if (align <= 8) {
-      return AllocateForArray(internal::AlignUpTo8(n));
+    if (align <= internal::ArenaAlignDefault::align) {
+      return AllocateForArray(internal::ArenaAlignDefault::Ceil(n));
     } else {
       // We are wasting space by over allocating align - 8 bytes. Compared
       // to a dedicated function that takes current alignment in consideration.
       // Such a scheme would only waste (align - 8)/2 bytes on average, but
       // requires a dedicated function in the outline arena allocation
       // functions. Possibly re-evaluate tradeoffs later.
-      return internal::AlignTo(AllocateForArray(n + align - 8), align);
+      auto align_as = internal::ArenaAlignAs(align);
+      return align_as.Ceil(AllocateForArray(align_as.Padded(n)));
     }
   }
 
   void* Allocate(size_t n);
   void* AllocateForArray(size_t n);
-  void* AllocateAlignedWithCleanup(size_t n, size_t align,
-                                   void (*destructor)(void*));
+
+  template <typename T, bool enable_tags = internal::cleanup::EnableTags()>
+  void AddCleanup(T* object);
+
+  template <typename TagOrCleanup>
+  void AddCleanup(void* object, TagOrCleanup cleanup);
+
+  template <typename TagOrCleanup, typename Align>
+  void* AllocateWithCleanup(size_t size, Align align, TagOrCleanup cleanup);
 
   template <typename Type>
   friend class internal::GenericTypeHandler;
@@ -694,6 +693,49 @@
   friend struct internal::ArenaTestPeer;
 };
 
+// Default implementation forwards to `AddCleanup(T* object, dtor<T>)`.
+template <typename T, bool enable_tags>
+inline PROTOBUF_NDEBUG_INLINE void Arena::AddCleanup(T* object) {
+  AddCleanup(object, &internal::cleanup::arena_destruct_object<T>);
+}
+
+// Specialization for `AddCleanup<std::string>()`
+template <>
+inline PROTOBUF_NDEBUG_INLINE void Arena::AddCleanup<std::string, true>(
+    std::string* object) {
+  AddCleanup(object, internal::cleanup::Tag::kString);
+}
+
+// Specialization for `AddCleanup<absl::Cord>()`
+template <>
+inline PROTOBUF_NDEBUG_INLINE void Arena::AddCleanup<absl::Cord, true>(
+    absl::Cord* object) {
+  AddCleanup(object, internal::cleanup::Tag::kCord);
+}
+
+template <typename T, bool enable_tags>
+inline PROTOBUF_NDEBUG_INLINE void* Arena::AllocateWithCleanup() {
+  constexpr auto align = internal::ArenaAlignOf<T>();
+  return AllocateWithCleanup(align.Ceil(sizeof(T)), align,
+                             &internal::cleanup::arena_destruct_object<T>);
+}
+
+template <>
+inline PROTOBUF_NDEBUG_INLINE void*
+Arena::AllocateWithCleanup<std::string, true>() {
+  constexpr auto align = internal::ArenaAlignOf<std::string>();
+  constexpr size_t size = align.Ceil(sizeof(std::string));
+  return AllocateWithCleanup(size, align, internal::cleanup::Tag::kString);
+}
+
+template <>
+inline PROTOBUF_NDEBUG_INLINE void*
+Arena::AllocateWithCleanup<absl::Cord, true>() {
+  constexpr auto align = internal::ArenaAlignOf<absl::Cord>();
+  constexpr size_t size = align.Ceil(sizeof(absl::Cord));
+  return AllocateWithCleanup(size, align, internal::cleanup::Tag::kCord);
+}
+
 }  // namespace protobuf
 }  // namespace google
 
diff --git a/src/google/protobuf/arena_align.h b/src/google/protobuf/arena_align.h
index 0603972..2b94873 100644
--- a/src/google/protobuf/arena_align.h
+++ b/src/google/protobuf/arena_align.h
@@ -35,14 +35,21 @@
 //
 //   Ceil(size_t n)      - rounds `n` up to the nearest `align` boundary.
 //   Floor(size_t n)     - rounds `n` down to the nearest `align` boundary.
-//   Ceil(T* P)          - rounds `p` up to the nearest `align` boundary.
+//   Padded(size_t n)    - returns the total size to align 'n' bytes. (1)
+//
+//   Ceil(T* P)          - rounds `p` up to the nearest `align` boundary. (2)
 //   IsAligned(size_t n) - returns true if `n` is aligned to `align`
 //   IsAligned(T* p)     - returns true if `p` is aligned to `align`
 //   CheckAligned(T* p)  - returns `p`. Checks alignment of `p` in debug.
 //
-// Additionally there is an optimized `CeilDefaultAligned(T*)` method which is
-// equivalent to `Ceil(ArenaAlignDefault().CheckAlign(p))` but more efficiently
-// implemented as a 'check only' for ArenaAlignDefault.
+// 1) `Padded(n)` returns the minimum size needed to fit an `align`-aligned
+//    object of size 'n' into a memory area that is only default aligned. For
+//    example, allocating 'n' bytes at a 32 byte alignment requires a size of
+//    'n + 32 - 8', since the area may start at any 8 byte boundary.
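+//    For instance, `ArenaAlignAs(32).Padded(64)` is 64 + 32 - 8 = 88: any
+//    default aligned area of 88 bytes contains a 32 byte aligned span of 64
+//    bytes.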
+//
+// 2) There is an optimized `CeilDefaultAligned(T*)` method which is equivalent
+//    to `Ceil(ArenaAlignDefault::CheckAligned(p))` but more efficiently
+//    implemented as a 'check only' for ArenaAlignDefault.
 //
 // These classes allow for generic arena logic using 'alignment policies'.
 //
@@ -50,10 +57,14 @@
 //
 //  template <Align>
 //  void* NaiveAlloc(size_t n, Align align) {
-//    align.CheckAligned(n);
-//    uint8_t* ptr = align.CeilDefaultAligned(ptr_);
-//    ptr_ += n;
-//    return ptr;
+//    ABSL_ASSERT(align.IsAligned(n));
+//    const size_t required = align.Padded(n);
+//    if (required <= static_cast<size_t>(limit_ - ptr_)) {
+//      uint8_t* ptr = align.CeilDefaultAligned(ptr_);
+//      ptr_ = ptr + n;
+//      return ptr;
+//    }
+//    return nullptr;
 //  }
 //
 //  void CallSites() {
@@ -80,31 +91,41 @@
 struct ArenaAlignDefault {
   PROTOBUF_EXPORT static constexpr size_t align = 8;  // NOLINT
 
-  static constexpr bool IsAligned(size_t n) { return (n & (align - 1)) == 0; }
+  static constexpr bool IsAligned(size_t n) { return (n & (align - 1)) == 0U; }
 
   template <typename T>
-  static bool IsAligned(T* ptr) {
-    return (reinterpret_cast<uintptr_t>(ptr) & (align - 1)) == 0;
+  static inline PROTOBUF_ALWAYS_INLINE bool IsAligned(T* ptr) {
+    return (reinterpret_cast<uintptr_t>(ptr) & (align - 1)) == 0U;
   }
 
-  static constexpr size_t Ceil(size_t n) { return (n + align - 1) & -align; }
-  static constexpr size_t Floor(size_t n) { return (n & ~(align - 1)); }
+  static inline PROTOBUF_ALWAYS_INLINE constexpr size_t Ceil(size_t n) {
+    return (n + align - 1) & -align;
+  }
+  static inline PROTOBUF_ALWAYS_INLINE constexpr size_t Floor(size_t n) {
+    return (n & ~(align - 1));
+  }
+
+  static inline PROTOBUF_ALWAYS_INLINE size_t Padded(size_t n) {
+    ABSL_ASSERT(IsAligned(n));
+    return n;
+  }
 
   template <typename T>
-  T* Ceil(T* ptr) const {
+  static inline PROTOBUF_ALWAYS_INLINE T* Ceil(T* ptr) {
     uintptr_t intptr = reinterpret_cast<uintptr_t>(ptr);
     return reinterpret_cast<T*>((intptr + align - 1) & -align);
   }
 
   template <typename T>
-  T* CeilDefaultAligned(T* ptr) const {
-    return ArenaAlignDefault().CheckAligned(ptr);
+  static inline PROTOBUF_ALWAYS_INLINE T* CeilDefaultAligned(T* ptr) {
+    ABSL_ASSERT(IsAligned(ptr));
+    return ptr;
   }
 
   // Address sanitizer enabled alignment check
   template <typename T>
-  static T* CheckAligned(T* ptr) {
-    GOOGLE_DCHECK(IsAligned(ptr)) << static_cast<void*>(ptr);
+  static inline PROTOBUF_ALWAYS_INLINE T* CheckAligned(T* ptr) {
+    ABSL_ASSERT(IsAligned(ptr));
     return ptr;
   }
 };
@@ -114,16 +135,22 @@
 
   size_t align;
 
-  constexpr bool IsAligned(size_t n) const { return (n & (align - 1)) == 0; }
+  constexpr bool IsAligned(size_t n) const { return (n & (align - 1)) == 0U; }
 
   template <typename T>
   bool IsAligned(T* ptr) const {
-    return (reinterpret_cast<uintptr_t>(ptr) & (align - 1)) == 0;
+    return (reinterpret_cast<uintptr_t>(ptr) & (align - 1)) == 0U;
   }
 
   constexpr size_t Ceil(size_t n) const { return (n + align - 1) & -align; }
   constexpr size_t Floor(size_t n) const { return (n & ~(align - 1)); }
 
+  constexpr size_t Padded(size_t n) const {
+    ABSL_ASSERT(IsAligned(n));
+    ABSL_ASSERT(ArenaAlignDefault::IsAligned(align));
+    return n + align - ArenaAlignDefault::align;
+  }
+
   template <typename T>
   T* Ceil(T* ptr) const {
     uintptr_t intptr = reinterpret_cast<uintptr_t>(ptr);
@@ -132,24 +159,55 @@
 
   template <typename T>
   T* CeilDefaultAligned(T* ptr) const {
-    return Ceil(ArenaAlignDefault().CheckAligned(ptr));
+    ABSL_ASSERT(ArenaAlignDefault::IsAligned(ptr));
+    return Ceil(ptr);
   }
 
   // Address sanitizer enabled alignment check
   template <typename T>
   T* CheckAligned(T* ptr) const {
-    GOOGLE_DCHECK(IsAligned(ptr)) << static_cast<void*>(ptr);
+    ABSL_ASSERT(IsAligned(ptr));
     return ptr;
   }
 };
 
 inline ArenaAlign ArenaAlignAs(size_t align) {
   // align must be a non zero power of 2 >= 8
-  GOOGLE_DCHECK_NE(align, 0);
+  GOOGLE_DCHECK_NE(align, 0U);
   GOOGLE_DCHECK(absl::has_single_bit(align)) << "Invalid alignment " << align;
   return ArenaAlign{align};
 }
 
+template <bool, size_t align>
+struct AlignFactory {
+  static_assert(align > ArenaAlignDefault::align, "Not over-aligned");
+  static_assert((align & (align - 1)) == 0U, "Not power of 2");
+  static constexpr ArenaAlign Create() { return ArenaAlign{align}; }
+};
+
+template <size_t align>
+struct AlignFactory<true, align> {
+  static_assert(align <= ArenaAlignDefault::align, "Over-aligned");
+  static_assert((align & (align - 1)) == 0U, "Not power of 2");
+  static constexpr ArenaAlignDefault Create() { return ArenaAlignDefault{}; }
+};
+
+// Returns an `ArenaAlignDefault` instance for `align` less than or equal to
+// the default alignment, and an `ArenaAlign{align}` instance for over-aligned
+// values of `align`. The purpose is that functions taking a templated `Align
+// align` argument can reduce the alignment operations to no-ops when invoked
+// with `ArenaAlignDefault`.
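+//
+// For example, `ArenaAlignAs<8>()` yields an `ArenaAlignDefault` instance,
+// while `ArenaAlignAs<64>()` yields `ArenaAlign{64}`.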
+template <size_t align>
+inline constexpr auto ArenaAlignAs() {
+  return AlignFactory<align <= ArenaAlignDefault::align, align>::Create();
+}
+
+// Returns ArenaAlignAs<alignof(T)>
+template <typename T>
+inline constexpr auto ArenaAlignOf() {
+  return ArenaAlignAs<alignof(T)>();
+}
+
 }  // namespace internal
 }  // namespace protobuf
 }  // namespace google
diff --git a/src/google/protobuf/arena_align_test.cc b/src/google/protobuf/arena_align_test.cc
index e17cb33..466d1b6 100644
--- a/src/google/protobuf/arena_align_test.cc
+++ b/src/google/protobuf/arena_align_test.cc
@@ -67,6 +67,16 @@
   EXPECT_THAT(align_default.Ceil(16), Eq(16));
 }
 
+TEST(ArenaAlignDefault, Padded) {
+  auto align_default = ArenaAlignDefault();
+  EXPECT_THAT(align_default.Padded(0), Eq(0));
+  EXPECT_THAT(align_default.Padded(8), Eq(8));
+  EXPECT_THAT(align_default.Padded(64), Eq(64));
+#ifdef PROTOBUF_HAS_DEATH_TEST
+  EXPECT_DEBUG_DEATH(align_default.Padded(1), ".*");
+#endif  // PROTOBUF_HAS_DEATH_TEST
+}
+
 TEST(ArenaAlignDefault, CeilPtr) {
   alignas(8) char p[17] = {0};
   auto align_default = ArenaAlignDefault();
@@ -147,6 +157,16 @@
   EXPECT_THAT(align_64.Ceil(128), Eq(128));
 }
 
+TEST(ArenaAlign, Padded) {
+  auto align_64 = ArenaAlignAs(64);
+  EXPECT_THAT(align_64.Padded(64), Eq(64 + 64 - ArenaAlignDefault::align));
+  EXPECT_THAT(align_64.Padded(128), Eq(128 + 64 - ArenaAlignDefault::align));
+#ifdef PROTOBUF_HAS_DEATH_TEST
+  EXPECT_DEBUG_DEATH(align_64.Padded(16), ".*");
+  EXPECT_DEBUG_DEATH(ArenaAlignAs(2).Padded(8), ".*");
+#endif  // PROTOBUF_HAS_DEATH_TEST
+}
+
 TEST(ArenaAlign, CeilPtr) {
   alignas(64) char p[129] = {0};
   auto align_64 = ArenaAlignAs(64);
diff --git a/src/google/protobuf/arena_cleanup.h b/src/google/protobuf/arena_cleanup.h
index 4036766..5930c51 100644
--- a/src/google/protobuf/arena_cleanup.h
+++ b/src/google/protobuf/arena_cleanup.h
@@ -78,19 +78,40 @@
   uintptr_t elem;
 };
 
-// EnableSpecializedTags() return true if the alignment of tagged objects
+// EnableTags() returns true if the alignment of tagged objects
 // such as std::string allow us to poke tags in the 2 LSB bits.
-inline constexpr bool EnableSpecializedTags() {
+inline constexpr bool EnableTags() {
   // For now we require 2 bits
   return alignof(std::string) >= 8 && alignof(absl::Cord) >= 8;
 }
 
+// Adds a cleanup entry invoking 'cleanup' on `object` at memory location `pos`.
+inline ABSL_ATTRIBUTE_ALWAYS_INLINE void CreateNode(void* pos,
+                                                    const void* object,
+                                                    void (*cleanup)(void*)) {
+  auto elem = reinterpret_cast<uintptr_t>(object);
+  DynamicNode n = {elem, cleanup};
+  memcpy(pos, &n, sizeof(n));
+}
+
+// Adds a cleanup entry at memory location `pos` invoking the destructor of
+// `object`, whose type is identified by `tag`.
+inline ABSL_ATTRIBUTE_ALWAYS_INLINE void CreateNode(void* pos,
+                                                    const void* object,
+                                                    Tag tag) {
+  GOOGLE_DCHECK(tag == Tag::kString || tag == Tag::kCord);
+  auto elem = reinterpret_cast<uintptr_t>(object);
+  GOOGLE_DCHECK_EQ(elem & 3, 0ULL);
+  TaggedNode n = {elem + static_cast<uintptr_t>(tag)};
+  memcpy(pos, &n, sizeof(n));
+}
+
 // Adds a cleanup entry identified by `tag` at memory location `pos`.
 inline ABSL_ATTRIBUTE_ALWAYS_INLINE void CreateNode(Tag tag, void* pos,
                                                     const void* elem_raw,
                                                     void (*destructor)(void*)) {
   auto elem = reinterpret_cast<uintptr_t>(elem_raw);
-  if (EnableSpecializedTags()) {
+  if (EnableTags()) {
     GOOGLE_DCHECK_EQ(elem & 3, 0ULL);  // Must be aligned
     switch (tag) {
       case Tag::kString: {
@@ -117,7 +138,7 @@
 // anything about the underlying cleanup node or cleanup meta data / tags.
 inline ABSL_ATTRIBUTE_ALWAYS_INLINE size_t
 PrefetchNode(const void* elem_address) {
-  if (EnableSpecializedTags()) {
+  if (EnableTags()) {
     uintptr_t elem;
     memcpy(&elem, elem_address, sizeof(elem));
     if (static_cast<Tag>(elem & 3) != Tag::kDynamic) {
@@ -134,7 +155,7 @@
 inline ABSL_ATTRIBUTE_ALWAYS_INLINE size_t DestroyNode(const void* pos) {
   uintptr_t elem;
   memcpy(&elem, pos, sizeof(elem));
-  if (EnableSpecializedTags()) {
+  if (EnableTags()) {
     switch (static_cast<Tag>(elem & 3)) {
       case Tag::kString: {
         // Some compilers don't like fully qualified explicit dtor calls,
@@ -157,24 +178,10 @@
   return sizeof(DynamicNode);
 }
 
-// Returns the `tag` identifying the type of object for `destructor` or
-// kDynamic if `destructor` does not identify a well know object type.
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE Tag Type(void (*destructor)(void*)) {
-  if (EnableSpecializedTags()) {
-    if (destructor == &arena_destruct_object<std::string>) {
-      return Tag::kString;
-    }
-    if (destructor == &arena_destruct_object<absl::Cord>) {
-      return Tag::kCord;
-    }
-  }
-  return Tag::kDynamic;
-}
-
 // Returns the `tag` identifying the type of object stored at memory location
 // `elem`, which represents the first uintptr_t value in the node.
 inline ABSL_ATTRIBUTE_ALWAYS_INLINE Tag Type(void* raw) {
-  if (!EnableSpecializedTags()) return Tag::kDynamic;
+  if (!EnableTags()) return Tag::kDynamic;
 
   uintptr_t elem;
   memcpy(&elem, raw, sizeof(elem));
@@ -191,27 +198,8 @@
   }
 }
 
-// Returns the required size in bytes off the node type identified by `tag`.
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE size_t Size(Tag tag) {
-  if (!EnableSpecializedTags()) return sizeof(DynamicNode);
-
-  switch (tag) {
-    case Tag::kDynamic:
-      return sizeof(DynamicNode);
-    case Tag::kString:
-      return sizeof(TaggedNode);
-    case Tag::kCord:
-      return sizeof(TaggedNode);
-    default:
-      GOOGLE_LOG(FATAL) << "Corrupted cleanup tag: " << static_cast<int>(tag);
-      return sizeof(DynamicNode);
-  }
-}
-
-// Returns the required size in bytes off the node type for `destructor`.
-inline ABSL_ATTRIBUTE_ALWAYS_INLINE size_t Size(void (*destructor)(void*)) {
-  return destructor == nullptr ? 0 : Size(Type(destructor));
-}
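+// Returns the size in bytes of the cleanup node required for `cleanup`:
+// a DynamicNode for a destructor pointer, or a TaggedNode for a `Tag`.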
+inline size_t CleanupSize(void (*)(void*)) { return sizeof(DynamicNode); }
+inline size_t CleanupSize(Tag) { return sizeof(TaggedNode); }
 
 }  // namespace cleanup
 }  // namespace internal
diff --git a/src/google/protobuf/serial_arena.h b/src/google/protobuf/serial_arena.h
new file mode 100644
index 0000000..edff294
--- /dev/null
+++ b/src/google/protobuf/serial_arena.h
@@ -0,0 +1,412 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2022 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// This file defines the internal class SerialArena
+
+#ifndef GOOGLE_PROTOBUF_SERIAL_ARENA_H__
+#define GOOGLE_PROTOBUF_SERIAL_ARENA_H__
+
+#include <algorithm>
+#include <atomic>
+#include <string>
+#include <type_traits>
+#include <typeinfo>
+#include <utility>
+
+#include "google/protobuf/stubs/logging.h"
+#include "google/protobuf/stubs/common.h"
+#include "absl/numeric/bits.h"
+#include "google/protobuf/arena_align.h"
+#include "google/protobuf/arena_cleanup.h"
+#include "google/protobuf/arena_config.h"
+#include "google/protobuf/arenaz_sampler.h"
+#include "google/protobuf/port.h"
+
+// Must be included last.
+#include "google/protobuf/port_def.inc"
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+// Arena blocks are variable length malloc-ed objects.  The following structure
+// describes the common header for all blocks.
+struct ArenaBlock {
+  // For the sentry block with zero-size where ptr_, limit_, cleanup_nodes all
+  // point to "this".
+  constexpr ArenaBlock()
+      : next(nullptr), cleanup_nodes(this), size(0) {}
+
+  ArenaBlock(ArenaBlock* next, size_t size)
+      : next(next), cleanup_nodes(nullptr), size(size) {
+    GOOGLE_DCHECK_GT(size, sizeof(ArenaBlock));
+  }
+
+  char* Pointer(size_t n) {
+    GOOGLE_DCHECK_LE(n, size);
+    return reinterpret_cast<char*>(this) + n;
+  }
+  char* Limit() { return Pointer(size & static_cast<size_t>(-8)); }
+
+  bool IsSentry() const { return size == 0; }
+
+  ArenaBlock* const next;
+  void* cleanup_nodes;
+  const size_t size;
+  // data follows
+};
+
+enum class AllocationClient { kDefault, kArray };
+
+class ThreadSafeArena;
+
+// Tag type used to invoke the constructor of the first SerialArena.
+struct FirstSerialArena {
+  explicit FirstSerialArena() = default;
+};
+
+// A simple arena allocator. Calls to allocate functions must be properly
+// serialized by the caller, hence this class cannot be used as a general
+// purpose allocator in a multi-threaded program. It serves as a building block
+// for ThreadSafeArena, which provides a thread-safe arena allocator.
+//
+// This class manages
+// 1) Arena bump allocation + owning memory blocks.
+// 2) Maintaining a cleanup list.
+// It delegates the actual memory allocation back to ThreadSafeArena, which
+// contains the information on block growth policy and backing memory allocation
+// used.
+class PROTOBUF_EXPORT SerialArena {
+ public:
+  struct Memory {
+    void* ptr;
+    size_t size;
+  };
+
+  void CleanupList();
+  uint64_t SpaceAllocated() const {
+    return space_allocated_.load(std::memory_order_relaxed);
+  }
+  uint64_t SpaceUsed() const;
+
+  bool HasSpace(size_t n) const {
+    return n <= static_cast<size_t>(limit_ - ptr());
+  }
+
+  // See comments on `cached_blocks_` member for details.
+  PROTOBUF_ALWAYS_INLINE void* TryAllocateFromCachedBlock(size_t size) {
+    if (PROTOBUF_PREDICT_FALSE(size < 16)) return nullptr;
+    // We round up to the next larger block in case the memory doesn't match
+    // the pattern we are looking for.
+    const size_t index = absl::bit_width(size - 1) - 4;
+
+    if (index >= cached_block_length_) return nullptr;
+    auto& cached_head = cached_blocks_[index];
+    if (cached_head == nullptr) return nullptr;
+
+    void* ret = cached_head;
+    PROTOBUF_UNPOISON_MEMORY_REGION(ret, size);
+    cached_head = cached_head->next;
+    return ret;
+  }
+
+  // In kArray mode we look through cached blocks.
+  // We do not do this by default because most non-array allocations will not
+  // have the right size and will fail to find an appropriate cached block.
+  //
+  // TODO(sbenza): Evaluate if we should use cached blocks for message types of
+  // the right size. We can statically know if the allocation size can benefit
+  // from it.
+  template <AllocationClient alloc_client = AllocationClient::kDefault>
+  void* AllocateAligned(size_t n) {
+    GOOGLE_DCHECK(internal::ArenaAlignDefault::IsAligned(n));
+    GOOGLE_DCHECK_GE(limit_, ptr());
+
+    if (alloc_client == AllocationClient::kArray) {
+      if (void* res = TryAllocateFromCachedBlock(n)) {
+        return res;
+      }
+    }
+
+    if (PROTOBUF_PREDICT_FALSE(!HasSpace(n))) {
+      return AllocateAlignedFallback(n);
+    }
+    return AllocateFromExisting(n);
+  }
+
+  template <typename TagOrCleanup, typename Align>
+  void* AllocateWithCleanup(size_t size, Align align, TagOrCleanup cleanup);
+
+  template <typename TagOrCleanup, typename Align>
+  void* TryAllocateWithCleanup(size_t size, Align align, TagOrCleanup cleanup);
+
+ private:
+  void* AllocateFromExisting(size_t n) {
+    PROTOBUF_UNPOISON_MEMORY_REGION(ptr(), n);
+    void* ret = ptr();
+    set_ptr(static_cast<char*>(ret) + n);
+    return ret;
+  }
+
+  // See comments on `cached_blocks_` member for details.
+  void ReturnArrayMemory(void* p, size_t size) {
+    // We only need to check for 32-bit platforms.
+    // On 64-bit platforms the minimum allocation size from Repeated*Field is
+    // guaranteed to be 16.
+    if (sizeof(void*) < 8) {
+      if (PROTOBUF_PREDICT_FALSE(size < 16)) return;
+    } else {
+      PROTOBUF_ASSUME(size >= 16);
+    }
+
+    // We round down to the next smaller block in case the memory doesn't match
+    // the pattern we are looking for. eg, someone might have called Reserve()
+    // on the repeated field.
+    const size_t index = absl::bit_width(size) - 5;
+
+    if (PROTOBUF_PREDICT_FALSE(index >= cached_block_length_)) {
+      // We can't put this object on the freelist so make this object the
+      // freelist. It is guaranteed it is larger than the one we have, and
+      // large enough to hold another allocation of `size`.
+      CachedBlock** new_list = static_cast<CachedBlock**>(p);
+      size_t new_size = size / sizeof(CachedBlock*);
+
+      std::copy(cached_blocks_, cached_blocks_ + cached_block_length_,
+                new_list);
+
+      // We need to unpoison this memory before filling it in case it has been
+      // poisoned by another sanitizer client.
+      PROTOBUF_UNPOISON_MEMORY_REGION(
+          new_list + cached_block_length_,
+          (new_size - cached_block_length_) * sizeof(CachedBlock*));
+
+      std::fill(new_list + cached_block_length_, new_list + new_size, nullptr);
+
+      cached_blocks_ = new_list;
+      // Make the size fit in uint8_t. This is the power of two, so we don't
+      // need anything larger.
+      cached_block_length_ =
+          static_cast<uint8_t>(std::min(size_t{64}, new_size));
+
+      return;
+    }
+
+    auto& cached_head = cached_blocks_[index];
+    auto* new_node = static_cast<CachedBlock*>(p);
+    new_node->next = cached_head;
+    cached_head = new_node;
+    PROTOBUF_POISON_MEMORY_REGION(p, size);
+  }
+
+ public:
+  // Allocate space if the current region provides enough space.
+  bool MaybeAllocateAligned(size_t n, void** out) {
+    GOOGLE_DCHECK(internal::ArenaAlignDefault::IsAligned(n));
+    GOOGLE_DCHECK_GE(limit_, ptr());
+    if (PROTOBUF_PREDICT_FALSE(!HasSpace(n))) return false;
+    *out = AllocateFromExisting(n);
+    return true;
+  }
+
+  // If there is enough space in the current block, allocate space for one `T`
+  // object and register for destruction. The object has not been constructed
+  // and the memory returned is uninitialized.
+  template <typename T>
+  void* MaybeAllocateWithCleanup();
+
+  template <typename TagOrCleanup>
+  void AddCleanup(void* elem, TagOrCleanup cleanup);
+
+ private:
+  friend class ThreadSafeArena;
+
+  // Creates a new SerialArena inside mem, using the remaining memory for
+  // future allocations.
+  // The `parent` arena must outlive the serial arena, which is guaranteed
+  // because the parent manages the lifetime of the serial arenas.
+  static SerialArena* New(SerialArena::Memory mem, ThreadSafeArena& parent);
+  // Free SerialArena returning the memory passed in to New
+  template <typename Deallocator>
+  Memory Free(Deallocator deallocator);
+
+  // Members are declared here to track sizeof(SerialArena) and hotness
+  // centrally. They are (roughly) laid out in descending order of hotness.
+
+  // Next pointer to allocate from.  Always 8-byte aligned.  Points inside
+  // head_ (and head_->pos will always be non-canonical).  We keep these
+  // here to reduce indirection.
+  std::atomic<char*> ptr_{nullptr};
+  // Limiting address up to which memory can be allocated from the head block.
+  char* limit_ = nullptr;
+
+  std::atomic<ArenaBlock*> head_{nullptr};  // Head of linked list of blocks.
+  std::atomic<size_t> space_used_{0};       // Necessary for metrics.
+  std::atomic<size_t> space_allocated_{0};
+  ThreadSafeArena& parent_;
+
+  // Repeated*Field and Arena play together to reduce memory consumption by
+  // reusing blocks. Currently, natural growth of the repeated field types makes
+  // them allocate blocks of size `8 + 2^N, N>=3`.
+  // When the repeated field grows, it returns the previous block, and we put
+  // it in this free list.
+  // `cached_blocks_[i]` points to the free list for blocks of size `8+2^(i+3)`.
+  // The array of freelists is grown when needed in `ReturnArrayMemory()`.
+  struct CachedBlock {
+    // Simple linked list.
+    CachedBlock* next;
+  };
+  uint8_t cached_block_length_ = 0;
+  CachedBlock** cached_blocks_ = nullptr;
+
+  // Helper getters/setters to handle relaxed operations on atomic variables.
+  ArenaBlock* head() { return head_.load(std::memory_order_relaxed); }
+  const ArenaBlock* head() const {
+    return head_.load(std::memory_order_relaxed);
+  }
+
+  char* ptr() { return ptr_.load(std::memory_order_relaxed); }
+  const char* ptr() const { return ptr_.load(std::memory_order_relaxed); }
+  void set_ptr(char* ptr) { return ptr_.store(ptr, std::memory_order_relaxed); }
+
+  // Constructor is private as only New() should be used.
+  inline SerialArena(ArenaBlock* b, ThreadSafeArena& parent);
+
+  // Constructors to handle the first SerialArena.
+  inline explicit SerialArena(ThreadSafeArena& parent);
+  inline SerialArena(FirstSerialArena, ArenaBlock* b, ThreadSafeArena& parent);
+
+  void* AllocateAlignedFallback(size_t n);
+
+  template <typename TagOrCleanup>
+  void BlindlyAddCleanup(void* elem, TagOrCleanup cleanup);
+
+  template <typename TagOrCleanup>
+  void AddCleanupFallback(void* elem, TagOrCleanup cleanup);
+
+  template <typename TagOrCleanup, typename Align>
+  void* BlindlyAllocateWithCleanup(size_t size, Align align,
+                                   TagOrCleanup cleanup);
+
+  template <typename TagOrCleanup, typename Align>
+  void* AllocateWithCleanupFallback(size_t size, Align align,
+                                    TagOrCleanup cleanup);
+
+  inline void AllocateNewBlock(size_t n);
+  inline void Init(ArenaBlock* b, size_t offset);
+
+ public:
+  static constexpr size_t kBlockHeaderSize =
+      ArenaAlignDefault::Ceil(sizeof(ArenaBlock));
+};
+
+template <typename TagOrCleanup>
+inline PROTOBUF_ALWAYS_INLINE void SerialArena::AddCleanup(
+    void* elem, TagOrCleanup cleanup) {
+  const size_t n = cleanup::CleanupSize(cleanup);
+  if (PROTOBUF_PREDICT_FALSE(!HasSpace(n))) {
+    return AddCleanupFallback(elem, cleanup);
+  }
+  BlindlyAddCleanup(elem, cleanup);
+}
+
+template <typename TagOrCleanup>
+inline PROTOBUF_ALWAYS_INLINE void SerialArena::BlindlyAddCleanup(
+    void* elem, TagOrCleanup cleanup) {
+  const size_t n = cleanup::CleanupSize(cleanup);
+  GOOGLE_DCHECK(HasSpace(n));
+  limit_ -= n;
+  PROTOBUF_UNPOISON_MEMORY_REGION(limit_, n);
+  cleanup::CreateNode(limit_, elem, cleanup);
+}
+
+template <typename TagOrCleanup, typename Align>
+inline PROTOBUF_ALWAYS_INLINE void* SerialArena::BlindlyAllocateWithCleanup(
+    size_t size, Align align, TagOrCleanup cleanup) {
+  GOOGLE_DCHECK(align.IsAligned(size));
+  char* ptr = align.CeilDefaultAligned(this->ptr());
+  PROTOBUF_UNPOISON_MEMORY_REGION(ptr, size);
+  BlindlyAddCleanup(ptr, cleanup);
+  GOOGLE_DCHECK_LE(ptr + size, limit_);
+  set_ptr(ptr + size);
+  return ptr;
+}
+
+template <typename TagOrCleanup, typename Align>
+inline PROTOBUF_NDEBUG_INLINE void* SerialArena::AllocateWithCleanup(
+    size_t size, Align align, TagOrCleanup cleanup) {
+  const size_t n = align.Padded(size) + cleanup::CleanupSize(cleanup);
+  if (PROTOBUF_PREDICT_TRUE(HasSpace(n))) {
+    return BlindlyAllocateWithCleanup(size, align, cleanup);
+  }
+  return AllocateWithCleanupFallback(size, align, cleanup);
+}
+
+template <typename TagOrCleanup, typename Align>
+inline PROTOBUF_NDEBUG_INLINE void* SerialArena::TryAllocateWithCleanup(
+    size_t size, Align align, TagOrCleanup cleanup) {
+  const size_t n = align.Padded(size) + cleanup::CleanupSize(cleanup);
+  if (PROTOBUF_PREDICT_FALSE(!HasSpace(n))) return nullptr;
+  void* ptr = BlindlyAllocateWithCleanup(size, align, cleanup);
+  PROTOBUF_ASSUME(ptr != nullptr);
+  return ptr;
+}
+
+template <typename T>
+inline PROTOBUF_ALWAYS_INLINE void* SerialArena::MaybeAllocateWithCleanup() {
+  static_assert(!std::is_trivially_destructible<T>::value, "");
+  constexpr auto align = internal::ArenaAlignOf<T>();
+  return TryAllocateWithCleanup(align.Ceil(sizeof(T)), align,
+                                cleanup::arena_destruct_object<T>);
+}
+
+template <>
+inline PROTOBUF_ALWAYS_INLINE void*
+SerialArena::MaybeAllocateWithCleanup<std::string>() {
+  constexpr auto align = internal::ArenaAlignOf<std::string>();
+  return TryAllocateWithCleanup(align.Ceil(sizeof(std::string)), align,
+                                cleanup::Tag::kString);
+}
+
+template <>
+inline PROTOBUF_ALWAYS_INLINE void*
+SerialArena::MaybeAllocateWithCleanup<absl::Cord>() {
+  constexpr auto align = internal::ArenaAlignOf<absl::Cord>();
+  return TryAllocateWithCleanup(align.Ceil(sizeof(absl::Cord)), align,
+                                cleanup::Tag::kCord);
+}
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#include "google/protobuf/port_undef.inc"
+
+#endif  // GOOGLE_PROTOBUF_SERIAL_ARENA_H__
diff --git a/src/google/protobuf/thread_safe_arena.h b/src/google/protobuf/thread_safe_arena.h
new file mode 100644
index 0000000..23f886d
--- /dev/null
+++ b/src/google/protobuf/thread_safe_arena.h
@@ -0,0 +1,301 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2022 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// This file defines the internal class ThreadSafeArena
+
+#ifndef GOOGLE_PROTOBUF_THREAD_SAFE_ARENA_H__
+#define GOOGLE_PROTOBUF_THREAD_SAFE_ARENA_H__
+
+#include <algorithm>
+#include <atomic>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include "absl/synchronization/mutex.h"
+#include "google/protobuf/arena_align.h"
+#include "google/protobuf/arena_allocation_policy.h"
+#include "google/protobuf/arena_cleanup.h"
+#include "google/protobuf/arena_config.h"
+#include "google/protobuf/arenaz_sampler.h"
+#include "google/protobuf/port.h"
+#include "google/protobuf/serial_arena.h"
+
+// Must be included last.
+#include "google/protobuf/port_def.inc"
+
+namespace google {
+namespace protobuf {
+namespace internal {
+
+// Use #ifdef to select the best implementation based on hardware / OS.
+class PROTOBUF_EXPORT ThreadSafeArena {
+ public:
+  ThreadSafeArena();
+
+  ThreadSafeArena(char* mem, size_t size);
+
+  explicit ThreadSafeArena(void* mem, size_t size,
+                           const AllocationPolicy& policy);
+
+  // All protos have pointers back to the arena hence Arena must have
+  // pointer stability.
+  ThreadSafeArena(const ThreadSafeArena&) = delete;
+  ThreadSafeArena& operator=(const ThreadSafeArena&) = delete;
+  ThreadSafeArena(ThreadSafeArena&&) = delete;
+  ThreadSafeArena& operator=(ThreadSafeArena&&) = delete;
+
+  // Destructor deletes all owned heap allocated objects, and destructs objects
+  // that have non-trivial destructors, except for proto2 message objects whose
+  // destructors can be skipped. Also, frees all blocks except the initial block
+  // if it was passed in.
+  ~ThreadSafeArena();
+
+  uint64_t Reset();
+
+  uint64_t SpaceAllocated() const;
+  uint64_t SpaceUsed() const;
+
+  template <AllocationClient alloc_client = AllocationClient::kDefault>
+  void* AllocateAligned(size_t n) {
+    SerialArena* arena;
+    if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
+      return arena->AllocateAligned<alloc_client>(n);
+    } else {
+      return AllocateAlignedFallback<alloc_client>(n);
+    }
+  }
+
+  void ReturnArrayMemory(void* p, size_t size) {
+    SerialArena* arena;
+    if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
+      arena->ReturnArrayMemory(p, size);
+    }
+  }
+
+  // This function allocates n bytes if the common happy case is true and
+  // returns true. Otherwise it does nothing and returns false. These strange
+  // semantics are necessary to allow callers to write functions that only
+  // have fallback function calls in tail position. This substantially improves
+  // code for the happy path.
+  PROTOBUF_NDEBUG_INLINE bool MaybeAllocateAligned(size_t n, void** out) {
+    SerialArena* arena;
+    if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
+      return arena->MaybeAllocateAligned(n, out);
+    }
+    return false;
+  }
+
+  // Add object pointer and cleanup function pointer to the list.
+  template <typename TagOrCleanup>
+  void AddCleanup(void* elem, TagOrCleanup cleanup);
+
+  template <typename TagOrCleanup, typename Align>
+  void* AllocateWithCleanup(size_t size, Align align, TagOrCleanup cleanup) {
+    SerialArena* arena;
+    if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
+      return arena->AllocateWithCleanup(size, align, cleanup);
+    }
+    return AllocateWithCleanupFallback(size, align, cleanup);
+  }
+
+ private:
+  friend class ArenaBenchmark;
+  friend class TcParser;
+  friend class SerialArena;
+  friend struct SerialArenaChunkHeader;
+  static uint64_t GetNextLifeCycleId();
+
+  class SerialArenaChunk;
+
+  // Returns a new SerialArenaChunk that has {id, serial} at slot 0. It may
+  // grow based on "prev_capacity".
+  static SerialArenaChunk* NewSerialArenaChunk(uint32_t prev_capacity, void* id,
+                                               SerialArena* serial);
+  static SerialArenaChunk* SentrySerialArenaChunk();
+
+  // Returns the first ArenaBlock* for the first SerialArena. If users provide
+  // one, use it if it's acceptable. Otherwise returns a sentry block.
+  ArenaBlock* FirstBlock(void* buf, size_t size);
+  // Same as the above but returns a valid block if "policy" is not default.
+  ArenaBlock* FirstBlock(void* buf, size_t size,
+                         const AllocationPolicy& policy);
+
+  // Adds SerialArena to the chunked list. May create a new chunk.
+  void AddSerialArena(void* id, SerialArena* serial);
+
+  // Members are declared here to track sizeof(ThreadSafeArena) and hotness
+  // centrally.
+
+  // Unique for each arena. Changes on Reset().
+  uint64_t tag_and_id_ = 0;
+
+  TaggedAllocationPolicyPtr alloc_policy_;  // Tagged pointer to AllocPolicy.
+  ThreadSafeArenaStatsHandle arena_stats_;
+
+  // Adding a new chunk to head_ must be protected by mutex_.
+  absl::Mutex mutex_;
+  // Pointer to a linked list of SerialArenaChunk.
+  std::atomic<SerialArenaChunk*> head_{nullptr};
+
+  void* first_owner_;
+  // Must be declared after alloc_policy_; otherwise, it may lose info on
+  // user-provided initial block.
+  SerialArena first_arena_;
+
+  static_assert(std::is_trivially_destructible<SerialArena>{},
+                "SerialArena needs to be trivially destructible.");
+
+  const AllocationPolicy* AllocPolicy() const { return alloc_policy_.get(); }
+  void InitializeWithPolicy(const AllocationPolicy& policy);
+
+  void Init();
+
+  // Delete or Destruct all objects owned by the arena.
+  void CleanupList();
+
+  inline void CacheSerialArena(SerialArena* serial) {
+    thread_cache().last_serial_arena = serial;
+    thread_cache().last_lifecycle_id_seen = tag_and_id_;
+  }
+
+  PROTOBUF_NDEBUG_INLINE bool GetSerialArenaFast(SerialArena** arena) {
+    // If this thread already owns a block in this arena then try to use that.
+    // This fast path optimizes the case where multiple threads allocate from
+    // the same arena.
+    ThreadCache* tc = &thread_cache();
+    if (PROTOBUF_PREDICT_TRUE(tc->last_lifecycle_id_seen == tag_and_id_)) {
+      *arena = tc->last_serial_arena;
+      return true;
+    }
+    return false;
+  }
+
+  // Finds SerialArena or creates one if not found. When creating a new one,
+  // create a big enough block to accommodate n bytes.
+  SerialArena* GetSerialArenaFallback(size_t n);
+
+  template <AllocationClient alloc_client = AllocationClient::kDefault>
+  void* AllocateAlignedFallback(size_t n);
+
+  template <typename TagOrCleanup, typename Align>
+  void* AllocateWithCleanupFallback(size_t size, Align align,
+                                    TagOrCleanup cleanup);
+
+  // Executes callback function over SerialArenaChunk. Passes const
+  // SerialArenaChunk*.
+  template <typename Functor>
+  void WalkConstSerialArenaChunk(Functor fn) const;
+
+  // Executes callback function over SerialArenaChunk.
+  template <typename Functor>
+  void WalkSerialArenaChunk(Functor fn);
+
+  // Executes callback function over SerialArena in chunked list in reverse
+  // chronological order. Passes const SerialArena*.
+  template <typename Functor>
+  void PerConstSerialArenaInChunk(Functor fn) const;
+
+  // Releases all memory except the first block which it returns. The first
+  // block might be owned by the user and thus need some extra checks before
+  // deleting.
+  SerialArena::Memory Free(size_t* space_allocated);
+
+#ifdef _MSC_VER
+#pragma warning(disable : 4324)
+#endif
+  struct alignas(kCacheAlignment) ThreadCache {
+    // Number of per-thread lifecycle IDs to reserve. Must be power of two.
+    // To reduce contention on a global atomic, each thread reserves a batch of
+    // IDs.  The following number is calculated based on a stress test with
+    // ~6500 threads all frequently allocating a new arena.
+    static constexpr size_t kPerThreadIds = 256;
+    // Next lifecycle ID available to this thread. We need to reserve a new
+    // batch, if `next_lifecycle_id & (kPerThreadIds - 1) == 0`.
+    uint64_t next_lifecycle_id{0};
+    // The ThreadCache is considered valid as long as this matches the
+    // lifecycle_id of the arena being used.
+    uint64_t last_lifecycle_id_seen{static_cast<uint64_t>(-1)};
+    SerialArena* last_serial_arena{nullptr};
+  };
+
+  // Lifecycle_id can be a highly contended variable in a situation of lots of
+  // arena creation. Make sure that other global variables are not sharing the
+  // cacheline.
+#ifdef _MSC_VER
+#pragma warning(disable : 4324)
+#endif
+  using LifecycleId = uint64_t;
+  ABSL_CONST_INIT alignas(
+      kCacheAlignment) static std::atomic<LifecycleId> lifecycle_id_;
+#if defined(PROTOBUF_NO_THREADLOCAL)
+  // iOS does not support __thread keyword so we use a custom thread local
+  // storage class we implemented.
+  static ThreadCache& thread_cache();
+#elif defined(PROTOBUF_USE_DLLS)
+  // Thread local variables cannot be exposed through DLL interface but we can
+  // wrap them in static functions.
+  static ThreadCache& thread_cache();
+#else
+  static PROTOBUF_THREAD_LOCAL ThreadCache thread_cache_;
+  static ThreadCache& thread_cache() { return thread_cache_; }
+#endif
+
+ public:
+  // kBlockHeaderSize is sizeof(ArenaBlock), aligned up to the default alignment
+  // to protect the invariant that `pos` is always default aligned.
+  static constexpr size_t kBlockHeaderSize = SerialArena::kBlockHeaderSize;
+  static constexpr size_t kSerialArenaSize =
+      ArenaAlignDefault::Ceil(sizeof(SerialArena));
+  static constexpr size_t kAllocPolicySize =
+      ArenaAlignDefault::Ceil(sizeof(AllocationPolicy));
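+  // Upper bound on the size of a cleanup node: a DynamicNode holds an object
+  // pointer plus a destructor pointer (16 bytes on 64-bit platforms).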
+  static constexpr size_t kMaxCleanupNodeSize = 16;
+  static_assert(ArenaAlignDefault::IsAligned(kBlockHeaderSize),
+                "kBlockHeaderSize must be default aligned.");
+  static_assert(ArenaAlignDefault::IsAligned(kSerialArenaSize),
+                "kSerialArenaSize must be default aligned.");
+};
+
+template <typename TagOrCleanup>
+inline void ThreadSafeArena::AddCleanup(void* elem, TagOrCleanup cleanup) {
+  SerialArena* arena;
+  if (PROTOBUF_PREDICT_FALSE(!GetSerialArenaFast(&arena))) {
+    arena = GetSerialArenaFallback(kMaxCleanupNodeSize);
+  }
+  arena->AddCleanup(elem, cleanup);
+}
+
+}  // namespace internal
+}  // namespace protobuf
+}  // namespace google
+
+#include "google/protobuf/port_undef.inc"
+
+#endif  // GOOGLE_PROTOBUF_THREAD_SAFE_ARENA_H__