// Protocol Buffers - Google's data interchange format
// Copyright 2023 Google LLC. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd

#ifndef UPB_MEM_INTERNAL_ARENA_H_
#define UPB_MEM_INTERNAL_ARENA_H_

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include "upb/port/sanitizers.h"

// Must be last.
#include "upb/port/def.inc"

// This is QUITE an ugly hack, which specifies the number of pointers needed
// to equal (or exceed) the storage required for one upb_Arena.
//
// We need this because the decoder inlines a upb_Arena for performance but
// the full struct is not visible outside of arena.c. Yes, I know, it's awful.
#ifndef NDEBUG
#define UPB_ARENA_BASE_SIZE_HACK 11
#else
#define UPB_ARENA_BASE_SIZE_HACK 10
#endif

#define UPB_ARENA_SIZE_HACK \
  (UPB_ARENA_BASE_SIZE_HACK + (UPB_XSAN_STRUCT_SIZE * 2))
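
// Illustrative sketch (an assumption, not code from this header): a consumer
// like the decoder can reserve correctly-sized storage for an inlined arena
// without depending on the private allocator state that lives in arena.c:
//
//   union {
//     struct upb_Arena arena;              // Accessed via the API below.
//     void* storage[UPB_ARENA_SIZE_HACK];  // Guarantees sufficient size.
//   } inlined;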

// LINT.IfChange(upb_Arena)

struct upb_Arena {
  char* UPB_ONLYBITS(ptr);  // Next free byte in the current block.
  const UPB_NODEREF char* UPB_ONLYBITS(end);  // End of the current block.
  UPB_XSAN_MEMBER
};

// LINT.ThenChange(//depot/google3/third_party/upb/bits/typescript/arena.ts:upb_Arena)
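
// upb_Arena is a bump allocator: an allocation of N (aligned) bytes returns
// the current `ptr` and advances it by N, as sketched below.
//
//   before:  [ used           |ptr        free           |end ]
//   after:   [ used | N bytes |ptr        free           |end ]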

#ifdef __cplusplus
extern "C" {
#endif

// Copies arena state between an inlined arena and its backing storage (used
// by the decoder's inlined-arena trick); both are defined in arena.c.
void UPB_PRIVATE(_upb_Arena_SwapIn)(struct upb_Arena* des,
                                    const struct upb_Arena* src);
void UPB_PRIVATE(_upb_Arena_SwapOut)(struct upb_Arena* des,
                                     const struct upb_Arena* src);

// Returns the number of bytes remaining in the arena's current block.
UPB_INLINE size_t UPB_PRIVATE(_upb_ArenaHas)(const struct upb_Arena* a) {
  return (size_t)(a->UPB_ONLYBITS(end) - a->UPB_ONLYBITS(ptr));
}

// Returns the number of block bytes consumed by an allocation of `size`:
// the malloc-aligned size plus room for a sanitizer guard region.
UPB_INLINE size_t UPB_PRIVATE(_upb_Arena_AllocSpan)(size_t size) {
  return UPB_ALIGN_MALLOC(size) + UPB_PRIVATE(kUpb_Asan_GuardSize);
}
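
// Worked example (assuming UPB_MALLOC_ALIGN is 8 and sanitizers are
// disabled, so kUpb_Asan_GuardSize is 0): a request for 10 bytes occupies
// a span of UPB_ALIGN_MALLOC(10) + 0 = 16 bytes of the current block.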

// Returns true if `ptr` (an allocation of `size` bytes) was the most recent
// allocation carved from the current block, i.e. its span ends exactly at
// the arena's bump pointer.
UPB_INLINE bool UPB_PRIVATE(_upb_Arena_WasLastAllocFromCurrentBlock)(
    const struct upb_Arena* a, void* ptr, size_t size) {
  return UPB_PRIVATE(upb_Xsan_PtrEq)(
      (char*)ptr + UPB_PRIVATE(_upb_Arena_AllocSpan)(size),
      a->UPB_ONLYBITS(ptr));
}

// Returns true if `ptr` satisfies the arena's malloc alignment requirement.
UPB_INLINE bool UPB_PRIVATE(_upb_Arena_IsAligned)(const void* ptr) {
  return (uintptr_t)ptr % UPB_MALLOC_ALIGN == 0;
}

// Allocates `size` bytes from the arena, taking the fast bump-pointer path
// when the current block has room and falling back to the out-of-line slow
// path otherwise. Returns NULL on allocation failure.
UPB_API_INLINE void* upb_Arena_Malloc(struct upb_Arena* a, size_t size) {
  UPB_PRIVATE(upb_Xsan_AccessReadWrite)(UPB_XSAN(a));

  size_t span = UPB_PRIVATE(_upb_Arena_AllocSpan)(size);

  if (UPB_UNLIKELY(UPB_PRIVATE(_upb_ArenaHas)(a) < span)) {
    // Block-scoped declaration: the slow path is defined in arena.c, and
    // declaring it here keeps it out of the rest of the header.
    void* UPB_PRIVATE(_upb_Arena_SlowMalloc)(struct upb_Arena * a, size_t size);
    return UPB_PRIVATE(_upb_Arena_SlowMalloc)(a, span);
  }

  // We have enough space to do a fast malloc.
  void* ret = a->UPB_ONLYBITS(ptr);
  a->UPB_ONLYBITS(ptr) += span;
  UPB_ASSERT(UPB_PRIVATE(_upb_Arena_IsAligned)(ret));
  UPB_ASSERT(UPB_PRIVATE(_upb_Arena_IsAligned)(a->UPB_ONLYBITS(ptr)));

  return UPB_PRIVATE(upb_Xsan_NewUnpoisonedRegion)(UPB_XSAN(a), ret, size);
}
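
// Example (illustrative; assumes an arena created with upb_Arena_New() from
// upb/mem/arena.h, which exposes this struct as upb_Arena):
//
//   upb_Arena* a = upb_Arena_New();
//   int32_t* n = (int32_t*)upb_Arena_Malloc(a, sizeof(int32_t));
//   if (n) *n = 42;
//   upb_Arena_Free(a);  // Frees every allocation made from `a` at once.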

// Shrinks `ptr`, the most recent allocation of `oldsize` bytes, to `size`
// bytes in place. If `ptr` came from the current block the freed tail is
// returned to the arena; otherwise only sanitizer bookkeeping is updated.
UPB_API_INLINE void upb_Arena_ShrinkLast(struct upb_Arena* a, void* ptr,
                                         size_t oldsize, size_t size) {
  UPB_ASSERT(ptr);
  UPB_ASSERT(size <= oldsize);

  UPB_PRIVATE(upb_Xsan_AccessReadWrite)(UPB_XSAN(a));
  UPB_PRIVATE(upb_Xsan_ResizeUnpoisonedRegion)(ptr, oldsize, size);

  if (UPB_PRIVATE(_upb_Arena_WasLastAllocFromCurrentBlock)(a, ptr, oldsize)) {
    // We can reclaim some memory.
    a->UPB_ONLYBITS(ptr) -= UPB_ALIGN_MALLOC(oldsize) - UPB_ALIGN_MALLOC(size);
  } else {
    // We can't reclaim any memory, but we need to verify that `ptr` really
    // does represent the most recent allocation.
#ifndef NDEBUG
    bool _upb_Arena_WasLastAlloc(struct upb_Arena * a, void* ptr,
                                 size_t oldsize);
    UPB_ASSERT(_upb_Arena_WasLastAlloc(a, ptr, oldsize));
#endif
  }
}
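
// Example (illustrative): over-allocate, then hand back the unused tail once
// the final size is known. `fill_buffer` is a hypothetical helper that
// returns the number of bytes it wrote.
//
//   char* buf = (char*)upb_Arena_Malloc(a, 128);
//   size_t used = fill_buffer(buf, 128);
//   upb_Arena_ShrinkLast(a, buf, 128, used);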

// Attempts to grow `ptr`, the most recent allocation of `oldsize` bytes, to
// `size` bytes in place. Returns false (and leaves the allocation untouched)
// if `ptr` was not the last allocation from the current block or if the
// block does not have enough room.
UPB_API_INLINE bool upb_Arena_TryExtend(struct upb_Arena* a, void* ptr,
                                        size_t oldsize, size_t size) {
  UPB_ASSERT(ptr);
  UPB_ASSERT(size > oldsize);

  UPB_PRIVATE(upb_Xsan_AccessReadWrite)(UPB_XSAN(a));
  size_t extend = UPB_ALIGN_MALLOC(size) - UPB_ALIGN_MALLOC(oldsize);

  if (UPB_PRIVATE(_upb_Arena_WasLastAllocFromCurrentBlock)(a, ptr, oldsize) &&
      UPB_PRIVATE(_upb_ArenaHas)(a) >= extend) {
    a->UPB_ONLYBITS(ptr) += extend;
    UPB_PRIVATE(upb_Xsan_ResizeUnpoisonedRegion)(ptr, oldsize, size);
    return true;
  }

  return false;
}
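
// Example (illustrative): grow a buffer in place when possible, falling back
// to a copying realloc when not.
//
//   char* buf = (char*)upb_Arena_Malloc(a, 64);
//   if (!upb_Arena_TryExtend(a, buf, 64, 128)) {
//     buf = (char*)upb_Arena_Realloc(a, buf, 64, 128);
//   }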

// Reallocates `ptr` (an allocation of `oldsize` bytes) to `size` bytes,
// shrinking or extending in place when possible and otherwise copying the
// data into a fresh allocation. Returns NULL on allocation failure.
UPB_API_INLINE void* upb_Arena_Realloc(struct upb_Arena* a, void* ptr,
                                       size_t oldsize, size_t size) {
  UPB_PRIVATE(upb_Xsan_AccessReadWrite)(UPB_XSAN(a));

  void* ret;

  if (ptr && (size <= oldsize || upb_Arena_TryExtend(a, ptr, oldsize, size))) {
    // We can extend or shrink in place.
    if (size <= oldsize &&
        UPB_PRIVATE(_upb_Arena_WasLastAllocFromCurrentBlock)(a, ptr, oldsize)) {
      upb_Arena_ShrinkLast(a, ptr, oldsize, size);
    }
    ret = ptr;
  } else {
    // We need to copy into a new allocation.
    ret = upb_Arena_Malloc(a, size);
    if (ret && oldsize > 0) {
      memcpy(ret, ptr, UPB_MIN(oldsize, size));
    }
  }

  // We want to invalidate pointers to the old region if hwasan is enabled, so
  // we poison and unpoison even if ptr == ret.
  UPB_PRIVATE(upb_Xsan_PoisonRegion)(ptr, oldsize);
  return UPB_PRIVATE(upb_Xsan_NewUnpoisonedRegion)(UPB_XSAN(a), ret, size);
}
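
// Example (illustrative): doubling a dynamic array held in arena storage.
//
//   size_t cap = 4;
//   int32_t* arr = (int32_t*)upb_Arena_Malloc(a, cap * sizeof(int32_t));
//   int32_t* grown = (int32_t*)upb_Arena_Realloc(
//       a, arr, cap * sizeof(int32_t), 2 * cap * sizeof(int32_t));
//   if (grown) { arr = grown; cap *= 2; }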

#ifdef __cplusplus
} /* extern "C" */
#endif

#include "upb/port/undef.inc"

#endif /* UPB_MEM_INTERNAL_ARENA_H_ */