pw_allocator: Fix tests on ARM Clang build
* Fixes test buffer alignment to meet Block requirements.
* Renames private Block variables to match Google style.
Bug: pwbug/315
Change-Id: If2323aca3b90c3b9c0da8a3f6402f7af5715df4e
Reviewed-on: https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/31968
Reviewed-by: Keir Mierle <keir@google.com>
Commit-Queue: Armando Montanez <amontanez@google.com>
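The alignment fix in the first bullet is the substance of this change: the heap carves Block headers directly out of the caller's buffer, and a plain std::byte array is only guaranteed 1-byte alignment. The standalone sketch below (not Pigweed code; this Block is a stand-in with the same two-pointer layout) illustrates the issue:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Stand-in with a Block-like layout: two pointers, so alignof(Block) is
// pointer alignment (4 bytes on 32-bit ARM).
struct Block {
  Block* next;
  Block* prev;
};

int main() {
  // Without alignas, this array is only guaranteed 1-byte alignment, so
  // reinterpret_cast<Block*>(buf) may yield a misaligned Block pointer.
  // alignas(Block) guarantees a starting address Block can safely use.
  alignas(Block) std::byte buf[64] = {};
  auto* block = reinterpret_cast<Block*>(buf);
  bool ok = reinterpret_cast<std::uintptr_t>(block) % alignof(Block) == 0;
  std::printf("block aligned: %d\n", static_cast<int>(ok));
  return 0;
}
```

On ARMv7-M, some misaligned loads and stores fault in hardware, which is the likely failure mode behind pwbug/315 on the QEMU Clang toolchains.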
diff --git a/BUILD.gn b/BUILD.gn
index 06e411d..7985203 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -271,6 +271,7 @@
# Targets for all module unit test groups.
pw_test_group("pw_module_tests") {
group_deps = [
+ "$dir_pw_allocator:tests",
"$dir_pw_assert:tests",
"$dir_pw_base64:tests",
"$dir_pw_blob_store:tests",
@@ -312,19 +313,6 @@
"$dir_pw_varint:tests",
]
- # TODO(pwbug/315): Fix pw_allocator tests on ARM Clang build.
- _qemu_toolchains = [
- "lm3s6965evb_qemu_clang_debug",
- "lm3s6965evb_qemu_clang_size_optimized",
- "lm3s6965evb_qemu_clang_speed_optimized",
- ]
- _toolchain_is_qemu_clang =
- _qemu_toolchains + [ get_label_info(current_toolchain, "name") ] -
- [ get_label_info(current_toolchain, "name") ] != _qemu_toolchains
- if (!_toolchain_is_qemu_clang) {
- group_deps += [ "$dir_pw_allocator:tests" ]
- }
-
if (defined(pw_toolchain_SCOPE.is_host_toolchain) &&
pw_toolchain_SCOPE.is_host_toolchain) {
# TODO(pwbug/196): KVS tests are not compatible with device builds as they
diff --git a/pw_allocator/block.cc b/pw_allocator/block.cc
index 0bfa9f9..7390419 100644
--- a/pw_allocator/block.cc
+++ b/pw_allocator/block.cc
@@ -38,10 +38,10 @@
// with the following storage. Since the space between this block and the
// next is implicitly part of the raw data, size can be computed by
// subtracting the pointers.
- aliased.block->next = reinterpret_cast<Block*>(region.end());
+ aliased.block->next_ = reinterpret_cast<Block*>(region.end());
aliased.block->MarkLast();
- aliased.block->prev = nullptr;
+ aliased.block->prev_ = nullptr;
*block = aliased.block;
#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
(*block)->PoisonBlock();
@@ -101,17 +101,17 @@
// If we're inserting in the middle, we need to update the current next
// block to point to what we're inserting.
if (!Last()) {
- Next()->prev = new_next;
+ Next()->prev_ = new_next;
}
// Copy next_ verbatim so the new block also inherits the "last" flag.
- new_next->next = next;
- new_next->prev = this;
+ new_next->next_ = next_;
+ new_next->prev_ = this;
// Update the current block to point to the new head.
- next = new_next;
+ next_ = new_next;
- *new_block = next;
+ *new_block = next_;
#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
PoisonBlock();
@@ -135,11 +135,11 @@
// Put simply, this block's next pointer becomes the next block's
// next pointer. We then need to re-wire the "next next" block's prev
// pointer to point back to us.
- next = Next()->next;
+ next_ = Next()->next_;
// Copying the pointer also copies the "last" status, so this is safe.
if (!Last()) {
- Next()->prev = this;
+ Next()->prev_ = this;
}
return OkStatus();
@@ -148,14 +148,14 @@
Status Block::MergePrev() {
// We can't merge if we have no previous. After that though, merging with
// the previous block is just MergeNext from the previous block.
- if (prev == nullptr) {
+ if (prev_ == nullptr) {
return Status::OutOfRange();
}
// WARNING: This class instance will still exist, but will technically be
// invalid after this has been invoked. Be careful when doing anything with
// `this` after the call below.
- return prev->MergeNext();
+ return prev_->MergeNext();
}
// TODO(pwbug/234): Add stack tracing to locate which call to the heap operation
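The pointer rewiring in this file is easier to follow in isolation. Below is a minimal sketch of the same two operations, using plain doubly-linked nodes with nullptr standing in for the real Block's "last" flag; it is not the Pigweed implementation:

```cpp
struct Node {
  Node* next;
  Node* prev;
};

// Insert `new_next` immediately after `self`, as in Block::Split.
void InsertAfter(Node* self, Node* new_next) {
  if (self->next != nullptr) {
    self->next->prev = new_next;  // old neighbor points back at the new node
  }
  new_next->next = self->next;    // copied verbatim, before it is overwritten
  new_next->prev = self;
  self->next = new_next;
}

// Absorb the node that follows `self`, as in Block::MergeNext.
// Assumes self->next is non-null (the real code checks Last() first).
void MergeNext(Node* self) {
  self->next = self->next->next;  // skip over the absorbed node
  if (self->next != nullptr) {
    self->next->prev = self;      // re-wire the "next next" node back to us
  }
}
```

Note the ordering: new_next->next must be copied from self->next before self->next is overwritten, which is why Block::Split copies next_ first.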
diff --git a/pw_allocator/freelist_heap_test.cc b/pw_allocator/freelist_heap_test.cc
index 7746383..469e7d9 100644
--- a/pw_allocator/freelist_heap_test.cc
+++ b/pw_allocator/freelist_heap_test.cc
@@ -23,7 +23,7 @@
TEST(FreeListHeap, CanAllocate) {
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 512;
- std::byte buf[N] = {std::byte(0)};
+ alignas(Block) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -37,7 +37,7 @@
TEST(FreeListHeap, AllocationsDontOverlap) {
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 512;
- std::byte buf[N] = {std::byte(0)};
+ alignas(Block) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -59,7 +59,7 @@
// and get that value back again.
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 512;
- std::byte buf[N] = {std::byte(0)};
+ alignas(Block) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -72,7 +72,7 @@
TEST(FreeListHeap, ReturnsNullWhenAllocationTooLarge) {
constexpr size_t N = 2048;
- std::byte buf[N] = {std::byte(0)};
+ alignas(Block) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -81,7 +81,7 @@
TEST(FreeListHeap, ReturnsNullWhenFull) {
constexpr size_t N = 2048;
- std::byte buf[N] = {std::byte(0)};
+ alignas(Block) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -93,7 +93,7 @@
TEST(FreeListHeap, ReturnedPointersAreAligned) {
constexpr size_t N = 2048;
- std::byte buf[N] = {std::byte(0)};
+ alignas(Block) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -119,7 +119,7 @@
// We can cheat; create a heap, allocate it all, and try to return something
// random to it. Try allocating again, and check that we get nullptr back.
constexpr size_t N = 2048;
- std::byte buf[N] = {std::byte(0)};
+ alignas(Block) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -146,7 +146,7 @@
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 512;
constexpr size_t kNewAllocSize = 768;
- std::byte buf[N] = {std::byte(1)};
+ alignas(Block) std::byte buf[N] = {std::byte(1)};
FreeListHeapBuffer allocator(buf);
@@ -161,7 +161,7 @@
constexpr size_t N = 2048;
constexpr size_t kAllocSize = sizeof(int);
constexpr size_t kNewAllocSize = sizeof(int) * 2;
- std::byte buf[N] = {std::byte(1)};
+ alignas(Block) std::byte buf[N] = {std::byte(1)};
// Data inside the allocated block.
std::byte data1[kAllocSize];
// Data inside the reallocated block.
@@ -185,7 +185,7 @@
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 512;
constexpr size_t kNewAllocSize = 256;
- std::byte buf[N] = {std::byte(0)};
+ alignas(Block) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -200,7 +200,7 @@
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 512;
constexpr size_t kNewAllocSize = 256;
- std::byte buf[N] = {std::byte(0)};
+ alignas(Block) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -215,7 +215,7 @@
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 512;
constexpr size_t kNewAllocSize = 4096;
- std::byte buf[N] = {std::byte(0)};
+ alignas(Block) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -232,7 +232,7 @@
constexpr size_t kAllocSize = 128;
constexpr size_t kNum = 4;
constexpr int size = kNum * kAllocSize;
- std::byte buf[N] = {std::byte(1)};
+ alignas(Block) std::byte buf[N] = {std::byte(1)};
constexpr std::byte zero{0};
FreeListHeapBuffer allocator(buf);
@@ -251,7 +251,7 @@
constexpr size_t kAllocSize = 143;
constexpr size_t kNum = 3;
constexpr int size = kNum * kAllocSize;
- std::byte buf[N] = {std::byte(132)};
+ alignas(Block) std::byte buf[N] = {std::byte(132)};
constexpr std::byte zero{0};
FreeListHeapBuffer allocator(buf);
@@ -268,7 +268,7 @@
TEST(FreeListHeap, CallocTooLarge) {
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 2049;
- std::byte buf[N] = {std::byte(1)};
+ alignas(Block) std::byte buf[N] = {std::byte(1)};
FreeListHeapBuffer allocator(buf);
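Taken together, the fixed tests all follow one pattern. A sketch of that pattern is below; the header paths and FreeListHeapBuffer calls mirror what the tests above use, but treat the details as assumptions rather than a verbatim excerpt:

```cpp
#include <cstddef>

#include "pw_allocator/block.h"
#include "pw_allocator/freelist_heap.h"

namespace pw::allocator {

void AlignedBufferExample() {
  // alignas(Block) is the fix: FreeListHeapBuffer carves Blocks directly
  // out of this storage, so the storage must satisfy Block's alignment.
  alignas(Block) std::byte buf[2048] = {std::byte(0)};
  FreeListHeapBuffer allocator(buf);
  void* ptr = allocator.Allocate(512);
  allocator.Free(ptr);
}

}  // namespace pw::allocator
```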
diff --git a/pw_allocator/public/pw_allocator/block.h b/pw_allocator/public/pw_allocator/block.h
index b82dee9..fcea0e0 100644
--- a/pw_allocator/public/pw_allocator/block.h
+++ b/pw_allocator/public/pw_allocator/block.h
@@ -176,22 +176,22 @@
// Mark this block as in-use
void MarkUsed() {
- next = reinterpret_cast<Block*>((NextAsUIntPtr() | kInUseFlag));
+ next_ = reinterpret_cast<Block*>((NextAsUIntPtr() | kInUseFlag));
}
// Mark this block as free
void MarkFree() {
- next = reinterpret_cast<Block*>((NextAsUIntPtr() & ~kInUseFlag));
+ next_ = reinterpret_cast<Block*>((NextAsUIntPtr() & ~kInUseFlag));
}
// Mark this block as the last one in the chain.
void MarkLast() {
- next = reinterpret_cast<Block*>((NextAsUIntPtr() | kLastFlag));
+ next_ = reinterpret_cast<Block*>((NextAsUIntPtr() | kLastFlag));
}
// Clear the "last" bit from this block.
void ClearLast() {
- next = reinterpret_cast<Block*>((NextAsUIntPtr() & ~kLastFlag));
+ next_ = reinterpret_cast<Block*>((NextAsUIntPtr() & ~kLastFlag));
}
// Fetch the block immediately after this one.
@@ -204,7 +204,7 @@
// Return the block immediately before this one. This will return nullptr
// if this is the "first" block.
- Block* Prev() const { return prev; }
+ Block* Prev() const { return prev_; }
// Return true if the block is aligned, the prev/next fields match the
// previous and next blocks, and the poison bytes are not damaged. Otherwise,
@@ -238,7 +238,7 @@
// Helper to reduce some of the casting nesting in the block management
// functions.
- uintptr_t NextAsUIntPtr() const { return reinterpret_cast<uintptr_t>(next); }
+ uintptr_t NextAsUIntPtr() const { return reinterpret_cast<uintptr_t>(next_); }
void PoisonBlock();
bool CheckPoisonBytes() const;
@@ -248,8 +248,8 @@
// block, with templated type for the offset size. There are some interesting
// tradeoffs here; perhaps a pool of small allocations could use 1-byte
// next/prev offsets to reduce size further.
- Block* next;
- Block* prev;
+ Block* next_;
+ Block* prev_;
};
} // namespace pw::allocator
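The Mark*/Clear* helpers in this header work because Block stores its bookkeeping flags in the low bits of next_, which is also why the buffer alignment fixed above is a hard requirement. A minimal sketch of that tagged-pointer technique follows; the flag values are assumptions for illustration, as the real Block defines its own constants:

```cpp
#include <cstdint>

inline constexpr uintptr_t kInUseFlag = 0x1;  // assumed value
inline constexpr uintptr_t kLastFlag = 0x2;   // assumed value

struct Node {
  Node* next;

  uintptr_t NextBits() const { return reinterpret_cast<uintptr_t>(next); }

  // Setting a flag ORs it into the pointer's low bits. This is only safe
  // because Nodes are pointer-aligned, so a real address never has these
  // bits set -- the same alignment requirement the test buffers now meet.
  void MarkUsed() { next = reinterpret_cast<Node*>(NextBits() | kInUseFlag); }
  bool Used() const { return (NextBits() & kInUseFlag) != 0; }

  // Recovering the real pointer requires masking both flag bits back off
  // before the value can be dereferenced.
  Node* Next() const {
    return reinterpret_cast<Node*>(NextBits() & ~(kInUseFlag | kLastFlag));
  }
};
```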