// Copyright 2020 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
#include "pw_allocator/block.h"
#include <cstring>
namespace pw::allocator {
Status Block::Init(const std::span<std::byte> region, Block** block) {
  // Ensure the region we're given is aligned to a Block boundary and large
  // enough to hold a Block header.
if (reinterpret_cast<uintptr_t>(region.data()) % alignof(Block) != 0) {
return Status::INVALID_ARGUMENT;
}
if (region.size() < sizeof(Block)) {
return Status::INVALID_ARGUMENT;
}
  // Alias the block's header through a union so the same address can be
  // written both as a Block* and as a std::byte*.
  union {
    Block* block;
    std::byte* bytes;
  } aliased;
  aliased.bytes = region.data();
// Make "next" point just past the end of this block; forming a linked list
// with the following storage. Since the space between this block and the
// next are implicitly part of the raw data, size can be computed by
// subtracting the pointers.
aliased.block->next = reinterpret_cast<Block*>(region.end());
aliased.block->MarkLast();
aliased.block->prev = nullptr;
*block = aliased.block;
#if PW_ALLOCATOR_POISON_ENABLE
(*block)->PoisonBlock();
#endif // PW_ALLOCATOR_POISON_ENABLE
return Status::OK;
}
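
// Example use of Block::Init (a sketch; the buffer and its size are
// illustrative, not part of this module):
//
//   alignas(Block) std::byte buffer[1024];
//   Block* block = nullptr;
//   if (!Block::Init(std::span(buffer), &block).ok()) {
//     // The region was misaligned or too small to hold a Block header.
//   }
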
Status Block::Split(size_t head_block_inner_size, Block** new_block) {
if (new_block == nullptr) {
return Status::INVALID_ARGUMENT;
}
// Don't split used blocks.
// TODO: Relax this restriction? Flag to enable/disable this check?
if (Used()) {
return Status::FAILED_PRECONDITION;
}
  // First round head_block_inner_size up to an alignof(Block) boundary. This
  // ensures that the next block header is aligned accordingly. Alignment must
  // be a power of two, so alignof(Block) - 1 is a mask that extracts the
  // remainder.
auto align_bit_mask = alignof(Block) - 1;
size_t aligned_head_block_inner_size = head_block_inner_size;
if ((head_block_inner_size & align_bit_mask) != 0) {
aligned_head_block_inner_size =
(head_block_inner_size & ~align_bit_mask) + alignof(Block);
}
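  // For example, with alignof(Block) == 8 the mask is 0b111: a requested
  // inner size of 13 rounds up to (13 & ~0b111) + 8 == 16, while a size of
  // 16 is already aligned and is left unchanged. (The alignment value is
  // illustrative; it depends on the target.)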
  // (1) Are we trying to allocate a head block larger than the current
  // block's inner size? This may happen because of the alignment above.
if (aligned_head_block_inner_size > InnerSize()) {
return Status::OUT_OF_RANGE;
}
// (2) Does the resulting block have enough space to store the header?
// TODO: What to do if the returned section is empty (i.e. remaining
// size == sizeof(Block))?
if (InnerSize() - aligned_head_block_inner_size <
sizeof(Block) + 2 * PW_ALLOCATOR_POISON_OFFSET) {
return Status::RESOURCE_EXHAUSTED;
}
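  // For example, with a 16-byte Block header, poisoning disabled, and an
  // InnerSize() of 64, a request for an aligned 56 bytes leaves only 8
  // trailing bytes, too few for the new header, so the split is rejected.
  // (The header size is illustrative; it depends on the target.)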
// Create the new block inside the current one.
  Block* new_next = reinterpret_cast<Block*>(
      // From the current position...
      reinterpret_cast<intptr_t>(this) +
      // skip past the current header...
      sizeof(*this) +
      // skip the poison bytes before the usable space...
      PW_ALLOCATOR_POISON_OFFSET +
      // advance through the usable bytes by the new inner size...
      aligned_head_block_inner_size +
      // and skip the poison bytes after the usable space.
      PW_ALLOCATOR_POISON_OFFSET);
  // If we're inserting in the middle of the list, update the following
  // block's "prev" pointer to point at the block we're inserting.
if (!Last()) {
Next()->prev = new_next;
}
  // Copy "next" verbatim so the new block also inherits the "last" flag.
new_next->next = next;
new_next->prev = this;
// Update the current block to point to the new head.
next = new_next;
*new_block = next;
#if PW_ALLOCATOR_POISON_ENABLE
PoisonBlock();
(*new_block)->PoisonBlock();
#endif // PW_ALLOCATOR_POISON_ENABLE
return Status::OK;
}
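
// Example use of Block::Split (a sketch; assumes `block` came from a
// successful Block::Init over a sufficiently large region):
//
//   Block* tail = nullptr;
//   if (block->Split(128, &tail).ok()) {
//     // `block` now has an inner size of 128 (rounded up as needed) and
//     // `tail` owns the remainder of the region.
//   }
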
Status Block::MergeNext() {
// Anything to merge with?
if (Last()) {
return Status::OUT_OF_RANGE;
}
// Is this or the next block in use?
if (Used() || Next()->Used()) {
return Status::FAILED_PRECONDITION;
}
  // Simply enough, this block's "next" pointer becomes the next block's
  // "next" pointer. We then need to re-wire the "next-next" block's "prev"
  // pointer to point back at this block.
next = Next()->next;
// Copying the pointer also copies the "last" status, so this is safe.
if (!Last()) {
Next()->prev = this;
}
return Status::OK;
}
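
// Example use of Block::MergeNext, undoing the split above (a sketch; both
// blocks must be free):
//
//   if (block->MergeNext().ok()) {
//     // `tail` no longer names a valid block header; `block` once again
//     // spans the combined region.
//   }
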
Status Block::MergePrev() {
  // We can't merge if we have no previous block. Beyond that, merging with
  // the previous block is just MergeNext() invoked on that block.
if (prev == nullptr) {
return Status::OUT_OF_RANGE;
}
  // WARNING: This instance's memory will still exist, but the block header
  // is technically invalid once the merge succeeds. Be careful when doing
  // anything with `this` after the call below.
return prev->MergeNext();
}
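
// Example use of Block::MergePrev (a sketch). On success the receiver is
// absorbed into its predecessor, so keep using the previous block:
//
//   Block* previous = block->Prev();
//   if (block->MergePrev().ok()) {
//     // Use `previous` from here on; `block` no longer names a valid
//     // header.
//   }
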
// TODO(pwbug/234): Add stack tracing to locate which call to the heap operation
// caused the corruption.
// TODO: Add detailed information to log report and leave succinct messages
// in the crash message.
void Block::CrashIfInvalid() {
switch (CheckStatus()) {
case VALID:
break;
case MISALIGNED:
PW_DCHECK(false, "The block at address %p is not aligned.", this);
break;
case NEXT_MISMATCHED:
PW_DCHECK(false,
"The 'prev' field in the next block (%p) does not match the "
"address of the current block (%p).",
Next()->Prev(),
this);
break;
case PREV_MISMATCHED:
PW_DCHECK(false,
"The 'next' field in the previous block (%p) does not match "
"the address of the current block (%p).",
Prev()->Next(),
this);
break;
case POISON_CORRUPTED:
PW_DCHECK(
false, "The poisoned pattern in the block at %p is corrupted.", this);
break;
}
}
// This function will return a Block::BlockStatus that is either VALID or
// indicates the reason why the Block is invalid. If the Block is invalid at
// multiple points, this function will only return one of the reasons.
Block::BlockStatus Block::CheckStatus() const {
// Make sure the Block is aligned.
if (reinterpret_cast<uintptr_t>(this) % alignof(Block) != 0) {
return BlockStatus::MISALIGNED;
}
// Test if the prev/next pointer for this Block matches.
if (!Last() && (this >= Next() || this != Next()->Prev())) {
return BlockStatus::NEXT_MISMATCHED;
}
if (Prev() && (this <= Prev() || this != Prev()->Next())) {
return BlockStatus::PREV_MISMATCHED;
}
#if PW_ALLOCATOR_POISON_ENABLE
if (!this->CheckPoisonBytes()) {
return BlockStatus::POISON_CORRUPTED;
}
#endif // PW_ALLOCATOR_POISON_ENABLE
return BlockStatus::VALID;
}
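
// Example use of CheckStatus (a sketch):
//
//   if (block->CheckStatus() != Block::BlockStatus::VALID) {
//     // The block is corrupted; CrashIfInvalid() would assert with a
//     // diagnostic naming the failure.
//   }
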
// Paint PW_ALLOCATOR_POISON_OFFSET bytes before and after the usable space
// in the Block with the poison pattern.
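// With poisoning enabled, a block's memory is laid out roughly as follows
// (a sketch; each poison region is PW_ALLOCATOR_POISON_OFFSET bytes):
//
//   +--------+--------------+------------------+--------------+
//   | header | poison bytes |   usable space   | poison bytes |
//   +--------+--------------+------------------+--------------+
//   ^ this   ^ this + sizeof(Block)            ^ Next() - PW_ALLOCATOR_POISON_OFFSET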
void Block::PoisonBlock() {
#if PW_ALLOCATOR_POISON_ENABLE
  std::byte* front_region = reinterpret_cast<std::byte*>(this) + sizeof(*this);
  std::memcpy(front_region, POISON_PATTERN, PW_ALLOCATOR_POISON_OFFSET);
  std::byte* end_region =
      reinterpret_cast<std::byte*>(Next()) - PW_ALLOCATOR_POISON_OFFSET;
  std::memcpy(end_region, POISON_PATTERN, PW_ALLOCATOR_POISON_OFFSET);
#endif // PW_ALLOCATOR_POISON_ENABLE
}
bool Block::CheckPoisonBytes() const {
#if PW_ALLOCATOR_POISON_ENABLE
  const std::byte* front_region =
      reinterpret_cast<const std::byte*>(this) + sizeof(*this);
  if (std::memcmp(front_region, POISON_PATTERN, PW_ALLOCATOR_POISON_OFFSET)) {
    return false;
  }
  const std::byte* end_region =
      reinterpret_cast<const std::byte*>(Next()) - PW_ALLOCATOR_POISON_OFFSET;
if (std::memcmp(end_region, POISON_PATTERN, PW_ALLOCATOR_POISON_OFFSET)) {
return false;
}
#endif // PW_ALLOCATOR_POISON_ENABLE
return true;
}
}  // namespace pw::allocator