Add integration tests for custom allocator and GCs
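
Run the runtime unit tests against the custom allocator with each GC flavour
(same-thread mark & sweep, concurrent mark & sweep, and no-op) by adding
dedicated test groups. The custom allocator gains test-only introspection
hooks (AtomicStack::GetElements, the per-page GetAllocatedBlocks methods,
Heap::GetAllocatedObjects, Heap::ClearForTests), code specific to
ObjectFactory/ExtraObjectDataFactory is guarded by CUSTOM_ALLOCATOR so the
shared tests compile in both configurations, and the custom_alloc module is
no longer restricted to targets with thread support. Also drops a leftover
debug printf in SameThreadMarkAndSweep.hpp.
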
diff --git a/kotlin-native/runtime/build.gradle.kts b/kotlin-native/runtime/build.gradle.kts
index def32fc..2916138 100644
--- a/kotlin-native/runtime/build.gradle.kts
+++ b/kotlin-native/runtime/build.gradle.kts
@@ -165,9 +165,6 @@
}
compilerArgs.add("-DCUSTOM_ALLOCATOR")
-
- // Directly depends on cms which is only supported with threads.
- onlyIf { target.supportsThreads() }
}
module("opt_alloc") {
@@ -196,6 +193,7 @@
onlyIf { target.supportsCoreSymbolication() }
}
+
module("source_info_libbacktrace") {
srcRoot.set(layout.projectDirectory.dir("src/source_info/libbacktrace"))
headersDirs.from(files("src/main/cpp", "src/libbacktrace/c/include"))
@@ -372,6 +370,11 @@
testedModules.addAll("main", "experimental_memory_manager", "common_gc", "same_thread_ms_gc", "std_alloc", "objc")
}
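+ // Custom-allocator variant: only the allocator-specific modules are under test; the shared runtime modules are linked in as test support.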
+ testsGroup("experimentalMM_custom_alloc_runtime_tests") {
+ testedModules.addAll("experimental_memory_manager_custom", "same_thread_ms_gc_custom")
+ testSupportModules.addAll("main", "common_gc", "custom_alloc", "objc")
+ }
+
testsGroup("experimentalMM_cms_mimalloc_runtime_tests") {
testedModules.addAll("main", "experimental_memory_manager", "common_gc", "concurrent_ms_gc", "mimalloc", "opt_alloc", "objc")
}
@@ -380,6 +383,11 @@
testedModules.addAll("main", "experimental_memory_manager", "common_gc", "concurrent_ms_gc", "std_alloc", "objc")
}
+ testsGroup("experimentalMM_cms_custom_alloc_runtime_tests") {
+ testedModules.addAll("experimental_memory_manager_custom", "concurrent_ms_gc_custom")
+ testSupportModules.addAll("main", "common_gc", "custom_alloc", "objc")
+ }
+
testsGroup("experimentalMM_noop_mimalloc_runtime_tests") {
testedModules.addAll("main", "experimental_memory_manager", "common_gc", "noop_gc", "mimalloc", "opt_alloc", "objc")
}
@@ -387,6 +395,11 @@
testsGroup("experimentalMM_noop_std_alloc_runtime_tests") {
testedModules.addAll("main", "experimental_memory_manager", "common_gc", "noop_gc", "std_alloc", "objc")
}
+
+ testsGroup("experimentalMM_noop_custom_alloc_runtime_tests") {
+ testedModules.addAll("experimental_memory_manager_custom", "noop_gc_custom")
+ testSupportModules.addAll("main", "common_gc", "custom_alloc", "objc")
+ }
}
}
diff --git a/kotlin-native/runtime/src/custom_alloc/cpp/AtomicStack.hpp b/kotlin-native/runtime/src/custom_alloc/cpp/AtomicStack.hpp
index 7db4f43..26f94d0 100644
--- a/kotlin-native/runtime/src/custom_alloc/cpp/AtomicStack.hpp
+++ b/kotlin-native/runtime/src/custom_alloc/cpp/AtomicStack.hpp
@@ -10,6 +10,7 @@
#include "KAssert.h"
#include "Utils.hpp"
+#include "std_support/Vector.hpp"
namespace kotlin::alloc {
@@ -82,6 +83,17 @@
RuntimeAssert(isEmpty(), "AtomicStack must be empty on destruction");
}
+ // Testing method: returns a snapshot of the stack's elements; only call while no other thread is mutating the stack.
+ std_support::vector<T*> GetElements() {
+ std_support::vector<T*> elements;
+ T* elm = stack_.load();
+ while (elm) {
+ elements.push_back(elm);
+ elm = elm->next_;
+ }
+ return elements;
+ }
+
private:
std::atomic<T*> stack_{nullptr};
};
diff --git a/kotlin-native/runtime/src/custom_alloc/cpp/CustomAllocator.hpp b/kotlin-native/runtime/src/custom_alloc/cpp/CustomAllocator.hpp
index 947b5ce..212852a 100644
--- a/kotlin-native/runtime/src/custom_alloc/cpp/CustomAllocator.hpp
+++ b/kotlin-native/runtime/src/custom_alloc/cpp/CustomAllocator.hpp
@@ -37,6 +37,10 @@
FinalizerQueue ExtractFinalizerQueue() noexcept;
static size_t GetAllocatedHeapSize(ObjHeader* object) noexcept;
+
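+ // Testing method: exposes the backing heap so tests can inspect what is allocated.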
+ Heap& heap() noexcept {
+ return heap_;
+ }
private:
uint8_t* Allocate(uint64_t cellCount) noexcept;
diff --git a/kotlin-native/runtime/src/custom_alloc/cpp/FixedBlockPage.cpp b/kotlin-native/runtime/src/custom_alloc/cpp/FixedBlockPage.cpp
index d8356e1..30fedd7 100644
--- a/kotlin-native/runtime/src/custom_alloc/cpp/FixedBlockPage.cpp
+++ b/kotlin-native/runtime/src/custom_alloc/cpp/FixedBlockPage.cpp
@@ -86,4 +86,22 @@
return nextFree_.first > 0 || nextFree_.last < end_;
}
+std_support::vector<uint8_t*> FixedBlockPage::GetAllocatedBlocks() noexcept {
+ std_support::vector<uint8_t*> allocated;
+ CustomAllocInfo("FixedBlockPage(%p)::GetAllocatedBlocks()", this);
+ FixedCellRange nextFree = nextFree_; // Local copy of the free-list head; the loop below walks the free ranges in address order.
+ for (uint32_t cell = 0; cell < end_; cell += blockSize_) {
+ // Collect every block that lies before the next free range.
+ for (; cell < nextFree.first; cell += blockSize_) {
+ allocated.push_back(cells_[cell].data);
+ }
+ if (nextFree.last >= end_) {
+ break;
+ }
+ // Jump over the free range and load the one that follows it.
+ cell = nextFree.last;
+ nextFree = cells_[cell].nextFree;
+ }
+ return allocated;
+}
+
} // namespace kotlin::alloc
diff --git a/kotlin-native/runtime/src/custom_alloc/cpp/FixedBlockPage.hpp b/kotlin-native/runtime/src/custom_alloc/cpp/FixedBlockPage.hpp
index 80df4bc..ca760bf 100644
--- a/kotlin-native/runtime/src/custom_alloc/cpp/FixedBlockPage.hpp
+++ b/kotlin-native/runtime/src/custom_alloc/cpp/FixedBlockPage.hpp
@@ -12,6 +12,7 @@
#include "AtomicStack.hpp"
#include "ExtraObjectPage.hpp"
#include "GCStatistics.hpp"
+#include "std_support/Vector.hpp"
namespace kotlin::alloc {
@@ -43,6 +44,9 @@
bool Sweep(GCSweepScope& sweepHandle, FinalizerQueue& finalizerQueue) noexcept;
+ // Testing method
+ std_support::vector<uint8_t*> GetAllocatedBlocks() noexcept;
+
private:
explicit FixedBlockPage(uint32_t blockSize) noexcept;
diff --git a/kotlin-native/runtime/src/custom_alloc/cpp/FixedBlockPageTest.cpp b/kotlin-native/runtime/src/custom_alloc/cpp/FixedBlockPageTest.cpp
index ef483f0..0f23388 100644
--- a/kotlin-native/runtime/src/custom_alloc/cpp/FixedBlockPageTest.cpp
+++ b/kotlin-native/runtime/src/custom_alloc/cpp/FixedBlockPageTest.cpp
@@ -150,6 +150,15 @@
}
}
EXPECT_EQ(page->Sweep(gcScope, finalizerQueue), !live.empty());
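+ // GetAllocatedBlocks() must report exactly the blocks that survived the sweep, in address order.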
+ uint8_t* prev = nullptr;
+ uint32_t allocCount = 0;
+ for (auto* obj : page->GetAllocatedBlocks()) {
+ EXPECT_LT(prev, obj);
+ prev = obj;
+ ++allocCount;
+ EXPECT_NE(live.find(obj), live.end());
+ }
+ EXPECT_EQ(allocCount, live.size());
}
while ((ptr = alloc(page, size))) live.insert(ptr);
EXPECT_EQ(live.size(), BLOCK_COUNT);
diff --git a/kotlin-native/runtime/src/custom_alloc/cpp/Heap.cpp b/kotlin-native/runtime/src/custom_alloc/cpp/Heap.cpp
index 35ab3df..15095f9 100644
--- a/kotlin-native/runtime/src/custom_alloc/cpp/Heap.cpp
+++ b/kotlin-native/runtime/src/custom_alloc/cpp/Heap.cpp
@@ -14,8 +14,12 @@
#include "CustomAllocConstants.hpp"
#include "AtomicStack.hpp"
#include "CustomLogging.hpp"
+#include "ExtraObjectData.hpp"
#include "ExtraObjectPage.hpp"
+#include "GCApi.hpp"
+#include "Memory.h"
#include "ThreadRegistry.hpp"
+#include "std_support/Vector.hpp"
namespace kotlin::alloc {
@@ -31,6 +35,7 @@
FinalizerQueue Heap::Sweep(gc::GCHandle gcHandle) noexcept {
FinalizerQueue finalizerQueue;
+ CustomAllocDebug("Heap: before sweep FinalizerQueue size == %zu", finalizerQueue.size());
CustomAllocDebug("Heap::Sweep()");
{
auto sweepHandle = gcHandle.sweep();
@@ -40,6 +45,7 @@
nextFitPages_.Sweep(sweepHandle, finalizerQueue);
singleObjectPages_.SweepAndFree(sweepHandle, finalizerQueue);
}
+ CustomAllocDebug("Heap: before extra sweep FinalizerQueue size == %zu", finalizerQueue.size());
{
auto sweepHandle = gcHandle.sweepExtraObjects();
extraObjectPages_.Sweep(sweepHandle, finalizerQueue);
@@ -68,4 +74,41 @@
return extraObjectPages_.GetPage(0, finalizerQueue);
}
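+// Testing method: collects every allocated object from all page stores, skipping objects that have already been finalized.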
+std_support::vector<ObjHeader*> Heap::GetAllocatedObjects() noexcept {
+ std_support::vector<ObjHeader*> allocated;
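+ // The ObjHeader lives at offset gcDataSize within each allocated block.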
+ for (int blockSize = 0; blockSize <= FIXED_BLOCK_PAGE_MAX_BLOCK_SIZE; ++blockSize) {
+ for (auto* page : fixedBlockPages_[blockSize].GetPages()) {
+ for (auto* block : page->GetAllocatedBlocks()) {
+ allocated.push_back(reinterpret_cast<ObjHeader*>(block + gcDataSize));
+ }
+ }
+ }
+ for (auto* page : nextFitPages_.GetPages()) {
+ for (auto* block : page->GetAllocatedBlocks()) {
+ allocated.push_back(reinterpret_cast<ObjHeader*>(block + gcDataSize));
+ }
+ }
+ for (auto* page : singleObjectPages_.GetPages()) {
+ for (auto* block : page->GetAllocatedBlocks()) {
+ allocated.push_back(reinterpret_cast<ObjHeader*>(block + gcDataSize));
+ }
+ }
+ std_support::vector<ObjHeader*> unfinalized;
+ for (auto* obj : allocated) {
+ // Skip objects whose extra object data is already flagged FINALIZED.
+ if (!obj->has_meta_object() || !mm::ExtraObjectData::Get(obj)->getFlag(mm::ExtraObjectData::FLAGS_FINALIZED)) {
+ unfinalized.push_back(obj);
+ }
+ }
+ return unfinalized;
+}
+
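+// Testing method: destroys every page in every page store so each test starts from an empty heap.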
+void Heap::ClearForTests() noexcept {
+ for (int blockSize = 0; blockSize <= FIXED_BLOCK_PAGE_MAX_BLOCK_SIZE; ++blockSize) {
+ fixedBlockPages_[blockSize].ClearForTests();
+ }
+ nextFitPages_.ClearForTests();
+ singleObjectPages_.ClearForTests();
+ extraObjectPages_.ClearForTests();
+}
+
} // namespace kotlin::alloc
diff --git a/kotlin-native/runtime/src/custom_alloc/cpp/Heap.hpp b/kotlin-native/runtime/src/custom_alloc/cpp/Heap.hpp
index a3c206c..d2fdf4f 100644
--- a/kotlin-native/runtime/src/custom_alloc/cpp/Heap.hpp
+++ b/kotlin-native/runtime/src/custom_alloc/cpp/Heap.hpp
@@ -13,6 +13,7 @@
#include "CustomAllocConstants.hpp"
#include "ExtraObjectPage.hpp"
#include "GCStatistics.hpp"
+#include "Memory.h"
#include "SingleObjectPage.hpp"
#include "NextFitPage.hpp"
#include "PageStore.hpp"
@@ -35,6 +36,10 @@
SingleObjectPage* GetSingleObjectPage(uint64_t cellCount, FinalizerQueue& finalizerQueue) noexcept;
ExtraObjectPage* GetExtraObjectPage(FinalizerQueue& finalizerQueue) noexcept;
+ // Testing methods
+ std_support::vector<ObjHeader*> GetAllocatedObjects() noexcept;
+ void ClearForTests() noexcept;
+
private:
PageStore<FixedBlockPage> fixedBlockPages_[FIXED_BLOCK_PAGE_MAX_BLOCK_SIZE + 1];
PageStore<NextFitPage> nextFitPages_;
diff --git a/kotlin-native/runtime/src/custom_alloc/cpp/NextFitPage.cpp b/kotlin-native/runtime/src/custom_alloc/cpp/NextFitPage.cpp
index fb08351..89bed47 100644
--- a/kotlin-native/runtime/src/custom_alloc/cpp/NextFitPage.cpp
+++ b/kotlin-native/runtime/src/custom_alloc/cpp/NextFitPage.cpp
@@ -11,6 +11,7 @@
#include "CustomLogging.hpp"
#include "CustomAllocConstants.hpp"
#include "GCApi.hpp"
+#include "std_support/Vector.hpp"
namespace kotlin::alloc {
@@ -104,4 +105,15 @@
}
}
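+// Testing method: walks the cell chain and collects the data pointer of every block currently marked as allocated.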
+std_support::vector<uint8_t*> NextFitPage::GetAllocatedBlocks() noexcept {
+ std_support::vector<uint8_t*> allocated;
+ Cell* end = cells_ + NEXT_FIT_PAGE_CELL_COUNT;
+ for (Cell* block = cells_ + 1; block != end; block = block->Next()) {
+ if (block->isAllocated_) {
+ allocated.push_back(block->data_);
+ }
+ }
+ return allocated;
+}
+
} // namespace kotlin::alloc
diff --git a/kotlin-native/runtime/src/custom_alloc/cpp/NextFitPage.hpp b/kotlin-native/runtime/src/custom_alloc/cpp/NextFitPage.hpp
index 085ac5b..e8bd30f 100644
--- a/kotlin-native/runtime/src/custom_alloc/cpp/NextFitPage.hpp
+++ b/kotlin-native/runtime/src/custom_alloc/cpp/NextFitPage.hpp
@@ -13,6 +13,7 @@
#include "Cell.hpp"
#include "ExtraObjectPage.hpp"
#include "GCStatistics.hpp"
+#include "std_support/Vector.hpp"
namespace kotlin::alloc {
@@ -34,6 +35,9 @@
// Testing method
bool CheckInvariants() noexcept;
+ // Testing method
+ std_support::vector<uint8_t*> GetAllocatedBlocks() noexcept;
+
private:
explicit NextFitPage(uint32_t cellCount) noexcept;
diff --git a/kotlin-native/runtime/src/custom_alloc/cpp/NextFitPageTest.cpp b/kotlin-native/runtime/src/custom_alloc/cpp/NextFitPageTest.cpp
index 88c3dfe..1190a10 100644
--- a/kotlin-native/runtime/src/custom_alloc/cpp/NextFitPageTest.cpp
+++ b/kotlin-native/runtime/src/custom_alloc/cpp/NextFitPageTest.cpp
@@ -109,9 +109,7 @@
std::minstd_rand r(seed);
NextFitPage* page = NextFitPage::Create(MIN_BLOCK_SIZE);
int unmarked = 0;
- while (true) {
- uint8_t* ptr = alloc(page, MIN_BLOCK_SIZE);
- if (ptr == nullptr) break;
+ while (uint8_t* ptr = alloc(page, MIN_BLOCK_SIZE)) {
if (r() & 1) {
mark(ptr);
} else {
diff --git a/kotlin-native/runtime/src/custom_alloc/cpp/PageStore.hpp b/kotlin-native/runtime/src/custom_alloc/cpp/PageStore.hpp
index 831b7df..bf989d4 100644
--- a/kotlin-native/runtime/src/custom_alloc/cpp/PageStore.hpp
+++ b/kotlin-native/runtime/src/custom_alloc/cpp/PageStore.hpp
@@ -7,10 +7,12 @@
#define CUSTOM_ALLOC_CPP_PAGESTORE_HPP_
#include <atomic>
+#include <cstdint>
#include "AtomicStack.hpp"
#include "ExtraObjectPage.hpp"
#include "GCStatistics.hpp"
+#include "std_support/Vector.hpp"
namespace kotlin::alloc {
@@ -78,6 +80,8 @@
}
private:
+ friend class Heap;
+
T* SweepSingle(GCSweepScope& sweepHandle, T* page, AtomicStack<T>& from, AtomicStack<T>& to, FinalizerQueue& finalizerQueue) noexcept {
if (!page) {
return nullptr;
@@ -92,6 +96,22 @@
return nullptr;
}
+ // Testing method: returns every page that may contain allocations; empty_ pages hold none and are skipped.
+ std_support::vector<T*> GetPages() noexcept {
+ std_support::vector<T*> pages;
+ for (T* page : ready_.GetElements()) pages.push_back(page);
+ for (T* page : used_.GetElements()) pages.push_back(page);
+ for (T* page : unswept_.GetElements()) pages.push_back(page);
+ return pages;
+ }
+
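+ // Testing method: pops and destroys every page from all four stacks.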
+ void ClearForTests() noexcept {
+ while (T* page = empty_.Pop()) page->Destroy();
+ while (T* page = ready_.Pop()) page->Destroy();
+ while (T* page = used_.Pop()) page->Destroy();
+ while (T* page = unswept_.Pop()) page->Destroy();
+ }
+
AtomicStack<T> empty_;
AtomicStack<T> ready_;
AtomicStack<T> used_;
diff --git a/kotlin-native/runtime/src/custom_alloc/cpp/SingleObjectPage.cpp b/kotlin-native/runtime/src/custom_alloc/cpp/SingleObjectPage.cpp
index e38d2d4..69998c2 100644
--- a/kotlin-native/runtime/src/custom_alloc/cpp/SingleObjectPage.cpp
+++ b/kotlin-native/runtime/src/custom_alloc/cpp/SingleObjectPage.cpp
@@ -11,6 +11,7 @@
#include "CustomLogging.hpp"
#include "CustomAllocConstants.hpp"
#include "GCApi.hpp"
+#include "std_support/Vector.hpp"
namespace kotlin::alloc {
@@ -46,4 +47,12 @@
return false;
}
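+// Testing method: a single-object page holds at most one allocated block.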
+std_support::vector<uint8_t*> SingleObjectPage::GetAllocatedBlocks() noexcept {
+ std_support::vector<uint8_t*> allocated;
+ if (isAllocated_) {
+ allocated.push_back(data_);
+ }
+ return allocated;
+}
+
} // namespace kotlin::alloc
diff --git a/kotlin-native/runtime/src/custom_alloc/cpp/SingleObjectPage.hpp b/kotlin-native/runtime/src/custom_alloc/cpp/SingleObjectPage.hpp
index 51646a7..41dade4 100644
--- a/kotlin-native/runtime/src/custom_alloc/cpp/SingleObjectPage.hpp
+++ b/kotlin-native/runtime/src/custom_alloc/cpp/SingleObjectPage.hpp
@@ -12,6 +12,7 @@
#include "AtomicStack.hpp"
#include "ExtraObjectPage.hpp"
#include "GCStatistics.hpp"
+#include "std_support/Vector.hpp"
namespace kotlin::alloc {
@@ -33,9 +34,13 @@
private:
friend class AtomicStack<SingleObjectPage>;
+ friend class Heap;
explicit SingleObjectPage(size_t size) noexcept;
+ // Testing method
+ std_support::vector<uint8_t*> GetAllocatedBlocks() noexcept;
+
SingleObjectPage* next_;
bool isAllocated_ = false;
size_t size_;
diff --git a/kotlin-native/runtime/src/gc/cms/cpp/ConcurrentMarkAndSweepTest.cpp b/kotlin-native/runtime/src/gc/cms/cpp/ConcurrentMarkAndSweepTest.cpp
index ee93875..cb73a63 100644
--- a/kotlin-native/runtime/src/gc/cms/cpp/ConcurrentMarkAndSweepTest.cpp
+++ b/kotlin-native/runtime/src/gc/cms/cpp/ConcurrentMarkAndSweepTest.cpp
@@ -178,6 +178,9 @@
}
std_support::vector<ObjHeader*> Alive(mm::ThreadData& threadData) {
+#ifdef CUSTOM_ALLOCATOR
+ return threadData.gc().impl().alloc().heap().GetAllocatedObjects();
+#else
std_support::vector<ObjHeader*> objects;
for (auto node : threadData.gc().impl().objectFactoryThreadQueue()) {
objects.push_back(node.GetObjHeader());
@@ -186,6 +189,7 @@
objects.push_back(node.GetObjHeader());
}
return objects;
+#endif
}
bool IsMarked(ObjHeader* objHeader) {
@@ -214,7 +218,9 @@
~ConcurrentMarkAndSweepTest() {
mm::GlobalsRegistry::Instance().ClearForTests();
mm::SpecialRefRegistry::instance().clearForTests();
+#ifndef CUSTOM_ALLOCATOR
mm::GlobalData::Instance().extraObjectDataFactory().ClearForTests();
+#endif
mm::GlobalData::Instance().gc().ClearForTests();
}
@@ -1012,6 +1018,8 @@
}
}
+// The custom allocator cannot report objects that are alive only on a particular thread, so this test is skipped for it.
+#ifndef CUSTOM_ALLOCATOR
TEST_P(ConcurrentMarkAndSweepTest, NewThreadsWhileRequestingCollection) {
std_support::vector<Mutator> mutators(kDefaultThreadCount);
std_support::vector<ObjHeader*> globals(2 * kDefaultThreadCount);
@@ -1092,7 +1100,7 @@
EXPECT_THAT(newMutators[i].Alive(), testing::UnorderedElementsAreArray(aliveForThisThread));
}
}
-
+#endif // CUSTOM_ALLOCATOR
TEST_P(ConcurrentMarkAndSweepTest, FreeObjectWithFreeWeakReversedOrder) {
std_support::vector<Mutator> mutators(2);
diff --git a/kotlin-native/runtime/src/gc/cms/cpp/FinalizerProcessorTest.cpp b/kotlin-native/runtime/src/gc/cms/cpp/FinalizerProcessorTest.cpp
index 50ed078..1def417 100644
--- a/kotlin-native/runtime/src/gc/cms/cpp/FinalizerProcessorTest.cpp
+++ b/kotlin-native/runtime/src/gc/cms/cpp/FinalizerProcessorTest.cpp
@@ -27,6 +27,8 @@
// These tests can only work if `GC` is `ConcurrentMarkAndSweep`.
+// The custom allocator uses its own finalizer processor, so these tests are compiled out when CUSTOM_ALLOCATOR is set.
+#ifndef CUSTOM_ALLOCATOR
namespace {
struct Payload {
@@ -144,4 +146,4 @@
ASSERT_EQ(threadsCount(), 1);
});
}
-
+#endif // CUSTOM_ALLOCATOR
diff --git a/kotlin-native/runtime/src/gc/cms/cpp/GCImpl.cpp b/kotlin-native/runtime/src/gc/cms/cpp/GCImpl.cpp
index d505fe7..314fcaa 100644
--- a/kotlin-native/runtime/src/gc/cms/cpp/GCImpl.cpp
+++ b/kotlin-native/runtime/src/gc/cms/cpp/GCImpl.cpp
@@ -5,7 +5,6 @@
#include "GCImpl.hpp"
-#include "Common.h"
#include "ConcurrentMarkAndSweep.hpp"
#include "GC.hpp"
#include "GCStatistics.hpp"
@@ -57,6 +56,8 @@
void gc::GC::ThreadData::ClearForTests() noexcept {
#ifndef CUSTOM_ALLOCATOR
impl_->objectFactoryThreadQueue().ClearForTests();
+#else
+ impl_->alloc().PrepareForGC();
#endif
}
@@ -115,6 +116,8 @@
impl_->gc().StopFinalizerThreadIfRunning();
#ifndef CUSTOM_ALLOCATOR
impl_->objectFactory().ClearForTests();
+#else
+ impl_->gc().heap().ClearForTests();
#endif
GCHandle::ClearForTests();
}
diff --git a/kotlin-native/runtime/src/gc/cms/cpp/GCImplTestSupport.cpp b/kotlin-native/runtime/src/gc/cms/cpp/GCImplTestSupport.cpp
index 3c84c32..131801a4 100644
--- a/kotlin-native/runtime/src/gc/cms/cpp/GCImplTestSupport.cpp
+++ b/kotlin-native/runtime/src/gc/cms/cpp/GCImplTestSupport.cpp
@@ -26,6 +26,10 @@
} // namespace
void gc::AssertClear(GC& gc) noexcept {
+#ifdef CUSTOM_ALLOCATOR
+ auto objects = gc.impl().gc().heap().GetAllocatedObjects();
+#else
auto objects = gc.impl().objectFactory().LockForIter();
+#endif
EXPECT_THAT(collectCopy(objects), testing::UnorderedElementsAre());
}
diff --git a/kotlin-native/runtime/src/gc/noop/cpp/GCImplTestSupport.cpp b/kotlin-native/runtime/src/gc/noop/cpp/GCImplTestSupport.cpp
index 1fab0db..131801a4 100644
--- a/kotlin-native/runtime/src/gc/noop/cpp/GCImplTestSupport.cpp
+++ b/kotlin-native/runtime/src/gc/noop/cpp/GCImplTestSupport.cpp
@@ -26,8 +26,10 @@
} // namespace
void gc::AssertClear(GC& gc) noexcept {
-#ifndef CUSTOM_ALLOCATOR
+#ifdef CUSTOM_ALLOCATOR
+ auto objects = gc.impl().gc().heap().GetAllocatedObjects();
+#else
auto objects = gc.impl().objectFactory().LockForIter();
- EXPECT_THAT(collectCopy(objects), testing::UnorderedElementsAre());
#endif
+ EXPECT_THAT(collectCopy(objects), testing::UnorderedElementsAre());
}
diff --git a/kotlin-native/runtime/src/gc/stms/cpp/GCImpl.cpp b/kotlin-native/runtime/src/gc/stms/cpp/GCImpl.cpp
index 81eb0c3..a952533 100644
--- a/kotlin-native/runtime/src/gc/stms/cpp/GCImpl.cpp
+++ b/kotlin-native/runtime/src/gc/stms/cpp/GCImpl.cpp
@@ -59,6 +59,8 @@
void gc::GC::ThreadData::ClearForTests() noexcept {
#ifndef CUSTOM_ALLOCATOR
impl_->objectFactoryThreadQueue().ClearForTests();
+#else
+ impl_->alloc().PrepareForGC();
#endif
}
@@ -96,10 +98,10 @@
// static
size_t gc::GC::GetAllocatedHeapSize(ObjHeader* object) noexcept {
-#ifndef CUSTOM_ALLOCATOR
- return mm::ObjectFactory<GCImpl>::GetAllocatedHeapSize(object);
-#else
+#ifdef CUSTOM_ALLOCATOR
return alloc::CustomAllocator::GetAllocatedHeapSize(object);
+#else
+ return mm::ObjectFactory<GCImpl>::GetAllocatedHeapSize(object);
#endif
}
@@ -114,6 +116,8 @@
void gc::GC::ClearForTests() noexcept {
#ifndef CUSTOM_ALLOCATOR
impl_->objectFactory().ClearForTests();
+#else
+ impl_->gc().heap().ClearForTests();
#endif
GCHandle::ClearForTests();
}
diff --git a/kotlin-native/runtime/src/gc/stms/cpp/GCImplTestSupport.cpp b/kotlin-native/runtime/src/gc/stms/cpp/GCImplTestSupport.cpp
index 3c84c32..131801a4 100644
--- a/kotlin-native/runtime/src/gc/stms/cpp/GCImplTestSupport.cpp
+++ b/kotlin-native/runtime/src/gc/stms/cpp/GCImplTestSupport.cpp
@@ -26,6 +26,10 @@
} // namespace
void gc::AssertClear(GC& gc) noexcept {
+#ifdef CUSTOM_ALLOCATOR
+ auto objects = gc.impl().gc().heap().GetAllocatedObjects();
+#else
auto objects = gc.impl().objectFactory().LockForIter();
+#endif
EXPECT_THAT(collectCopy(objects), testing::UnorderedElementsAre());
}
diff --git a/kotlin-native/runtime/src/gc/stms/cpp/SameThreadMarkAndSweep.cpp b/kotlin-native/runtime/src/gc/stms/cpp/SameThreadMarkAndSweep.cpp
index 29b6cb7..3294c67 100644
--- a/kotlin-native/runtime/src/gc/stms/cpp/SameThreadMarkAndSweep.cpp
+++ b/kotlin-native/runtime/src/gc/stms/cpp/SameThreadMarkAndSweep.cpp
@@ -188,8 +188,7 @@
#ifndef CUSTOM_ALLOCATOR
finalizerQueue.Finalize();
#else
- alloc::ExtraObjectCell* cell;
- while ((cell = finalizerQueue.Pop())) {
+ while (alloc::ExtraObjectCell* cell = finalizerQueue.Pop()) {
RunFinalizers(cell->Data()->GetBaseObject());
}
#endif
diff --git a/kotlin-native/runtime/src/gc/stms/cpp/SameThreadMarkAndSweep.hpp b/kotlin-native/runtime/src/gc/stms/cpp/SameThreadMarkAndSweep.hpp
index 71bcbdd..69bb82d 100644
--- a/kotlin-native/runtime/src/gc/stms/cpp/SameThreadMarkAndSweep.hpp
+++ b/kotlin-native/runtime/src/gc/stms/cpp/SameThreadMarkAndSweep.hpp
@@ -42,7 +42,6 @@
class ObjectData {
public:
bool tryMark() noexcept {
- printf("WTF?!?!?\n"); fflush(stdout);
bool result = trySetNext(reinterpret_cast<ObjectData*>(1));
RuntimeLogDebug({"gc"}, "tryMark %p = %d", this, result);
return result;
diff --git a/kotlin-native/runtime/src/gc/stms/cpp/SameThreadMarkAndSweepTest.cpp b/kotlin-native/runtime/src/gc/stms/cpp/SameThreadMarkAndSweepTest.cpp
index 5bd2ba6..b8140f8 100644
--- a/kotlin-native/runtime/src/gc/stms/cpp/SameThreadMarkAndSweepTest.cpp
+++ b/kotlin-native/runtime/src/gc/stms/cpp/SameThreadMarkAndSweepTest.cpp
@@ -179,6 +179,9 @@
}
std_support::vector<ObjHeader*> Alive(mm::ThreadData& threadData) {
+#ifdef CUSTOM_ALLOCATOR
+ return threadData.gc().impl().alloc().heap().GetAllocatedObjects();
+#else
std_support::vector<ObjHeader*> objects;
for (auto node : threadData.gc().impl().objectFactoryThreadQueue()) {
objects.push_back(node.GetObjHeader());
@@ -187,6 +190,7 @@
objects.push_back(node.GetObjHeader());
}
return objects;
+#endif
}
bool IsMarked(ObjHeader* objHeader) {
@@ -210,8 +214,12 @@
~SameThreadMarkAndSweepTest() {
mm::GlobalsRegistry::Instance().ClearForTests();
mm::SpecialRefRegistry::instance().clearForTests();
+#ifndef CUSTOM_ALLOCATOR
mm::GlobalData::Instance().extraObjectDataFactory().ClearForTests();
mm::GlobalData::Instance().gc().impl().objectFactory().ClearForTests();
+#else
+ mm::GlobalData::Instance().gc().impl().gc().heap().ClearForTests();
+#endif
}
testing::MockFunction<void(ObjHeader*)>& finalizerHook() { return finalizerHooks_.finalizerHook(); }
@@ -996,6 +1004,8 @@
}
}
+// The custom allocator cannot report objects that are alive only on a particular thread, so this test is skipped for it.
+#ifndef CUSTOM_ALLOCATOR
TEST_F(SameThreadMarkAndSweepTest, NewThreadsWhileRequestingCollection) {
std_support::vector<Mutator> mutators(kDefaultThreadCount);
std_support::vector<ObjHeader*> globals(2 * kDefaultThreadCount);
@@ -1076,6 +1086,7 @@
EXPECT_THAT(newMutators[i].Alive(), testing::UnorderedElementsAreArray(aliveForThisThread));
}
}
+#endif // CUSTOM_ALLOCATOR
TEST_F(SameThreadMarkAndSweepTest, FreeObjectWithFreeWeakReversedOrder) {
diff --git a/kotlin-native/runtime/src/mm/cpp/ExtraObjectDataFactory.cpp b/kotlin-native/runtime/src/mm/cpp/ExtraObjectDataFactory.cpp
index 41a3e24..dd7d146 100644
--- a/kotlin-native/runtime/src/mm/cpp/ExtraObjectDataFactory.cpp
+++ b/kotlin-native/runtime/src/mm/cpp/ExtraObjectDataFactory.cpp
@@ -10,6 +10,7 @@
using namespace kotlin;
+#ifndef CUSTOM_ALLOCATOR
// static
mm::ExtraObjectDataFactory& mm::ExtraObjectDataFactory::Instance() noexcept {
return GlobalData::Instance().extraObjectDataFactory();
@@ -41,3 +42,4 @@
mm::ExtraObjectDataFactory::ExtraObjectDataFactory() = default;
mm::ExtraObjectDataFactory::~ExtraObjectDataFactory() = default;
+#endif // CUSTOM_ALLOCATOR
diff --git a/kotlin-native/runtime/src/mm/cpp/ExtraObjectDataTest.cpp b/kotlin-native/runtime/src/mm/cpp/ExtraObjectDataTest.cpp
index 806cd4c..0053207 100644
--- a/kotlin-native/runtime/src/mm/cpp/ExtraObjectDataTest.cpp
+++ b/kotlin-native/runtime/src/mm/cpp/ExtraObjectDataTest.cpp
@@ -29,7 +29,9 @@
~ExtraObjectDataTest() {
mm::GlobalsRegistry::Instance().ClearForTests();
+#ifndef CUSTOM_ALLOCATOR
mm::GlobalData::Instance().extraObjectDataFactory().ClearForTests();
+#endif
mm::GlobalData::Instance().gc().ClearForTests();
}
};
diff --git a/kotlin-native/runtime/src/mm/cpp/GlobalData.hpp b/kotlin-native/runtime/src/mm/cpp/GlobalData.hpp
index b6d7b25..45c48f9 100644
--- a/kotlin-native/runtime/src/mm/cpp/GlobalData.hpp
+++ b/kotlin-native/runtime/src/mm/cpp/GlobalData.hpp
@@ -27,7 +27,9 @@
ThreadRegistry& threadRegistry() noexcept { return threadRegistry_; }
GlobalsRegistry& globalsRegistry() noexcept { return globalsRegistry_; }
SpecialRefRegistry& specialRefRegistry() noexcept { return specialRefRegistry_; }
+#ifndef CUSTOM_ALLOCATOR
ExtraObjectDataFactory& extraObjectDataFactory() noexcept { return extraObjectDataFactory_; }
+#endif
gc::GC& gc() noexcept { return gc_; }
AppStateTracking& appStateTracking() noexcept { return appStateTracking_; }
@@ -42,7 +44,9 @@
AppStateTracking appStateTracking_;
GlobalsRegistry globalsRegistry_;
SpecialRefRegistry specialRefRegistry_;
+#ifndef CUSTOM_ALLOCATOR
ExtraObjectDataFactory extraObjectDataFactory_;
+#endif
gc::GC gc_;
};
diff --git a/kotlin-native/runtime/src/mm/cpp/ObjectFactoryTest.cpp b/kotlin-native/runtime/src/mm/cpp/ObjectFactoryTest.cpp
index 36ec7b6..a795beb 100644
--- a/kotlin-native/runtime/src/mm/cpp/ObjectFactoryTest.cpp
+++ b/kotlin-native/runtime/src/mm/cpp/ObjectFactoryTest.cpp
@@ -21,6 +21,8 @@
#include "std_support/CStdlib.hpp"
#include "std_support/Vector.hpp"
+// ObjectFactory is not used by the custom allocator, so these tests are compiled out when CUSTOM_ALLOCATOR is set.
+#ifndef CUSTOM_ALLOCATOR
using namespace kotlin;
using testing::_;
@@ -1130,3 +1132,4 @@
EXPECT_THAT(actual, testing::UnorderedElementsAreArray(expected));
EXPECT_CALL(allocator, Free(_, _)).Times(kThreadCount);
}
+#endif // CUSTOM_ALLOCATOR
diff --git a/kotlin-native/runtime/src/mm/cpp/TestSupport.cpp b/kotlin-native/runtime/src/mm/cpp/TestSupport.cpp
index 6a041a3..2c8ebbf 100644
--- a/kotlin-native/runtime/src/mm/cpp/TestSupport.cpp
+++ b/kotlin-native/runtime/src/mm/cpp/TestSupport.cpp
@@ -45,12 +45,15 @@
extern "C" void Kotlin_TestSupport_AssertClearGlobalState() {
// Validate that global registries are empty.
auto globals = mm::GlobalsRegistry::Instance().LockForIter();
- auto extraObjects = mm::GlobalData::Instance().extraObjectDataFactory().LockForIter();
auto specialRefs = mm::SpecialRefRegistry::instance().lockForIter();
auto threads = mm::ThreadRegistry::Instance().LockForIter();
- EXPECT_THAT(collectCopy(globals), testing::UnorderedElementsAre());
+#ifndef CUSTOM_ALLOCATOR
+ auto extraObjects = mm::GlobalData::Instance().extraObjectDataFactory().LockForIter();
EXPECT_THAT(collectPointers(extraObjects), testing::UnorderedElementsAre());
+#endif
+
+ EXPECT_THAT(collectCopy(globals), testing::UnorderedElementsAre());
EXPECT_THAT(collectPointers(specialRefs), testing::UnorderedElementsAre());
EXPECT_THAT(collectPointers(threads), testing::UnorderedElementsAre());
gc::AssertClear(mm::GlobalData::Instance().gc());
diff --git a/kotlin-native/runtime/src/mm/cpp/ThreadData.hpp b/kotlin-native/runtime/src/mm/cpp/ThreadData.hpp
index 1ad8101..22dba9a 100644
--- a/kotlin-native/runtime/src/mm/cpp/ThreadData.hpp
+++ b/kotlin-native/runtime/src/mm/cpp/ThreadData.hpp
@@ -34,7 +34,9 @@
threadId_(threadId),
globalsThreadQueue_(GlobalsRegistry::Instance()),
specialRefRegistry_(SpecialRefRegistry::instance()),
+#ifndef CUSTOM_ALLOCATOR
extraObjectDataThreadQueue_(ExtraObjectDataFactory::Instance()),
+#endif
gc_(GlobalData::Instance().gc(), *this),
suspensionData_(ThreadState::kNative, *this) {}
@@ -48,7 +50,9 @@
SpecialRefRegistry::ThreadQueue& specialRefRegistry() noexcept { return specialRefRegistry_; }
+#ifndef CUSTOM_ALLOCATOR
ExtraObjectDataFactory::ThreadQueue& extraObjectDataThreadQueue() noexcept { return extraObjectDataThreadQueue_; }
+#endif
ThreadState state() noexcept { return suspensionData_.state(); }
@@ -66,14 +70,18 @@
// TODO: These use separate locks, which is inefficient.
globalsThreadQueue_.Publish();
specialRefRegistry_.publish();
+#ifndef CUSTOM_ALLOCATOR
extraObjectDataThreadQueue_.Publish();
+#endif
gc_.Publish();
}
void ClearForTests() noexcept {
globalsThreadQueue_.ClearForTests();
specialRefRegistry_.clearForTests();
+#ifndef CUSTOM_ALLOCATOR
extraObjectDataThreadQueue_.ClearForTests();
+#endif
gc_.ClearForTests();
}
@@ -82,7 +90,9 @@
GlobalsRegistry::ThreadQueue globalsThreadQueue_;
ThreadLocalStorage tls_;
SpecialRefRegistry::ThreadQueue specialRefRegistry_;
+#ifndef CUSTOM_ALLOCATOR
ExtraObjectDataFactory::ThreadQueue extraObjectDataThreadQueue_;
+#endif
ShadowStack shadowStack_;
gc::GC::ThreadData gc_;
std_support::vector<std::pair<ObjHeader**, ObjHeader*>> initializingSingletons_;