[K/N] concurrent weak sweep
diff --git a/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/BinaryOptions.kt b/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/BinaryOptions.kt
index d9d3fb5..16f5804 100644
--- a/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/BinaryOptions.kt
+++ b/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/BinaryOptions.kt
@@ -37,6 +37,8 @@
 
     val gcMarkSingleThreaded by booleanOption()
 
+    val concurrentWeakSweep by booleanOption()
+
     val auxGCThreads by intOption()
 
     val linkRuntime by option<RuntimeLinkageStrategyBinaryOption>()
diff --git a/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/KonanConfig.kt b/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/KonanConfig.kt
index 754a368..43b7c89e 100644
--- a/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/KonanConfig.kt
+++ b/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/KonanConfig.kt
@@ -173,6 +173,9 @@
     val gcMarkSingleThreaded: Boolean
         get() = configuration.get(BinaryOptions.gcMarkSingleThreaded) ?: false
 
+    val concurrentWeakSweep: Boolean
+        get() = configuration.get(BinaryOptions.concurrentWeakSweep) ?: false
+
     val auxGCThreads: Int
         get() = configuration.get(BinaryOptions.auxGCThreads) ?: 1
 
diff --git a/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/llvm/IrToBitcode.kt b/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/llvm/IrToBitcode.kt
index 97809a3..f95c496 100644
--- a/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/llvm/IrToBitcode.kt
+++ b/kotlin-native/backend.native/compiler/ir/backend.native/src/org/jetbrains/kotlin/backend/konan/llvm/IrToBitcode.kt
@@ -2956,6 +2956,7 @@
     setRuntimeConstGlobal("Kotlin_freezingEnabled", llvm.constInt32(if (config.freezing.enableFreezeAtRuntime) 1 else 0))
     setRuntimeConstGlobal("Kotlin_freezingChecksEnabled", llvm.constInt32(if (config.freezing.enableFreezeChecks) 1 else 0))
     setRuntimeConstGlobal("Kotlin_gcSchedulerType", llvm.constInt32(config.gcSchedulerType.value))
+    setRuntimeConstGlobal("Kotlin_concurrentWeakSweep", llvm.constInt32(if (config.concurrentWeakSweep) 1 else 0))
 
     return llvmModule
 }
diff --git a/kotlin-native/runtime/src/gc/cms/cpp/Barriers.cpp b/kotlin-native/runtime/src/gc/cms/cpp/Barriers.cpp
new file mode 100644
index 0000000..8bbc310e
--- /dev/null
+++ b/kotlin-native/runtime/src/gc/cms/cpp/Barriers.cpp
@@ -0,0 +1,89 @@
+#include "Barriers.hpp"
+
+#include "ThreadData.hpp"
+#include "ThreadRegistry.hpp"
+#include "GCImpl.hpp"
+#include <atomic>
+
+using namespace kotlin;
+
+namespace {
+
+[[clang::no_destroy]] std::atomic<bool> weakRefBarriersEnabled = false;
+
+template<typename Iterable, typename Pred>
+bool forall(Iterable& iterable, Pred&& pred) {
+    for (auto& item : iterable) {
+        if (!pred(item)) return false;
+    }
+    return true;
+}
+
+void checkpointAction(mm::ThreadData& thread) {
+    thread.gc().impl().gc().barriers().onCheckpoint();
+}
+
+void waitForThreadsToReachCheckpoint() {
+    // resetCheckpoint
+    for (auto& thr: mm::ThreadRegistry::Instance().LockForIter()) {
+        thr.gc().impl().gc().barriers().resetCheckpoint();
+    }
+
+    // requestCheckpoint
+    bool safePointSet = mm::TrySetSafePointAction(checkpointAction);
+    RuntimeAssert(safePointSet, "Only the GC thread can request safepoint actions, and STW must have already finished");
+
+    // waitForAllThreadsToVisitCheckpoint
+    auto threads = mm::ThreadRegistry::Instance().LockForIter();
+    while (!forall(threads, [](mm::ThreadData& thr) { return thr.gc().impl().gc().barriers().visitedCheckpoint() || thr.suspensionData().suspendedOrNative(); })) {
+        std::this_thread::yield();
+    }
+
+    //unsetSafePointAction
+    mm::UnsetSafePointAction();
+}
+
+} // anonymous namespace
+
+void gc::BarriersThreadData::onCheckpoint() {
+    visitedCheckpoint_.store(true, std::memory_order_seq_cst);
+}
+
+void gc::BarriersThreadData::resetCheckpoint() {
+    visitedCheckpoint_.store(false, std::memory_order_seq_cst);
+}
+
+bool gc::BarriersThreadData::visitedCheckpoint() const {
+    return visitedCheckpoint_.load(std::memory_order_relaxed);
+}
+
+void gc::EnableWeakRefBarriers(bool inSTW) {
+    weakRefBarriersEnabled.store(true, std::memory_order_seq_cst);
+    if (!inSTW) {
+        waitForThreadsToReachCheckpoint();
+    }
+}
+
+void gc::DisableWeakRefBarriers(bool inSTW) {
+    weakRefBarriersEnabled.store(false, std::memory_order_seq_cst);
+    if (!inSTW) {
+        waitForThreadsToReachCheckpoint();
+    }
+}
+
+OBJ_GETTER(kotlin::gc::WeakRefRead, ObjHeader* weakReferee) noexcept {
+    if (compiler::concurrentWeakSweep()) {
+        if (weakReferee != nullptr) {
+            // weakRefBarriersEnabled changes are synchronized with checkpoints or STW
+            if (weakRefBarriersEnabled.load(std::memory_order_relaxed)) {
+                // When weak ref barriers are enabled, marked state cannot change and the
+                // object cannot be deleted.
+                if (!gc::isMarked(weakReferee)) {
+                    RETURN_OBJ(nullptr);
+                }
+            }
+        }
+    }
+    RETURN_OBJ(weakReferee);
+}
+
diff --git a/kotlin-native/runtime/src/gc/cms/cpp/Barriers.hpp b/kotlin-native/runtime/src/gc/cms/cpp/Barriers.hpp
new file mode 100644
index 0000000..6203840
--- /dev/null
+++ b/kotlin-native/runtime/src/gc/cms/cpp/Barriers.hpp
@@ -0,0 +1,24 @@
+#pragma once
+
+#include <atomic>
+
+#include "Memory.h"
+#include "Utils.hpp"
+
+namespace kotlin::gc {
+
+class BarriersThreadData : private Pinned {
+public:
+    void onCheckpoint();
+    void resetCheckpoint();
+    bool visitedCheckpoint() const;
+private:
+    std::atomic<bool> visitedCheckpoint_ = false;
+};
+
+void EnableWeakRefBarriers(bool inSTW);
+void DisableWeakRefBarriers(bool inSTW);
+
+OBJ_GETTER(WeakRefRead, ObjHeader* weakReferee) noexcept;
+
+} // namespace kotlin::gc
\ No newline at end of file
diff --git a/kotlin-native/runtime/src/gc/cms/cpp/ConcurrentMarkAndSweep.cpp b/kotlin-native/runtime/src/gc/cms/cpp/ConcurrentMarkAndSweep.cpp
index 90f07a6..40f9b4e 100644
--- a/kotlin-native/runtime/src/gc/cms/cpp/ConcurrentMarkAndSweep.cpp
+++ b/kotlin-native/runtime/src/gc/cms/cpp/ConcurrentMarkAndSweep.cpp
@@ -14,7 +14,6 @@
 #include "MarkAndSweepUtils.hpp"
 #include "Memory.h"
 #include "ThreadData.hpp"
-#include "ThreadRegistry.hpp"
 #include "ThreadSuspension.hpp"
 #include "FinalizerProcessor.hpp"
 #include "GCStatistics.hpp"
@@ -262,8 +261,6 @@
     auto markStats = gcHandle.getMarked();
     scheduler.gcData().UpdateAliveSetBytes(markStats.markedSizeBytes);
 
-    gc::processWeaks<ProcessWeaksTraits>(gcHandle, mm::SpecialRefRegistry::instance());
-
 #ifndef CUSTOM_ALLOCATOR
     // Taking the locks before the pause is completed. So that any destroying thread
     // would not publish into the global state at an unexpected time.
@@ -271,8 +268,22 @@
     auto objectFactoryIterable = objectFactory_.LockForIter();
     checkMarkCorrectness(objectFactoryIterable);
 
-    mm::ResumeThreads();
-    gcHandle.threadsAreResumed();
+    if (compiler::concurrentWeakSweep()) {
+        // Expected to happen inside STW.
+        gc::EnableWeakRefBarriers(true);
+
+        mm::ResumeThreads();
+        gcHandle.threadsAreResumed();
+    }
+
+    gc::processWeaks<ProcessWeaksTraits>(gcHandle, mm::SpecialRefRegistry::instance());
+
+    if (compiler::concurrentWeakSweep()) {
+        gc::DisableWeakRefBarriers(false);
+    } else {
+        mm::ResumeThreads();
+        gcHandle.threadsAreResumed();
+    }
 
     gc::SweepExtraObjects<SweepTraits>(gcHandle, extraObjectFactoryIterable);
     auto finalizerQueue = gc::Sweep<SweepTraits>(gcHandle, objectFactoryIterable);
diff --git a/kotlin-native/runtime/src/gc/cms/cpp/ConcurrentMarkAndSweep.hpp b/kotlin-native/runtime/src/gc/cms/cpp/ConcurrentMarkAndSweep.hpp
index f44a091..0213b44 100644
--- a/kotlin-native/runtime/src/gc/cms/cpp/ConcurrentMarkAndSweep.hpp
+++ b/kotlin-native/runtime/src/gc/cms/cpp/ConcurrentMarkAndSweep.hpp
@@ -22,6 +22,7 @@
 #include "GCStatistics.hpp"
 #include "MarkStack.hpp"
 #include "ParallelMark.hpp"
+#include "Barriers.hpp"
 
 #ifdef CUSTOM_ALLOCATOR
 #include "CustomAllocator.hpp"
@@ -76,6 +77,7 @@
         void clearMarkFlags();
 
         mm::ThreadData& commonThreadData() const;
+        BarriersThreadData& barriers() { return barriersThreadData_; }
 
     private:
         friend ConcurrentMarkAndSweep;
@@ -86,6 +88,8 @@
         std::atomic<bool> rootSetLocked_ = false;
         std::atomic<bool> published_ = false;
         std::atomic<bool> cooperative_ = false;
+
+        BarriersThreadData barriersThreadData_;
     };
 
     using ObjectData = ThreadData::ObjectData;
diff --git a/kotlin-native/runtime/src/gc/cms/cpp/ConcurrentMarkAndSweepTest.cpp b/kotlin-native/runtime/src/gc/cms/cpp/ConcurrentMarkAndSweepTest.cpp
index 08528a7..eb46044 100644
--- a/kotlin-native/runtime/src/gc/cms/cpp/ConcurrentMarkAndSweepTest.cpp
+++ b/kotlin-native/runtime/src/gc/cms/cpp/ConcurrentMarkAndSweepTest.cpp
@@ -745,6 +745,19 @@
                 mutators[i].Execute([](mm::ThreadData& threadData, Mutator& mutator) { threadData.gc().SafePointFunctionPrologue(); });
     }
 
+    for (int i = 1; i < kDefaultThreadCount; ++i) {
+        gcFutures[i].wait();
+    }
+
+    // Spin until barriers confirmation is requested.
+    while (!mm::IsSafePointActionRequested()) {}
+
+    for (int i = 1; i < kDefaultThreadCount; ++i) {
+        gcFutures[i] = mutators[i].Execute([](mm::ThreadData& threadData, Mutator& mutator) {
+            threadData.gc().SafePointFunctionPrologue();
+        });
+    }
+
     for (auto& future : gcFutures) {
         future.wait();
     }
@@ -794,7 +807,7 @@
         gcFutures[i] = mutators[i].Execute([](mm::ThreadData& threadData, Mutator& mutator) {
             threadData.gc().ScheduleAndWaitFullGC();
             // If GC starts before all thread executed line above, two gc will be run
-            // So we are temporary switch threads to native state and then return them back after all GC runs are done
+            // So we temporarily switch threads to native state and then return them back after all GC runs are done
             SwitchThreadState(mm::GetMemoryState(), kotlin::ThreadState::kNative);
         });
     }
@@ -879,6 +892,19 @@
         });
     }
 
+    for (int i = 1; i < kDefaultThreadCount; ++i) {
+        gcFutures[i].wait();
+    }
+
+    // Spin until barriers confirmation is requested.
+    while (!mm::IsSafePointActionRequested()) {}
+
+    for (int i = 1; i < kDefaultThreadCount; ++i) {
+        gcFutures[i] = mutators[i].Execute([](mm::ThreadData& threadData, Mutator& mutator) {
+            threadData.gc().SafePointFunctionPrologue();
+        });
+    }
+
     for (auto& future : gcFutures) {
         future.wait();
     }
@@ -942,6 +968,19 @@
                 mutators[i].Execute([](mm::ThreadData& threadData, Mutator& mutator) { threadData.gc().SafePointFunctionPrologue(); });
     }
 
+    for (int i = 1; i < kDefaultThreadCount; ++i) {
+        gcFutures[i].wait();
+    }
+
+    // Spin until barriers confirmation is requested.
+    while (!mm::IsSafePointActionRequested()) {}
+
+    for (int i = 1; i < kDefaultThreadCount; ++i) {
+        gcFutures[i] = mutators[i].Execute([](mm::ThreadData& threadData, Mutator& mutator) {
+            threadData.gc().SafePointFunctionPrologue();
+        });
+    }
+
     for (auto& future : gcFutures) {
         future.wait();
     }
@@ -1008,6 +1047,20 @@
         });
     }
 
+    for (int i = 1; i < kDefaultThreadCount; ++i) {
+        gcFutures[i].wait();
+    }
+
+    // Spin until barriers confirmation is requested.
+    while (!mm::IsSafePointActionRequested()) {}
+
+    for (int i = 1; i < kDefaultThreadCount; ++i) {
+        gcFutures[i] = mutators[i].Execute([weak](mm::ThreadData& threadData, Mutator& mutator) {
+            threadData.gc().SafePointFunctionPrologue();
+            EXPECT_THAT(weak->get(), nullptr);
+        });
+    }
+
     for (auto& future : gcFutures) {
         future.wait();
     }
@@ -1064,6 +1117,25 @@
                 mutators[i].Execute([](mm::ThreadData& threadData, Mutator& mutator) { threadData.gc().SafePointFunctionPrologue(); });
     }
 
+    for (int i = 1; i < kDefaultThreadCount; ++i) {
+        gcFutures[i].wait();
+    }
+
+    // Spin until barriers confirmation is requested.
+    while (!mm::IsSafePointActionRequested()) {}
+
+    for (int i = 1; i < kDefaultThreadCount; ++i) {
+        gcFutures[i] = mutators[i].Execute([](mm::ThreadData& threadData, Mutator& mutator) {
+            threadData.gc().SafePointFunctionPrologue();
+        });
+    }
+
+    for (int i = 0; i < kDefaultThreadCount; ++i) {
+        attachFutures[i] = newMutators[i].Execute([](mm::ThreadData& threadData, Mutator& mutator) {
+            threadData.gc().SafePointFunctionPrologue();
+        });
+    }
+
     // GC will be completed first
     for (auto& future : gcFutures) {
         future.wait();
diff --git a/kotlin-native/runtime/src/gc/cms/cpp/GCImpl.cpp b/kotlin-native/runtime/src/gc/cms/cpp/GCImpl.cpp
index ccb4efb..267ac92 100644
--- a/kotlin-native/runtime/src/gc/cms/cpp/GCImpl.cpp
+++ b/kotlin-native/runtime/src/gc/cms/cpp/GCImpl.cpp
@@ -8,6 +8,7 @@
 #include "GC.hpp"
 #include "GCStatistics.hpp"
 #include "MarkAndSweepUtils.hpp"
+#include "ObjectOps.hpp"
 #include "ThreadSuspension.hpp"
 #include "MarkStack.hpp"
 #include "std_support/Memory.hpp"
@@ -138,3 +139,12 @@
 ALWAYS_INLINE void gc::GC::processFieldInMark(void* state, ObjHeader* field) noexcept {
     gc::internal::processFieldInMark<gc::mark::MarkTraits>(state, field);
 }
+
+bool gc::isMarked(ObjHeader* object) noexcept {
+    auto& objectData = mm::ObjectFactory<gc::ConcurrentMarkAndSweep>::NodeRef::From(object).ObjectData();
+    return objectData.marked();
+}
+
+ALWAYS_INLINE OBJ_GETTER(gc::tryRef, ObjHeader* object) noexcept {
+    RETURN_RESULT_OF(gc::WeakRefRead, object);
+}
diff --git a/kotlin-native/runtime/src/gc/cms/cpp/ParallelMark.cpp b/kotlin-native/runtime/src/gc/cms/cpp/ParallelMark.cpp
index ff0b5b4..e8b16e7 100644
--- a/kotlin-native/runtime/src/gc/cms/cpp/ParallelMark.cpp
+++ b/kotlin-native/runtime/src/gc/cms/cpp/ParallelMark.cpp
@@ -338,7 +338,7 @@
 void gc::mark::MarkDispatcher::waitForThreadsPauseMutation() noexcept {
     RuntimeAssert(!kotlin::mm::IsCurrentThreadRegistered(), "Dispatcher thread must not be registered");
     waitFast([this] {
-        return allMutators([](mm::ThreadData& mut){ return mm::isSuspendedOrNative(mut) || mut.gc().impl().gc().cooperative(); });
+        return allMutators([](mm::ThreadData& mut){ return mut.suspensionData().suspendedOrNative() || mut.gc().impl().gc().cooperative(); });
     });
 }
 
diff --git a/kotlin-native/runtime/src/gc/common/cpp/GC.hpp b/kotlin-native/runtime/src/gc/common/cpp/GC.hpp
index 1b68ad2..91066e6 100644
--- a/kotlin-native/runtime/src/gc/common/cpp/GC.hpp
+++ b/kotlin-native/runtime/src/gc/common/cpp/GC.hpp
@@ -77,6 +77,9 @@
     std_support::unique_ptr<Impl> impl_;
 };
 
+bool isMarked(ObjHeader* object) noexcept;
+OBJ_GETTER(tryRef, ObjHeader* object) noexcept;
+
 inline constexpr bool kSupportsMultipleMutators = true;
 
 } // namespace gc
diff --git a/kotlin-native/runtime/src/gc/noop/cpp/GCImpl.cpp b/kotlin-native/runtime/src/gc/noop/cpp/GCImpl.cpp
index f60b612..fb8b870 100644
--- a/kotlin-native/runtime/src/gc/noop/cpp/GCImpl.cpp
+++ b/kotlin-native/runtime/src/gc/noop/cpp/GCImpl.cpp
@@ -9,6 +9,7 @@
 #include "std_support/Memory.hpp"
 #include "GlobalData.hpp"
 #include "GCStatistics.hpp"
+#include "ObjectOps.hpp"
 
 using namespace kotlin;
 
@@ -96,3 +97,12 @@
 
 // static
 ALWAYS_INLINE void gc::GC::processFieldInMark(void* state, ObjHeader* field) noexcept {}
+
+bool gc::isMarked(ObjHeader* object) noexcept {
+    RuntimeAssert(false, "Should not reach here");
+    return true;
+}
+
+ALWAYS_INLINE OBJ_GETTER(gc::tryRef, ObjHeader* object) noexcept {
+    RETURN_OBJ(object);
+}
diff --git a/kotlin-native/runtime/src/gc/stms/cpp/GCImpl.cpp b/kotlin-native/runtime/src/gc/stms/cpp/GCImpl.cpp
index 5acb3ef..1faef5b 100644
--- a/kotlin-native/runtime/src/gc/stms/cpp/GCImpl.cpp
+++ b/kotlin-native/runtime/src/gc/stms/cpp/GCImpl.cpp
@@ -10,6 +10,7 @@
 #include "std_support/Memory.hpp"
 #include "GlobalData.hpp"
 #include "GCStatistics.hpp"
+#include "ObjectOps.hpp"
 
 using namespace kotlin;
 
@@ -115,3 +116,12 @@
 ALWAYS_INLINE void gc::GC::processFieldInMark(void* state, ObjHeader* field) noexcept {
     gc::internal::processFieldInMark<gc::internal::MarkTraits>(state, field);
 }
+
+bool gc::isMarked(ObjHeader* object) noexcept {
+    auto& objectData = mm::ObjectFactory<gc::SameThreadMarkAndSweep>::NodeRef::From(object).ObjectData();
+    return objectData.marked();
+}
+
+ALWAYS_INLINE OBJ_GETTER(gc::tryRef, ObjHeader* object) noexcept {
+    RETURN_OBJ(object);
+}
diff --git a/kotlin-native/runtime/src/legacymm/cpp/Weak.cpp b/kotlin-native/runtime/src/legacymm/cpp/Weak.cpp
index 0b538f1..6eb8da5 100644
--- a/kotlin-native/runtime/src/legacymm/cpp/Weak.cpp
+++ b/kotlin-native/runtime/src/legacymm/cpp/Weak.cpp
@@ -84,12 +84,12 @@
 
 // Materialize a weak reference to either null or the real reference.
 OBJ_GETTER(Konan_WeakReferenceCounterLegacyMM_get, ObjHeader* counter) {
-  ObjHeader** referredAddress = &asWeakReferenceCounter(counter)->referred;
+    ObjHeader** referredAddress = &asWeakReferenceCounter(counter)->referred;
 #if KONAN_NO_THREADS
-  RETURN_OBJ(*referredAddress);
+    RETURN_OBJ(*referredAddress);
 #else
-  auto* weakCounter = asWeakReferenceCounter(counter);
-  RETURN_RESULT_OF(ReadHeapRefLocked, referredAddress,  &weakCounter->lock,  &weakCounter->cookie);
+    auto* weakCounter = asWeakReferenceCounter(counter);
+    RETURN_RESULT_OF(ReadHeapRefLocked, referredAddress,  &weakCounter->lock,  &weakCounter->cookie);
 #endif
 }
 
diff --git a/kotlin-native/runtime/src/main/cpp/CompilerConstants.hpp b/kotlin-native/runtime/src/main/cpp/CompilerConstants.hpp
index 9a17c31..d5b101e 100644
--- a/kotlin-native/runtime/src/main/cpp/CompilerConstants.hpp
+++ b/kotlin-native/runtime/src/main/cpp/CompilerConstants.hpp
@@ -37,6 +37,7 @@
 extern "C" const int32_t Kotlin_runtimeAssertsMode;
 extern "C" const char* const Kotlin_runtimeLogs;
 extern "C" const int32_t Kotlin_gcSchedulerType;
+extern "C" const int32_t Kotlin_concurrentWeakSweep;
 extern "C" const int32_t Kotlin_freezingEnabled;
 extern "C" const int32_t Kotlin_freezingChecksEnabled;
 
@@ -106,6 +107,10 @@
     return static_cast<compiler::GCSchedulerType>(Kotlin_gcSchedulerType);
 }
 
+ALWAYS_INLINE inline bool concurrentWeakSweep() noexcept {
+    return Kotlin_concurrentWeakSweep != 0;
+}
+
 
 WorkerExceptionHandling workerExceptionHandling() noexcept;
 DestroyRuntimeMode destroyRuntimeMode() noexcept;
diff --git a/kotlin-native/runtime/src/mm/cpp/SpecialRefRegistry.hpp b/kotlin-native/runtime/src/mm/cpp/SpecialRefRegistry.hpp
index fed207f..5ca0a22 100644
--- a/kotlin-native/runtime/src/mm/cpp/SpecialRefRegistry.hpp
+++ b/kotlin-native/runtime/src/mm/cpp/SpecialRefRegistry.hpp
@@ -7,6 +7,7 @@
 
 #include <atomic>
 
+#include "GC.hpp"
 #include "Memory.h"
 #include "RawPtr.hpp"
 #include "ThreadRegistry.hpp"
@@ -101,7 +102,7 @@
         OBJ_GETTER0(tryRef) noexcept {
             AssertThreadState(ThreadState::kRunnable);
             // TODO: Weak read barrier with CMS.
-            RETURN_OBJ(obj_);
+            RETURN_RESULT_OF(kotlin::gc::tryRef, obj_);
         }
 
         void retainRef() noexcept {
diff --git a/kotlin-native/runtime/src/test_support/cpp/CompilerGenerated.cpp b/kotlin-native/runtime/src/test_support/cpp/CompilerGenerated.cpp
index de869ab..1651f48 100644
--- a/kotlin-native/runtime/src/test_support/cpp/CompilerGenerated.cpp
+++ b/kotlin-native/runtime/src/test_support/cpp/CompilerGenerated.cpp
@@ -70,6 +70,7 @@
 extern const int32_t Kotlin_runtimeAssertsMode = static_cast<int32_t>(kotlin::compiler::RuntimeAssertsMode::kPanic);
 extern const char* const Kotlin_runtimeLogs = nullptr;
 extern const int32_t Kotlin_gcSchedulerType = static_cast<int32_t>(kotlin::compiler::GCSchedulerType::kDisabled);
+extern const int32_t Kotlin_concurrentWeakSweep = 1;
 extern const int32_t Kotlin_freezingChecksEnabled = 1;
 extern const int32_t Kotlin_freezingEnabled = 1;