Skip to content
Merged
Show file tree
Hide file tree
Changes from 10 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions src/coreclr/nativeaot/Runtime/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,12 @@ set(COMMON_RUNTIME_SOURCES
${CLR_SRC_NATIVE_DIR}/minipal/xoshiro128pp.c
)

# The async-safe thread map relies on GCC-style atomic builtins and per-thread OS ids,
# so it is compiled only for Unix targets and excluded on WASM (presumably because
# signal-based thread activation is not used there -- TODO confirm).
if (CLR_CMAKE_TARGET_UNIX AND NOT CLR_CMAKE_TARGET_ARCH_WASM)
list(APPEND COMMON_RUNTIME_SOURCES
${RUNTIME_DIR}/asyncsafethreadmap.cpp
)
endif()

set(SERVER_GC_SOURCES
${GC_DIR}/gceesvr.cpp
${GC_DIR}/gcsvr.cpp
Expand Down
20 changes: 20 additions & 0 deletions src/coreclr/nativeaot/Runtime/threadstore.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,8 @@
#include "TargetPtrs.h"
#include "yieldprocessornormalized.h"
#include <minipal/time.h>
#include <minipal/thread.h>
#include "asyncsafethreadmap.h"

#include "slist.inl"

Expand Down Expand Up @@ -143,6 +145,14 @@ void ThreadStore::AttachCurrentThread(bool fAcquireThreadStoreLock)
pAttachingThread->m_ThreadStateFlags = Thread::TSF_Attached;

pTS->m_ThreadList.PushHead(pAttachingThread);

#if defined(TARGET_UNIX) && !defined(TARGET_WASM)
if (!InsertThreadIntoAsyncSafeMap(pAttachingThread->m_threadId, pAttachingThread))
{
ASSERT_UNCONDITIONALLY("Failed to insert thread into async-safe map due to OOM.");
RhFailFast();
}
#endif // TARGET_UNIX && !TARGET_WASM
}

// static
Expand Down Expand Up @@ -188,6 +198,9 @@ void ThreadStore::DetachCurrentThread()
pTS->m_ThreadList.RemoveFirst(pDetachingThread);
// tidy up GC related stuff (release allocation context, etc..)
pDetachingThread->Detach();
#if defined(TARGET_UNIX) && !defined(TARGET_WASM)
RemoveThreadFromAsyncSafeMap(pDetachingThread->m_threadId, pDetachingThread);
#endif
}

// post-mortem clean up.
Expand Down Expand Up @@ -352,6 +365,13 @@ EXTERN_C RuntimeThreadLocals* RhpGetThread()
return &tls_CurrentThread;
}

#if defined(TARGET_UNIX) && !defined(TARGET_WASM)
// Returns the Thread attached to the current OS thread, or NULL if this thread was
// never attached (or has not yet been published to the map). The lookup is lock-free
// and keyed by the uncached OS thread id, which is what makes it usable from contexts
// where the regular TLS-based GetCurrentThreadIfAvailable is not safe (e.g. signal
// handlers -- NOTE(review): the async-signal-safety rationale lives with the map itself).
Thread * ThreadStore::GetCurrentThreadIfAvailableAsyncSafe()
{
    // static_cast rather than a C-style cast: the map erases the Thread* to void*.
    return static_cast<Thread*>(FindThreadInAsyncSafeMap(minipal_get_current_thread_id_no_cache()));
}
#endif // TARGET_UNIX && !TARGET_WASM

#endif // !DACCESS_COMPILE

#ifdef _WIN32
Expand Down
1 change: 1 addition & 0 deletions src/coreclr/nativeaot/Runtime/threadstore.h
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@ class ThreadStore
static Thread * RawGetCurrentThread();
static Thread * GetCurrentThread();
static Thread * GetCurrentThreadIfAvailable();
static Thread * GetCurrentThreadIfAvailableAsyncSafe();
static PTR_Thread GetSuspendingThread();
static void AttachCurrentThread();
static void AttachCurrentThread(bool fAcquireThreadStoreLock);
Expand Down
30 changes: 15 additions & 15 deletions src/coreclr/nativeaot/Runtime/unix/PalUnix.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1017,24 +1017,24 @@ static struct sigaction g_previousActivationHandler;

static void ActivationHandler(int code, siginfo_t* siginfo, void* context)
{
// Only accept activations from the current process
if (siginfo->si_pid == getpid()
Thread* pThread = ThreadStore::GetCurrentThreadIfAvailableAsyncSafe();
if (pThread)
{
// Only accept activations from the current process
if (siginfo->si_pid == getpid()
#ifdef HOST_APPLE
// On Apple platforms si_pid is sometimes 0. It was confirmed by Apple to be expected, as the si_pid is tracked at the process level. So when multiple
// signals are in flight in the same process at the same time, it may be overwritten / zeroed.
|| siginfo->si_pid == 0
// On Apple platforms si_pid is sometimes 0. It was confirmed by Apple to be expected, as the si_pid is tracked at the process level. So when multiple
// signals are in flight in the same process at the same time, it may be overwritten / zeroed.
|| siginfo->si_pid == 0
#endif
)
{
// Make sure that errno is not modified
int savedErrNo = errno;
Thread::HijackCallback((NATIVE_CONTEXT*)context, NULL);
errno = savedErrNo;
}
)
{
// Make sure that errno is not modified
int savedErrNo = errno;
Thread::HijackCallback((NATIVE_CONTEXT*)context, pThread);
errno = savedErrNo;
}

Thread* pThread = ThreadStore::GetCurrentThreadIfAvailable();
if (pThread)
{
pThread->SetActivationPending(false);
}

Expand Down
24 changes: 11 additions & 13 deletions src/coreclr/pal/src/exception/signal.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -936,22 +936,20 @@ static void inject_activation_handler(int code, siginfo_t *siginfo, void *contex
CONTEXTToNativeContext(&winContext, ucontext);
}
}

// Call the original handler when it is not ignored or default (terminate).
if (g_previous_activation.sa_flags & SA_SIGINFO)
{
_ASSERTE(g_previous_activation.sa_sigaction != NULL);
g_previous_activation.sa_sigaction(code, siginfo, context);
}
else
{
// Call the original handler when it is not ignored or default (terminate).
if (g_previous_activation.sa_flags & SA_SIGINFO)
{
_ASSERTE(g_previous_activation.sa_sigaction != NULL);
g_previous_activation.sa_sigaction(code, siginfo, context);
}
else
if (g_previous_activation.sa_handler != SIG_IGN &&
g_previous_activation.sa_handler != SIG_DFL)
{
if (g_previous_activation.sa_handler != SIG_IGN &&
g_previous_activation.sa_handler != SIG_DFL)
{
_ASSERTE(g_previous_activation.sa_handler != NULL);
g_previous_activation.sa_handler(code);
}
_ASSERTE(g_previous_activation.sa_handler != NULL);
g_previous_activation.sa_handler(code);
}
}
}
Expand Down
126 changes: 126 additions & 0 deletions src/coreclr/runtime/asyncsafethreadmap.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,126 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

#include "common.h"

#include <cstddef>
#include <new>

#include "asyncsafethreadmap.h"

// Async safe lock free thread map for use in signal handlers

// One slot of the map: an OS thread id plus the runtime thread object registered for it.
// An osThread of 0 marks a free slot, so thread id 0 can never be stored.
struct ThreadEntry
{
    size_t osThread;
    void* pThread;
};

// Number of slots per segment. Slots are addressed by (osThread % MAX_THREADS_IN_SEGMENT)
// with linear probing, so a mostly-empty segment resolves a lookup in O(1).
static constexpr size_t MAX_THREADS_IN_SEGMENT = 256;

// Segments form a singly linked list that only ever grows. Segments are never freed,
// which is part of what makes walking the list safe from a signal handler.
struct ThreadSegment
{
    ThreadEntry entries[MAX_THREADS_IN_SEGMENT];
    ThreadSegment* pNext;
};

static ThreadSegment* s_pAsyncSafeThreadMapHead = nullptr;

// Insert a thread into the async-safe map.
// * osThread - The OS thread ID to insert. Must not be 0 (0 marks an empty slot).
// * pThread - A pointer to the thread object to associate with the OS thread ID.
// * return - true if the insertion was successful, false otherwise (OOM).
//
// May allocate, so this must NOT be called from a signal handler. A concurrent reader
// can observe the slot's osThread before the pThread store below becomes visible, so
// readers must tolerate a NULL pThread for a matching id.
bool InsertThreadIntoAsyncSafeMap(size_t osThread, void* pThread)
{
    size_t startIndex = osThread % MAX_THREADS_IN_SEGMENT;

    ThreadSegment** ppSegment = &s_pAsyncSafeThreadMapHead;
    ThreadSegment* pSegment = __atomic_load_n(ppSegment, __ATOMIC_ACQUIRE);
    while (true)
    {
        if (pSegment == nullptr)
        {
            // Reached the end of the list without finding a free slot - append a new
            // segment. Value-initialization zeroes all entries and the pNext link.
            ThreadSegment* pNewSegment = new (std::nothrow) ThreadSegment();
            if (pNewSegment == nullptr)
            {
                // Memory allocation failed
                return false;
            }

            ThreadSegment* pExpected = nullptr;
            if (!__atomic_compare_exchange_n(
                    ppSegment,
                    &pExpected,
                    pNewSegment,
                    false /* weak */,
                    __ATOMIC_ACQ_REL /* success_memorder */,
                    __ATOMIC_ACQUIRE /* failure_memorder */))
            {
                // Another thread linked a segment here first - discard ours and use
                // theirs. The acquire on failure is required because we immediately
                // scan the entries of the segment that thread published.
                delete pNewSegment;
                pNewSegment = pExpected;
            }

            pSegment = pNewSegment;
        }

        for (size_t i = 0; i < MAX_THREADS_IN_SEGMENT; i++)
        {
            size_t index = (startIndex + i) % MAX_THREADS_IN_SEGMENT;

            // Claim a free slot (osThread == 0) by CAS-ing our id into it.
            size_t expected = 0;
            if (__atomic_compare_exchange_n(
                    &pSegment->entries[index].osThread,
                    &expected,
                    osThread,
                    false /* weak */,
                    __ATOMIC_RELEASE /* success_memorder */,
                    __ATOMIC_RELAXED /* failure_memorder */))
            {
                // Slot claimed; publish the thread pointer. Release pairs with the
                // acquire load in FindThreadInAsyncSafeMap.
                __atomic_store_n(&pSegment->entries[index].pThread, pThread, __ATOMIC_RELEASE);
                return true;
            }
        }

        // Segment full - move to (or create) the next one.
        ppSegment = &pSegment->pNext;
        pSegment = __atomic_load_n(ppSegment, __ATOMIC_ACQUIRE);
    }
}

// Remove a thread from the async-safe map. No-op if not present.
// * osThread - The OS thread ID, used only to compute the starting probe slot.
// * pThread - The thread object pointer; entries are matched by this pointer.
void RemoveThreadFromAsyncSafeMap(size_t osThread, void* pThread)
{
    size_t startIndex = osThread % MAX_THREADS_IN_SEGMENT;

    ThreadSegment* pSegment = __atomic_load_n(&s_pAsyncSafeThreadMapHead, __ATOMIC_ACQUIRE);
    while (pSegment != nullptr)
    {
        for (size_t i = 0; i < MAX_THREADS_IN_SEGMENT; i++)
        {
            size_t index = (startIndex + i) % MAX_THREADS_IN_SEGMENT;
            if (__atomic_load_n(&pSegment->entries[index].pThread, __ATOMIC_RELAXED) == pThread)
            {
                // Found the entry. Clear the pointer first so a concurrent lookup that
                // still matches on osThread reads NULL rather than a dangling Thread*,
                // then free the slot by clearing osThread.
                __atomic_store_n(&pSegment->entries[index].pThread, static_cast<void*>(nullptr), __ATOMIC_RELEASE);
                __atomic_store_n(&pSegment->entries[index].osThread, (size_t)0, __ATOMIC_RELEASE);
                return;
            }
        }
        pSegment = __atomic_load_n(&pSegment->pNext, __ATOMIC_ACQUIRE);
    }
}

// Find a thread in the async-safe map. Lock-free and allocation-free, so it is safe
// to call from a signal handler.
// * osThread - The OS thread ID to search for.
// * return - The associated thread object pointer, or NULL if not found. Callers must
//            also tolerate NULL for an id whose insertion is still in flight.
void *FindThreadInAsyncSafeMap(size_t osThread)
{
    size_t startIndex = osThread % MAX_THREADS_IN_SEGMENT;
    ThreadSegment* pSegment = __atomic_load_n(&s_pAsyncSafeThreadMapHead, __ATOMIC_ACQUIRE);
    while (pSegment != nullptr)
    {
        for (size_t i = 0; i < MAX_THREADS_IN_SEGMENT; i++)
        {
            size_t index = (startIndex + i) % MAX_THREADS_IN_SEGMENT;
            // Acquire pairs with the release CAS in InsertThreadIntoAsyncSafeMap.
            if (__atomic_load_n(&pSegment->entries[index].osThread, __ATOMIC_ACQUIRE) == osThread)
            {
                // Atomic acquire load: the pointer is concurrently written by
                // Insert/Remove; a plain read here would be a data race.
                return __atomic_load_n(&pSegment->entries[index].pThread, __ATOMIC_ACQUIRE);
            }
        }
        pSegment = __atomic_load_n(&pSegment->pNext, __ATOMIC_ACQUIRE);
    }
    return nullptr;
}
27 changes: 27 additions & 0 deletions src/coreclr/runtime/asyncsafethreadmap.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

#ifndef __ASYNCSAFETHREADMAP_H__
#define __ASYNCSAFETHREADMAP_H__

#if defined(TARGET_UNIX) && !defined(TARGET_WASM)

// Lock-free map from OS thread id to runtime thread object, for use in signal handlers.
// Lookup performs no locking or allocation; insertion may allocate and therefore must
// not be called from a signal handler.

// Insert a thread into the async-safe map.
// * osThread - The OS thread ID to insert. Must not be 0 (0 marks an empty slot).
// * pThread - A pointer to the thread object to associate with the OS thread ID.
// * return true if the insertion was successful, false otherwise (OOM).
bool InsertThreadIntoAsyncSafeMap(size_t osThread, void* pThread);

// Remove a thread from the async-safe map. No-op if the thread is not present.
// * osThread - The OS thread ID to remove (used to locate the probe start slot).
// * pThread - A pointer to the thread object associated with the OS thread ID;
//             entries are matched by this pointer.
void RemoveThreadFromAsyncSafeMap(size_t osThread, void* pThread);

// Find a thread in the async-safe map. Safe to call from a signal handler.
// * osThread - The OS thread ID to search for.
// * return - A pointer to the thread object associated with the OS thread ID, or NULL if
//            not found (callers must also tolerate NULL while an insert is in flight).
void* FindThreadInAsyncSafeMap(size_t osThread);

#endif // TARGET_UNIX && !TARGET_WASM

#endif // __ASYNCSAFETHREADMAP_H__
12 changes: 12 additions & 0 deletions src/coreclr/vm/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -380,6 +380,12 @@ set(VM_SOURCES_WKS
${VM_SOURCES_GDBJIT}
)

# Share the async-safe thread map source with the VM build on Unix, mirroring the
# NativeAOT runtime's CMakeLists guard (not built for WASM).
if (CLR_CMAKE_TARGET_UNIX AND NOT CLR_CMAKE_TARGET_ARCH_WASM)
list(APPEND VM_SOURCES_WKS
${RUNTIME_DIR}/asyncsafethreadmap.cpp
)
endif()

# coreclr needs to compile codeman.cpp differently depending on flavor (i.e. dll vs. static lib))
list(REMOVE_ITEM VM_SOURCES_WKS codeman.cpp)

Expand Down Expand Up @@ -476,6 +482,12 @@ set(VM_HEADERS_WKS
${VM_HEADERS_GDBJIT}
)

# Matching header entry for the shared async-safe thread map source added to
# VM_SOURCES_WKS under the same Unix/non-WASM guard.
if (CLR_CMAKE_TARGET_UNIX AND NOT CLR_CMAKE_TARGET_ARCH_WASM)
list(APPEND VM_HEADERS_WKS
${RUNTIME_DIR}/asyncsafethreadmap.h
)
endif()

set(GC_SOURCES_WKS
${GC_SOURCES_DAC_AND_WKS_COMMON}
../gc/gceventstatus.cpp
Expand Down
25 changes: 23 additions & 2 deletions src/coreclr/vm/codeman.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -858,7 +858,7 @@ IJitManager::IJitManager()
// been stopped when we suspend the EE so they won't be touching an element that is about to be deleted.
// However for pre-emptive mode threads, they could be stalled right on top of the element we want
// to delete, so we need to apply the reader lock to them and wait for them to drain.
ExecutionManager::ScanFlag ExecutionManager::GetScanFlags()
ExecutionManager::ScanFlag ExecutionManager::GetScanFlags(Thread *pThread)
{
CONTRACTL {
NOTHROW;
Expand All @@ -869,7 +869,10 @@ ExecutionManager::ScanFlag ExecutionManager::GetScanFlags()
#if !defined(DACCESS_COMPILE)


Thread *pThread = GetThreadNULLOk();
if (!pThread)
{
pThread = GetThreadNULLOk();
}

if (!pThread)
return ScanNoReaderLock;
Expand Down Expand Up @@ -5229,6 +5232,24 @@ BOOL ExecutionManager::IsManagedCode(PCODE currentPC)
return IsManagedCodeWorker(currentPC, &lockState);
}

//**************************************************************************
// Returns whether currentPC is in managed code, without acquiring the range-section
// reader lock. The caller is responsible for guaranteeing that no writer can be
// active (i.e. that the lock is effectively held). Returns FALSE for a NULL PC.
BOOL ExecutionManager::IsManagedCodeNoLock(PCODE currentPC)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    if (currentPC == (PCODE)NULL)
    {
        return FALSE;
    }

    // A thread whose scan flag demands the reader lock must not use this entry point.
    _ASSERTE(GetScanFlags() != ScanReaderLock);

    // Since ScanReaderLock is not set, treat the reader lock as effectively taken.
    RangeSectionLockState rangeSectionLockState = RangeSectionLockState::ReaderLocked;
    return IsManagedCodeWorker(currentPC, &rangeSectionLockState);
}

//**************************************************************************
NOINLINE // Make sure that the slow path with lock won't affect the fast path
BOOL ExecutionManager::IsManagedCodeWithLock(PCODE currentPC)
Expand Down
6 changes: 5 additions & 1 deletion src/coreclr/vm/codeman.h
Original file line number Diff line number Diff line change
Expand Up @@ -2293,11 +2293,15 @@ class ExecutionManager
};

// Returns default scan flag for current thread
static ScanFlag GetScanFlags();
static ScanFlag GetScanFlags(Thread *pThread = NULL);

// Returns whether currentPC is in managed code. Returns false for jump stubs on WIN64.
static BOOL IsManagedCode(PCODE currentPC);

// Returns whether currentPC is in managed code. Returns false for jump stubs on WIN64.
// Does not acquire the reader lock. Caller must ensure it is safe.
static BOOL IsManagedCodeNoLock(PCODE currentPC);

// Returns true if currentPC is ready to run codegen
static BOOL IsReadyToRunCode(PCODE currentPC);

Expand Down
Loading
Loading