Skip to content
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 19 additions & 0 deletions src/coreclr/nativeaot/Runtime/threadstore.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
#include "TargetPtrs.h"
#include "yieldprocessornormalized.h"
#include <minipal/time.h>
#include <minipal/thread.h>

#include "slist.inl"

Expand Down Expand Up @@ -143,6 +144,14 @@ void ThreadStore::AttachCurrentThread(bool fAcquireThreadStoreLock)
pAttachingThread->m_ThreadStateFlags = Thread::TSF_Attached;

pTS->m_ThreadList.PushHead(pAttachingThread);

#ifdef TARGET_UNIX
if (!minipal_insert_thread_into_async_safe_map(pAttachingThread->m_threadId, pAttachingThread))
{
ASSERT_UNCONDITIONALLY("Failed to insert thread into async-safe map due to OOM.");
RhFailFast();
}
#endif
}

// static
Expand Down Expand Up @@ -188,6 +197,9 @@ void ThreadStore::DetachCurrentThread()
pTS->m_ThreadList.RemoveFirst(pDetachingThread);
// tidy up GC related stuff (release allocation context, etc..)
pDetachingThread->Detach();
#ifdef TARGET_UNIX
minipal_remove_thread_from_async_safe_map(pDetachingThread->m_threadId, pDetachingThread);
#endif
}

// post-mortem clean up.
Expand Down Expand Up @@ -352,6 +364,13 @@ EXTERN_C RuntimeThreadLocals* RhpGetThread()
return &tls_CurrentThread;
}

#ifdef TARGET_UNIX
// Returns the Thread registered for the current OS thread in minipal's
// lock-free async-safe map, or NULL if this thread was never attached.
// Intended for signal-handler context where ordinary TLS-based lookup
// may not be safe.
Thread * ThreadStore::GetCurrentThreadIfAvailableAsyncSafe()
{
    auto osThreadId = minipal_get_current_thread_id_no_cache();
    void* pMapped = minipal_find_thread_in_async_safe_map(osThreadId);
    return static_cast<Thread*>(pMapped);
}
#endif // TARGET_UNIX

#endif // !DACCESS_COMPILE

#ifdef _WIN32
Expand Down
1 change: 1 addition & 0 deletions src/coreclr/nativeaot/Runtime/threadstore.h
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@ class ThreadStore
static Thread * RawGetCurrentThread();
static Thread * GetCurrentThread();
static Thread * GetCurrentThreadIfAvailable();
static Thread * GetCurrentThreadIfAvailableAsyncSafe();
static PTR_Thread GetSuspendingThread();
static void AttachCurrentThread();
static void AttachCurrentThread(bool fAcquireThreadStoreLock);
Expand Down
2 changes: 2 additions & 0 deletions src/coreclr/nativeaot/Runtime/threadstore.inl
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ inline Thread * ThreadStore::RawGetCurrentThread()
return (Thread *) &tls_CurrentThread;
}

#if defined(TARGET_UNIX) && !defined(DACCESS_COMPILE)
// static
inline Thread * ThreadStore::GetCurrentThread()
{
Expand All @@ -24,6 +25,7 @@ inline Thread * ThreadStore::GetCurrentThread()
ASSERT(pCurThread->IsInitialized());
return pCurThread;
}
#endif // TARGET_UNIX && !DACCESS_COMPILE

// static
inline Thread * ThreadStore::GetCurrentThreadIfAvailable()
Expand Down
2 changes: 1 addition & 1 deletion src/coreclr/nativeaot/Runtime/unix/PalUnix.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1032,7 +1032,7 @@ static void ActivationHandler(int code, siginfo_t* siginfo, void* context)
errno = savedErrNo;
}

Thread* pThread = ThreadStore::GetCurrentThreadIfAvailable();
Thread* pThread = ThreadStore::GetCurrentThreadIfAvailableAsyncSafe();
if (pThread)
{
pThread->SetActivationPending(false);
Expand Down
25 changes: 23 additions & 2 deletions src/coreclr/vm/codeman.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -858,7 +858,7 @@ IJitManager::IJitManager()
// been stopped when we suspend the EE so they won't be touching an element that is about to be deleted.
// However for pre-emptive mode threads, they could be stalled right on top of the element we want
// to delete, so we need to apply the reader lock to them and wait for them to drain.
ExecutionManager::ScanFlag ExecutionManager::GetScanFlags()
ExecutionManager::ScanFlag ExecutionManager::GetScanFlags(Thread *pThread)
{
CONTRACTL {
NOTHROW;
Expand All @@ -869,7 +869,10 @@ ExecutionManager::ScanFlag ExecutionManager::GetScanFlags()
#if !defined(DACCESS_COMPILE)


Thread *pThread = GetThreadNULLOk();
if (!pThread)
{
pThread = GetThreadNULLOk();
}

if (!pThread)
return ScanNoReaderLock;
Expand Down Expand Up @@ -5229,6 +5232,24 @@ BOOL ExecutionManager::IsManagedCode(PCODE currentPC)
return IsManagedCodeWorker(currentPC, &lockState);
}

//**************************************************************************
// Variant of IsManagedCode that does NOT acquire the range-section reader
// lock. The caller must guarantee the lock is unnecessary for the current
// thread (i.e. GetScanFlags() would not report ScanReaderLock); otherwise
// the range-section walk could race with code-heap deletion.
BOOL ExecutionManager::IsManagedCodeNoLock(PCODE currentPC)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    // A null PC is trivially not managed code.
    if (currentPC == (PCODE)NULL)
        return FALSE;

    // NOTE(review): GetScanFlags() with no argument re-reads the current
    // Thread via GetThreadNULLOk(); if this method can run in a signal
    // handler, confirm that TLS access is safe there (the caller-supplied
    // pThread overload exists precisely to avoid it).
    _ASSERTE(GetScanFlags() != ScanReaderLock);

    // Since ScanReaderLock is not set, then we must assume that the ReaderLock is effectively taken.
    RangeSectionLockState lockState = RangeSectionLockState::ReaderLocked;
    return IsManagedCodeWorker(currentPC, &lockState);
}

//**************************************************************************
NOINLINE // Make sure that the slow path with lock won't affect the fast path
BOOL ExecutionManager::IsManagedCodeWithLock(PCODE currentPC)
Expand Down
6 changes: 5 additions & 1 deletion src/coreclr/vm/codeman.h
Original file line number Diff line number Diff line change
Expand Up @@ -2293,11 +2293,15 @@ class ExecutionManager
};

// Returns default scan flag for current thread
static ScanFlag GetScanFlags();
static ScanFlag GetScanFlags(Thread *pThread = NULL);

// Returns whether currentPC is in managed code. Returns false for jump stubs on WIN64.
static BOOL IsManagedCode(PCODE currentPC);

// Returns whether currentPC is in managed code. Returns false for jump stubs on WIN64.
// Does not acquire the reader lock. Caller must ensure it is safe.
static BOOL IsManagedCodeNoLock(PCODE currentPC);

// Returns true if currentPC is ready to run codegen
static BOOL IsReadyToRunCode(PCODE currentPC);

Expand Down
23 changes: 23 additions & 0 deletions src/coreclr/vm/threads.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@
#include "vmholder.h"
#include "exceptmacros.h"
#include "minipal/time.h"
#include "minipal/thread.h"

#ifdef FEATURE_COMINTEROP
#include "runtimecallablewrapper.h"
Expand Down Expand Up @@ -62,6 +63,13 @@
#include "interpexec.h"
#endif // FEATURE_INTERPRETER

#ifdef TARGET_UNIX
// Looks up the current Thread in minipal's async-safe, lock-free thread
// map, keyed by the (uncached) OS thread id. Returns NULL when the thread
// was never registered (see SetThread, which maintains the map entries).
Thread* GetThreadAsyncSafe()
{
    return static_cast<Thread*>(
        minipal_find_thread_in_async_safe_map(minipal_get_current_thread_id_no_cache()));
}
#endif // TARGET_UNIX

static const PortableTailCallFrame g_sentinelTailCallFrame = { NULL, NULL };

TailCallTls::TailCallTls()
Expand Down Expand Up @@ -371,6 +379,21 @@ void SetThread(Thread* t)

// Clear or set the app domain to the one domain based on if the thread is being nulled out or set
t_CurrentThreadInfo.m_pAppDomain = t == NULL ? NULL : AppDomain::GetCurrentDomain();

#ifdef TARGET_UNIX
if (t != NULL)
{
if (!minipal_insert_thread_into_async_safe_map(t->GetOSThreadId64(), t))
{
// TODO: can we handle this OOM more gracefully?
EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, W("Failed to insert thread into async-safe map due to OOM."));
}
}
else if (origThread != NULL)
{
minipal_remove_thread_from_async_safe_map(origThread->GetOSThreadId64(), origThread);
}
#endif
}

BOOL Thread::Alert ()
Expand Down
4 changes: 4 additions & 0 deletions src/coreclr/vm/threads.h
Original file line number Diff line number Diff line change
Expand Up @@ -5512,6 +5512,10 @@ class StackWalkerWalkingThreadHolder
Thread* m_PreviousValue;
};

#ifdef TARGET_UNIX
EXTERN_C Thread* GetThreadAsyncSafe();
#endif

#ifndef DACCESS_COMPILE
#if defined(TARGET_WINDOWS) && defined(TARGET_AMD64)
EXTERN_C void STDCALL ClrRestoreNonvolatileContextWorker(PCONTEXT ContextRecord, DWORD64 ssp);
Expand Down
8 changes: 5 additions & 3 deletions src/coreclr/vm/threadsuspend.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5731,16 +5731,18 @@ void ThreadSuspend::SuspendEE(SUSPEND_REASON reason)
// It is unsafe to use blocking APIs or allocate in this method.
BOOL CheckActivationSafePoint(SIZE_T ip)
{
Thread *pThread = GetThreadNULLOk();
Thread *pThread = GetThreadAsyncSafe();
_ASSERTE(pThread != NULL);

// The criteria for safe activation is to be running managed code.
// Also we are not interested in handling interruption if we are already in preemptive mode nor if we are single stepping
BOOL isActivationSafePoint = pThread != NULL &&
(pThread->m_StateNC & Thread::TSNC_DebuggerIsStepping) == 0 &&
pThread->PreemptiveGCDisabled() &&
ExecutionManager::IsManagedCode(ip);
(ExecutionManager::GetScanFlags(pThread) != ExecutionManager::ScanReaderLock) &&
ExecutionManager::IsManagedCodeNoLock(ip);

if (!isActivationSafePoint)
if (!isActivationSafePoint && pThread != NULL)
{
pThread->m_hasPendingActivation = false;
}
Expand Down
1 change: 1 addition & 0 deletions src/native/minipal/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ set(SOURCES
random.c
debugger.c
strings.c
thread.c
time.c
unicodedata.c
utf8.c
Expand Down
130 changes: 130 additions & 0 deletions src/native/minipal/thread.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,130 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

#define _GNU_SOURCE

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#include "utils.h"
#include "thread.h"

#ifdef TARGET_UNIX

// Async-safe, lock-free map from OS thread id to runtime Thread pointer,
// for use in signal handlers where TLS access and locks are not safe.
//
// Layout: a grow-only singly-linked list of fixed-size segments. Entries
// are claimed with compare-and-swap and segments are never freed, so the
// find path can traverse the structure concurrently with writers without
// taking any lock and without allocating.

struct ThreadEntry
{
    size_t osThread;  // 0 = free slot; THREAD_ENTRY_RESERVED = claimed but pThread not yet published
    void* pThread;    // valid only while osThread holds a real thread id
};

#define MAX_THREADS_IN_SEGMENT 256

// Sentinel used to claim a slot before its pThread is published, so a
// reader that observes a matching id is guaranteed to also observe the
// pointer. NOTE(review): assumes (size_t)-1 is never a valid OS thread id
// on any supported platform - confirm.
#define THREAD_ENTRY_RESERVED ((size_t)~(size_t)0)

struct ThreadSegment
{
    struct ThreadEntry entries[MAX_THREADS_IN_SEGMENT];
    struct ThreadSegment* pNext;
};

// Head of the segment list. Segments are only ever appended, never removed.
static struct ThreadSegment *s_pAsyncSafeThreadMapHead = NULL;

// Inserts the (osThread, pThread) pair into the map. osThread must be a
// real (nonzero) thread id. Returns false only when a new segment is
// needed and allocation fails. Safe against concurrent insert/remove/find,
// but NOT itself async-signal-safe (it may call calloc) - only the find
// path is.
bool minipal_insert_thread_into_async_safe_map(size_t osThread, void* pThread)
{
    size_t startIndex = osThread % MAX_THREADS_IN_SEGMENT;

    struct ThreadSegment** ppSegment = &s_pAsyncSafeThreadMapHead;
    struct ThreadSegment* pSegment = __atomic_load_n(ppSegment, __ATOMIC_ACQUIRE);
    while (true)
    {
        if (pSegment == NULL)
        {
            // Need to append a new (zero-initialized) segment.
            struct ThreadSegment* pNewSegment = (struct ThreadSegment*)calloc(1, sizeof(struct ThreadSegment));
            if (pNewSegment == NULL)
            {
                // Memory allocation failed.
                return false;
            }

            struct ThreadSegment* pExpected = NULL;
            if (!__atomic_compare_exchange_n(
                    ppSegment,
                    &pExpected,
                    pNewSegment,
                    false /* weak */,
                    __ATOMIC_ACQ_REL /* success_memorder */,
                    __ATOMIC_ACQUIRE /* failure_memorder */))
            {
                // Another thread linked a segment first; on failure the CAS
                // stored the current value into pExpected - use it instead
                // of re-reading *ppSegment non-atomically.
                free(pNewSegment);
                pSegment = pExpected;
            }
            else
            {
                pSegment = pNewSegment;
            }
        }

        for (size_t i = 0; i < MAX_THREADS_IN_SEGMENT; i++)
        {
            size_t index = (startIndex + i) % MAX_THREADS_IN_SEGMENT;
            struct ThreadEntry* pEntry = &pSegment->entries[index];

            // Claim a free slot with a sentinel first; only after pThread
            // is stored do we publish the real id with release, so readers
            // that match the id also see the pointer.
            size_t expected = 0;
            if (__atomic_compare_exchange_n(
                    &pEntry->osThread,
                    &expected,
                    THREAD_ENTRY_RESERVED,
                    false /* weak */,
                    __ATOMIC_ACQUIRE /* success_memorder */,
                    __ATOMIC_RELAXED /* failure_memorder */))
            {
                pEntry->pThread = pThread;
                __atomic_store_n(&pEntry->osThread, osThread, __ATOMIC_RELEASE);
                return true;
            }
        }

        // Segment full - move to (or create) the next one.
        ppSegment = &pSegment->pNext;
        pSegment = __atomic_load_n(ppSegment, __ATOMIC_ACQUIRE);
    }
}

// Removes the entry for (osThread, pThread), freeing its slot for reuse.
// No-op if the pair is not present.
void minipal_remove_thread_from_async_safe_map(size_t osThread, void* pThread)
{
    size_t startIndex = osThread % MAX_THREADS_IN_SEGMENT;

    for (struct ThreadSegment* pSegment = __atomic_load_n(&s_pAsyncSafeThreadMapHead, __ATOMIC_ACQUIRE);
         pSegment != NULL;
         pSegment = __atomic_load_n(&pSegment->pNext, __ATOMIC_ACQUIRE))
    {
        for (size_t i = 0; i < MAX_THREADS_IN_SEGMENT; i++)
        {
            size_t index = (startIndex + i) % MAX_THREADS_IN_SEGMENT;
            struct ThreadEntry* pEntry = &pSegment->entries[index];
            if (pEntry->pThread == pThread &&
                __atomic_load_n(&pEntry->osThread, __ATOMIC_ACQUIRE) == osThread)
            {
                // Clear the pointer first so a concurrent reader that still
                // matches the id observes NULL rather than a stale Thread*.
                pEntry->pThread = NULL;
                __atomic_store_n(&pEntry->osThread, (size_t)0, __ATOMIC_RELEASE);
                return;
            }
        }
    }
}

// Returns the pointer registered for osThread, or NULL if not present.
// Async-signal-safe: no locks, no allocation, atomic reads only.
void *minipal_find_thread_in_async_safe_map(size_t osThread)
{
    size_t startIndex = osThread % MAX_THREADS_IN_SEGMENT;

    for (struct ThreadSegment* pSegment = __atomic_load_n(&s_pAsyncSafeThreadMapHead, __ATOMIC_ACQUIRE);
         pSegment != NULL;
         pSegment = __atomic_load_n(&pSegment->pNext, __ATOMIC_ACQUIRE))
    {
        for (size_t i = 0; i < MAX_THREADS_IN_SEGMENT; i++)
        {
            size_t index = (startIndex + i) % MAX_THREADS_IN_SEGMENT;
            // Acquire pairs with the release store in insert: a matching id
            // guarantees the corresponding pThread store is visible.
            if (__atomic_load_n(&pSegment->entries[index].osThread, __ATOMIC_ACQUIRE) == osThread)
            {
                return pSegment->entries[index].pThread;
            }
        }
    }
    return NULL;
}

#endif // TARGET_UNIX
Loading
Loading