diff --git a/src/coreclr/gc/env/gcenv.base.h b/src/coreclr/gc/env/gcenv.base.h index dccf7e12fc7b54..623cee04134508 100644 --- a/src/coreclr/gc/env/gcenv.base.h +++ b/src/coreclr/gc/env/gcenv.base.h @@ -388,11 +388,6 @@ inline void* ALIGN_DOWN(void* ptr, size_t alignment) return reinterpret_cast(ALIGN_DOWN(as_size_t, alignment)); } -inline int GetRandomInt(int max) -{ - return rand() % max; -} - typedef struct _PROCESSOR_NUMBER { uint16_t Group; uint8_t Number; diff --git a/src/coreclr/inc/random.h b/src/coreclr/inc/random.h index 0bd2164cbb1098..99936c6177b0a3 100644 --- a/src/coreclr/inc/random.h +++ b/src/coreclr/inc/random.h @@ -13,10 +13,7 @@ // 2) It can have multiple instantiations with different seeds // 3) It behaves the same regardless of whether we build with VC++ or GCC // -// If you are working in the VM, we have a convenience method: code:GetRandomInt. This usess a thread-local -// Random instance if a Thread object is available, and otherwise falls back to a global instance -// with a spin-lock. -// +// If you are working in the VM, we have a convenience method: code:GetRandomInt, which uses a global Random instance protected by a spin lock.
#ifndef _CLRRANDOM_H_ #define _CLRRANDOM_H_ diff --git a/src/coreclr/vm/amd64/asmconstants.h b/src/coreclr/vm/amd64/asmconstants.h index e12f3e1eafd26e..47cca560d7bb11 100644 --- a/src/coreclr/vm/amd64/asmconstants.h +++ b/src/coreclr/vm/amd64/asmconstants.h @@ -108,21 +108,21 @@ ASMCONSTANTS_C_ASSERT(OFFSETOF__ComPlusCallInfo__m_pILStub #endif // FEATURE_COMINTEROP -#define OFFSETOF__Thread__m_fPreemptiveGCDisabled 0x0C +#define OFFSETOF__Thread__m_fPreemptiveGCDisabled 0x04 ASMCONSTANTS_C_ASSERT(OFFSETOF__Thread__m_fPreemptiveGCDisabled == offsetof(Thread, m_fPreemptiveGCDisabled)); #define Thread_m_fPreemptiveGCDisabled OFFSETOF__Thread__m_fPreemptiveGCDisabled -#define OFFSETOF__Thread__m_pFrame 0x10 +#define OFFSETOF__Thread__m_pFrame 0x08 ASMCONSTANTS_C_ASSERT(OFFSETOF__Thread__m_pFrame == offsetof(Thread, m_pFrame)); #define Thread_m_pFrame OFFSETOF__Thread__m_pFrame -#define OFFSET__Thread__m_alloc_context__alloc_ptr 0x58 +#define OFFSET__Thread__m_alloc_context__alloc_ptr 0x50 ASMCONSTANTS_C_ASSERT(OFFSET__Thread__m_alloc_context__alloc_ptr == offsetof(Thread, m_alloc_context) + offsetof(gc_alloc_context, alloc_ptr)); -#define OFFSET__Thread__m_alloc_context__alloc_limit 0x60 +#define OFFSET__Thread__m_alloc_context__alloc_limit 0x58 ASMCONSTANTS_C_ASSERT(OFFSET__Thread__m_alloc_context__alloc_limit == offsetof(Thread, m_alloc_context) + offsetof(gc_alloc_context, alloc_limit)); #define OFFSETOF__gc_alloc_context__alloc_ptr 0x0 diff --git a/src/coreclr/vm/appdomain.cpp b/src/coreclr/vm/appdomain.cpp index bb5d3d17e00534..12152db1c5b27c 100644 --- a/src/coreclr/vm/appdomain.cpp +++ b/src/coreclr/vm/appdomain.cpp @@ -2608,16 +2608,20 @@ void AppDomain::LoadDomainAssembly(DomainAssembly *pFile, #ifndef DACCESS_COMPILE -FileLoadLevel AppDomain::GetThreadFileLoadLevel() +thread_local LoadLevelLimiter* LoadLevelLimiter::t_currentLoadLevelLimiter = nullptr; + +namespace { - WRAPPER_NO_CONTRACT; - if (GetThread()->GetLoadLevelLimiter() == NULL) - return 
FILE_ACTIVE; - else - return (FileLoadLevel)(GetThread()->GetLoadLevelLimiter()->GetLoadLevel()-1); + FileLoadLevel GetCurrentFileLoadLevel() + { + WRAPPER_NO_CONTRACT; + if (LoadLevelLimiter::GetCurrent() == NULL) + return FILE_ACTIVE; + else + return (FileLoadLevel)(LoadLevelLimiter::GetCurrent()->GetLoadLevel()-1); + } } - Assembly *AppDomain::LoadAssembly(AssemblySpec* pIdentity, PEAssembly * pPEAssembly, FileLoadLevel targetLevel) @@ -2710,7 +2714,7 @@ DomainAssembly *AppDomain::LoadDomainAssemblyInternal(AssemblySpec* pIdentity, PRECONDITION(CheckPointer(pPEAssembly)); PRECONDITION(::GetAppDomain()==this); POSTCONDITION(CheckPointer(RETVAL)); - POSTCONDITION(RETVAL->GetLoadLevel() >= GetThreadFileLoadLevel() + POSTCONDITION(RETVAL->GetLoadLevel() >= GetCurrentFileLoadLevel() || RETVAL->GetLoadLevel() >= targetLevel); POSTCONDITION(RETVAL->CheckNoError(targetLevel)); INJECT_FAULT(COMPlusThrowOM();); @@ -2817,7 +2821,7 @@ DomainAssembly *AppDomain::LoadDomainAssembly(FileLoadLock *pLock, FileLoadLevel STANDARD_VM_CHECK; PRECONDITION(CheckPointer(pLock)); PRECONDITION(AppDomain::GetCurrentDomain() == this); - POSTCONDITION(RETVAL->GetLoadLevel() >= GetThreadFileLoadLevel() + POSTCONDITION(RETVAL->GetLoadLevel() >= GetCurrentFileLoadLevel() || RETVAL->GetLoadLevel() >= targetLevel); POSTCONDITION(RETVAL->CheckNoError(targetLevel)); } diff --git a/src/coreclr/vm/appdomain.hpp b/src/coreclr/vm/appdomain.hpp index 5924e4f4b9fb53..1c400154c7601d 100644 --- a/src/coreclr/vm/appdomain.hpp +++ b/src/coreclr/vm/appdomain.hpp @@ -43,7 +43,6 @@ class StringLiteralMap; class FrozenObjectHeapManager; class MngStdInterfacesInfo; class DomainAssembly; -class LoadLevelLimiter; class TypeEquivalenceHashTable; #ifdef FEATURE_COMINTEROP @@ -815,23 +814,19 @@ typedef FileLoadLock::Holder FileLoadLockHolder; typedef ListLockBase JitListLock; typedef ListLockEntryBase JitListLockEntry; - -#ifdef _MSC_VER -#pragma warning(push) -#pragma warning (disable: 4324) //sometimes 64bit 
compilers complain about alignment -#endif -class LoadLevelLimiter +class LoadLevelLimiter final { - FileLoadLevel m_currentLevel; + static thread_local LoadLevelLimiter* t_currentLoadLevelLimiter; + FileLoadLevel m_currentLevel; LoadLevelLimiter* m_previousLimit; - BOOL m_bActive; + bool m_bActive; public: LoadLevelLimiter() : m_currentLevel(FILE_ACTIVE), - m_previousLimit(NULL), - m_bActive(FALSE) + m_previousLimit(nullptr), + m_bActive(false) { LIMITED_METHOD_CONTRACT; } @@ -839,11 +834,11 @@ class LoadLevelLimiter void Activate() { WRAPPER_NO_CONTRACT; - m_previousLimit= GetThread()->GetLoadLevelLimiter(); - if(m_previousLimit) - m_currentLevel=m_previousLimit->GetLoadLevel(); - GetThread()->SetLoadLevelLimiter(this); - m_bActive=TRUE; + m_previousLimit = t_currentLoadLevelLimiter; + if (m_previousLimit) + m_currentLevel = m_previousLimit->GetLoadLevel(); + t_currentLoadLevelLimiter = this; + m_bActive = true; } void Deactivate() @@ -851,8 +846,8 @@ class LoadLevelLimiter WRAPPER_NO_CONTRACT; if (m_bActive) { - GetThread()->SetLoadLevelLimiter(m_previousLimit); - m_bActive=FALSE; + t_currentLoadLevelLimiter = m_previousLimit; + m_bActive = false; } } @@ -882,10 +877,13 @@ class LoadLevelLimiter LIMITED_METHOD_CONTRACT; m_currentLevel = level; } + + static LoadLevelLimiter* GetCurrent() + { + LIMITED_METHOD_CONTRACT; + return t_currentLoadLevelLimiter; + } }; -#ifdef _MSC_VER -#pragma warning (pop) //4324 -#endif #define OVERRIDE_LOAD_LEVEL_LIMIT(newLimit) \ LoadLevelLimiter __newLimit; \ @@ -1798,7 +1796,6 @@ class AppDomain : public BaseDomain CHECK CheckLoading(DomainAssembly *pFile, FileLoadLevel level); BOOL IsLoading(DomainAssembly *pFile, FileLoadLevel level); - static FileLoadLevel GetThreadFileLoadLevel(); void LoadDomainAssembly(DomainAssembly *pFile, FileLoadLevel targetLevel); diff --git a/src/coreclr/vm/arm/asmconstants.h b/src/coreclr/vm/arm/asmconstants.h index 16931168e3ce0d..5c92427008bb45 100644 --- a/src/coreclr/vm/arm/asmconstants.h +++ 
b/src/coreclr/vm/arm/asmconstants.h @@ -141,11 +141,11 @@ ASMCONSTANTS_C_ASSERT(UnmanagedToManagedFrame__m_pvDatum == offsetof(UnmanagedTo #endif // FEATURE_COMINTEROP -#define Thread__m_fPreemptiveGCDisabled 0x08 +#define Thread__m_fPreemptiveGCDisabled 0x04 ASMCONSTANTS_C_ASSERT(Thread__m_fPreemptiveGCDisabled == offsetof(Thread, m_fPreemptiveGCDisabled)); #define Thread_m_fPreemptiveGCDisabled Thread__m_fPreemptiveGCDisabled -#define Thread__m_pFrame 0x0C +#define Thread__m_pFrame 0x08 ASMCONSTANTS_C_ASSERT(Thread__m_pFrame == offsetof(Thread, m_pFrame)); #define Thread_m_pFrame Thread__m_pFrame diff --git a/src/coreclr/vm/arm64/asmconstants.h b/src/coreclr/vm/arm64/asmconstants.h index cc19728d9e29ee..262fa6860df73f 100644 --- a/src/coreclr/vm/arm64/asmconstants.h +++ b/src/coreclr/vm/arm64/asmconstants.h @@ -33,8 +33,8 @@ #define DynamicHelperFrameFlags_ObjectArg 1 #define DynamicHelperFrameFlags_ObjectArg2 2 -#define Thread__m_fPreemptiveGCDisabled 0x0C -#define Thread__m_pFrame 0x10 +#define Thread__m_fPreemptiveGCDisabled 0x04 +#define Thread__m_pFrame 0x08 ASMCONSTANTS_C_ASSERT(Thread__m_fPreemptiveGCDisabled == offsetof(Thread, m_fPreemptiveGCDisabled)); ASMCONSTANTS_C_ASSERT(Thread__m_pFrame == offsetof(Thread, m_pFrame)); diff --git a/src/coreclr/vm/clsload.cpp b/src/coreclr/vm/clsload.cpp index 9f83cf1a19c12e..32d69142890b5e 100644 --- a/src/coreclr/vm/clsload.cpp +++ b/src/coreclr/vm/clsload.cpp @@ -3065,7 +3065,7 @@ TypeHandle ClassLoader::LoadTypeHandleForTypeKeyNoLock(const TypeKey *pTypeKey, // class PendingTypeLoadHolder { - Thread * m_pThread; + static thread_local PendingTypeLoadHolder * t_pCurrent; PendingTypeLoadTable::Entry * m_pEntry; PendingTypeLoadHolder * m_pPrevious; @@ -3074,26 +3074,25 @@ class PendingTypeLoadHolder { LIMITED_METHOD_CONTRACT; - m_pThread = GetThread(); m_pEntry = pEntry; - m_pPrevious = m_pThread->GetPendingTypeLoad(); - m_pThread->SetPendingTypeLoad(this); + m_pPrevious = t_pCurrent; + t_pCurrent = this; } 
~PendingTypeLoadHolder() { LIMITED_METHOD_CONTRACT; - _ASSERTE(m_pThread->GetPendingTypeLoad() == this); - m_pThread->SetPendingTypeLoad(m_pPrevious); + _ASSERTE(t_pCurrent == this); + t_pCurrent = m_pPrevious; } static bool CheckForDeadLockOnCurrentThread(PendingTypeLoadTable::Entry * pEntry) { LIMITED_METHOD_CONTRACT; - PendingTypeLoadHolder * pCurrent = GetThread()->GetPendingTypeLoad(); + PendingTypeLoadHolder * pCurrent = t_pCurrent; while (pCurrent != NULL) { @@ -3107,6 +3106,8 @@ class PendingTypeLoadHolder } }; +thread_local PendingTypeLoadHolder * PendingTypeLoadHolder::t_pCurrent = NULL; + //--------------------------------------------------------------------------------------- // TypeHandle diff --git a/src/coreclr/vm/i386/asmconstants.h b/src/coreclr/vm/i386/asmconstants.h index 876925dbb94413..edafbdf72ae717 100644 --- a/src/coreclr/vm/i386/asmconstants.h +++ b/src/coreclr/vm/i386/asmconstants.h @@ -174,13 +174,13 @@ ASMCONSTANTS_C_ASSERT(CORINFO_ArgumentException_ASM == CORINFO_ArgumentException -#define Thread_m_State 0x04 +#define Thread_m_State 0x00 ASMCONSTANTS_C_ASSERT(Thread_m_State == offsetof(Thread, m_State)) -#define Thread_m_fPreemptiveGCDisabled 0x08 +#define Thread_m_fPreemptiveGCDisabled 0x04 ASMCONSTANTS_C_ASSERT(Thread_m_fPreemptiveGCDisabled == offsetof(Thread, m_fPreemptiveGCDisabled)) -#define Thread_m_pFrame 0x0C +#define Thread_m_pFrame 0x08 ASMCONSTANTS_C_ASSERT(Thread_m_pFrame == offsetof(Thread, m_pFrame)) diff --git a/src/coreclr/vm/loongarch64/asmconstants.h b/src/coreclr/vm/loongarch64/asmconstants.h index e12d0040a74d0a..6828eec7505e1c 100644 --- a/src/coreclr/vm/loongarch64/asmconstants.h +++ b/src/coreclr/vm/loongarch64/asmconstants.h @@ -34,8 +34,8 @@ #define DynamicHelperFrameFlags_ObjectArg 1 #define DynamicHelperFrameFlags_ObjectArg2 2 -#define Thread__m_fPreemptiveGCDisabled 0x0C -#define Thread__m_pFrame 0x10 +#define Thread__m_fPreemptiveGCDisabled 0x04 +#define Thread__m_pFrame 0x08 
ASMCONSTANTS_C_ASSERT(Thread__m_fPreemptiveGCDisabled == offsetof(Thread, m_fPreemptiveGCDisabled)); ASMCONSTANTS_C_ASSERT(Thread__m_pFrame == offsetof(Thread, m_pFrame)); diff --git a/src/coreclr/vm/riscv64/asmconstants.h b/src/coreclr/vm/riscv64/asmconstants.h index 71095e3cffc994..8076ee94f2130b 100644 --- a/src/coreclr/vm/riscv64/asmconstants.h +++ b/src/coreclr/vm/riscv64/asmconstants.h @@ -29,8 +29,8 @@ #define DynamicHelperFrameFlags_ObjectArg 1 #define DynamicHelperFrameFlags_ObjectArg2 2 -#define Thread__m_fPreemptiveGCDisabled 0x0C -#define Thread__m_pFrame 0x10 +#define Thread__m_fPreemptiveGCDisabled 0x04 +#define Thread__m_pFrame 0x08 ASMCONSTANTS_C_ASSERT(Thread__m_fPreemptiveGCDisabled == offsetof(Thread, m_fPreemptiveGCDisabled)); ASMCONSTANTS_C_ASSERT(Thread__m_pFrame == offsetof(Thread, m_pFrame)); diff --git a/src/coreclr/vm/stackingallocator.cpp b/src/coreclr/vm/stackingallocator.cpp index 286c4d09e5fd28..cee2b40049100a 100644 --- a/src/coreclr/vm/stackingallocator.cpp +++ b/src/coreclr/vm/stackingallocator.cpp @@ -427,24 +427,33 @@ void * __cdecl operator new[](size_t n, StackingAllocator * alloc, const NoThrow return alloc->UnsafeAllocNoThrow((unsigned)n); } +thread_local StackingAllocator* StackingAllocatorHolder::t_currentStackingAllocator = nullptr; + StackingAllocatorHolder::~StackingAllocatorHolder() { m_pStackingAllocator->Collapse(m_checkpointMarker); if (m_owner) { - m_thread->m_stackLocalAllocator = NULL; + t_currentStackingAllocator = NULL; m_pStackingAllocator->~StackingAllocator(); } } -StackingAllocatorHolder::StackingAllocatorHolder(StackingAllocator *pStackingAllocator, Thread *pThread, bool owner) : +StackingAllocatorHolder::StackingAllocatorHolder(StackingAllocator *pStackingAllocator, bool owner) : m_pStackingAllocator(pStackingAllocator), m_checkpointMarker(pStackingAllocator->GetCheckpoint()), - m_thread(pThread), m_owner(owner) { + _ASSERTE(pStackingAllocator != nullptr); + _ASSERTE((t_currentStackingAllocator == nullptr) 
== m_owner); if (m_owner) { - m_thread->m_stackLocalAllocator = pStackingAllocator; + t_currentStackingAllocator = pStackingAllocator; } } + + +StackingAllocator* StackingAllocatorHolder::GetCurrentThreadStackingAllocator() +{ + return t_currentStackingAllocator; +} diff --git a/src/coreclr/vm/stackingallocator.h b/src/coreclr/vm/stackingallocator.h index 2753de73908b42..c306e1b482d38e 100644 --- a/src/coreclr/vm/stackingallocator.h +++ b/src/coreclr/vm/stackingallocator.h @@ -224,14 +224,13 @@ private : }; #define ACQUIRE_STACKING_ALLOCATOR(stackingAllocatorName) \ - Thread *pThread__ACQUIRE_STACKING_ALLOCATOR = GetThread(); \ - StackingAllocator *stackingAllocatorName = pThread__ACQUIRE_STACKING_ALLOCATOR->m_stackLocalAllocator; \ + StackingAllocator *stackingAllocatorName = StackingAllocatorHolder::GetCurrentThreadStackingAllocator(); \ bool allocatorOwner__ACQUIRE_STACKING_ALLOCATOR = false; \ NewArrayHolder heapAllocatedStackingBuffer__ACQUIRE_STACKING_ALLOCATOR; \ \ if (stackingAllocatorName == NULL) \ { \ - if (pThread__ACQUIRE_STACKING_ALLOCATOR->CheckCanUseStackAlloc()) \ + if (GetThread()->CheckCanUseStackAlloc()) \ { \ stackingAllocatorName = new (_alloca(sizeof(StackingAllocator))) StackingAllocator; \ } \ @@ -245,21 +244,28 @@ private : }\ allocatorOwner__ACQUIRE_STACKING_ALLOCATOR = true; \ } \ - StackingAllocatorHolder sah_ACQUIRE_STACKING_ALLOCATOR(stackingAllocatorName, pThread__ACQUIRE_STACKING_ALLOCATOR, allocatorOwner__ACQUIRE_STACKING_ALLOCATOR) + StackingAllocatorHolder sah_ACQUIRE_STACKING_ALLOCATOR(stackingAllocatorName, allocatorOwner__ACQUIRE_STACKING_ALLOCATOR) -class Thread; class StackingAllocatorHolder { + // Allocator used during marshaling for temporary buffers, much faster than + // heap allocation. + // + // Uses of this allocator should be effectively statically scoped, i.e. 
a "region" + // is started using a CheckPointHolder and GetCheckpoint, and this region can then be used for allocations + // from that point onwards, and then all memory is reclaimed when the static scope for the + // checkpoint is exited by the running thread. + static thread_local StackingAllocator* t_currentStackingAllocator; StackingAllocator *m_pStackingAllocator; void* m_checkpointMarker; - Thread* m_thread; bool m_owner; public: ~StackingAllocatorHolder(); - StackingAllocatorHolder(StackingAllocator *pStackingAllocator, Thread *pThread, bool owner); + StackingAllocatorHolder(StackingAllocator *pStackingAllocator, bool owner); StackingAllocator *GetStackingAllocator() { return m_pStackingAllocator; } StackingAllocator &operator->() { return *m_pStackingAllocator; } + static StackingAllocator* GetCurrentThreadStackingAllocator(); }; diff --git a/src/coreclr/vm/threads.cpp b/src/coreclr/vm/threads.cpp index 2a6a87ce1bc7c2..bc7a06af0d1b09 100644 --- a/src/coreclr/vm/threads.cpp +++ b/src/coreclr/vm/threads.cpp @@ -1513,7 +1513,6 @@ Thread::Thread() m_TraceCallCount = 0; m_ThrewControlForThread = 0; m_ThreadTasks = (ThreadTasks)0; - m_pLoadLimiter= NULL; // The state and the tasks must be 32-bit aligned for atomicity to be guaranteed. 
_ASSERTE((((size_t) &m_State) & 3) == 0); @@ -1548,17 +1547,11 @@ Thread::Thread() m_RedirectContextInUse = false; #endif -#ifdef FEATURE_COMINTEROP - m_pRCWStack = new RCWStackHeader(); -#endif - #ifdef _DEBUG m_bGCStressing = FALSE; m_bUniqueStacking = FALSE; #endif - m_pPendingTypeLoad = NULL; - m_dwAVInRuntimeImplOkayCount = 0; #if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) && !defined(TARGET_UNIX) // GCCOVER @@ -1745,8 +1738,6 @@ void Thread::InitThread() _ASSERTE(HasValidThreadHandle()); - m_random.Init(); - // Set floating point mode to round to nearest #ifndef TARGET_UNIX (void) _controlfp_s( NULL, _RC_NEAR, _RC_CHOP|_RC_UP|_RC_DOWN|_RC_NEAR ); @@ -2648,11 +2639,6 @@ Thread::~Thread() MarkRedirectContextInUse(m_pSavedRedirectContext); m_pSavedRedirectContext = NULL; -#ifdef FEATURE_COMINTEROP - if (m_pRCWStack) - delete m_pRCWStack; -#endif - if (m_pExceptionDuringStartup) { Exception::Delete (m_pExceptionDuringStartup); diff --git a/src/coreclr/vm/threads.h b/src/coreclr/vm/threads.h index 9d304e2662894d..c4bc4208121dfe 100644 --- a/src/coreclr/vm/threads.h +++ b/src/coreclr/vm/threads.h @@ -131,7 +131,6 @@ class NDirect; class Frame; class ThreadBaseObject; class AppDomainStack; -class LoadLevelLimiter; class DomainAssembly; class DeadlockAwareLock; struct HelperMethodFrameCallerList; @@ -141,7 +140,6 @@ class FaultingExceptionFrame; enum BinderMethodID : int; class CRWLock; struct LockEntry; -class PendingTypeLoadHolder; class PrepareCodeConfig; class NativeCodeVersion; @@ -153,7 +151,6 @@ typedef void(*ADCallBackFcnType)(LPVOID); #include "stackwalktypes.h" #include "log.h" -#include "stackingallocator.h" #include "excep.h" #include "synch.h" #include "exstate.h" @@ -462,288 +459,6 @@ struct LockEntry BOOL MatchThreadHandleToOsId ( HANDLE h, DWORD osId ); #endif -#ifdef FEATURE_COMINTEROP - -#define RCW_STACK_SIZE 64 - -class RCWStack -{ -public: - inline RCWStack() - { - LIMITED_METHOD_CONTRACT; - memset(this, 0, sizeof(RCWStack)); - } 
- - inline VOID SetEntry(unsigned int index, RCW* pRCW) - { - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - PRECONDITION(index < RCW_STACK_SIZE); - PRECONDITION(CheckPointer(pRCW, NULL_OK)); - } - CONTRACTL_END; - - m_pList[index] = pRCW; - } - - inline RCW* GetEntry(unsigned int index) - { - CONTRACT (RCW*) - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - PRECONDITION(index < RCW_STACK_SIZE); - } - CONTRACT_END; - - RETURN m_pList[index]; - } - - inline VOID SetNextStack(RCWStack* pStack) - { - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - PRECONDITION(CheckPointer(pStack)); - PRECONDITION(m_pNext == NULL); - } - CONTRACTL_END; - - m_pNext = pStack; - } - - inline RCWStack* GetNextStack() - { - CONTRACT (RCWStack*) - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); - } - CONTRACT_END; - - RETURN m_pNext; - } - -private: - RCWStack* m_pNext; - RCW* m_pList[RCW_STACK_SIZE]; -}; - - -class RCWStackHeader -{ -public: - RCWStackHeader() - { - CONTRACTL - { - THROWS; - GC_NOTRIGGER; - MODE_ANY; - } - CONTRACTL_END; - - m_iIndex = 0; - m_iSize = RCW_STACK_SIZE; - m_pHead = new RCWStack(); - } - - ~RCWStackHeader() - { - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - } - CONTRACTL_END; - - RCWStack* pStack = m_pHead; - RCWStack* pNextStack = NULL; - - while (pStack) - { - pNextStack = pStack->GetNextStack(); - delete pStack; - pStack = pNextStack; - } - } - - bool Push(RCW* pRCW) - { - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - PRECONDITION(CheckPointer(pRCW, NULL_OK)); - } - CONTRACTL_END; - - if (!GrowListIfNeeded()) - return false; - - // Fast Path - if (m_iIndex < RCW_STACK_SIZE) - { - m_pHead->SetEntry(m_iIndex, pRCW); - m_iIndex++; - return true; - } - - // Slow Path - unsigned int count = m_iIndex; - RCWStack* pStack = m_pHead; - while (count >= RCW_STACK_SIZE) - { - pStack = pStack->GetNextStack(); - _ASSERTE(pStack); - - count -= RCW_STACK_SIZE; - } - - pStack->SetEntry(count, pRCW); - 
m_iIndex++; - return true; - } - - RCW* Pop() - { - CONTRACT (RCW*) - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - PRECONDITION(m_iIndex > 0); - POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); - } - CONTRACT_END; - - RCW* pRCW = NULL; - - m_iIndex--; - - // Fast Path - if (m_iIndex < RCW_STACK_SIZE) - { - pRCW = m_pHead->GetEntry(m_iIndex); - m_pHead->SetEntry(m_iIndex, NULL); - RETURN pRCW; - } - - // Slow Path - unsigned int count = m_iIndex; - RCWStack* pStack = m_pHead; - while (count >= RCW_STACK_SIZE) - { - pStack = pStack->GetNextStack(); - _ASSERTE(pStack); - count -= RCW_STACK_SIZE; - } - - pRCW = pStack->GetEntry(count); - pStack->SetEntry(count, NULL); - - RETURN pRCW; - } - - BOOL IsInStack(RCW* pRCW) - { - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - PRECONDITION(CheckPointer(pRCW)); - } - CONTRACTL_END; - - if (m_iIndex == 0) - return FALSE; - - // Fast Path - if (m_iIndex <= RCW_STACK_SIZE) - { - for (int i = 0; i < (int)m_iIndex; i++) - { - if (pRCW == m_pHead->GetEntry(i)) - return TRUE; - } - - return FALSE; - } - - // Slow Path - RCWStack* pStack = m_pHead; - int totalcount = 0; - while (pStack != NULL) - { - for (int i = 0; (i < RCW_STACK_SIZE) && (totalcount < m_iIndex); i++, totalcount++) - { - if (pRCW == pStack->GetEntry(i)) - return TRUE; - } - - pStack = pStack->GetNextStack(); - } - - return FALSE; - } - -private: - bool GrowListIfNeeded() - { - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - INJECT_FAULT(COMPlusThrowOM()); - PRECONDITION(CheckPointer(m_pHead)); - } - CONTRACTL_END; - - if (m_iIndex == m_iSize) - { - RCWStack* pStack = m_pHead; - RCWStack* pNextStack = NULL; - while ( (pNextStack = pStack->GetNextStack()) != NULL) - pStack = pNextStack; - - RCWStack* pNewStack = new (nothrow) RCWStack(); - if (NULL == pNewStack) - return false; - - pStack->SetNextStack(pNewStack); - - m_iSize += RCW_STACK_SIZE; - } - - return true; - } - - // Zero-based index to the first free element in the list. 
- int m_iIndex; - - // Total size of the list, including all stacks. - int m_iSize; - - // Pointer to the first stack. - RCWStack* m_pHead; -}; - -#endif // FEATURE_COMINTEROP - - typedef DWORD (*AppropriateWaitFunc) (void *args, DWORD timeout, DWORD option); // The Thread class represents a managed thread. This thread could be internal @@ -874,15 +589,6 @@ class Thread } public: - // Allocator used during marshaling for temporary buffers, much faster than - // heap allocation. - // - // Uses of this allocator should be effectively statically scoped, i.e. a "region" - // is started using a CheckPointHolder and GetCheckpoint, and this region can then be used for allocations - // from that point onwards, and then all memory is reclaimed when the static scope for the - // checkpoint is exited by the running thread. - StackingAllocator* m_stackLocalAllocator = NULL; - // If we are trying to suspend a thread, we set the appropriate pending bit to // indicate why we want to suspend it (TS_GCSuspendPending or TS_DebugSuspendPending). // @@ -1324,76 +1030,6 @@ class Thread Frame* NotifyFrameChainOfExceptionUnwind(Frame* pStartFrame, LPVOID pvLimitSP); #endif // DACCESS_COMPILE -#if defined(FEATURE_COMINTEROP) && !defined(DACCESS_COMPILE) - void RegisterRCW(RCW *pRCW) - { - CONTRACTL - { - THROWS; - GC_NOTRIGGER; - MODE_ANY; - PRECONDITION(CheckPointer(pRCW)); - } - CONTRACTL_END; - - if (!m_pRCWStack->Push(pRCW)) - { - ThrowOutOfMemory(); - } - } - - // Returns false on OOM. 
- BOOL RegisterRCWNoThrow(RCW *pRCW) - { - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - PRECONDITION(CheckPointer(pRCW, NULL_OK)); - } - CONTRACTL_END; - - return m_pRCWStack->Push(pRCW); - } - - RCW *UnregisterRCW(INDEBUG(SyncBlock *pSB)) - { - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - PRECONDITION(CheckPointer(pSB)); - } - CONTRACTL_END; - - RCW* pPoppedRCW = m_pRCWStack->Pop(); - -#ifdef _DEBUG - // The RCW we popped must be the one pointed to by pSB if pSB still points to an RCW. - RCW* pCurrentRCW = pSB->GetInteropInfoNoCreate()->GetRawRCW(); - _ASSERTE(pCurrentRCW == NULL || pPoppedRCW == NULL || pCurrentRCW == pPoppedRCW); -#endif // _DEBUG - - return pPoppedRCW; - } - - BOOL RCWIsInUse(RCW* pRCW) - { - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - PRECONDITION(CheckPointer(pRCW)); - } - CONTRACTL_END; - - return m_pRCWStack->IsInStack(pRCW); - } -#endif // FEATURE_COMINTEROP && !DACCESS_COMPILE - // Lock thread is trying to acquire VolatilePtr m_pBlockingLock; @@ -1427,11 +1063,6 @@ class Thread inline TypeHandle GetTHAllocContextObj() {LIMITED_METHOD_CONTRACT; return m_thAllocContextObj; } -#ifdef FEATURE_COMINTEROP - // The header for the per-thread in-use RCW stack. - RCWStackHeader* m_pRCWStack; -#endif // FEATURE_COMINTEROP - // Flags used to indicate tasks the thread has to do. ThreadTasks m_ThreadTasks; @@ -1555,24 +1186,6 @@ class Thread } #endif - private: - LoadLevelLimiter *m_pLoadLimiter; - - public: - LoadLevelLimiter *GetLoadLevelLimiter() - { - LIMITED_METHOD_CONTRACT; - return m_pLoadLimiter; - } - - void SetLoadLevelLimiter(LoadLevelLimiter *limiter) - { - LIMITED_METHOD_CONTRACT; - m_pLoadLimiter = limiter; - } - - - public: //-------------------------------------------------------------- // Constructor. 
@@ -2620,26 +2233,6 @@ class Thread #endif // !DACCESS_COMPILE #endif // FEATURE_EMULATE_SINGLESTEP - private: - - PendingTypeLoadHolder* m_pPendingTypeLoad; - - public: - -#ifndef DACCESS_COMPILE - PendingTypeLoadHolder* GetPendingTypeLoad() - { - LIMITED_METHOD_CONTRACT; - return m_pPendingTypeLoad; - } - - void SetPendingTypeLoad(PendingTypeLoadHolder* pPendingTypeLoad) - { - LIMITED_METHOD_CONTRACT; - m_pPendingTypeLoad = pPendingTypeLoad; - } -#endif - public: // Indicate whether this thread should run in the background. Background threads @@ -4241,11 +3834,6 @@ class Thread // See ThreadStore::TriggerGCForDeadThreadsIfNecessary() bool m_fHasDeadThreadBeenConsideredForGCTrigger; - CLRRandom m_random; - -public: - CLRRandom* GetRandom() {return &m_random;} - #ifdef FEATURE_COMINTEROP private: // Cookie returned from CoRegisterInitializeSpy diff --git a/src/coreclr/vm/util.cpp b/src/coreclr/vm/util.cpp index 7412b03b3b4aed..58dc2d28ee373e 100644 --- a/src/coreclr/vm/util.cpp +++ b/src/coreclr/vm/util.cpp @@ -1774,21 +1774,19 @@ static BOOL TrustMeIAmSafe(void *pLock) LockOwner g_lockTrustMeIAmThreadSafe = { NULL, TrustMeIAmSafe }; -static DangerousNonHostedSpinLock g_randomLock; -static CLRRandom g_random; +namespace +{ + DangerousNonHostedSpinLock g_randomLock; + CLRRandom g_random; +} int GetRandomInt(int maxVal) { - // Use the thread-local Random instance if possible - Thread* pThread = GetThreadNULLOk(); - if (pThread) - return pThread->GetRandom()->Next(maxVal); - - // No Thread object - need to fall back to the global generator. // In DAC builds we don't need the lock (DAC is single-threaded) and can't get it anyway (DNHSL isn't supported) #ifndef DACCESS_COMPILE - DangerousNonHostedSpinLockHolder lh(&g_randomLock); + DangerousNonHostedSpinLockHolder lockHolder(&g_randomLock); #endif + // Lazily initialize and use the global Random instance (g_randomLock is held in non-DAC builds) if (!g_random.IsInitialized()) g_random.Init(); return g_random.Next(maxVal);