diff --git a/src/coreclr/nativeaot/Runtime/CMakeLists.txt b/src/coreclr/nativeaot/Runtime/CMakeLists.txt index c3dd534aa2f76b..dbd19d083a16ee 100644 --- a/src/coreclr/nativeaot/Runtime/CMakeLists.txt +++ b/src/coreclr/nativeaot/Runtime/CMakeLists.txt @@ -55,6 +55,12 @@ set(COMMON_RUNTIME_SOURCES ${CLR_SRC_NATIVE_DIR}/minipal/xoshiro128pp.c ) +if (CLR_CMAKE_TARGET_UNIX AND NOT CLR_CMAKE_TARGET_ARCH_WASM) + list(APPEND COMMON_RUNTIME_SOURCES + ${RUNTIME_DIR}/asyncsafethreadmap.cpp + ) +endif() + set(SERVER_GC_SOURCES ${GC_DIR}/gceesvr.cpp ${GC_DIR}/gcsvr.cpp diff --git a/src/coreclr/nativeaot/Runtime/threadstore.cpp b/src/coreclr/nativeaot/Runtime/threadstore.cpp index 498d36d94d8549..5ba546e3893d13 100644 --- a/src/coreclr/nativeaot/Runtime/threadstore.cpp +++ b/src/coreclr/nativeaot/Runtime/threadstore.cpp @@ -22,6 +22,8 @@ #include "TargetPtrs.h" #include "yieldprocessornormalized.h" #include +#include +#include "asyncsafethreadmap.h" #include "slist.inl" @@ -143,6 +145,14 @@ void ThreadStore::AttachCurrentThread(bool fAcquireThreadStoreLock) pAttachingThread->m_ThreadStateFlags = Thread::TSF_Attached; pTS->m_ThreadList.PushHead(pAttachingThread); + +#if defined(TARGET_UNIX) && !defined(TARGET_WASM) + if (!InsertThreadIntoAsyncSafeMap(pAttachingThread->m_threadId, pAttachingThread)) + { + PalPrintFatalError("\nFailed to insert thread into async-safe map due to out of memory.\n"); + RhFailFast(); + } +#endif // TARGET_UNIX && !TARGET_WASM } // static @@ -188,6 +198,9 @@ void ThreadStore::DetachCurrentThread() pTS->m_ThreadList.RemoveFirst(pDetachingThread); // tidy up GC related stuff (release allocation context, etc..) pDetachingThread->Detach(); +#if defined(TARGET_UNIX) && !defined(TARGET_WASM) + RemoveThreadFromAsyncSafeMap(pDetachingThread->m_threadId, pDetachingThread); +#endif } // post-mortem clean up. 
@@ -352,6 +365,13 @@ EXTERN_C RuntimeThreadLocals* RhpGetThread() return &tls_CurrentThread; } +#if defined(TARGET_UNIX) && !defined(TARGET_WASM) +Thread * ThreadStore::GetCurrentThreadIfAvailableAsyncSafe() +{ + return (Thread*)FindThreadInAsyncSafeMap(minipal_get_current_thread_id_no_cache()); +} +#endif // TARGET_UNIX && !TARGET_WASM + #endif // !DACCESS_COMPILE #ifdef _WIN32 diff --git a/src/coreclr/nativeaot/Runtime/threadstore.h b/src/coreclr/nativeaot/Runtime/threadstore.h index 0ebe8e7d39a606..694608e70e6d42 100644 --- a/src/coreclr/nativeaot/Runtime/threadstore.h +++ b/src/coreclr/nativeaot/Runtime/threadstore.h @@ -47,6 +47,7 @@ class ThreadStore static Thread * RawGetCurrentThread(); static Thread * GetCurrentThread(); static Thread * GetCurrentThreadIfAvailable(); + static Thread * GetCurrentThreadIfAvailableAsyncSafe(); static PTR_Thread GetSuspendingThread(); static void AttachCurrentThread(); static void AttachCurrentThread(bool fAcquireThreadStoreLock); diff --git a/src/coreclr/nativeaot/Runtime/unix/PalUnix.cpp b/src/coreclr/nativeaot/Runtime/unix/PalUnix.cpp index 873ae69c28ddf5..9388e78944e65c 100644 --- a/src/coreclr/nativeaot/Runtime/unix/PalUnix.cpp +++ b/src/coreclr/nativeaot/Runtime/unix/PalUnix.cpp @@ -1017,24 +1017,24 @@ static struct sigaction g_previousActivationHandler; static void ActivationHandler(int code, siginfo_t* siginfo, void* context) { - // Only accept activations from the current process - if (siginfo->si_pid == getpid() + Thread* pThread = ThreadStore::GetCurrentThreadIfAvailableAsyncSafe(); + if (pThread) + { + // Only accept activations from the current process + if (siginfo->si_pid == getpid() #ifdef HOST_APPLE - // On Apple platforms si_pid is sometimes 0. It was confirmed by Apple to be expected, as the si_pid is tracked at the process level. So when multiple - // signals are in flight in the same process at the same time, it may be overwritten / zeroed. 
- || siginfo->si_pid == 0 + // On Apple platforms si_pid is sometimes 0. It was confirmed by Apple to be expected, as the si_pid is tracked at the process level. So when multiple + // signals are in flight in the same process at the same time, it may be overwritten / zeroed. + || siginfo->si_pid == 0 #endif - ) - { - // Make sure that errno is not modified - int savedErrNo = errno; - Thread::HijackCallback((NATIVE_CONTEXT*)context, NULL); - errno = savedErrNo; - } + ) + { + // Make sure that errno is not modified + int savedErrNo = errno; + Thread::HijackCallback((NATIVE_CONTEXT*)context, pThread); + errno = savedErrNo; + } - Thread* pThread = ThreadStore::GetCurrentThreadIfAvailable(); - if (pThread) - { pThread->SetActivationPending(false); } diff --git a/src/coreclr/pal/src/exception/signal.cpp b/src/coreclr/pal/src/exception/signal.cpp index 732d1d5db05933..0dfd05d028a73b 100644 --- a/src/coreclr/pal/src/exception/signal.cpp +++ b/src/coreclr/pal/src/exception/signal.cpp @@ -936,22 +936,20 @@ static void inject_activation_handler(int code, siginfo_t *siginfo, void *contex CONTEXTToNativeContext(&winContext, ucontext); } } + + // Call the original handler when it is not ignored or default (terminate). + if (g_previous_activation.sa_flags & SA_SIGINFO) + { + _ASSERTE(g_previous_activation.sa_sigaction != NULL); + g_previous_activation.sa_sigaction(code, siginfo, context); + } else { - // Call the original handler when it is not ignored or default (terminate). 
- if (g_previous_activation.sa_flags & SA_SIGINFO) - { - _ASSERTE(g_previous_activation.sa_sigaction != NULL); - g_previous_activation.sa_sigaction(code, siginfo, context); - } - else + if (g_previous_activation.sa_handler != SIG_IGN && + g_previous_activation.sa_handler != SIG_DFL) { - if (g_previous_activation.sa_handler != SIG_IGN && - g_previous_activation.sa_handler != SIG_DFL) - { - _ASSERTE(g_previous_activation.sa_handler != NULL); - g_previous_activation.sa_handler(code); - } + _ASSERTE(g_previous_activation.sa_handler != NULL); + g_previous_activation.sa_handler(code); } } } diff --git a/src/coreclr/runtime/asyncsafethreadmap.cpp b/src/coreclr/runtime/asyncsafethreadmap.cpp new file mode 100644 index 00000000000000..5c5882589a2613 --- /dev/null +++ b/src/coreclr/runtime/asyncsafethreadmap.cpp @@ -0,0 +1,126 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +#include "common.h" + +#include "asyncsafethreadmap.h" + +// Async safe lock free thread map for use in signal handlers + +struct ThreadEntry +{ + size_t osThread; + void* pThread; +}; + +#define MAX_THREADS_IN_SEGMENT 256 + +struct ThreadSegment +{ + ThreadEntry entries[MAX_THREADS_IN_SEGMENT]; + ThreadSegment* pNext; +}; + +static ThreadSegment *s_pAsyncSafeThreadMapHead = NULL; + +bool InsertThreadIntoAsyncSafeMap(size_t osThread, void* pThread) +{ + size_t startIndex = osThread % MAX_THREADS_IN_SEGMENT; + + ThreadSegment* pSegment = s_pAsyncSafeThreadMapHead; + ThreadSegment** ppSegment = &s_pAsyncSafeThreadMapHead; + while (true) + { + if (pSegment == NULL) + { + // Need to add a new segment + ThreadSegment* pNewSegment = new (nothrow) ThreadSegment(); + if (pNewSegment == NULL) + { + // Memory allocation failed + return false; + } + + memset(pNewSegment, 0, sizeof(ThreadSegment)); + ThreadSegment* pExpected = NULL; + if (!__atomic_compare_exchange_n( + ppSegment, + &pExpected, + pNewSegment, + false /* 
weak */, + __ATOMIC_RELEASE /* success_memorder */, + __ATOMIC_RELAXED /* failure_memorder */)) + { + // Another thread added the segment first + delete pNewSegment; + pNewSegment = pExpected; + } + + pSegment = pNewSegment; + } + for (size_t i = 0; i < MAX_THREADS_IN_SEGMENT; i++) + { + size_t index = (startIndex + i) % MAX_THREADS_IN_SEGMENT; + + size_t expected = 0; + if (__atomic_compare_exchange_n( + &pSegment->entries[index].osThread, + &expected, + osThread, + false /* weak */, + __ATOMIC_RELEASE /* success_memorder */, + __ATOMIC_RELAXED /* failure_memorder */)) + { + // Successfully inserted + // Use atomic store with release to ensure proper ordering + __atomic_store_n(&pSegment->entries[index].pThread, pThread, __ATOMIC_RELEASE); + return true; + } + } + + ppSegment = &pSegment->pNext; + pSegment = __atomic_load_n(&pSegment->pNext, __ATOMIC_ACQUIRE); + } +} + +void RemoveThreadFromAsyncSafeMap(size_t osThread, void* pThread) +{ + size_t startIndex = osThread % MAX_THREADS_IN_SEGMENT; + + ThreadSegment* pSegment = s_pAsyncSafeThreadMapHead; + while (pSegment) + { + for (size_t i = 0; i < MAX_THREADS_IN_SEGMENT; i++) + { + size_t index = (startIndex + i) % MAX_THREADS_IN_SEGMENT; + if (pSegment->entries[index].pThread == pThread) + { + // Found the entry, remove it + pSegment->entries[index].pThread = NULL; + __atomic_exchange_n(&pSegment->entries[index].osThread, (size_t)0, __ATOMIC_RELEASE); + return; + } + } + pSegment = __atomic_load_n(&pSegment->pNext, __ATOMIC_ACQUIRE); + } +} + +void *FindThreadInAsyncSafeMap(size_t osThread) +{ + size_t startIndex = osThread % MAX_THREADS_IN_SEGMENT; + ThreadSegment* pSegment = s_pAsyncSafeThreadMapHead; + while (pSegment) + { + for (size_t i = 0; i < MAX_THREADS_IN_SEGMENT; i++) + { + size_t index = (startIndex + i) % MAX_THREADS_IN_SEGMENT; + // Use acquire to synchronize with release in InsertThreadIntoAsyncSafeMap + if (__atomic_load_n(&pSegment->entries[index].osThread, __ATOMIC_ACQUIRE) == osThread) + { + 
return pSegment->entries[index].pThread; + } + } + pSegment = __atomic_load_n(&pSegment->pNext, __ATOMIC_ACQUIRE); + } + return NULL; +} diff --git a/src/coreclr/runtime/asyncsafethreadmap.h b/src/coreclr/runtime/asyncsafethreadmap.h new file mode 100644 index 00000000000000..b7d44f84da4080 --- /dev/null +++ b/src/coreclr/runtime/asyncsafethreadmap.h @@ -0,0 +1,27 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +#ifndef __ASYNCSAFETHREADMAP_H__ +#define __ASYNCSAFETHREADMAP_H__ + +#if defined(TARGET_UNIX) && !defined(TARGET_WASM) + +// Insert a thread into the async-safe map. +// * osThread - The OS thread ID to insert. +// * pThread - A pointer to the thread object to associate with the OS thread ID. +// * return true if the insertion was successful, false otherwise (OOM). +bool InsertThreadIntoAsyncSafeMap(size_t osThread, void* pThread); + +// Remove a thread from the async-safe map. +// * osThread - The OS thread ID to remove. +// * pThread - A pointer to the thread object associated with the OS thread ID. +void RemoveThreadFromAsyncSafeMap(size_t osThread, void* pThread); + +// Find a thread in the async-safe map. +// * osThread - The OS thread ID to search for. +// * return - A pointer to the thread object associated with the OS thread ID, or NULL if not found. +void* FindThreadInAsyncSafeMap(size_t osThread); + +#endif // TARGET_UNIX && !TARGET_WASM + +#endif // __ASYNCSAFETHREADMAP_H__ diff --git a/src/coreclr/vm/CMakeLists.txt b/src/coreclr/vm/CMakeLists.txt index 73755a194c7537..7f8b6825893a1f 100644 --- a/src/coreclr/vm/CMakeLists.txt +++ b/src/coreclr/vm/CMakeLists.txt @@ -380,6 +380,12 @@ set(VM_SOURCES_WKS ${VM_SOURCES_GDBJIT} ) +if (CLR_CMAKE_TARGET_UNIX AND NOT CLR_CMAKE_TARGET_ARCH_WASM) + list(APPEND VM_SOURCES_WKS + ${RUNTIME_DIR}/asyncsafethreadmap.cpp + ) +endif() + # coreclr needs to compile codeman.cpp differently depending on flavor (i.e. dll vs. 
static lib)) list(REMOVE_ITEM VM_SOURCES_WKS codeman.cpp) @@ -476,6 +482,12 @@ set(VM_HEADERS_WKS ${VM_HEADERS_GDBJIT} ) +if (CLR_CMAKE_TARGET_UNIX AND NOT CLR_CMAKE_TARGET_ARCH_WASM) + list(APPEND VM_HEADERS_WKS + ${RUNTIME_DIR}/asyncsafethreadmap.h + ) +endif() + set(GC_SOURCES_WKS ${GC_SOURCES_DAC_AND_WKS_COMMON} ../gc/gceventstatus.cpp diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp index 8dd4adf08945f0..c9a1230e8bab17 100644 --- a/src/coreclr/vm/codeman.cpp +++ b/src/coreclr/vm/codeman.cpp @@ -858,7 +858,7 @@ IJitManager::IJitManager() // been stopped when we suspend the EE so they won't be touching an element that is about to be deleted. // However for pre-emptive mode threads, they could be stalled right on top of the element we want // to delete, so we need to apply the reader lock to them and wait for them to drain. -ExecutionManager::ScanFlag ExecutionManager::GetScanFlags() +ExecutionManager::ScanFlag ExecutionManager::GetScanFlags(Thread *pThread) { CONTRACTL { NOTHROW; @@ -869,7 +869,10 @@ ExecutionManager::ScanFlag ExecutionManager::GetScanFlags() #if !defined(DACCESS_COMPILE) - Thread *pThread = GetThreadNULLOk(); + if (!pThread) + { + pThread = GetThreadNULLOk(); + } if (!pThread) return ScanNoReaderLock; @@ -5229,6 +5232,24 @@ BOOL ExecutionManager::IsManagedCode(PCODE currentPC) return IsManagedCodeWorker(currentPC, &lockState); } +//************************************************************************** +BOOL ExecutionManager::IsManagedCodeNoLock(PCODE currentPC) +{ + CONTRACTL { + NOTHROW; + GC_NOTRIGGER; + } CONTRACTL_END; + + if (currentPC == (PCODE)NULL) + return FALSE; + + _ASSERTE(GetScanFlags() != ScanReaderLock); + + // Since ScanReaderLock is not set, then we must assume that the ReaderLock is effectively taken. 
+ RangeSectionLockState lockState = RangeSectionLockState::ReaderLocked; + return IsManagedCodeWorker(currentPC, &lockState); +} + //************************************************************************** NOINLINE // Make sure that the slow path with lock won't affect the fast path BOOL ExecutionManager::IsManagedCodeWithLock(PCODE currentPC) diff --git a/src/coreclr/vm/codeman.h b/src/coreclr/vm/codeman.h index 466a6a7fa6f6d5..da9b0b3f50bff4 100644 --- a/src/coreclr/vm/codeman.h +++ b/src/coreclr/vm/codeman.h @@ -2293,11 +2293,15 @@ class ExecutionManager }; // Returns default scan flag for current thread - static ScanFlag GetScanFlags(); + static ScanFlag GetScanFlags(Thread *pThread = NULL); // Returns whether currentPC is in managed code. Returns false for jump stubs on WIN64. static BOOL IsManagedCode(PCODE currentPC); + // Returns whether currentPC is in managed code. Returns false for jump stubs on WIN64. + // Does not acquire the reader lock. Caller must ensure it is safe. + static BOOL IsManagedCodeNoLock(PCODE currentPC); + // Returns true if currentPC is ready to run codegen static BOOL IsReadyToRunCode(PCODE currentPC); diff --git a/src/coreclr/vm/threads.cpp b/src/coreclr/vm/threads.cpp index 003789dc8dfe6f..1b54f430fc9884 100644 --- a/src/coreclr/vm/threads.cpp +++ b/src/coreclr/vm/threads.cpp @@ -33,6 +33,8 @@ #include "vmholder.h" #include "exceptmacros.h" #include "minipal/time.h" +#include "minipal/thread.h" +#include "asyncsafethreadmap.h" #ifdef FEATURE_COMINTEROP #include "runtimecallablewrapper.h" @@ -62,6 +64,17 @@ #include "interpexec.h" #endif // FEATURE_INTERPRETER +#ifndef DACCESS_COMPILE +Thread* GetThreadAsyncSafe() +{ +#if defined(TARGET_UNIX) && !defined(TARGET_WASM) + return (Thread*)FindThreadInAsyncSafeMap(minipal_get_current_thread_id_no_cache()); +#else + return GetThreadNULLOk(); +#endif +} +#endif // DACCESS_COMPILE + static const PortableTailCallFrame g_sentinelTailCallFrame = { NULL, NULL }; TailCallTls::TailCallTls() @@ 
-370,7 +383,26 @@ void SetThread(Thread* t) #endif // Clear or set the app domain to the one domain based on if the thread is being nulled out or set - t_CurrentThreadInfo.m_pAppDomain = t == NULL ? NULL : AppDomain::GetCurrentDomain(); + if (t != NULL) + { + t_CurrentThreadInfo.m_pAppDomain = AppDomain::GetCurrentDomain(); +#if defined(TARGET_UNIX) && !defined(TARGET_WASM) + if (!InsertThreadIntoAsyncSafeMap(t->GetOSThreadId64(), t)) + { + EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, W("Failed to insert thread into async-safe map due to out of memory.")); + } +#endif // TARGET_UNIX && !TARGET_WASM + } + else + { + t_CurrentThreadInfo.m_pAppDomain = NULL; +#if defined(TARGET_UNIX) && !defined(TARGET_WASM) + if (origThread != NULL) + { + RemoveThreadFromAsyncSafeMap(origThread->GetOSThreadId64(), origThread); + } +#endif // TARGET_UNIX && !TARGET_WASM + } } BOOL Thread::Alert () diff --git a/src/coreclr/vm/threads.h b/src/coreclr/vm/threads.h index 572c57fd9a08e3..ab6ae3127d3942 100644 --- a/src/coreclr/vm/threads.h +++ b/src/coreclr/vm/threads.h @@ -5512,6 +5512,8 @@ class StackWalkerWalkingThreadHolder Thread* m_PreviousValue; }; +EXTERN_C Thread* GetThreadAsyncSafe(); + #ifndef DACCESS_COMPILE #if defined(TARGET_WINDOWS) && defined(TARGET_AMD64) EXTERN_C void STDCALL ClrRestoreNonvolatileContextWorker(PCONTEXT ContextRecord, DWORD64 ssp); diff --git a/src/coreclr/vm/threadsuspend.cpp b/src/coreclr/vm/threadsuspend.cpp index e003ae328df79b..14b52e7c4a7b99 100644 --- a/src/coreclr/vm/threadsuspend.cpp +++ b/src/coreclr/vm/threadsuspend.cpp @@ -5731,16 +5731,17 @@ void ThreadSuspend::SuspendEE(SUSPEND_REASON reason) // It is unsafe to use blocking APIs or allocate in this method. BOOL CheckActivationSafePoint(SIZE_T ip) { - Thread *pThread = GetThreadNULLOk(); + Thread *pThread = GetThreadAsyncSafe(); // The criteria for safe activation is to be running managed code. 
// Also we are not interested in handling interruption if we are already in preemptive mode nor if we are single stepping BOOL isActivationSafePoint = pThread != NULL && (pThread->m_StateNC & Thread::TSNC_DebuggerIsStepping) == 0 && pThread->PreemptiveGCDisabled() && - ExecutionManager::IsManagedCode(ip); + (ExecutionManager::GetScanFlags(pThread) != ExecutionManager::ScanReaderLock) && + ExecutionManager::IsManagedCodeNoLock(ip); - if (!isActivationSafePoint) + if (!isActivationSafePoint && pThread != NULL) { pThread->m_hasPendingActivation = false; } diff --git a/src/native/minipal/thread.h b/src/native/minipal/thread.h index 5d2225df125bb2..0f0c1ae1e8b1bf 100644 --- a/src/native/minipal/thread.h +++ b/src/native/minipal/thread.h @@ -4,6 +4,8 @@ #ifndef HAVE_MINIPAL_THREAD_H #define HAVE_MINIPAL_THREAD_H +#ifndef HOST_WINDOWS + #include #include #include @@ -36,6 +38,42 @@ extern "C" { #endif +/** + * Get the current thread ID without caching in a TLS variable. + * + * @return The current thread ID as a size_t value. + */ +static inline size_t minipal_get_current_thread_id_no_cache(void) +{ + size_t tid; +#if defined(__wasm) && !defined(_REENTRANT) + tid = 1; // In non-reentrant WASM builds, we define a single thread with ID 1. +#else // !__wasm || _REENTRANT + +#if defined(__linux__) + tid = (size_t)syscall(SYS_gettid); +#elif defined(__APPLE__) + uint64_t thread_id; + pthread_threadid_np(pthread_self(), &thread_id); + tid = (size_t)thread_id; // Cast the uint64_t thread ID to size_t +#elif defined(__FreeBSD__) + tid = (size_t)pthread_getthreadid_np(); +#elif defined(__NetBSD__) + tid = (size_t)_lwp_self(); +#elif defined(__HAIKU__) + tid = (size_t)find_thread(NULL); +#elif defined(__sun) + tid = (size_t)pthread_self(); +#elif defined(__wasm) + tid = (size_t)(void*)pthread_self(); +#else +#error "Unsupported platform" +#endif + +#endif // __wasm && !_REENTRANT + return tid; +} + /** * Get the current thread ID. 
* @@ -44,7 +82,7 @@ extern "C" { static inline size_t minipal_get_current_thread_id(void) { #if defined(__wasm) && !defined(_REENTRANT) - return 1; // In non-reentrant WASM builds, we define a single thread with ID 1. + return minipal_get_current_thread_id_no_cache(); #else // !__wasm || _REENTRANT #if defined(__GNUC__) && !defined(__clang__) && defined(__cplusplus) @@ -57,25 +95,7 @@ static inline size_t minipal_get_current_thread_id(void) if (!tid) { -#if defined(__linux__) - tid = (size_t)syscall(SYS_gettid); -#elif defined(__APPLE__) - uint64_t thread_id; - pthread_threadid_np(pthread_self(), &thread_id); - tid = (size_t)thread_id; // Cast the uint64_t thread ID to size_t -#elif defined(__FreeBSD__) - tid = (size_t)pthread_getthreadid_np(); -#elif defined(__NetBSD__) - tid = (size_t)_lwp_self(); -#elif defined(__HAIKU__) - tid = (size_t)find_thread(NULL); -#elif defined(__sun) - tid = (size_t)pthread_self(); -#elif defined(__wasm) - tid = (size_t)(void*)pthread_self(); -#else -#error "Unsupported platform" -#endif + tid = minipal_get_current_thread_id_no_cache(); } return tid; @@ -118,8 +138,11 @@ static inline int minipal_set_thread_name(pthread_t thread, const char* name) #endif } + #ifdef __cplusplus } #endif // extern "C" +#endif // !HOST_WINDOWS + #endif // HAVE_MINIPAL_THREAD_H