diff --git a/stl/CMakeLists.txt b/stl/CMakeLists.txt
index 74c5f383bfc..d3ee49f175f 100644
--- a/stl/CMakeLists.txt
+++ b/stl/CMakeLists.txt
@@ -180,6 +180,7 @@ set(HEADERS
     ${CMAKE_CURRENT_LIST_DIR}/inc/sstream
     ${CMAKE_CURRENT_LIST_DIR}/inc/stack
     ${CMAKE_CURRENT_LIST_DIR}/inc/stdexcept
+    ${CMAKE_CURRENT_LIST_DIR}/inc/stop_token
     ${CMAKE_CURRENT_LIST_DIR}/inc/streambuf
     ${CMAKE_CURRENT_LIST_DIR}/inc/string
     ${CMAKE_CURRENT_LIST_DIR}/inc/string_view
diff --git a/stl/inc/__msvc_all_public_headers.hpp b/stl/inc/__msvc_all_public_headers.hpp
index 3ba146c97bc..912896ed2d9 100644
--- a/stl/inc/__msvc_all_public_headers.hpp
+++ b/stl/inc/__msvc_all_public_headers.hpp
@@ -108,6 +108,7 @@
 #include <barrier>
 #include <latch>
 #include <semaphore>
+#include <stop_token>
 #endif // _M_CEE_PURE
 
 #ifndef _M_CEE
diff --git a/stl/inc/atomic b/stl/inc/atomic
index 07fd8d8316d..bf107a58f89 100644
--- a/stl/inc/atomic
+++ b/stl/inc/atomic
@@ -2970,6 +2970,69 @@ inline void atomic_flag_notify_all(volatile atomic_flag* const _Flag) noexcept {
 inline void atomic_flag_notify_all(atomic_flag* const _Flag) noexcept {
     return _Flag->notify_all();
 }
+
+template <class _Ty>
+class _Locked_pointer {
+public:
+    static_assert(alignof(_Ty) >= (1 << 2), "2 low order bits are needed by _Locked_pointer");
+    static constexpr uintptr_t _Lock_mask               = 3;
+    static constexpr uintptr_t _Not_locked              = 0;
+    static constexpr uintptr_t _Locked_notify_not_needed = 1;
+    static constexpr uintptr_t _Locked_notify_needed    = 2;
+    static constexpr uintptr_t _Ptr_value_mask          = ~_Lock_mask;
+
+    constexpr _Locked_pointer() noexcept : _Storage{} {}
+    explicit _Locked_pointer(_Ty* const _Ptr) noexcept : _Storage{reinterpret_cast<uintptr_t>(_Ptr)} {}
+
+    _Locked_pointer(const _Locked_pointer&) = delete;
+    _Locked_pointer& operator=(const _Locked_pointer&) = delete;
+
+    _NODISCARD _Ty* _Lock_and_load() noexcept {
+        uintptr_t _Rep = _Storage.load(memory_order_relaxed);
+        for (;;) {
+            switch (_Rep & _Lock_mask) {
+            case _Not_locked: // Can try to lock now
+                if (_Storage.compare_exchange_weak(_Rep, _Rep | _Locked_notify_not_needed)) {
+                    return reinterpret_cast<_Ty*>(_Rep);
+                }
+                _YIELD_PROCESSOR();
+                break;
+
+            case _Locked_notify_not_needed: // Try to set "notify needed" and wait
+                if (!_Storage.compare_exchange_weak(_Rep, (_Rep & _Ptr_value_mask) | _Locked_notify_needed)) {
+                    // Failed to set notify needed flag, try again
+                    _YIELD_PROCESSOR();
+                    break;
+                }
+                _Rep = (_Rep & _Ptr_value_mask) | _Locked_notify_needed;
+                [[fallthrough]];
+
+            case _Locked_notify_needed: // "Notify needed" is already set, just wait
+                _Storage.wait(_Rep, memory_order_relaxed);
+                _Rep = _Storage.load(memory_order_relaxed);
+                break;
+
+            default: // Unrecognized bit pattern
+                _CSTD abort();
+            }
+        }
+    }
+
+    void _Store_and_unlock(_Ty* const _Value) noexcept {
+        const auto _Rep = _Storage.exchange(reinterpret_cast<uintptr_t>(_Value));
+        if ((_Rep & _Lock_mask) == _Locked_notify_needed) {
+            // As we don't count waiters, every waiter is notified, and then some may re-request notification
+            _Storage.notify_all();
+        }
+    }
+
+    _NODISCARD _Ty* _Unsafe_load_relaxed() const noexcept {
+        return reinterpret_cast<_Ty*>(_Storage.load(memory_order_relaxed));
+    }
+
+private:
+    atomic<uintptr_t> _Storage;
+};
 #endif // _HAS_CXX20
 
 _STD_END
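The `_Locked_pointer` added above is a tiny spinlock-with-waiting that lives entirely in the two low bits of a pointer, which the `static_assert` on `alignof(_Ty) >= 4` guarantees are zero: not locked, locked with no waiters, and locked with waiters that need `notify_all`. The following standalone sketch (illustrative names only, not STL code) shows the low-bit tagging idea the class relies on:

    #include <cassert>
    #include <cstdint>

    struct alignas(4) Node { int value; }; // alignment >= 4 frees the two low bits

    constexpr std::uintptr_t lock_mask = 3;

    std::uintptr_t pack(Node* p, std::uintptr_t state) {
        assert(state <= lock_mask);
        return reinterpret_cast<std::uintptr_t>(p) | state; // pointer and lock state share one word
    }

    Node* pointer_of(std::uintptr_t rep) {
        return reinterpret_cast<Node*>(rep & ~lock_mask);
    }

Because the pointer and the lock state share a single atomic word, `_Store_and_unlock` can publish a new pointer and release the lock with one `exchange`.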
diff --git a/stl/inc/condition_variable b/stl/inc/condition_variable
index 0fdd0e13306..1289183537c 100644
--- a/stl/inc/condition_variable
+++ b/stl/inc/condition_variable
@@ -12,6 +12,9 @@
 #include <chrono>
 #include <memory>
 #include <mutex>
+#if _HAS_CXX20
+#include <stop_token>
+#endif // _HAS_CXX20
 
 #pragma pack(push, _CRT_PACKING)
 #pragma warning(push, _STL_WARNING_LEVEL)
@@ -25,6 +28,27 @@ _STL_DISABLE_CLANG_WARNINGS
 #endif // _M_CEE
 
 _STD_BEGIN
+template <class _Lock>
+struct _Unlock_guard {
+    explicit _Unlock_guard(_Lock& _Mtx_) : _Mtx(_Mtx_) {
+        _Mtx.unlock();
+    }
+
+    ~_Unlock_guard() noexcept /* terminates */ {
+        // relock mutex or terminate()
+        // condition_variable_any wait functions are required to terminate if
+        // the mutex cannot be relocked;
+        // we slam into noexcept here for easier user debugging.
+        _Mtx.lock();
+    }
+
+    _Unlock_guard(const _Unlock_guard&) = delete;
+    _Unlock_guard& operator=(const _Unlock_guard&) = delete;
+
+private:
+    _Lock& _Mtx;
+};
+
 class condition_variable_any { // class for waiting for conditions with any kind of mutex
 public:
     condition_variable_any() : _Myptr{_STD make_shared<mutex>()} {
@@ -50,20 +74,17 @@ public:
     template <class _Lock>
     void wait(_Lock& _Lck) noexcept /* terminates */ { // wait for signal
-        {
-            const shared_ptr<mutex> _Ptr = _Myptr; // for immunity to *this destruction
-            lock_guard<mutex> _Guard{*_Ptr};
-            _Lck.unlock();
-            _Cnd_wait(_Mycnd(), _Ptr->_Mymtx());
-        } // unlock
-
-        _Lck.lock();
-    }
+        const shared_ptr<mutex> _Ptr = _Myptr; // for immunity to *this destruction
+        unique_lock<mutex> _Guard{*_Ptr};
+        _Unlock_guard<_Lock> _Unlock_outer{_Lck};
+        _Cnd_wait(_Mycnd(), _Ptr->_Mymtx());
+        _Guard.unlock();
+    } // relock _Lck
 
     template <class _Lock, class _Predicate>
-    void wait(_Lock& _Lck, _Predicate _Pred) noexcept(noexcept(!_Pred())) /* strengthened */ {
+    void wait(_Lock& _Lck, _Predicate _Pred) noexcept(noexcept(static_cast<bool>(_Pred()))) /* strengthened */ {
         // wait for signal and check predicate
-        while (!_Pred()) {
+        while (!static_cast<bool>(_Pred())) {
             wait(_Lck);
         }
     }
@@ -89,8 +110,8 @@ public:
     template <class _Lock, class _Rep, class _Period>
     cv_status wait_for(_Lock& _Lck, const chrono::duration<_Rep, _Period>& _Rel_time) { // wait for duration
         if (_Rel_time <= chrono::duration<_Rep, _Period>::zero()) {
-            _Lck.unlock();
-            _Relock(_Lck);
+            _Unlock_guard<_Lock> _Unlock_outer{_Lck};
+            (void) _Unlock_outer;
             return cv_status::timeout;
         }
@@ -128,6 +149,93 @@ public:
         return true;
     }
 
+#if _HAS_CXX20
+private:
+    struct _Cv_any_notify_all {
+        condition_variable_any* _This;
+
+        explicit _Cv_any_notify_all(condition_variable_any* _This_) : _This{_This_} {}
+
+        _Cv_any_notify_all(const _Cv_any_notify_all&) = delete;
+        _Cv_any_notify_all& operator=(const _Cv_any_notify_all&) = delete;
+
+        void operator()() const noexcept {
+            _This->notify_all();
+        }
+    };
+
+public:
+    template <class _Lock, class _Predicate>
+    bool wait(_Lock& _Lck, stop_token _Stoken, _Predicate _Pred) noexcept(
+        noexcept(static_cast<bool>(_Pred()))) /* strengthened */ {
+        // TRANSITION, ABI: Due to the unsynchronized delivery of notify_all by _Stoken,
+        // this implementation cannot tolerate *this destruction while an interruptible wait
+        // is outstanding. A future ABI should store both the internal CV and internal mutex
+        // in the reference counted block to allow this.
+        stop_callback<_Cv_any_notify_all> _Cb{_Stoken, this};
+        for (;;) {
+            if (_Pred()) {
+                return true;
+            }
+
+            unique_lock<mutex> _Guard{*_Myptr};
+            if (_Stoken.stop_requested()) {
+                _Guard.unlock();
+                return _Pred();
+            }
+
+            _Unlock_guard<_Lock> _Unlock_outer{_Lck};
+            _Cnd_wait(_Mycnd(), _Myptr->_Mymtx());
+            _Guard.unlock();
+        } // relock
+    }
+
+    template <class _Lock, class _Clock, class _Duration, class _Predicate>
+    bool wait_until(
+        _Lock& _Lck, stop_token _Stoken, const chrono::time_point<_Clock, _Duration>& _Abs_time, _Predicate _Pred) {
+        stop_callback<_Cv_any_notify_all> _Cb{_Stoken, this};
+        for (;;) {
+            if (_Pred()) {
+                return true;
+            }
+
+            unique_lock<mutex> _Guard{*_Myptr};
+            if (_Stoken.stop_requested()) {
+                break;
+            }
+
+            _Unlock_guard<_Lock> _Unlock_outer{_Lck};
+            const auto _Now = _Clock::now();
+            if (_Now >= _Abs_time) {
+                break;
+            }
+
+            const auto _Rel_time = _Abs_time - _Now;
+            // TRANSITION, ABI: The standard says that we should use a steady clock,
+            // but unfortunately our ABI speaks struct xtime, which is relative to the system clock.
+            _CSTD xtime _Tgt;
+            (void) _To_xtime_10_day_clamped(_Tgt, _Rel_time);
+            const int _Res = _Cnd_timedwait(_Mycnd(), _Myptr->_Mymtx(), &_Tgt);
+            _Guard.unlock();
+
+            switch (_Res) {
+            case _Thrd_timedout:
+            case _Thrd_success:
+                break;
+            default:
+                _Throw_C_error(_Res);
+            }
+        } // relock
+
+        return _Pred();
+    }
+
+    template <class _Lock, class _Rep, class _Period, class _Predicate>
+    bool wait_for(_Lock& _Lck, stop_token _Stoken, const chrono::duration<_Rep, _Period>& _Rel_time, _Predicate _Pred) {
+        return wait_until(_Lck, _STD move(_Stoken), chrono::steady_clock::now() + _Rel_time, _STD move(_Pred));
+    }
+#endif // _HAS_CXX20
+
 private:
     shared_ptr<mutex> _Myptr;
 
@@ -139,16 +247,11 @@ private:
     template <class _Lock>
     cv_status _Wait_until(_Lock& _Lck, const xtime* const _Abs_time) { // wait for signal with timeout
-        int _Res;
-
-        {
-            const shared_ptr<mutex> _Ptr = _Myptr; // for immunity to *this destruction
-            lock_guard<mutex> _Guard{*_Ptr};
-            _Lck.unlock();
-            _Res = _Cnd_timedwait(_Mycnd(), _Ptr->_Mymtx(), _Abs_time);
-        } // unlock
-
-        _Relock(_Lck);
+        const shared_ptr<mutex> _Ptr = _Myptr; // for immunity to *this destruction
+        unique_lock<mutex> _Guard{*_Ptr};
+        _Unlock_guard<_Lock> _Unlock_outer{_Lck};
+        const int _Res = _Cnd_timedwait(_Mycnd(), _Ptr->_Mymtx(), _Abs_time);
+        _Guard.unlock();
 
         switch (_Res) {
         case _Thrd_success:
@@ -159,13 +262,6 @@ private:
             _Throw_C_error(_Res);
         }
     }
-
-    template <class _Lock>
-    static void _Relock(_Lock& _Lck) noexcept /* terminates */ { // relock external mutex or terminate()
-        // Wait functions are required to terminate if the mutex cannot be locked;
-        // we slam into noexcept here for easier user debugging.
-        _Lck.lock();
-    }
 };
 
 inline void notify_all_at_thread_exit(condition_variable& _Cnd, unique_lock<mutex> _Lck) {
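With these overloads, a wait on condition_variable_any becomes cancellable: request_stop() on the associated source invokes the registered _Cv_any_notify_all callback, which calls notify_all(), and the predicate loop then observes stop_requested() and returns. A user-level sketch of the intended pattern (hypothetical names, not part of this diff):

    #include <condition_variable>
    #include <mutex>
    #include <queue>
    #include <stop_token>

    std::mutex m;
    std::condition_variable_any cv;
    std::queue<int> work;

    void consumer(std::stop_token token) { // e.g. launched by a jthread
        std::unique_lock<std::mutex> lock{m};
        while (cv.wait(lock, token, [] { return !work.empty(); })) {
            int item = work.front(); // predicate held: the queue is non-empty
            work.pop();
            // process item...
        }
        // wait returned false: stop was requested while the queue was empty
    }

Note the TRANSITION comment above: unlike the plain waits, the interruptible waits cannot tolerate destruction of the condition_variable_any while a wait is outstanding.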
diff --git a/stl/inc/memory b/stl/inc/memory
index 42ad7303c0e..486625fa8f3 100644
--- a/stl/inc/memory
+++ b/stl/inc/memory
@@ -3199,65 +3199,16 @@ _CXX20_DEPRECATE_OLD_SHARED_PTR_ATOMIC_SUPPORT bool atomic_compare_exchange_stro
 
 template <class _Ty>
 class alignas(2 * sizeof(void*)) _Atomic_ptr_base { // overalignment is to allow potential future use of cmpxchg16b
-
-    static_assert(alignof(_Ref_count_base) >= (1 << 2), "Two bits don't fit as low bits");
-
-    static constexpr uintptr_t _Lock_mask               = 3;
-    static constexpr uintptr_t _Not_locked              = 0;
-    static constexpr uintptr_t _Locked_notify_not_needed = 1;
-    static constexpr uintptr_t _Locked_notify_needed    = 2;
-    static constexpr uintptr_t _Ptr_value_mask          = ~_Lock_mask;
-
 protected:
     constexpr _Atomic_ptr_base() noexcept = default;
 
-    _Atomic_ptr_base(_Ty* const _Px, _Ref_count_base* const _Ref) noexcept
-        : _Ptr(_Px), _Repptr(reinterpret_cast<uintptr_t>(_Ref)) {}
-
-    _NODISCARD _Ref_count_base* _Lock_and_load() const noexcept {
-        uintptr_t _Rep = _Repptr.load(memory_order_relaxed);
-        for (;;) {
-            switch (_Rep & _Lock_mask) {
-            case _Not_locked: // Can try to lock now
-                if (_Repptr.compare_exchange_weak(_Rep, _Rep | _Locked_notify_not_needed)) {
-                    return reinterpret_cast<_Ref_count_base*>(_Rep);
-                }
-                _YIELD_PROCESSOR();
-                break;
-
-            case _Locked_notify_not_needed: // Try to set "notify needed" and wait
-                if (!_Repptr.compare_exchange_weak(_Rep, (_Rep & _Ptr_value_mask) | _Locked_notify_needed)) {
-                    // Failed to put notify needed flag on, try again
-                    _YIELD_PROCESSOR();
-                    break;
-                }
-                _Rep = (_Rep & _Ptr_value_mask) | _Locked_notify_needed;
-                [[fallthrough]];
-
-            case _Locked_notify_needed: // "Notify needed" is already set, just wait
-                _Repptr.wait(_Rep, memory_order_relaxed);
-                _Rep = _Repptr.load(memory_order_relaxed);
-                break;
-
-            default: // Unrecognized bit pattern
-                _CSTD abort();
-            }
-        }
-    }
-
-    void _Store_and_unlock(_Ref_count_base* const _Value) const noexcept {
-        uintptr_t _Rep = _Repptr.exchange(reinterpret_cast<uintptr_t>(_Value));
-        if ((_Rep & _Lock_mask) == _Locked_notify_needed) {
-            // As we don't count waiters, every waiter is notified, and then some may re-request notification
-            _Repptr.notify_all();
-        }
-    }
+    _Atomic_ptr_base(_Ty* const _Px, _Ref_count_base* const _Ref) noexcept : _Ptr(_Px), _Repptr(_Ref) {}
 
     void _Wait(_Ty* _Old, memory_order) const noexcept {
         for (;;) {
-            auto _Rep   = _Lock_and_load();
+            auto _Rep   = _Repptr._Lock_and_load();
             bool _Equal = _Ptr.load(memory_order_relaxed) == _Old;
-            _Store_and_unlock(_Rep);
+            _Repptr._Store_and_unlock(_Rep);
             if (!_Equal) {
                 break;
             }
@@ -3274,7 +3225,7 @@ protected:
     }
 
     atomic<_Ty*> _Ptr{nullptr};
-    mutable atomic<uintptr_t> _Repptr{0};
+    mutable _Locked_pointer<_Ref_count_base> _Repptr;
 };
 
 template <class _Ty>
@@ -3293,22 +3244,22 @@ public:
     void store(shared_ptr<_Ty> _Value, const memory_order _Order = memory_order_seq_cst) noexcept {
         _Check_store_memory_order(_Order);
-        const auto _Rep = this->_Lock_and_load();
+        const auto _Rep = this->_Repptr._Lock_and_load();
         _Ty* const _Tmp = _Value._Ptr;
         _Value._Ptr     = this->_Ptr.load(memory_order_relaxed);
         this->_Ptr.store(_Tmp, memory_order_relaxed);
-        this->_Store_and_unlock(_Value._Rep);
+        this->_Repptr._Store_and_unlock(_Value._Rep);
         _Value._Rep = _Rep;
     }
 
     _NODISCARD shared_ptr<_Ty> load(const memory_order _Order = memory_order_seq_cst) const noexcept {
         _Check_load_memory_order(_Order);
         shared_ptr<_Ty> _Result;
-        const auto _Rep = this->_Lock_and_load();
+        const auto _Rep = this->_Repptr._Lock_and_load();
         _Result._Ptr = this->_Ptr.load(memory_order_relaxed);
         _Result._Rep = _Rep;
         _Result._Incref();
-        this->_Store_and_unlock(_Rep);
+        this->_Repptr._Store_and_unlock(_Rep);
         return _Result;
     }
 
@@ -3319,10 +3270,10 @@ public:
     shared_ptr<_Ty> exchange(shared_ptr<_Ty> _Value, const memory_order _Order = memory_order_seq_cst) noexcept {
         _Check_memory_order(_Order);
         shared_ptr<_Ty> _Result;
-        _Result._Rep = this->_Lock_and_load();
+        _Result._Rep = this->_Repptr._Lock_and_load();
         _Result._Ptr = this->_Ptr.load(memory_order_relaxed);
         this->_Ptr.store(_Value._Ptr, memory_order_relaxed);
-        this->_Store_and_unlock(_Value._Rep);
+        this->_Repptr._Store_and_unlock(_Value._Rep);
         _Value._Ptr = nullptr; // ownership of _Value ref has been given to this, silence decrement
         _Value._Rep = nullptr;
         return _Result;
@@ -3346,20 +3297,20 @@ public:
     bool compare_exchange_strong(shared_ptr<_Ty>& _Expected, shared_ptr<_Ty> _Desired,
         const memory_order _Order = memory_order_seq_cst) noexcept {
         _Check_memory_order(_Order);
-        auto _Rep = this->_Lock_and_load();
+        auto _Rep = this->_Repptr._Lock_and_load();
         if (this->_Ptr.load(memory_order_relaxed) == _Expected._Ptr && _Rep == _Expected._Rep) {
             _Ty* const _Tmp = _Desired._Ptr;
             _Desired._Ptr   = this->_Ptr.load(memory_order_relaxed);
             this->_Ptr.store(_Tmp, memory_order_relaxed);
             _STD swap(_Rep, _Desired._Rep);
-            this->_Store_and_unlock(_Rep);
+            this->_Repptr._Store_and_unlock(_Rep);
             return true;
         }
         _Ref_count_base* _Expected_rep = _Expected._Rep;
         _Expected._Ptr = this->_Ptr.load(memory_order_relaxed);
         _Expected._Rep = _Rep;
         _Expected._Incref();
-        this->_Store_and_unlock(_Rep);
+        this->_Repptr._Store_and_unlock(_Rep);
         if (_Expected_rep) {
             _Expected_rep->_Decref();
         }
@@ -3387,7 +3338,7 @@ public:
     }
 
     ~atomic() {
-        const auto _Rep = reinterpret_cast<_Ref_count_base*>(this->_Repptr.load(memory_order_relaxed));
+        const auto _Rep = this->_Repptr._Unsafe_load_relaxed();
         if (_Rep) {
             _Rep->_Decref();
         }
@@ -3410,22 +3361,22 @@ public:
     void store(weak_ptr<_Ty> _Value, const memory_order _Order = memory_order_seq_cst) noexcept {
         _Check_store_memory_order(_Order);
-        const auto _Rep = this->_Lock_and_load();
+        const auto _Rep = this->_Repptr._Lock_and_load();
         _Ty* const _Tmp = _Value._Ptr;
         _Value._Ptr     = this->_Ptr.load(memory_order_relaxed);
         this->_Ptr.store(_Tmp, memory_order_relaxed);
-        this->_Store_and_unlock(_Value._Rep);
+        this->_Repptr._Store_and_unlock(_Value._Rep);
         _Value._Rep = _Rep;
     }
 
     _NODISCARD weak_ptr<_Ty> load(const memory_order _Order = memory_order_seq_cst) const noexcept {
         _Check_load_memory_order(_Order);
         weak_ptr<_Ty> _Result;
-        const auto _Rep = this->_Lock_and_load();
+        const auto _Rep = this->_Repptr._Lock_and_load();
         _Result._Ptr = this->_Ptr.load(memory_order_relaxed);
         _Result._Rep = _Rep;
         _Result._Incwref();
-        this->_Store_and_unlock(_Rep);
+        this->_Repptr._Store_and_unlock(_Rep);
         return _Result;
     }
 
@@ -3436,10 +3387,10 @@ public:
     weak_ptr<_Ty> exchange(weak_ptr<_Ty> _Value, const memory_order _Order = memory_order_seq_cst) noexcept {
         _Check_memory_order(_Order);
         weak_ptr<_Ty> _Result;
-        _Result._Rep = this->_Lock_and_load();
+        _Result._Rep = this->_Repptr._Lock_and_load();
         _Result._Ptr = this->_Ptr.load(memory_order_relaxed);
         this->_Ptr.store(_Value._Ptr, memory_order_relaxed);
-        this->_Store_and_unlock(_Value._Rep);
+        this->_Repptr._Store_and_unlock(_Value._Rep);
         _Value._Ptr = nullptr; // ownership of _Value ref has been given to this, silence decrement
         _Value._Rep = nullptr;
         return _Result;
@@ -3463,20 +3414,20 @@ public:
     bool compare_exchange_strong(
         weak_ptr<_Ty>& _Expected, weak_ptr<_Ty> _Desired, const memory_order _Order = memory_order_seq_cst) noexcept {
         _Check_memory_order(_Order);
-        auto _Rep = this->_Lock_and_load();
+        auto _Rep = this->_Repptr._Lock_and_load();
         if (this->_Ptr.load(memory_order_relaxed) == _Expected._Ptr && _Rep == _Expected._Rep) {
             _Ty* const _Tmp = _Desired._Ptr;
             _Desired._Ptr   = this->_Ptr.load(memory_order_relaxed);
             this->_Ptr.store(_Tmp, memory_order_relaxed);
             _STD swap(_Rep, _Desired._Rep);
-            this->_Store_and_unlock(_Rep);
+            this->_Repptr._Store_and_unlock(_Rep);
             return true;
         }
         const auto _Expected_rep = _Expected._Rep;
         _Expected._Ptr = this->_Ptr.load(memory_order_relaxed);
         _Expected._Rep = _Rep;
         _Expected._Incwref();
-        this->_Store_and_unlock(_Rep);
+        this->_Repptr._Store_and_unlock(_Rep);
         if (_Expected_rep) {
             _Expected_rep->_Decwref();
         }
@@ -3504,7 +3455,7 @@ public:
     }
 
     ~atomic() {
-        const auto _Rep = reinterpret_cast<_Ref_count_base*>(this->_Repptr.load(memory_order_relaxed));
+        const auto _Rep = this->_Repptr._Unsafe_load_relaxed();
        if (_Rep) {
             _Rep->_Decwref();
         }
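After this refactoring, atomic<shared_ptr<_Ty>> and atomic<weak_ptr<_Ty>> behave exactly as before: each operation briefly acquires the low-bit lock on the control-block pointer through the shared _Locked_pointer and releases it with the new value in a single exchange. From the user's perspective (a sketch, not from this diff):

    #include <atomic>
    #include <memory>

    std::atomic<std::shared_ptr<int>> cache;

    void publish() {
        cache.store(std::make_shared<int>(42)); // locks, swaps the control block, unlocks
    }

    std::shared_ptr<int> read() {
        return cache.load(); // increments the refcount while holding the lock
    }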
diff --git a/stl/inc/stop_token b/stl/inc/stop_token
new file mode 100644
index 00000000000..ac4bc775af4
--- /dev/null
+++ b/stl/inc/stop_token
@@ -0,0 +1,398 @@
+// stop_token standard header
+
+// Copyright (c) Microsoft Corporation.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#pragma once
+#ifndef _STOP_TOKEN_
+#define _STOP_TOKEN_
+#include <yvals_core.h>
+#if _STL_COMPILER_PREPROCESSOR
+
+#if !_HAS_CXX20
+#pragma message("The contents of <stop_token> are available only with C++20 or later.")
+#else // ^^^ !_HAS_CXX20 / _HAS_CXX20 vvv
+
+#include <atomic>
+#include <memory>
+#include <xthreads.h>
+
+#pragma pack(push, _CRT_PACKING)
+#pragma warning(push, _STL_WARNING_LEVEL)
+#pragma warning(disable : _STL_DISABLED_WARNINGS)
+_STL_DISABLE_CLANG_WARNINGS
+#pragma push_macro("new")
+#undef new
+
+_STD_BEGIN
+struct nostopstate_t {
+    explicit nostopstate_t() = default;
+};
+
+inline constexpr nostopstate_t nostopstate{};
+
+struct _Stop_state;
+class stop_token;
+
+class _Stop_callback_base {
+    friend _Stop_state;
+
+private:
+    using _Callback_fn = void(__cdecl*)(_Stop_callback_base*) _NOEXCEPT_FNPTR;
+
+public:
+    explicit _Stop_callback_base(const _Callback_fn _Fn_) noexcept : _Fn{_Fn_} {}
+
+    _Stop_callback_base(const _Stop_callback_base&) = delete;
+    _Stop_callback_base& operator=(const _Stop_callback_base&) = delete;
+
+protected:
+    // if _Token is _Stop_requested, calls the callback;
+    // otherwise, inserts *this into the callback list if stop is possible
+    inline void _Attach(const stop_token& _Token) noexcept;
+    inline void _Attach(stop_token&& _Token) noexcept;
+
+    // if *this is in a callback list, removes it
+    inline void _Detach() noexcept;
+
+private:
+    template <bool _Transfer_ownership>
+    void _Do_attach(conditional_t<_Transfer_ownership, _Stop_state*&, _Stop_state* const> _State) noexcept;
+
+protected:
+    _Stop_state* _Parent       = nullptr;
+    _Stop_callback_base* _Next = nullptr;
+    _Stop_callback_base* _Prev = nullptr;
+    _Callback_fn _Fn;
+};
+
+struct _Stop_state {
+    atomic<uint32_t> _Stop_tokens  = 1; // plus one shared by all stop_sources
+    atomic<uint32_t> _Stop_sources = 2; // plus the low order bit is the stop requested bit
+    _Locked_pointer<_Stop_callback_base> _Callbacks;
+    // always uses relaxed operations; ordering provided by the _Callbacks lock
+    // (atomic just to get wait/notify support)
+    atomic<_Stop_callback_base*> _Current_callback = nullptr;
+    _Thrd_id_t _Stopping_thread                    = 0;
+
+    _NODISCARD bool _Stop_requested() const noexcept {
+        return (_Stop_sources.load() & uint32_t{1}) != 0;
+    }
+
+    _NODISCARD bool _Stop_possible() const noexcept {
+        return _Stop_sources.load() != 0;
+    }
+
+    _NODISCARD bool _Request_stop() noexcept {
+        // Attempts to request stop and call callbacks, returns whether request was successful
+        if ((_Stop_sources.fetch_or(uint32_t{1}) & uint32_t{1}) != 0) {
+            // another thread already requested
+            return false;
+        }
+
+        _Stopping_thread = _Thrd_id();
+        for (;;) {
+            auto _Head = _Callbacks._Lock_and_load();
+            _Current_callback.store(_Head, memory_order_relaxed);
+            _Current_callback.notify_all();
+            if (_Head == nullptr) {
+                _Callbacks._Store_and_unlock(nullptr);
+                return true;
+            }
+
+            const auto _Next = _STD exchange(_Head->_Next, nullptr);
+            _STL_INTERNAL_CHECK(_Head->_Prev == nullptr);
+            if (_Next != nullptr) {
+                _Next->_Prev = nullptr;
+            }
+
+            _Callbacks._Store_and_unlock(_Next); // unlock before running _Head so other registrations
+                                                 // can detach without blocking on the callback
+
+            _Head->_Fn(_Head); // might destroy *_Head
+        }
+    }
+};
+
+class stop_source;
+
+class stop_token {
+    friend stop_source;
+    friend _Stop_callback_base;
+
+public:
+    stop_token() noexcept : _State{} {}
+    stop_token(const stop_token& _Other) noexcept : _State{_Other._State} {
+        const auto _Local = _State;
+        if (_Local != nullptr) {
+            _Local->_Stop_tokens.fetch_add(1, memory_order_relaxed);
+        }
+    }
+
+    stop_token(stop_token&& _Other) noexcept : _State{_STD exchange(_Other._State, nullptr)} {}
+    stop_token& operator=(const stop_token& _Other) noexcept {
+        stop_token{_Other}.swap(*this);
+        return *this;
+    }
+
+    stop_token& operator=(stop_token&& _Other) noexcept {
+        stop_token{_STD move(_Other)}.swap(*this);
+        return *this;
+    }
+
+    ~stop_token() {
+        const auto _Local = _State;
+        if (_Local != nullptr) {
+            if (_Local->_Stop_tokens.fetch_sub(1, memory_order_acq_rel) == 1) {
+                delete _Local;
+            }
+        }
+    }
+
+    void swap(stop_token& _Other) noexcept {
+        _STD swap(_State, _Other._State);
+    }
+
+    _NODISCARD bool stop_requested() const noexcept {
+        const auto _Local = _State;
+        return _Local != nullptr && _Local->_Stop_requested();
+    }
+
+    _NODISCARD bool stop_possible() const noexcept {
+        const auto _Local = _State;
+        return _Local != nullptr && _Local->_Stop_possible();
+    }
+
+    _NODISCARD friend bool operator==(const stop_token& _Lhs, const stop_token& _Rhs) noexcept = default;
+
+    friend void swap(stop_token& _Lhs, stop_token& _Rhs) noexcept {
+        _STD swap(_Lhs._State, _Rhs._State);
+    }
+
+private:
+    explicit stop_token(_Stop_state* const _State_) : _State{_State_} {}
+
+    _Stop_state* _State;
+};
+
+class stop_source {
+public:
+    stop_source() : _State{new _Stop_state} {}
+    explicit stop_source(nostopstate_t) noexcept : _State{} {}
+    stop_source(const stop_source& _Other) noexcept : _State{_Other._State} {
+        const auto _Local = _State;
+        if (_Local != nullptr) {
+            _Local->_Stop_sources.fetch_add(2, memory_order_relaxed);
+        }
+    }
+
+    stop_source(stop_source&& _Other) noexcept : _State{_STD exchange(_Other._State, nullptr)} {}
+    stop_source& operator=(const stop_source& _Other) noexcept {
+        stop_source{_Other}.swap(*this);
+        return *this;
+    }
+
+    stop_source& operator=(stop_source&& _Other) noexcept {
+        stop_source{_STD move(_Other)}.swap(*this);
+        return *this;
+    }
+
+    ~stop_source() {
+        const auto _Local = _State;
+        if (_Local != nullptr) {
+            if ((_Local->_Stop_sources.fetch_sub(2, memory_order_acq_rel) >> 1) == 1) {
+                if (_Local->_Stop_tokens.fetch_sub(1, memory_order_acq_rel) == 1) {
+                    delete _Local;
+                }
+            }
+        }
+    }
+
+    void swap(stop_source& _Other) noexcept {
+        _STD swap(_State, _Other._State);
+    }
+
+    _NODISCARD stop_token get_token() const noexcept {
+        const auto _Local = _State;
+        if (_Local != nullptr) {
+            _Local->_Stop_tokens.fetch_add(1, memory_order_relaxed);
+        }
+
+        return stop_token{_Local};
+    }
+
+    _NODISCARD bool stop_requested() const noexcept {
+        const auto _Local = _State;
+        return _Local != nullptr && _Local->_Stop_requested();
+    }
+
+    _NODISCARD bool stop_possible() const noexcept {
+        return _State != nullptr;
+    }
+
+    bool request_stop() noexcept {
+        const auto _Local = _State;
+        return _Local && _Local->_Request_stop();
+    }
+
+    _NODISCARD friend bool operator==(const stop_source& _Lhs, const stop_source& _Rhs) noexcept = default;
+
+    friend void swap(stop_source& _Lhs, stop_source& _Rhs) noexcept {
+        _STD swap(_Lhs._State, _Rhs._State);
+    }
+
+private:
+    _Stop_state* _State;
+};
+
+template <bool _Transfer_ownership>
+void _Stop_callback_base::_Do_attach(
+    conditional_t<_Transfer_ownership, _Stop_state*&, _Stop_state* const> _State_raw) noexcept {
+    const auto _State = _State_raw; // avoid an indirection in all of the below
+    if (_State == nullptr) {
+        return;
+    }
+
+    // fast path check if the state is already known
+    auto _Local_sources = _State->_Stop_sources.load();
+    if ((_Local_sources & uint32_t{1}) != 0) {
+        // stop already requested
+        _Fn(this);
+        return;
+    }
+
+    if (_Local_sources == 0) {
+        return; // stop not possible
+    }
+
+    // fast path doesn't know, so try to insert
+    auto _Head = _State->_Callbacks._Lock_and_load();
+    // recheck the state in case it changed while we were waiting to acquire the lock
+    _Local_sources = _State->_Stop_sources.load();
+    if ((_Local_sources & uint32_t{1}) != 0) {
+        // stop already requested
+        _State->_Callbacks._Store_and_unlock(_Head);
+        _Fn(this);
+        return;
+    }
+
+    if (_Local_sources != 0) {
+        // stop possible, do the insert
+        _Parent = _State;
+        _Next   = _Head;
+        if constexpr (_Transfer_ownership) {
+            _State_raw = nullptr;
+        } else {
+            _State->_Stop_tokens.fetch_add(1, memory_order_relaxed);
+        }
+
+        if (_Head != nullptr) {
+            _Head->_Prev = this;
+        }
+
+        _Head = this;
+    }
+
+    _State->_Callbacks._Store_and_unlock(_Head);
+}
+
+inline void _Stop_callback_base::_Attach(const stop_token& _Token) noexcept {
+    this->_Do_attach<false>(_Token._State);
+}
+
+inline void _Stop_callback_base::_Attach(stop_token&& _Token) noexcept {
+    this->_Do_attach<true>(_Token._State);
+}
+
+inline void _Stop_callback_base::_Detach() noexcept {
+    stop_token _Token{_Parent}; // transfers ownership
+    if (_Token._State == nullptr) {
+        // callback was never inserted into the list
+        return;
+    }
+
+    auto _Head = _Token._State->_Callbacks._Lock_and_load();
+    if (this == _Head) {
+        // we are still in the list, so the callback is not being request_stop'd
+        const auto _Local_next = _Next;
+        if (_Local_next != nullptr) {
+            _Local_next->_Prev = nullptr;
+        }
+
+        _STL_INTERNAL_CHECK(_Prev == nullptr);
+        _Token._State->_Callbacks._Store_and_unlock(_Next);
+        return;
+    }
+
+    const auto _Local_prev = _Prev;
+    if (_Local_prev != nullptr) {
+        // we are still in the list, so the callback is not being request_stop'd, and there is at least one other
+        // callback still registered
+        const auto _Local_next = _Next;
+        if (_Local_next != nullptr) {
+            _Next->_Prev = _Local_prev;
+        }
+
+        _Prev->_Next = _Local_next;
+        _Token._State->_Callbacks._Store_and_unlock(_Head);
+        return;
+    }
+
+    // we aren't in the callback list even though we were added to it, so the stop requesting thread is attempting to
+    // call the callback
+    _STL_INTERNAL_CHECK((_Token._State->_Stop_sources.load() & uint32_t{1}) != 0);
+    if (_Token._State->_Current_callback.load(memory_order_acquire) != this
+        || _Token._State->_Stopping_thread == _Thrd_id()) {
+        // the callback is done or the dtor is being recursively reentered, do not block
+        _Token._State->_Callbacks._Store_and_unlock(_Head);
+        return;
+    }
+
+    // the callback is being executed by another thread, block until it is complete
+    _Token._State->_Callbacks._Store_and_unlock(_Head);
+    _Token._State->_Current_callback.wait(this, memory_order_acquire);
+}
+
+template <class _Callback>
+class stop_callback : public _Stop_callback_base {
+public:
+    using callback_type = _Callback;
+
+    template <class _CbInitTy, enable_if_t<is_constructible_v<_Callback, _CbInitTy>, int> = 0>
+    explicit stop_callback(const stop_token& _Token, _CbInitTy&& _Cb_) noexcept(
+        is_nothrow_constructible_v<_Callback, _CbInitTy>)
+        : _Stop_callback_base{_Invoke_by_stop}, _Cb(_STD forward<_CbInitTy>(_Cb_)) {
+        _Attach(_Token);
+    }
+
+    template <class _CbInitTy, enable_if_t<is_constructible_v<_Callback, _CbInitTy>, int> = 0>
+    explicit stop_callback(stop_token&& _Token, _CbInitTy&& _Cb_) noexcept(
+        is_nothrow_constructible_v<_Callback, _CbInitTy>)
+        : _Stop_callback_base{_Invoke_by_stop}, _Cb(_STD forward<_CbInitTy>(_Cb_)) {
+        _Attach(_STD move(_Token));
+    }
+
+    ~stop_callback() {
+        _Detach();
+    }
+
+private:
+    static void __cdecl _Invoke_by_stop(_Stop_callback_base* const _This) noexcept // terminates
+    {
+        _STD forward<_Callback>(static_cast<stop_callback*>(_This)->_Cb)();
+    }
+
+    _Callback _Cb;
+};
+
+template <class _Callback>
+stop_callback(stop_token, _Callback) -> stop_callback<_Callback>;
+
+_STD_END
+#pragma pop_macro("new")
+_STL_RESTORE_CLANG_WARNINGS
+#pragma warning(pop)
+#pragma pack(pop)
+#endif // _HAS_CXX20
+#endif // _STL_COMPILER_PREPROCESSOR
+#endif // _STOP_TOKEN_
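In synopsis order, the new header provides stop_source (the owner, which can request_stop), stop_token (an observer), and stop_callback (an RAII registration whose callback runs immediately in the constructor if stop was already requested, and exactly once overall). A minimal usage sketch:

    #include <stdio.h>
    #include <stop_token>

    int main() {
        std::stop_source source;
        std::stop_token token = source.get_token();

        std::stop_callback cb{token, [] { puts("stop requested"); }}; // deduction guide picks the lambda type

        source.request_stop(); // runs the callback on this thread, exactly once
        return token.stop_requested() ? 0 : 1;
    }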
diff --git a/stl/inc/thread b/stl/inc/thread
index 7a1d332d12b..20d6ee84272 100644
--- a/stl/inc/thread
+++ b/stl/inc/thread
@@ -13,6 +13,9 @@
 #include <memory>
 #include <process.h>
 #include <tuple>
+#if _HAS_CXX20
+#include <stop_token>
+#endif // _HAS_CXX20
 
 #ifdef _M_CEE_PURE
 #error <thread> is not supported when compiling with /clr:pure.
@@ -26,6 +29,10 @@ _STL_DISABLE_CLANG_WARNINGS
 #undef new
 
 _STD_BEGIN
+#if _HAS_CXX20
+class jthread;
+#endif // _HAS_CXX20
+
 class thread { // class for observing and managing threads
 public:
     class id;
@@ -35,6 +42,10 @@ public:
     thread() noexcept : _Thr{} {}
 
 private:
+#if _HAS_CXX20
+    friend jthread;
+#endif // _HAS_CXX20
+
     template <class _Tuple, size_t... _Indices>
     static unsigned int __stdcall _Invoke(void* _RawVals) noexcept /* terminates */ {
         // adapt invoke of user's callable object to _beginthreadex's thread procedure
@@ -50,9 +61,8 @@ private:
         return &_Invoke<_Tuple, _Indices...>;
     }
 
-public:
-    template <class _Fn, class... _Args, enable_if_t<!is_same_v<_Remove_cvref_t<_Fn>, thread>, int> = 0>
-    explicit thread(_Fn&& _Fx, _Args&&... _Ax) {
+    template <class _Fn, class... _Args>
+    void _Start(_Fn&& _Fx, _Args&&... _Ax) {
         using _Tuple = tuple<decay_t<_Fn>, decay_t<_Args>...>;
         auto _Decay_copied = _STD make_unique<_Tuple>(_STD forward<_Fn>(_Fx), _STD forward<_Args>(_Ax)...);
         constexpr auto _Invoker_proc = _Get_invoke<_Tuple>(make_index_sequence<1 + sizeof...(_Args)>{});
@@ -73,6 +83,12 @@ public:
         }
     }
 
+public:
+    template <class _Fn, class... _Args, enable_if_t<!is_same_v<_Remove_cvref_t<_Fn>, thread>, int> = 0>
+    explicit thread(_Fn&& _Fx, _Args&&... _Ax) {
+        _Start(_STD forward<_Fn>(_Fx), _STD forward<_Args>(_Ax)...);
+    }
+
     ~thread() noexcept {
         if (joinable()) {
             _STD terminate();
@@ -240,6 +256,96 @@ struct hash<thread::id> {
         return _Hash_representation(_Keyval._Id);
     }
 };
+
+#if _HAS_CXX20
+class jthread {
+public:
+    using id                 = thread::id;
+    using native_handle_type = thread::native_handle_type;
+
+    jthread() noexcept : _Impl{}, _Ssource{nostopstate} {}
+
+    template <class _Fn, class... _Args, enable_if_t<!is_same_v<remove_cvref_t<_Fn>, jthread>, int> = 0>
+    explicit jthread(_Fn&& _Fx, _Args&&... _Ax) {
+        if constexpr (is_invocable_v<decay_t<_Fn>, stop_token, decay_t<_Args>...>) {
+            _Impl._Start(_STD forward<_Fn>(_Fx), _Ssource.get_token(), _STD forward<_Args>(_Ax)...);
+        } else {
+            _Impl._Start(_STD forward<_Fn>(_Fx), _STD forward<_Args>(_Ax)...);
+        }
+    }
+
+    ~jthread() {
+        _Try_cancel_and_join();
+    }
+
+    jthread(const jthread&)     = delete;
+    jthread(jthread&&) noexcept = default;
+    jthread& operator=(const jthread&) = delete;
+
+    jthread& operator=(jthread&& _Other) noexcept {
+        // note: the standard specifically disallows making self-move-assignment a no-op here
+        // N4861 [thread.jthread.cons]/13
+        // Effects: If joinable() is true, calls request_stop() and then join(). Assigns the state
+        // of x to *this and sets x to a default constructed state.
+        _Try_cancel_and_join();
+        _Impl    = _STD move(_Other._Impl);
+        _Ssource = _STD move(_Other._Ssource);
+        return *this;
+    }
+
+    void swap(jthread& _Other) noexcept {
+        _Impl.swap(_Other._Impl);
+        _Ssource.swap(_Other._Ssource);
+    }
+
+    _NODISCARD bool joinable() const noexcept {
+        return _Impl.joinable();
+    }
+
+    void join() {
+        _Impl.join();
+    }
+
+    void detach() {
+        _Impl.detach();
+    }
+
+    _NODISCARD id get_id() const noexcept {
+        return _Impl.get_id();
+    }
+
+    _NODISCARD stop_source get_stop_source() noexcept {
+        return _Ssource;
+    }
+
+    _NODISCARD stop_token get_stop_token() const noexcept {
+        return _Ssource.get_token();
+    }
+
+    bool request_stop() noexcept {
+        return _Ssource.request_stop();
+    }
+
+    friend void swap(jthread& _Lhs, jthread& _Rhs) noexcept {
+        _Lhs.swap(_Rhs);
+    }
+
+    _NODISCARD static unsigned int hardware_concurrency() noexcept {
+        return thread::hardware_concurrency();
+    }
+
+private:
+    void _Try_cancel_and_join() noexcept {
+        if (_Impl.joinable()) {
+            _Ssource.request_stop();
+            _Impl.join();
+        }
+    }
+
+    thread _Impl;
+    stop_source _Ssource;
+};
+#endif // _HAS_CXX20
 _STD_END
 
 #pragma pop_macro("new")
diff --git a/stl/inc/yvals_core.h b/stl/inc/yvals_core.h
index 0d62a90da3e..6113e82987e 100644
--- a/stl/inc/yvals_core.h
+++ b/stl/inc/yvals_core.h
@@ -159,6 +159,7 @@
 // P0646R1 list/forward_list remove()/remove_if()/unique() Return size_type
 // P0653R2 to_address()
 // P0655R1 visit<R>()
+// P0660R10 <stop_token> And jthread
 // P0674R1 make_shared() For Arrays
 // P0718R2 atomic<shared_ptr<T>>, atomic<weak_ptr<T>>
 // P0758R1 is_nothrow_convertible
@@ -1189,6 +1190,7 @@
 #define __cpp_lib_interpolate 201902L
 #define __cpp_lib_is_constant_evaluated 201811L
 #define __cpp_lib_is_nothrow_convertible 201806L
+#define __cpp_lib_jthread 201911L
 #define __cpp_lib_latch 201907L
 #define __cpp_lib_list_remove_return_type 201806L
 #define __cpp_lib_math_constants 201907L
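jthread's constructor uses is_invocable_v to decide whether to prepend a token from its own stop_source, and its destructor requests stop and joins instead of terminating. A short sketch of both behaviors:

    #include <chrono>
    #include <stdio.h>
    #include <thread>

    int main() {
        std::jthread worker{[](std::stop_token token) { // token-taking overload is selected
            while (!token.stop_requested()) {
                std::this_thread::sleep_for(std::chrono::milliseconds{10});
            }
            puts("worker observed the stop request");
        }};
    } // ~jthread() calls request_stop() and then join()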
diff --git a/tests/std/include/new_counter.hpp b/tests/std/include/new_counter.hpp
new file mode 100644
index 00000000000..9ecb27607a5
--- /dev/null
+++ b/tests/std/include/new_counter.hpp
@@ -0,0 +1,120 @@
+// Copyright (c) Microsoft Corporation.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include <assert.h>
+#include <new>
+#include <stddef.h>
+#include <stdlib.h>
+
+#pragma once
+#pragma warning(push)
+#pragma warning(disable : 28251) // Inconsistent annotation for 'new': this instance has no annotations.
+
+namespace std_testing {
+    size_t g_total_news    = 0;
+    size_t g_total_deletes = 0;
+    size_t g_maximum_news  = 0;
+
+    void reset_new_counters(size_t new_maximum_news) {
+        assert(g_total_news == g_total_deletes);
+        g_total_news    = 0;
+        g_total_deletes = 0;
+        g_maximum_news  = new_maximum_news;
+    }
+} // namespace std_testing
+
+void* operator new(size_t size) {
+    void* const p = ::operator new(size, std::nothrow);
+    if (p == nullptr) {
+        throw std::bad_alloc{};
+    }
+
+    return p;
+}
+
+void* operator new(size_t, std::align_val_t) {
+    abort();
+}
+
+void* operator new(size_t size, const std::nothrow_t&) noexcept {
+    if (std_testing::g_total_news == std_testing::g_maximum_news) {
+        return nullptr;
+    }
+
+    if (size == 0) {
+        ++size;
+    }
+
+    ++std_testing::g_total_news;
+    return malloc(size);
+}
+
+void* operator new(size_t, std::align_val_t, const std::nothrow_t&) noexcept {
+    abort();
+}
+
+void operator delete(void* ptr) noexcept {
+    ::operator delete(ptr, std::nothrow);
+}
+
+void operator delete(void* ptr, size_t) noexcept {
+    ::operator delete(ptr, std::nothrow);
+}
+
+void operator delete(void*, std::align_val_t) noexcept {
+    abort();
+}
+
+void operator delete(void* ptr, const std::nothrow_t&) noexcept {
+    if (ptr) {
+        ++std_testing::g_total_deletes;
+        assert(std_testing::g_total_deletes <= std_testing::g_total_news);
+        free(ptr);
+    }
+}
+
+void operator delete(void*, std::align_val_t, const std::nothrow_t&) noexcept {
+    abort();
+}
+
+void* operator new[](size_t size) {
+    return ::operator new(size);
+}
+
+void* operator new[](size_t, std::align_val_t) {
+    abort();
+}
+
+void* operator new[](size_t size, const std::nothrow_t&) noexcept {
+    return ::operator new(size, std::nothrow);
+}
+
+void* operator new[](size_t, std::align_val_t, const std::nothrow_t&) noexcept {
+    abort();
+}
+
+void operator delete[](void* ptr) noexcept {
+    ::operator delete(ptr);
+}
+
+void operator delete[](void* ptr, size_t size) noexcept {
+    ::operator delete(ptr, size);
+}
+
+void operator delete[](void*, std::align_val_t) noexcept {
+    abort();
+}
+
+void operator delete[](void*, size_t, std::align_val_t) noexcept {
+    abort();
+}
+
+void operator delete[](void* ptr, const std::nothrow_t&) noexcept {
+    ::operator delete(ptr, std::nothrow);
+}
+
+void operator delete[](void*, std::align_val_t, const std::nothrow_t&) noexcept {
+    abort();
+}
+
+#pragma warning(pop)
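The harness replaces the global allocation functions so a test can assert exactly how many allocations a region performs; reset_new_counters(n) also makes the (n+1)-th new fail, returning nullptr from the nothrow form and throwing bad_alloc from the throwing form, which is what drives the exception-safety cases in the stop_token test below. A hypothetical fragment showing the intended use:

    #include <new_counter.hpp>

    void test_allocates_exactly_once() {
        std_testing::reset_new_counters(1); // allow exactly one allocation
        int* p = new int{42};               // first allocation: succeeds and is counted
        delete p;
        std_testing::reset_new_counters(0); // entry assert checks news == deletes
    }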
diff --git a/tests/std/test.lst b/tests/std/test.lst
index 2585db8b27f..138ab74ecc7 100644
--- a/tests/std/test.lst
+++ b/tests/std/test.lst
@@ -235,6 +235,9 @@ tests\P0595R2_is_constant_evaluated
 tests\P0607R0_inline_variables
 tests\P0616R0_using_move_in_numeric
 tests\P0631R8_numbers_math_constants
+tests\P0660R10_jthread_and_cv_any
+tests\P0660R10_stop_token
+tests\P0660R10_stop_token_death
 tests\P0674R1_make_shared_for_arrays
 tests\P0718R2_atomic_smart_ptrs
 tests\P0758R1_is_nothrow_convertible
diff --git a/tests/std/tests/P0660R10_jthread_and_cv_any/env.lst b/tests/std/tests/P0660R10_jthread_and_cv_any/env.lst
new file mode 100644
index 00000000000..642f530ffad
--- /dev/null
+++ b/tests/std/tests/P0660R10_jthread_and_cv_any/env.lst
@@ -0,0 +1,4 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+RUNALL_INCLUDE ..\usual_latest_matrix.lst
diff --git a/tests/std/tests/P0660R10_jthread_and_cv_any/test.cpp b/tests/std/tests/P0660R10_jthread_and_cv_any/test.cpp
new file mode 100644
index 00000000000..1495227aacd
--- /dev/null
+++ b/tests/std/tests/P0660R10_jthread_and_cv_any/test.cpp
@@ -0,0 +1,245 @@
+// Copyright (c) Microsoft Corporation.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include <assert.h>
+#include <chrono>
+#include <condition_variable>
+#include <mutex>
+#include <stdio.h>
+#include <thread>
+#include <type_traits>
+
+#define STATIC_ASSERT(...) static_assert(__VA_ARGS__, #__VA_ARGS__)
+
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wself-move"
+#endif // __clang__
+
+using namespace std;
+
+STATIC_ASSERT(is_same_v<jthread::id, thread::id>);
+STATIC_ASSERT(is_same_v<jthread::native_handle_type, thread::native_handle_type>);
+
+int main() {
+    // dtor tested in lots of places here so no explicit tests for it
+
+    { // default ctor
+        jthread default_constructed;
+        assert(default_constructed.get_id() == thread::id{});
+        assert(!default_constructed.get_stop_source().stop_possible());
+    }
+
+    { // initializing ctor, traditional functor
+        jthread worker{[] {}};
+        assert(worker.get_id() != thread::id{});
+        assert(worker.joinable());
+        assert(worker.get_stop_source().stop_possible());
+    }
+
+    { // also make sure that we don't delegate to std::thread's constructor which would try to move assign over the
+      // std::thread inside jthread rather than passing it to the functor
+        jthread worker{[](thread t) { t.join(); }, thread{[] {}}};
+        assert(worker.get_id() != thread::id{});
+        assert(worker.joinable());
+        assert(worker.get_stop_source().stop_possible());
+    }
+
+    { // initializing ctor, token functor
+        bool called = false;
+        struct overload_detector {
+            bool* p_called;
+            void operator()(stop_token, int i) const {
+                assert(i == 1729);
+                *p_called = true;
+            }
+            void operator()(int) const {
+                assert(false);
+            }
+        };
+
+        {
+            jthread worker{overload_detector{&called}, 1729};
+            (void) worker;
+        }
+
+        assert(called);
+    }
+
+    { // move ctor
+        jthread worker{[] {}};
+        auto worker_source = worker.get_stop_source();
+        {
+            jthread moved{move(worker)};
+            assert(moved.get_stop_source() == worker_source);
+            assert(moved.joinable());
+            assert(worker.get_stop_source() != worker_source);
+            assert(!worker.joinable());
+        }
+    }
+
+    { // move assign
+        jthread worker_a{[] {}};
+        auto source_a = worker_a.get_stop_source();
+        jthread worker_b{[] {}};
+        auto id_b     = worker_b.get_id();
+        auto source_b = worker_b.get_stop_source();
+        worker_a      = move(worker_b);
+        assert(source_a.stop_requested());
+        assert(id_b == worker_a.get_id());
+        assert(!source_b.stop_requested());
+        assert(worker_a.get_stop_source() == source_b);
+        assert(!worker_b.joinable());
+    }
+
+    { // self move assign, as of N4861 specified to try to cancel and join [thread.jthread.cons]/13
+        jthread worker{[] {}};
+        auto source = worker.get_stop_source();
+        worker      = move(worker);
+        assert(!worker.joinable());
+        assert(source.stop_requested());
+    }
+
+    { // swaps
+        jthread worker_a{[] {}};
+        auto id_a     = worker_a.get_id();
+        auto source_a = worker_a.get_stop_source();
+        auto token_a  = worker_a.get_stop_token();
+        jthread worker_b{[] {}};
+        auto id_b     = worker_b.get_id();
+        auto source_b = worker_b.get_stop_source();
+        auto token_b  = worker_b.get_stop_token();
+
+        assert(id_a != id_b);
+        assert(source_a != source_b);
+        assert(token_a != token_b);
+
+        worker_a.swap(worker_b);
+        assert(worker_a.get_id() == id_b);
+        assert(worker_a.get_stop_source() == source_b);
+        assert(worker_b.get_id() == id_a);
+        assert(worker_b.get_stop_source() == source_a);
+        swap(worker_a, worker_b);
+        assert(worker_a.get_id() == id_a);
+        assert(worker_a.get_stop_source() == source_a);
+        assert(worker_b.get_id() == id_b);
+        assert(worker_b.get_stop_source() == source_b);
+    }
+
+    { // join
+        jthread worker{[] {}};
+        auto source = worker.get_stop_source();
+        worker.join();
+        assert(!worker.joinable());
+        assert(worker.get_stop_source() == source);
+        assert(!source.stop_requested());
+        assert(source.stop_possible());
+    }
+
+    // TRANSITION, OS-11107628 "_Exit allows cleanup in other DLLs"
+    // detach() is intentionally not tested
+
+    // get_id, get_stop_source, get_stop_token tested above
+
+    assert(jthread::hardware_concurrency() == thread::hardware_concurrency());
+
+    { // first wait_until overload; without the cancellation this would deadlock
+        jthread worker([](stop_token token) {
+            mutex m;
+            condition_variable_any cv;
+            unique_lock lck{m};
+            assert(cv.wait(lck, move(token), [] { return false; }) == false);
+        });
+    }
+
+    static constexpr auto forever  = chrono::steady_clock::duration::max();
+    static constexpr auto infinity = chrono::steady_clock::time_point::max();
+
+    { // ditto without the cancellation this would deadlock
+        jthread worker([](stop_token token) {
+            mutex m;
+            condition_variable_any cv;
+            unique_lock lck{m};
+            assert(cv.wait_until(lck, move(token), infinity, [] { return false; }) == false);
+        });
+    }
+
+    { // ditto without the cancellation this would deadlock
+        jthread worker([](stop_token token) {
+            mutex m;
+            condition_variable_any cv;
+            unique_lock lck{m};
+            assert(cv.wait_for(lck, move(token), forever, [] { return false; }) == false);
+        });
+    }
+
+    // smoke test true-returning versions of the above
+    {
+        mutex m;
+        condition_variable_any cv;
+        bool b = false;
+        jthread worker([&](stop_token token) {
+            unique_lock lck{m};
+            assert(cv.wait(lck, move(token), [] { return true; }) == true);
+            assert(cv.wait(lck, move(token), [&] { return b; }) == true);
+        });
+
+        {
+            lock_guard lck{m};
+            b = true;
+        }
+
+        cv.notify_all();
+    }
+
+    {
+        mutex m;
+        condition_variable_any cv;
+        bool b = false;
+        jthread worker([&](stop_token token) {
+            unique_lock lck{m};
+            assert(cv.wait_until(lck, move(token), infinity, [] { return true; }) == true);
+            assert(cv.wait_until(lck, move(token), infinity, [&] { return b; }) == true);
+        });
+
+        {
+            lock_guard lck{m};
+            b = true;
+        }
+
+        cv.notify_all();
+    }
+
+    {
+        mutex m;
+        condition_variable_any cv;
+        bool b = false;
+        jthread worker([&](stop_token token) {
+            unique_lock lck{m};
+            assert(cv.wait_for(lck, move(token), forever, [] { return true; }) == true);
+            assert(cv.wait_for(lck, move(token), forever, [&] { return b; }) == true);
+        });
+
+        {
+            lock_guard lck{m};
+            b = true;
+        }
+
+        cv.notify_all();
+    }
+
+    // smoke test a timeout case:
+    {
+        jthread worker([] {
+            stop_source never_stopped;
+            mutex m;
+            condition_variable_any cv;
+            unique_lock lck{m};
+            auto started_at = chrono::steady_clock::now();
+            assert(cv.wait_for(lck, never_stopped.get_token(), 100ms, [] { return false; }) == false);
+            // not a timing assumption: the wait_for must wait at least that long
+            assert(started_at + 100ms <= chrono::steady_clock::now());
+        });
+    }
+
+    puts("pass");
+}
diff --git a/tests/std/tests/P0660R10_stop_token/env.lst b/tests/std/tests/P0660R10_stop_token/env.lst
new file mode 100644
index 00000000000..642f530ffad
--- /dev/null
+++ b/tests/std/tests/P0660R10_stop_token/env.lst
@@ -0,0 +1,4 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+RUNALL_INCLUDE ..\usual_latest_matrix.lst
diff --git a/tests/std/tests/P0660R10_stop_token/test.cpp b/tests/std/tests/P0660R10_stop_token/test.cpp
new file mode 100644
index 00000000000..eaf5cdc24e0
--- /dev/null
+++ b/tests/std/tests/P0660R10_stop_token/test.cpp
@@ -0,0 +1,418 @@
+// Copyright (c) Microsoft Corporation.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include <assert.h>
+#include <atomic>
+#include <chrono>
+#include <new>
+#include <optional>
+#include <stdio.h>
+#include <stop_token>
+#include <thread>
+
+#include <new_counter.hpp>
+
+using namespace std;
+using namespace std_testing;
+
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wself-move"
+#endif // __clang__
+
+struct throwing_construction_functor {
+    throwing_construction_functor(int x) {
+        throw x;
+    }
+
+    void operator()() const {
+        assert(false);
+    }
+};
+
+struct call_counting_functor {
+    atomic<int>* state;
+
+    call_counting_functor(atomic<int>* state_) : state(state_) {}
+
+    call_counting_functor(const call_counting_functor&) = delete;
+    call_counting_functor& operator=(const call_counting_functor&) = delete;
+
+    void operator()() && {
+        ++*state;
+    }
+};
+
+struct cb_destroying_functor {
+    optional<stop_callback<cb_destroying_functor>>& owner;
+    cb_destroying_functor(optional<stop_callback<cb_destroying_functor>>& owner_) : owner(owner_) {}
+
+    cb_destroying_functor(const cb_destroying_functor&) = delete;
+    cb_destroying_functor& operator=(const cb_destroying_functor&) = delete;
+
+    void operator()() && {
+        owner.reset();
+    }
+};
+
+int main() noexcept {
+    reset_new_counters(0);
+    { // all the following must not allocate, and must work with a nostopstate source; in rough synopsis order
+        stop_token token;
+        stop_token token_copy{token};
+        stop_token token_moved{move(token)};
+        token_copy  = token;
+        token_moved = move(token);
+        token.swap(token_copy);
+        assert(!token.stop_requested());
+        assert(!token.stop_possible());
+        assert(token == token_copy);
+        swap(token, token_copy);
+
+        stop_source source{nostopstate};
+        stop_source copied_source{source};
+        stop_source moved_source{move(source)};
+        copied_source = source;
+        moved_source  = move(source);
+        copied_source.swap(source);
+
+        assert(!source.get_token().stop_possible());
+        assert(!source.get_token().stop_requested());
+        assert(!source.stop_possible());
+        assert(!source.stop_requested());
+        assert(!source.request_stop());
+
+        assert(source == copied_source);
+        assert(source == moved_source);
+
+        swap(source, copied_source);
+
+        stop_callback cb{token, [] { assert(false); }};
+        stop_callback cb_moved{move(token), [] { assert(false); }};
+    }
+
+    // normal reference counted things state management; in rough synopsis order
+    reset_new_counters(2);
+    { // stop_source
+        stop_source empty{nostopstate};
+
+        // default ctor
+        stop_source source_a;
+        assert(source_a.stop_possible());
+        assert(!source_a.stop_requested());
+        stop_source source_b;
+        assert(source_b.stop_possible());
+        assert(!source_b.stop_requested());
+        assert(source_a != empty);
+        assert(source_a != source_b);
+
+        // copy ctor
+        stop_source copied_source{source_a};
+        assert(copied_source == source_a);
+        source_a.swap(source_b);
+        assert(copied_source == source_b);
+        swap(source_a, source_b);
+        assert(copied_source == source_a);
+
+        // move ctor
+        stop_source moved_source{move(source_a)};
+        assert(!source_a.stop_possible());
+        assert(empty == source_a);
+        assert(moved_source != source_a);
+        moved_source = move(moved_source);
+        swap(moved_source, source_a);
+
+        // copy assignment
+        copied_source = source_b;
+        assert(copied_source == source_b);
+
+        // move assignment
+        moved_source = move(source_a);
+        assert(!source_a.stop_possible());
+        assert(moved_source.stop_possible());
+
+        // swap member
+        moved_source.swap(source_a);
+        assert(source_a.stop_possible());
+        assert(!moved_source.stop_possible());
+
+        // get_token tested with tokens below
+        // stop_possible tested above
+        // stop_requested tested below
+        assert(!empty.request_stop());
+        assert(source_a.request_stop());
+        assert(source_a.stop_requested());
+        assert(!source_a.request_stop());
+        assert(source_a.stop_requested());
+
+        assert(!source_b.stop_requested());
+        assert(!copied_source.stop_requested());
+        assert(copied_source.request_stop());
+        assert(source_b.stop_requested());
+        assert(copied_source.stop_requested());
+        assert(!source_b.request_stop());
+    }
+
+    reset_new_counters(2);
+    { // stop_token
+        stop_source source_a;
+        stop_token token_a = source_a.get_token();
+        assert(token_a.stop_possible());
+        assert(!token_a.stop_requested());
+
+        stop_source source_b;
+        stop_token token_b = source_b.get_token();
+        assert(token_a != token_b);
+
+        stop_token empty;
+
+        // default ctor tested above in the no-alloc block
+
+        // copy ctor
+        stop_token copied_token{token_a};
+        assert(copied_token == token_a);
+
+        // move ctor
+        stop_token moved_token{move(token_a)};
+        assert(moved_token == copied_token);
+        assert(moved_token != token_a);
+        assert(!token_a.stop_possible());
+        assert(!token_a.stop_requested());
+        moved_token.swap(token_a);
+
+        // copy assign
+        copied_token = token_b;
+        assert(copied_token == token_b);
+
+        // move assign
+        moved_token = move(token_b);
+        assert(moved_token == copied_token);
+        moved_token = move(moved_token);
+        assert(moved_token == copied_token);
+        assert(moved_token != token_a);
+        assert(!token_b.stop_possible());
+        assert(!token_b.stop_requested());
+        swap(token_b, moved_token);
+
+        // stop_possible tested above and 1 special case below
+
+        // stop_requested
+        assert(!copied_token.stop_requested());
+        assert(source_b.request_stop());
+        assert(!token_a.stop_requested());
+        assert(token_b.stop_requested());
+        assert(copied_token.stop_requested());
+
+        // equals and swap tested above
+    }
+
+    // the stop_possible special cases
+    reset_new_counters(1);
+    { // all sources are gone
+        stop_token token;
+        {
+            stop_source source;
+            token = source.get_token();
+            assert(token.stop_possible());
+            assert(!token.stop_requested());
+        } // destroy source
+
+        assert(!token.stop_possible());
+        assert(!token.stop_requested());
+        stop_callback cb{token, [] { assert(false); }};
+        (void) cb;
+    }
+
+    reset_new_counters(1);
+    { // all sources are gone but stop happened first
+        stop_token token;
+        {
+            stop_source source;
+            token = source.get_token();
+            assert(token.stop_possible());
+            assert(!token.stop_requested());
+            assert(source.request_stop());
+            assert(token.stop_possible());
+            assert(token.stop_requested());
+        } // destroy source
+
+        assert(token.stop_possible());
+        assert(token.stop_requested());
+    }
+
+    // empty assign special cases
+    reset_new_counters(1);
+    {
+        stop_source source;
+        stop_source empty{nostopstate};
+        source = empty; // lvalue
+        assert(!source.stop_possible());
+    }
+
+    reset_new_counters(1);
+    {
+        stop_source source;
+        source = stop_source{nostopstate}; // rvalue
+        assert(!source.stop_possible());
+    }
+
+    reset_new_counters(1);
+    {
+        stop_source source;
+        auto token = source.get_token();
+        stop_token empty;
+        token = empty; // lvalue
+        assert(!token.stop_possible());
+    }
+
+    reset_new_counters(1);
+    {
+        stop_source source;
+        auto token = source.get_token();
+        token      = stop_token{}; // rvalue
+        assert(!token.stop_possible());
+    }
+
+    // callback calling in the ctor
+    reset_new_counters(1);
+    {
+        atomic<int> calls{0};
+        stop_source source;
+        source.request_stop();
+        assert(calls.load() == 0);
+        stop_callback<call_counting_functor> cb{source.get_token(), &calls};
+        (void) cb;
+        assert(calls.load() == 1);
+    }
+
+    reset_new_counters(1);
+    {
+        atomic<int> calls{0};
+        stop_token token;
+
+        {
+            stop_source source;
+            token = source.get_token();
+            source.request_stop();
+        } // destroy source
+
+        assert(calls.load() == 0);
+        stop_callback<call_counting_functor> cb{token, &calls};
+        (void) cb;
+        assert(calls.load() == 1);
+    }
+
+    // callback calling on cancel
+    reset_new_counters(1);
+    {
+        atomic<int> calls{0};
+        stop_source source;
+        assert(calls.load() == 0);
+        stop_callback<call_counting_functor> cb{source.get_token(), &calls};
+        assert(calls.load() == 0);
+        source.request_stop();
+        assert(calls.load() == 1);
+    }
+
+    // if the callback is executing on the current thread it does not block for the callback to finish executing
+    reset_new_counters(1);
+    {
+        stop_source source;
+        auto token = source.get_token();
+        optional<stop_callback<cb_destroying_functor>> cb;
+        cb.emplace(token, cb);
+        source.request_stop(); // if we don't do what the standard says, this will deadlock
+    }
+
+    // if the callback is executing on another thread it blocks for the callback to finish executing
+    reset_new_counters(2); // nonstandard assumption that our std::thread allocates exactly once
+    {
+        static constexpr chrono::milliseconds callback_wait_length = 5s;
+        static constexpr chrono::milliseconds request_wait_length  = 500ms;
+        stop_source source;
+        atomic<bool> block_request_stop{false};
+        // block_destroy makes it more likely that the timing assumption above is correct because the timer doesn't
+        // start until we know the worker thread is actively running trying to request_stop
+        atomic<bool> block_destroy{false};
+        thread worker{[&] {
+            // run the callbacks in the worker thread
+            block_request_stop.wait(false);
+            block_destroy.store(true);
+            block_destroy.notify_one();
+            assert("request_wait_length TIMING ASSUMPTION" && source.request_stop());
+        }};
+
+        auto worker_id = worker.get_id();
+        chrono::steady_clock::time_point started_at;
+        {
+            // timing assumption that the main thread will try to destroy cb within request_wait_length
+            stop_callback cb{source.get_token(), [&] {
+                                 this_thread::sleep_for(callback_wait_length);
+                                 assert("request_wait_length TIMING ASSUMPTION" && this_thread::get_id() == worker_id);
+                             }};
+            started_at = chrono::steady_clock::now();
+            block_request_stop.store(true);
+            block_request_stop.notify_one();
+            block_destroy.wait(false); // wait for the other thread to start stopping
+            // timing assumption that worker enters request_stop before we try to destroy cb here;
+            // if that assumption is wrong then we merely don't test the case in which we're interested (because
+            // cb will run on this thread so we won't have to block for destruction)
+            this_thread::sleep_for(request_wait_length);
+            assert("request_wait_length TIMING ASSUMPTION" && !source.request_stop());
+        } // destroy cb
+
+        worker.join();
+
+        // not a timing assumption: we must have waited at least as long as the sleep_for in the cancellation callback
+        // (that's the point of this test)
+        auto stopped_at = chrono::steady_clock::now();
+        assert(started_at + callback_wait_length <= stopped_at);
+    }
+
+    // more than one callback in the list and the first callback unregisters one of the others
+    // (this tests edge cases in the callback linked list management)
+    for (int idx = 0; idx < 5; ++idx) {
+        reset_new_counters(1);
+        stop_source source;
+        auto token = source.get_token();
+        optional<stop_callback<cb_destroying_functor>> cbs[5];
+        cbs[0].emplace(token, cbs[idx]);
+        cbs[1].emplace(token, cbs[1]);
+        cbs[2].emplace(token, cbs[2]);
+        cbs[3].emplace(token, cbs[3]);
+        cbs[4].emplace(token, cbs[4]);
+        cbs[2].reset();
+        source.request_stop();
+    }
+
+    // exception safety cases
+    reset_new_counters(0);
+    try {
+        stop_source source;
+        (void) source;
+        assert(false);
+    } catch (const bad_alloc&) {
+        // expected
+    }
+
+    reset_new_counters(1);
+    try {
+        stop_source source;
+        stop_callback<throwing_construction_functor> cb{source.get_token(), 42};
+    } catch (int i) {
+        assert(i == 42);
+    }
+
+    reset_new_counters(1);
+    try {
+        stop_source source;
+        auto token_lvalue = source.get_token();
+        stop_callback<throwing_construction_functor> cb{token_lvalue, 43};
+    } catch (int i) {
+        assert(i == 43);
+    }
+
+    reset_new_counters(0);
+
+    puts("pass");
+}
diff --git a/tests/std/tests/P0660R10_stop_token_death/env.lst b/tests/std/tests/P0660R10_stop_token_death/env.lst
new file mode 100644
index 00000000000..e5b00aee0d4
--- /dev/null
+++ b/tests/std/tests/P0660R10_stop_token_death/env.lst
@@ -0,0 +1,4 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+RUNALL_INCLUDE ..\usual_latest_winsdk_matrix.lst
diff --git a/tests/std/tests/P0660R10_stop_token_death/test.cpp b/tests/std/tests/P0660R10_stop_token_death/test.cpp
new file mode 100644
index 00000000000..e9075218b7a
--- /dev/null
+++ b/tests/std/tests/P0660R10_stop_token_death/test.cpp
@@ -0,0 +1,48 @@
+// Copyright (c) Microsoft Corporation.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include <stop_token>
+
+#include <test_death.hpp>
+
+using namespace std;
+
+struct throwing_functor {
+    void operator()() {
+        throw 42;
+    }
+};
+
+void test_case_throw_during_callback_ctor() {
+    stop_source source;
+    source.request_stop();
+    stop_callback cb{source.get_token(), throwing_functor{}};
+    (void) cb;
+}
+
+void test_case_throw_during_callback_lvalue_ctor() {
+    stop_source source;
+    source.request_stop();
+    auto lvalue_token = source.get_token();
+    stop_callback cb{lvalue_token, throwing_functor{}};
+    (void) cb;
+}
+
+void test_case_throw_during_request_stop() {
+    stop_source source;
+    stop_callback cb{source.get_token(), throwing_functor{}};
+    (void) cb;
+    source.request_stop();
+}
+
+int main(int argc, char* argv[]) {
+    std_testing::death_test_executive exec([] {});
+
+    exec.add_death_tests({
+        test_case_throw_during_callback_ctor,
+        test_case_throw_during_callback_lvalue_ctor,
+        test_case_throw_during_request_stop,
+    });
+
+    return exec.run(argc, argv);
+}
diff --git a/tests/std/tests/VSO_0157762_feature_test_macros/test.cpp b/tests/std/tests/VSO_0157762_feature_test_macros/test.cpp
index 455494fdd26..f7cfed4739b 100644
--- a/tests/std/tests/VSO_0157762_feature_test_macros/test.cpp
+++ b/tests/std/tests/VSO_0157762_feature_test_macros/test.cpp
@@ -862,6 +862,20 @@ STATIC_ASSERT(__cpp_lib_is_swappable == 201603L);
 #endif
 #endif
 
+#if _HAS_CXX20
+#ifndef __cpp_lib_jthread
+#error __cpp_lib_jthread is not defined
+#elif __cpp_lib_jthread != 201911L
+#error __cpp_lib_jthread is not 201911L
+#else
+STATIC_ASSERT(__cpp_lib_jthread == 201911L);
+#endif
+#else
+#ifdef __cpp_lib_jthread
+#error __cpp_lib_jthread is defined
+#endif
+#endif
+
 #if _HAS_CXX20
 #ifndef __cpp_lib_latch
 #error __cpp_lib_latch is not defined
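User code can detect the feature the same way the test does, via the new macro (a sketch):

    #include <version>

    #if defined(__cpp_lib_jthread) && __cpp_lib_jthread >= 201911L
    #include <thread>
    using worker_thread = std::jthread; // stops and joins on destruction
    #else
    #include <thread>
    using worker_thread = std::thread; // fallback: caller must join manually
    #endif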
diff --git a/tests/std/tests/include_each_header_alone_matrix.lst b/tests/std/tests/include_each_header_alone_matrix.lst
index 71cb23f8d3d..6728e9f7124 100644
--- a/tests/std/tests/include_each_header_alone_matrix.lst
+++ b/tests/std/tests/include_each_header_alone_matrix.lst
@@ -62,6 +62,7 @@ PM_CL="/DMEOW_HEADER=span"
 PM_CL="/DMEOW_HEADER=sstream"
 PM_CL="/DMEOW_HEADER=stack"
 PM_CL="/DMEOW_HEADER=stdexcept"
+PM_CL="/DMEOW_HEADER=stop_token"
 PM_CL="/DMEOW_HEADER=streambuf"
 PM_CL="/DMEOW_HEADER=string"
 PM_CL="/DMEOW_HEADER=string_view"