@BillyONeal
Created June 11, 2017 06:20
Visual C++ vMajorNext <future>
// STACK TRACE FUNCTIONS
_VCP_STATIC_ASSERT(_STD is_same<
decltype(__std_async_capture_stack_trace),
decltype(RtlCaptureStackBackTrace)
>::value);
// THREADPOOL FUNCTIONS
struct _Legacy_threadpool_thunk_data
{
__std_threadpool_work_callback _Fn;
void * _Context;
};
static DWORD __stdcall _Legacy_threadpool_thunk(void * _Thunk_data_void)
{
const auto _Thunk_data = static_cast<_Legacy_threadpool_thunk_data *>(_Thunk_data_void);
const __std_threadpool_work_callback _Fn{_Thunk_data->_Fn};
void * const _Context{_Thunk_data->_Context};
_free_crt(_Thunk_data_void);
_Fn(nullptr, _Context, nullptr);
return (0);
}
extern "C" __std_win32_error __cdecl __std_threadpool_schedule_work(
__std_threadpool_work_handle * _This,
__std_threadpool_work_callback _Fn,
void * _Context
) noexcept
{ // schedule work for async(launch::async | launch::deferred)
if (__vcp_has_vista_threadpool())
{
_This->_Work_handle = __vcp_CreateThreadpoolWork(
reinterpret_cast<PTP_WORK_CALLBACK>(_Fn),
_Context,
nullptr
);
if (!_This->_Work_handle)
{
return (GetLastError());
}
__vcp_SubmitThreadpoolWork(static_cast<PTP_WORK>(_This->_Work_handle));
return (__std_win32_success);
}
if (__vcp_has_QueueUserWorkItem())
{
auto _Thunk_data = _calloc_crt_t(_Legacy_threadpool_thunk_data, 1);
if (!_Thunk_data)
{
return (ERROR_OUTOFMEMORY);
}
_Thunk_data.get()->_Fn = _Fn;
_Thunk_data.get()->_Context = _Context;
if (__vcp_QueueUserWorkItem(_Legacy_threadpool_thunk, _Thunk_data.get(), WT_EXECUTEDEFAULT))
{
_Thunk_data.detach();
return (__std_win32_success);
}
return (GetLastError());
}
return (ERROR_NOT_SUPPORTED);
}
extern "C" __std_win32_error __cdecl __std_threadpool_try_join_work(
__std_threadpool_work_handle * _This
) noexcept
{
if (_This->_Work_handle)
{
__vcp_WaitForThreadpoolWorkCallbacks(static_cast<PTP_WORK>(_This->_Work_handle), TRUE);
__vcp_CloseThreadpoolWork(static_cast<PTP_WORK>(_This->_Work_handle));
return (__std_win32_success);
}
return (ERROR_NOT_SUPPORTED);
}
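// Illustrative caller-side sketch (an assumption about intended use, not part of this file):
// the <future> header below schedules work through a __std_threadpool_work_handle and later
// tries to join with it, roughly like this:
//   __std_threadpool_work_handle _Work{};
//   if (__std_threadpool_schedule_work(&_Work, &_Some_callback, _Some_context) == __std_win32_success)
//   {
//       // ... later, if the result is no longer needed:
//       if (__std_threadpool_try_join_work(&_Work) != __std_win32_success)
//       {
//           // cancellation unavailable (e.g. the Windows XP path above); wait for the callback instead
//       }
//   }
// _Some_callback and _Some_context are placeholder names for the real callback and its state.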
// future standard header
#pragma once
#ifndef _FUTURE_
#define _FUTURE_
#ifndef RC_INVOKED
#ifdef _RESUMABLE_FUNCTIONS_SUPPORTED
#include <experimental/resumable>
#endif /* _RESUMABLE_FUNCTIONS_SUPPORTED */
#include <algorithm>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <functional>
#include <mutex>
#include <stdexcept>
#include <system_error>
#include <thread>
#include <utility>
#pragma pack(push,_CRT_PACKING)
#pragma warning(push,_STL_WARNING_LEVEL)
#pragma warning(disable: _STL_DISABLED_WARNINGS)
#pragma push_macro("new")
#undef new
#pragma warning(disable: 4793) // function compiled as native
_EXTERN_C
enum __std_future_errc : int
{
__std_future_errc_broken_promise = 1, // starts at 1 due to N4606 30.6.1 [futures.overview]/3
__std_future_errc_future_already_retrieved,
__std_future_errc_promise_already_satisfied,
__std_future_errc_no_state
};
_Check_return_
const char * __cdecl __std_future_error_get_message(_In_ __std_future_errc _ErrorId) _NOEXCEPT;
_Check_return_
unsigned short __stdcall __std_async_capture_stack_trace(
_In_ unsigned long _FramesToSkip,
_In_ unsigned long _FramesToCapture,
_Out_writes_to_(_FramesToCapture, return) void ** _BackTrace,
_Out_opt_ unsigned long * _BackTraceHash
) _NOEXCEPT;
struct __std_threadpool_work_handle
{
void * _Work_handle; // Windows API PTP_WORK; null on Windows XP
};
// matches PTP_WORK_CALLBACK
using __std_threadpool_work_callback = void (__stdcall *)(
_Inout_ void *,
_Inout_opt_ void * _Context,
_Inout_ void *
);
// schedules work for the threadpool
_Check_return_
__std_win32_error __cdecl __std_threadpool_schedule_work(
_Out_ __std_threadpool_work_handle * _This,
_In_ __std_threadpool_work_callback _Fn,
_Inout_opt_ void * _Context
) _NOEXCEPT;
// Tries to join with work on the threadpool. Note that the Windows XP threadpool does not support
// cancellation, so on that platform this function always fails.
// If this function succeeds, either the work completed on the threadpool and we have joined with
// it, or the work was removed from the threadpool's queue and not invoked.
_Check_return_
__std_win32_error __cdecl __std_threadpool_try_join_work(
_Inout_ __std_threadpool_work_handle * _This
) _NOEXCEPT;
_END_EXTERN_C
_STD_BEGIN
// ALIAS TEMPLATE _Alloc_ptr_traits
template<class _Alloc>
using _Alloc_ptr_traits = pointer_traits<typename allocator_traits<_Alloc>::pointer>;
// ENUM CLASS future_errc
enum class future_errc
{
broken_promise = __std_future_errc_broken_promise,
future_already_retrieved = __std_future_errc_future_already_retrieved,
promise_already_satisfied = __std_future_errc_promise_already_satisfied,
no_state = __std_future_errc_no_state
};
// ENUM CLASS launch
enum class launch
{ // names for launch options passed to async
async = 0x1,
deferred = 0x2,
_Threadpool = 0x3 // also launch::async | launch::deferred
};
_BITMASK_OPS(launch)
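// Usage sketch (illustrative only): _BITMASK_OPS gives launch the bitwise operators, so the
// standard combined policy can be spelled directly and has the same value as _Threadpool:
//   constexpr launch _Both = launch::async | launch::deferred; // 0x3, i.e. launch::_Threadpool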
// ENUM CLASS future_status
enum class future_status
{ // names for timed wait function returns
ready,
timeout,
deferred
};
// STRUCT TEMPLATE SPECIALIZATION is_error_code_enum
template<>
struct is_error_code_enum<future_errc>
: true_type
{ // tests for error_code enumeration
};
// CLASS _Future_error_category
class _Future_error_category
: public _Generic_error_category
{ // categorize a future error
public:
_Future_error_category() _NOEXCEPT
{ // default constructor
_Set_imaginary_address(_Future_addr);
}
virtual const char *name() const _NOEXCEPT override
{ // get name of category
return ("future");
}
virtual _STD_IDL string message(int _Errcode) const override
{ // convert to name of error
return (__std_future_error_get_message(static_cast<__std_future_errc>(_Errcode)));
}
};
// FUNCTION future_category
inline const error_category& future_category() _NOEXCEPT
{ // return error_category object for future
return (_Immortalize<_Future_error_category>());
}
// FUNCTION make_error_code (FOR FUTURE)
inline error_code make_error_code(future_errc _Errno) _NOEXCEPT
{ // make an error_code object
return (error_code(static_cast<int>(_Errno), future_category()));
}
// FUNCTION make_error_condition (FOR FUTURE)
inline error_condition make_error_condition(future_errc _Errno) _NOEXCEPT
{ // make an error_condition object
return (error_condition(static_cast<int>(_Errno), future_category()));
}
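// Usage sketch (user-side, illustrative only): because is_error_code_enum<future_errc> is
// true_type, a future_errc converts to error_code through the make_error_code overload above:
//   std::error_code _Ec = std::future_errc::no_state;
//   // _Ec.category() is std::future_category(); _Ec.message() comes from
//   // __std_future_error_get_message via _Future_error_category::message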
// CLASS future_error
class future_error
: public logic_error
{ // future exception
public:
explicit future_error(future_errc _Errcode)
: logic_error(__std_future_error_get_message(
static_cast<__std_future_errc>(_Errcode)), __std_exception_message_string_literal),
_Mycode(_STD make_error_code(_Errcode))
{ // construct from error code
}
__declspec(nothrow) future_error(const char *_Message, __std_exception_message_kind _Kind)
: logic_error(_Message, _Kind)
{
}
const error_code& code() const _NOEXCEPT
{ // return stored error code
return (_Mycode);
}
#if !_HAS_EXCEPTIONS
protected:
virtual void _Doraise() const
{ // perform class-specific exception handling
_RAISE(*this);
}
#endif /* !_HAS_EXCEPTIONS */
private:
error_code _Mycode; // the stored error code
};
// STRUCT TEMPLATE _Future_return_storage
enum class _Return_storing : char
{ // possible states of future return storage
_Nothing_stored,
_Value_stored,
_Exception_stored
};
template<class _StoredType>
struct _Future_return_storage
{ // stores either user _StoredType or an exception
union
{
char _Nothing;
_StoredType _Value;
exception_ptr _Exception;
};
_Return_storing _Storing;
_Future_return_storage()
: _Nothing(),
_Storing(_Return_storing::_Nothing_stored)
{ // default to no stored value
}
~_Future_return_storage() _NOEXCEPT
{ // don't destroy anything; users will call _Destroy
}
_Future_return_storage(const _Future_return_storage&) = delete;
_Future_return_storage& operator=(const _Future_return_storage&) = delete;
template<class _Init>
void _Set_value(_Init&& _New_value)
{ // store a return value
::new (static_cast<void *>(_STD addressof(_Value)))
_StoredType(_STD forward<_Init>(_New_value));
_Storing = _Return_storing::_Value_stored;
}
template<class _Alloc,
class _Init>
void _Set_value(_Alloc& _Al, _Init&& _New_value)
{ // store a return value constructed with _Al, R is user's R
allocator_traits<_Alloc>::construct(
_Al,
_STD addressof(_Value),
_STD forward<_Init>(_New_value)
);
_Storing = _Return_storing::_Value_stored;
}
void _Set_exception(exception_ptr _New_exception)
{ // store an exception for rethrow on future::get
::new (static_cast<void *>(_STD addressof(_Exception)))
exception_ptr(_STD move(_New_exception));
_Storing = _Return_storing::_Exception_stored;
}
void _Destroy() _NOEXCEPT
{
switch (_Storing)
{
case _Return_storing::_Nothing_stored:
// nothing to do
return;
case _Return_storing::_Value_stored:
_Value.~_StoredType();
return;
case _Return_storing::_Exception_stored:
_Exception.~exception_ptr();
return;
}
}
template<class _Alloc>
void _Destroy(_Alloc& _Al) _NOEXCEPT
{
switch (_Storing)
{
case _Return_storing::_Nothing_stored:
// nothing to do
return;
case _Return_storing::_Value_stored:
allocator_traits<_Alloc>::destroy(_Al, _STD addressof(_Value));
return;
case _Return_storing::_Exception_stored:
_Exception.~exception_ptr();
return;
}
}
};
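// Lifecycle sketch (an assumption about intended use, illustrative only): because the
// destructor above is a no-op, whoever stores into _Future_return_storage must also call
// _Destroy (or _Destroy(_Al)) exactly once, e.g.:
//   _Future_return_storage<int> _Storage;
//   _Storage._Set_value(42);   // _Storing becomes _Value_stored
//   // ... read _Storage._Value while the state is ready ...
//   _Storage._Destroy();       // runs the stored object's destructor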
// STRUCT TEMPLATE _Future_traits
struct _Nothing_stored_t {};
template<class _Rx,
bool _Is_ref = is_reference<_Rx>::value,
bool _Is_void = is_void<_Rx>::value>
struct _Future_traits
{ // adapters used by various future machinery, non-reference non-void _Rx
using _State_stores_t = _Rx; // asynchronous shared state stores this
// traits for future<R>::get()
static _Rx&& _Future_get_transform(
_Rx& _Storage) _NOEXCEPT
{ // transform the asynchronous shared state storage for future::get()
return (_STD move(_Storage));
}
// traits for shared_future<R>::get()
using _Shared_future_get_return_type = const _Rx&;
static const _Rx& _Shared_future_get_transform(
_Rx& _Storage) _NOEXCEPT
{ // transform the asynchronous shared state storage for shared_future::get()
return (_Storage);
}
// traits for storing values
template<class _Alloc,
class _Init>
static void _Store(_Alloc& _Al,
_Future_return_storage<_State_stores_t>& _Storage,
_Init&& _Val)
{ // store a value in an asynchronous shared state using _Alloc::construct
_Storage._Set_value(_Al, _STD forward<_Init>(_Val));
}
template<class _Callable,
class... _Args>
static void _Invoke(
_Future_return_storage<_State_stores_t>& _Storage,
_Callable&& _Obj, _Args&&... _Vals) _NOEXCEPT
{ // invoke a callable and store the result in an asynchronous shared state
_TRY_BEGIN
_Storage._Set_value(_STD invoke(
_STD forward<_Callable>(_Obj),
_STD forward<_Args>(_Vals)...));
_CATCH_ALL
_Storage._Set_exception(_STD current_exception());
_CATCH_END
}
template<class _Alloc,
class _Callable,
class... _Args>
static void _Invoke_al(_Alloc& _Al,
_Future_return_storage<_State_stores_t>& _Storage,
_Callable&& _Obj, _Args&&... _Vals) _NOEXCEPT
{ // invoke a callable and store the result in an asynchronous shared state, using
// an allocator
_TRY_BEGIN
_Storage._Set_value(_Al, _STD invoke(
_STD forward<_Callable>(_Obj),
_STD forward<_Args>(_Vals)...));
_CATCH_ALL
_Storage._Set_exception(_STD current_exception());
_CATCH_END
}
// traits for destruction
template<class _Alloc>
static void _Destroy_storage(_Alloc& _Al,
_Future_return_storage<_State_stores_t>& _Storage) _NOEXCEPT
{ // destroys asynchronous shared state using _Alloc::destroy()
_Storage._Destroy(_Al);
}
};
template<class _Rx>
struct _Future_traits<_Rx, true, false>
{ // special case for future<R&>
static_assert(!is_rvalue_reference<_Rx>::value, "The standard does not allow "
"future<R&&>, shared_future<R&&>, promise<R&&>, or packaged_task<R&&()>.");
using _State_stores_t = remove_reference_t<_Rx> *;
// traits for future<R&>::get()
static _Rx _Future_get_transform(
_State_stores_t _Storage) _NOEXCEPT
{ // transform the asynchronous shared state storage for future::get()
return (static_cast<_Rx>(*_Storage));
}
// traits for shared_future<R&>::get()
using _Shared_future_get_return_type = _Rx;
static _Rx _Shared_future_get_transform(
_State_stores_t _Storage) _NOEXCEPT
{ // transform the asynchronous shared state storage for shared_future::get()
return (static_cast<_Rx>(*_Storage));
}
// traits for storing values
template<class _Alloc>
static void _Store(const _Alloc&,
_Future_return_storage<_State_stores_t>& _Storage,
_State_stores_t _Val)
{ // store a value in an asynchronous shared state
// (ignoring _Alloc because we transformed user-supplied R& to R*)
_Storage._Set_value(_Val);
}
template<class _Callable,
class... _Args>
static void _Invoke(
_Future_return_storage<_State_stores_t>& _Storage,
_Callable&& _Obj, _Args&&... _Vals) _NOEXCEPT
{ // invoke a callable and store the result in an asynchronous shared state
_TRY_BEGIN
_Storage._Set_value(_STD addressof(_STD invoke(
_STD forward<_Callable>(_Obj),
_STD forward<_Args>(_Vals)...)));
_CATCH_ALL
_Storage._Set_exception(_STD current_exception());
_CATCH_END
}
template<class _Alloc,
class _Callable,
class... _Args>
static void _Invoke_al(const _Alloc&,
_Future_return_storage<_State_stores_t>& _Storage,
_Callable&& _Obj, _Args&&... _Vals) _NOEXCEPT
{ // invoke a callable and store the result in an asynchronous shared state, using
// an allocator (ignoring _Alloc because we transformed user-supplied R& to R*)
_Invoke(_Storage, _STD forward<_Callable>(_Obj), _STD forward<_Args>(_Vals)...);
}
// traits for destruction
template<class _Alloc>
static void _Destroy_storage(_Alloc&,
_Future_return_storage<_State_stores_t>& _Storage) _NOEXCEPT
{ // destroys asynchronous shared state ignoring _Alloc
_Storage._Destroy();
}
};
template<class _Rx>
struct _Future_traits<_Rx, false, true>
{ // special case for future<void>
using _State_stores_t = _Nothing_stored_t;
// traits for future<void>::get()
static _Rx _Future_get_transform(_Nothing_stored_t) _NOEXCEPT
{ // transform the asynchronous shared state storage for future::get()
// nothing to do
}
// traits for shared_future<void>::get()
using _Shared_future_get_return_type = _Rx;
static _Rx _Shared_future_get_transform(_Nothing_stored_t) _NOEXCEPT
{ // transform the asynchronous shared state storage for shared_future::get()
// nothing to do
}
// traits for storing values
template<class _Alloc>
static void _Store(const _Alloc&,
_Future_return_storage<_Nothing_stored_t>& _Storage,
_Nothing_stored_t)
{ // store a value in an asynchronous shared state (ignoring _Alloc
// because we transformed user-supplied cv-void to _Nothing_stored_t)
_Storage._Set_value(_Nothing_stored_t{});
}
template<class _Callable,
class... _Args>
static void _Invoke(
_Future_return_storage<_State_stores_t>& _Storage,
_Callable&& _Obj, _Args&&... _Vals) _NOEXCEPT
{ // invoke a callable and store the result in an asynchronous shared state
_TRY_BEGIN
_STD invoke(_STD forward<_Callable>(_Obj), _STD forward<_Args>(_Vals)...);
_Storage._Set_value(_Nothing_stored_t{});
_CATCH_ALL
_Storage._Set_exception(_STD current_exception());
_CATCH_END
}
template<class _Alloc,
class _Callable,
class... _Args>
static void _Invoke_al(const _Alloc&,
_Future_return_storage<_State_stores_t>& _Storage,
_Callable&& _Obj, _Args&&... _Vals) _NOEXCEPT
{ // invoke a callable and store the result in an asynchronous shared state, using
// an allocator (ignoring _Alloc because we transformed user-supplied
// cv-void to _Nothing_stored_t)
_Invoke(_Storage, _STD forward<_Callable>(_Obj), _STD forward<_Args>(_Vals)...);
}
// traits for destruction
template<class _Alloc>
static void _Destroy_storage(_Alloc&,
_Future_return_storage<_State_stores_t>& _Storage) _NOEXCEPT
{ // destroys asynchronous shared state ignoring _Alloc
_Storage._Destroy();
}
};
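// Summary sketch (derived from the three specializations above, illustrative only): the
// mapping from the user-facing R to what the shared state stores can be checked like so:
//   static_assert(is_same<_Future_traits<int>::_State_stores_t, int>::value, "R stored in place");
//   static_assert(is_same<_Future_traits<int&>::_State_stores_t, int *>::value, "R& stored as a pointer");
//   static_assert(is_same<_Future_traits<void>::_State_stores_t, _Nothing_stored_t>::value, "void stores nothing");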
// STRUCT TEMPLATE _Future_state_interface
struct __declspec(novtable) _Basic_future_state_interface
{ // interface for future and shared_future access to asynchronous state
// parts that do not depend on R
virtual void _Add_ref() _NOEXCEPT = 0;
virtual void _Release() _NOEXCEPT = 0;
virtual void _Wait() _NOEXCEPT = 0;
virtual future_status _Wait_for(chrono::milliseconds _Rel_time) _NOEXCEPT = 0;
virtual bool _Is_ready() const _NOEXCEPT = 0; // advisory only, for coroutines
_Basic_future_state_interface() = default;
_Basic_future_state_interface(const _Basic_future_state_interface&) = delete;
_Basic_future_state_interface& operator=(const _Basic_future_state_interface&) = delete;
protected:
~_Basic_future_state_interface() _NOEXCEPT = default;
};
template<class _StoredType>
struct __declspec(novtable) _Future_state_interface
: _Basic_future_state_interface
{ // interface for future and shared_future access to asynchronous states
virtual _Future_return_storage<_StoredType>& _Get_value() _NOEXCEPT = 0;
protected:
~_Future_state_interface() = default;
};
// FUNCTION TEMPLATE _Verify_future_state
template<class _StateType> inline
void _Verify_future_state(_StateType * _Ptr)
{ // verify that the state is valid; otherwise, throw no_state
// (technically if user code fails here, it has triggered undefined behavior)
if (!_Ptr)
{
_THROW(future_error, future_errc::no_state);
}
}
// CLASS TEMPLATE _Future_base
template<class _Rx>
class shared_future;
class _Future_base
{ // implements future/shared_future functions for interaction with
// asynchronous shared state that do not depend on _Rx
public:
bool _Is_ready() const _NOEXCEPT
{ // advises coroutines whether the shared state is nominally ready
// Note that calling wait after _Is_ready returns true may block for
// a brief period while the asynchronous provider is performing its
// handshake to give up ownership of the asynchronous shared state.
_Verify_future_state(_State);
return (_State->_Is_ready());
}
bool valid() const _NOEXCEPT
{ // check whether this future has an associated asynchronous shared state
return (_State != nullptr_t{});
}
void wait() const
{ // wait for the associated state to become ready
_Verify_future_state(_State);
_State->_Wait();
}
template<class _Rep,
class _Period>
future_status wait_for(const chrono::duration<_Rep, _Period>& _Rel_time) const
{ // wait for the associated state to become ready with relative timeout
_Verify_future_state(_State);
// ceil ensures we wait for at least as long as the supplied duration
const auto _Rel_ms = chrono::ceil<chrono::milliseconds>(_Rel_time);
return (_State->_Wait_for(_Rel_ms));
}
template<class _Clock,
class _Duration>
future_status wait_until(const chrono::time_point<_Clock, _Duration>& _Abs_time) const
{ // wait for the associated state to become ready with absolute timeout
auto _Now = _Clock::now();
if (_Abs_time <= _Now)
{ // check _Abs_time against _Now in case _Duration's backing type is unsigned
return (wait_for(_Duration::zero()));
}
for (; _Now < _Abs_time; _Now = _Clock::now())
{
const auto _Status = wait_for(_Abs_time - _Now);
if (_Status != future_status::timeout)
{
return (_Status);
}
}
return (future_status::timeout);
}
protected:
template<class _Rx>
friend class shared_future;
_Future_base() _NOEXCEPT
: _State{nullptr_t{}}
{ // default initialize a future or shared_future
}
explicit _Future_base(_Basic_future_state_interface * _Associated_state) _NOEXCEPT
: _State(_Associated_state)
{ // construct around associated state
}
_Future_base(const _Future_base&) = default;
_Future_base(_Future_base&& _Other) _NOEXCEPT
: _State{_STD exchange(_Other._State, nullptr_t{})}
{ // move ownership of associated state
}
~_Future_base() _NOEXCEPT
{ // destroy a future, potentially destroying any associated state
if (_State)
{
_State->_Release();
}
}
void _Swap(_Future_base& _Other) _NOEXCEPT
{ // swap future or shared_future instances
_STD swap(_State, _Other._State);
}
_Basic_future_state_interface * _State;
};
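// Usage sketch (user-side, illustrative only): wait_for/wait_until above reduce every timed
// wait to a millisecond-granularity relative wait on the shared state, e.g.:
//   std::future<int> _Fut = /* obtained from a promise, packaged_task, or async */;
//   if (_Fut.wait_for(std::chrono::milliseconds(50)) == std::future_status::ready)
//   {
//       int _Result = _Fut.get();
//   }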
// CLASS TEMPLATE future
struct _Future_state_deleter
{ // effectively unique_ptr for an asynchronous shared state
_Basic_future_state_interface * _State;
explicit _Future_state_deleter(_Basic_future_state_interface * _Ptr) _NOEXCEPT
: _State(_Ptr)
{ // make a state deleter that destroys shared state on scope exit
}
_Future_state_deleter(const _Future_state_deleter&) = delete;
_Future_state_deleter& operator=(const _Future_state_deleter&) = delete;
~_Future_state_deleter() _NOEXCEPT
{ // destroy shared state on scope exit
_State->_Release();
}
};
template<class _Rx>
class future
: public _Future_base
{ // describes a value that can be retrieved some time in the future
using _Mytraits = _Future_traits<_Rx>;
using _State_stores_t = typename _Mytraits::_State_stores_t;
using _TState_ptr_t = _Future_state_interface<_State_stores_t> *;
public:
explicit future(_TState_ptr_t _Associated_state) _NOEXCEPT
: _Future_base(_Associated_state)
{ // construct a future for a given associated state
// STL internal use only
}
future() = default;
future(future&&) = default;
future(const future&) = delete;
~future() _NOEXCEPT = default;
inline shared_future<_Rx> share() _NOEXCEPT;
future& operator=(const future&) = delete;
future& operator=(future&& _Other) _NOEXCEPT
{ // move-assign a future
future(_STD move(_Other))._Swap(*this);
return (*this);
}
_Rx get()
{ // get stored return value from the associated state, or throw
_Verify_future_state(_State);
_Future_state_deleter _Deleter{_STD exchange(_State, nullptr_t{})};
_Future_return_storage<_State_stores_t>& _Return_value
= static_cast<_TState_ptr_t>(_Deleter._State)->_Get_value();
if (_Return_value._Storing == _Return_storing::_Exception_stored)
{
_STD rethrow_exception(_Return_value._Exception);
}
return (_Mytraits::_Future_get_transform(_Return_value._Value));
}
};
// CLASS TEMPLATE shared_future
template<class _Rx>
class shared_future
: public _Future_base
{ // describes a value that can be retrieved some time in the future to which access is shared
using _Mytraits = _Future_traits<_Rx>;
using _State_stores_t = typename _Mytraits::_State_stores_t;
using _TState_ptr_t = _Future_state_interface<_State_stores_t> *;
public:
shared_future() = default;
shared_future(const shared_future& _Other) _NOEXCEPT
: _Future_base(_Other)
{ // copy a shared_future
if (_State)
{
_State->_Add_ref();
}
}
shared_future(future<_Rx>&& _Other) _NOEXCEPT
: _Future_base{_STD exchange(_Other._State, nullptr_t{})}
{ // transform a future into a shared_future
}
shared_future(shared_future&&) = default;
~shared_future() _NOEXCEPT = default;
shared_future& operator=(const shared_future& _Other) _NOEXCEPT
{ // copy assign a shared_future
if (_State != _Other._State)
{
if (_State)
{
_State->_Release();
}
_State = _Other._State;
if (_State)
{
_State->_Add_ref();
}
}
return (*this);
}
shared_future& operator=(shared_future&& _Other) _NOEXCEPT
{ // move assign a shared_future
shared_future(_STD move(_Other))._Swap(*this);
return (*this);
}
typename _Mytraits::_Shared_future_get_return_type get() const
{ // get stored return value from the associated state, or throw
_Verify_future_state(_State);
_Future_return_storage<_State_stores_t>& _Return_value
= static_cast<_TState_ptr_t>(_State)->_Get_value();
if (_Return_value._Storing == _Return_storing::_Exception_stored)
{
_STD rethrow_exception(_Return_value._Exception);
}
return (_Mytraits::_Shared_future_get_transform(_Return_value._Value));
}
};
template<class _Rx> inline
shared_future<_Rx> future<_Rx>::share() _NOEXCEPT
{ // transform a future into a shared_future
return (shared_future<_Rx>(_STD move(*this)));
}
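// Usage sketch (user-side, illustrative only): share() hands the state to a shared_future,
// whose get() returns _Shared_future_get_return_type (a const reference for object types)
// and may be called repeatedly from multiple copies:
//   std::shared_future<int> _Sf = _Some_future.share(); // _Some_future.valid() is now false
//   const int& _A = _Sf.get();
//   const int& _B = _Sf.get(); // unlike future::get, this does not consume the state
// _Some_future is a placeholder name for a valid std::future<int>.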
// CLASS _Shared_state_synchronizer
struct _Packaged_task_refcounting_tag {};
struct _Shared_state_synchronizer
{ // asynchronous shared state synchronization; controls threads waiting for
// and synchronizing with the asynchronous shared state becoming ready
mutex _Mtx;
_STDEXT condition_variable_light _Condition;
_Atomic_reference_count _Future_counter;
atomic<unsigned char> _Control;
static constexpr unsigned char _Ready_bit{1};
static constexpr unsigned char _Waiting_bit{2};
static constexpr unsigned char _All_futures_dead_bit{4};
_Shared_state_synchronizer()
: _Mtx(),
_Condition(),
_Future_counter(1), // logically, when an asynchronous provider is constructed, its future is already "alive" inside it
_Control()
{ // initialize _Shared_state_synchronizer
}
explicit _Shared_state_synchronizer(_Packaged_task_refcounting_tag)
: _Mtx(),
_Condition(),
_Future_counter(2), // packaged_task also acts like a future due to packaged_task::reset
_Control()
{ // initialize _Shared_state_synchronizer for packaged_task
}
void _Add_ref() _NOEXCEPT
{ // increments the number of futures alive (called by shared_future copy ctor, NOT promise::get_future)
_Future_counter._Add_ref();
}
bool _Release_block() _NOEXCEPT
{ // decrements the number of futures alive; returns whether any are left
// (used by the async policies, which synchronize with the make-ready thread on their own)
return (_Future_counter._Release() == 0);
}
bool _Release() _NOEXCEPT
{ // decrements the number of futures alive; returns whether no references to *this remain
if (_Future_counter._Release() != 0)
{
return (false);
}
if (_Control.load() & _Waiting_bit)
{ // If we are destroying the last future and the waiting bit is set, then timeout occurred.
// We take the lock here so that we don't try to delete this while _Make_ready is
// unlocking the lock.
lock_guard<mutex> _Lck(_Mtx); // enforces memory order
return ((_Control.exchange(_All_futures_dead_bit, memory_order_relaxed) & _Ready_bit) != 0);
}
return ((_Control.exchange(_All_futures_dead_bit) & _Ready_bit) != 0);
}
void _Wait() _NOEXCEPT
{ // waits for the associated state to become ready
// this compare/exchange verifies that the ready bit is unset, and, if so,
// sets the waiting bit
unsigned char _Local_control{0};
const bool _Set_waiting_bit{_Control.compare_exchange_strong(_Local_control, _Waiting_bit)};
if (_Set_waiting_bit
|| ((_Local_control & _Ready_bit) == 0)) // CEX fails if another thread is already waiting
{ // if we successfully set the waiting bit, go to sleep on _Condition until ready
unique_lock<mutex> _Lck(_Mtx); // enforces memory ordering on _Control
_Condition.wait(_Lck, [this] { return ((_Control.load(memory_order_relaxed) & _Ready_bit) != 0); });
_Control.store(_Ready_bit, memory_order_relaxed); // clear waiting bit
}
}
future_status _Wait_for(chrono::milliseconds _Rel_time) _NOEXCEPT
{ // waits for the associated state to become ready with timeout
unsigned char _Local_control{0}; // same atomics rationale as _Wait
const bool _Set_waiting_bit{_Control.compare_exchange_strong(_Local_control, _Waiting_bit)};
if (_Set_waiting_bit
|| ((_Local_control & _Ready_bit) == 0))
{
unique_lock<mutex> _Lck(_Mtx);
if (_Condition.wait_for(_Lck, _Rel_time,
[this] { return ((_Control.load(memory_order_relaxed) & _Ready_bit) != 0); }))
{ // clear waiting bit
_Control.store(_Ready_bit, memory_order_relaxed);
return (future_status::ready);
}
else
{ // waiting bit left set
return (future_status::timeout);
}
}
return (future_status::ready);
}
bool _Make_ready() _NOEXCEPT
{ // atomically give up ownership and set ready
// returns whether no references to *this remain
unsigned char _Local_control{0};
if (_Control.compare_exchange_strong(_Local_control, _Ready_bit))
{ // if waiting bit was not set, we can just set the ready bit
// to give up control; no notify necessary because nobody is waiting
return (false);
}
if (_Local_control & _All_futures_dead_bit)
{ // if all futures dead bit was set, we can just delete this
// without setting the ready bit, since nobody is listening
return (true);
}
// otherwise, wake up _Condition
lock_guard<mutex> _Lck(_Mtx);
// notify_all must be done under _Mtx to ensure future threads don't return
// from _Wait until we have given up ownership; last thing this thread touches
// in *this is unlocking _Mtx
_Condition.notify_all();
return ((_Control.fetch_or(_Ready_bit) & _All_futures_dead_bit) != 0);
}
bool _Is_ready() const _NOEXCEPT
{ // advisory only, does not synchronize with the shared state becoming ready
// (for example, used after joining with the threadpool, which provides its
// own synchronization)
return ((_Control.load() & _Ready_bit) != 0);
}
};
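// Protocol sketch (derived from the members above, illustrative only): _Control packs three
// bits so that the ready handshake avoids _Mtx when it can.
//   waiter      (_Wait):       CAS 0 -> _Waiting_bit, then sleeps on _Condition under _Mtx
//   provider    (_Make_ready): CAS 0 -> _Ready_bit; if a waiter was observed, notify_all under _Mtx
//   last future (_Release):    sets _All_futures_dead_bit so that whichever of the provider and
//                              the last future finishes second knows it must destroy the state
// When the state becomes ready before anyone waits, neither side ever takes _Mtx.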
#ifndef _M_CEE
// FUNCTION _Schedule_ready_at_thread_exit
template<class _StateType> inline
void __cdecl _Make_ready_at_thread_exit_callback(void * const _Void_state, void *) _NOEXCEPT
{ // TLS callback to make an asynchronous shared state ready at thread exit
static_cast<_StateType *>(_Void_state)->_Make_ready();
}
template<class _StateType> inline
void _Schedule_ready_at_thread_exit(_StateType * _State)
{ // registers an asynchronous shared state to be made ready at thread exit
if (!__dyn_tls_register_final_dtor(
&_Make_ready_at_thread_exit_callback<_StateType>, _State, nullptr_t{}))
{
_THROW(bad_alloc, _EMPTY_ARGUMENT); // TRANSITION, VSO#235214
}
}
#endif /* _M_CEE */
// FUNCTION TEMPLATE _Make_associated_state
template<class _StateType,
class _Alloc,
class... _Extra> inline
_StateType * _Make_associated_state(const _Alloc& _Al, _Extra&&... _Extras)
{ // constructs an allocator-aware asynchronous shared state
using _Rebound_ty = typename allocator_traits<_Alloc>::template rebind_alloc<_StateType>;
_Rebound_ty _Rebound{_Al};
_StateType * const _State_ptr = _Unfancy(allocator_traits<_Rebound_ty>::allocate(_Rebound, 1));
::new (static_cast<void *>(_State_ptr)) _StateType(_STD move(_Rebound),
_STD forward<_Extra>(_Extras)...);
return (_State_ptr);
}
// FUNCTION TEMPLATE _Destroy_state
template<class _StateType> inline
void _Destroy_state(_StateType * const _This) _NOEXCEPT
{ // destroys an allocator-aware asynchronous shared state
using _Traits = typename _StateType::_Mytraits;
using _This_alloc_t = typename _StateType::_This_alloc_t;
// move alloc from *this to stack (so alloc isn't trying to deallocate itself)
_This_alloc_t _Al_stack{_STD move(_This->_Data._Get_first())};
_Traits::_Destroy_storage(_Al_stack, _This->_Data._Get_second()._Return_storage);
_This->~_StateType();
allocator_traits<_This_alloc_t>::deallocate(_Al_stack,
_Alloc_ptr_traits<_This_alloc_t>::pointer_to(*_This), 1);
}
// FUNCTION TEMPLATE _Abandon_state
template<class _StateType> inline
void _Abandon_state(_StateType * const _State, const bool _Future_retrieved,
const bool _Value_set) _NOEXCEPT
{ // abandon state, all ye who enter here
if (_State)
{
if (!_Future_retrieved)
{
_State->_Release();
}
if (!_Value_set)
{
_State->_Set_exception_and_make_ready(_STD make_exception_ptr(
future_error{future_errc::broken_promise}));
}
}
}
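// Behavior sketch (user-side, illustrative only): _Abandon_state is what turns a destroyed,
// unsatisfied provider into the broken_promise error observed through its future:
//   std::future<int> _Fut;
//   {
//       std::promise<int> _Prom;
//       _Fut = _Prom.get_future();
//   }               // promise destroyed without set_value/set_exception
//   _Fut.get();     // throws std::future_error with future_errc::broken_promise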
// STRUCT TEMPLATE _Promise_state_interface
template<class _StoredType>
struct __declspec(novtable) _Promise_state_interface
: _Future_state_interface<_StoredType>
{ // interface for promise interaction with asynchronous shared state
virtual void _Release() _NOEXCEPT = 0;
virtual void _Make_ready() _NOEXCEPT = 0;
virtual void _Set_exception(exception_ptr) _NOEXCEPT = 0;
virtual void _Set_exception_and_make_ready(exception_ptr) _NOEXCEPT = 0;
virtual void _Set_value(const _StoredType&) = 0;
virtual void _Set_value(_StoredType&&) = 0;
virtual void _Set_value_and_make_ready(const _StoredType&) = 0;
virtual void _Set_value_and_make_ready(_StoredType&&) = 0;
protected:
~_Promise_state_interface() _NOEXCEPT = default;
};
// STRUCT TEMPLATE _Promise_state
template<class _StoredType>
struct _Promise_state_data
{ // data members for _Promise_state_noncopyable
_Future_return_storage<_StoredType> _Return_storage;
_Shared_state_synchronizer _Sync;
};
template<class _Rx,
class _Alloc>
struct _Promise_state final
: _Promise_state_interface<typename _Future_traits<_Rx>::_State_stores_t>
{ // implementation of asynchronous shared state for promises
using _Mytraits = _Future_traits<_Rx>;
using _State_stores_t = typename _Mytraits::_State_stores_t;
using _This_alloc_t = typename allocator_traits<_Alloc>::template rebind_alloc<_Promise_state>;
_Compressed_pair<_This_alloc_t, _Promise_state_data<_State_stores_t>> _Data;
explicit _Promise_state(_Alloc&& _Al)
: _Data(_One_then_variadic_args_t{}, _STD move(_Al))
{ // create a new promise state
}
virtual void _Add_ref() _NOEXCEPT override
{ // forward add ref to the synchronizer
_Data._Get_second()._Sync._Add_ref();
}
virtual void _Release() _NOEXCEPT override
{ // forward release to the synchronizer and delete this if futures and promise are dead
if (_Data._Get_second()._Sync._Release())
{
_Destroy_state(this);
}
}
virtual void _Wait() _NOEXCEPT override
{ // forward wait to the synchronizer
_Data._Get_second()._Sync._Wait();
}
virtual future_status _Wait_for(chrono::milliseconds _Rel_time) _NOEXCEPT override
{ // forward wait_for to the synchronizer
return (_Data._Get_second()._Sync._Wait_for(_Rel_time));
}
virtual bool _Is_ready() const _NOEXCEPT override
{ // forward _Is_ready to the synchronizer
return (_Data._Get_second()._Sync._Is_ready());
}
virtual _Future_return_storage<_State_stores_t>& _Get_value() _NOEXCEPT override
{ // wait for the asynchronous shared state to become ready and return the result
auto& _Second = _Data._Get_second();
_Second._Sync._Wait();
return (_Second._Return_storage);
}
virtual void _Make_ready() _NOEXCEPT override
{ // atomically make this shared state ready and give up ownership
// delete this if all futures are dead
if (_Data._Get_second()._Sync._Make_ready())
{
_Destroy_state(this);
}
}
virtual void _Set_exception(exception_ptr _Ex) _NOEXCEPT override
{ // store an exception in this shared state
auto& _Second = _Data._Get_second();
_Second._Return_storage._Set_exception(_STD move(_Ex));
}
virtual void _Set_exception_and_make_ready(exception_ptr _Ex) _NOEXCEPT override
{ // store an exception in this shared state, and become ready
_Set_exception(_STD move(_Ex));
_Make_ready();
}
void _Set_value_impl(const _State_stores_t& _Init, true_type)
{ // copy a value into this shared state for copyable _Rx
auto& _Second = _Data._Get_second();
_Mytraits::_Store(_Data._Get_first(), _Second._Return_storage, _Init);
}
void _Set_value_impl(const _State_stores_t&, false_type)
{ // (don't) copy a value into this shared state for noncopyable _Rx
_STD terminate(); // not callable, enforced by static_assert in promise<R>::set_value(const R&)
}
virtual void _Set_value(const _State_stores_t& _Init) override
{ // copy a value into this shared state
_Set_value_impl(_Init, typename is_copy_constructible<_State_stores_t>::type{});
}
virtual void _Set_value(_State_stores_t&& _Init) override
{ // move a value into this shared state
auto& _Second = _Data._Get_second();
_Mytraits::_Store(_Data._Get_first(), _Second._Return_storage, _STD move(_Init));
}
virtual void _Set_value_and_make_ready(const _State_stores_t& _Init) override
{ // copy a value into this shared state and become ready
_Set_value(_Init);
_Make_ready();
}
virtual void _Set_value_and_make_ready(_State_stores_t&& _Init) override
{ // move a value into this shared state and become ready
_Set_value(_STD move(_Init));
_Make_ready();
}
};
// FUNCTION _Verify_value_unset
inline void _Verify_value_unset(const bool _Value_set)
{ // verifies that the value is unset; if it has already been set, throws promise_already_satisfied
if (_Value_set)
{
_THROW(future_error, future_errc::promise_already_satisfied);
}
}
inline void _Verify_future_not_retrieved(const bool _Future_retrieved)
{ // verifies that the future has not yet been retrieved
if (_Future_retrieved)
{
_THROW(future_error, future_errc::future_already_retrieved);
}
}
template<class _Void> inline
void _At_thread_exit_clr_check()
{ // *_at_thread_exit functions are unimplementable under /clr
// because managed code cannot run under the loader lock
// and that is where at-thread-exit code is processed.
#ifdef _M_CEE
static_assert(_Always_false<_Void>, "*_at_thread_exit functions are unavailable with /clr");
#endif /* _M_CEE */
}
// CLASS TEMPLATE promise
template<class _Rx>
class _Promise_base
{ // implements promise
public:
_Promise_base()
: _Promise_base(allocator_arg, allocator<int>{})
{ // initializes a promise and its associated state
}
template<class _Alloc>
_Promise_base(allocator_arg_t, const _Alloc& _Al)
: _Setter_mutex(),
_State(_Make_associated_state<_Promise_state<_Rx, _Alloc>>(_Al)),
_Future_retrieved(false),
_Value_set(false)
{ // initializes a promise and its associated state using an allocator
}
_Promise_base(const _Promise_base&) = delete;
_Promise_base(_Promise_base&& _Other) _NOEXCEPT
: _Setter_mutex(),
_State(_STD exchange(_Other._State, nullptr_t{})),
_Future_retrieved(_STD exchange(_Other._Future_retrieved, false)),
_Value_set(_STD exchange(_Other._Value_set, false))
{ // move construct promise
}
~_Promise_base() _NOEXCEPT
{ // destroy a promise
_Abandon_state(_State, _Future_retrieved, _Value_set);
}
_Promise_base& operator=(const _Promise_base&) = delete;
_Promise_base& operator=(_Promise_base&& _Other) _NOEXCEPT
{ // move assign a promise
_Promise_base(_STD move(_Other))._Swap(*this); // TRANSITION, VSO#434382
return (*this);
}
void set_exception(exception_ptr _Ptr)
{ // makes this promise ready with an exception
lock_guard<mutex> _Lck(_Setter_mutex);
_Verify_value_unset(_Value_set);
_Verify_future_state(_State);
_State->_Set_exception_and_make_ready(_STD move(_Ptr));
_Value_set = true;
}
void set_exception_at_thread_exit(exception_ptr _Ptr)
{ // stores an exception in this promise and becomes ready at thread exit
_At_thread_exit_clr_check<_Rx>();
lock_guard<mutex> _Lck(_Setter_mutex);
_Verify_value_unset(_Value_set);
_Verify_future_state(_State);
_State->_Set_exception(_STD move(_Ptr));
_Schedule_ready_at_thread_exit(_State);
_Value_set = true;
}
future<_Rx> get_future()
{ // gets the future from this promise
_Verify_future_not_retrieved(_Future_retrieved);
_Verify_future_state(_State);
_Future_retrieved = true;
return (future<_Rx>{_State});
}
protected:
void _Swap(_Promise_base& _Other) _NOEXCEPT
{ // swaps this promise
_STD swap(_State, _Other._State);
_STD swap(_Future_retrieved, _Other._Future_retrieved);
_STD swap(_Value_set, _Other._Value_set);
}
template<class _Init>
void _Set_value(_Init&& _To_set)
{ // sets a value in the associated state and becomes ready
lock_guard<mutex> _Lck(_Setter_mutex);
_Verify_value_unset(_Value_set);
_Verify_future_state(_State);
_State->_Set_value_and_make_ready(_STD forward<_Init>(_To_set));
_Value_set = true; // last thing in case construct throws
}
#ifndef _M_CEE
template<class _Init>
void _Set_value_at_thread_exit(_Init&& _To_set)
{ // sets a value in the associated state and becomes ready at thread exit
lock_guard<mutex> _Lck(_Setter_mutex);
_Verify_value_unset(_Value_set);
_Verify_future_state(_State);
_State->_Set_value(_STD forward<_Init>(_To_set));
_Schedule_ready_at_thread_exit(_State);
_Value_set = true; // last thing in case construct throws
}
#endif /* _M_CEE */
mutex _Setter_mutex;
// *_State must not be touched if _Future_retrieved and _Value_set
_Promise_state_interface<typename _Future_traits<_Rx>::_State_stores_t> * _State;
bool _Future_retrieved;
bool _Value_set;
};
template<class _Rx,
bool = is_reference<_Rx>::value,
bool = is_void<_Rx>::value>
struct _Promise_set_value
: _Promise_base<_Rx>
{ // set_value interface facade for promise<R>
using _Promise_base<_Rx>::_Promise_base;
void set_value(const _Rx& _To_set)
{ // copies a value into the associated state and becomes ready
static_assert(is_copy_constructible<_Rx>::value,
"Calling promise::set_value(const R&) requires that R be copyable");
this->_Set_value(_To_set);
}
void set_value(_Rx&& _To_set)
{ // moves a value into the associated state and becomes ready
this->_Set_value(_STD move(_To_set));
}
void set_value_at_thread_exit(const _Rx& _To_set)
{ // copies a value into the associated state and schedules that state to become ready at thread exit
static_assert(is_copy_constructible<_Rx>::value,
"Calling promise::set_value_at_thread_exit(const R&) requires that R be copyable");
_At_thread_exit_clr_check<_Rx>();
this->_Set_value_at_thread_exit(_To_set);
}
void set_value_at_thread_exit(_Rx&& _To_set)
{ // moves a value into the associated state and schedules that state to become ready at thread exit
_At_thread_exit_clr_check<_Rx>();
this->_Set_value_at_thread_exit(_STD move(_To_set));
}
};
template<class _Rx>
struct _Promise_set_value<_Rx, true, false>
: _Promise_base<_Rx>
{ // set_value interface facade for promise<R&>
using _Promise_base<_Rx>::_Promise_base;
void set_value(_Rx _To_set)
{ // sets a value in the associated state and becomes ready
this->_Set_value(_STD addressof(_To_set));
}
void set_value_at_thread_exit(_Rx _To_set)
{ // sets a value in the associated state and schedules that state to become ready at thread exit
_At_thread_exit_clr_check<_Rx>();
this->_Set_value_at_thread_exit(_STD addressof(_To_set));
}
};
template<class _Rx>
struct _Promise_set_value<_Rx, false, true>
: _Promise_base<_Rx>
{ // set_value interface facade for promise<void> (and nonstandard promise<cv void>)
using _Promise_base<_Rx>::_Promise_base;
void set_value()
{ // marks this promise as ready
this->_Set_value(_Nothing_stored_t{});
}
void set_value_at_thread_exit()
{ // schedules this promise to become ready at thread exit
_At_thread_exit_clr_check<_Rx>();
this->_Set_value_at_thread_exit(_Nothing_stored_t{});
}
};
template<class _Rx>
class promise
: public _Promise_set_value<_Rx>
{ // implement promise get and swap
public:
promise() = default;
using _Promise_set_value<_Rx>::_Promise_set_value;
promise(const promise&) = delete;
promise(promise&&) = default;
promise& operator=(promise&&) = default;
void swap(promise& _Other) _NOEXCEPT
{ // swaps *this with another promise
this->_Swap(_Other);
}
};
template<class _Rx> inline
void swap(promise<_Rx>& _Lhs, promise<_Rx>& _Rhs) _NOEXCEPT
{ // swaps a promise with another promise
_Lhs.swap(_Rhs);
}
template<class _Rx,
class _Alloc>
struct uses_allocator<promise<_Rx>, _Alloc>
: true_type
{ // asserts that promise<_Ty> can use an allocator
};
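// Usage sketch (user-side, illustrative only): the promise/future handshake implemented
// above, exercised across two threads:
//   std::promise<int> _Prom;
//   std::future<int> _Fut = _Prom.get_future();
//   std::thread _Worker([_P = std::move(_Prom)]() mutable { _P.set_value(42); });
//   int _Answer = _Fut.get(); // blocks until _Set_value_and_make_ready has run
//   _Worker.join();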
// CLASS TEMPLATE _Packaged_task_state
template<class _StoredType,
class... _Args>
struct __declspec(novtable) _Packaged_task_interface
: _Future_state_interface<_StoredType>
{ // interface for packaged_task to communicate with the asynchronous shared state
virtual void _Make_ready() _NOEXCEPT = 0;
virtual void _Set_exception_and_make_ready(exception_ptr) _NOEXCEPT = 0;
virtual void _Call(_Args&&...) _NOEXCEPT = 0;
virtual void _Call_and_make_ready(_Args&&...) _NOEXCEPT = 0;
virtual _Packaged_task_interface * _Reset() = 0;
protected:
~_Packaged_task_interface() _NOEXCEPT = default;
};
template<class _StoredType,
class _Callable>
struct _Packaged_task_state_data
{
_Future_return_storage<_StoredType> _Return_storage;
_Shared_state_synchronizer _Sync;
_Callable _Obj;
template<class _CallableInit,
class = enable_if_t<!is_same<decay_t<_CallableInit>, _Packaged_task_state_data>::value>>
explicit _Packaged_task_state_data(_CallableInit&& _Obj_arg)
: _Return_storage(),
_Sync(_Packaged_task_refcounting_tag{}),
_Obj(_STD forward<_CallableInit>(_Obj_arg))
{ // initialize packaged_task state data
}
};
template<class _Rx,
class _Alloc,
class _Callable,
class... _Args>
struct _Packaged_task_state final
: _Packaged_task_interface<typename _Future_traits<_Rx>::_State_stores_t, _Args...>
{ // asynchronous shared state for packaged_task
// note that unlike the other asynchronous shared states, the existence of packaged_task::reset
// forces this asynchronous state to use a more traditional reference counting scheme; it
// does not have the ability to atomically become ready and give up ownership from the
// asynchronous provider
using _Mytraits = _Future_traits<_Rx>;
using _State_stores_t = typename _Mytraits::_State_stores_t;
using _This_alloc_t = typename allocator_traits<_Alloc>::template rebind_alloc<_Packaged_task_state>;
_Compressed_pair<_This_alloc_t, _Packaged_task_state_data<_State_stores_t, _Callable>> _Data;
template<class _AllocInit,
class _CallableInit>
_Packaged_task_state(_AllocInit&& _Al, _CallableInit&& _Obj_arg)
: _Data(_One_then_variadic_args_t{}, _STD forward<_AllocInit>(_Al),
_STD forward<_CallableInit>(_Obj_arg))
{ // initializes a packaged_task shared state
}
virtual void _Add_ref() _NOEXCEPT override
{ // forward add ref to the synchronizer
_Data._Get_second()._Sync._Add_ref();
}
virtual void _Release() _NOEXCEPT override
{ // forward release to the synchronizer and delete this if futures and promise are dead
if (_Data._Get_second()._Sync._Release())
{
_Destroy_state(this);
}
}
virtual void _Wait() _NOEXCEPT override
{ // forward wait to the synchronizer
_Data._Get_second()._Sync._Wait();
}
virtual future_status _Wait_for(chrono::milliseconds _Rel_time) _NOEXCEPT override
{ // forward wait_for to the synchronizer
return (_Data._Get_second()._Sync._Wait_for(_Rel_time));
}
virtual bool _Is_ready() const _NOEXCEPT override
{ // forward _Is_ready to the synchronizer
return (_Data._Get_second()._Sync._Is_ready());
}
virtual void _Make_ready() _NOEXCEPT override
{ // mark this shared state ready
// never destroys *this because packaged_task has an owning "future ref"
static_cast<void>(_Data._Get_second()._Sync._Make_ready());
}
virtual _Future_return_storage<_State_stores_t>& _Get_value() _NOEXCEPT override
{ // wait for the asynchronous shared state to become ready and return the result
_Wait();
return (_Data._Get_second()._Return_storage);
}
virtual void _Call(_Args&&... _Vals) _NOEXCEPT override
{ // invoke the callable stored in this asynchronous shared state, with
// supplied arguments, and stores the result in this shared state
auto& _Second = _Data._Get_second();
_Mytraits::template _Invoke_al(_Data._Get_first(), _Second._Return_storage,
_Second._Obj, _STD forward<_Args>(_Vals)...);
}
virtual void _Call_and_make_ready(_Args&&... _Vals) _NOEXCEPT override
{ // invoke the callable stored in this asynchronous shared state, with
// supplied arguments, stores the result in this shared state,
// and becomes ready
auto& _Second = _Data._Get_second();
_Mytraits::template _Invoke_al(_Data._Get_first(), _Second._Return_storage,
_Second._Obj, _STD forward<_Args>(_Vals)...);
_Make_ready();
}
virtual void _Set_exception_and_make_ready(exception_ptr _Ex) _NOEXCEPT override
{ // stores an exception in this asynchronous shared state and becomes
// ready (used for packaged_task abandonment)
_Data._Get_second()._Return_storage._Set_exception(_STD move(_Ex));
_Make_ready();
}
virtual _Packaged_task_interface<_State_stores_t, _Args...> * _Reset() override
{ // creates a new asynchronous shared state with the same callable
// as *this
auto& _Al = _Data._Get_first();
auto& _Second = _Data._Get_second();
const auto _Alloc_ptr = allocator_traits<_This_alloc_t>::allocate(_Al, 1);
_TRY_BEGIN
const auto _Result = _Unfancy(_Alloc_ptr);
::new (static_cast<void *>(_Result))
_Packaged_task_state(_Al, _STD move(_Second._Obj));
_Release(); // may delete this
return (_Result);
_CATCH_ALL
allocator_traits<_This_alloc_t>::deallocate(_Al, _Alloc_ptr, 1);
_RERAISE;
_CATCH_END
}
};
// CLASS TEMPLATE packaged_task
template<class _Rx,
class... _Args>
class _Packaged_task_base
{ // implement packaged_task with linearized function type
private:
using _State_stores_t = typename _Future_traits<_Rx>::_State_stores_t;
using _Interface_ptr = _Packaged_task_interface<_State_stores_t, _Args...> *;
protected:
_Packaged_task_base() _NOEXCEPT
: _State(nullptr_t{}),
_Future_retrieved(false),
_Called(false)
{ // default initialize a packaged_task with no associated state
}
template<class _Alloc,
class _Callable>
_Packaged_task_base(const _Alloc& _Al, _Callable&& _Obj)
: _State(_Make_associated_state<
_Packaged_task_state<_Rx, _Alloc, decay_t<_Callable>, _Args...>
>(_Al, _STD forward<_Callable>(_Obj))),
_Future_retrieved(false),
_Called(false)
{ // initialize a packaged_task with an associated state wrapping _Obj using an allocator
}
_Packaged_task_base(_Packaged_task_base&& _Other) _NOEXCEPT
: _State(_STD exchange(_Other._State, nullptr_t{})),
_Future_retrieved(_STD exchange(_Other._Future_retrieved, false)),
_Called(_STD exchange(_Other._Called, false))
{ // move construct a packaged_task
}
~_Packaged_task_base() _NOEXCEPT
{ // destroy a packaged_task
_Abandon_state(_State, _Future_retrieved, _Called);
if (_State)
{
_State->_Release();
}
}
public:
void _Swap(_Packaged_task_base& _Other) _NOEXCEPT
{ // swap packaged_task instances
_STD swap(_State, _Other._State);
_STD swap(_Future_retrieved, _Other._Future_retrieved);
_STD swap(_Called, _Other._Called);
}
bool valid() const _NOEXCEPT
{ // tests whether this packaged_task has an associated state
return (_State != nullptr_t{});
}
future<_Rx> get_future()
{ // gets the future for this packaged_task's associated state
_Verify_future_state(_State);
_Verify_future_not_retrieved(_Future_retrieved);
_Future_retrieved = true;
return (future<_Rx>{_State});
}
void operator()(_Args... _Vals)
{ // invokes callable stored in the state associated with this packaged_task and becomes ready
_Verify_future_state(_State);
_Verify_value_unset(_Called);
_State->_Call_and_make_ready(_STD forward<_Args>(_Vals)...);
_Called = true;
}
void make_ready_at_thread_exit(_Args... _Vals)
{ // invokes callable stored in the state associated with this packaged_task, and
// schedules the task to become ready at thread exit
_At_thread_exit_clr_check<_Rx>();
_Verify_future_state(_State);
_Verify_value_unset(_Called);
_State->_Call(_STD forward<_Args>(_Vals)...);
_Called = true;
_Schedule_ready_at_thread_exit(_State);
}
void reset()
{ // resets this packaged_task's state to allow execution again
_Verify_future_state(_State);
// exception safety: if _Reset throws, leave _State alone
_Interface_ptr _Old_state{_STD exchange(_State, _State->_Reset())};
_Abandon_state(_Old_state, _Future_retrieved, _Called);
_Future_retrieved = false;
_Called = false;
}
private:
_Interface_ptr _State;
bool _Future_retrieved;
bool _Called;
};
template<class _Fty>
class packaged_task
: public _Linearize_function_type< _STD _Packaged_task_base, _Fty>::type
{
private:
using _Mybase = typename _Linearize_function_type< _STD _Packaged_task_base, _Fty>::type;
public:
packaged_task() _NOEXCEPT = default;
template<class _Callable,
class = enable_if_t<!is_same<decay_t<_Callable>, packaged_task>::value>>
explicit packaged_task(_Callable&& _Obj)
: _Mybase(allocator<int>{}, _STD forward<_Callable>(_Obj))
{ // initialize a packaged_task with an associated state wrapping _Obj
}
template<class _Alloc,
class _Callable,
class = enable_if_t<!is_same<decay_t<_Callable>, packaged_task>::value>>
packaged_task(allocator_arg_t, const _Alloc& _Al, _Callable&& _Obj)
: _Mybase(_Al, _STD forward<_Callable>(_Obj))
{ // initialize a packaged_task with an associated state wrapping _Obj using an allocator
}
packaged_task(const packaged_task&) = delete;
packaged_task(packaged_task&&) _NOEXCEPT = default;
~packaged_task() _NOEXCEPT = default;
packaged_task& operator=(const packaged_task&) = delete;
packaged_task& operator=(packaged_task&& _Other) _NOEXCEPT
{ // move assign a packaged_task
packaged_task(_STD move(_Other))._Swap(*this); // TRANSITION, VSO#434382
return (*this);
}
void swap(packaged_task& _Other) _NOEXCEPT
{ // swap packaged_task instances
this->_Swap(_Other);
}
};
template<class _Fty> inline
void swap(packaged_task<_Fty>& _Lhs, packaged_task<_Fty>& _Rhs) _NOEXCEPT
{ // swaps packaged_task instances
_Lhs._Swap(_Rhs);
}
template<class _Fty,
class _Alloc>
struct uses_allocator<packaged_task<_Fty>, _Alloc>
: true_type
{ // asserts that packaged_task can use an allocator
};
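// Usage sketch (user-side, illustrative only): packaged_task runs its callable on whatever
// thread invokes it, and reset() swaps in a fresh shared state via _Reset above:
//   std::packaged_task<int(int)> _Task([](int _X) { return _X * 2; });
//   std::future<int> _First = _Task.get_future();
//   _Task(21);       // _First becomes ready with 42
//   _Task.reset();   // new state; _First keeps the old result
//   std::future<int> _Second = _Task.get_future();
//   _Task(5);        // _Second becomes ready with 10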
// ALIAS TEMPLATE _Async_call_traits_t
template<class _Callable,
class... _Args>
struct _Async_call_traits
{ // gets the _Future_traits for a callable
using type = _Future_traits<result_of_t<decay_t<_Callable>(decay_t<_Args>...)>>;
};
template<class _Callable,
class... _Args>
using _Async_call_traits_t = typename _Async_call_traits<_Callable, _Args...>::type;
// ALIAS TEMPLATE _Async_state_stores_t
template<class _Callable,
class... _Args>
using _Async_state_stores_t = typename _Async_call_traits_t<_Callable, _Args...>::_State_stores_t;
// FUNCTION TEMPLATE _Async_capture_trace
#pragma warning(push)
#pragma warning(disable: 4127) // conditional expression is constant
template<size_t _StackFrames> inline
void _Async_capture_trace(void * (&_Target)[_StackFrames], void * const _Return_address)
{
if (_StackFrames == 1)
{
_Target[0] = _Return_address;
}
else
{
unsigned short _Captured{__std_async_capture_stack_trace(1, _StackFrames, _Target, nullptr_t{})};
_Fill_unchecked(_Target + _Captured, _Target + _StackFrames, nullptr_t{});
}
}
#pragma warning(pop)
// STRUCT TEMPLATE _Async_thread_state
template<size_t _StackFrames,
class _Callable,
class... _Args>
struct _Async_thread_state final
: _Future_state_interface<_Async_state_stores_t<_Callable, _Args...>>
{ // asynchronous shared state for the async launch::async policy
_Future_return_storage<_Async_state_stores_t<_Callable, _Args...>> _Return_storage;
_Shared_state_synchronizer _Sync;
tuple<decay_t<_Callable>, decay_t<_Args>...> _Deferred_invoke;
thread _Thread;
atomic<bool> _Thread_joined;
void * _Async_stack_trace[_StackFrames]; // debugging aid only
template<size_t... _Indices>
struct _Invoker
{ // helper to pass to thread to invoke the user callable
_Async_thread_state& _State;
explicit _Invoker(_Async_thread_state& _Init) _NOEXCEPT
: _State(_Init)
{ // construct the invoker
}
_Invoker(const _Invoker&) = delete;
_Invoker(_Invoker&&) = default;
void operator()() _NOEXCEPT
{ // invoke user callable
_Async_call_traits_t<_Callable, _Args...>::_Invoke(_State._Return_storage,
_STD move(_STD get<_Indices>(_State._Deferred_invoke))...);
_State._Sync._Make_ready();
}
};
template<size_t... _Indices>
_Invoker<_Indices...> _Get_invoker(index_sequence<_Indices...>) _NOEXCEPT
{ // creates the _Invoker for *this
return (static_cast<_Invoker<_Indices...>>(*this));
}
_Async_thread_state(_Callable&& _Obj, _Args&&... _Vals, void * const _Return_address)
: _Return_storage(),
_Sync(),
_Deferred_invoke(_STD forward<_Callable>(_Obj), _STD forward<_Args>(_Vals)...),
_Thread(_Get_invoker(make_index_sequence<sizeof...(_Args) + 1>{})),
_Thread_joined(false)
{ // initializes an instance of _Async_thread_state
_Async_capture_trace(_Async_stack_trace, _Return_address);
}
virtual void _Add_ref() _NOEXCEPT override
{ // forward add ref to the synchronizer
_Sync._Add_ref();
}
virtual void _Release() _NOEXCEPT override
{ // forward release to the synchronizer, and join with the thread if this
// is the last future alive
if (!_Sync._Release_block())
{
return;
}
if (!_Thread_joined.load())
{
_Thread.join();
}
_Return_storage._Destroy();
delete this;
}
virtual void _Wait() _NOEXCEPT override
{ // forward wait to the synchronizer
// The first wait joins with the async thread.
if (_Thread_joined.exchange(true))
{
_Sync._Wait();
return;
}
_Thread.join();
}
virtual future_status _Wait_for(chrono::milliseconds _Rel_time) _NOEXCEPT override
{ // forward wait_for to the synchronizer
return (_Sync._Wait_for(_Rel_time));
}
virtual bool _Is_ready() const _NOEXCEPT override
{ // forwards _Is_ready query to the synchronizer
return (_Sync._Is_ready());
}
virtual _Future_return_storage<_Async_state_stores_t<_Callable, _Args...>>&
_Get_value() _NOEXCEPT override
{ // wait for the asynchronous shared state to become ready and return the result
_Wait();
return (_Return_storage);
}
};
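// Behavior sketch (derived from _Release/_Wait above, illustrative only): for plain
// launch::async the shared state owns a real std::thread, and the last future to be
// released joins it; that is what gives async its blocking-destructor semantics:
//   {
//       auto _Fut = std::async(std::launch::async, [] { /* work */ });
//   }   // ~future -> _Release -> _Thread.join() if nothing ever waited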
// STRUCT TEMPLATE _Async_deferred_state
template<class _Callable,
class... _Args>
struct _Async_deferred_state final
: _Future_state_interface<_Async_state_stores_t<_Callable, _Args...>>
{ // asynchronous shared state for the async launch::deferred policy
_Future_return_storage<_Async_state_stores_t<_Callable, _Args...>> _Return_storage;
_Shared_state_synchronizer _Sync;
tuple<decay_t<_Callable>, decay_t<_Args>...> _Deferred_invoke;
atomic<bool> _Task_waited;
explicit _Async_deferred_state(_Callable&& _Obj, _Args&&... _Vals)
: _Return_storage(),
_Sync(),
_Deferred_invoke(_STD forward<_Callable>(_Obj), _STD forward<_Args>(_Vals)...),
_Task_waited{false}
{ // initializes an instance of _Async_deferred_state
}
virtual void _Add_ref() _NOEXCEPT override
{ // forward add ref to the synchronizer
_Sync._Add_ref();
}
virtual void _Release() _NOEXCEPT override
{ // forward release to the synchronizer and delete this if all futures are dead
// note: user callable might not have been invoked if nobody waited
if (_Sync._Release_block())
{
_Return_storage._Destroy();
delete this;
}
}
template<size_t... _Indices>
void _Invoke_deferred(index_sequence<_Indices...>) _NOEXCEPT
{ // invokes user callable in deferred mode
_Async_call_traits_t<_Callable, _Args...>::_Invoke(_Return_storage,
_STD move(_STD get<_Indices>(_Deferred_invoke))...);
_Sync._Make_ready();
}
virtual void _Wait() _NOEXCEPT override
{ // invokes the deferred callable or waits for another thread to complete it
if (_Task_waited.exchange(true))
{
_Sync._Wait();
return;
}
_Invoke_deferred(make_index_sequence<sizeof...(_Args) + 1>{});
}
virtual future_status _Wait_for(chrono::milliseconds _Rel_time) _NOEXCEPT override
{ // forward wait_for to the synchronizer unless this state is still deferred
if (!_Task_waited.load())
{
return (future_status::deferred);
}
return (_Sync._Wait_for(_Rel_time));
}
virtual bool _Is_ready() const _NOEXCEPT override
{ // forwards _Is_ready query to the synchronizer
return (_Sync._Is_ready());
}
virtual _Future_return_storage<_Async_state_stores_t<_Callable, _Args...>>&
_Get_value() _NOEXCEPT override
{ // wait for the asynchronous shared state to become ready and return the result
_Wait();
return (_Return_storage);
}
};
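// Illustrative sketch (not part of this header; ran and lazy are illustrative
// names): launch::deferred maps onto _Async_deferred_state above, so no thread
// is spawned and the callable runs on the first thread that waits on the future.
//   #include <cassert>
//   #include <future>
//   int main() {
//       bool ran = false;
//       std::future<void> lazy = std::async(std::launch::deferred, [&ran] { ran = true; });
//       assert(!ran);  // nothing has executed yet
//       lazy.wait();   // runs the callable on this thread via _Invoke_deferred
//       assert(ran);
//   }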
// FUNCTION TEMPLATE _Std_async_threadpool_invoker
template<class _State> inline
void __stdcall _Std_async_threadpool_invoker(void *, void * _Void_state, void *) _NOEXCEPT
{ // threadpool callback for async launch::async | launch::deferred
static_cast<_State *>(_Void_state)->_Invoke_deferred(typename _State::_Index_sequence{});
}
// STRUCT TEMPLATE _Async_threadpool_state
template<size_t _StackFrames,
class _Callable,
class... _Args>
struct _Async_threadpool_state final
: _Future_state_interface<_Async_state_stores_t<_Callable, _Args...>>
{ // asynchronous shared state for the async launch::async | launch::deferred policy
using _Index_sequence = make_index_sequence<sizeof...(_Args) + 1>;
_Future_return_storage<_Async_state_stores_t<_Callable, _Args...>> _Return_storage;
_Shared_state_synchronizer _Sync;
__std_threadpool_work_handle _Threadpool_work;
tuple<decay_t<_Callable>, decay_t<_Args>...> _Deferred_invoke;
atomic<bool> _Task_waited;
const bool _Task_scheduled_on_threadpool;
void * _Async_stack_trace[_StackFrames]; // debugging aid only
_Async_threadpool_state(_Callable&& _Obj, _Args&&... _Vals, void * const _Return_address)
: _Return_storage(),
_Sync(),
_Threadpool_work(),
_Deferred_invoke(_STD forward<_Callable>(_Obj), _STD forward<_Args>(_Vals)...),
_Task_waited(false),
_Task_scheduled_on_threadpool(__std_threadpool_schedule_work(
&_Threadpool_work,
&_Std_async_threadpool_invoker<_Async_threadpool_state>,
this) == __std_win32_success)
{ // initializes an instance of _Async_threadpool_state
// note: if the threadpool failed to schedule our work, become "deferred only"
_Async_capture_trace(_Async_stack_trace, _Return_address);
}
template<size_t... _Indices>
void _Invoke_deferred(index_sequence<_Indices...>) _NOEXCEPT
{ // invokes user callable in deferred mode
_Async_call_traits_t<_Callable, _Args...>::_Invoke(_Return_storage,
_STD move(_STD get<_Indices>(_Deferred_invoke))...);
_Sync._Make_ready();
}
virtual void _Add_ref() _NOEXCEPT override
{ // forward add ref to the synchronizer
_Sync._Add_ref();
}
virtual void _Release() _NOEXCEPT override
{ // forward release to the synchronizer and delete this if all futures are dead
// note: user callable might not have been invoked if nobody waited and we were
// destroyed before the threadpool completed the work
if (!_Sync._Release_block())
{
return;
}
// if nobody waited on the task, nobody cares about the result, so try to yank
// it off the threadpool
if (!_Task_waited.load()
&& _Task_scheduled_on_threadpool
&& __std_threadpool_try_join_work(&_Threadpool_work) != __std_win32_success)
{ // failed to cancel the threadpool task, so wait for it to complete
_Sync._Wait();
}
_Return_storage._Destroy();
delete this;
}
virtual void _Wait() _NOEXCEPT override
{ // invokes the deferred callable or waits for another thread (or the threadpool) to invoke it
if (_Task_waited.exchange(true))
{ // another thread already owns running the work; just wait for the result
_Sync._Wait();
return;
}
// Try to yank the work off the threadpool if it has not yet started.
if (_Task_scheduled_on_threadpool
&& __std_threadpool_try_join_work(&_Threadpool_work) != __std_win32_success)
{ // failed to join with the threadpool; wait for the pool to complete the work
_Sync._Wait();
return;
}
// We joined with the threadpool, so the work either ran there or never started.
if (!_Sync._Is_ready())
{ // not ready, so we pulled the work off the pool's queue before it ran;
// invoke it here as a deferred callable
_Invoke_deferred(_Index_sequence{});
}
}
virtual future_status _Wait_for(chrono::milliseconds _Rel_time) _NOEXCEPT override
{ // forward wait_for to the synchronizer unless this state is still deferred
if (!_Task_waited.load())
{
return (future_status::deferred);
}
return (_Sync._Wait_for(_Rel_time));
}
virtual bool _Is_ready() const _NOEXCEPT override
{ // forwards _Is_ready query to the synchronizer
return (_Sync._Is_ready());
}
virtual _Future_return_storage<_Async_state_stores_t<_Callable, _Args...>>&
_Get_value() _NOEXCEPT override
{ // wait for the asynchronous shared state to become ready and return the result
_Wait();
return (_Return_storage);
}
};
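// Illustrative sketch (not part of this header; fut and status are illustrative
// names, and the observed status is specific to this implementation): because
// _Wait_for above checks _Task_waited first, a default-policy future reports
// future_status::deferred until some thread has actually waited on it.
//   #include <chrono>
//   #include <future>
//   int main() {
//       auto fut = std::async([] { return 1; }); // scheduled on the threadpool when available
//       auto status = fut.wait_for(std::chrono::milliseconds(0)); // future_status::deferred here
//       (void) status;
//       return fut.get() - 1; // joins the pool work, or pulls it back and runs it inline; returns 0
//   }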
// MACRO _STD_FUTURE_STACK_TRACE_FRAMES
#ifndef _STD_FUTURE_STACK_TRACE_FRAMES
#ifdef _DEBUG
#define _STD_FUTURE_STACK_TRACE_FRAMES 10
#else /* ^^^ _DEBUG ^^^ // vvv !_DEBUG vvv */
#define _STD_FUTURE_STACK_TRACE_FRAMES 1
#endif /* _DEBUG */
#endif /* _STD_FUTURE_STACK_TRACE_FRAMES */
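// Illustrative sketch (not part of this header; relying on an internal macro
// is an assumption, not a documented interface): because the definition above
// is guarded by #ifndef, a translation unit could request a deeper capture for
// the debugging-aid stack trace by defining the macro before including <future>.
//   #define _STD_FUTURE_STACK_TRACE_FRAMES 16 // capture up to 16 frames per async state
//   #include <future>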
// FUNCTION TEMPLATE async
template<class _Callable,
class... _Args> inline
future<result_of_t<decay_t<_Callable>(decay_t<_Args>...)>>
async(_Callable&& _Obj, _Args&&... _Vals)
{ // launch a piece of work on the threadpool
return (future<result_of_t<decay_t<_Callable>(decay_t<_Args>...)>>{
new _Async_threadpool_state<_STD_FUTURE_STACK_TRACE_FRAMES, _Callable, _Args...>(
_STD forward<_Callable>(_Obj), _STD forward<_Args>(_Vals)...,
_ReturnAddress()
)});
}
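// Illustrative sketch (not part of this header; f is an illustrative name):
// calling async without a policy uses the threadpool-backed state above rather
// than a dedicated thread per call; arguments are decay-copied into the state.
//   #include <future>
//   #include <string>
//   int main() {
//       auto f = std::async([](std::string s) { return s.size(); }, std::string("data.txt"));
//       return f.get() == 8 ? 0 : 1; // waits for the pool, or runs the work inline
//   }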
template<class _Callable,
class... _Args> inline
future<result_of_t<decay_t<_Callable>(decay_t<_Args>...)>>
async(launch _Policy, _Callable&& _Obj, _Args&&... _Vals)
{ // wrap a piece of work with a future, and schedule it according to
// the indicated _Policy:
// launch::async spawns the work on a new thread
// launch::deferred does not spawn a thread; the work runs lazily when the future is waited on
// launch::async | launch::deferred schedules the work on the threadpool
_Future_state_interface<_Async_state_stores_t<_Callable, _Args...>> * _Result_state;
switch (_Policy)
{
case launch::async:
_Result_state = new _Async_thread_state<_STD_FUTURE_STACK_TRACE_FRAMES, _Callable, _Args...>(
_STD forward<_Callable>(_Obj), _STD forward<_Args>(_Vals)...,
_ReturnAddress());
break;
case launch::deferred:
_Result_state = new _Async_deferred_state<_Callable, _Args...>(
_STD forward<_Callable>(_Obj), _STD forward<_Args>(_Vals)...);
break;
case launch::_Threadpool:
_Result_state = new _Async_threadpool_state<_STD_FUTURE_STACK_TRACE_FRAMES, _Callable, _Args...>(
_STD forward<_Callable>(_Obj), _STD forward<_Args>(_Vals)...,
_ReturnAddress());
break;
default:
_STD terminate();
}
return (future<result_of_t<decay_t<_Callable>(decay_t<_Args>...)>>{_Result_state});
}
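// Illustrative sketch (not part of this header; a, d, and p are illustrative
// names): the explicit-policy overload above selects one of the three shared
// states; OR-ing the standard policies selects the threadpool-backed one.
//   #include <future>
//   int main() {
//       auto a = std::async(std::launch::async, [] { return 1; });                         // new thread
//       auto d = std::async(std::launch::deferred, [] { return 2; });                      // runs inside get()
//       auto p = std::async(std::launch::async | std::launch::deferred, [] { return 3; }); // threadpool
//       return a.get() + d.get() + p.get() - 6; // 0
//   }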
_STD_END
#ifdef _RESUMABLE_FUNCTIONS_SUPPORTED
_STD_X_BEGIN
template<class _Ty,
class... _ArgTypes>
struct coroutine_traits<future<_Ty>, _ArgTypes...>
{ // defines resumable traits for functions returning future<_Ty>
struct promise_type
{
promise<_Ty> _MyPromise;
future<_Ty> get_return_object()
{
return (_MyPromise.get_future());
}
bool initial_suspend() const
{
return (false);
}
bool final_suspend() const
{
return (false);
}
template<class _Ut>
void return_value(_Ut&& _Value)
{
_MyPromise.set_value(_STD forward<_Ut>(_Value));
}
void set_exception(exception_ptr _Exc)
{
_MyPromise.set_exception(_STD move(_Exc));
}
};
};
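// Illustrative sketch (not part of this header; compute_async is an
// illustrative name and assumes this era's experimental coroutine support,
// e.g. compiling with /await): with the traits above, a resumable function can
// co_return directly into a future through the embedded promise.
//   #include <future>
//   std::future<int> compute_async() {
//       co_return 42; // delivered via promise_type::return_value / promise<int>::set_value
//   }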
template<class... _ArgTypes>
struct coroutine_traits<future<void>, _ArgTypes...>
{ // defines resumable traits for functions returning future<void>
struct promise_type
{
promise<void> _MyPromise;
future<void> get_return_object()
{
return (_MyPromise.get_future());
}
bool initial_suspend() const
{
return (false);
}
bool final_suspend() const
{
return (false);
}
void return_void()
{
_MyPromise.set_value();
}
void set_exception(exception_ptr _Exc)
{
_MyPromise.set_exception(_STD move(_Exc));
}
};
};
_STD_X_END
_STD_BEGIN
template<class _Ty>
bool await_ready(future<_Ty>& _Fut)
{
return (_Fut._Is_ready());
}
template<class _Ty>
void await_suspend(future<_Ty>& _Fut,
experimental::coroutine_handle<> _ResumeCb)
{ // change to .then when future gets .then
thread _WaitingThread([&_Fut, _ResumeCb]{
_Fut.wait();
_ResumeCb();
});
_WaitingThread.detach();
}
template<class _Ty>
auto await_resume(future<_Ty>& _Fut)
{
return (_Fut.get());
}
_STD_END
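// Illustrative sketch (not part of this header; twice_async is an illustrative
// name, and whether co_await dispatches to these free adaptors depends on this
// era's experimental /await implementation): await_ready short-circuits a ready
// future, await_suspend resumes the coroutine from a detached waiter thread,
// and await_resume yields the result of get().
//   #include <future>
//   std::future<int> twice_async(int x) {
//       int v = co_await std::async([x] { return x; }); // suspends until the inner future is ready
//       co_return v * 2;
//   }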
#endif /* _RESUMABLE_FUNCTIONS_SUPPORTED */
#pragma pop_macro("new")
#pragma warning(pop)
#pragma pack(pop)
#endif /* RC_INVOKED */
#endif /* _FUTURE_ */
/*
* Copyright (c) by P.J. Plauger. All rights reserved.
* Consult your license regarding permissions and restrictions.
V6.50:0009 */