Skip to content

Instantly share code, notes, and snippets.

@lewissbaker
Created September 26, 2024 00:01
Show Gist options
  • Save lewissbaker/86cee0a61a007b66ba537228917c3672 to your computer and use it in GitHub Desktop.
Benchmarks for single_inplace_stop_token
/*
* Copyright (c) 2021-2022 Facebook, Inc. and its affiliates
* Copyright (c) 2021-2024 NVIDIA Corporation
*
* Licensed under the Apache License Version 2.0 with LLVM Exceptions
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* https://llvm.org/LICENSE.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <version>
#include <cstdint>
#include <utility>
#include <type_traits>
#include <atomic>
#include <thread>
#include <concepts>
#include <cassert>
#include <xmmintrin.h>
// NOTE: std::inplace_stop_token implementation taken from https://github.com/NVIDIA/stdexec
//
// Some minor modifications applied:
// - added template deduction guide to inplace_stop_callback
// - added optimisation to request_stop() to avoid retaking lock after calling
// the last stop-callback
// - added _mm_pause() call inside the __spin_wait() implementation
namespace std {
struct inplace_stop_source;
struct inplace_stop_token;
namespace __stok {
// Type-erased node in the intrusive doubly-linked list of stop-callbacks
// maintained by inplace_stop_source. Derived callback types supply the
// __execute_fn_t trampoline that invokes the stored callable.
struct __inplace_stop_callback_base {
// Invokes the concrete callback via the stored trampoline.
void __execute() noexcept {
this->__execute_(this);
}
protected:
using __execute_fn_t = void(__inplace_stop_callback_base*) noexcept;
explicit __inplace_stop_callback_base( //
const inplace_stop_source* __source, //
__execute_fn_t* __execute) noexcept
: __source_(__source)
, __execute_(__execute) {
}
// Links this node into the source's callback list, or runs the callback
// inline if a stop was already requested (defined later in this file).
void __register_callback_() noexcept;
friend inplace_stop_source;
const inplace_stop_source* __source_;
__execute_fn_t* __execute_;
// Intrusive list links. __prev_ptr_ is nullptr once the node has been
// unlinked (request_stop() clears it before executing the callback).
__inplace_stop_callback_base* __next_ = nullptr;
__inplace_stop_callback_base** __prev_ptr_ = nullptr;
// Points at a stack-local flag inside request_stop() so a callback that
// deregisters itself during its own execution can be detected.
bool* __removed_during_callback_ = nullptr;
// Set once the callback has finished executing; __remove_callback_()
// spins on this when racing with a concurrent request_stop().
std::atomic<bool> __callback_completed_{false};
};
// Backoff helper for short spin-waits: early calls execute a growing burst
// of CPU pause instructions; once the call count passes the threshold,
// each subsequent call yields the thread instead.
struct __spin_wait {
  __spin_wait() noexcept = default;

  void __wait() noexcept {
    const std::uint32_t __prev = __count_++;
    if (__prev < __yield_threshold_) {
      // Pause for one more iteration than the previous call did.
      std::uint32_t __i = 0;
      do {
        _mm_pause();
      } while (++__i <= __prev);
    } else {
      // Keep yielding forever: if the counter wrapped around to zero,
      // pin it back above the threshold rather than restarting the
      // pause phase.
      if (__count_ == 0)
        __count_ = __yield_threshold_;
      std::this_thread::yield();
    }
  }

 private:
  static constexpr std::uint32_t __yield_threshold_ = 20;
  std::uint32_t __count_ = 0;
};
} // namespace __stok
// [stoptoken.never], class never_stop_token
//
// A stop-token on which a stop can never be requested. Both queries are
// compile-time constants, so code templated on the token type can optimise
// away its cancellation paths entirely.
struct never_stop_token {
 private:
  // Callback type that accepts (and ignores) any callable; it is never run.
  struct __callback_type {
    explicit __callback_type(never_stop_token, auto&&) noexcept {
    }
  };

 public:
  template <class>
  using callback_type = __callback_type;

  // A stop request never arrives...
  static constexpr bool stop_requested() noexcept {
    return false;
  }

  // ...and never can.
  static constexpr bool stop_possible() noexcept {
    return false;
  }

  bool operator==(const never_stop_token&) const noexcept = default;
};
template <class _Callback>
class inplace_stop_callback;
// [stopsource.inplace], class inplace_stop_source
//
// Owner of the stop state. Registered stop-callbacks form an intrusive
// list protected by a tiny spin-lock that is packed into the same atomic
// byte as the stop-requested flag.
class inplace_stop_source {
public:
inplace_stop_source() noexcept = default;
~inplace_stop_source();
// Immovable/uncopyable: registered callbacks hold pointers to this object.
inplace_stop_source(inplace_stop_source&&) = delete;
auto get_token() const noexcept -> inplace_stop_token;
// Requests a stop and runs all registered callbacks on the calling
// thread; returns false if a stop had already been requested.
auto request_stop() noexcept -> bool;
auto stop_requested() const noexcept -> bool {
return (__state_.load(std::memory_order_acquire) & __stop_requested_flag_) != 0;
}
private:
friend inplace_stop_token;
friend __stok::__inplace_stop_callback_base;
template <class>
friend class inplace_stop_callback;
// Spin-lock helpers. __lock_() returns the pre-lock state byte, which the
// caller later hands back to __unlock_().
auto __lock_() const noexcept -> uint8_t;
void __unlock_(uint8_t) const noexcept;
auto __try_lock_unless_stop_requested_(bool) const noexcept -> bool;
auto __try_add_callback_(__stok::__inplace_stop_callback_base*) const noexcept -> bool;
void __remove_callback_(__stok::__inplace_stop_callback_base*) const noexcept;
// Bit 0 of __state_: stop requested. Bit 1: callback-list lock held.
static constexpr uint8_t __stop_requested_flag_ = 1;
static constexpr uint8_t __locked_flag_ = 2;
mutable std::atomic<uint8_t> __state_{0};
// Head of the intrusive callback list; guarded by the lock bit.
mutable __stok::__inplace_stop_callback_base* __callbacks_ = nullptr;
// Id of the thread running request_stop(); read by __remove_callback_()
// to detect deregistration from within a callback on the same thread.
std::thread::id __notifying_thread_;
};
// [stoptoken.inplace], class inplace_stop_token
//
// Lightweight, copyable handle referring to an inplace_stop_source (or to
// nothing). Queries forward to the source; a default-constructed or
// moved-from token refers to nothing and can never observe a stop.
class inplace_stop_token {
 public:
  template <class _Fun>
  using callback_type = inplace_stop_callback<_Fun>;

  // A token with no associated source.
  inplace_stop_token() noexcept
    : __source_(nullptr) {
  }

  inplace_stop_token(const inplace_stop_token& __other) noexcept = default;

  // Moving transfers the association and empties the source token.
  inplace_stop_token(inplace_stop_token&& __other) noexcept
    : __source_(std::exchange(__other.__source_, {})) {
  }

  inplace_stop_token& operator=(const inplace_stop_token& __other) noexcept = default;

  inplace_stop_token& operator=(inplace_stop_token&& __other) noexcept {
    __source_ = std::exchange(__other.__source_, nullptr);
    return *this;
  }

  [[nodiscard]]
  bool stop_requested() const noexcept {
    if (__source_ == nullptr)
      return false;
    return __source_->stop_requested();
  }

  [[nodiscard]]
  bool stop_possible() const noexcept {
    return __source_ != nullptr;
  }

  void swap(inplace_stop_token& __other) noexcept {
    std::swap(__source_, __other.__source_);
  }

  bool operator==(const inplace_stop_token&) const noexcept = default;

 private:
  friend inplace_stop_source;
  template <class>
  friend class inplace_stop_callback;

  // Only the source itself can mint a non-empty token.
  explicit inplace_stop_token(const inplace_stop_source* __source) noexcept
    : __source_(__source) {
  }

  const inplace_stop_source* __source_;
};
// Returns a token associated with this source. Defined out-of-line because
// inplace_stop_token is an incomplete type at the point where the source
// class is defined.
inline auto inplace_stop_source::get_token() const noexcept -> inplace_stop_token {
return inplace_stop_token{this};
}
// [stopcallback.inplace], class template inplace_stop_callback
//
// RAII registration of a callable with an inplace_stop_source: registers
// in the constructor, deregisters in the destructor. If a stop was already
// requested at construction time the callable is invoked immediately,
// inline on the constructing thread.
template <class _Fun>
class inplace_stop_callback : __stok::__inplace_stop_callback_base {
public:
template <class _Fun2>
requires std::constructible_from<_Fun, _Fun2>
explicit inplace_stop_callback(
inplace_stop_token __token,
_Fun2&& __fun) //
noexcept(std::is_nothrow_constructible_v<_Fun, _Fun2>)
: __stok::__inplace_stop_callback_base(
__token.__source_,
&inplace_stop_callback::__execute_impl_)
, __fun_(static_cast<_Fun2&&>(__fun)) {
__register_callback_();
}
~inplace_stop_callback() {
// __source_ is nullptr if the token was empty or the callback already
// ran inline at registration time; nothing to deregister then.
if (__source_ != nullptr)
__source_->__remove_callback_(this);
}
private:
// Type-erased trampoline stored in the base class; invokes the callable
// as an rvalue, matching the one-shot nature of stop-callbacks.
static void __execute_impl_(__stok::__inplace_stop_callback_base* cb) noexcept {
std::move(static_cast<inplace_stop_callback*>(cb)->__fun_)();
}
[[no_unique_address]]
_Fun __fun_;
};
namespace __stok {
// Attempts to link this callback into its source's list. If a stop has
// already been requested the callback cannot be registered; it is executed
// inline here and __source_ is cleared so the destructor skips removal.
inline void __inplace_stop_callback_base::__register_callback_() noexcept {
if (__source_ != nullptr) {
if (!__source_->__try_add_callback_(this)) {
__source_ = nullptr;
// Callback not registered because stop_requested() was true.
// Execute inline here.
__execute();
}
}
}
} // namespace __stok
// Destruction is only valid once no callbacks remain registered and the
// list lock is not held; these asserts catch lifetime bugs in debug builds.
inline inplace_stop_source::~inplace_stop_source() {
assert((__state_.load(std::memory_order_relaxed) & __locked_flag_) == 0);
assert(__callbacks_ == nullptr);
}
// Requests a stop. Returns false (with no other effect) if a stop was
// already requested; otherwise sets the stop-requested flag, executes every
// registered callback on the calling thread, and returns true.
inline auto inplace_stop_source::request_stop() noexcept -> bool {
// Atomically set the stop-requested flag AND take the list lock; fails
// only if a stop was already requested.
if (!__try_lock_unless_stop_requested_(true))
return false;
// Recorded (under the lock) so __remove_callback_() can detect removal
// from within a callback running on this same thread.
__notifying_thread_ = std::this_thread::get_id();
// We are responsible for executing callbacks.
while (__callbacks_ != nullptr) {
// Unlink the head callback; a null __prev_ptr_ marks it as dequeued.
auto* __callbk = __callbacks_;
__callbk->__prev_ptr_ = nullptr;
__callbacks_ = __callbk->__next_;
if (__callbacks_ != nullptr)
__callbacks_->__prev_ptr_ = &__callbacks_;
// Remember whether more callbacks remain BEFORE releasing the lock, so
// the lock need not be retaken after the final callback (see the
// modification notes at the top of the file).
bool __any_more_callbacks = (__callbacks_ != nullptr);
// Drop the lock bit (keeping the stop-requested bit) while the callback
// runs, so other threads can continue to register/deregister.
__state_.store(__stop_requested_flag_, std::memory_order_release);
// Lets a callback that deregisters itself during execution signal us
// through __remove_callback_() instead of deadlocking.
bool __removed_during_callback = false;
__callbk->__removed_during_callback_ = &__removed_during_callback;
__callbk->__execute();
if (!__removed_during_callback) {
// Publish completion; a concurrent __remove_callback_() on another
// thread spins on this flag. *__callbk must not be touched after the
// store - the owner may destroy it immediately.
__callbk->__removed_during_callback_ = nullptr;
__callbk->__callback_completed_.store(true, std::memory_order_release);
}
if (!__any_more_callbacks) return true;
__lock_();
}
__state_.store(__stop_requested_flag_, std::memory_order_release);
return true;
}
// Acquires the spin-lock bit of __state_, backing off while another thread
// holds it. Returns the state byte as it was before the lock bit was set;
// the caller later passes that value to __unlock_().
inline auto inplace_stop_source::__lock_() const noexcept -> uint8_t {
__stok::__spin_wait __spin;
auto __old_state = __state_.load(std::memory_order_relaxed);
do {
// Wait until the lock bit is observed clear before attempting the CAS.
while ((__old_state & __locked_flag_) != 0) {
__spin.__wait();
__old_state = __state_.load(std::memory_order_relaxed);
}
} while (!__state_.compare_exchange_weak(
__old_state,
__old_state | __locked_flag_,
std::memory_order_acquire,
std::memory_order_relaxed));
return __old_state;
}
// Releases the spin-lock by restoring the pre-lock state byte returned by
// __lock_(). The release store publishes any list mutations performed
// while the lock was held to the next acquirer.
inline void inplace_stop_source::__unlock_(uint8_t __old_state) const noexcept {
  __state_.store(__old_state, std::memory_order_release);
}
// Attempts to acquire the lock, giving up if a stop has already been
// requested. When __set_stop_requested is true, the stop-requested flag is
// set in the same atomic transition (used by request_stop()).
// Returns true iff the lock was acquired.
inline auto inplace_stop_source::__try_lock_unless_stop_requested_(
bool __set_stop_requested) const noexcept -> bool {
__stok::__spin_wait __spin;
auto __old_state = __state_.load(std::memory_order_relaxed);
do {
while (true) {
if ((__old_state & __stop_requested_flag_) != 0) {
// Stop already requested.
return false;
} else if (__old_state == 0) {
// Unlocked and no stop requested: attempt the CAS below.
break;
} else {
// Lock held by another thread; back off and re-read.
__spin.__wait();
__old_state = __state_.load(std::memory_order_relaxed);
}
}
} while (!__state_.compare_exchange_weak(
__old_state,
__set_stop_requested ? (__locked_flag_ | __stop_requested_flag_) : __locked_flag_,
std::memory_order_acq_rel,
std::memory_order_relaxed));
// Lock acquired successfully
return true;
}
// Pushes __callbk onto the front of the callback list. Returns false
// (without registering) if a stop has already been requested, in which
// case the caller runs the callback inline instead.
inline auto inplace_stop_source::__try_add_callback_(
__stok::__inplace_stop_callback_base* __callbk) const noexcept -> bool {
if (!__try_lock_unless_stop_requested_(false)) {
return false;
}
__callbk->__next_ = __callbacks_;
__callbk->__prev_ptr_ = &__callbacks_;
if (__callbacks_ != nullptr) {
__callbacks_->__prev_ptr_ = &__callbk->__next_;
}
__callbacks_ = __callbk;
// The pre-lock state was necessarily 0 (unlocked, no stop requested),
// so unlocking restores exactly that.
__unlock_(0);
return true;
}
// Removes __callbk from the list, or - if request_stop() has already
// dequeued it - synchronises with the callback's execution so the callback
// object can be safely destroyed once this function returns.
inline void inplace_stop_source::__remove_callback_(
__stok::__inplace_stop_callback_base* __callbk) const noexcept {
auto __old_state = __lock_();
if (__callbk->__prev_ptr_ != nullptr) {
// Callback has not been executed yet.
// Remove from the list.
*__callbk->__prev_ptr_ = __callbk->__next_;
if (__callbk->__next_ != nullptr) {
__callbk->__next_->__prev_ptr_ = __callbk->__prev_ptr_;
}
__unlock_(__old_state);
} else {
// Read under the lock: request_stop() stores this after taking the lock.
auto __notifying_thread = __notifying_thread_;
__unlock_(__old_state);
// Callback has either already been executed or is
// currently executing on another thread.
if (std::this_thread::get_id() == __notifying_thread) {
// Deregistering on the notifying thread itself (i.e. from inside the
// callback): flag it so request_stop() does not touch the object again.
if (__callbk->__removed_during_callback_ != nullptr) {
*__callbk->__removed_during_callback_ = true;
}
} else {
// Concurrently executing on another thread.
// Wait until the other thread finishes executing the callback.
__stok::__spin_wait __spin;
while (!__callbk->__callback_completed_.load(std::memory_order_acquire)) {
__spin.__wait();
}
}
}
}
// Deduction guide so callers can write inplace_stop_callback{token, fn}
// without spelling out the callable's type (one of the modifications noted
// in the file header).
template<typename CB>
inplace_stop_callback(inplace_stop_token, CB) -> inplace_stop_callback<CB>;
} // namespace std
namespace std
{
class single_inplace_stop_token;
template<typename CB>
class single_inplace_stop_callback;
// Stop-source variant that supports AT MOST ONE registered callback at a
// time. All state fits in a single atomic pointer, avoiding the spin-lock
// and intrusive list of inplace_stop_source.
class single_inplace_stop_source {
public:
single_inplace_stop_source() noexcept : state_(no_callback_state()) {}
// Requests a stop, running the registered callback (if any) on the
// calling thread; returns false if a stop had already been requested.
bool request_stop() noexcept;
bool stop_requested() const noexcept;
single_inplace_stop_token get_token() const noexcept;
private:
template<typename CB>
friend class single_inplace_stop_callback;
// Type-erased callback record; `execute` is the trampoline that invokes
// the concrete callable.
struct callback_base {
void(*execute)(callback_base* self) noexcept;
};
bool try_register_callback(callback_base* cb) const noexcept;
void deregister_callback(callback_base* cb) const noexcept;
// The addresses of this object's own members serve as unique sentinel
// values, so distinguishing states needs no extra storage.
void* stop_requested_state() const noexcept {
return &state_;
}
void* stop_requested_callback_done_state() const noexcept {
return &thread_requesting_stop_;
}
static void* no_callback_state() noexcept {
return nullptr;
}
bool is_stop_requested_state(void* state) const noexcept {
#if 1
// Non-short-circuiting variant of the expression below (presumably kept
// for benchmarking; both forms are equivalent).
bool result = (state == stop_requested_state());
result |= (state == stop_requested_callback_done_state());
return result;
#else
return state == stop_requested_state() || state == stop_requested_callback_done_state();
#endif
}
// nullptr - no stop-request or stop-callback
// &state_ - stop-requested
// &thread_requesting_stop - stop-requested, callback-done
// other - pointer to callback_base
mutable atomic<void*> state_;
// Id of the thread running request_stop(); lets deregister_callback()
// detect deregistration from inside the callback on the same thread.
mutable atomic<thread::id> thread_requesting_stop_;
};
inline bool single_inplace_stop_source::stop_requested() const noexcept {
void* state = state_.load(std::memory_order_acquire);
return is_stop_requested_state(state);
}
// Lightweight handle referring to a single_inplace_stop_source (or to
// nothing, when default-constructed).
class single_inplace_stop_token {
public:
template<typename CB>
using callback_type = single_inplace_stop_callback<CB>;

single_inplace_stop_token() noexcept : source_(nullptr) {}

// Fix: these queries were previously non-const, making them uncallable
// through a const token and inconsistent with inplace_stop_token above.
// Adding const (and [[nodiscard]]) is backward-compatible for all callers.
[[nodiscard]] bool stop_possible() const noexcept { return source_ != nullptr; }
[[nodiscard]] bool stop_requested() const noexcept { return stop_possible() && source_->stop_requested(); }

private:
friend single_inplace_stop_source;
template<typename CB>
friend class single_inplace_stop_callback;
// Only the source itself can mint a non-empty token.
explicit single_inplace_stop_token(const single_inplace_stop_source* source) noexcept
: source_(source)
{}
const single_inplace_stop_source* source_;
};
// RAII registration of a callable with a single_inplace_stop_source:
// registers in the constructor, deregisters in the destructor. If a stop
// was already requested at construction time, the callable is invoked
// immediately, inline on the constructing thread.
template<typename CB>
struct single_inplace_stop_callback
: private single_inplace_stop_source::callback_base {
public:
template<typename Init>
requires std::constructible_from<CB, Init>
single_inplace_stop_callback(single_inplace_stop_token st, Init&& init)
noexcept(is_nothrow_constructible_v<CB, Init>)
: source_(st.source_)
, callback_(std::forward<Init>(init))
{
this->execute = &execute_impl;
if (source_ != nullptr) {
if (!source_->try_register_callback(this)) {
// Stop already requested: run the callback inline and clear source_
// so the destructor does not attempt to deregister.
source_ = nullptr;
execute_impl(this);
}
}
}
~single_inplace_stop_callback() {
if (source_ != nullptr) {
source_->deregister_callback(this);
}
}
// Immovable: the source holds a raw pointer to this object.
single_inplace_stop_callback(single_inplace_stop_callback&&) = delete;
single_inplace_stop_callback(const single_inplace_stop_callback&) = delete;
single_inplace_stop_callback& operator=(single_inplace_stop_callback&&) = delete;
single_inplace_stop_callback& operator=(const single_inplace_stop_callback&) = delete;
private:
// Trampoline stored in callback_base::execute.
static void execute_impl(single_inplace_stop_source::callback_base* base) noexcept {
auto& self = *static_cast<single_inplace_stop_callback*>(base);
self.callback_();
}
const single_inplace_stop_source* source_;
CB callback_;
};
// Deduction guide so callers can write single_inplace_stop_callback{token, fn}.
template<typename CB>
single_inplace_stop_callback(single_inplace_stop_token, CB) -> single_inplace_stop_callback<CB>;
// Returns a token referring to this source; defined out-of-line because
// single_inplace_stop_token is incomplete at the source's class definition.
inline single_inplace_stop_token single_inplace_stop_source::get_token() const noexcept {
return single_inplace_stop_token{this};
}
// Requests a stop. If a callback is registered, it is executed on the
// calling thread before returning. Returns false if a stop had already
// been requested.
inline bool single_inplace_stop_source::request_stop() noexcept {
void* old_state = state_.load(std::memory_order_relaxed);
do {
if (is_stop_requested_state(old_state)) {
return false;
}
} while (!state_.compare_exchange_weak(old_state,
stop_requested_state(),
memory_order_acq_rel,
memory_order_relaxed));
if (old_state != no_callback_state()) {
// A callback was registered: old_state points at its callback_base.
auto* callback = static_cast<callback_base*>(old_state);
// NOTE(review): relaxed looks sufficient here because the only reader
// that must see this exact value is this same thread (in
// deregister_callback()); any other thread that reads a mismatching id
// simply treats the callback as running elsewhere and waits.
thread_requesting_stop_.store(this_thread::get_id(), memory_order_relaxed);
callback->execute(callback);
// Publish "callback finished" and wake any deregister_callback() that
// is blocked in state_.wait().
state_.store(stop_requested_callback_done_state(), memory_order_release);
state_.notify_one();
}
return true;
}
// Attempts to install `base` as the (single) stop-callback. Returns false
// if a stop has already been requested, in which case the caller invokes
// the callback inline instead.
inline bool single_inplace_stop_source::try_register_callback(callback_base* base) const noexcept {
void* old_state = state_.load(memory_order_acquire);
if (is_stop_requested_state(old_state)) {
return false;
}
// Only one callback may be registered with this source at a time.
assert(old_state == no_callback_state());
if (state_.compare_exchange_strong(old_state,
static_cast<void*>(base),
memory_order_release,
memory_order_acquire)) {
// Successfully registered callback.
return true;
}
// Stop request arrived while we were trying to register
assert(old_state == stop_requested_state());
return false;
}
// Deregisters `base`. On return it is safe to destroy the callback object:
// either it was removed before being invoked, it has finished executing,
// or it is being invoked by this very thread (self-deregistration).
inline void single_inplace_stop_source::deregister_callback(callback_base* base) const noexcept {
// Initially assume that the callback has not been invoked and that the state
// still points to the registered callback_base structure.
void* old_state = static_cast<void*>(base);
if (state_.compare_exchange_strong(old_state,
no_callback_state(),
memory_order_relaxed,
memory_order_acquire)) {
// Successfully deregistered the callback before it could be invoked.
return;
}
// Otherwise, a call to request_stop() is invoking the callback.
if (old_state == stop_requested_state()) {
// Callback not finished executing yet.
if (thread_requesting_stop_.load(std::memory_order_relaxed) ==
std::this_thread::get_id()) {
// Deregistering from the same thread that is invoking the callback.
// Either the invocation of the callback has completed and the thread
// has gone on to do other things (in which case it's safe to destroy)
// or we are still in the middle of executing the callback (in which
// case we can't block as it would cause a deadlock).
return;
}
// Otherwise, callback is being called from another thread.
// Wait for callback to finish (state changes from stop_requested_state()
// to stop_requested_callback_done_state()).
state_.wait(old_state, memory_order_acquire);
}
}
}
/////////////////////////////////////////////////////////////////////////////////
// Benchmarking Code Below Here
/////////////////////////////////////////////////////////////////////////////////
#include <algorithm>
#include <array>
#include <chrono>
#include <numeric>
#include <print>
#include <semaphore>
#include <vector>
// Define some helper functions
// Number of inner iterations each benchmark body performs per timed pass,
// and the number of timed passes run per benchmark.
constexpr std::uint32_t iteration_count = 100'000;
constexpr std::uint32_t pass_count = 20;
// Runs f() once and prints how long it took (microseconds, with the
// sub-microsecond remainder shown as three decimal digits).
template<typename F>
void timed_invoke(const char* label, F f) {
  using clock = std::chrono::steady_clock;
  const auto begin = clock::now();
  f();
  const auto elapsed = clock::now() - begin;
  const auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(elapsed).count();
  std::print("{} took {}.{:03}us\n", label, ns / 1000, ns % 1000);
}
// Prints the min/max/average of the collected pass times in microseconds.
// Sorts `times` in place as a side effect.
// Fix: previously an empty vector caused undefined behaviour (front()/
// back() on an empty vector and division by times.size() == 0); it is now
// reported explicitly and skipped.
void print_times(const char* label, std::vector<std::chrono::nanoseconds>& times) {
  if (times.empty()) {
    std::print("{} took no samples\n", label);
    return;
  }
  std::sort(times.begin(), times.end());
  auto min_time_ns = times.front().count();
  auto max_time_ns = times.back().count();
  auto total_time = std::accumulate(times.begin(), times.end(), std::chrono::nanoseconds{});
  auto avg_time = total_time / times.size();
  auto avg_time_ns = avg_time.count();
  std::print("{} took {}.{:03} - {}.{:03}us (avg {}.{:03}us)\n",
             label,
             min_time_ns / 1000, min_time_ns % 1000,
             max_time_ns / 1000, max_time_ns % 1000,
             avg_time_ns / 1000, avg_time_ns % 1000);
}
// Runs f() `count` times, timing each invocation separately, then reports
// the distribution via print_times(). Does nothing when count is zero.
template<typename F>
void timed_invoke_multi(const char* label, std::uint32_t count, F f) {
  if (count == 0) return;
  std::vector<std::chrono::nanoseconds> times;
  times.reserve(count);
  std::uint32_t remaining = count;
  while (remaining-- != 0) {
    const auto begin = std::chrono::steady_clock::now();
    f();
    const auto finish = std::chrono::steady_clock::now();
    times.push_back(std::chrono::duration_cast<std::chrono::nanoseconds>(finish - begin));
  }
  print_times(label, times);
}
//
// Single-Thread Register/Unregister Callback
//
// Benchmark: repeatedly construct/destroy a stop-callback on a token whose
// stop is never requested (list-based implementation).
void single_thread_register_unregister_1() {
  std::inplace_stop_source ss;
  auto callback = [x = 1]() noexcept {};
  std::uint32_t remaining = iteration_count;
  while (remaining-- != 0) {
    std::inplace_stop_callback scb{ss.get_token(), callback};
  }
}
// Benchmark: repeatedly construct/destroy a stop-callback on a token whose
// stop is never requested (single-callback implementation).
void single_thread_register_unregister_2() {
  std::single_inplace_stop_source ss;
  auto callback = [x = 1]() noexcept {};
  std::uint32_t remaining = iteration_count;
  while (remaining-- != 0) {
    std::single_inplace_stop_callback scb{ss.get_token(), callback};
  }
}
//
// Single-Thread No Callback + request_stop
//
// Benchmark: construct a fresh source and request stop with no callbacks
// registered (list-based implementation).
void single_thread_no_callback_stop_1() {
  std::uint32_t remaining = iteration_count;
  while (remaining-- != 0) {
    std::inplace_stop_source ss;
    ss.request_stop();
  }
}
// Benchmark: construct a fresh source and request stop with no callbacks
// registered (single-callback implementation).
void single_thread_no_callback_stop_2() {
  std::uint32_t remaining = iteration_count;
  while (remaining-- != 0) {
    std::single_inplace_stop_source ss;
    ss.request_stop();
  }
}
//
// Single-Thread Register/Unregister Nx Callback + request_stop
//
// Benchmark: one inplace_stop_source with Count callbacks registered, then
// request_stop(), verifying that every callback ran exactly once.
template<std::size_t Count>
void single_thread_register_multiple_with_stop_1() {
for (std::uint32_t i = 0; i < iteration_count; ++i) {
std::inplace_stop_source ss;
std::size_t count = 0;
auto cb = [&] { ++count; };
using stop_callback_t = std::inplace_stop_callback<decltype(cb)>;
// Construct Count callbacks in an array via a pack expansion; the
// (void)Is comma-expression just repeats the same initializer Count times.
auto cbs = [&]<std::size_t... Is>(std::index_sequence<Is...>) {
return std::array<stop_callback_t, Count>{
((void)Is, stop_callback_t{ss.get_token(), cb})...
};
}(std::make_index_sequence<Count>{});
ss.request_stop();
if (count != Count) {
std::terminate();
}
}
}
// Benchmark: Count single_inplace_stop_sources (each supports only ONE
// callback, so one source per callback), one callback registered on each,
// then request_stop() on all, verifying every callback ran exactly once.
template<std::size_t Count>
void single_thread_register_multiple_with_stop_2() {
for (std::uint32_t i = 0; i < iteration_count; ++i) {
std::array<std::single_inplace_stop_source, Count> ss;
std::size_t count = 0;
auto cb = [&] { ++count; };
using stop_callback_t = std::single_inplace_stop_callback<decltype(cb)>;
// Construct one callback per source via a pack expansion.
auto cbs = [&]<std::size_t... Is>(std::index_sequence<Is...>) {
return std::array<stop_callback_t, Count>{
stop_callback_t{ss[Is].get_token(), cb}...
};
}(std::make_index_sequence<Count>{});
for (std::size_t j = 0; j < Count; ++j) {
ss[j].request_stop();
}
if (count != Count) {
std::terminate();
}
}
}
//
// Two-Threads Register/Unregister Callback
//
// Runs func1 and func2 concurrently on two threads for pass_count passes,
// using the two atomic flags as a rendezvous so each pass starts on both
// threads at (approximately) the same time. Returns the per-pass durations
// from both threads, concatenated.
template<typename Func1, typename Func2>
std::vector<std::chrono::nanoseconds> run_two_threads_concurrently(
Func1 func1, Func2 func2) {
std::atomic<bool> t1_ready{false};
std::atomic<bool> t2_ready{false};
auto compute_times = [&](auto& func, std::atomic<bool>& ready) {
std::vector<std::chrono::nanoseconds> times;
times.reserve(pass_count);
for (std::uint32_t pass = 0; pass < pass_count; ++pass) {
// Signal readiness, then busy-wait until the main thread clears the
// flag to release us into the timed region.
ready.store(true);
while (ready.load()) {}
auto start = std::chrono::steady_clock::now();
func();
auto end = std::chrono::steady_clock::now();
auto time = (end - start);
times.push_back(time);
}
return times;
};
std::vector<std::chrono::nanoseconds> t1_times;
std::vector<std::chrono::nanoseconds> t2_times;
std::thread t1{[&] {
t1_times = compute_times(func1, t1_ready);
}};
std::thread t2{[&] {
t2_times = compute_times(func2, t2_ready);
}};
// The main thread acts as starter: once BOTH workers are ready, clearing
// the flags releases them together.
for (std::uint32_t pass = 0; pass < pass_count; ++pass) {
while (!t1_ready.load()) {}
while (!t2_ready.load()) {}
t1_ready.store(false);
t2_ready.store(false);
}
t1.join();
t2.join();
std::vector<std::chrono::nanoseconds> all_times;
all_times.reserve(t1_times.size() + t2_times.size());
all_times.insert(all_times.end(), t1_times.begin(), t1_times.end());
all_times.insert(all_times.end(), t2_times.begin(), t2_times.end());
return all_times;
}
// Benchmark: two threads hammer register/unregister on the SAME
// inplace_stop_source concurrently.
void two_threads_register_unregister_1() {
  std::inplace_stop_source ss;
  auto register_callbacks = [&] {
    auto cb = [x = 1]() {};
    for (std::uint32_t i = 0; i != iteration_count; ++i) {
      std::inplace_stop_callback scb{ss.get_token(), cb};
    }
  };
  auto times = run_two_threads_concurrently(register_callbacks, register_callbacks);
  print_times("multi: 2 threads", times);
}
void two_threads_register_unregister_2() {
std::single_inplace_stop_source ss1;
std::single_inplace_stop_source ss2;
auto register_callbacks_1 = [&] {
auto cb = [x=1] {};
for (std::uint32_t i = 0; i < iteration_count; ++i) {
std::single_inplace_stop_callback scb{ss1.get_token(), cb};
}
};
auto register_callbacks_2 = [&] {
auto cb = [x=1] {};
for (std::uint32_t i = 0; i < iteration_count; ++i) {
std::single_inplace_stop_callback scb{ss2.get_token(), cb};
}
};
auto times = run_two_threads_concurrently(
register_callbacks_1,
register_callbacks_2);
print_times("single: 2 threads", times);
}
// Entry point: runs each benchmark pass_count times and prints one timing
// summary line per benchmark, comparing the list-based ("multi") and
// single-callback ("single") stop-source implementations.
//
// Fix: the first two benchmarks were wrapped in scopes that constructed an
// unused `ss` stop-source local; those dead locals (and the now-pointless
// scopes) have been removed - the benchmark functions construct their own
// sources internally.
int main() {
  timed_invoke_multi("multi: 100k stop-callbacks ST", pass_count, [&] {
    single_thread_register_unregister_1();
  });
  timed_invoke_multi("single: 100k stop-callbacks ST", pass_count, [&] {
    single_thread_register_unregister_2();
  });
  timed_invoke_multi("multi: ST request_stop x0", pass_count, [&] {
    single_thread_no_callback_stop_1();
  });
  timed_invoke_multi("single: ST request_stop x0", pass_count, [&] {
    single_thread_no_callback_stop_2();
  });
  timed_invoke_multi("multi: ST request_stop x1", pass_count, [&] {
    single_thread_register_multiple_with_stop_1<1>();
  });
  timed_invoke_multi("single: ST request_stop x1", pass_count, [&] {
    single_thread_register_multiple_with_stop_2<1>();
  });
  timed_invoke_multi("multi: ST request_stop x2", pass_count, [&] {
    single_thread_register_multiple_with_stop_1<2>();
  });
  timed_invoke_multi("single: ST request_stop x2", pass_count, [&] {
    single_thread_register_multiple_with_stop_2<2>();
  });
  timed_invoke_multi("multi: ST request_stop x10", pass_count, [&] {
    single_thread_register_multiple_with_stop_1<10>();
  });
  timed_invoke_multi("single: ST request_stop x10", pass_count, [&] {
    single_thread_register_multiple_with_stop_2<10>();
  });
  two_threads_register_unregister_1();
  two_threads_register_unregister_2();
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment