}; | }; | ||||
} | } | ||||
resumef::g_scheduler.run_until_notask(); | |||||
resumef::this_scheduler()->run_until_notask(); | |||||
} | } |
| |||||
#pragma once | |||||
namespace resumef | |||||
{ | |||||
// Intrusive reference-counting smart pointer.
//
// T must provide lock() (acquire one reference) and unlock() (release
// one reference, destroying itself when the count reaches zero).
template <typename T>
struct counted_ptr
{
	counted_ptr() = default;

	counted_ptr(const counted_ptr& cp) : _p(cp._p)
	{
		_lock();
	}

	// Takes shared ownership of p (adds one reference).
	counted_ptr(T* p) : _p(p)
	{
		_lock();
	}

	// Move: steal cp's reference, leaving cp empty.
	// noexcept so std::vector & friends move instead of copy on reallocation.
	counted_ptr(counted_ptr&& cp) noexcept
	{
		std::swap(_p, cp._p);
	}

	counted_ptr& operator=(const counted_ptr& cp)
	{
		if (&cp != this)
		{
			_unlock();
			_lock(cp._p);
		}
		return *this;
	}

	// Move-assign by swapping: our previous pointee (if any) is
	// released when cp is destroyed.
	counted_ptr& operator=(counted_ptr&& cp) noexcept
	{
		if (&cp != this)
			std::swap(_p, cp._p);
		return *this;
	}

	~counted_ptr()
	{
		_unlock();
	}

	T* operator->() const
	{
		return _p;
	}

	T* get() const
	{
		return _p;
	}

	// Drop our reference (if any); leaves this pointer empty.
	void reset()
	{
		_unlock();
	}

private:
	void _unlock()
	{
		if (_p != nullptr)
		{
			// Clear _p before calling unlock(): unlock() may destroy
			// the pointee (and could reentrantly observe this object).
			auto t = _p;
			_p = nullptr;
			t->unlock();
		}
	}
	void _lock(T* p)
	{
		if (p != nullptr)
			p->lock();
		_p = p;
	}
	void _lock()
	{
		if (_p != nullptr)
			_p->lock();
	}

	T* _p = nullptr;
};
template <typename T> | |||||
counted_ptr<T> make_counted() | |||||
{ | |||||
return new T{}; | |||||
} | |||||
} | |||||
#endif | #endif | ||||
#if RESUMEF_DEBUG_COUNTER | #if RESUMEF_DEBUG_COUNTER | ||||
extern std::mutex g_resumef_cout_mutex; | |||||
extern std::atomic<intptr_t> g_resumef_state_count; | extern std::atomic<intptr_t> g_resumef_state_count; | ||||
extern std::atomic<intptr_t> g_resumef_task_count; | extern std::atomic<intptr_t> g_resumef_task_count; | ||||
extern std::atomic<intptr_t> g_resumef_evtctx_count; | extern std::atomic<intptr_t> g_resumef_evtctx_count; |
}); | }); | ||||
_event->wait_(awaker); | _event->wait_(awaker); | ||||
g_scheduler.timer()->add(tp, | |||||
this_scheduler()->timer()->add(tp, | |||||
[awaker](bool bValue) | [awaker](bool bValue) | ||||
{ | { | ||||
awaker->awake(nullptr, 1); | awaker->awake(nullptr, 1); | ||||
e->wait_(awaker); | e->wait_(awaker); | ||||
} | } | ||||
g_scheduler.timer()->add(tp, | |||||
this_scheduler()->timer()->add(tp, | |||||
[awaker](bool bValue) | [awaker](bool bValue) | ||||
{ | { | ||||
awaker->awake(nullptr, 1); | awaker->awake(nullptr, 1); | ||||
awaitable_t<bool> awaitable; | awaitable_t<bool> awaitable; | ||||
if (evts.size() <= 0) | if (evts.size() <= 0) | ||||
{ | { | ||||
g_scheduler.timer()->add_handler(tp, | |||||
this_scheduler()->timer()->add_handler(tp, | |||||
[st = awaitable._state](bool bValue) | [st = awaitable._state](bool bValue) | ||||
{ | { | ||||
st->set_value(false); | st->set_value(false); | ||||
ctx->st = awaitable._state; | ctx->st = awaitable._state; | ||||
ctx->evts_waited.reserve(evts.size()); | ctx->evts_waited.reserve(evts.size()); | ||||
ctx->evts = std::move(evts); | ctx->evts = std::move(evts); | ||||
ctx->th = std::move(g_scheduler.timer()->add_handler(tp, | |||||
ctx->th = std::move(this_scheduler()->timer()->add_handler(tp, | |||||
[ctx](bool bValue) | [ctx](bool bValue) | ||||
{ | { | ||||
ctx->awake(nullptr); | ctx->awake(nullptr); |
future_impl_t(const counted_ptr<state_type>& state) : _state(state) | future_impl_t(const counted_ptr<state_type>& state) : _state(state) | ||||
{ | { | ||||
_state->_future_acquired = true; | |||||
} | } | ||||
// movable, but not copyable | // movable, but not copyable | ||||
using future_vt = future_t<void>; | using future_vt = future_t<void>; | ||||
template<class state_type> | |||||
struct awaitor_initial_suspend | |||||
{ | |||||
counted_ptr<state_type> _state; | |||||
bool await_ready() noexcept; | |||||
void await_suspend(coroutine_handle<> resume_cb) noexcept; | |||||
void await_resume() noexcept; | |||||
}; | |||||
template <typename T> | template <typename T> | ||||
struct promise_impl_t | struct promise_impl_t | ||||
{ | { | ||||
//callback里,不应该调用 get_return_object() | //callback里,不应该调用 get_return_object() | ||||
future_type get_future() | future_type get_future() | ||||
{ | { | ||||
if (_state->_future_acquired) | |||||
throw future_exception{ future_error::already_acquired }; | |||||
return future_type(_state); | return future_type(_state); | ||||
} | } | ||||
// cause multiple callbacks. | // cause multiple callbacks. | ||||
future_type next_future() | future_type next_future() | ||||
{ | { | ||||
// reset and return another future | |||||
if (_state->_future_acquired) | |||||
_state->reset(); | |||||
return future_type(_state); | return future_type(_state); | ||||
} | } | ||||
auto initial_suspend() noexcept | auto initial_suspend() noexcept | ||||
{ | { | ||||
return std::experimental::suspend_never{}; | return std::experimental::suspend_never{}; | ||||
//return awaitor_initial_suspend<state_type>{ _state }; | |||||
} | } | ||||
//这在一个协程被销毁之时调用。 | //这在一个协程被销毁之时调用。 | ||||
return nullptr; | return nullptr; | ||||
} | } | ||||
/* | |||||
inline scheduler * state_base::parent_scheduler() const | inline scheduler * state_base::parent_scheduler() const | ||||
{ | { | ||||
auto promise_ = parent_promise(); | auto promise_ = parent_promise(); | ||||
return promise_->_state->current_scheduler(); | return promise_->_state->current_scheduler(); | ||||
return nullptr; | return nullptr; | ||||
} | } | ||||
*/ | |||||
template<class state_type> | |||||
bool awaitor_initial_suspend<state_type>::await_ready() noexcept | |||||
{ | |||||
return false; | |||||
} | |||||
template<class state_type> | |||||
void awaitor_initial_suspend<state_type>::await_suspend(coroutine_handle<> resume_cb) noexcept | |||||
{ | |||||
_state->await_suspend(resume_cb); | |||||
scheduler * sch_ = _state->parent_scheduler(); | |||||
if (sch_ != nullptr) | |||||
{ | |||||
_state->current_scheduler(sch_); | |||||
_state->resume(); | |||||
} | |||||
} | |||||
template<class state_type> | |||||
void awaitor_initial_suspend<state_type>::await_resume() noexcept | |||||
{ | |||||
} | |||||
} | } | ||||
}); | }); | ||||
_locker->lock_(awaker); | _locker->lock_(awaker); | ||||
g_scheduler.timer()->add(tp, | |||||
this_scheduler()->timer()->add(tp, | |||||
[awaker](bool bValue) | [awaker](bool bValue) | ||||
{ | { | ||||
awaker->awake(nullptr, 1); | awaker->awake(nullptr, 1); |
#endif | #endif | ||||
} | } | ||||
template<class state_tt> | |||||
struct awaitable_task_t : public task_base | |||||
{ | |||||
counted_ptr<state_tt> _state; | |||||
awaitable_task_t() {} | |||||
awaitable_task_t(state_tt * state) | |||||
: _state(state) | |||||
{ | |||||
} | |||||
virtual bool is_suspend() override | |||||
{ | |||||
return !_state->ready(); | |||||
} | |||||
virtual bool go_next(scheduler * schdler) override | |||||
{ | |||||
_state->current_scheduler(schdler); | |||||
_state->resume(); | |||||
return false; | |||||
} | |||||
virtual void cancel() override | |||||
{ | |||||
_state->cancel(); | |||||
} | |||||
virtual void * get_id() override | |||||
{ | |||||
return _state.get(); | |||||
} | |||||
}; | |||||
state_base::~state_base() | |||||
{ | |||||
#if RESUMEF_DEBUG_COUNTER | |||||
--g_resumef_state_count; | |||||
#endif | |||||
} | |||||
void state_base::set_value_none_lock() | |||||
{ | |||||
// Set all members first as calling coroutine may reset stuff here. | |||||
_ready = true; | |||||
auto sch_ = this->parent_scheduler(); | |||||
sch_->push_task_internal(new awaitable_task_t<state_base>(this)); | |||||
} | |||||
void state_base::set_exception(std::exception_ptr && e_) | |||||
{ | |||||
scoped_lock<std::mutex> __guard(_mtx); | |||||
_exception = std::move(e_); | |||||
// Set all members first as calling coroutine may reset stuff here. | |||||
_ready = true; | |||||
auto sch_ = this->parent_scheduler(); | |||||
sch_->push_task_internal(new awaitable_task_t<state_base>(this)); | |||||
} | |||||
} | } |
virtual bool go_next(scheduler *) = 0; | virtual bool go_next(scheduler *) = 0; | ||||
virtual void cancel() = 0; | virtual void cancel() = 0; | ||||
virtual void * get_id() = 0; | virtual void * get_id() = 0; | ||||
virtual void bind(scheduler *) = 0; | |||||
}; | }; | ||||
//---------------------------------------------------------------------------------------------- | //---------------------------------------------------------------------------------------------- | ||||
{ | { | ||||
return nullptr; | return nullptr; | ||||
} | } | ||||
virtual void bind(scheduler *) override | |||||
{ | |||||
} | |||||
}; | }; | ||||
template<class _Ty> | template<class _Ty> | ||||
virtual bool go_next(scheduler * schdler) override | virtual bool go_next(scheduler * schdler) override | ||||
{ | { | ||||
auto * _state = _future._state.get(); | auto * _state = _future._state.get(); | ||||
_state->current_scheduler(schdler); | |||||
_state->resume(); | _state->resume(); | ||||
return !_state->ready() && !_state->_done; | return !_state->ready() && !_state->_done; | ||||
} | } | ||||
{ | { | ||||
return _future._state.get(); | return _future._state.get(); | ||||
} | } | ||||
virtual void bind(scheduler * schdler) override | |||||
{ | |||||
auto * _state = _future._state.get(); | |||||
_state->current_scheduler(schdler); | |||||
} | |||||
}; | }; | ||||
//---------------------------------------------------------------------------------------------- | //---------------------------------------------------------------------------------------------- |
#include <assert.h> | #include <assert.h> | ||||
#if RESUMEF_DEBUG_COUNTER | #if RESUMEF_DEBUG_COUNTER | ||||
std::mutex g_resumef_cout_mutex; | |||||
std::atomic<intptr_t> g_resumef_state_count = 0; | std::atomic<intptr_t> g_resumef_state_count = 0; | ||||
std::atomic<intptr_t> g_resumef_task_count = 0; | std::atomic<intptr_t> g_resumef_task_count = 0; | ||||
std::atomic<intptr_t> g_resumef_evtctx_count = 0; | std::atomic<intptr_t> g_resumef_evtctx_count = 0; | ||||
return future_error_string[(size_t)(fe)]; | return future_error_string[(size_t)(fe)]; | ||||
} | } | ||||
thread_local scheduler * th_scheduler_ptr = nullptr; | |||||
//获得当前线程下的调度器 | //获得当前线程下的调度器 | ||||
scheduler * this_scheduler() | scheduler * this_scheduler() | ||||
{ | { | ||||
return &g_scheduler; | |||||
return th_scheduler_ptr ? th_scheduler_ptr : &scheduler::g_scheduler; | |||||
} | } | ||||
//获得当前线程下,正在由调度器调度的协程 | //获得当前线程下,正在由调度器调度的协程 | ||||
} | } | ||||
} | } | ||||
*/ | */ | ||||
// Binds a private scheduler to the current thread for the lifetime of
// this object (see this_scheduler()).
local_scheduler::local_scheduler()
{
	if (th_scheduler_ptr == nullptr)
	{
		// First local_scheduler on this thread: create a scheduler we
		// own and publish it as the thread's current scheduler.
		_scheduler_ptr = new scheduler;
		th_scheduler_ptr = _scheduler_ptr;
	}
	else
	{
		// This thread already has a scheduler; we own nothing.
		// (Fix: _scheduler_ptr was previously left uninitialized here,
		// making the destructor's compare-and-delete undefined behavior.)
		_scheduler_ptr = nullptr;
	}
}
// Unpublishes and destroys the scheduler this object created (if any).
local_scheduler::~local_scheduler()
{
	// Detach the thread-local pointer only if it still refers to the
	// scheduler this object created; another local_scheduler on the
	// same thread may own the published one.
	if (th_scheduler_ptr == _scheduler_ptr)
		th_scheduler_ptr = nullptr;
	// NOTE(review): if the constructor's "thread already has a
	// scheduler" branch was taken, _scheduler_ptr was never assigned —
	// this delete then reads an uninitialized pointer. Confirm the
	// constructor initializes _scheduler_ptr on every path.
	delete _scheduler_ptr;
}
scheduler::scheduler() | scheduler::scheduler() | ||||
: _task() | : _task() | ||||
scheduler::~scheduler() | scheduler::~scheduler() | ||||
{ | { | ||||
cancel_all_task_(); | cancel_all_task_(); | ||||
} | |||||
scheduler::scheduler(scheduler && right_) | |||||
{ | |||||
this->swap(right_); | |||||
} | |||||
scheduler & scheduler::operator = (scheduler && right_) | |||||
{ | |||||
this->swap(right_); | |||||
return *this; | |||||
if (th_scheduler_ptr == this) | |||||
th_scheduler_ptr = nullptr; | |||||
} | } | ||||
void scheduler::new_task(task_base * task) | void scheduler::new_task(task_base * task) | ||||
if (task) | if (task) | ||||
{ | { | ||||
scoped_lock<std::recursive_mutex> __guard(_mtx_ready); | scoped_lock<std::recursive_mutex> __guard(_mtx_ready); | ||||
task->bind(this); | |||||
this->_ready_task.push_back(task); | this->_ready_task.push_back(task); | ||||
} | } | ||||
} | } | ||||
void scheduler::run_one_batch() | void scheduler::run_one_batch() | ||||
{ | { | ||||
if (th_scheduler_ptr == nullptr) | |||||
th_scheduler_ptr = this; | |||||
{ | { | ||||
scoped_lock<std::recursive_mutex> __guard(_mtx_task); | scoped_lock<std::recursive_mutex> __guard(_mtx_task); | ||||
this->run_one_batch(); | this->run_one_batch(); | ||||
} | } | ||||
void scheduler::swap(scheduler & right_) | |||||
{ | |||||
if (this != &right_) | |||||
{ | |||||
scoped_lock<std::recursive_mutex, std::recursive_mutex, std::recursive_mutex, std::recursive_mutex> | |||||
__guard(this->_mtx_ready, this->_mtx_task, right_._mtx_ready, right_._mtx_task); | |||||
std::swap(this->_ready_task, right_._ready_task); | |||||
std::swap(this->_task, right_._task); | |||||
std::swap(this->_timer, right_._timer); | |||||
} | |||||
} | |||||
scheduler g_scheduler; | |||||
scheduler scheduler::g_scheduler; | |||||
} | } |
namespace resumef | namespace resumef | ||||
{ | { | ||||
struct local_scheduler; | |||||
struct scheduler : public std::enable_shared_from_this<scheduler> | struct scheduler : public std::enable_shared_from_this<scheduler> | ||||
{ | { | ||||
private: | private: | ||||
} | } | ||||
RF_API void break_all(); | RF_API void break_all(); | ||||
RF_API void swap(scheduler & right_); | |||||
inline timer_manager * timer() const | inline timer_manager * timer() const | ||||
{ | { | ||||
} | } | ||||
friend struct task_base; | friend struct task_base; | ||||
friend struct local_scheduler; | |||||
protected: | |||||
RF_API scheduler(); | RF_API scheduler(); | ||||
public: | |||||
RF_API ~scheduler(); | RF_API ~scheduler(); | ||||
RF_API scheduler(scheduler && right_); | |||||
RF_API scheduler & operator = (scheduler && right_); | |||||
scheduler(scheduler && right_) = delete; | |||||
scheduler & operator = (scheduler && right_) = delete; | |||||
scheduler(const scheduler &) = delete; | scheduler(const scheduler &) = delete; | ||||
scheduler & operator = (const scheduler &) = delete; | scheduler & operator = (const scheduler &) = delete; | ||||
static scheduler g_scheduler; | |||||
}; | }; | ||||
struct local_scheduler | |||||
{ | |||||
RF_API local_scheduler(); | |||||
RF_API ~local_scheduler(); | |||||
local_scheduler(local_scheduler && right_) = delete; | |||||
local_scheduler & operator = (local_scheduler && right_) = delete; | |||||
local_scheduler(const local_scheduler &) = delete; | |||||
local_scheduler & operator = (const local_scheduler &) = delete; | |||||
private: | |||||
scheduler * _scheduler_ptr; | |||||
}; | |||||
//-------------------------------------------------------------------------------------------------- | //-------------------------------------------------------------------------------------------------- | ||||
extern scheduler g_scheduler; | |||||
#if !defined(_DISABLE_RESUMEF_GO_MACRO) | #if !defined(_DISABLE_RESUMEF_GO_MACRO) | ||||
#define go (*::resumef::this_scheduler()) + | #define go (*::resumef::this_scheduler()) + | ||||
#define GO (*::resumef::this_scheduler()) + [=]()->resumef::future_vt | #define GO (*::resumef::this_scheduler()) + [=]()->resumef::future_vt | ||||
//-------------------------------------------------------------------------------------------------- | //-------------------------------------------------------------------------------------------------- | ||||
} | } | ||||
namespace std | |||||
{ | |||||
inline void swap(resumef::scheduler & _Left, resumef::scheduler & right_) | |||||
{ | |||||
_Left.swap(right_); | |||||
} | |||||
} | |||||
template<class _Rep, class _Period> | template<class _Rep, class _Period> | ||||
awaitable_t<bool> sleep_for(const std::chrono::duration<_Rep, _Period>& dt_) | awaitable_t<bool> sleep_for(const std::chrono::duration<_Rep, _Period>& dt_) | ||||
{ | { | ||||
return std::move(sleep_for_(std::chrono::duration_cast<std::chrono::system_clock::duration>(dt_), g_scheduler)); | |||||
return std::move(sleep_for_(std::chrono::duration_cast<std::chrono::system_clock::duration>(dt_), *this_scheduler())); | |||||
} | } | ||||
template<class _Clock, class _Duration = typename _Clock::duration> | template<class _Clock, class _Duration = typename _Clock::duration> | ||||
template<class _Clock, class _Duration> | template<class _Clock, class _Duration> | ||||
awaitable_t<bool> sleep_until(const std::chrono::time_point<_Clock, _Duration>& tp_) | awaitable_t<bool> sleep_until(const std::chrono::time_point<_Clock, _Duration>& tp_) | ||||
{ | { | ||||
return std::move(sleep_until_(std::chrono::time_point_cast<std::chrono::system_clock::duration>(tp_), g_scheduler)); | |||||
return std::move(sleep_until_(std::chrono::time_point_cast<std::chrono::system_clock::duration>(tp_), *this_scheduler())); | |||||
} | } | ||||
} | } |
| |||||
#include "rf_task.h" | |||||
#include "scheduler.h" | |||||
#include <assert.h> | |||||
namespace resumef | |||||
{ | |||||
// Task wrapper that drives an awaitable state object (state_tt) from a
// scheduler. Holds the state through counted_ptr, keeping it alive for
// as long as the task exists.
template<class state_tt>
struct awaitable_task_t : public task_base
{
	counted_ptr<state_tt> _state;
	awaitable_task_t() {}
	// Takes a reference on state via counted_ptr's raw-pointer ctor.
	awaitable_task_t(state_tt * state)
		: _state(state)
	{
	}
	// Suspended while the state has no result yet.
	virtual bool is_suspend() override
	{
		return !_state->ready();
	}
	// Resume the coroutine once. Returns false — presumably "do not
	// keep this task queued" (confirm against task_base's contract).
	// The schdler parameter is intentionally unused here: the state
	// learned its scheduler earlier, in await_suspend().
	virtual bool go_next(scheduler * schdler) override
	{
		_state->resume();
		return false;
	}
	virtual void cancel() override
	{
		_state->cancel();
	}
	// Identity the scheduler uses to locate/cancel this task.
	virtual void * get_id() override
	{
		return _state.get();
	}
	// No bind-time work needed for this task type (see go_next()).
	virtual void bind(scheduler * ) override
	{
	}
};
// Destructor: bookkeeping only — decrement the global live-state
// counter when debug counters are compiled in.
state_base::~state_base()
{
#if RESUMEF_DEBUG_COUNTER
	--g_resumef_state_count;
#endif
}
// Marks the state ready and, if a coroutine is awaiting it, queues a
// resume task on the state's scheduler. "none_lock": does not take
// _mtx — the caller is responsible for any required locking.
void state_base::set_value_none_lock()
{
	// Set all members first as calling coroutine may reset stuff here.
	_ready = true;
	if (_coro)
	{
		auto sch_ = this->current_scheduler();
		/*
		if (sch_ == nullptr)
			sch_ = this_scheduler();
		*/
		// NOTE(review): sch_ is dereferenced unconditionally; if a
		// value can be set before a scheduler is assigned, this is a
		// null dereference — confirm the commented-out fallback above
		// is truly obsolete.
		sch_->push_task_internal(new awaitable_task_t<state_base>(this));
	}
}
// Stores an exception as the state's result (under _mtx) and, if a
// coroutine is awaiting it, queues a resume task on the scheduler so
// the exception is rethrown at the co_await site.
void state_base::set_exception(std::exception_ptr && e_)
{
	scoped_lock<std::mutex> __guard(_mtx);
	_exception = std::move(e_);
	// Set all members first as calling coroutine may reset stuff here.
	_ready = true;
	if (_coro)
	{
		auto sch_ = this->current_scheduler();
		/*
		if (sch_ == nullptr)
			sch_ = this_scheduler();
		*/
		// NOTE(review): same unchecked sch_ dereference as in
		// set_value_none_lock() — confirm a scheduler is always bound
		// before an exception can be set.
		sch_->push_task_internal(new awaitable_task_t<state_base>(this));
	}
}
// Called when a coroutine co_awaits this state: records the resume
// handle and wires up scheduler inheritance between parent and child
// states.
void state_base::await_suspend(coroutine_handle<> resume_cb)
{
	_coro = resume_cb;
	if (_current_scheduler == nullptr)
	{
		// Not bound to a scheduler yet: try to inherit the parent
		// coroutine's scheduler.
		auto * promise_ = this->parent_promise();
		if (promise_)
		{
			scheduler * sch_ = promise_->_state->current_scheduler();
			if (sch_)
				this->current_scheduler(sch_);
			else
				// Parent is unbound too: register with the parent so we
				// receive the scheduler when the parent gets one
				// (counted_ptr keeps us alive while registered).
				promise_->_state->_depend_states.push_back(counted_ptr<state_base>(this));
		}
	}
	else
	{
		// Already bound: push our scheduler down to any child states
		// that registered while we had none, then drop the references.
		for (auto & stptr : _depend_states)
			stptr->current_scheduler(_current_scheduler);
		_depend_states.clear();
	}
}
// Binds this state to sch_ and propagates the binding to every state
// that registered as a dependent while we were unbound (see
// await_suspend()); the dependent list is then released.
void state_base::current_scheduler(scheduler * sch_)
{
	_current_scheduler = sch_;
	for (auto & stptr : _depend_states)
		stptr->current_scheduler(sch_);
	_depend_states.clear();
}
} |
#pragma once | #pragma once | ||||
#include "def.h" | #include "def.h" | ||||
#include "counted_ptr.h" | |||||
#include <iostream> | #include <iostream> | ||||
namespace resumef | namespace resumef | ||||
private: | private: | ||||
void * _this_promise = nullptr; | void * _this_promise = nullptr; | ||||
scheduler * _current_scheduler = nullptr; | scheduler * _current_scheduler = nullptr; | ||||
std::vector<counted_ptr<state_base>> _depend_states; | |||||
public: | public: | ||||
coroutine_handle<> _coro; | coroutine_handle<> _coro; | ||||
std::atomic<intptr_t> _count = 0; // tracks reference count of state object | std::atomic<intptr_t> _count = 0; // tracks reference count of state object | ||||
std::exception_ptr _exception; | std::exception_ptr _exception; | ||||
bool _ready = false; | bool _ready = false; | ||||
bool _future_acquired = false; | |||||
bool _cancellation = false; | bool _cancellation = false; | ||||
bool _done = false; | bool _done = false; | ||||
{ | { | ||||
_coro = nullptr; | _coro = nullptr; | ||||
_ready = false; | _ready = false; | ||||
_future_acquired = false; | |||||
} | } | ||||
void cancel() | void cancel() | ||||
{ | { | ||||
if (_coro) | if (_coro) | ||||
{ | { | ||||
std::cout << "scheduler=" << current_scheduler() | |||||
<< ",coro=" << _coro.address() | |||||
<< ",this_promise=" << this_promise() | |||||
<< ",parent_promise=" << parent_promise() << std::endl; | |||||
#if RESUMEF_DEBUG_COUNTER | |||||
{ | |||||
scoped_lock<std::mutex> __lock(g_resumef_cout_mutex); | |||||
std::cout << "scheduler=" << current_scheduler() | |||||
<< ",coro=" << _coro.address() | |||||
<< ",this_promise=" << this_promise() | |||||
<< ",parent_promise=" << parent_promise() | |||||
<< ",thread=" << std::this_thread::get_id() | |||||
<< std::endl; | |||||
} | |||||
#endif | |||||
auto coro = _coro; | auto coro = _coro; | ||||
_coro = nullptr; | _coro = nullptr; | ||||
coro(); | coro(); | ||||
} | } | ||||
promise_t<void> * parent_promise() const; | promise_t<void> * parent_promise() const; | ||||
scheduler * parent_scheduler() const; | |||||
//scheduler * parent_scheduler() const; | |||||
void * this_promise() const | void * this_promise() const | ||||
{ | { | ||||
{ | { | ||||
return _current_scheduler; | return _current_scheduler; | ||||
} | } | ||||
void current_scheduler(scheduler * sch_) | |||||
{ | |||||
_current_scheduler = sch_; | |||||
} | |||||
void current_scheduler(scheduler * sch_); | |||||
//------------------------------------------------------------------------------------------ | //------------------------------------------------------------------------------------------ | ||||
//以下是通过future_t/promise_t, 与编译器生成的resumable function交互的接口 | //以下是通过future_t/promise_t, 与编译器生成的resumable function交互的接口 | ||||
bool await_ready() | |||||
{ | |||||
return _ready; | |||||
} | |||||
void await_suspend(coroutine_handle<> resume_cb) | |||||
{ | |||||
_coro = resume_cb; | |||||
} | |||||
void await_resume() | |||||
{ | |||||
} | |||||
void await_suspend(coroutine_handle<> resume_cb); | |||||
void final_suspend() | void final_suspend() | ||||
{ | { | ||||
_done = true; | _done = true; | ||||
} | } | ||||
bool cancellation_requested() const | |||||
{ | |||||
return _cancellation; | |||||
} | |||||
//以上是通过future_t/promise_t, 与编译器生成的resumable function交互的接口 | //以上是通过future_t/promise_t, 与编译器生成的resumable function交互的接口 | ||||
//------------------------------------------------------------------------------------------ | //------------------------------------------------------------------------------------------ | ||||
}; | }; | ||||
} | } | ||||
}; | }; | ||||
// counted_ptr is similar to shared_ptr but allows explicit control
// of the reference count via the pointee's own lock()/unlock():
// lock() acquires one reference, unlock() releases one (the pointee
// destroys itself when the count reaches zero).
template <typename T>
struct counted_ptr
{
	counted_ptr() = default;
	counted_ptr(const counted_ptr& cp) : _p(cp._p)
	{
		_lock();
	}
	// Takes shared ownership of p (adds one reference).
	counted_ptr(T* p) : _p(p)
	{
		_lock();
	}
	// Move: steal cp's reference, leaving cp empty.
	counted_ptr(counted_ptr&& cp)
	{
		std::swap(_p, cp._p);
	}
	counted_ptr& operator=(const counted_ptr& cp)
	{
		if (&cp != this)
		{
			_unlock();
			_lock(cp._p);
		}
		return *this;
	}
	// Move-assign by swapping: our previous pointee (if any) is
	// released when cp is destroyed.
	counted_ptr& operator=(counted_ptr&& cp)
	{
		if (&cp != this)
			std::swap(_p, cp._p);
		return *this;
	}
	~counted_ptr()
	{
		_unlock();
	}
	T* operator->() const
	{
		return _p;
	}
	T* get() const
	{
		return _p;
	}
	// Drop our reference (if any); leaves this pointer empty.
	void reset()
	{
		_unlock();
	}
protected:
	void _unlock()
	{
		if (_p != nullptr)
		{
			// Clear _p before calling unlock(): unlock() may destroy
			// the pointee.
			auto t = _p;
			_p = nullptr;
			t->unlock();
		}
	}
	void _lock(T* p)
	{
		if (p != nullptr)
			p->lock();
		_p = p;
	}
	void _lock()
	{
		if (_p != nullptr)
			_p->lock();
	}
	T* _p = nullptr;
};
template <typename T> | |||||
counted_ptr<T> make_counted() | |||||
{ | |||||
return new T{}; | |||||
} | |||||
} | } | ||||
auto val = co_await loop_get_long(2); | auto val = co_await loop_get_long(2); | ||||
std::cout << val << std::endl; | std::cout << val << std::endl; | ||||
}; | }; | ||||
//resumef::g_scheduler.run_until_notask(); | |||||
//resumef::this_scheduler()->run_until_notask(); | |||||
go resumable_get_long(3); | go resumable_get_long(3); | ||||
resumef::g_scheduler.run_until_notask(); | |||||
resumef::this_scheduler()->run_until_notask(); | |||||
} | } |
go test_channel_read(c); | go test_channel_read(c); | ||||
go test_channel_write(c); | go test_channel_write(c); | ||||
g_scheduler.run_until_notask(); | |||||
this_scheduler()->run_until_notask(); | |||||
} | } | ||||
void test_channel_write_first() | void test_channel_write_first() | ||||
go test_channel_write(c); | go test_channel_write(c); | ||||
go test_channel_read(c); | go test_channel_read(c); | ||||
g_scheduler.run_until_notask(); | |||||
this_scheduler()->run_until_notask(); | |||||
} | } | ||||
void resumable_main_channel() | void resumable_main_channel() |
go co_star(1); | go co_star(1); | ||||
go co_star(2); | go co_star(2); | ||||
resumef::g_scheduler.run_until_notask(); | |||||
resumef::this_scheduler()->run_until_notask(); | |||||
std::cout << "dynamic_go_count = " << dynamic_go_count << std::endl; | std::cout << "dynamic_go_count = " << dynamic_go_count << std::endl; | ||||
for (auto & j : dynamic_cells) | for (auto & j : dynamic_cells) |
event_t evt; | event_t evt; | ||||
go resumable_wait_event(evt); | go resumable_wait_event(evt); | ||||
auto tt = async_set_event(evt, 1000ms); | auto tt = async_set_event(evt, 1000ms); | ||||
g_scheduler.run_until_notask(); | |||||
this_scheduler()->run_until_notask(); | |||||
tt.join(); | tt.join(); | ||||
} | } | ||||
std::cout << std::this_thread::get_id() << std::endl; | std::cout << std::this_thread::get_id() << std::endl; | ||||
auto tt = async_set_event(evt2, 1000ms); | auto tt = async_set_event(evt2, 1000ms); | ||||
g_scheduler.run_until_notask(); | |||||
this_scheduler()->run_until_notask(); | |||||
tt.join(); | tt.join(); | ||||
} | } | ||||
vtt.emplace_back(async_set_event(e, 1ms * (500 + rand() % 1000))); | vtt.emplace_back(async_set_event(e, 1ms * (500 + rand() % 1000))); | ||||
} | } | ||||
g_scheduler.run_until_notask(); | |||||
this_scheduler()->run_until_notask(); | |||||
for (auto & tt : vtt) | for (auto & tt : vtt) | ||||
tt.join(); | tt.join(); | ||||
vtt.emplace_back(async_set_event(e, 1ms * (500 + rand() % 1000))); | vtt.emplace_back(async_set_event(e, 1ms * (500 + rand() % 1000))); | ||||
} | } | ||||
g_scheduler.run_until_notask(); | |||||
this_scheduler()->run_until_notask(); | |||||
for (auto & tt : vtt) | for (auto & tt : vtt) | ||||
tt.join(); | tt.join(); | ||||
vtt.emplace_back(async_set_event(e, 1ms * (500 + rand() % 1000))); | vtt.emplace_back(async_set_event(e, 1ms * (500 + rand() % 1000))); | ||||
} | } | ||||
g_scheduler.run_until_notask(); | |||||
this_scheduler()->run_until_notask(); | |||||
for (auto & tt : vtt) | for (auto & tt : vtt) | ||||
tt.join(); | tt.join(); |
async_set_event(evt, 10s + 50ms); | async_set_event(evt, 10s + 50ms); | ||||
//go resumalbe_set_event(evt, 10s + 50ms); | //go resumalbe_set_event(evt, 10s + 50ms); | ||||
g_scheduler.run_until_notask(); | |||||
this_scheduler()->run_until_notask(); | |||||
} | } | ||||
void test_wait_timeout_any_invalid() | void test_wait_timeout_any_invalid() | ||||
assert(idx < 0); | assert(idx < 0); | ||||
std::cout << "invalid wait!" << std::endl; | std::cout << "invalid wait!" << std::endl; | ||||
}; | }; | ||||
g_scheduler.run_until_notask(); | |||||
this_scheduler()->run_until_notask(); | |||||
} | } | ||||
void test_wait_timeout_any() | void test_wait_timeout_any() | ||||
} | } | ||||
//取消剩下的定时器,以便于协程调度器退出来 | //取消剩下的定时器,以便于协程调度器退出来 | ||||
g_scheduler.timer()->clear(); | |||||
this_scheduler()->timer()->clear(); | |||||
}; | }; | ||||
srand((int)time(nullptr)); | srand((int)time(nullptr)); | ||||
async_set_event(e, 1ms * (1000 + rand() % 5000)); | async_set_event(e, 1ms * (1000 + rand() % 5000)); | ||||
} | } | ||||
g_scheduler.run_until_notask(); | |||||
this_scheduler()->run_until_notask(); | |||||
} | } | ||||
void test_wait_timeout_all_invalid() | void test_wait_timeout_all_invalid() | ||||
assert(!result); | assert(!result); | ||||
std::cout << "invalid wait!" << std::endl; | std::cout << "invalid wait!" << std::endl; | ||||
}; | }; | ||||
g_scheduler.run_until_notask(); | |||||
this_scheduler()->run_until_notask(); | |||||
} | } | ||||
void test_wait_timeout_all() | void test_wait_timeout_all() | ||||
//async_set_event(e, 1ms * (1000 + rand() % 5000)); | //async_set_event(e, 1ms * (1000 + rand() % 5000)); | ||||
} | } | ||||
g_scheduler.run_until_notask(); | |||||
this_scheduler()->run_until_notask(); | |||||
} | } | ||||
void resumable_main_event_timeout() | void resumable_main_event_timeout() |
void resumable_main_exception() | void resumable_main_exception() | ||||
{ | { | ||||
go test_signal_exception(); | go test_signal_exception(); | ||||
g_scheduler.run_until_notask(); | |||||
this_scheduler()->run_until_notask(); | |||||
std::cout << std::endl; | std::cout << std::endl; | ||||
go test_bomb_exception(); | go test_bomb_exception(); | ||||
g_scheduler.run_until_notask(); | |||||
this_scheduler()->run_until_notask(); | |||||
} | } |
| |||||
#include <chrono> | |||||
#include <iostream> | |||||
#include <string> | |||||
#include <conio.h> | |||||
#include <thread> | |||||
#include "librf.h" | |||||
using namespace resumef; | |||||
std::mutex cout_mutex; | |||||
// A heavy computation task: runs on its own thread so the main thread
// is not blocked. Returns an awaitable that resolves to val squared
// after ~500ms.
auto async_heavy_computing_tasks(int64_t val)
{
	using namespace std::chrono;
	awaitable_t<int64_t> awaitable;
	// The detached thread keeps the shared state alive via the captured
	// counted_ptr; set_value() wakes the awaiting coroutine.
	std::thread([val, st = awaitable._state]
	{
		std::this_thread::sleep_for(500ms);
		st->set_value(val * val);
	}).detach();
	return awaitable;
}
// Squares val three times in sequence, awaiting each round's result
// from a worker thread, printing the value (and current thread id)
// before every round and once at the end. cout is guarded by
// cout_mutex because multiple threads run this concurrently.
future_vt heavy_computing_sequential(int64_t val)
{
	for(size_t i = 0; i < 3; ++i)
	{
		{
			scoped_lock<std::mutex> __lock(cout_mutex);
			std::cout << val << " @" << std::this_thread::get_id() << std::endl;
		}
		val = co_await async_heavy_computing_tasks(val);
	}
	{
		scoped_lock<std::mutex> __lock(cout_mutex);
		std::cout << val << " @" << std::this_thread::get_id() << std::endl;
	}
}
// Demonstrates pinning coroutines to the calling thread.
void test_use_single_thread()
{
	// Declare a scheduler (my_scheduler) bound to this thread via
	// local_scheduler. Coroutines started on this thread afterwards get
	// my_scheduler's address from this_scheduler(), so all of their
	// operations are scheduled there — i.e. each coroutine stays bound
	// to one thread.
	// What happens if several local_schedulers are declared on the same
	// thread? ---- unknown (original author's note).
	// Without my_scheduler, this_scheduler() would return the default
	// main scheduler instead.
	local_scheduler my_scheduler;
	{
		scoped_lock<std::mutex> __lock(cout_mutex);
		std::cout << "running in thread @" << std::this_thread::get_id() << std::endl;
	}
	go heavy_computing_sequential(2);
	this_scheduler()->run_until_notask();
}
const size_t N = 2; | |||||
void test_use_multi_thread() | |||||
{ | |||||
std::thread th_array[N]; | |||||
for (size_t i = 0; i < N; ++i) | |||||
th_array[i] = std::thread(&test_use_single_thread); | |||||
test_use_single_thread(); | |||||
for (auto & th : th_array) | |||||
th.join(); | |||||
} | |||||
void resumable_main_multi_thread() | |||||
{ | |||||
std::cout << "test_use_single_thread @" << std::this_thread::get_id() << std::endl << std::endl; | |||||
test_use_single_thread(); | |||||
std::cout << std::endl; | |||||
std::cout << "test_use_multi_thread @" << std::this_thread::get_id() << std::endl << std::endl; | |||||
test_use_multi_thread(); | |||||
//运行主调度器里面的协程 | |||||
//但本范例不应该有协程存在,仅演示不要忽略了主调度器 | |||||
scheduler::g_scheduler.run_until_notask(); | |||||
} |
go test_mutex_push(); | go test_mutex_push(); | ||||
go test_mutex_pop(1); | go test_mutex_pop(1); | ||||
g_scheduler.run_until_notask(); | |||||
this_scheduler()->run_until_notask(); | |||||
} | } |
return N / coro; | return N / coro; | ||||
}; | }; | ||||
} | } | ||||
resumef::g_scheduler.run_until_notask(); | |||||
resumef::this_scheduler()->run_until_notask(); | |||||
auto end = std::chrono::steady_clock::now(); | auto end = std::chrono::steady_clock::now(); | ||||
dump("BenchmarkSwitch_" + std::to_string(coro), N, start, end); | dump("BenchmarkSwitch_" + std::to_string(coro), N, start, end); |
{ | { | ||||
go test_routine_use_timer_2(); | go test_routine_use_timer_2(); | ||||
//go test_routine_use_timer(); | //go test_routine_use_timer(); | ||||
g_scheduler.run_until_notask(); | |||||
this_scheduler()->run_until_notask(); | |||||
} | } |
}; | }; | ||||
} | } | ||||
while (!g_scheduler.empty()) | |||||
while (!this_scheduler()->empty()) | |||||
{ | { | ||||
g_scheduler.run_one_batch(); | |||||
this_scheduler()->run_one_batch(); | |||||
//std::cout << "press any key to continue." << std::endl; | //std::cout << "press any key to continue." << std::endl; | ||||
//_getch(); | //_getch(); | ||||
} | } | ||||
void resumable_main_sleep() | void resumable_main_sleep() | ||||
{ | { | ||||
go test_sleep_use_timer(); | go test_sleep_use_timer(); | ||||
g_scheduler.run_until_notask(); | |||||
this_scheduler()->run_until_notask(); | |||||
std::cout << std::endl; | std::cout << std::endl; | ||||
test_wait_all_events_with_signal_by_sleep(); | test_wait_all_events_with_signal_by_sleep(); |
{ | { | ||||
go test_recursive_await(); | go test_recursive_await(); | ||||
go test_recursive_go(); | go test_recursive_go(); | ||||
g_scheduler.run_until_notask(); | |||||
this_scheduler()->run_until_notask(); | |||||
} | } | ||||
/* | /* |
{ | { | ||||
using namespace std::chrono; | using namespace std::chrono; | ||||
auto th = g_scheduler.timer()->add_handler(system_clock::now() + 5s, | |||||
auto th = this_scheduler()->timer()->add_handler(system_clock::now() + 5s, | |||||
[](bool bValue) | [](bool bValue) | ||||
{ | { | ||||
if (bValue) | if (bValue) | ||||
std::cout << "timer after 5s." << std::endl; | std::cout << "timer after 5s." << std::endl; | ||||
}); | }); | ||||
auto th2 = g_scheduler.timer()->add_handler(1s, | |||||
auto th2 = this_scheduler()->timer()->add_handler(1s, | |||||
[&th](bool) | [&th](bool) | ||||
{ | { | ||||
std::cout << "timer after 1s." << std::endl; | std::cout << "timer after 1s." << std::endl; | ||||
th.stop(); | th.stop(); | ||||
}); | }); | ||||
g_scheduler.run_until_notask(); | |||||
this_scheduler()->run_until_notask(); | |||||
th2.stop(); //but th2 is invalid | th2.stop(); //but th2 is invalid |
void resumable_main_yield_return() | void resumable_main_yield_return() | ||||
{ | { | ||||
go test_yield_int(); | go test_yield_int(); | ||||
g_scheduler.run_until_notask(); | |||||
this_scheduler()->run_until_notask(); | |||||
go test_yield_void(); | go test_yield_void(); | ||||
g_scheduler.run_until_notask(); | |||||
this_scheduler()->run_until_notask(); | |||||
} | } |
extern void resumable_main_dynamic_go(); | extern void resumable_main_dynamic_go(); | ||||
extern void resumable_main_channel(); | extern void resumable_main_channel(); | ||||
extern void resumable_main_cb(); | extern void resumable_main_cb(); | ||||
extern void resumable_main_multi_thread(); | |||||
extern void resumable_main_benchmark_mem(); | extern void resumable_main_benchmark_mem(); | ||||
int main(int argc, const char * argv[]) | int main(int argc, const char * argv[]) | ||||
{ | { | ||||
resumable_main_routine(); | |||||
resumable_main_multi_thread(); | |||||
return 0; | return 0; | ||||
resumable_main_yield_return(); | resumable_main_yield_return(); |
<ClCompile> | <ClCompile> | ||||
<WarningLevel>Level3</WarningLevel> | <WarningLevel>Level3</WarningLevel> | ||||
<Optimization>Disabled</Optimization> | <Optimization>Disabled</Optimization> | ||||
<PreprocessorDefinitions>_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions> | |||||
<PreprocessorDefinitions>_DEBUG;_CONSOLE;RESUMEF_DEBUG_COUNTER=1;%(PreprocessorDefinitions)</PreprocessorDefinitions> | |||||
<SDLCheck>true</SDLCheck> | <SDLCheck>true</SDLCheck> | ||||
<AdditionalIncludeDirectories>..\librf;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> | <AdditionalIncludeDirectories>..\librf;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> | ||||
<AdditionalOptions>/await /std:c++latest </AdditionalOptions> | <AdditionalOptions>/await /std:c++latest </AdditionalOptions> | ||||
<ClCompile Include="..\librf\src\rf_task.cpp" /> | <ClCompile Include="..\librf\src\rf_task.cpp" /> | ||||
<ClCompile Include="..\librf\src\scheduler.cpp" /> | <ClCompile Include="..\librf\src\scheduler.cpp" /> | ||||
<ClCompile Include="..\librf\src\sleep.cpp" /> | <ClCompile Include="..\librf\src\sleep.cpp" /> | ||||
<ClCompile Include="..\librf\src\state.cpp" /> | |||||
<ClCompile Include="..\librf\src\timer.cpp" /> | <ClCompile Include="..\librf\src\timer.cpp" /> | ||||
<ClCompile Include="..\tutorial\test_async_cb.cpp" /> | <ClCompile Include="..\tutorial\test_async_cb.cpp" /> | ||||
<ClCompile Include="..\tutorial\test_async_channel.cpp" /> | <ClCompile Include="..\tutorial\test_async_channel.cpp" /> | ||||
<ClCompile Include="..\tutorial\test_async_event.cpp" /> | <ClCompile Include="..\tutorial\test_async_event.cpp" /> | ||||
<ClCompile Include="..\tutorial\test_async_event_timeout.cpp" /> | <ClCompile Include="..\tutorial\test_async_event_timeout.cpp" /> | ||||
<ClCompile Include="..\tutorial\test_async_exception.cpp" /> | <ClCompile Include="..\tutorial\test_async_exception.cpp" /> | ||||
<ClCompile Include="..\tutorial\test_async_multi_thread.cpp" /> | |||||
<ClCompile Include="..\tutorial\test_async_mutex.cpp" /> | <ClCompile Include="..\tutorial\test_async_mutex.cpp" /> | ||||
<ClCompile Include="..\tutorial\test_async_resumable.cpp" /> | <ClCompile Include="..\tutorial\test_async_resumable.cpp" /> | ||||
<ClCompile Include="..\tutorial\test_async_routine.cpp" /> | <ClCompile Include="..\tutorial\test_async_routine.cpp" /> | ||||
<ClInclude Include="..\librf\librf.h" /> | <ClInclude Include="..\librf\librf.h" /> | ||||
<ClInclude Include="..\librf\src\asio_task.h" /> | <ClInclude Include="..\librf\src\asio_task.h" /> | ||||
<ClInclude Include="..\librf\src\channel.h" /> | <ClInclude Include="..\librf\src\channel.h" /> | ||||
<ClInclude Include="..\librf\src\counted_ptr.h" /> | |||||
<ClInclude Include="..\librf\src\def.h" /> | <ClInclude Include="..\librf\src\def.h" /> | ||||
<ClInclude Include="..\librf\src\event.h" /> | <ClInclude Include="..\librf\src\event.h" /> | ||||
<ClInclude Include="..\librf\src\future.h" /> | <ClInclude Include="..\librf\src\future.h" /> |
<ClCompile Include="..\benchmark\benchmark_async_mem.cpp"> | <ClCompile Include="..\benchmark\benchmark_async_mem.cpp"> | ||||
<Filter>benchmark</Filter> | <Filter>benchmark</Filter> | ||||
</ClCompile> | </ClCompile> | ||||
<ClCompile Include="..\librf\src\state.cpp"> | |||||
<Filter>librf\src</Filter> | |||||
</ClCompile> | |||||
<ClCompile Include="..\tutorial\test_async_multi_thread.cpp"> | |||||
<Filter>tutorial</Filter> | |||||
</ClCompile> | |||||
</ItemGroup> | </ItemGroup> | ||||
<ItemGroup> | <ItemGroup> | ||||
<ClInclude Include="..\librf\librf.h"> | <ClInclude Include="..\librf\librf.h"> | ||||
<ClInclude Include="..\librf\src\unix\coroutine.h"> | <ClInclude Include="..\librf\src\unix\coroutine.h"> | ||||
<Filter>librf\src\unix</Filter> | <Filter>librf\src\unix</Filter> | ||||
</ClInclude> | </ClInclude> | ||||
<ClInclude Include="..\librf\src\counted_ptr.h"> | |||||
<Filter>librf\src</Filter> | |||||
</ClInclude> | |||||
</ItemGroup> | </ItemGroup> | ||||
</Project> | </Project> |