Browse Source

完成mutex_t的无死锁批量加锁功能

tags/v2.9.7
tearshark 4 years ago
parent
commit
a949d1f4c8

+ 1
- 1
librf/src/def.h View File

@@ -1,6 +1,6 @@
#pragma once
#define LIB_RESUMEF_VERSION 20803 // 2.8.3
#define LIB_RESUMEF_VERSION 20900 // 2.9.0
#if defined(RESUMEF_MODULE_EXPORT)
#define RESUMEF_NS export namespace resumef

+ 27
- 0
librf/src/mutex_v2.h View File

@@ -10,6 +10,7 @@ RESUMEF_NS
inline namespace mutex_v2
{
struct [[nodiscard]] scoped_lock_mutex_t;
struct [[nodiscard]] scoped_unlock_range_t;

//支持递归的锁
struct mutex_t
@@ -51,6 +52,26 @@ RESUMEF_NS
bool try_lock_until(const std::chrono::time_point<_Rep, _Period>& tp, void* unique_address);
void unlock(void* unique_address) const;

struct _MutexAwaitAssembleT;

template<class... _Mtxs
, typename = std::enable_if_t<std::conjunction_v<std::is_same<std::remove_cvref_t<_Mtxs>, mutex_t>...>>
>
static future_t<scoped_unlock_range_t> lock(_Mtxs&... mtxs);

template<class... _Mtxs
, typename = std::enable_if_t<std::conjunction_v<std::is_same<std::remove_cvref_t<_Mtxs>, mutex_t>...>>
>
static scoped_unlock_range_t lock(void* unique_address, _Mtxs&... mtxs);

template<class... _Mtxs
, typename = std::enable_if_t<std::conjunction_v<std::is_same<std::remove_cvref_t<_Mtxs>, mutex_t>...>>
>
static void unlock(void* unique_address, _Mtxs&... mtxs)
{
unlock_address(unique_address, mtxs...);
}

mutex_t(const mutex_t&) = default;
mutex_t(mutex_t&&) = default;
mutex_t& operator = (const mutex_t&) = default;
@@ -58,6 +79,12 @@ RESUMEF_NS
private:
friend struct scoped_lock_mutex_t;
mutex_impl_ptr _mutex;

template<class... _Mtxs
, typename = std::enable_if_t<std::conjunction_v<std::is_same<std::remove_cvref_t<_Mtxs>, mutex_t>...>>
>
static void unlock_address(void* unique_address, mutex_t& _First, _Mtxs&... _Rest);
static void unlock_address(void*) {}
};
}
}

+ 159
- 0
librf/src/mutex_v2.inl View File

@@ -110,6 +110,58 @@ RESUMEF_NS
mutex_v2_impl& operator=(const mutex_v2_impl&) = delete;
mutex_v2_impl& operator=(mutex_v2_impl&&) = delete;
};

struct _MutexAddressAssembleT
{
private:
void* _Address;
public:
std::vector<mutex_t> _Lks;

template<class... _Mtxs>
_MutexAddressAssembleT(void* unique_address, _Mtxs&... mtxs)
: _Address(unique_address)
, _Lks({ mtxs... })
{}
size_t size() const
{
return _Lks.size();
}
mutex_t& operator[](int _Idx)
{
return _Lks[_Idx];
}
void _Lock_ref(mutex_t& _LkN) const
{
return _LkN.lock(_Address);
}
bool _Try_lock_ref(mutex_t& _LkN) const
{
return _LkN.try_lock(_Address);
}
void _Unlock_ref(mutex_t& _LkN) const
{
_LkN.unlock(_Address);
}
void _Yield() const
{
std::this_thread::yield();
}
void _ReturnValue() const noexcept {}
template<class U>
U _ReturnValue(U v) const noexcept
{
return v;
}
};

#define LOCK_ASSEMBLE_NAME(fnName) mutex_lock_await_##fnName
#define LOCK_ASSEMBLE_AWAIT(a) co_await (a)
#define LOCK_ASSEMBLE_RETURN(a) co_return (a)
#include "without_deadlock_assemble.inl"
#undef LOCK_ASSEMBLE_NAME
#undef LOCK_ASSEMBLE_AWAIT
#undef LOCK_ASSEMBLE_RETURN
}

inline namespace mutex_v2
@@ -393,5 +445,112 @@ RESUMEF_NS
{
_mutex->unlock(unique_address);
}

// RAII holder for a group of mutexes acquired together via mutex_t::lock(...).
// Releases every held mutex on destruction, or earlier via unlock().
// Intended to be obtained after a successful try_lock(), or inside a
// coroutine through the awaiter.
struct [[nodiscard]] scoped_unlock_range_t
{
    // Takes ownership of already-locked mutexes; 'sch' is the unique owner
    // address that was used to lock them (root state / scheduler address).
    scoped_unlock_range_t(std::vector<mutex_t>&& mtxs, void* sch)
        : _mutex(std::move(mtxs))
        , _owner(sch)
    {}

    ~scoped_unlock_range_t()
    {
        if (_owner != nullptr)
        {
            for (mutex_t& mtx : _mutex)
                mtx.unlock(_owner);
        }
    }

    // Release all locks early; idempotent (safe to call more than once).
    inline void unlock() noexcept
    {
        if (_owner != nullptr)
        {
            for (mutex_t& mtx : _mutex)
                mtx.unlock(_owner);
            _owner = nullptr;
        }
    }

    scoped_unlock_range_t(const scoped_unlock_range_t&) = delete;
    scoped_unlock_range_t& operator = (const scoped_unlock_range_t&) = delete;

    // BUGFIX: the previously defaulted move operations copied '_owner' but
    // left it set in the moved-from object, so BOTH objects unlocked the
    // mutexes in their destructors (double-unlock). Null out the source's
    // owner so only one object ever releases the locks.
    scoped_unlock_range_t(scoped_unlock_range_t&& _Right) noexcept
        : _mutex(std::move(_Right._mutex))
        , _owner(_Right._owner)
    {
        _Right._owner = nullptr;
    }
    scoped_unlock_range_t& operator = (scoped_unlock_range_t&& _Right) noexcept
    {
        if (this != &_Right)
        {
            unlock();   // release anything we currently hold first
            _mutex = std::move(_Right._mutex);
            _owner = _Right._owner;
            _Right._owner = nullptr;
        }
        return *this;
    }
private:
    std::vector<mutex_t> _mutex;    // the locked group
    void* _owner;                   // nullptr once released / moved-from
};

// Lock-assembly adapter for the coroutine batch-lock path. Unlike
// _MutexAddressAssembleT, _Lock_ref/_Try_lock_ref return the awaitables
// produced by mutex_t::lock()/try_lock() (no explicit owner address), and
// _Yield suspends through the scheduler instead of yielding the OS thread.
struct mutex_t::_MutexAwaitAssembleT
{
private:
    void* _Address;             // owner identity, used here only for unlocking
public:
    std::vector<mutex_t> _Lks;  // the mutexes acquired/released as a group

    template<class... _Mtxs>
    _MutexAwaitAssembleT(void* unique_address, _Mtxs&... mtxs)
        : _Address(unique_address)
        , _Lks({ mtxs... })
    {}
    size_t size() const
    {
        return _Lks.size();
    }
    mutex_t& operator[](int _Idx)
    {
        return _Lks[_Idx];
    }
    // Returns the awaitable from the coroutine-flavored lock().
    auto _Lock_ref(mutex_t& _LkN) const
    {
        return _LkN.lock();
    }
    // Returns the awaitable from the coroutine-flavored try_lock().
    auto _Try_lock_ref(mutex_t& _LkN) const
    {
        return _LkN.try_lock();
    }
    void _Unlock_ref(mutex_t& _LkN) const
    {
        _LkN.unlock(_Address);
    }
    // Randomized back-off: co_await the scheduler 0..size() times to reduce
    // livelock between competing batch-lockers.
    // NOTE(review): uses rand(); presumably per-thread determinism is not
    // required here — confirm if reproducibility matters.
    future_t<> _Yield() const
    {
        for (int cnt = rand() % (1 + _Lks.size()); cnt >= 0; --cnt)
            co_await ::resumef::yield();
    }
    // Return-value shims (coroutine flavor); defined elsewhere.
    future_t<> _ReturnValue() const;
    template<class U>
    future_t<U> _ReturnValue(U v) const;
};

// Deadlock-free batch lock, coroutine flavor: acquires every mutex in
// 'mtxs' on behalf of the current coroutine's root state and returns a
// scoped_unlock_range_t that releases them all when it goes out of scope.
template<class... _Mtxs, typename>
inline future_t<scoped_unlock_range_t> mutex_t::lock(_Mtxs&... mtxs)
{
    auto* owner = root_state();
    _MutexAwaitAssembleT assembler(owner, mtxs...);

    // Run the std::lock-style try/backoff algorithm over the whole set.
    co_await detail::mutex_lock_await_lock_impl::_Lock_range(assembler);

    co_return scoped_unlock_range_t{ std::move(assembler._Lks), owner };
}


// Deadlock-free batch lock, synchronous flavor: acquires every mutex in
// 'mtxs' on behalf of the caller-supplied owner address (blocking the
// current thread) and returns the RAII unlocker.
template<class... _Mtxs, typename>
inline scoped_unlock_range_t mutex_t::lock(void* unique_address, _Mtxs&... mtxs)
{
    detail::_MutexAddressAssembleT assembler(unique_address, mtxs...);
    detail::scoped_lock_range_lock_impl::_Lock_range(assembler);
    return scoped_unlock_range_t{ std::move(assembler._Lks), unique_address };
}

// Unlocks every mutex in the pack on behalf of 'unique_address'.
// A C++17 fold expression replaces the original head/tail recursion; the
// left-to-right unlock order is unchanged.
template<class... _Mtxs, typename>
inline void mutex_t::unlock_address(void* unique_address, mutex_t& _First, _Mtxs&... _Rest)
{
    _First.unlock(unique_address);
    (_Rest.unlock(unique_address), ...);
}
}
}


+ 63
- 49
librf/src/spinlock.h View File

@@ -91,31 +91,13 @@ RESUMEF_NS
namespace detail
{
#if RESUMEF_ENABLE_CONCEPT
template<typename T>
concept _LockAssembleT = requires(T && v)
{
{ v.size() };
{ v[0] };
{ v._Lock_ref(v[0]) } ->void;
{ v._Try_lock_ref(v[0]) } ->bool;
{ v._Unlock_ref(v[0]) } ->void;
{ v._Yield() };
{ v._ReturnValue() };
{ v._ReturnValue(0) };
requires std::is_integral_v<decltype(v.size())>;
};
#else
#define _LockAssembleT typename
#endif
template<class _Ty>
template<class _Ty, class _Cont = std::vector<_Ty>>
struct _LockVectorAssembleT
{
private:
std::vector<_Ty>& _Lks;
_Cont& _Lks;
public:
_LockVectorAssembleT(std::vector<_Ty>& _LkN)
_LockVectorAssembleT(_Cont& _LkN)
: _Lks(_LkN)
{}
size_t size() const
@@ -142,21 +124,18 @@ RESUMEF_NS
{
std::this_thread::yield();
}
void _ReturnValue() const noexcept {}
void _ReturnValue() const;
template<class U>
U _ReturnValue(U v) const noexcept
{
return v;
}
U _ReturnValue(U v) const;
};
template<class _Ty>
struct _LockVectorAssembleT<std::reference_wrapper<_Ty>>
template<class _Ty, class _Cont>
struct _LockVectorAssembleT<std::reference_wrapper<_Ty>, _Cont>
{
private:
std::vector<std::reference_wrapper<_Ty>>& _Lks;
_Cont& _Lks;
public:
_LockVectorAssembleT(std::vector<std::reference_wrapper<_Ty>>& _LkN)
_LockVectorAssembleT(_Cont& _LkN)
: _Lks(_LkN)
{}
size_t size() const
@@ -183,48 +162,83 @@ RESUMEF_NS
{
std::this_thread::yield();
}
void _ReturnValue() const noexcept {}
void _ReturnValue() const;
template<class U>
U _ReturnValue(U v) const noexcept
{
return v;
}
U _ReturnValue(U v) const;
};
}
}
#define LOCK_ASSEMBLE_NAME(fnName) scoped_lock_range_##fnName
#define LOCK_ASSEMBLE_AWAIT(a) (a)
#define LOCK_ASSEMBLE_RETURN(a) return (a)
#include "without_deadlock_assemble.inl"
#undef LOCK_ASSEMBLE_NAME
#undef LOCK_ASSEMBLE_AWAIT
#undef LOCK_ASSEMBLE_RETURN
}
RESUMEF_NS
{
template<class _Ty>
template<class _Ty, class _Cont = std::vector<_Ty>, class _Assemble = detail::_LockVectorAssembleT<_Ty, _Cont>>
class scoped_lock_range { // class with destructor that unlocks mutexes
public:
explicit scoped_lock_range(std::vector<_Ty>& locks_)
: _LkN(locks_)
explicit scoped_lock_range(_Cont& locks_)
: _LkN(&locks_)
, _LA(*_LkN)
{
detail::scoped_lock_range_lock_impl::_Lock_range(_LA);
}
explicit scoped_lock_range(_Cont& locks_, _Assemble& la_)
: _LkN(&locks_)
, _LA(la_)
{
detail::_LockVectorAssembleT<_Ty> LA{ _LkN };
detail::_Lock_range(LA);
detail::scoped_lock_range_lock_impl::_Lock_range(_LA);
}
explicit scoped_lock_range(std::adopt_lock_t, std::vector<_Ty>& locks_)
: _LkN(locks_)
explicit scoped_lock_range(std::adopt_lock_t, _Cont& locks_)
: _LkN(&locks_)
, _LA(*_LkN)
{ // construct but don't lock
}
explicit scoped_lock_range(std::adopt_lock_t, _Cont& locks_, _Assemble& la_)
: _LkN(&locks_)
, _LA(la_)
{ // construct but don't lock
}
~scoped_lock_range() noexcept
{
detail::_LockVectorAssembleT<_Ty> LA{ _LkN };
detail::_Unlock_locks(0, (int)_LkN.size(), LA);
if (_LkN != nullptr)
detail::scoped_lock_range_lock_impl::_Unlock_locks(0, (int)_LA.size(), _LA);
}
void unlock()
{
if (_LkN != nullptr)
{
_LkN = nullptr;
detail::scoped_lock_range_lock_impl::_Unlock_locks(0, (int)_LA.size(), _LA);
}
}
scoped_lock_range(const scoped_lock_range&) = delete;
scoped_lock_range& operator=(const scoped_lock_range&) = delete;
scoped_lock_range(scoped_lock_range&& _Right)
: _LkN(_Right._LkN)
, _LA(std::move(_Right._LA))
{
_Right._LkN = nullptr;
}
scoped_lock_range& operator=(scoped_lock_range&& _Right)
{
if (this != &_Right)
{
_LkN = _Right._LkN;
_Right._LkN = nullptr;
_LA = std::move(_Right._LA);
}
}
private:
std::vector<_Ty>& _LkN;
_Cont* _LkN;
_Assemble _LA;
};
}

+ 20
- 2
librf/src/type_concept.inl View File

@@ -2,9 +2,9 @@
#ifndef RESUMEF_ENABLE_CONCEPT
#ifdef __cpp_lib_concepts
#define RESUMEF_ENABLE_CONCEPT 1
#define RESUMEF_ENABLE_CONCEPT 0
#else
#define RESUMEF_ENABLE_CONCEPT 1
#define RESUMEF_ENABLE_CONCEPT 0
#endif //#ifdef __cpp_lib_concepts
#endif //#ifndef RESUMEF_ENABLE_CONCEPT
@@ -119,4 +119,22 @@ RESUMEF_NS
#endif
#if RESUMEF_ENABLE_CONCEPT
template<typename T>
concept _LockAssembleT = requires(T && v)
{
{ v.size() };
{ v[0] };
{ v._Lock_ref(v[0]) };
{ v._Try_lock_ref(v[0]) };
{ v._Unlock_ref(v[0]) } ->void;
{ v._Yield() };
{ v._ReturnValue() };
{ v._ReturnValue(0) };
requires std::is_integral_v<decltype(v.size())>;
};
#else
#define _LockAssembleT typename
#endif
}

+ 105
- 108
librf/src/without_deadlock_assemble.inl View File

@@ -1,134 +1,131 @@

RESUMEF_NS
struct LOCK_ASSEMBLE_NAME(lock_impl)
{
namespace detail
// FUNCTION TEMPLATE _Unlock_locks
template<_LockAssembleT _LA>
static void _Unlock_locks(int _First, int _Last, _LA& _LkN) noexcept /* terminates */
{
// FUNCTION TEMPLATE _Unlock_locks
template<_LockAssembleT _LA>
auto _Unlock_locks(int _First, int _Last, _LA& _LkN) noexcept /* terminates */
->decltype(_LkN._ReturnValue())
{
for (; _First != _Last; ++_First) {
LOCK_ASSEMBLE_AWAIT(_LkN._Unlock_ref(_LkN[_First]));
}
for (; _First != _Last; ++_First) {
_LkN._Unlock_ref(_LkN[_First]);
}
}
// FUNCTION TEMPLATE try_lock
template<_LockAssembleT _LA>
auto _Try_lock_range(const int _First, const int _Last, _LA& _LkN)
->decltype(_LkN._ReturnValue<int>(0))
{
int _Next = _First;
try {
for (; _Next != _Last; ++_Next)
{
if (!LOCK_ASSEMBLE_AWAIT(_LkN._Try_lock_ref(_LkN[_Next])))
{ // try_lock failed, backout
LOCK_ASSEMBLE_AWAIT(_Unlock_locks(_First, _Next, _LkN));
LOCK_ASSEMBLE_RETURN(_Next);
}
// FUNCTION TEMPLATE try_lock
template<_LockAssembleT _LA>
static auto _Try_lock_range(const int _First, const int _Last, _LA& _LkN)
->decltype(_LkN._ReturnValue<int>(0))
{
int _Next = _First;
try {
for (; _Next != _Last; ++_Next)
{
if (!LOCK_ASSEMBLE_AWAIT(_LkN._Try_lock_ref(_LkN[_Next])))
{ // try_lock failed, backout
_Unlock_locks(_First, _Next, _LkN);
LOCK_ASSEMBLE_RETURN(_Next);
}
}
catch (...) {
LOCK_ASSEMBLE_AWAIT(_Unlock_locks(_First, _Next, _LkN));
throw;
}
LOCK_ASSEMBLE_RETURN(-1);
}
catch (...) {
_Unlock_locks(_First, _Next, _LkN);
throw;
}
// FUNCTION TEMPLATE lock
template<_LockAssembleT _LA>
auto _Lock_attempt(const int _Hard_lock, _LA& _LkN)
->decltype(_LkN._ReturnValue<int>(0))
{
// attempt to lock 3 or more locks, starting by locking _LkN[_Hard_lock] and trying to lock the rest
LOCK_ASSEMBLE_AWAIT(_LkN._Lock_ref(_LkN[_Hard_lock]));
int _Failed = -1;
int _Backout_start = _Hard_lock; // that is, unlock _Hard_lock
LOCK_ASSEMBLE_RETURN(-1);
}
// FUNCTION TEMPLATE lock
template<_LockAssembleT _LA>
static auto _Lock_attempt(const int _Hard_lock, _LA& _LkN)
->decltype(_LkN._ReturnValue<int>(0))
{
// attempt to lock 3 or more locks, starting by locking _LkN[_Hard_lock] and trying to lock the rest
LOCK_ASSEMBLE_AWAIT(_LkN._Lock_ref(_LkN[_Hard_lock]));
int _Failed = -1;
int _Backout_start = _Hard_lock; // that is, unlock _Hard_lock
try {
_Failed = LOCK_ASSEMBLE_AWAIT(_Try_lock_range(0, _Hard_lock, _LkN));
if (_Failed == -1)
{
_Backout_start = 0; // that is, unlock [0, _Hard_lock] if the next throws
_Failed = LOCK_ASSEMBLE_AWAIT(_Try_lock_range(_Hard_lock + 1, (int)_LkN.size(), _LkN));
if (_Failed == -1) { // we got all the locks
LOCK_ASSEMBLE_RETURN(-1);
}
try {
_Failed = LOCK_ASSEMBLE_AWAIT(_Try_lock_range(0, _Hard_lock, _LkN));
if (_Failed == -1)
{
_Backout_start = 0; // that is, unlock [0, _Hard_lock] if the next throws
_Failed = LOCK_ASSEMBLE_AWAIT(_Try_lock_range(_Hard_lock + 1, (int)_LkN.size(), _LkN));
if (_Failed == -1) { // we got all the locks
LOCK_ASSEMBLE_RETURN(-1);
}
}
catch (...) {
LOCK_ASSEMBLE_AWAIT(_Unlock_locks(_Backout_start, _Hard_lock + 1, _LkN));
throw;
}
}
catch (...) {
_Unlock_locks(_Backout_start, _Hard_lock + 1, _LkN);
throw;
}
// we didn't get all the locks, backout
LOCK_ASSEMBLE_AWAIT(_Unlock_locks(_Backout_start, _Hard_lock + 1, _LkN));
LOCK_ASSEMBLE_AWAIT(_LkN._Yield());
// we didn't get all the locks, backout
_Unlock_locks(_Backout_start, _Hard_lock + 1, _LkN);
LOCK_ASSEMBLE_AWAIT(_LkN._Yield());
LOCK_ASSEMBLE_RETURN(_Failed);
}
LOCK_ASSEMBLE_RETURN(_Failed);
}
template<_LockAssembleT _LA>
auto _Lock_nonmember3(_LA& _LkN) ->decltype(_LkN._ReturnValue())
{
// lock 3 or more locks, without deadlock
int _Hard_lock = 0;
while (_Hard_lock != -1) {
_Hard_lock = LOCK_ASSEMBLE_AWAIT(_Lock_attempt(_Hard_lock, _LkN));
}
template<_LockAssembleT _LA>
static auto _Lock_nonmember3(_LA& _LkN) ->decltype(_LkN._ReturnValue())
{
// lock 3 or more locks, without deadlock
int _Hard_lock = 0;
while (_Hard_lock != -1) {
_Hard_lock = LOCK_ASSEMBLE_AWAIT(_Lock_attempt(_Hard_lock, _LkN));
}
}
template<_LockAssembleT _LA>
auto _Lock_attempt_small2(_LA& _LkN, const int _Idx0, const int _Idx1)
->decltype(_LkN._ReturnValue<bool>(false))
{
// attempt to lock 2 locks, by first locking _Lk0, and then trying to lock _Lk1 returns whether to try again
LOCK_ASSEMBLE_AWAIT(_LkN._Lock_ref(_LkN[_Idx0]));
try {
if (LOCK_ASSEMBLE_AWAIT(_LkN._Try_lock_ref(_LkN[_Idx1])))
LOCK_ASSEMBLE_RETURN(false);
}
catch (...) {
LOCK_ASSEMBLE_AWAIT(_LkN._Unlock_ref(_LkN[_Idx0]));
throw;
}
template<_LockAssembleT _LA>
static auto _Lock_attempt_small2(_LA& _LkN, const int _Idx0, const int _Idx1)
->decltype(_LkN._ReturnValue<bool>(false))
{
// attempt to lock 2 locks, by first locking _Lk0, and then trying to lock _Lk1 returns whether to try again
LOCK_ASSEMBLE_AWAIT(_LkN._Lock_ref(_LkN[_Idx0]));
try {
if (LOCK_ASSEMBLE_AWAIT(_LkN._Try_lock_ref(_LkN[_Idx1])))
LOCK_ASSEMBLE_RETURN(false);
}
catch (...) {
_LkN._Unlock_ref(_LkN[_Idx0]);
throw;
}
_LkN._Unlock_ref(_LkN[_Idx0]);
LOCK_ASSEMBLE_AWAIT(_LkN._Yield());
LOCK_ASSEMBLE_AWAIT(_LkN._Unlock_ref(_LkN[_Idx0]));
LOCK_ASSEMBLE_AWAIT(_LkN._Yield());
LOCK_ASSEMBLE_RETURN(true);
}
LOCK_ASSEMBLE_RETURN(true);
template<_LockAssembleT _LA>
static auto _Lock_nonmember2(_LA& _LkN) ->decltype(_LkN._ReturnValue())
{
// lock 2 locks, without deadlock, special case for better codegen and reduced metaprogramming for common case
while (LOCK_ASSEMBLE_AWAIT(_Lock_attempt_small2(_LkN, 0, 1)) &&
LOCK_ASSEMBLE_AWAIT(_Lock_attempt_small2(_LkN, 1, 0)))
{ // keep trying
}
}
template<_LockAssembleT _LA>
auto _Lock_nonmember2(_LA& _LkN) ->decltype(_LkN._ReturnValue())
template<_LockAssembleT _LA>
static auto _Lock_range(_LA& lockes) ->decltype(lockes._ReturnValue())
{
if (lockes.size() == 0)
{
// lock 2 locks, without deadlock, special case for better codegen and reduced metaprogramming for common case
while (LOCK_ASSEMBLE_AWAIT(_Lock_attempt_small2(_LkN, 0, 1)) &&
LOCK_ASSEMBLE_AWAIT(_Lock_attempt_small2(_LkN, 1, 0)))
{ // keep trying
}
}
template<_LockAssembleT _LA>
auto _Lock_range(_LA& lockes) ->decltype(lockes._ReturnValue())
else if (lockes.size() == 1)
{
if (lockes.size() == 0)
{
}
else if (lockes.size() == 1)
{
LOCK_ASSEMBLE_AWAIT(lockes._Lock_ref(lockes[0]));
}
else if (lockes.size() == 2)
{
LOCK_ASSEMBLE_AWAIT(_Lock_nonmember2(lockes));
}
else
{
LOCK_ASSEMBLE_AWAIT(_Lock_nonmember3(lockes));
}
LOCK_ASSEMBLE_AWAIT(lockes._Lock_ref(lockes[0]));
}
else if (lockes.size() == 2)
{
LOCK_ASSEMBLE_AWAIT(_Lock_nonmember2(lockes));
}
else
{
LOCK_ASSEMBLE_AWAIT(_Lock_nonmember3(lockes));
}
}
}
};

+ 53
- 0
tutorial/test_async_mutex.cpp View File

@@ -141,10 +141,63 @@ static void resumable_mutex_async()
std::cout << "result:" << g_counter << std::endl;
}
// Increments g_counter 10 times; each iteration acquires all three mutexes
// as one deadlock-free group before touching the shared counter.
static future_t<> resumable_mutex_range_push(size_t idx, mutex_t a, mutex_t b, mutex_t c)
{
    for (int i = 0; i < 10; ++i)
    {
        // BUGFIX: mutex_t::lock(a, b, c) returns future_t<scoped_unlock_range_t>;
        // without co_await the future was discarded and no locks were ever held.
        auto __lockers = co_await mutex_t::lock(a, b, c);
        ++g_counter;
        std::cout << "push:" << g_counter << " on " << idx << std::endl;
        co_await 5ms;
    }
}
// Decrements g_counter 10 times; each iteration acquires all three mutexes
// as one deadlock-free group before touching the shared counter.
static future_t<> resumable_mutex_range_pop(size_t idx, mutex_t a, mutex_t b, mutex_t c)
{
    for (int i = 0; i < 10; ++i)
    {
        // BUGFIX: mutex_t::lock(a, b, c) returns future_t<scoped_unlock_range_t>;
        // without co_await the future was discarded and no locks were ever held.
        auto __lockers = co_await mutex_t::lock(a, b, c);
        --g_counter;
        std::cout << "pop :" << g_counter << " on " << idx << std::endl;
        co_await 5ms;
    }
}
// Exercises the deadlock-free batch lock: two pushers on a second thread and
// two poppers on this thread all grab the same three mutexes in different
// orders. Locking from different threads must also be thread-safe.
static void resumable_mutex_lock_range()
{
    mutex_t mtxA, mtxB, mtxC;

    std::thread producer_thread([&]
    {
        local_scheduler __ls__;
        // Deliberately different lock orders to provoke deadlock if the
        // algorithm were order-sensitive.
        go resumable_mutex_range_push(10, mtxA, mtxB, mtxC);
        go resumable_mutex_range_push(11, mtxA, mtxC, mtxB);
        this_scheduler()->run_until_notask();
    });

    go resumable_mutex_range_pop(12, mtxC, mtxB, mtxA);
    go resumable_mutex_range_pop(13, mtxB, mtxA, mtxC);
    this_scheduler()->run_until_notask();

    producer_thread.join();
    std::cout << "result:" << g_counter << std::endl;
}
// Entry point for the mutex demos: runs the synchronous, asynchronous and
// batch-lock scenarios, separated by blank lines on stdout.
void resumable_main_mutex()
{
    const auto separator = [] { std::cout << std::endl; };

    resumable_mutex_synch();
    separator();
    resumable_mutex_async();
    separator();
    resumable_mutex_lock_range();
}

+ 0
- 1
vs_proj/librf.cpp View File

@@ -43,7 +43,6 @@ int main(int argc, const char* argv[])
//test_ring_queue<resumef::ring_queue_spinlock<int, false, uint32_t>>();
//test_ring_queue<resumef::ring_queue_lockfree<int, uint64_t>>();
resumable_main_event();
resumable_main_mutex();
return 0;

Loading…
Cancel
Save