// librf
// ring_queue_spinlock.h
1 #pragma once
2 
3 namespace resumef
4 {
 // Thread-safe ring (circular) queue protected by a spin lock.
 // Multiple threads may push and pop concurrently.
 // _Option : set to true when the stored type is move-only (not copyable),
 //           or when the element must be destroyed immediately after pop.
 // _Sty    : integer type used for the count/index members; lets the caller
 //           control the overall footprint of the queue structure.
 template<class _Ty, bool _Option = false, class _Sty = uint32_t>
 struct ring_queue_spinlock
 {
	using value_type = _Ty;
	using size_type = _Sty;

	static constexpr bool use_option = _Option;
	// With _Option the slot holds std::optional<value_type> so a popped
	// element can be destroyed in place; otherwise the slot is the raw value.
	using optional_type = std::conditional_t<use_option, std::optional<value_type>, value_type>;
 public:
	ring_queue_spinlock(size_t sz);

	ring_queue_spinlock(const ring_queue_spinlock&) = delete;
	ring_queue_spinlock(ring_queue_spinlock&&) = default;
	ring_queue_spinlock& operator =(const ring_queue_spinlock&) = delete;
	ring_queue_spinlock& operator =(ring_queue_spinlock&&) = default;

	auto size() const noexcept->size_type;
	auto capacity() const noexcept->size_type;
	bool empty() const noexcept;
	bool full() const noexcept;
	template<class U>
	bool try_push(U&& value) noexcept(std::is_nothrow_move_assignable_v<U>);
	bool try_pop(value_type& value) noexcept(std::is_nothrow_move_assignable_v<value_type>);
 private:
	// std::vector<bool> is a packed proxy container; for bool elements the
	// buffer therefore falls back to a plain heap array instead.
	using container_type = std::conditional_t<std::is_same_v<value_type, bool>, std::unique_ptr<optional_type[]>, std::vector<optional_type>>;
	container_type m_bufferPtr;
	size_type m_bufferSize;		// requested capacity + 1 (one slot kept empty)

	size_type m_writeIndex;		// next slot to write
	size_type m_readIndex;		// next slot to read
	mutable resumef::spinlock m_lock;
 #ifdef _WITH_LOCK_FREE_Q_KEEP_REAL_SIZE
	std::atomic<size_type> m_count;	// live element count, enables lock-free size()
 #endif

	auto nextIndex(size_type a_count) const noexcept->size_type;
 };
46 
47  template<class _Ty, bool _Option, class _Sty>
48  ring_queue_spinlock<_Ty, _Option, _Sty>::ring_queue_spinlock(size_t sz)
49  : m_bufferSize(static_cast<size_type>(sz + 1))
50  , m_writeIndex(0)
51  , m_readIndex(0)
52  #ifdef _WITH_LOCK_FREE_Q_KEEP_REAL_SIZE
53  , m_count(0)
54  #endif
55  {
56  if constexpr (std::is_same_v<value_type, bool>)
57  m_bufferPtr = container_type{ new optional_type[sz + 1] };
58  else
59  m_bufferPtr.resize(sz + 1);
60 
61  assert(sz < (std::numeric_limits<size_type>::max)());
62  }
63 
64  template<class _Ty, bool _Option, class _Sty>
65  auto ring_queue_spinlock<_Ty, _Option, _Sty>::nextIndex(size_type a_count) const noexcept->size_type
66  {
67  return static_cast<size_type>((a_count + 1) % m_bufferSize);
68  }
69 
70  template<class _Ty, bool _Option, class _Sty>
71  auto ring_queue_spinlock<_Ty, _Option, _Sty>::size() const noexcept->size_type
72  {
73  #ifdef _WITH_LOCK_FREE_Q_KEEP_REAL_SIZE
74  return m_count.load(std::memory_order_acquire);
75  #else
76  std::scoped_lock __guard(this->m_lock);
77 
78  if (m_writeIndex >= m_readIndex)
79  return (m_writeIndex - m_readIndex);
80  else
81  return (m_bufferSize + m_writeIndex - m_readIndex);
82  #endif // _WITH_LOCK_FREE_Q_KEEP_REAL_SIZE
83  }
84 
85  template<class _Ty, bool _Option, class _Sty>
86  auto ring_queue_spinlock<_Ty, _Option, _Sty>::capacity() const noexcept->size_type
87  {
88  return m_bufferSize - 1;
89  }
90 
91  template<class _Ty, bool _Option, class _Sty>
92  bool ring_queue_spinlock<_Ty, _Option, _Sty>::empty() const noexcept
93  {
94  #ifdef _WITH_LOCK_FREE_Q_KEEP_REAL_SIZE
95  return m_count.load(std::memory_order_acquire) == 0;
96  #else
97  std::scoped_lock __guard(this->m_lock);
98 
99  return m_writeIndex == m_readIndex;
100  #endif // _WITH_LOCK_FREE_Q_KEEP_REAL_SIZE
101  }
102 
103  template<class _Ty, bool _Option, class _Sty>
104  bool ring_queue_spinlock<_Ty, _Option, _Sty>::full() const noexcept
105  {
106  #ifdef _WITH_LOCK_FREE_Q_KEEP_REAL_SIZE
107  return (m_count.load(std::memory_order_acquire) == (m_bufferSize - 1));
108  #else
109  std::scoped_lock __guard(this->m_lock);
110 
111  return nextIndex(m_writeIndex) == m_readIndex;
112  #endif // _WITH_LOCK_FREE_Q_KEEP_REAL_SIZE
113  }
114 
115  template<class _Ty, bool _Option, class _Sty>
116  template<class U>
117  bool ring_queue_spinlock<_Ty, _Option, _Sty>::try_push(U&& value) noexcept(std::is_nothrow_move_assignable_v<U>)
118  {
119  std::scoped_lock __guard(this->m_lock);
120 
121  auto nextWriteIndex = nextIndex(m_writeIndex);
122  if (nextWriteIndex == m_readIndex)
123  return false;
124 
125  assert(m_writeIndex < m_bufferSize);
126 
127  m_bufferPtr[m_writeIndex] = std::move(value);
128  m_writeIndex = nextWriteIndex;
129 
130  #ifdef _WITH_LOCK_FREE_Q_KEEP_REAL_SIZE
131  m_count.fetch_add(1, std::memory_order_acq_rel);
132  #endif
133  return true;
134  }
135 
136  template<class _Ty, bool _Option, class _Sty>
137  bool ring_queue_spinlock<_Ty, _Option, _Sty>::try_pop(value_type& value) noexcept(std::is_nothrow_move_assignable_v<value_type>)
138  {
139  std::scoped_lock __guard(this->m_lock);
140 
141  if (m_readIndex == m_writeIndex)
142  return false;
143 
144  optional_type& ov = m_bufferPtr[m_readIndex];
145  if constexpr (use_option)
146  {
147  value = std::move(ov.value());
148  ov = std::nullopt;
149  }
150  else
151  {
152  value = std::move(ov);
153  }
154 
155  m_readIndex = nextIndex(m_readIndex);
156 
157  #ifdef _WITH_LOCK_FREE_Q_KEEP_REAL_SIZE
158  m_count.fetch_sub(1, std::memory_order_acq_rel);
159  #endif
160  return true;
161  }
162 }