// <stop_token> -*- C++ -*-

// Copyright (C) 2019-2024 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.

/** @file include/stop_token
 *  This is a Standard C++ Library header.
 */
28
29#ifndef _GLIBCXX_STOP_TOKEN
30#define _GLIBCXX_STOP_TOKEN
31
32#include <bits/requires_hosted.h> // concurrency
33
34#define __glibcxx_want_jthread
35#include <bits/version.h>
36
37#if __cplusplus > 201703L
38
39#include <atomic>
40#include <bits/std_thread.h>
41
42#include <semaphore>
43
44namespace std _GLIBCXX_VISIBILITY(default)
45{
46_GLIBCXX_BEGIN_NAMESPACE_VERSION
47
48 /// Tag type indicating a stop_source should have no shared-stop-state.
49 struct nostopstate_t { explicit nostopstate_t() = default; };
50 inline constexpr nostopstate_t nostopstate{};
51
52 class stop_source;
53
54 /// Allow testing whether a stop request has been made on a `stop_source`.
55 class stop_token
56 {
57 public:
58 stop_token() noexcept = default;
59
60 stop_token(const stop_token&) noexcept = default;
61 stop_token(stop_token&&) noexcept = default;
62
63 ~stop_token() = default;
64
65 stop_token&
66 operator=(const stop_token&) noexcept = default;
67
68 stop_token&
69 operator=(stop_token&&) noexcept = default;
70
71 [[nodiscard]]
72 bool
73 stop_possible() const noexcept
74 {
75 return static_cast<bool>(_M_state) && _M_state->_M_stop_possible();
76 }
77
78 [[nodiscard]]
79 bool
80 stop_requested() const noexcept
81 {
82 return static_cast<bool>(_M_state) && _M_state->_M_stop_requested();
83 }
84
85 void
86 swap(stop_token& __rhs) noexcept
87 { _M_state.swap(other&: __rhs._M_state); }
88
89 [[nodiscard]]
90 friend bool
91 operator==(const stop_token& __a, const stop_token& __b)
92 { return __a._M_state == __b._M_state; }
93
94 friend void
95 swap(stop_token& __lhs, stop_token& __rhs) noexcept
96 { __lhs.swap(__rhs); }
97
98 private:
99 friend class stop_source;
100 template<typename _Callback>
101 friend class stop_callback;
102
103 static void
104 _S_yield() noexcept
105 {
106#if defined __i386__ || defined __x86_64__
107 __builtin_ia32_pause();
108#endif
109 this_thread::yield();
110 }
111
112#ifndef __glibcxx_semaphore
113 struct binary_semaphore
114 {
115 explicit binary_semaphore(int __d) : _M_counter(__d > 0) { }
116
117 void release() { _M_counter.fetch_add(1, memory_order::release); }
118
119 void acquire()
120 {
121 int __old = 1;
122 while (!_M_counter.compare_exchange_weak(__old, 0,
123 memory_order::acquire,
124 memory_order::relaxed))
125 {
126 __old = 1;
127 _S_yield();
128 }
129 }
130
131 atomic<int> _M_counter;
132 };
133#endif
134
135 struct _Stop_cb
136 {
137 using __cb_type = void(_Stop_cb*) noexcept;
138 __cb_type* _M_callback;
139 _Stop_cb* _M_prev = nullptr;
140 _Stop_cb* _M_next = nullptr;
141 bool* _M_destroyed = nullptr;
142 binary_semaphore _M_done{0};
143
144 [[__gnu__::__nonnull__]]
145 explicit
146 _Stop_cb(__cb_type* __cb)
147 : _M_callback(__cb)
148 { }
149
150 void _M_run() noexcept { _M_callback(this); }
151 };
152
153 struct _Stop_state_t
154 {
155 using value_type = uint32_t;
156 static constexpr value_type _S_stop_requested_bit = 1;
157 static constexpr value_type _S_locked_bit = 2;
158 static constexpr value_type _S_ssrc_counter_inc = 4;
159
160 std::atomic<value_type> _M_owners{1};
161 std::atomic<value_type> _M_value{_S_ssrc_counter_inc};
162 _Stop_cb* _M_head = nullptr;
163 std::thread::id _M_requester;
164
165 _Stop_state_t() = default;
166
167 bool
168 _M_stop_possible() noexcept
169 {
170 // true if a stop request has already been made or there are still
171 // stop_source objects that would allow one to be made.
172 return _M_value.load(m: memory_order::acquire) & ~_S_locked_bit;
173 }
174
175 bool
176 _M_stop_requested() noexcept
177 {
178 return _M_value.load(m: memory_order::acquire) & _S_stop_requested_bit;
179 }
180
181 void
182 _M_add_owner() noexcept
183 {
184 _M_owners.fetch_add(i: 1, m: memory_order::relaxed);
185 }
186
187 void
188 _M_release_ownership() noexcept
189 {
190 if (_M_owners.fetch_sub(i: 1, m: memory_order::acq_rel) == 1)
191 delete this;
192 }
193
194 void
195 _M_add_ssrc() noexcept
196 {
197 _M_value.fetch_add(i: _S_ssrc_counter_inc, m: memory_order::relaxed);
198 }
199
200 void
201 _M_sub_ssrc() noexcept
202 {
203 _M_value.fetch_sub(i: _S_ssrc_counter_inc, m: memory_order::release);
204 }
205
206 // Obtain lock.
207 void
208 _M_lock() noexcept
209 {
210 // Can use relaxed loads to get the current value.
211 // The successful call to _M_try_lock is an acquire operation.
212 auto __old = _M_value.load(m: memory_order::relaxed);
213 while (!_M_try_lock(curval&: __old, failure: memory_order::relaxed))
214 { }
215 }
216
217 // Precondition: calling thread holds the lock.
218 void
219 _M_unlock() noexcept
220 {
221 _M_value.fetch_sub(i: _S_locked_bit, m: memory_order::release);
222 }
223
224 bool
225 _M_request_stop() noexcept
226 {
227 // obtain lock and set stop_requested bit
228 auto __old = _M_value.load(m: memory_order::acquire);
229 do
230 {
231 if (__old & _S_stop_requested_bit) // stop request already made
232 return false;
233 }
234 while (!_M_try_lock_and_stop(curval&: __old));
235
236 _M_requester = this_thread::get_id();
237
238 while (_M_head)
239 {
240 bool __last_cb;
241 _Stop_cb* __cb = _M_head;
242 _M_head = _M_head->_M_next;
243 if (_M_head)
244 {
245 _M_head->_M_prev = nullptr;
246 __last_cb = false;
247 }
248 else
249 __last_cb = true;
250
251 // Allow other callbacks to be unregistered while __cb runs.
252 _M_unlock();
253
254 bool __destroyed = false;
255 __cb->_M_destroyed = &__destroyed;
256
257 // run callback
258 __cb->_M_run();
259
260 if (!__destroyed)
261 {
262 __cb->_M_destroyed = nullptr;
263
264 // synchronize with destructor of stop_callback that owns *__cb
265 if (!__gnu_cxx::__is_single_threaded())
266 __cb->_M_done.release();
267 }
268
269 // Avoid relocking if we already know there are no more callbacks.
270 if (__last_cb)
271 return true;
272
273 _M_lock();
274 }
275
276 _M_unlock();
277 return true;
278 }
279
280 [[__gnu__::__nonnull__]]
281 bool
282 _M_register_callback(_Stop_cb* __cb) noexcept
283 {
284 auto __old = _M_value.load(m: memory_order::acquire);
285 do
286 {
287 if (__old & _S_stop_requested_bit) // stop request already made
288 {
289 __cb->_M_run(); // run synchronously
290 return false;
291 }
292
293 if (__old < _S_ssrc_counter_inc) // no stop_source owns *this
294 // No need to register callback if no stop request can be made.
295 // Returning false also means the stop_callback does not share
296 // ownership of this state, but that's not observable.
297 return false;
298 }
299 while (!_M_try_lock(curval&: __old));
300
301 __cb->_M_next = _M_head;
302 if (_M_head)
303 {
304 _M_head->_M_prev = __cb;
305 }
306 _M_head = __cb;
307 _M_unlock();
308 return true;
309 }
310
311 // Called by ~stop_callback just before destroying *__cb.
312 [[__gnu__::__nonnull__]]
313 void
314 _M_remove_callback(_Stop_cb* __cb)
315 {
316 _M_lock();
317
318 if (__cb == _M_head)
319 {
320 _M_head = _M_head->_M_next;
321 if (_M_head)
322 _M_head->_M_prev = nullptr;
323 _M_unlock();
324 return;
325 }
326 else if (__cb->_M_prev)
327 {
328 __cb->_M_prev->_M_next = __cb->_M_next;
329 if (__cb->_M_next)
330 __cb->_M_next->_M_prev = __cb->_M_prev;
331 _M_unlock();
332 return;
333 }
334
335 _M_unlock();
336
337 // Callback is not in the list, so must have been removed by a call to
338 // _M_request_stop.
339
340 // Despite appearances there is no data race on _M_requester. The only
341 // write to it happens before the callback is removed from the list,
342 // and removing it from the list happens before this read.
343 if (!(_M_requester == this_thread::get_id()))
344 {
345 // Synchronize with completion of callback.
346 __cb->_M_done.acquire();
347 // Safe for ~stop_callback to destroy *__cb now.
348 return;
349 }
350
351 if (__cb->_M_destroyed)
352 *__cb->_M_destroyed = true;
353 }
354
355 // Try to obtain the lock.
356 // Returns true if the lock is acquired (with memory order acquire).
357 // Otherwise, sets __curval = _M_value.load(__failure) and returns false.
358 // Might fail spuriously, so must be called in a loop.
359 bool
360 _M_try_lock(value_type& __curval,
361 memory_order __failure = memory_order::acquire) noexcept
362 {
363 return _M_do_try_lock(__curval, newbits: 0, success: memory_order::acquire, __failure);
364 }
365
366 // Try to obtain the lock to make a stop request.
367 // Returns true if the lock is acquired and the _S_stop_requested_bit is
368 // set (with memory order acq_rel so that other threads see the request).
369 // Otherwise, sets __curval = _M_value.load(memory_order::acquire) and
370 // returns false.
371 // Might fail spuriously, so must be called in a loop.
372 bool
373 _M_try_lock_and_stop(value_type& __curval) noexcept
374 {
375 return _M_do_try_lock(__curval, newbits: _S_stop_requested_bit,
376 success: memory_order::acq_rel, failure: memory_order::acquire);
377 }
378
379 bool
380 _M_do_try_lock(value_type& __curval, value_type __newbits,
381 memory_order __success, memory_order __failure) noexcept
382 {
383 if (__curval & _S_locked_bit)
384 {
385 _S_yield();
386 __curval = _M_value.load(m: __failure);
387 return false;
388 }
389 __newbits |= _S_locked_bit;
390 return _M_value.compare_exchange_weak(i1&: __curval, i2: __curval | __newbits,
391 m1: __success, m2: __failure);
392 }
393 };
394
395 struct _Stop_state_ref
396 {
397 _Stop_state_ref() = default;
398
399 [[__gnu__::__access__(__none__, 2)]]
400 explicit
401 _Stop_state_ref(const stop_source&)
402 : _M_ptr(new _Stop_state_t())
403 { }
404
405 _Stop_state_ref(const _Stop_state_ref& __other) noexcept
406 : _M_ptr(__other._M_ptr)
407 {
408 if (_M_ptr)
409 _M_ptr->_M_add_owner();
410 }
411
412 _Stop_state_ref(_Stop_state_ref&& __other) noexcept
413 : _M_ptr(__other._M_ptr)
414 {
415 __other._M_ptr = nullptr;
416 }
417
418 _Stop_state_ref&
419 operator=(const _Stop_state_ref& __other) noexcept
420 {
421 if (auto __ptr = __other._M_ptr; __ptr != _M_ptr)
422 {
423 if (__ptr)
424 __ptr->_M_add_owner();
425 if (_M_ptr)
426 _M_ptr->_M_release_ownership();
427 _M_ptr = __ptr;
428 }
429 return *this;
430 }
431
432 _Stop_state_ref&
433 operator=(_Stop_state_ref&& __other) noexcept
434 {
435 _Stop_state_ref(std::move(__other)).swap(other&: *this);
436 return *this;
437 }
438
439 ~_Stop_state_ref()
440 {
441 if (_M_ptr)
442 _M_ptr->_M_release_ownership();
443 }
444
445 void
446 swap(_Stop_state_ref& __other) noexcept
447 { std::swap(a&: _M_ptr, b&: __other._M_ptr); }
448
449 explicit operator bool() const noexcept { return _M_ptr != nullptr; }
450
451 _Stop_state_t* operator->() const noexcept { return _M_ptr; }
452
453#if __cpp_impl_three_way_comparison >= 201907L
454 friend bool
455 operator==(const _Stop_state_ref&, const _Stop_state_ref&) = default;
456#else
457 friend bool
458 operator==(const _Stop_state_ref& __lhs, const _Stop_state_ref& __rhs)
459 noexcept
460 { return __lhs._M_ptr == __rhs._M_ptr; }
461
462 friend bool
463 operator!=(const _Stop_state_ref& __lhs, const _Stop_state_ref& __rhs)
464 noexcept
465 { return __lhs._M_ptr != __rhs._M_ptr; }
466#endif
467
468 private:
469 _Stop_state_t* _M_ptr = nullptr;
470 };
471
472 _Stop_state_ref _M_state;
473
474 explicit
475 stop_token(const _Stop_state_ref& __state) noexcept
476 : _M_state{__state}
477 { }
478 };
479
480 /// A type that allows a stop request to be made.
481 class stop_source
482 {
483 public:
484 stop_source() : _M_state(*this)
485 { }
486
487 explicit stop_source(std::nostopstate_t) noexcept
488 { }
489
490 stop_source(const stop_source& __other) noexcept
491 : _M_state(__other._M_state)
492 {
493 if (_M_state)
494 _M_state->_M_add_ssrc();
495 }
496
497 stop_source(stop_source&&) noexcept = default;
498
499 stop_source&
500 operator=(const stop_source& __other) noexcept
501 {
502 if (_M_state != __other._M_state)
503 {
504 stop_source __sink(std::move(*this));
505 _M_state = __other._M_state;
506 if (_M_state)
507 _M_state->_M_add_ssrc();
508 }
509 return *this;
510 }
511
512 stop_source&
513 operator=(stop_source&&) noexcept = default;
514
515 ~stop_source()
516 {
517 if (_M_state)
518 _M_state->_M_sub_ssrc();
519 }
520
521 [[nodiscard]]
522 bool
523 stop_possible() const noexcept
524 {
525 return static_cast<bool>(_M_state);
526 }
527
528 [[nodiscard]]
529 bool
530 stop_requested() const noexcept
531 {
532 return static_cast<bool>(_M_state) && _M_state->_M_stop_requested();
533 }
534
535 bool
536 request_stop() const noexcept
537 {
538 if (stop_possible())
539 return _M_state->_M_request_stop();
540 return false;
541 }
542
543 [[nodiscard]]
544 stop_token
545 get_token() const noexcept
546 {
547 return stop_token{_M_state};
548 }
549
550 void
551 swap(stop_source& __other) noexcept
552 {
553 _M_state.swap(other&: __other._M_state);
554 }
555
556 [[nodiscard]]
557 friend bool
558 operator==(const stop_source& __a, const stop_source& __b) noexcept
559 {
560 return __a._M_state == __b._M_state;
561 }
562
563 friend void
564 swap(stop_source& __lhs, stop_source& __rhs) noexcept
565 {
566 __lhs.swap(other&: __rhs);
567 }
568
569 private:
570 stop_token::_Stop_state_ref _M_state;
571 };
572
573 /// A wrapper for callbacks to be run when a stop request is made.
574 template<typename _Callback>
575 class [[nodiscard]] stop_callback
576 {
577 static_assert(is_nothrow_destructible_v<_Callback>);
578 static_assert(is_invocable_v<_Callback>);
579
580 public:
581 using callback_type = _Callback;
582
583 template<typename _Cb,
584 enable_if_t<is_constructible_v<_Callback, _Cb>, int> = 0>
585 explicit
586 stop_callback(const stop_token& __token, _Cb&& __cb)
587 noexcept(is_nothrow_constructible_v<_Callback, _Cb>)
588 : _M_cb(std::forward<_Cb>(__cb))
589 {
590 if (auto __state = __token._M_state)
591 {
592 if (__state->_M_register_callback(cb: &_M_cb))
593 _M_state.swap(other&: __state);
594 }
595 }
596
597 template<typename _Cb,
598 enable_if_t<is_constructible_v<_Callback, _Cb>, int> = 0>
599 explicit
600 stop_callback(stop_token&& __token, _Cb&& __cb)
601 noexcept(is_nothrow_constructible_v<_Callback, _Cb>)
602 : _M_cb(std::forward<_Cb>(__cb))
603 {
604 if (auto& __state = __token._M_state)
605 {
606 if (__state->_M_register_callback(cb: &_M_cb))
607 _M_state.swap(other&: __state);
608 }
609 }
610
611 ~stop_callback()
612 {
613 if (_M_state)
614 {
615 _M_state->_M_remove_callback(cb: &_M_cb);
616 }
617 }
618
619 stop_callback(const stop_callback&) = delete;
620 stop_callback& operator=(const stop_callback&) = delete;
621 stop_callback(stop_callback&&) = delete;
622 stop_callback& operator=(stop_callback&&) = delete;
623
624 private:
625 struct _Cb_impl : stop_token::_Stop_cb
626 {
627 template<typename _Cb>
628 explicit
629 _Cb_impl(_Cb&& __cb)
630 : _Stop_cb(&_S_execute),
631 _M_cb(std::forward<_Cb>(__cb))
632 { }
633
634 _Callback _M_cb;
635
636 [[__gnu__::__nonnull__]]
637 static void
638 _S_execute(_Stop_cb* __that) noexcept
639 {
640 _Callback& __cb = static_cast<_Cb_impl*>(__that)->_M_cb;
641 std::forward<_Callback>(__cb)();
642 }
643 };
644
645 _Cb_impl _M_cb;
646 stop_token::_Stop_state_ref _M_state;
647 };
648
649 template<typename _Callback>
650 stop_callback(stop_token, _Callback) -> stop_callback<_Callback>;
651
652_GLIBCXX_END_NAMESPACE_VERSION
653} // namespace
654#endif // __cplusplus > 201703L
655#endif // _GLIBCXX_STOP_TOKEN
656