diff --git a/include/bits/error_constants.h b/include/bits/error_constants.h
new file mode 100644
index 0000000..2038914
--- /dev/null
+++ b/include/bits/error_constants.h
@@ -0,0 +1,180 @@
+// Specific definitions for generic platforms -*- C++ -*-
+
+// Copyright (C) 2007-2024 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+/** @file bits/error_constants.h
+ * This is an internal header file, included by other library headers.
+ * Do not attempt to use it directly. @headername{system_error}
+ */
+
+#ifndef _GLIBCXX_ERROR_CONSTANTS
+#define _GLIBCXX_ERROR_CONSTANTS 1
+
+#include <bits/c++config.h>
+#include <cerrno>
+
+namespace std _GLIBCXX_VISIBILITY(default)
+{
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+
+ enum class errc
+ {
+ address_family_not_supported = EAFNOSUPPORT,
+ address_in_use = EADDRINUSE,
+ address_not_available = EADDRNOTAVAIL,
+ already_connected = EISCONN,
+ argument_list_too_long = E2BIG,
+ argument_out_of_domain = EDOM,
+ bad_address = EFAULT,
+ bad_file_descriptor = EBADF,
+
+#ifdef EBADMSG
+ bad_message = EBADMSG,
+#endif
+
+ broken_pipe = EPIPE,
+ connection_aborted = ECONNABORTED,
+ connection_already_in_progress = EALREADY,
+ connection_refused = ECONNREFUSED,
+ connection_reset = ECONNRESET,
+ cross_device_link = EXDEV,
+ destination_address_required = EDESTADDRREQ,
+ device_or_resource_busy = EBUSY,
+ directory_not_empty = ENOTEMPTY,
+ executable_format_error = ENOEXEC,
+ file_exists = EEXIST,
+ file_too_large = EFBIG,
+ filename_too_long = ENAMETOOLONG,
+ function_not_supported = ENOSYS,
+ host_unreachable = EHOSTUNREACH,
+
+#ifdef EIDRM
+ identifier_removed = EIDRM,
+#endif
+
+ illegal_byte_sequence = EILSEQ,
+ inappropriate_io_control_operation = ENOTTY,
+ interrupted = EINTR,
+ invalid_argument = EINVAL,
+ invalid_seek = ESPIPE,
+ io_error = EIO,
+ is_a_directory = EISDIR,
+ message_size = EMSGSIZE,
+ network_down = ENETDOWN,
+ network_reset = ENETRESET,
+ network_unreachable = ENETUNREACH,
+ no_buffer_space = ENOBUFS,
+ no_child_process = ECHILD,
+
+#ifdef ENOLINK
+ no_link = ENOLINK,
+#endif
+
+ no_lock_available = ENOLCK,
+
+#ifdef ENODATA
+ no_message_available = ENODATA,
+#endif
+
+ no_message = ENOMSG,
+ no_protocol_option = ENOPROTOOPT,
+ no_space_on_device = ENOSPC,
+
+#ifdef ENOSR
+ no_stream_resources = ENOSR,
+#endif
+
+ no_such_device_or_address = ENXIO,
+ no_such_device = ENODEV,
+ no_such_file_or_directory = ENOENT,
+ no_such_process = ESRCH,
+ not_a_directory = ENOTDIR,
+ not_a_socket = ENOTSOCK,
+
+#ifdef ENOSTR
+ not_a_stream = ENOSTR,
+#endif
+
+ not_connected = ENOTCONN,
+ not_enough_memory = ENOMEM,
+
+#ifdef ENOTSUP
+ not_supported = ENOTSUP,
+#endif
+
+#ifdef ECANCELED
+ operation_canceled = ECANCELED,
+#endif
+
+ operation_in_progress = EINPROGRESS,
+ operation_not_permitted = EPERM,
+ operation_not_supported = EOPNOTSUPP,
+ operation_would_block = EWOULDBLOCK,
+
+#ifdef EOWNERDEAD
+ owner_dead = EOWNERDEAD,
+#endif
+
+ permission_denied = EACCES,
+
+#ifdef EPROTO
+ protocol_error = EPROTO,
+#endif
+
+ protocol_not_supported = EPROTONOSUPPORT,
+ read_only_file_system = EROFS,
+ resource_deadlock_would_occur = EDEADLK,
+ resource_unavailable_try_again = EAGAIN,
+ result_out_of_range = ERANGE,
+
+#ifdef ENOTRECOVERABLE
+ state_not_recoverable = ENOTRECOVERABLE,
+#endif
+
+#ifdef ETIME
+ stream_timeout = ETIME,
+#endif
+
+#ifdef ETXTBSY
+ text_file_busy = ETXTBSY,
+#endif
+
+ timed_out = ETIMEDOUT,
+ too_many_files_open_in_system = ENFILE,
+ too_many_files_open = EMFILE,
+ too_many_links = EMLINK,
+ too_many_symbolic_link_levels = ELOOP,
+
+#ifdef EOVERFLOW
+ value_too_large = EOVERFLOW,
+#elif defined __AVR__
+ value_too_large = 999,
+#endif
+
+ wrong_protocol_type = EPROTOTYPE
+ };
+
+_GLIBCXX_END_NAMESPACE_VERSION
+} // namespace
+
+#endif
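
The errc enumerators above mirror the platform's <cerrno> macros one-for-one,
so code can convert between the scoped enum and a raw errno value with an
explicit cast. A minimal sketch, not part of the patch, assuming a hosted
toolchain whose <system_error> provides std::errc:

    #include <cerrno>
    #include <system_error>
    #include <iostream>

    int main()
    {
      // Each enumerator carries the value of the matching errno macro.
      std::cout << (static_cast<int>(std::errc::timed_out) == ETIMEDOUT) << '\n'; // 1
      std::cout << (static_cast<int>(std::errc::file_exists) == EEXIST) << '\n';  // 1
    }
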
diff --git a/include/bits/functexcept.h b/include/bits/functexcept.h
index 85aa393..b7494f3 100644
--- a/include/bits/functexcept.h
+++ b/include/bits/functexcept.h
@@ -38,6 +38,7 @@
#include <bits/c++config.h>
#include <bits/exception_defines.h>
+#include <bits/error_constants.h>
namespace std _GLIBCXX_VISIBILITY(default)
{
@@ -90,6 +91,10 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION
void
__throw_underflow_error(const char* __s = "") __attribute__((__noreturn__));
+ // Helpers for exception objects in <system_error>
+ void
+ __throw_system_error(int) __attribute__((__noreturn__));
+
// Helpers for exception objects in <functional>
void
__throw_bad_function_call() __attribute__((__noreturn__));
diff --git a/include/bits/std_mutex.h b/include/bits/std_mutex.h
new file mode 100644
index 0000000..aef12b6
--- /dev/null
+++ b/include/bits/std_mutex.h
@@ -0,0 +1,176 @@
+// std::mutex implementation -*- C++ -*-
+
+// Copyright (C) 2003-2020 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+/** @file bits/std_mutex.h
+ * This is an internal header file, included by other library headers.
+ * Do not attempt to use it directly. @headername{mutex}
+ */
+
+#ifndef _GLIBCXX_MUTEX_H
+#define _GLIBCXX_MUTEX_H 1
+
+#pragma GCC system_header
+
+#if __cplusplus < 201103L
+# include <bits/c++0x_warning.h>
+#else
+
+#include <bits/functexcept.h> // for __throw_system_error
+#include <bits/gthr.h> // for __gthread_mutex_t
+
+namespace std _GLIBCXX_VISIBILITY(default)
+{
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+
+ /**
+ * @defgroup mutexes Mutexes
+ * @ingroup concurrency
+ *
+ * Classes for mutex support.
+ * @{
+ */
+
+#ifdef _GLIBCXX_HAS_GTHREADS
+ // Common base class for std::mutex and std::timed_mutex
+ class __mutex_base
+ {
+ protected:
+ typedef __gthread_mutex_t __native_type;
+
+#ifdef __GTHREAD_MUTEX_INIT
+ __native_type _M_mutex = __GTHREAD_MUTEX_INIT;
+
+ constexpr __mutex_base() noexcept = default;
+#else
+ __native_type _M_mutex;
+
+ __mutex_base() noexcept
+ {
+ // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
+ __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
+ }
+
+ ~__mutex_base() noexcept { __gthread_mutex_destroy(&_M_mutex); }
+#endif
+
+ __mutex_base(const __mutex_base&) = delete;
+ __mutex_base& operator=(const __mutex_base&) = delete;
+ };
+
+ /// The standard mutex type.
+ class mutex : private __mutex_base
+ {
+ public:
+ typedef __native_type* native_handle_type;
+
+#ifdef __GTHREAD_MUTEX_INIT
+ constexpr
+#endif
+ mutex() noexcept = default;
+ ~mutex() = default;
+
+ mutex(const mutex&) = delete;
+ mutex& operator=(const mutex&) = delete;
+
+ void
+ lock()
+ {
+ int __e = __gthread_mutex_lock(&_M_mutex);
+
+ // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
+ if (__e)
+ __throw_system_error(__e);
+ }
+
+ bool
+ try_lock() noexcept
+ {
+ // XXX EINVAL, EAGAIN, EBUSY
+ return !__gthread_mutex_trylock(&_M_mutex);
+ }
+
+ void
+ unlock()
+ {
+ // XXX EINVAL, EAGAIN, EPERM
+ __gthread_mutex_unlock(&_M_mutex);
+ }
+
+ native_handle_type
+ native_handle() noexcept
+ { return &_M_mutex; }
+ };
+
+#endif // _GLIBCXX_HAS_GTHREADS
+
+ /// Do not acquire ownership of the mutex.
+ struct defer_lock_t { explicit defer_lock_t() = default; };
+
+ /// Try to acquire ownership of the mutex without blocking.
+ struct try_to_lock_t { explicit try_to_lock_t() = default; };
+
+ /// Assume the calling thread has already obtained mutex ownership
+ /// and manage it.
+ struct adopt_lock_t { explicit adopt_lock_t() = default; };
+
+ /// Tag used to prevent a scoped lock from acquiring ownership of a mutex.
+ _GLIBCXX17_INLINE constexpr defer_lock_t defer_lock { };
+
+ /// Tag used to prevent a scoped lock from blocking if a mutex is locked.
+ _GLIBCXX17_INLINE constexpr try_to_lock_t try_to_lock { };
+
+ /// Tag used to make a scoped lock take ownership of a locked mutex.
+ _GLIBCXX17_INLINE constexpr adopt_lock_t adopt_lock { };
+
+ /** @brief A simple scoped lock type.
+ *
+ * A lock_guard controls mutex ownership within a scope, releasing
+ * ownership in the destructor.
+ */
+ template<typename _Mutex>
+ class lock_guard
+ {
+ public:
+ typedef _Mutex mutex_type;
+
+ explicit lock_guard(mutex_type& __m) : _M_device(__m)
+ { _M_device.lock(); }
+
+ lock_guard(mutex_type& __m, adopt_lock_t) noexcept : _M_device(__m)
+ { } // calling thread owns mutex
+
+ ~lock_guard()
+ { _M_device.unlock(); }
+
+ lock_guard(const lock_guard&) = delete;
+ lock_guard& operator=(const lock_guard&) = delete;
+
+ private:
+ mutex_type& _M_device;
+ };
+
+ // @} group mutexes
+_GLIBCXX_END_NAMESPACE_VERSION
+} // namespace
+#endif // C++11
+#endif // _GLIBCXX_MUTEX_H
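
For reference, a usage sketch of the mutex and lock_guard defined above, not
part of the patch and assuming a target built with _GLIBCXX_HAS_GTHREADS and
a working <thread>:

    #include <mutex>
    #include <thread>
    #include <iostream>

    std::mutex m;
    long counter = 0;

    void work()
    {
      for (int i = 0; i < 100000; ++i)
      {
        std::lock_guard<std::mutex> lk(m); // unlocks in the destructor
        ++counter;
      }
    }

    int main()
    {
      std::thread t1(work), t2(work);
      t1.join();
      t2.join();
      std::cout << counter << '\n'; // always 200000
    }
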
diff --git a/include/bits/unique_lock.h b/include/bits/unique_lock.h
new file mode 100644
index 0000000..82eef44
--- /dev/null
+++ b/include/bits/unique_lock.h
@@ -0,0 +1,243 @@
+// std::unique_lock implementation -*- C++ -*-
+
+// Copyright (C) 2008-2020 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+/** @file bits/unique_lock.h
+ * This is an internal header file, included by other library headers.
+ * Do not attempt to use it directly. @headername{mutex}
+ */
+
+#ifndef _GLIBCXX_UNIQUE_LOCK_H
+#define _GLIBCXX_UNIQUE_LOCK_H 1
+
+#pragma GCC system_header
+
+#if __cplusplus < 201103L
+# include <bits/c++0x_warning.h>
+#else
+
+#include <chrono>
+#include <bits/std_mutex.h> // for std::defer_lock_t etc.
+#include <bits/move.h> // for std::swap
+
+namespace std _GLIBCXX_VISIBILITY(default)
+{
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+
+ /** @brief A movable scoped lock type.
+ *
+ * A unique_lock controls mutex ownership within a scope. Ownership of the
+ * mutex can be delayed until after construction and can be transferred
+ * to another unique_lock by move construction or move assignment. If a
+ * mutex lock is owned when the destructor runs ownership will be released.
+ *
+ * @ingroup mutexes
+ */
+ template<typename _Mutex>
+ class unique_lock
+ {
+ public:
+ typedef _Mutex mutex_type;
+
+ unique_lock() noexcept
+ : _M_device(0), _M_owns(false)
+ { }
+
+ explicit unique_lock(mutex_type& __m)
+ : _M_device(std::__addressof(__m)), _M_owns(false)
+ {
+ lock();
+ _M_owns = true;
+ }
+
+ unique_lock(mutex_type& __m, defer_lock_t) noexcept
+ : _M_device(std::__addressof(__m)), _M_owns(false)
+ { }
+
+ unique_lock(mutex_type& __m, try_to_lock_t)
+ : _M_device(std::__addressof(__m)), _M_owns(_M_device->try_lock())
+ { }
+
+ unique_lock(mutex_type& __m, adopt_lock_t) noexcept
+ : _M_device(std::__addressof(__m)), _M_owns(true)
+ {
+ // XXX calling thread owns mutex
+ }
+
+ template<typename _Clock, typename _Duration>
+ unique_lock(mutex_type& __m,
+ const chrono::time_point<_Clock, _Duration>& __atime)
+ : _M_device(std::__addressof(__m)),
+ _M_owns(_M_device->try_lock_until(__atime))
+ { }
+
+ template<typename _Rep, typename _Period>
+ unique_lock(mutex_type& __m,
+ const chrono::duration<_Rep, _Period>& __rtime)
+ : _M_device(std::__addressof(__m)),
+ _M_owns(_M_device->try_lock_for(__rtime))
+ { }
+
+ ~unique_lock()
+ {
+ if (_M_owns)
+ unlock();
+ }
+
+ unique_lock(const unique_lock&) = delete;
+ unique_lock& operator=(const unique_lock&) = delete;
+
+ unique_lock(unique_lock&& __u) noexcept
+ : _M_device(__u._M_device), _M_owns(__u._M_owns)
+ {
+ __u._M_device = 0;
+ __u._M_owns = false;
+ }
+
+ unique_lock& operator=(unique_lock&& __u) noexcept
+ {
+ if(_M_owns)
+ unlock();
+
+ unique_lock(std::move(__u)).swap(*this);
+
+ __u._M_device = 0;
+ __u._M_owns = false;
+
+ return *this;
+ }
+
+ void
+ lock()
+ {
+ if (!_M_device)
+ __throw_system_error(int(errc::operation_not_permitted));
+ else if (_M_owns)
+ __throw_system_error(int(errc::resource_deadlock_would_occur));
+ else
+ {
+ _M_device->lock();
+ _M_owns = true;
+ }
+ }
+
+ bool
+ try_lock()
+ {
+ if (!_M_device)
+ __throw_system_error(int(errc::operation_not_permitted));
+ else if (_M_owns)
+ __throw_system_error(int(errc::resource_deadlock_would_occur));
+ else
+ {
+ _M_owns = _M_device->try_lock();
+ return _M_owns;
+ }
+ }
+
+ template<typename _Clock, typename _Duration>
+ bool
+ try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
+ {
+ if (!_M_device)
+ __throw_system_error(int(errc::operation_not_permitted));
+ else if (_M_owns)
+ __throw_system_error(int(errc::resource_deadlock_would_occur));
+ else
+ {
+ _M_owns = _M_device->try_lock_until(__atime);
+ return _M_owns;
+ }
+ }
+
+ template<typename _Rep, typename _Period>
+ bool
+ try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
+ {
+ if (!_M_device)
+ __throw_system_error(int(errc::operation_not_permitted));
+ else if (_M_owns)
+ __throw_system_error(int(errc::resource_deadlock_would_occur));
+ else
+ {
+ _M_owns = _M_device->try_lock_for(__rtime);
+ return _M_owns;
+ }
+ }
+
+ void
+ unlock()
+ {
+ if (!_M_owns)
+ __throw_system_error(int(errc::operation_not_permitted));
+ else if (_M_device)
+ {
+ _M_device->unlock();
+ _M_owns = false;
+ }
+ }
+
+ void
+ swap(unique_lock& __u) noexcept
+ {
+ std::swap(_M_device, __u._M_device);
+ std::swap(_M_owns, __u._M_owns);
+ }
+
+ mutex_type*
+ release() noexcept
+ {
+ mutex_type* __ret = _M_device;
+ _M_device = 0;
+ _M_owns = false;
+ return __ret;
+ }
+
+ bool
+ owns_lock() const noexcept
+ { return _M_owns; }
+
+ explicit operator bool() const noexcept
+ { return owns_lock(); }
+
+ mutex_type*
+ mutex() const noexcept
+ { return _M_device; }
+
+ private:
+ mutex_type* _M_device;
+ bool _M_owns;
+ };
+
+ /// Swap overload for unique_lock objects.
+ /// @relates unique_lock
+ template<typename _Mutex>
+ inline void
+ swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y) noexcept
+ { __x.swap(__y); }
+
+_GLIBCXX_END_NAMESPACE_VERSION
+} // namespace
+
+#endif // C++11
+#endif // _GLIBCXX_UNIQUE_LOCK_H
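
A short sketch of what the movable unique_lock adds over lock_guard, not part
of the patch: deferred acquisition combined with std::lock for deadlock-free
locking of several mutexes, plus transferable ownership:

    #include <mutex>

    std::mutex m1, m2;

    void transfer()
    {
      // Defer acquisition so std::lock can take both mutexes together.
      std::unique_lock<std::mutex> lk1(m1, std::defer_lock);
      std::unique_lock<std::mutex> lk2(m2, std::defer_lock);
      std::lock(lk1, lk2); // deadlock-free: locks both or neither

      // Ownership is movable; lk1 no longer owns m1 after this.
      std::unique_lock<std::mutex> lk3(std::move(lk1));
      // lk2 and lk3 release their mutexes when the scope ends.
    }

    int main() { transfer(); }
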
diff --git a/include/mutex b/include/mutex
new file mode 100644
index 0000000..389504d
--- /dev/null
+++ b/include/mutex
@@ -0,0 +1,746 @@
+// -*- C++ -*-
+
+// Copyright (C) 2003-2020 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+/** @file include/mutex
+ * This is a Standard C++ Library header.
+ */
+
+#ifndef _GLIBCXX_MUTEX
+#define _GLIBCXX_MUTEX 1
+
+#pragma GCC system_header
+
+#if __cplusplus < 201103L
+# include <bits/c++0x_warning.h>
+#else
+
+#include <tuple>
+#include <chrono>
+#include <bits/std_mutex.h>
+#include <bits/unique_lock.h>
+#ifndef _GLIBCXX_HAVE_TLS
+# include <functional> // for std::function in the non-TLS call_once path
+#endif
+
+namespace std _GLIBCXX_VISIBILITY(default)
+{
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+
+ /**
+ * @addtogroup mutexes
+ * @{
+ */
+
+#ifdef _GLIBCXX_HAS_GTHREADS
+
+ // Common base class for std::recursive_mutex and std::recursive_timed_mutex
+ class __recursive_mutex_base
+ {
+ protected:
+ typedef __gthread_recursive_mutex_t __native_type;
+
+ __recursive_mutex_base(const __recursive_mutex_base&) = delete;
+ __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;
+
+#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
+ __native_type _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;
+
+ __recursive_mutex_base() = default;
+#else
+ __native_type _M_mutex;
+
+ __recursive_mutex_base()
+ {
+ // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
+ __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
+ }
+
+ ~__recursive_mutex_base()
+ { __gthread_recursive_mutex_destroy(&_M_mutex); }
+#endif
+ };
+
+ /// The standard recursive mutex type.
+ class recursive_mutex : private __recursive_mutex_base
+ {
+ public:
+ typedef __native_type* native_handle_type;
+
+ recursive_mutex() = default;
+ ~recursive_mutex() = default;
+
+ recursive_mutex(const recursive_mutex&) = delete;
+ recursive_mutex& operator=(const recursive_mutex&) = delete;
+
+ void
+ lock()
+ {
+ int __e = __gthread_recursive_mutex_lock(&_M_mutex);
+
+ // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
+ if (__e)
+ __throw_system_error(__e);
+ }
+
+ bool
+ try_lock() noexcept
+ {
+ // XXX EINVAL, EAGAIN, EBUSY
+ return !__gthread_recursive_mutex_trylock(&_M_mutex);
+ }
+
+ void
+ unlock()
+ {
+ // XXX EINVAL, EAGAIN, EBUSY
+ __gthread_recursive_mutex_unlock(&_M_mutex);
+ }
+
+ native_handle_type
+ native_handle() noexcept
+ { return &_M_mutex; }
+ };
+
+#if _GTHREAD_USE_MUTEX_TIMEDLOCK
+ template<typename _Derived>
+ class __timed_mutex_impl
+ {
+ protected:
+ template<typename _Rep, typename _Period>
+ bool
+ _M_try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
+ {
+#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
+ using __clock = chrono::steady_clock;
+#else
+ using __clock = chrono::system_clock;
+#endif
+
+ auto __rt = chrono::duration_cast<__clock::duration>(__rtime);
+ if (ratio_greater<__clock::period, _Period>())
+ ++__rt;
+ return _M_try_lock_until(__clock::now() + __rt);
+ }
+
+ template<typename _Duration>
+ bool
+ _M_try_lock_until(const chrono::time_point<chrono::system_clock,
+ _Duration>& __atime)
+ {
+ auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
+ auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
+
+ __gthread_time_t __ts = {
+ static_cast<std::time_t>(__s.time_since_epoch().count()),
+ static_cast<long>(__ns.count())
+ };
+
+ return static_cast<_Derived*>(this)->_M_timedlock(__ts);
+ }
+
+#ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
+ template<typename _Duration>
+ bool
+ _M_try_lock_until(const chrono::time_point<chrono::steady_clock,
+ _Duration>& __atime)
+ {
+ auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
+ auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
+
+ __gthread_time_t __ts = {
+ static_cast<std::time_t>(__s.time_since_epoch().count()),
+ static_cast<long>(__ns.count())
+ };
+
+ return static_cast<_Derived*>(this)->_M_clocklock(CLOCK_MONOTONIC,
+ __ts);
+ }
+#endif
+
+ template<typename _Clock, typename _Duration>
+ bool
+ _M_try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
+ {
+#if __cplusplus > 201703L
+ static_assert(chrono::is_clock_v<_Clock>);
+#endif
+ // The user-supplied clock may not tick at the same rate as
+ // steady_clock, so we must loop in order to guarantee that
+ // the timeout has expired before returning false.
+ auto __now = _Clock::now();
+ do {
+ auto __rtime = __atime - __now;
+ if (_M_try_lock_for(__rtime))
+ return true;
+ __now = _Clock::now();
+ } while (__atime > __now);
+ return false;
+ }
+ };
+
+ /// The standard timed mutex type.
+ class timed_mutex
+ : private __mutex_base, public __timed_mutex_impl<timed_mutex>
+ {
+ public:
+ typedef __native_type* native_handle_type;
+
+ timed_mutex() = default;
+ ~timed_mutex() = default;
+
+ timed_mutex(const timed_mutex&) = delete;
+ timed_mutex& operator=(const timed_mutex&) = delete;
+
+ void
+ lock()
+ {
+ int __e = __gthread_mutex_lock(&_M_mutex);
+
+ // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
+ if (__e)
+ __throw_system_error(__e);
+ }
+
+ bool
+ try_lock() noexcept
+ {
+ // XXX EINVAL, EAGAIN, EBUSY
+ return !__gthread_mutex_trylock(&_M_mutex);
+ }
+
+ template<typename _Rep, typename _Period>
+ bool
+ try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
+ { return _M_try_lock_for(__rtime); }
+
+ template<typename _Clock, typename _Duration>
+ bool
+ try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
+ { return _M_try_lock_until(__atime); }
+
+ void
+ unlock()
+ {
+ // XXX EINVAL, EAGAIN, EBUSY
+ __gthread_mutex_unlock(&_M_mutex);
+ }
+
+ native_handle_type
+ native_handle() noexcept
+ { return &_M_mutex; }
+
+ private:
+ friend class __timed_mutex_impl<timed_mutex>;
+
+ bool
+ _M_timedlock(const __gthread_time_t& __ts)
+ { return !__gthread_mutex_timedlock(&_M_mutex, &__ts); }
+
+#if _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
+ bool
+ _M_clocklock(clockid_t clockid, const __gthread_time_t& __ts)
+ { return !pthread_mutex_clocklock(&_M_mutex, clockid, &__ts); }
+#endif
+ };
+
+ /// recursive_timed_mutex
+ class recursive_timed_mutex
+ : private __recursive_mutex_base,
+ public __timed_mutex_impl<recursive_timed_mutex>
+ {
+ public:
+ typedef __native_type* native_handle_type;
+
+ recursive_timed_mutex() = default;
+ ~recursive_timed_mutex() = default;
+
+ recursive_timed_mutex(const recursive_timed_mutex&) = delete;
+ recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;
+
+ void
+ lock()
+ {
+ int __e = __gthread_recursive_mutex_lock(&_M_mutex);
+
+ // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
+ if (__e)
+ __throw_system_error(__e);
+ }
+
+ bool
+ try_lock() noexcept
+ {
+ // XXX EINVAL, EAGAIN, EBUSY
+ return !__gthread_recursive_mutex_trylock(&_M_mutex);
+ }
+
+ template<typename _Rep, typename _Period>
+ bool
+ try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
+ { return _M_try_lock_for(__rtime); }
+
+ template<typename _Clock, typename _Duration>
+ bool
+ try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
+ { return _M_try_lock_until(__atime); }
+
+ void
+ unlock()
+ {
+ // XXX EINVAL, EAGAIN, EBUSY
+ __gthread_recursive_mutex_unlock(&_M_mutex);
+ }
+
+ native_handle_type
+ native_handle() noexcept
+ { return &_M_mutex; }
+
+ private:
+ friend class __timed_mutex_impl<recursive_timed_mutex>;
+
+ bool
+ _M_timedlock(const __gthread_time_t& __ts)
+ { return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts); }
+
+#ifdef _GLIBCXX_USE_PTHREAD_MUTEX_CLOCKLOCK
+ bool
+ _M_clocklock(clockid_t clockid, const __gthread_time_t& __ts)
+ { return !pthread_mutex_clocklock(&_M_mutex, clockid, &__ts); }
+#endif
+ };
+
+#else // !_GTHREAD_USE_MUTEX_TIMEDLOCK
+
+ /// timed_mutex
+ class timed_mutex
+ {
+ mutex _M_mut;
+ condition_variable _M_cv;
+ bool _M_locked = false;
+
+ public:
+
+ timed_mutex() = default;
+ ~timed_mutex() { __glibcxx_assert( !_M_locked ); }
+
+ timed_mutex(const timed_mutex&) = delete;
+ timed_mutex& operator=(const timed_mutex&) = delete;
+
+ void
+ lock()
+ {
+ unique_lock<mutex> __lk(_M_mut);
+ _M_cv.wait(__lk, [&]{ return !_M_locked; });
+ _M_locked = true;
+ }
+
+ bool
+ try_lock()
+ {
+ lock_guard<mutex> __lk(_M_mut);
+ if (_M_locked)
+ return false;
+ _M_locked = true;
+ return true;
+ }
+
+ template<typename _Rep, typename _Period>
+ bool
+ try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
+ {
+ unique_lock<mutex> __lk(_M_mut);
+ if (!_M_cv.wait_for(__lk, __rtime, [&]{ return !_M_locked; }))
+ return false;
+ _M_locked = true;
+ return true;
+ }
+
+ template<typename _Clock, typename _Duration>
+ bool
+ try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
+ {
+ unique_lock<mutex> __lk(_M_mut);
+ if (!_M_cv.wait_until(__lk, __atime, [&]{ return !_M_locked; }))
+ return false;
+ _M_locked = true;
+ return true;
+ }
+
+ void
+ unlock()
+ {
+ lock_guard<mutex> __lk(_M_mut);
+ __glibcxx_assert( _M_locked );
+ _M_locked = false;
+ _M_cv.notify_one();
+ }
+ };
+
+ /// recursive_timed_mutex
+ class recursive_timed_mutex
+ {
+ mutex _M_mut;
+ condition_variable _M_cv;
+ thread::id _M_owner;
+ unsigned _M_count = 0;
+
+ // Predicate type that tests whether the current thread can lock a mutex.
+ struct _Can_lock
+ {
+ // Returns true if the mutex is unlocked or is locked by _M_caller.
+ bool
+ operator()() const noexcept
+ { return _M_mx->_M_count == 0 || _M_mx->_M_owner == _M_caller; }
+
+ const recursive_timed_mutex* _M_mx;
+ thread::id _M_caller;
+ };
+
+ public:
+
+ recursive_timed_mutex() = default;
+ ~recursive_timed_mutex() { __glibcxx_assert( _M_count == 0 ); }
+
+ recursive_timed_mutex(const recursive_timed_mutex&) = delete;
+ recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;
+
+ void
+ lock()
+ {
+ auto __id = this_thread::get_id();
+ _Can_lock __can_lock{this, __id};
+ unique_lock<mutex> __lk(_M_mut);
+ _M_cv.wait(__lk, __can_lock);
+ if (_M_count == -1u)
+ __throw_system_error(EAGAIN); // [thread.timedmutex.recursive]/3
+ _M_owner = __id;
+ ++_M_count;
+ }
+
+ bool
+ try_lock()
+ {
+ auto __id = this_thread::get_id();
+ _Can_lock __can_lock{this, __id};
+ lock_guard<mutex> __lk(_M_mut);
+ if (!__can_lock())
+ return false;
+ if (_M_count == -1u)
+ return false;
+ _M_owner = __id;
+ ++_M_count;
+ return true;
+ }
+
+ template<typename _Rep, typename _Period>
+ bool
+ try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
+ {
+ auto __id = this_thread::get_id();
+ _Can_lock __can_lock{this, __id};
+ unique_lock<mutex> __lk(_M_mut);
+ if (!_M_cv.wait_for(__lk, __rtime, __can_lock))
+ return false;
+ if (_M_count == -1u)
+ return false;
+ _M_owner = __id;
+ ++_M_count;
+ return true;
+ }
+
+ template<typename _Clock, typename _Duration>
+ bool
+ try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
+ {
+ auto __id = this_thread::get_id();
+ _Can_lock __can_lock{this, __id};
+ unique_lock<mutex> __lk(_M_mut);
+ if (!_M_cv.wait_until(__lk, __atime, __can_lock))
+ return false;
+ if (_M_count == -1u)
+ return false;
+ _M_owner = __id;
+ ++_M_count;
+ return true;
+ }
+
+ void
+ unlock()
+ {
+ lock_guard<mutex> __lk(_M_mut);
+ __glibcxx_assert( _M_owner == this_thread::get_id() );
+ __glibcxx_assert( _M_count > 0 );
+ if (--_M_count == 0)
+ {
+ _M_owner = {};
+ _M_cv.notify_one();
+ }
+ }
+ };
+
+#endif
+#endif // _GLIBCXX_HAS_GTHREADS
+
+ /// @cond undocumented
+ template<typename _Lock>
+ inline unique_lock<_Lock>
+ __try_to_lock(_Lock& __l)
+ { return unique_lock<_Lock>{__l, try_to_lock}; }
+
+ template<int _Idx, bool _Continue = true>
+ struct __try_lock_impl
+ {
+ template<typename... _Lock>
+ static void
+ __do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
+ {
+ __idx = _Idx;
+ auto __lock = std::__try_to_lock(std::get<_Idx>(__locks));
+ if (__lock.owns_lock())
+ {
+ constexpr bool __cont = _Idx + 2 < sizeof...(_Lock);
+ using __try_locker = __try_lock_impl<_Idx + 1, __cont>;
+ __try_locker::__do_try_lock(__locks, __idx);
+ if (__idx == -1)
+ __lock.release();
+ }
+ }
+ };
+
+ template<int _Idx>
+ struct __try_lock_impl<_Idx, false>
+ {
+ template<typename... _Lock>
+ static void
+ __do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
+ {
+ __idx = _Idx;
+ auto __lock = std::__try_to_lock(std::get<_Idx>(__locks));
+ if (__lock.owns_lock())
+ {
+ __idx = -1;
+ __lock.release();
+ }
+ }
+ };
+ /// @endcond
+
+ /** @brief Generic try_lock.
+ * @param __l1 Meets Lockable requirements (try_lock() may throw).
+ * @param __l2 Meets Lockable requirements (try_lock() may throw).
+ * @param __l3 Meets Lockable requirements (try_lock() may throw).
+ * @return Returns -1 if all try_lock() calls return true. Otherwise returns
+ * a 0-based index corresponding to the argument that returned false.
+ * @post Either all arguments are locked, or none will be.
+ *
+ * Sequentially calls try_lock() on each argument.
+ */
+ template<typename _Lock1, typename _Lock2, typename... _Lock3>
+ int
+ try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
+ {
+ int __idx;
+ auto __locks = std::tie(__l1, __l2, __l3...);
+ __try_lock_impl<0>::__do_try_lock(__locks, __idx);
+ return __idx;
+ }
+
+ /** @brief Generic lock.
+ * @param __l1 Meets Lockable requirements (try_lock() may throw).
+ * @param __l2 Meets Lockable requirements (try_lock() may throw).
+ * @param __l3 Meets Lockable requirements (try_lock() may throw).
+ * @throw An exception thrown by an argument's lock() or try_lock() member.
+ * @post All arguments are locked.
+ *
+ * All arguments are locked via a sequence of calls to lock(), try_lock()
+ * and unlock(). If the call exits via an exception any locks that were
+ * obtained will be released.
+ */
+ template<typename _L1, typename _L2, typename... _L3>
+ void
+ lock(_L1& __l1, _L2& __l2, _L3&... __l3)
+ {
+ while (true)
+ {
+ using __try_locker = __try_lock_impl<0, sizeof...(_L3) != 0>;
+ unique_lock<_L1> __first(__l1);
+ int __idx;
+ auto __locks = std::tie(__l2, __l3...);
+ __try_locker::__do_try_lock(__locks, __idx);
+ if (__idx == -1)
+ {
+ __first.release();
+ return;
+ }
+ }
+ }
+
+#if __cplusplus >= 201703L
+#define __cpp_lib_scoped_lock 201703
+ /** @brief A scoped lock type for multiple lockable objects.
+ *
+ * A scoped_lock controls mutex ownership within a scope, releasing
+ * ownership in the destructor.
+ */
+ template<typename... _MutexTypes>
+ class scoped_lock
+ {
+ public:
+ explicit scoped_lock(_MutexTypes&... __m) : _M_devices(std::tie(__m...))
+ { std::lock(__m...); }
+
+ explicit scoped_lock(adopt_lock_t, _MutexTypes&... __m) noexcept
+ : _M_devices(std::tie(__m...))
+ { } // calling thread owns mutex
+
+ ~scoped_lock()
+ { std::apply([](auto&... __m) { (__m.unlock(), ...); }, _M_devices); }
+
+ scoped_lock(const scoped_lock&) = delete;
+ scoped_lock& operator=(const scoped_lock&) = delete;
+
+ private:
+ tuple<_MutexTypes&...> _M_devices;
+ };
+
+ template<>
+ class scoped_lock<>
+ {
+ public:
+ explicit scoped_lock() = default;
+ explicit scoped_lock(adopt_lock_t) noexcept { }
+ ~scoped_lock() = default;
+
+ scoped_lock(const scoped_lock&) = delete;
+ scoped_lock& operator=(const scoped_lock&) = delete;
+ };
+
+ template<typename _Mutex>
+ class scoped_lock<_Mutex>
+ {
+ public:
+ using mutex_type = _Mutex;
+
+ explicit scoped_lock(mutex_type& __m) : _M_device(__m)
+ { _M_device.lock(); }
+
+ explicit scoped_lock(adopt_lock_t, mutex_type& __m) noexcept
+ : _M_device(__m)
+ { } // calling thread owns mutex
+
+ ~scoped_lock()
+ { _M_device.unlock(); }
+
+ scoped_lock(const scoped_lock&) = delete;
+ scoped_lock& operator=(const scoped_lock&) = delete;
+
+ private:
+ mutex_type& _M_device;
+ };
+#endif // C++17
+
+#ifdef _GLIBCXX_HAS_GTHREADS
+ /// Flag type used by std::call_once
+ struct once_flag
+ {
+ private:
+ typedef __gthread_once_t __native_type;
+ __native_type _M_once = __GTHREAD_ONCE_INIT;
+
+ public:
+ /// Constructor
+ constexpr once_flag() noexcept = default;
+
+ /// Deleted copy constructor
+ once_flag(const once_flag&) = delete;
+ /// Deleted assignment operator
+ once_flag& operator=(const once_flag&) = delete;
+
+ template<typename _Callable, typename... _Args>
+ friend void
+ call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
+ };
+
+ /// @cond undocumented
+#ifdef _GLIBCXX_HAVE_TLS
+ extern __thread void* __once_callable;
+ extern __thread void (*__once_call)();
+#else
+ extern function<void()> __once_functor;
+
+ extern void
+ __set_once_functor_lock_ptr(unique_lock<mutex>*);
+
+ extern mutex&
+ __get_once_mutex();
+#endif
+
+ extern "C" void __once_proxy(void);
+ /// @endcond
+
+ /// Invoke a callable and synchronize with other calls using the same flag
+ template<typename _Callable, typename... _Args>
+ void
+ call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
+ {
+ // _GLIBCXX_RESOLVE_LIB_DEFECTS
+ // 2442. call_once() shouldn't DECAY_COPY()
+ auto __callable = [&] {
+ std::__invoke(std::forward<_Callable>(__f),
+ std::forward<_Args>(__args)...);
+ };
+#ifdef _GLIBCXX_HAVE_TLS
+ __once_callable = std::__addressof(__callable);
+ __once_call = []{ (*(decltype(__callable)*)__once_callable)(); };
+#else
+ unique_lock<mutex> __functor_lock(__get_once_mutex());
+ __once_functor = __callable;
+ __set_once_functor_lock_ptr(&__functor_lock);
+#endif
+
+ int __e = __gthread_once(&__once._M_once, &__once_proxy);
+
+#ifndef _GLIBCXX_HAVE_TLS
+ if (__functor_lock)
+ __set_once_functor_lock_ptr(0);
+#endif
+
+#ifdef __clang_analyzer__
+ // PR libstdc++/82481
+ __once_callable = nullptr;
+ __once_call = nullptr;
+#endif
+
+ if (__e)
+ __throw_system_error(__e);
+ }
+#endif // _GLIBCXX_HAS_GTHREADS
+
+ // @} group mutexes
+_GLIBCXX_END_NAMESPACE_VERSION
+} // namespace
+
+#endif // C++11
+
+#endif // _GLIBCXX_MUTEX
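
A usage sketch for the C++17 scoped_lock and call_once defined above, not part
of the patch and assuming gthreads support:

    #include <mutex>
    #include <iostream>

    std::once_flag flag;
    std::mutex a, b;

    void critical()
    {
      // scoped_lock acquires both mutexes via the std::lock algorithm.
      std::scoped_lock lk(a, b);
      // The callable runs exactly once, however many threads call this.
      std::call_once(flag, []{ std::cout << "initialised once\n"; });
    }

    int main()
    {
      critical();
      critical(); // prints nothing the second time
    }
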
diff --git a/include/shared_mutex b/include/shared_mutex
new file mode 100644
index 0000000..3438861
--- /dev/null
+++ b/include/shared_mutex
@@ -0,0 +1,855 @@
+// -*- C++ -*-
+
+// Copyright (C) 2013-2020 Free Software Foundation, Inc.
+//
+// This file is part of the GNU ISO C++ Library. This library is free
+// software; you can redistribute it and/or modify it under the
+// terms of the GNU General Public License as published by the
+// Free Software Foundation; either version 3, or (at your option)
+// any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// Under Section 7 of GPL version 3, you are granted additional
+// permissions described in the GCC Runtime Library Exception, version
+// 3.1, as published by the Free Software Foundation.
+
+// You should have received a copy of the GNU General Public License and
+// a copy of the GCC Runtime Library Exception along with this program;
+// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+// <http://www.gnu.org/licenses/>.
+
+/** @file include/shared_mutex
+ * This is a Standard C++ Library header.
+ */
+
+#ifndef _GLIBCXX_SHARED_MUTEX
+#define _GLIBCXX_SHARED_MUTEX 1
+
+#pragma GCC system_header
+
+#if __cplusplus >= 201402L
+
+#include <condition_variable>
+#include <bits/functexcept.h>
+
+namespace std _GLIBCXX_VISIBILITY(default)
+{
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+
+ /**
+ * @addtogroup mutexes
+ * @{
+ */
+
+#ifdef _GLIBCXX_HAS_GTHREADS
+
+#if __cplusplus >= 201703L
+#define __cpp_lib_shared_mutex 201505
+ class shared_mutex;
+#endif
+
+#define __cpp_lib_shared_timed_mutex 201402
+ class shared_timed_mutex;
+
+ /// @cond undocumented
+
+#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
+#ifdef __gthrw
+#define _GLIBCXX_GTHRW(name) \
+ __gthrw(pthread_ ## name); \
+ static inline int \
+ __glibcxx_ ## name (pthread_rwlock_t *__rwlock) \
+ { \
+ if (__gthread_active_p ()) \
+ return __gthrw_(pthread_ ## name) (__rwlock); \
+ else \
+ return 0; \
+ }
+ _GLIBCXX_GTHRW(rwlock_rdlock)
+ _GLIBCXX_GTHRW(rwlock_tryrdlock)
+ _GLIBCXX_GTHRW(rwlock_wrlock)
+ _GLIBCXX_GTHRW(rwlock_trywrlock)
+ _GLIBCXX_GTHRW(rwlock_unlock)
+# ifndef PTHREAD_RWLOCK_INITIALIZER
+ _GLIBCXX_GTHRW(rwlock_destroy)
+ __gthrw(pthread_rwlock_init);
+ static inline int
+ __glibcxx_rwlock_init (pthread_rwlock_t *__rwlock)
+ {
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_rwlock_init) (__rwlock, NULL);
+ else
+ return 0;
+ }
+# endif
+# if _GTHREAD_USE_MUTEX_TIMEDLOCK
+ __gthrw(pthread_rwlock_timedrdlock);
+ static inline int
+ __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
+ const timespec *__ts)
+ {
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_rwlock_timedrdlock) (__rwlock, __ts);
+ else
+ return 0;
+ }
+ __gthrw(pthread_rwlock_timedwrlock);
+ static inline int
+ __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
+ const timespec *__ts)
+ {
+ if (__gthread_active_p ())
+ return __gthrw_(pthread_rwlock_timedwrlock) (__rwlock, __ts);
+ else
+ return 0;
+ }
+# endif
+#else
+ static inline int
+ __glibcxx_rwlock_rdlock (pthread_rwlock_t *__rwlock)
+ { return pthread_rwlock_rdlock (__rwlock); }
+ static inline int
+ __glibcxx_rwlock_tryrdlock (pthread_rwlock_t *__rwlock)
+ { return pthread_rwlock_tryrdlock (__rwlock); }
+ static inline int
+ __glibcxx_rwlock_wrlock (pthread_rwlock_t *__rwlock)
+ { return pthread_rwlock_wrlock (__rwlock); }
+ static inline int
+ __glibcxx_rwlock_trywrlock (pthread_rwlock_t *__rwlock)
+ { return pthread_rwlock_trywrlock (__rwlock); }
+ static inline int
+ __glibcxx_rwlock_unlock (pthread_rwlock_t *__rwlock)
+ { return pthread_rwlock_unlock (__rwlock); }
+ static inline int
+ __glibcxx_rwlock_destroy(pthread_rwlock_t *__rwlock)
+ { return pthread_rwlock_destroy (__rwlock); }
+ static inline int
+ __glibcxx_rwlock_init(pthread_rwlock_t *__rwlock)
+ { return pthread_rwlock_init (__rwlock, NULL); }
+# if _GTHREAD_USE_MUTEX_TIMEDLOCK
+ static inline int
+ __glibcxx_rwlock_timedrdlock (pthread_rwlock_t *__rwlock,
+ const timespec *__ts)
+ { return pthread_rwlock_timedrdlock (__rwlock, __ts); }
+ static inline int
+ __glibcxx_rwlock_timedwrlock (pthread_rwlock_t *__rwlock,
+ const timespec *__ts)
+ { return pthread_rwlock_timedwrlock (__rwlock, __ts); }
+# endif
+#endif
+
+ /// A shared mutex type implemented using pthread_rwlock_t.
+ class __shared_mutex_pthread
+ {
+ friend class shared_timed_mutex;
+
+#ifdef PTHREAD_RWLOCK_INITIALIZER
+ pthread_rwlock_t _M_rwlock = PTHREAD_RWLOCK_INITIALIZER;
+
+ public:
+ __shared_mutex_pthread() = default;
+ ~__shared_mutex_pthread() = default;
+#else
+ pthread_rwlock_t _M_rwlock;
+
+ public:
+ __shared_mutex_pthread()
+ {
+ int __ret = __glibcxx_rwlock_init(&_M_rwlock);
+ if (__ret == ENOMEM)
+ __throw_bad_alloc();
+ else if (__ret == EAGAIN)
+ __throw_system_error(int(errc::resource_unavailable_try_again));
+ else if (__ret == EPERM)
+ __throw_system_error(int(errc::operation_not_permitted));
+ // Errors not handled: EBUSY, EINVAL
+ __glibcxx_assert(__ret == 0);
+ }
+
+ ~__shared_mutex_pthread()
+ {
+ int __ret __attribute((__unused__)) = __glibcxx_rwlock_destroy(&_M_rwlock);
+ // Errors not handled: EBUSY, EINVAL
+ __glibcxx_assert(__ret == 0);
+ }
+#endif
+
+ __shared_mutex_pthread(const __shared_mutex_pthread&) = delete;
+ __shared_mutex_pthread& operator=(const __shared_mutex_pthread&) = delete;
+
+ void
+ lock()
+ {
+ int __ret = __glibcxx_rwlock_wrlock(&_M_rwlock);
+ if (__ret == EDEADLK)
+ __throw_system_error(int(errc::resource_deadlock_would_occur));
+ // Errors not handled: EINVAL
+ __glibcxx_assert(__ret == 0);
+ }
+
+ bool
+ try_lock()
+ {
+ int __ret = __glibcxx_rwlock_trywrlock(&_M_rwlock);
+ if (__ret == EBUSY) return false;
+ // Errors not handled: EINVAL
+ __glibcxx_assert(__ret == 0);
+ return true;
+ }
+
+ void
+ unlock()
+ {
+ int __ret __attribute((__unused__)) = __glibcxx_rwlock_unlock(&_M_rwlock);
+ // Errors not handled: EPERM, EBUSY, EINVAL
+ __glibcxx_assert(__ret == 0);
+ }
+
+ // Shared ownership
+
+ void
+ lock_shared()
+ {
+ int __ret;
+ // We retry if we exceeded the maximum number of read locks supported by
+ // the POSIX implementation; this can result in busy-waiting, but this
+ // is okay based on the current specification of forward progress
+ // guarantees by the standard.
+ do
+ __ret = __glibcxx_rwlock_rdlock(&_M_rwlock);
+ while (__ret == EAGAIN);
+ if (__ret == EDEADLK)
+ __throw_system_error(int(errc::resource_deadlock_would_occur));
+ // Errors not handled: EINVAL
+ __glibcxx_assert(__ret == 0);
+ }
+
+ bool
+ try_lock_shared()
+ {
+ int __ret = __glibcxx_rwlock_tryrdlock(&_M_rwlock);
+ // If the maximum number of read locks has been exceeded, we just fail
+ // to acquire the lock. Unlike for lock(), we are not allowed to throw
+ // an exception.
+ if (__ret == EBUSY || __ret == EAGAIN) return false;
+ // Errors not handled: EINVAL
+ __glibcxx_assert(__ret == 0);
+ return true;
+ }
+
+ void
+ unlock_shared()
+ {
+ unlock();
+ }
+
+ void* native_handle() { return &_M_rwlock; }
+ };
+#endif
+
+#if ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
+ /// A shared mutex type implemented using std::condition_variable.
+ class __shared_mutex_cv
+ {
+ friend class shared_timed_mutex;
+
+ // Based on Howard Hinnant's reference implementation from N2406.
+
+ // The high bit of _M_state is the write-entered flag which is set to
+ // indicate a writer has taken the lock or is queuing to take the lock.
+ // The remaining bits are the count of reader locks.
+ //
+ // To take a reader lock, block on gate1 while the write-entered flag is
+ // set or the maximum number of reader locks is held, then increment the
+ // reader lock count.
+ // To release, decrement the count, then if the write-entered flag is set
+ // and the count is zero then signal gate2 to wake a queued writer,
+ // otherwise if the maximum number of reader locks was held signal gate1
+ // to wake a reader.
+ //
+ // To take a writer lock, block on gate1 while the write-entered flag is
+ // set, then set the write-entered flag to start queueing, then block on
+ // gate2 while the number of reader locks is non-zero.
+ // To release, unset the write-entered flag and signal gate1 to wake all
+ // blocked readers and writers.
+ //
+ // This means that when no reader locks are held readers and writers get
+ // equal priority. When one or more reader locks is held a writer gets
+ // priority and no more reader locks can be taken while the writer is
+ // queued.
+
+ // Only locked when accessing _M_state or waiting on condition variables.
+ mutex _M_mut;
+ // Used to block while write-entered is set or reader count at maximum.
+ condition_variable _M_gate1;
+ // Used to block queued writers while reader count is non-zero.
+ condition_variable _M_gate2;
+ // The write-entered flag and reader count.
+ unsigned _M_state;
+
+ static constexpr unsigned _S_write_entered
+ = 1U << (sizeof(unsigned)*__CHAR_BIT__ - 1);
+ static constexpr unsigned _S_max_readers = ~_S_write_entered;
+
+ // Test whether the write-entered flag is set. _M_mut must be locked.
+ bool _M_write_entered() const { return _M_state & _S_write_entered; }
+
+ // The number of reader locks currently held. _M_mut must be locked.
+ unsigned _M_readers() const { return _M_state & _S_max_readers; }
+
+ public:
+ __shared_mutex_cv() : _M_state(0) {}
+
+ ~__shared_mutex_cv()
+ {
+ __glibcxx_assert( _M_state == 0 );
+ }
+
+ __shared_mutex_cv(const __shared_mutex_cv&) = delete;
+ __shared_mutex_cv& operator=(const __shared_mutex_cv&) = delete;
+
+ // Exclusive ownership
+
+ void
+ lock()
+ {
+ unique_lock<mutex> __lk(_M_mut);
+ // Wait until we can set the write-entered flag.
+ _M_gate1.wait(__lk, [=]{ return !_M_write_entered(); });
+ _M_state |= _S_write_entered;
+ // Then wait until there are no more readers.
+ _M_gate2.wait(__lk, [=]{ return _M_readers() == 0; });
+ }
+
+ bool
+ try_lock()
+ {
+ unique_lock<mutex> __lk(_M_mut, try_to_lock);
+ if (__lk.owns_lock() && _M_state == 0)
+ {
+ _M_state = _S_write_entered;
+ return true;
+ }
+ return false;
+ }
+
+ void
+ unlock()
+ {
+ lock_guard<mutex> __lk(_M_mut);
+ __glibcxx_assert( _M_write_entered() );
+ _M_state = 0;
+ // call notify_all() while mutex is held so that another thread can't
+ // lock and unlock the mutex then destroy *this before we make the call.
+ _M_gate1.notify_all();
+ }
+
+ // Shared ownership
+
+ void
+ lock_shared()
+ {
+ unique_lock<mutex> __lk(_M_mut);
+ _M_gate1.wait(__lk, [=]{ return _M_state < _S_max_readers; });
+ ++_M_state;
+ }
+
+ bool
+ try_lock_shared()
+ {
+ unique_lock<mutex> __lk(_M_mut, try_to_lock);
+ if (!__lk.owns_lock())
+ return false;
+ if (_M_state < _S_max_readers)
+ {
+ ++_M_state;
+ return true;
+ }
+ return false;
+ }
+
+ void
+ unlock_shared()
+ {
+ lock_guard<mutex> __lk(_M_mut);
+ __glibcxx_assert( _M_readers() > 0 );
+ auto __prev = _M_state--;
+ if (_M_write_entered())
+ {
+ // Wake the queued writer if there are no more readers.
+ if (_M_readers() == 0)
+ _M_gate2.notify_one();
+ // No need to notify gate1 because we give priority to the queued
+ // writer, and that writer will eventually notify gate1 after it
+ // clears the write-entered flag.
+ }
+ else
+ {
+ // Wake any thread that was blocked on reader overflow.
+ if (__prev == _S_max_readers)
+ _M_gate1.notify_one();
+ }
+ }
+ };
+#endif
+ /// @endcond
+
+#if __cplusplus > 201402L
+ /// The standard shared mutex type.
+ class shared_mutex
+ {
+ public:
+ shared_mutex() = default;
+ ~shared_mutex() = default;
+
+ shared_mutex(const shared_mutex&) = delete;
+ shared_mutex& operator=(const shared_mutex&) = delete;
+
+ // Exclusive ownership
+
+ void lock() { _M_impl.lock(); }
+ bool try_lock() { return _M_impl.try_lock(); }
+ void unlock() { _M_impl.unlock(); }
+
+ // Shared ownership
+
+ void lock_shared() { _M_impl.lock_shared(); }
+ bool try_lock_shared() { return _M_impl.try_lock_shared(); }
+ void unlock_shared() { _M_impl.unlock_shared(); }
+
+#if _GLIBCXX_USE_PTHREAD_RWLOCK_T
+ typedef void* native_handle_type;
+ native_handle_type native_handle() { return _M_impl.native_handle(); }
+
+ private:
+ __shared_mutex_pthread _M_impl;
+#else
+ private:
+ __shared_mutex_cv _M_impl;
+#endif
+ };
+#endif // C++17
+
+ /// @cond undocumented
+#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
+ using __shared_timed_mutex_base = __shared_mutex_pthread;
+#else
+ using __shared_timed_mutex_base = __shared_mutex_cv;
+#endif
+ /// @endcond
+
+ /// The standard shared timed mutex type.
+ class shared_timed_mutex
+ : private __shared_timed_mutex_base
+ {
+ using _Base = __shared_timed_mutex_base;
+
+ // Must use the same clock as condition_variable for __shared_mutex_cv.
+#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
+ using __clock_t = chrono::steady_clock;
+#else
+ using __clock_t = chrono::system_clock;
+#endif
+
+ public:
+ shared_timed_mutex() = default;
+ ~shared_timed_mutex() = default;
+
+ shared_timed_mutex(const shared_timed_mutex&) = delete;
+ shared_timed_mutex& operator=(const shared_timed_mutex&) = delete;
+
+ // Exclusive ownership
+
+ void lock() { _Base::lock(); }
+ bool try_lock() { return _Base::try_lock(); }
+ void unlock() { _Base::unlock(); }
+
+ template<typename _Rep, typename _Period>
+ bool
+ try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
+ {
+ auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
+ if (ratio_greater<__clock_t::period, _Period>())
+ ++__rt;
+ return try_lock_until(__clock_t::now() + __rt);
+ }
+
+ // Shared ownership
+
+ void lock_shared() { _Base::lock_shared(); }
+ bool try_lock_shared() { return _Base::try_lock_shared(); }
+ void unlock_shared() { _Base::unlock_shared(); }
+
+ template<typename _Rep, typename _Period>
+ bool
+ try_lock_shared_for(const chrono::duration<_Rep, _Period>& __rtime)
+ {
+ auto __rt = chrono::duration_cast<__clock_t::duration>(__rtime);
+ if (ratio_greater<__clock_t::period, _Period>())
+ ++__rt;
+ return try_lock_shared_until(__clock_t::now() + __rt);
+ }
+
+#if _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
+
+ // Exclusive ownership
+
+ template<typename _Duration>
+ bool
+ try_lock_until(const chrono::time_point<chrono::system_clock,
+ _Duration>& __atime)
+ {
+ auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
+ auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
+
+ __gthread_time_t __ts =
+ {
+ static_cast<std::time_t>(__s.time_since_epoch().count()),
+ static_cast<long>(__ns.count())
+ };
+
+ int __ret = __glibcxx_rwlock_timedwrlock(&_M_rwlock, &__ts);
+ // On self-deadlock, we just fail to acquire the lock. Technically,
+ // the program violated the precondition.
+ if (__ret == ETIMEDOUT || __ret == EDEADLK)
+ return false;
+ // Errors not handled: EINVAL
+ __glibcxx_assert(__ret == 0);
+ return true;
+ }
+
+#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
+ template<typename _Duration>
+ bool
+ try_lock_until(const chrono::time_point<chrono::steady_clock,
+ _Duration>& __atime)
+ {
+ auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
+ auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
+
+ __gthread_time_t __ts =
+ {
+ static_cast<std::time_t>(__s.time_since_epoch().count()),
+ static_cast<long>(__ns.count())
+ };
+
+ int __ret = pthread_rwlock_clockwrlock(&_M_rwlock, CLOCK_MONOTONIC,
+ &__ts);
+ // On self-deadlock, we just fail to acquire the lock. Technically,
+ // the program violated the precondition.
+ if (__ret == ETIMEDOUT || __ret == EDEADLK)
+ return false;
+ // Errors not handled: EINVAL
+ __glibcxx_assert(__ret == 0);
+ return true;
+ }
+#endif
+
+ template<typename _Clock, typename _Duration>
+ bool
+ try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
+ {
+#if __cplusplus > 201703L
+ static_assert(chrono::is_clock_v<_Clock>);
+#endif
+ // The user-supplied clock may not tick at the same rate as
+ // steady_clock, so we must loop in order to guarantee that
+ // the timeout has expired before returning false.
+ typename _Clock::time_point __now = _Clock::now();
+ do {
+ auto __rtime = __atime - __now;
+ if (try_lock_for(__rtime))
+ return true;
+ __now = _Clock::now();
+ } while (__atime > __now);
+ return false;
+ }
+
+ // Shared ownership
+
+ template<typename _Duration>
+ bool
+ try_lock_shared_until(const chrono::time_point<chrono::system_clock,
+ _Duration>& __atime)
+ {
+ auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
+ auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
+
+ __gthread_time_t __ts =
+ {
+ static_cast<std::time_t>(__s.time_since_epoch().count()),
+ static_cast<long>(__ns.count())
+ };
+
+ int __ret;
+ // Unlike for lock(), we are not allowed to throw an exception so if
+ // the maximum number of read locks has been exceeded, or we would
+ // deadlock, we just try to acquire the lock again (and will time out
+ // eventually).
+ // In cases where we would exceed the maximum number of read locks
+ // throughout the whole time until the timeout, we will fail to
+ // acquire the lock even if it would be logically free; however, this
+ // is allowed by the standard, and we made a "strong effort"
+ // (see C++14 30.4.1.4p26).
+ // For cases where the implementation detects a deadlock we
+ // intentionally block and timeout so that an early return isn't
+ // mistaken for a spurious failure, which might help users realise
+ // there is a deadlock.
+ do
+ __ret = __glibcxx_rwlock_timedrdlock(&_M_rwlock, &__ts);
+ while (__ret == EAGAIN || __ret == EDEADLK);
+ if (__ret == ETIMEDOUT)
+ return false;
+ // Errors not handled: EINVAL
+ __glibcxx_assert(__ret == 0);
+ return true;
+ }
+
+#ifdef _GLIBCXX_USE_PTHREAD_RWLOCK_CLOCKLOCK
+ template<typename _Duration>
+ bool
+ try_lock_shared_until(const chrono::time_point<chrono::steady_clock,
+ _Duration>& __atime)
+ {
+ auto __s = chrono::time_point_cast<chrono::seconds>(__atime);
+ auto __ns = chrono::duration_cast<chrono::nanoseconds>(__atime - __s);
+
+ __gthread_time_t __ts =
+ {
+ static_cast<std::time_t>(__s.time_since_epoch().count()),
+ static_cast<long>(__ns.count())
+ };
+
+ int __ret = pthread_rwlock_clockrdlock(&_M_rwlock, CLOCK_MONOTONIC,
+ &__ts);
+ // On self-deadlock, we just fail to acquire the lock. Technically,
+ // the program violated the precondition.
+ if (__ret == ETIMEDOUT || __ret == EDEADLK)
+ return false;
+ // Errors not handled: EINVAL
+ __glibcxx_assert(__ret == 0);
+ return true;
+ }
+#endif
+
+ template<typename _Clock, typename _Duration>
+ bool
+ try_lock_shared_until(const chrono::time_point<_Clock,
+ _Duration>& __atime)
+ {
+#if __cplusplus > 201703L
+ static_assert(chrono::is_clock_v<_Clock>);
+#endif
+ // The user-supplied clock may not tick at the same rate as
+ // steady_clock, so we must loop in order to guarantee that
+ // the timeout has expired before returning false.
+ typename _Clock::time_point __now = _Clock::now();
+ do {
+ auto __rtime = __atime - __now;
+ if (try_lock_shared_for(__rtime))
+ return true;
+ __now = _Clock::now();
+ } while (__atime > __now);
+ return false;
+ }
+
+#else // ! (_GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK)
+
+ // Exclusive ownership
+
+ template<typename _Clock, typename _Duration>
+ bool
+ try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
+ {
+ unique_lock<mutex> __lk(_M_mut);
+ if (!_M_gate1.wait_until(__lk, __abs_time,
+ [=]{ return !_M_write_entered(); }))
+ {
+ return false;
+ }
+ _M_state |= _S_write_entered;
+ if (!_M_gate2.wait_until(__lk, __abs_time,
+ [=]{ return _M_readers() == 0; }))
+ {
+ _M_state ^= _S_write_entered;
+ // Wake all threads blocked while the write-entered flag was set.
+ _M_gate1.notify_all();
+ return false;
+ }
+ return true;
+ }
+
+ // Shared ownership
+
+ template<typename _Clock, typename _Duration>
+ bool
+ try_lock_shared_until(const chrono::time_point<_Clock,
+ _Duration>& __abs_time)
+ {
+ unique_lock<mutex> __lk(_M_mut);
+ if (!_M_gate1.wait_until(__lk, __abs_time,
+ [=]{ return _M_state < _S_max_readers; }))
+ {
+ return false;
+ }
+ ++_M_state;
+ return true;
+ }
+
+#endif // _GLIBCXX_USE_PTHREAD_RWLOCK_T && _GTHREAD_USE_MUTEX_TIMEDLOCK
+ };
+#endif // _GLIBCXX_HAS_GTHREADS
+
+ /// shared_lock
+ template<typename _Mutex>
+ class shared_lock
+ {
+ public:
+ typedef _Mutex mutex_type;
+
+ // Shared locking
+
+ shared_lock() noexcept : _M_pm(nullptr), _M_owns(false) { }
+
+ explicit
+ shared_lock(mutex_type& __m)
+ : _M_pm(std::__addressof(__m)), _M_owns(true)
+ { __m.lock_shared(); }
+
+ shared_lock(mutex_type& __m, defer_lock_t) noexcept
+ : _M_pm(std::__addressof(__m)), _M_owns(false) { }
+
+ shared_lock(mutex_type& __m, try_to_lock_t)
+ : _M_pm(std::__addressof(__m)), _M_owns(__m.try_lock_shared()) { }
+
+ shared_lock(mutex_type& __m, adopt_lock_t)
+ : _M_pm(std::__addressof(__m)), _M_owns(true) { }
+
+ template<typename _Clock, typename _Duration>
+ shared_lock(mutex_type& __m,
+ const chrono::time_point<_Clock, _Duration>& __abs_time)
+ : _M_pm(std::__addressof(__m)),
+ _M_owns(__m.try_lock_shared_until(__abs_time)) { }
+
+ template<typename _Rep, typename _Period>
+ shared_lock(mutex_type& __m,
+ const chrono::duration<_Rep, _Period>& __rel_time)
+ : _M_pm(std::__addressof(__m)),
+ _M_owns(__m.try_lock_shared_for(__rel_time)) { }
+
+ ~shared_lock()
+ {
+ if (_M_owns)
+ _M_pm->unlock_shared();
+ }
+
+ shared_lock(shared_lock const&) = delete;
+ shared_lock& operator=(shared_lock const&) = delete;
+
+ shared_lock(shared_lock&& __sl) noexcept : shared_lock()
+ { swap(__sl); }
+
+ shared_lock&
+ operator=(shared_lock&& __sl) noexcept
+ {
+ shared_lock(std::move(__sl)).swap(*this);
+ return *this;
+ }
+
+ void
+ lock()
+ {
+ _M_lockable();
+ _M_pm->lock_shared();
+ _M_owns = true;
+ }
+
+ bool
+ try_lock()
+ {
+ _M_lockable();
+ return _M_owns = _M_pm->try_lock_shared();
+ }
+
+ template<typename _Rep, typename _Period>
+ bool
+ try_lock_for(const chrono::duration<_Rep, _Period>& __rel_time)
+ {
+ _M_lockable();
+ return _M_owns = _M_pm->try_lock_shared_for(__rel_time);
+ }
+
+ template<typename _Clock, typename _Duration>
+ bool
+ try_lock_until(const chrono::time_point<_Clock, _Duration>& __abs_time)
+ {
+ _M_lockable();
+ return _M_owns = _M_pm->try_lock_shared_until(__abs_time);
+ }
+
+ void
+ unlock()
+ {
+ if (!_M_owns)
+ __throw_system_error(int(errc::resource_deadlock_would_occur));
+ _M_pm->unlock_shared();
+ _M_owns = false;
+ }
+
+ // Setters
+
+ void
+ swap(shared_lock& __u) noexcept
+ {
+ std::swap(_M_pm, __u._M_pm);
+ std::swap(_M_owns, __u._M_owns);
+ }
+
+ mutex_type*
+ release() noexcept
+ {
+ _M_owns = false;
+ return std::exchange(_M_pm, nullptr);
+ }
+
+ // Getters
+
+ bool owns_lock() const noexcept { return _M_owns; }
+
+ explicit operator bool() const noexcept { return _M_owns; }
+
+ mutex_type* mutex() const noexcept { return _M_pm; }
+
+ private:
+ void
+ _M_lockable() const
+ {
+ if (_M_pm == nullptr)
+ __throw_system_error(int(errc::operation_not_permitted));
+ if (_M_owns)
+ __throw_system_error(int(errc::resource_deadlock_would_occur));
+
+ }
+
+ mutex_type* _M_pm;
+ bool _M_owns;
+ };
+
+ /// Swap specialization for shared_lock
+ /// @relates shared_mutex
+ template<typename _Mutex>
+ void
+ swap(shared_lock<_Mutex>& __x, shared_lock<_Mutex>& __y) noexcept
+ { __x.swap(__y); }
+
+ // @} group mutexes
+_GLIBCXX_END_NAMESPACE_VERSION
+} // namespace
+
+#endif // C++14
+
+#endif // _GLIBCXX_SHARED_MUTEX
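
Finally, a reader/writer sketch for the shared_mutex and shared_lock defined
above, not part of the patch and assuming C++17 with gthreads support:

    #include <shared_mutex>
    #include <mutex>
    #include <string>

    std::shared_mutex rw;
    std::string data;

    std::string read_data()
    {
      std::shared_lock<std::shared_mutex> lk(rw); // shared: many readers at once
      return data;
    }

    void write_data(const std::string& s)
    {
      std::unique_lock<std::shared_mutex> lk(rw); // exclusive: one writer
      data = s;
    }

    int main()
    {
      write_data("hello");
      return read_data() == "hello" ? 0 : 1;
    }
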