// Class template head for shm_atomic<T, TableType>: a typed atomic value that
// lives inside a POSIX shared-memory segment and is located by name through a
// TableType directory placed at the start of the mapping.
// NOTE(review): extraction residue — the `class shm_atomic {` line itself is
// not visible in this chunk; the fused leading numbers (17, 18, 21, 22) are
// original source line numbers from the extraction, not code.
17template<
typename T,
typename TableType = shm_table>
// T must be trivially copyable (it is shared across processes as raw bytes)
// and its std::atomic specialization must always be lock-free: a lock-based
// atomic would embed a process-local mutex, meaningless across address spaces.
18 requires std::is_trivially_copyable_v<T> && std::atomic<T>::is_always_lock_free
// Pointer to the atomic object inside the mapped shared-memory segment.
21 std::atomic<T>* atom_ptr{
nullptr};
// Directory entry describing this value (name/offset/sizes); null until bound.
22 const typename TableType::entry* table_entry{
nullptr};
24 std::atomic<T>* get_atomic_ptr(
void* base_addr,
size_t offset) {
25 return reinterpret_cast<std::atomic<T>*
>(
26 static_cast<char*
>(base_addr) + offset
// Constructor fragment (original lines 37-73): bind to an existing named
// atomic in shared memory, or create and register a new one.
// NOTE(review): extraction residue — the constructor signature itself
// (shm_atomic(ShmType& shm, std::string_view name, T initial_value = T{}),
// per the member list) and the if/else braces separating the found/not-found
// paths are not visible in this chunk; fused leading numbers are original
// source line numbers.
37 template<
typename ShmType>
// Compile-time guard: the segment's directory type must match ours, otherwise
// offsets and entries would be interpreted against the wrong table layout.
39 static_assert(std::is_same_v<typename ShmType::table_type, TableType>,
40 "SharedMemory table type must match atomic table type");
// The directory table lives at the very start of the mapping.
42 auto* table =
static_cast<TableType*
>(shm.get_base_addr());
// Copy the name into a fixed, zero-initialized buffer; truncating to
// MAX_NAME_SIZE - 1 keeps the buffer NUL-terminated.
44 char name_buf[TableType::MAX_NAME_SIZE]{};
45 size_t copy_len = std::min(
name.size(),
sizeof(name_buf) - 1);
46 std::copy_n(
name.begin(), copy_len, name_buf);
// Lookup path: reuse an already-registered atomic of the same name.
48 auto* entry = table->find(name_buf);
// NOTE(review): entry is dereferenced here with no visible null check — the
// branch structure around the found/not-found paths was dropped by the
// extraction; confirm the full source guards this before the deref.
52 if (entry->elem_size !=
sizeof(std::atomic<T>)) {
53 throw std::runtime_error(
"Type size mismatch for atomic");
55 atom_ptr = get_atomic_ptr(shm.get_base_addr(), entry->offset);
// Create path: carve out space immediately after the table plus everything
// already allocated in the segment.
59 size_t required_size =
sizeof(std::atomic<T>);
60 size_t table_size =
sizeof(TableType);
61 size_t current_used = table->get_total_allocated_size();
63 size_t offset = table_size + current_used;
// NOTE(review): offset is not rounded up to alignof(std::atomic<T>); if a
// previously allocated element has an odd size, the placement-new target below
// may be misaligned (UB). Confirm the table allocator guarantees alignment.
64 atom_ptr = get_atomic_ptr(shm.get_base_addr(), offset);
// Construct the atomic in place inside the shared segment.
67 new (atom_ptr) std::atomic<T>(initial_value);
// Register the new value; on failure the just-constructed atomic is left in
// place in the segment (presumably harmless — confirm against full source).
70 if (!table->add(name_buf, offset, required_size,
sizeof(std::atomic<T>), 1)) {
71 throw std::runtime_error(
"Failed to add atomic to table");
// Re-find to cache the directory entry for name() lookups.
73 table_entry = table->find(name_buf);
79 return atom_ptr->is_lock_free();
84 void store(T value, std::memory_order order = std::memory_order_seq_cst)
noexcept {
85 atom_ptr->store(value, order);
88 [[nodiscard]] T
load(std::memory_order order = std::memory_order_seq_cst)
const noexcept {
89 return atom_ptr->load(order);
92 [[nodiscard]] T
exchange(T value, std::memory_order order = std::memory_order_seq_cst)
noexcept {
93 return atom_ptr->exchange(value, order);
97 std::memory_order success = std::memory_order_seq_cst,
98 std::memory_order failure = std::memory_order_seq_cst)
noexcept {
99 return atom_ptr->compare_exchange_weak(expected, desired, success, failure);
103 std::memory_order success = std::memory_order_seq_cst,
104 std::memory_order failure = std::memory_order_seq_cst)
noexcept {
105 return atom_ptr->compare_exchange_strong(expected, desired, success, failure);
109 template<
typename U = T>
110 requires std::is_integral_v<U> || std::is_floating_point_v<U>
111 T
fetch_add(T arg, std::memory_order order = std::memory_order_seq_cst)
noexcept {
112 return atom_ptr->fetch_add(arg, order);
115 template<
typename U = T>
116 requires std::is_integral_v<U> || std::is_floating_point_v<U>
117 T
fetch_sub(T arg, std::memory_order order = std::memory_order_seq_cst)
noexcept {
118 return atom_ptr->fetch_sub(arg, order);
122 template<
typename U = T>
123 requires std::is_integral_v<U>
124 T
fetch_and(T arg, std::memory_order order = std::memory_order_seq_cst)
noexcept {
125 return atom_ptr->fetch_and(arg, order);
128 template<
typename U = T>
129 requires std::is_integral_v<U>
130 T
fetch_or(T arg, std::memory_order order = std::memory_order_seq_cst)
noexcept {
131 return atom_ptr->fetch_or(arg, order);
134 template<
typename U = T>
135 requires std::is_integral_v<U>
136 T
fetch_xor(T arg, std::memory_order order = std::memory_order_seq_cst)
noexcept {
137 return atom_ptr->fetch_xor(arg, order);
// NOTE(review): extraction residue — the implicit conversion operator head
// (original line 146) and nine constrained template heads (original lines
// 151-201) whose operator bodies were all dropped by the extraction. Per the
// member list these correspond to operator=, operator++/-- (pre/post),
// operator+=/-= (integral or floating point), and operator&=/|=/^= (integral
// only) — confirm the exact mapping against the full source before editing.
146 operator T() const noexcept {
151 template<
typename U = T>
152 requires std::is_integral_v<U>
157 template<
typename U = T>
158 requires std::is_integral_v<U>
163 template<
typename U = T>
164 requires std::is_integral_v<U>
169 template<
typename U = T>
170 requires std::is_integral_v<U>
176 template<
typename U = T>
177 requires std::is_integral_v<U> || std::is_floating_point_v<U>
182 template<
typename U = T>
183 requires std::is_integral_v<U> || std::is_floating_point_v<U>
188 template<
typename U = T>
189 requires std::is_integral_v<U>
194 template<
typename U = T>
195 requires std::is_integral_v<U>
200 template<
typename U = T>
201 requires std::is_integral_v<U>
207 [[nodiscard]] std::string_view
name() const noexcept {
208 return table_entry ? std::string_view(table_entry->name.data()) : std::string_view{};
212 void wait(T old, std::memory_order order = std::memory_order_seq_cst)
const noexcept {
213 atom_ptr->wait(old, order);
217 atom_ptr->notify_one();
221 atom_ptr->notify_all();
// NOTE(review): extraction residue — six `template<typename TableType =
// shm_table>` heads (original lines 226-241) whose attached declarations were
// dropped; presumably convenience aliases or free helpers parameterized on the
// table type (e.g. shm_atomic<int, TableType> and friends). Confirm against
// the full source before editing.
226template<
typename TableType = shm_table>
229template<
typename TableType = shm_table>
232template<
typename TableType = shm_table>
235template<
typename TableType = shm_table>
238template<
typename TableType = shm_table>
241template<
typename TableType = shm_table>
Shared memory atomic value with auto-discovery.
T fetch_sub(T arg, std::memory_order order=std::memory_order_seq_cst) noexcept
T fetch_add(T arg, std::memory_order order=std::memory_order_seq_cst) noexcept
T operator-=(T arg) noexcept
T operator--(int) noexcept
bool is_lock_free() const noexcept
void notify_all() noexcept
T operator^=(T arg) noexcept
T fetch_or(T arg, std::memory_order order=std::memory_order_seq_cst) noexcept
std::string_view name() const noexcept
T exchange(T value, std::memory_order order=std::memory_order_seq_cst) noexcept
static constexpr bool is_always_lock_free
T operator&=(T arg) noexcept
shm_atomic(ShmType &shm, std::string_view name, T initial_value=T{})
Create or open a shared atomic value.
bool compare_exchange_strong(T &expected, T desired, std::memory_order success=std::memory_order_seq_cst, std::memory_order failure=std::memory_order_seq_cst) noexcept
T operator+=(T arg) noexcept
bool compare_exchange_weak(T &expected, T desired, std::memory_order success=std::memory_order_seq_cst, std::memory_order failure=std::memory_order_seq_cst) noexcept
T operator=(T value) noexcept
void store(T value, std::memory_order order=std::memory_order_seq_cst) noexcept
T fetch_xor(T arg, std::memory_order order=std::memory_order_seq_cst) noexcept
T fetch_and(T arg, std::memory_order order=std::memory_order_seq_cst) noexcept
T load(std::memory_order order=std::memory_order_seq_cst) const noexcept
void wait(T old, std::memory_order order=std::memory_order_seq_cst) const noexcept
T operator++(int) noexcept
void notify_one() noexcept
T operator|=(T arg) noexcept
Core POSIX shared memory management with automatic reference counting.