Commit 43f1f85d authored by Jens Gustedt

now that we have them, use the explicit atomics

parent 25e98023
@@ -351,7 +351,7 @@ P99_WEAK(p99_constraint_handler)
 void p99_constraint_handler(const char * restrict p00_msg,
                             void * restrict p00_ptr,
                             errno_t p00_err) {
-  constraint_handler_t p00_func = atomic_load(&p00_constraint_handler);
+  constraint_handler_t p00_func = atomic_load_explicit(&p00_constraint_handler, memory_order_acquire);
   if (p00_func) p00_func(p00_msg, p00_ptr, p00_err);
 }
@@ -377,7 +377,7 @@ void abort_handler_s(const char * restrict p00_msg,
 p99_inline
 constraint_handler_t set_constraint_handler_s(constraint_handler_t p00_hand) {
   if (!p00_hand) p00_hand = P99_CONSTRAINT_HANDLER;
-  return atomic_exchange(&p00_constraint_handler, p00_hand);
+  return atomic_exchange_explicit(&p00_constraint_handler, p00_hand, memory_order_acq_rel);
 }
 # endif
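Taken together, these two hunks form an acquire/release pair on the handler slot: `set_constraint_handler_s` publishes the new handler through the release half of `acq_rel`, and `p99_constraint_handler` picks it up with `acquire`, so whatever the installer wrote before installing is visible to the caller. A minimal sketch of that pattern, with illustrative names (`handler_t`, `get_handler`, `set_handler` are not p99 identifiers):

```c
#include <stdatomic.h>

typedef void (*handler_t)(const char *);

/* The handler slot; p99 keeps a similar _Atomic function pointer. */
static _Atomic(handler_t) current_handler;

/* Readers only need acquire: it synchronizes with the release part
   of the exchange below, so data the handler relies on is visible. */
static handler_t get_handler(void) {
  return atomic_load_explicit(&current_handler, memory_order_acquire);
}

/* Writers use acq_rel: release publishes the new handler, acquire
   orders the read of the previous handler that is returned. */
static handler_t set_handler(handler_t h) {
  return atomic_exchange_explicit(&current_handler, h, memory_order_acq_rel);
}
```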
@@ -387,6 +387,9 @@ errno_t p00_constraint_call(errno_t p00_cond, char const* p00_file, char const*
   if (p00_cond) {
     if (p00_file) P00_JMP_BUF_FILE = p00_file;
     if (p00_context) P00_JMP_BUF_CONTEXT = p00_context;
+    /* Ensure that all dependent data for this error has been */
+    /* synchronized. */
+    atomic_thread_fence(memory_order_seq_cst);
     p99_constraint_handler(p00_info, 0, p00_cond);
   }
   return p00_cond;
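The new fence is deliberately stronger than the acquire load of the handler pointer alone: a `memory_order_seq_cst` fence orders everything the failing thread wrote before raising the violation, ahead of whatever the handler does next. A sketch of fence-based publication in general, with made-up names (`payload`, `flag`, `producer`, `consumer` are illustrations, not p99 identifiers):

```c
#include <stdatomic.h>

int payload;               /* ordinary, non-atomic data */
_Atomic int flag;

void producer(void) {
  payload = 42;
  atomic_thread_fence(memory_order_seq_cst);  /* publish payload */
  atomic_store_explicit(&flag, 1, memory_order_relaxed);
}

int consumer(void) {
  if (atomic_load_explicit(&flag, memory_order_relaxed)) {
    atomic_thread_fence(memory_order_seq_cst);  /* pick up payload */
    return payload;        /* guaranteed to read 42 */
  }
  return -1;
}
```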
@@ -55,27 +55,27 @@ do { \
   register const P99_MACRO_VAR(p00_h, &(*p00_l)[0]); \
   register const P99_MACRO_VAR(p00_t, &(*p00_l)[1]); \
   p00_el->p99_lifo = 0; \
-  P99_MACRO_VAR(p00_head, atomic_load(p00_h)); \
+  P99_MACRO_VAR(p00_head, atomic_load_explicit(p00_h, memory_order_relaxed)); \
   for (;;) { \
     if (p00_head) { \
       /* spin lock the whole fifo */ \
-      if (atomic_compare_exchange_weak(p00_h, &p00_head, 0)) { \
+      if (atomic_compare_exchange_weak_explicit(p00_h, &p00_head, 0, memory_order_acq_rel, memory_order_relaxed)) { \
         /* make p00_el the last element */ \
-        atomic_exchange(p00_t, p00_el)->p99_lifo = p00_el; \
+        atomic_exchange_explicit(p00_t, p00_el, memory_order_acq_rel)->p99_lifo = p00_el; \
         /* unlock the fifo */ \
-        atomic_store(p00_h, p00_head); \
+        atomic_store_explicit(p00_h, p00_head, memory_order_release); \
         break; \
       } \
     } else { \
-      P99_MACRO_VAR(p00_tail, atomic_load(p00_t)); \
+      P99_MACRO_VAR(p00_tail, atomic_load_explicit(p00_t, memory_order_relaxed)); \
       if (!p00_tail \
-          && atomic_compare_exchange_weak(p00_t, &p00_tail, p00_el)) { \
+          && atomic_compare_exchange_weak_explicit(p00_t, &p00_tail, p00_el, memory_order_acq_rel, memory_order_relaxed)) { \
         /* the fifo was empty, our element is inserted, update the head */ \
-        atomic_store(p00_h, p00_el); \
+        atomic_store_explicit(p00_h, p00_el, memory_order_release); \
         break; \
       } \
       /* we were in the middle of an update of another thread */ \
-      p00_head = atomic_load(p00_h); \
+      p00_head = atomic_load_explicit(p00_h, memory_order_consume); \
     } \
   } \
 } while (false)
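Unrolled into a plain function, the protocol reads: a non-zero head doubles as "unlocked", CAS-ing it to 0 takes a spin lock over the whole fifo, and an empty fifo is started by a CAS on the tail. A sketch under that reading, with simplified illustrative types (`struct node` and `fifo_push` are not the p99 names):

```c
#include <stdatomic.h>
#include <stddef.h>

struct node { struct node *next; };

/* A fifo is a pair of atomic pointers: slot 0 is the head (and,
   while temporarily 0, a spin lock), slot 1 is the tail. */
typedef _Atomic(struct node *) fifo[2];

void fifo_push(fifo *l, struct node *el) {
  _Atomic(struct node *) *head = &(*l)[0];
  _Atomic(struct node *) *tail = &(*l)[1];
  el->next = NULL;
  struct node *h = atomic_load_explicit(head, memory_order_relaxed);
  for (;;) {
    if (h) {
      /* Lock the fifo by swinging the head to 0. */
      if (atomic_compare_exchange_weak_explicit(head, &h, NULL,
              memory_order_acq_rel, memory_order_relaxed)) {
        /* Append behind the old tail, then unlock by restoring the head. */
        atomic_exchange_explicit(tail, el, memory_order_acq_rel)->next = el;
        atomic_store_explicit(head, h, memory_order_release);
        break;
      }
    } else {
      struct node *t = atomic_load_explicit(tail, memory_order_relaxed);
      if (!t && atomic_compare_exchange_weak_explicit(tail, &t, el,
                    memory_order_acq_rel, memory_order_relaxed)) {
        /* The fifo was empty: el is now both head and tail. */
        atomic_store_explicit(head, el, memory_order_release);
        break;
      }
      /* Another thread holds the lock or just inserted; retry. */
      h = atomic_load_explicit(head, memory_order_consume);
    }
  }
}
```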
@@ -129,26 +129,26 @@ p99_extension \
   register const P99_MACRO_VAR(p00_l, (L)); \
   register const P99_MACRO_VAR(p00_h, &(*p00_l)[0]); \
   register const P99_MACRO_VAR(p00_t, &(*p00_l)[1]); \
-  P99_MACRO_VAR(p00_head, atomic_load(p00_h)); \
+  P99_MACRO_VAR(p00_head, atomic_load_explicit(p00_h, memory_order_relaxed)); \
   for (;;) { \
     if (p00_head) { \
       /* spin lock the whole fifo */ \
-      if (atomic_compare_exchange_weak(p00_h, &p00_head, 0)) { \
+      if (atomic_compare_exchange_weak_explicit(p00_h, &p00_head, 0, memory_order_acq_rel, memory_order_consume)) { \
        if (p00_head->p99_lifo) \
          /* there is still another element to come in the fifo, make it \
             the head */ \
-          atomic_store(p00_h, p00_head->p99_lifo); \
+          atomic_store_explicit(p00_h, p00_head->p99_lifo, memory_order_release); \
        else \
          /* this was the last element in the fifo, set the tail to 0, \
             too */ \
-          atomic_store(p00_t, 0); \
+          atomic_store_explicit(p00_t, 0, memory_order_release); \
        p00_head->p99_lifo = 0; \
        break; \
       } \
     } else { \
-      register P99_MACRO_VAR(p00_tail, atomic_load(p00_t)); \
+      register P99_MACRO_VAR(p00_tail, atomic_load_explicit(p00_t, memory_order_consume)); \
       if (!p00_tail) break; \
-      p00_head = atomic_load(p00_h); \
+      p00_head = atomic_load_explicit(p00_h, memory_order_relaxed); \
     } \
   } \
   /* make sure that the result can not be used as an lvalue */ \
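The pop side introduces `memory_order_consume`, used where the only thing that has to be ordered is an access through the freshly loaded pointer itself. A small sketch of that intent, with illustrative names (`shared`, `writer`, `reader` are not p99 identifiers); in practice every mainstream compiler promotes consume to the stronger acquire:

```c
#include <stdatomic.h>
#include <stdio.h>

struct node { int value; };
static _Atomic(struct node *) shared;

void writer(struct node *n) {
  /* The matching release store makes *n visible to consume readers. */
  atomic_store_explicit(&shared, n, memory_order_release);
}

void reader(void) {
  struct node *n = atomic_load_explicit(&shared, memory_order_consume);
  /* n->value is data-dependent on n, which is exactly what consume orders. */
  if (n) printf("%d\n", n->value);
}
```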
@@ -174,18 +174,18 @@ p99_extension \
   register const P99_MACRO_VAR(p00_l, (L)); \
   register const P99_MACRO_VAR(p00_h, &(*p00_l)[0]); \
   register const P99_MACRO_VAR(p00_t, &(*p00_l)[1]); \
-  P99_MACRO_VAR(p00_head, atomic_load(p00_h)); \
+  P99_MACRO_VAR(p00_head, atomic_load_explicit(p00_h, memory_order_relaxed)); \
   for (;;) { \
     if (p00_head) { \
       /* spin lock the whole fifo */ \
-      if (atomic_compare_exchange_weak(p00_h, &p00_head, 0)) { \
-        atomic_store(p00_t, 0); \
+      if (atomic_compare_exchange_weak_explicit(p00_h, &p00_head, 0, memory_order_acq_rel, memory_order_consume)) { \
+        atomic_store_explicit(p00_t, 0, memory_order_release); \
         break; \
       } \
     } else { \
-      register const P99_MACRO_VAR(p00_tail, atomic_load(p00_t)); \
+      register const P99_MACRO_VAR(p00_tail, atomic_load_explicit(p00_t, memory_order_consume)); \
       if (!p00_tail) break; \
-      p00_head = atomic_load(p00_h); \
+      p00_head = atomic_load_explicit(p00_h, memory_order_relaxed); \
     } \
   } \
   /* make sure that the result can not be used as an lvalue */ \
@@ -317,13 +317,18 @@ p00_thrd ** p00_foreign_tab;
 P99_WEAK(p00_foreign_cleanup)
 void p00_foreign_cleanup(void) {
-  size_t p00_foreign = atomic_load(&p00_foreign_nb);
+  size_t p00_foreign = atomic_load_explicit(&p00_foreign_nb, memory_order_consume);
   p00_thrd ** p00_thrd = p00_foreign_tab;
   p00_foreign_tab = 0;
-  for (size_t p00_i = 0; p00_i < p00_foreign; ++p00_i) {
-    if (!pthread_equal(p00_thrd[p00_i]->p00_id, pthread_self()))
-      fputs("found foreign thread\n", stderr);
-    free(p00_thrd[p00_i]);
+  if (p00_foreign) {
+    /* Ensure that all data is synchronized with all threads, not only
+       with the last one that changed p00_foreign_nb. */
+    atomic_thread_fence(memory_order_seq_cst);
+    for (size_t p00_i = 0; p00_i < p00_foreign; ++p00_i) {
+      if (!pthread_equal(p00_thrd[p00_i]->p00_id, pthread_self()))
+        fputs("found foreign thread\n", stderr);
+      free(p00_thrd[p00_i]);
+    }
   }
   free(p00_thrd);
 }
@@ -337,7 +342,7 @@ p99_inline
 thrd_t thrd_current(void) {
   p00_thrd * p00_loc = P00_THRD_LOCAL;
   if (P99_UNLIKELY(!p00_loc)) {
-    size_t p00_nb = atomic_fetch_add(&p00_foreign_nb, 1);
+    size_t p00_nb = atomic_fetch_add_explicit(&p00_foreign_nb, 1, memory_order_acq_rel);
     if (!p00_nb) atexit(p00_foreign_cleanup);
     if ((p00_nb^(p00_nb-1)) == (p00_nb+(p00_nb-1))) {
       p00_foreign_tab = realloc(p00_foreign_tab, sizeof(p00_thrd*[2*(p00_nb+1)]));
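The growth test `(p00_nb^(p00_nb-1)) == (p00_nb+(p00_nb-1))` holds exactly when `p00_nb & (p00_nb-1)` is 0 (XOR and ADD of `n` and `n-1` agree only when there is no carry), i.e. when the counter is 0 or a power of two, so the table doubles at 1, 2, 4, 8, ... entries. A quick, runnable check of that identity (`needs_growth` is an illustrative name):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* True when n is 0 or a power of two. */
static bool needs_growth(size_t n) {
  return (n ^ (n - 1)) == (n + (n - 1));
}

int main(void) {
  for (size_t n = 0; n < 10; ++n)
    printf("%zu: %d\n", n, needs_growth(n));
  /* Prints 1 for 0, 1, 2, 4, 8, matching the doubling in thrd_current. */
  return 0;
}
```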
@@ -119,7 +119,7 @@ uintptr_t p00_tp_tick_get(void) {
   if (P99_UNLIKELY(!(*p00_ret & p00_mask))) {
     uintptr_t p00_tack = 0;
     while (!p00_tack) {
-      p00_tack = atomic_fetch_add(&p00_tp_tack, 1u);
+      p00_tack = atomic_fetch_add_explicit(&p00_tp_tack, 1u, memory_order_acq_rel);
       p00_tack &= p00_mask;
     }
     *p00_ret = (p00_tack << p00_bits);
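This loop hands out ticks from a shared counter: the result is masked to the tag width, and a masked value of 0 is reserved as "no tick", so the loop retries until it draws a non-zero one. A sketch with stand-in names (`tack`, `tick_get`, `mask` replace the `p00_` identifiers):

```c
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uintptr_t tack;   /* global tick source, like p00_tp_tack */

/* Draw a tick and keep only the bits under the mask; 0 is reserved,
   so retry until the masked value is non-zero. */
uintptr_t tick_get(uintptr_t mask) {
  uintptr_t t = 0;
  while (!t) {
    t = atomic_fetch_add_explicit(&tack, 1u, memory_order_acq_rel);
    t &= mask;
  }
  return t;
}
```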
@@ -184,7 +184,7 @@ struct p99_tp_state {
 p99_inline
 bool p00_tp_cmpxchg(_Atomic(p00_tp_glue) volatile*const p00_p, p00_tp_glue volatile*const p00_prev, p00_tp_glue p00_new) {
   P99_MARK("wide cmpxchg start");
-  bool ret = atomic_compare_exchange_weak(p00_p, p00_prev, p00_new);
+  bool ret = atomic_compare_exchange_weak_explicit(p00_p, p00_prev, p00_new, memory_order_acq_rel, memory_order_consume);
   P99_MARK("wide cmpxchg end");
   return ret;
 }
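`p00_tp_glue` pairs a pointer with a tick drawn from `p00_tp_tick_get`, and the whole pair is compare-exchanged as one unit, so a stale snapshot can never compare equal to a recycled slot (the classic ABA defense). A sketch of the shape of such a wide CAS, with illustrative types (`glue`, `tp_cmpxchg` are not the p99 definitions):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Pointer plus modification tick, exchanged atomically as one value. */
typedef struct { void *p; uintptr_t tick; } glue;

bool tp_cmpxchg(_Atomic(glue) *slot, glue *expect, glue desired) {
  /* On common 64-bit targets this compiles to a double-width CAS
     (e.g. cmpxchg16b); elsewhere the runtime may fall back to a lock. */
  return atomic_compare_exchange_weak_explicit(slot, expect, desired,
             memory_order_acq_rel, memory_order_consume);
}
```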
@@ -193,7 +193,7 @@ p99_inline
 p00_tp_glue p00_tp_get(register p99_tp volatile*const p00_tp) {
   register p00_tp_glue p00_ret
     = P99_LIKELY(p00_tp)
-    ? atomic_load(&p00_tp->p00_val)
+    ? atomic_load_explicit(&p00_tp->p00_val, memory_order_consume)
     : (p00_tp_glue)P00_TP_GLUE_INITIALIZER((void*)0, p00_tp_tick_get());
   if (p00_tp && P99_UNLIKELY(!p00_tp_i2i(p00_ret))) {
     /* Only store it in addressable memory if we can't avoid it. */
@@ -214,7 +214,7 @@ p99_inline
 p00_tp_glue p99_tp_xchg(p99_tp volatile* p00_tp, void* p00_val) {
   p00_tp_glue p00_ret
     = P99_LIKELY(p00_tp)
-    ? atomic_load(&p00_tp->p00_val)
+    ? atomic_load_explicit(&p00_tp->p00_val, memory_order_consume)
     : (p00_tp_glue)P00_TP_GLUE_INITIALIZER((void*)0, p00_tp_tick_get());
   if (P99_LIKELY(p00_tp)) {
     register p00_tp_glue p00_rep = P00_TP_GLUE_INITIALIZER(p00_val, p00_tp_tick_get());
@@ -429,7 +429,7 @@ p99_extension ({ \
   /* ensure that the pointer is converted to the */ \
   /* base type, and that the return can't be used as lvalue */ \
   register __typeof__(p00_ref) const p00_r = p00_ref; \
-  if (p00_r) atomic_fetch_add(&p00_r->p99_cnt, 1); \
+  if (p00_r) atomic_fetch_add_explicit(&p00_r->p99_cnt, 1, memory_order_acq_rel); \
   p00_r; \
 })
@@ -439,7 +439,7 @@ p99_extension ({ \
   /* ensure that pointer that is returned is converted to the */ \
   /* base type, and that the return can't be used as lvalue */ \
   register P99_TP_TYPE(p00_tp)* const p00_r = (REF); \
-  if (p00_r) atomic_fetch_add(&p00_r->p99_cnt, 1); \
+  if (p00_r) atomic_fetch_add_explicit(&p00_r->p99_cnt, 1, memory_order_acq_rel); \
   p00_r; \
 })
@@ -451,7 +451,7 @@ p99_extension ({ \
   /* ensure that the pointer is converted to the */ \
   /* base type, and that the return can't be used as lvalue */ \
   register __typeof__(*p00_ref)*const p00_r = p00_ref; \
-  if (p00_r && (atomic_fetch_sub(&p00_r->p99_cnt, 1) == 1)) \
+  if (p00_r && (atomic_fetch_sub_explicit(&p00_r->p99_cnt, 1, memory_order_acq_rel) == 1)) \
     p00_d(p00_r); \
   p00_r; \
 })
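For the reference count, `acq_rel` on the final decrement is what makes destruction safe: the release half orders this thread's last accesses before the count drops, and the acquire half makes every other thread's prior accesses visible to whichever thread performs the final drop and frees the object. A minimal sketch of the pattern, assuming an illustrative `struct refcounted` (not a p99 type):

```c
#include <stdatomic.h>
#include <stdlib.h>

struct refcounted {
  _Atomic size_t cnt;
  /* payload ... */
};

static struct refcounted *ref(struct refcounted *r) {
  if (r) atomic_fetch_add_explicit(&r->cnt, 1, memory_order_acq_rel);
  return r;
}

static void unref(struct refcounted *r) {
  /* fetch_sub returning 1 means this was the last reference: only
     then may the object be destroyed. */
  if (r && atomic_fetch_sub_explicit(&r->cnt, 1, memory_order_acq_rel) == 1)
    free(r);
}
```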