void-packages/srcpkgs/mariadb/patches/force-c11-atomics.patch

--- include/my_atomic.h
+++ include/my_atomic.h
@@ -104,217 +104,6 @@
but can be added, if necessary.
*/
-#define intptr void *
-/**
- Currently we don't support 8-bit and 16-bit operations.
- It can be added later if needed.
-*/
-#undef MY_ATOMIC_HAS_8_16
-
-/*
- * Attempt to do atomic ops without locks
- */
-#include "atomic/nolock.h"
-
-#ifndef make_atomic_cas_body
-/* nolock.h was not able to generate even a CAS function, fall back */
-#error atomic ops for this platform are not implemented
-#endif
-
-/* define missing functions by using the already generated ones */
-#ifndef make_atomic_add_body
-#define make_atomic_add_body(S) \
- int ## S tmp=*a; \
- while (!my_atomic_cas ## S(a, &tmp, tmp+v)) ; \
- v=tmp;
-#endif
-#ifndef make_atomic_fas_body
-#define make_atomic_fas_body(S) \
- int ## S tmp=*a; \
- while (!my_atomic_cas ## S(a, &tmp, v)) ; \
- v=tmp;
-#endif
-#ifndef make_atomic_load_body
-#define make_atomic_load_body(S) \
- ret= 0; /* avoid compiler warning */ \
- (void)(my_atomic_cas ## S(a, &ret, ret));
-#endif
-#ifndef make_atomic_store_body
-#define make_atomic_store_body(S) \
- (void)(my_atomic_fas ## S (a, v));
-#endif
-
-/*
- transparent_union doesn't work in g++
- Bug ?
-
- Darwin's gcc doesn't want to put pointers in a transparent_union
- when built with -arch ppc64. Complains:
- warning: 'transparent_union' attribute ignored
-*/
-#if defined(__GNUC__) && !defined(__cplusplus) && \
- ! (defined(__APPLE__) && (defined(_ARCH_PPC64) ||defined (_ARCH_PPC)))
-/*
- we want to be able to use my_atomic_xxx functions with
- both signed and unsigned integers. But gcc will issue a warning
- "passing arg N of `my_atomic_XXX' as [un]signed due to prototype"
- if the signedness of the argument doesn't match the prototype, or
- "pointer targets in passing argument N of my_atomic_XXX differ in signedness"
- if int* is used where uint* is expected (or vice versa).
- Let's shut these warnings up
-*/
-#define make_transparent_unions(S) \
- typedef union { \
- int ## S i; \
- uint ## S u; \
- } U_ ## S __attribute__ ((transparent_union)); \
- typedef union { \
- int ## S volatile *i; \
- uint ## S volatile *u; \
- } Uv_ ## S __attribute__ ((transparent_union));
-#define uintptr intptr
-make_transparent_unions(8)
-make_transparent_unions(16)
-make_transparent_unions(32)
-make_transparent_unions(64)
-make_transparent_unions(ptr)
-#undef uintptr
-#undef make_transparent_unions
-#define a U_a.i
-#define cmp U_cmp.i
-#define v U_v.i
-#define set U_set.i
-#else
-#define U_8 int8
-#define U_16 int16
-#define U_32 int32
-#define U_64 int64
-#define U_ptr intptr
-#define Uv_8 int8
-#define Uv_16 int16
-#define Uv_32 int32
-#define Uv_64 int64
-#define Uv_ptr intptr
-#define U_a volatile *a
-#define U_cmp *cmp
-#define U_v v
-#define U_set set
-#endif /* __GCC__ transparent_union magic */
-
-#define make_atomic_cas(S) \
-static inline int my_atomic_cas ## S(Uv_ ## S U_a, \
- Uv_ ## S U_cmp, U_ ## S U_set) \
-{ \
- int8 ret; \
- make_atomic_cas_body(S); \
- return ret; \
-}
-
-#define make_atomic_add(S) \
-static inline int ## S my_atomic_add ## S( \
- Uv_ ## S U_a, U_ ## S U_v) \
-{ \
- make_atomic_add_body(S); \
- return v; \
-}
-
-#define make_atomic_fas(S) \
-static inline int ## S my_atomic_fas ## S( \
- Uv_ ## S U_a, U_ ## S U_v) \
-{ \
- make_atomic_fas_body(S); \
- return v; \
-}
-
-#define make_atomic_load(S) \
-static inline int ## S my_atomic_load ## S(Uv_ ## S U_a) \
-{ \
- int ## S ret; \
- make_atomic_load_body(S); \
- return ret; \
-}
-
-#define make_atomic_store(S) \
-static inline void my_atomic_store ## S( \
- Uv_ ## S U_a, U_ ## S U_v) \
-{ \
- make_atomic_store_body(S); \
-}
-
-#ifdef MY_ATOMIC_HAS_8_16
-make_atomic_cas(8)
-make_atomic_cas(16)
-#endif
-make_atomic_cas(32)
-make_atomic_cas(64)
-make_atomic_cas(ptr)
-
-#ifdef MY_ATOMIC_HAS_8_16
-make_atomic_add(8)
-make_atomic_add(16)
-#endif
-make_atomic_add(32)
-make_atomic_add(64)
-
-#ifdef MY_ATOMIC_HAS_8_16
-make_atomic_load(8)
-make_atomic_load(16)
-#endif
-make_atomic_load(32)
-make_atomic_load(64)
-make_atomic_load(ptr)
-
-#ifdef MY_ATOMIC_HAS_8_16
-make_atomic_fas(8)
-make_atomic_fas(16)
-#endif
-make_atomic_fas(32)
-make_atomic_fas(64)
-make_atomic_fas(ptr)
-
-#ifdef MY_ATOMIC_HAS_8_16
-make_atomic_store(8)
-make_atomic_store(16)
-#endif
-make_atomic_store(32)
-make_atomic_store(64)
-make_atomic_store(ptr)
-
-#ifdef _atomic_h_cleanup_
-#include _atomic_h_cleanup_
-#undef _atomic_h_cleanup_
-#endif
-
-#undef U_8
-#undef U_16
-#undef U_32
-#undef U_64
-#undef U_ptr
-#undef Uv_8
-#undef Uv_16
-#undef Uv_32
-#undef Uv_64
-#undef Uv_ptr
-#undef a
-#undef cmp
-#undef v
-#undef set
-#undef U_a
-#undef U_cmp
-#undef U_v
-#undef U_set
-#undef make_atomic_add
-#undef make_atomic_cas
-#undef make_atomic_load
-#undef make_atomic_store
-#undef make_atomic_fas
-#undef make_atomic_add_body
-#undef make_atomic_cas_body
-#undef make_atomic_load_body
-#undef make_atomic_store_body
-#undef make_atomic_fas_body
-#undef intptr
-
/*
the macro below defines (as an expression) the code that
will be run in spin-loops. Intel manuals recummend to have PAUSE there.
@@ -328,7 +117,8 @@ make_atomic_store(ptr)
#define MY_ATOMIC_NOT_1CPU 1
extern int my_atomic_initialize();
-#ifdef __ATOMIC_SEQ_CST
+#define MY_ATOMIC_MODE "gcc-atomics-smp"
+
#define MY_MEMORY_ORDER_RELAXED __ATOMIC_RELAXED
#define MY_MEMORY_ORDER_CONSUME __ATOMIC_CONSUME
#define MY_MEMORY_ORDER_ACQUIRE __ATOMIC_ACQUIRE
@@ -364,43 +154,27 @@ extern int my_atomic_initialize();
__atomic_compare_exchange_n((P), (E), (D), false, (S), (F))
#define my_atomic_casptr_strong_explicit(P, E, D, S, F) \
__atomic_compare_exchange_n((P), (E), (D), false, (S), (F))
-#else
-#define MY_MEMORY_ORDER_RELAXED
-#define MY_MEMORY_ORDER_CONSUME
-#define MY_MEMORY_ORDER_ACQUIRE
-#define MY_MEMORY_ORDER_RELEASE
-#define MY_MEMORY_ORDER_ACQ_REL
-#define MY_MEMORY_ORDER_SEQ_CST
-
-#define my_atomic_store32_explicit(P, D, O) my_atomic_store32((P), (D))
-#define my_atomic_store64_explicit(P, D, O) my_atomic_store64((P), (D))
-#define my_atomic_storeptr_explicit(P, D, O) my_atomic_storeptr((P), (D))
-
-#define my_atomic_load32_explicit(P, O) my_atomic_load32((P))
-#define my_atomic_load64_explicit(P, O) my_atomic_load64((P))
-#define my_atomic_loadptr_explicit(P, O) my_atomic_loadptr((P))
-
-#define my_atomic_fas32_explicit(P, D, O) my_atomic_fas32((P), (D))
-#define my_atomic_fas64_explicit(P, D, O) my_atomic_fas64((P), (D))
-#define my_atomic_fasptr_explicit(P, D, O) my_atomic_fasptr((P), (D))
-
-#define my_atomic_add32_explicit(P, A, O) my_atomic_add32((P), (A))
-#define my_atomic_add64_explicit(P, A, O) my_atomic_add64((P), (A))
-#define my_atomic_addptr_explicit(P, A, O) my_atomic_addptr((P), (A))
-#define my_atomic_cas32_weak_explicit(P, E, D, S, F) \
- my_atomic_cas32((P), (E), (D))
-#define my_atomic_cas64_weak_explicit(P, E, D, S, F) \
- my_atomic_cas64((P), (E), (D))
-#define my_atomic_casptr_weak_explicit(P, E, D, S, F) \
- my_atomic_casptr((P), (E), (D))
+#define my_atomic_store32(P, D) __atomic_store_n((P), (D), __ATOMIC_SEQ_CST)
+#define my_atomic_store64(P, D) __atomic_store_n((P), (D), __ATOMIC_SEQ_CST)
+#define my_atomic_storeptr(P, D) __atomic_store_n((P), (D), __ATOMIC_SEQ_CST)
-#define my_atomic_cas32_strong_explicit(P, E, D, S, F) \
- my_atomic_cas32((P), (E), (D))
-#define my_atomic_cas64_strong_explicit(P, E, D, S, F) \
- my_atomic_cas64((P), (E), (D))
-#define my_atomic_casptr_strong_explicit(P, E, D, S, F) \
- my_atomic_casptr((P), (E), (D))
-#endif
+#define my_atomic_load32(P) __atomic_load_n((P), __ATOMIC_SEQ_CST)
+#define my_atomic_load64(P) __atomic_load_n((P), __ATOMIC_SEQ_CST)
+#define my_atomic_loadptr(P) __atomic_load_n((P), __ATOMIC_SEQ_CST)
+
+#define my_atomic_fas32(P, D) __atomic_exchange_n((P), (D), __ATOMIC_SEQ_CST)
+#define my_atomic_fas64(P, D) __atomic_exchange_n((P), (D), __ATOMIC_SEQ_CST)
+#define my_atomic_fasptr(P, D) __atomic_exchange_n((P), (D), __ATOMIC_SEQ_CST)
+
+#define my_atomic_add32(P, A) __atomic_fetch_add((P), (A), __ATOMIC_SEQ_CST)
+#define my_atomic_add64(P, A) __atomic_fetch_add((P), (A), __ATOMIC_SEQ_CST)
+
+#define my_atomic_cas32(P, E, D) \
+ __atomic_compare_exchange_n((P), (E), (D), 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+#define my_atomic_cas64(P, E, D) \
+ __atomic_compare_exchange_n((P), (E), (D), 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
+#define my_atomic_casptr(P, E, D) \
+ __atomic_compare_exchange_n((P), (E), (D), 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
#endif /* MY_ATOMIC_INCLUDED */
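
A minimal sketch of what the patched my_atomic_* macros expand to, assuming GCC or Clang with C11-style __atomic builtins available. This is illustrative only and not part of the patch; the variable names (counter, expected) are made up for the example.

/*
 * Sketch: the patch drops the hand-rolled CAS/transparent_union fallback
 * and maps my_atomic_* straight onto __atomic builtins with
 * __ATOMIC_SEQ_CST ordering. Equivalent direct calls look like this.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int32_t counter = 0;
    int32_t expected;

    /* my_atomic_add32(&counter, 5) -> __atomic_fetch_add(), returns old value */
    int32_t old = __atomic_fetch_add(&counter, 5, __ATOMIC_SEQ_CST);

    /* my_atomic_cas32(&counter, &expected, 10): on failure the current value
       is written back into *expected, matching the old my_atomic contract
       of updating *cmp. */
    expected = 5;
    int ok = __atomic_compare_exchange_n(&counter, &expected, 10, 0,
                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);

    /* my_atomic_load32(&counter) -> __atomic_load_n() */
    printf("old=%d ok=%d now=%d\n", old, ok,
           __atomic_load_n(&counter, __ATOMIC_SEQ_CST));
    return 0;
}

Compiled with gcc -std=c11 this prints old=0 ok=1 now=10: the fetch-add returns the previous value, and the strong compare-exchange succeeds because expected matches the stored 5. The patch relies on the same builtins for the 64-bit and pointer variants, so no per-architecture atomic/*.h backend is needed.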