Atomics: Make naming more obvious about which value is being returned
@@ -77,13 +77,13 @@
 /* Function prototypes. */
 
 #if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
-ATOMIC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
-ATOMIC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
+ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x);
+ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x);
 ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _new);
 #endif
 
-ATOMIC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
-ATOMIC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
+ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x);
+ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x);
 ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _new);
 
 ATOMIC_INLINE uint32_t atomic_fetch_and_add_uint32(uint32_t *p, uint32_t x);
@@ -93,18 +93,18 @@ ATOMIC_INLINE uint32_t atomic_fetch_and_and_uint32(uint32_t *p, uint32_t x);
 ATOMIC_INLINE uint8_t atomic_fetch_and_or_uint8(uint8_t *p, uint8_t b);
 ATOMIC_INLINE uint8_t atomic_fetch_and_and_uint8(uint8_t *p, uint8_t b);
 
-ATOMIC_INLINE size_t atomic_add_z(size_t *p, size_t x);
-ATOMIC_INLINE size_t atomic_sub_z(size_t *p, size_t x);
+ATOMIC_INLINE size_t atomic_add_and_fetch_z(size_t *p, size_t x);
+ATOMIC_INLINE size_t atomic_sub_and_fetch_z(size_t *p, size_t x);
 ATOMIC_INLINE size_t atomic_cas_z(size_t *v, size_t old, size_t _new);
 
-ATOMIC_INLINE unsigned atomic_add_u(unsigned *p, unsigned x);
-ATOMIC_INLINE unsigned atomic_sub_u(unsigned *p, unsigned x);
+ATOMIC_INLINE unsigned atomic_add_and_fetch_u(unsigned *p, unsigned x);
+ATOMIC_INLINE unsigned atomic_sub_and_fetch_u(unsigned *p, unsigned x);
 ATOMIC_INLINE unsigned atomic_cas_u(unsigned *v, unsigned old, unsigned _new);
 
 /* WARNING! Float 'atomics' are really faked ones, those are actually closer to some kind of spinlock-sync'ed operation,
  * which means they are only efficient if collisions are highly unlikely (i.e. if probability of two threads
  * working on the same pointer at the same time is very low). */
-ATOMIC_INLINE float atomic_add_fl(float *p, const float x);
+ATOMIC_INLINE float atomic_add_and_fetch_fl(float *p, const float x);
 
 /******************************************************************************/
 /* Include system-dependent implementations. */
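Note (not part of the commit): the new names follow the GCC-style convention in which *_add_and_fetch_*() returns the value *after* the operation, while the pre-existing *_fetch_and_*() helpers return the value held *before* it. A minimal standalone sketch of that convention, using the GCC/Clang __sync builtins that the POSIX implementation further down is built on:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t counter = 10;

        /* "add_and_fetch": returns the value after the addition. */
        uint32_t after = __sync_add_and_fetch(&counter, 5);
        assert(after == 15 && counter == 15);

        /* "fetch_and_add": returns the value before the addition. */
        uint32_t before = __sync_fetch_and_add(&counter, 5);
        assert(before == 15 && counter == 20);

        return 0;
    }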
@@ -56,25 +56,25 @@
 
 /******************************************************************************/
 /* size_t operations. */
-ATOMIC_INLINE size_t atomic_add_z(size_t *p, size_t x)
+ATOMIC_INLINE size_t atomic_add_and_fetch_z(size_t *p, size_t x)
 {
     assert(sizeof(size_t) == LG_SIZEOF_PTR);
 
 #if (LG_SIZEOF_PTR == 8)
-    return (size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x);
+    return (size_t)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)x);
 #elif (LG_SIZEOF_PTR == 4)
-    return (size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x);
+    return (size_t)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)x);
 #endif
 }
 
-ATOMIC_INLINE size_t atomic_sub_z(size_t *p, size_t x)
+ATOMIC_INLINE size_t atomic_sub_and_fetch_z(size_t *p, size_t x)
 {
     assert(sizeof(size_t) == LG_SIZEOF_PTR);
 
 #if (LG_SIZEOF_PTR == 8)
-    return (size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
+    return (size_t)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
 #elif (LG_SIZEOF_PTR == 4)
-    return (size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
+    return (size_t)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
 #endif
 }
 
@@ -91,25 +91,25 @@ ATOMIC_INLINE size_t atomic_cas_z(size_t *v, size_t old, size_t _new)
 
 /******************************************************************************/
 /* unsigned operations. */
-ATOMIC_INLINE unsigned atomic_add_u(unsigned *p, unsigned x)
+ATOMIC_INLINE unsigned atomic_add_and_fetch_u(unsigned *p, unsigned x)
 {
     assert(sizeof(unsigned) == LG_SIZEOF_INT);
 
 #if (LG_SIZEOF_INT == 8)
-    return (unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x);
+    return (unsigned)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)x);
 #elif (LG_SIZEOF_INT == 4)
-    return (unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x);
+    return (unsigned)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)x);
 #endif
 }
 
-ATOMIC_INLINE unsigned atomic_sub_u(unsigned *p, unsigned x)
+ATOMIC_INLINE unsigned atomic_sub_and_fetch_u(unsigned *p, unsigned x)
 {
     assert(sizeof(unsigned) == LG_SIZEOF_INT);
 
 #if (LG_SIZEOF_INT == 8)
-    return (unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
+    return (unsigned)atomic_add_and_fetch_uint64((uint64_t *)p, (uint64_t)-((int64_t)x));
 #elif (LG_SIZEOF_INT == 4)
-    return (unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
+    return (unsigned)atomic_add_and_fetch_uint32((uint32_t *)p, (uint32_t)-((int32_t)x));
 #endif
 }
 
@@ -127,7 +127,7 @@ ATOMIC_INLINE unsigned atomic_cas_u(unsigned *v, unsigned old, unsigned _new)
 /******************************************************************************/
 /* float operations. */
 
-ATOMIC_INLINE float atomic_add_fl(float *p, const float x)
+ATOMIC_INLINE float atomic_add_and_fetch_fl(float *p, const float x)
 {
     assert(sizeof(float) == sizeof(uint32_t));
 
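Note (not part of the commit): these size_t/unsigned wrappers have no dedicated subtraction primitive; atomic_sub_and_fetch_z() and atomic_sub_and_fetch_u() reuse the add primitive with the two's-complement negation of the operand, e.g. (uint64_t)-((int64_t)x). A small, non-atomic sketch of the arithmetic identity they rely on (unsigned addition wraps modulo the type width, so adding the negation subtracts):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t p = 100;
        uint64_t x = 7;

        /* Adding the two's-complement negation of x is equivalent to
         * subtracting x, because uint64_t arithmetic wraps modulo 2^64. */
        p += (uint64_t)-((int64_t)x);
        assert(p == 93);

        return 0;
    }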
@@ -43,12 +43,12 @@
 /******************************************************************************/
 /* 64-bit operations. */
 #if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
-ATOMIC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
     return InterlockedExchangeAdd64((int64_t *)p, (int64_t)x) + x;
 }
 
-ATOMIC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
     return InterlockedExchangeAdd64((int64_t *)p, -((int64_t)x)) - x;
 }
@@ -61,12 +61,12 @@ ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _ne
 
 /******************************************************************************/
 /* 32-bit operations. */
-ATOMIC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
     return InterlockedExchangeAdd(p, x) + x;
 }
 
-ATOMIC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
     return InterlockedExchangeAdd(p, -((int32_t)x)) - x;
 }
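Note (not part of the commit): InterlockedExchangeAdd/InterlockedExchangeAdd64 return the value the destination held *before* the addition, so the Windows implementation above adds (or subtracts) x once more to deliver the and_fetch result the new names promise. A portable model of the same adaptation, sketched with C11 atomics (my_add_and_fetch_uint32 is a hypothetical name, not part of this library):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdint.h>

    static uint32_t my_add_and_fetch_uint32(_Atomic uint32_t *p, uint32_t x)
    {
        /* atomic_fetch_add(), like InterlockedExchangeAdd(), returns the old
         * value; adding x once more yields the post-operation value. */
        return atomic_fetch_add(p, x) + x;
    }

    int main(void)
    {
        _Atomic uint32_t v = 10;

        assert(my_add_and_fetch_uint32(&v, 5) == 15);
        assert(atomic_load(&v) == 15);

        return 0;
    }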
@@ -58,12 +58,12 @@
 /* 64-bit operations. */
 #if (LG_SIZEOF_PTR == 8 || LG_SIZEOF_INT == 8)
 # if (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
-ATOMIC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
     return __sync_add_and_fetch(p, x);
 }
 
-ATOMIC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
     return __sync_sub_and_fetch(p, x);
 }
@@ -73,7 +73,7 @@ ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _ne
     return __sync_val_compare_and_swap(v, old, _new);
 }
 # elif (defined(__amd64__) || defined(__x86_64__))
-ATOMIC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_add_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
     asm volatile (
         "lock; xaddq %0, %1;"
@@ -83,7 +83,7 @@ ATOMIC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x)
     return x;
 }
 
-ATOMIC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x)
+ATOMIC_INLINE uint64_t atomic_sub_and_fetch_uint64(uint64_t *p, uint64_t x)
 {
     x = (uint64_t)(-(int64_t)x);
     asm volatile (
@@ -112,12 +112,12 @@ ATOMIC_INLINE uint64_t atomic_cas_uint64(uint64_t *v, uint64_t old, uint64_t _ne
 /******************************************************************************/
 /* 32-bit operations. */
 #if (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
-ATOMIC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
     return __sync_add_and_fetch(p, x);
 }
 
-ATOMIC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
     return __sync_sub_and_fetch(p, x);
 }
@@ -127,7 +127,7 @@ ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _ne
     return __sync_val_compare_and_swap(v, old, _new);
 }
 #elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
-ATOMIC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_add_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
     uint32_t ret = x;
     asm volatile (
@@ -138,7 +138,7 @@ ATOMIC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x)
     return ret+x;
 }
 
-ATOMIC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x)
+ATOMIC_INLINE uint32_t atomic_sub_and_fetch_uint32(uint32_t *p, uint32_t x)
 {
     ret = (uint32_t)(-(int32_t)x);
     asm volatile (
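Note (not part of the commit): on the hand-written x86 paths, lock; xadd stores the sum into memory and leaves the *previous* memory contents in the source register, which is why the 32-bit version keeps a copy of x in ret and returns ret + x to honour the and_fetch naming. A standalone, x86/x86_64-only sketch of that pattern for GCC/Clang (xadd_add_and_fetch_uint32 is a hypothetical helper name, not part of this library, and its asm constraints are written independently of the file above):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t xadd_add_and_fetch_uint32(uint32_t *p, uint32_t x)
    {
        uint32_t ret = x;
        __asm__ __volatile__ (
            "lock; xaddl %0, %1;"
            : "+r" (ret), "+m" (*p)  /* register receives the old *p, memory receives the sum */
            :
            : "memory", "cc");
        return ret + x;  /* old value + x == new value */
    }

    int main(void)
    {
        uint32_t v = 10;

        assert(xadd_add_and_fetch_uint32(&v, 5) == 15);
        assert(v == 15);

        return 0;
    }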