/*
* Atomic operations (Architecture Dependent)
*/
-#define R_ATOMIC_CMPXCHG(ptr, oldval, newval, resptr) \
- do {*resptr = __sync_val_compare_and_swap(ptr, oldval, newval); } while (0)
+#define R_ATOMIC_CMPXCHG(ptr, oldval, newval, res) \
+ do { res = __sync_val_compare_and_swap(ptr, oldval, newval); } while (0)
#define R_ATOMIC_XCHG(ptr, val) \
- do {val = __sync_lock_test_and_set(ptr, val); } while (0)
+ do { val = __sync_lock_test_and_set(ptr, val); } while (0)
-#define R_ATOMIC_ADD(ptr, val) \
- do { __sync_fetch_and_add(ptr, val); } while (0)
+#define R_ATOMIC_ADD(ptr, val, res) \
+ do { res = __sync_fetch_and_add(ptr, val); } while (0)
-#define R_ATOMIC_SUB(ptr, val) \
- do { __sync_fetch_and_sub(ptr, val); } while (0)
+#define R_ATOMIC_SUB(ptr, val, res) \
+ do { res = __sync_fetch_and_sub(ptr, val); } while (0)
+
+#define R_ATOMIC_GET(ptr, res) \
+ do { __sync_synchronize (); res = *ptr; } while (0)
+
+#define R_ATOMIC_SET(ptr, val) \
+ do { *ptr = val; __sync_synchronize (); } while (0)
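
/*
 * Usage sketch, not part of the patch: with the new `res` argument the
 * macros hand back the value seen before the operation, so a caller such
 * as this hypothetical reference-count release can test the old count
 * without a second, racy read.
 */
static int hypothetical_release(volatile ratomic_t *refcnt)
{
	ratomic_t prev = 0;
	R_ATOMIC_SUB(refcnt, 1, prev);	/* prev = count before the subtraction */
	return (prev == 1);		/* nonzero when the last reference was dropped */
}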
/*
* Atomic operations (Architecture Dependent)
*/
-#define R_ATOMIC_CMPXCHG(ptr, oldval, newval, resptr) \
- do { __asm__ __volatile__ ("lock; cmpxchgl %2, %1" \
- : "=a" (*(resptr)), "=m" (*ptr) \
- : "r" (newval), "m" (*ptr), "0" (oldval)); } while (0)
+#define R_ATOMIC_CMPXCHG(ptr, oldval, newval, res) \
+ do { res = __sync_val_compare_and_swap(ptr, oldval, newval); } while (0)
#define R_ATOMIC_XCHG(ptr, val) \
- do { __asm__ __volatile__("lock; xchgl %0,%1" \
- :"=r" ((ruint32) val) \
- :"m" (*(volatile ruint32 *)ptr), "0" (val) \
- :"memory"); } while (0)
-
-#define R_ATOMIC_ADD(ptr, val) \
- do { __asm__ __volatile__ ("addl %1,%0" \
- : "=m" (*ptr) \
- : "ir" (val), "m" (*ptr)); } while (0)
-
-#define R_ATOMIC_SUB(ptr, val) \
- do { __asm__ __volatile__ ("subl %1,%0" \
- : "=m" (*ptr) \
- : "ir" (val), "m" (*ptr)); } while (0)
+ do { val = __sync_lock_test_and_set(ptr, val); } while (0)
+
+#define R_ATOMIC_ADD(ptr, val, res) \
+ do { res = __sync_fetch_and_add(ptr, val); } while (0)
+
+#define R_ATOMIC_SUB(ptr, val, res) \
+ do { res = __sync_fetch_and_sub(ptr, val); } while (0)
+
+#define R_ATOMIC_GET(ptr, res) \
+ do { __sync_synchronize (); res = *ptr; } while (0)
+
+#define R_ATOMIC_SET(ptr, val) \
+ do { *ptr = val; __sync_synchronize (); } while (0)
#define R_DEBUG_BRAKE __asm__ ("int $3")
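
/*
 * Sketch of the success test the new convention implies (the helper name
 * is hypothetical): __sync_val_compare_and_swap() returns the value found
 * at *ptr, so the swap took effect exactly when res equals oldval.
 */
static rboolean hypothetical_try_claim(volatile ratomic_t *slot)
{
	ratomic_t res = 0;
	R_ATOMIC_CMPXCHG(slot, 0, 1, res);	/* attempt the 0 -> 1 transition */
	return (res == 0);			/* true only if this thread swapped */
}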
+++ /dev/null
-/*
- * Regular Pattern Analyzer Toolkit (RPA/Tk)
- * Copyright (c) 2009-2012 Martin Stoilov
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * Martin Stoilov <martin@rpasearch.com>
- */
-
-#ifndef _RPADEF_H_
-#define _RPADEF_H_
-
-
-typedef unsigned long rpa_word_t;
-typedef long rpa_sword_t;
-typedef unsigned short rpa_wchar_t;
-
-#ifndef NULL
-#ifdef __cplusplus
-#define NULL 0
-#else
-#define NULL ((void *)0)
-#endif
-#endif
-
-#endif
/*
* Atomic operations (Architecture Dependent)
*/
-#define R_ATOMIC_CMPXCHG(ptr, oldval, newval, resptr) \
- do { __asm__ __volatile__ ("lock; cmpxchgl %2, %1" \
- : "=a" (*(resptr)), "=m" (*ptr) \
- : "r" (newval), "m" (*ptr), "0" (oldval)); } while (0)
+#define R_ATOMIC_CMPXCHG(ptr, oldval, newval, res) \
+ do { res = __sync_val_compare_and_swap(ptr, oldval, newval); } while (0)
#define R_ATOMIC_XCHG(ptr, val) \
- do { __asm__ __volatile__("lock; xchgl %0,%1" \
- :"=r" ( val) \
- :"m" (*(volatile ratomic_t *)ptr), "0" (val) \
- :"memory"); } while (0)
+ do { val = __sync_lock_test_and_set(ptr, val); } while (0)
-#define R_ATOMIC_ADD(ptr, val) \
- do { __asm__ __volatile__ ("addl %1,%0" \
- : "=m" (*ptr) \
- : "ir" (val), "m" (*ptr)); } while (0)
+#define R_ATOMIC_ADD(ptr, val, res) \
+ do { res = __sync_fetch_and_add(ptr, val); } while (0)
-#define R_ATOMIC_SUB(ptr, val) \
- do { __asm__ __volatile__ ("subl %1,%0" \
- : "=m" (*ptr) \
- : "ir" (val), "m" (*ptr)); } while (0)
+#define R_ATOMIC_SUB(ptr, val, res) \
+ do { res = __sync_fetch_and_sub(ptr, val); } while (0)
+#define R_ATOMIC_GET(ptr, res) \
+ do { __sync_synchronize (); res = *ptr; } while (0)
+
+#define R_ATOMIC_SET(ptr, val) \
+ do { *ptr = val; __sync_synchronize (); } while (0)
#define R_DEBUG_BRAKE __asm__ ("int $3")
#define R_ASSERT(__a__) do {if (!(__a__)) R_DEBUG_BRAKE; } while (0)
/*
* Atomic operations (Architecture Dependent)
*/
-#define R_ATOMIC_CMPXCHG(ptr, oldval, newval, resptr) \
- do { InterlockedCompareExchange (ptr, newval, oldval); *resptr = *ptr; } while (0)
+#define R_ATOMIC_CMPXCHG(ptr, oldval, newval, res) \
+ do { res = InterlockedCompareExchange (ptr, newval, oldval); } while (0)
#define R_ATOMIC_XCHG(ptr, val) \
do { val = InterlockedExchange (ptr, val); } while (0)
-#define R_ATOMIC_ADD(ptr, val) \
- do { InterlockedExchangeAdd (ptr, val); } while (0)
+#define R_ATOMIC_ADD(ptr, val, res) \
+ do { res = InterlockedExchangeAdd (ptr, val); } while (0)
-#define R_ATOMIC_SUB(ptr, val) \
- do { InterlockedExchangeAdd (ptr, -val); } while (0)
+#define R_ATOMIC_SUB(ptr, val, res) \
+ do { res = InterlockedExchangeSubtract (ptr, val); } while (0)
+
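+/*
+ * Note: the plain load/store below leans on MSVC volatile semantics
+ * (acquire/release under /volatile:ms); no explicit fence is issued,
+ * unlike the __sync_synchronize() in the GCC builds above.
+ */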
+#define R_ATOMIC_GET(ptr, res) \
+ do { res = *ptr; } while (0)
+
+#define R_ATOMIC_SET(ptr, val) \
+ do { *ptr = val; } while (0)
#define R_DEBUG_BRAKE do { __asm int 3 } while (0)
rboolean r_atomic_compare_and_exchange(volatile ratomic_t *ptr, ratomic_t oldval, ratomic_t newval)
{
- volatile ratomic_t result;
+ volatile ratomic_t res;
- R_ATOMIC_CMPXCHG(ptr, oldval, newval, &result);
- return (result == oldval);
+ R_ATOMIC_CMPXCHG(ptr, oldval, newval, res);
+ return (res == oldval);
}
}
-void r_atomic_add(volatile ratomic_t *ptr, ratomic_t val)
+ratomic_t r_atomic_add(volatile ratomic_t *ptr, ratomic_t val)
{
- R_ATOMIC_ADD(ptr, val);
+ ratomic_t res = 0;
+ R_ATOMIC_ADD(ptr, val, res);
+ return res;
}
-void r_atomic_sub(volatile ratomic_t *ptr, ratomic_t val)
+ratomic_t r_atomic_sub(volatile ratomic_t *ptr, ratomic_t val)
{
- R_ATOMIC_SUB(ptr, val);
+ ratomic_t res = 0;
+ R_ATOMIC_SUB(ptr, val, res);
+ return res;
+}
+
+
+ratomic_t r_atomic_get(volatile ratomic_t *ptr)
+{
+ ratomic_t res;
+ R_ATOMIC_GET(ptr, res);
+ return res;
+}
+
+
+void r_atomic_set(volatile ratomic_t *ptr, ratomic_t val)
+{
+ R_ATOMIC_SET(ptr, val);
}
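

/*
 * Usage sketch (hypothetical poll loop): r_atomic_get() issues a full
 * fence before each load and r_atomic_set() one after its store.
 */
static void hypothetical_wait_nonzero(volatile ratomic_t *flag)
{
	while (r_atomic_get(flag) == 0)
		;	/* spin until another thread calls r_atomic_set(flag, n) */
}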
rboolean r_atomic_compare_and_exchange(volatile ratomic_t *ptr, ratomic_t oldval, ratomic_t newval);
ratomic_t r_atomic_exchange(volatile ratomic_t *ptr, volatile ratomic_t val);
-void r_atomic_add(volatile ratomic_t *ptr, ratomic_t val);
-void r_atomic_sub(volatile ratomic_t *ptr, ratomic_t val);
+ratomic_t r_atomic_add(volatile ratomic_t *ptr, ratomic_t val);
+ratomic_t r_atomic_sub(volatile ratomic_t *ptr, ratomic_t val);
+ratomic_t r_atomic_get(volatile ratomic_t *ptr);
+void r_atomic_set(volatile ratomic_t *ptr, ratomic_t val);
#ifdef __cplusplus
}
void r_spinlock_init(rspinlock_t *lock)
{
- *lock = 0;
+ r_atomic_set(lock, 0);
}
void r_spinlock_lock(rspinlock_t *lock)
{
while (1)
{
- if (!r_atomic_exchange(lock, R_SPINLOCK_BUSY))
+ if (r_atomic_compare_and_exchange(lock, 0, R_SPINLOCK_BUSY))
return;
- while (*lock) {
- /*
- * Spin...
- */
- }
+
}
}
rboolean r_spinlock_trylock(rspinlock_t *lock)
{
- return (!r_atomic_exchange(lock, R_SPINLOCK_BUSY));
+ return (r_atomic_compare_and_exchange(lock, 0, R_SPINLOCK_BUSY));
}
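
/*
 * Usage sketch (hypothetical caller; r_spinlock_unlock() is assumed to
 * exist alongside the functions above and to store 0 back into the lock):
 */
static void hypothetical_bump(rspinlock_t *lock, volatile ratomic_t *counter)
{
	r_spinlock_lock(lock);
	r_atomic_add(counter, 1);
	r_spinlock_unlock(lock);
}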
LDFLAGS = $(MACH)
-include ../testrpa2.mk
+include ../testrex.mk