1 #ifndef DEF_PTHREAD_TOOLS_HPP
2 #define DEF_PTHREAD_TOOLS_HPP
18 #undef _POSIX_SPIN_LOCKS
19 #define _POSIX_SPIN_LOCKS -1
/**
 * Simple RAII-style wrapper around pthread_mutex_t.
 * The member is mutable so that lock()/unlock()/try_lock() can be const:
 * a logically-const object may still synchronize on its mutex.
 */
class mutex {
 private:
  //! Underlying POSIX mutex (default, non-recursive attributes).
  mutable pthread_mutex_t m_mut;
 public:
  //! Initializes the mutex; initialization failure is a fatal error.
  mutex() {
    if (pthread_mutex_init(&m_mut, NULL) != 0) __builtin_trap();
  }
  //! Blocks until the mutex is acquired.
  inline void lock() const {
    // pthread_mutex_lock only fails on misuse (e.g. re-locking a
    // non-recursive mutex); treat that as a fatal programming error
    // instead of silently ignoring the return value.
    if (pthread_mutex_lock(&m_mut) != 0) __builtin_trap();
  }
  //! Releases the mutex.  Must be called by the thread that holds it.
  inline void unlock() const {
    if (pthread_mutex_unlock(&m_mut) != 0) __builtin_trap();
  }
  //! Non-blocking acquire attempt; returns true iff the lock was taken.
  inline bool try_lock() const {
    return pthread_mutex_trylock(&m_mut) == 0;
  }
  //! Destroying a still-locked mutex is undefined; trap on failure.
  ~mutex() {
    if (pthread_mutex_destroy(&m_mut) != 0) __builtin_trap();
  }
  // conditional::wait()/timedwait() need direct access to m_mut.
  friend class conditional;
};
#if _POSIX_SPIN_LOCKS >= 0
/**
 * Wrapper around pthread_spinlock_t.  A spin lock busy-waits instead of
 * sleeping, so it is only appropriate for very short critical sections.
 * This branch is compiled only when the platform advertises POSIX spin
 * locks (note: _POSIX_SPIN_LOCKS is forced negative at the top of this
 * file, so this branch is normally disabled).
 */
class spinlock {
 private:
  //! Underlying POSIX spin lock (mutable: lock ops are const members).
  mutable pthread_spinlock_t m_spin;
 public:
  //! PTHREAD_PROCESS_PRIVATE: the lock is not shared across processes.
  spinlock() {
    if (pthread_spin_init(&m_spin, PTHREAD_PROCESS_PRIVATE) != 0) __builtin_trap();
  }
  //! Busy-waits until the lock is acquired.
  inline void lock() const {
    if (pthread_spin_lock(&m_spin) != 0) __builtin_trap();
  }
  //! Releases the lock.
  inline void unlock() const {
    if (pthread_spin_unlock(&m_spin) != 0) __builtin_trap();
  }
  //! Non-blocking attempt; returns true iff the lock was taken.
  inline bool try_lock() const {
    return pthread_spin_trylock(&m_spin) == 0;
  }
  ~spinlock() {
    if (pthread_spin_destroy(&m_spin) != 0) __builtin_trap();
  }
  friend class conditional;
};
#define SPINLOCK_SUPPORTED 1
#else
// No POSIX spin locks available (or deliberately disabled above): alias to
// the plain mutex so code written against the spinlock type still compiles.
// NOTE(review): the fallback typedef is inferred from the surrounding
// structure — confirm against the original file.
typedef mutex spinlock;
#define SPINLOCK_SUPPORTED 0
#endif
117 mutable pthread_cond_t m_cond;
120 int error = pthread_cond_init(&m_cond, NULL);
123 inline void wait(
const mutex& mut)
const {
124 int error = pthread_cond_wait(&m_cond, &mut.m_mut);
127 inline int timedwait(
const mutex& mut,
int sec)
const {
128 struct timespec timeout;
131 gettimeofday(&tv, &tz);
133 timeout.tv_sec = tv.tv_sec + sec;
134 return pthread_cond_timedwait(&m_cond, &mut.m_mut, &timeout);
136 inline void signal()
const {
137 int error = pthread_cond_signal(&m_cond);
140 inline void broadcast()
const {
141 int error = pthread_cond_broadcast(&m_cond);
145 int error = pthread_cond_destroy(&m_cond);
/**
 * Wrapper around an unnamed POSIX counting semaphore with initial count 0.
 * post() increments the count; wait() blocks until the count is positive
 * and then decrements it.
 */
class semaphore {
 private:
  //! Underlying semaphore (mutable: post()/wait() are const members).
  mutable sem_t m_sem;
 public:
  semaphore() {
    // Args: pshared = 0 (shared between threads of this process only),
    // initial count = 0.
    if (sem_init(&m_sem, 0, 0) != 0) __builtin_trap();
  }
  //! Increments the semaphore, releasing one waiter if any.
  inline void post() const {
    if (sem_post(&m_sem) != 0) __builtin_trap();
  }
  //! Blocks until the count is positive, then decrements it.
  //! NOTE(review): sem_wait may fail with EINTR on signal delivery; the
  //! original code treated any failure as fatal, preserved here.
  inline void wait() const {
    if (sem_wait(&m_sem) != 0) __builtin_trap();
  }
  ~semaphore() {
    if (sem_destroy(&m_sem) != 0) __builtin_trap();
  }
};
// GCC/Clang atomic primitives used by the spinning reader/writer lock below.
//
//   atomic_xadd(P, V) : fetch-and-add — returns the OLD value of *P.
//   cmpxchg(P, O, N)  : compare-and-swap — if *P == O then *P = N; always
//                       returns the value of *P observed before the swap.
//   atomic_inc(P)     : add-and-fetch — returns the NEW value of *P.
//
// All three are the legacy __sync builtins, which act as full memory
// barriers.
#define atomic_xadd(P, V) __sync_fetch_and_add((P), (V))
#define cmpxchg(P, O, N) __sync_val_compare_and_swap((P), (O), (N))
#define atomic_inc(P) __sync_add_and_fetch((P), 1)
// NOTE(review): garbled listing of a spinning ticket-style reader/writer
// lock built on the atomic_* macros above.  Several original lines are
// elided from this listing, so the comments below annotate only what is
// visible — do not reconstruct the missing lines without the original file.
// Presumably the byte-wide turn counters (write / read / users) of the
// 'rwticket' union referenced below — TODO confirm against the original.
195 __extension__
struct {
// Set while the lock is held in write mode so unlock() can dispatch to the
// matching release routine.
201 mutable bool writing;
// The ticket word.  mutable: const members mutate it; volatile: the spin
// loops below re-read it on every iteration.
202 mutable volatile rwticket l;
// Constructor fragment: zero every ticket counter (const_cast strips the
// volatile qualifier for memset's benefit).
205 memset(const_cast<rwticket*>(&l), 0,
sizeof(rwticket));
// Writer acquire: take a ticket by atomically bumping the third byte
// (bits 16..23) of the ticket word...
207 inline void writelock()
const {
208 unsigned me = atomic_xadd(&l.u, (1<<16));
209 unsigned char val = me >> 16;
// ...then spin (yielding the CPU) until the write-turn counter reaches
// this thread's ticket number.
211 while (val != l.s.write) sched_yield();
// Writer release fragment: snapshot the ticket word...
215 inline void wrunlock()
const{
216 rwticket t = *
const_cast<rwticket*
>(&l);
// ...and publish the low 16 bits (write and read turn counters, presumably
// incremented in elided lines) with a single 16-bit store.
221 *(
volatile unsigned short *) (&l) = t.us;
// Reader acquire: same ticket scheme, but spins on the read-turn counter.
226 inline void readlock()
const {
227 unsigned me = atomic_xadd(&l.u, (1<<16));
228 unsigned char val = me >> 16;
230 while (val != l.s.read) sched_yield();
// Reader release: advancing the write-turn counter hands the lock to the
// next waiting writer (or lets the next batch of readers proceed).
234 inline void rdunlock()
const {
235 atomic_inc(&l.s.write);
// Generic release: dispatch on how the lock was taken.
238 inline void unlock()
const {
239 if (!writing) rdunlock();
/**
 * Wrapper around pthread_rwlock_t: allows many concurrent readers OR a
 * single writer.  Same lock/unlock interface as the spinning rwlock above
 * so the two are interchangeable.
 */
class rwlock {
 private:
  //! Underlying POSIX rwlock (mutable: lock ops are const members).
  mutable pthread_rwlock_t m_rwlock;
 public:
  rwlock() {
    if (pthread_rwlock_init(&m_rwlock, NULL) != 0) __builtin_trap();
  }
  ~rwlock() {
    if (pthread_rwlock_destroy(&m_rwlock) != 0) __builtin_trap();
  }
  //! Acquires a shared (read) lock; blocks while a writer holds the lock.
  inline void readlock() const {
    pthread_rwlock_rdlock(&m_rwlock);
  }
  //! Acquires the exclusive (write) lock; blocks until all holders release.
  inline void writelock() const {
    pthread_rwlock_wrlock(&m_rwlock);
  }
  //! Releases whichever lock this thread holds (POSIX uses a single
  //! unlock call for both modes).
  inline void unlock() const {
    pthread_rwlock_unlock(&m_rwlock);
  }
  //! Interface parity with the spinning rwlock: release a read lock.
  //! NOTE(review): body inferred as a forward to unlock() — the original
  //! body lines are elided; confirm against the original file.
  inline void rdunlock() const {
    unlock();
  }
  //! Interface parity with the spinning rwlock: release the write lock.
  inline void wrunlock() const {
    unlock();
  }
};
/**
 * Wrapper around pthread_barrier_t: wait() blocks until 'numthreads'
 * threads have all called it, then releases them together.
 */
class barrier {
 private:
  //! Underlying POSIX barrier (mutable: wait() is a const member).
  mutable pthread_barrier_t m_barrier;
 public:
  //! numthreads: number of threads that must reach wait() to release it.
  //! (Kept implicit, as in the original, so existing callers compile.)
  barrier(size_t numthreads) { pthread_barrier_init(&m_barrier, NULL, numthreads); }
  ~barrier() { pthread_barrier_destroy(&m_barrier); }
  //! Blocks until all participating threads arrive.
  inline void wait() const { pthread_barrier_wait(&m_barrier); }
};
// NOTE(review): fragment of a condition-variable based barrier (presumably
// the fallback used where pthread_barrier_t is unavailable).  Most of the
// original lines are elided from this listing; comments annotate only the
// visible pieces.
// Per-thread wait flags: waiting[i] is nonzero while thread i is parked.
316 std::vector<unsigned char> waiting;
// Number of threads that must arrive before the barrier releases.
320 needed = (int)numthreads;
322 waiting.resize(numthreads);
323 std::fill(waiting.begin(), waiting.end(), 0);
// Each arriving thread takes the current arrival count as its slot id.
332 size_t myid = called;
// Last thread to arrive: reset all flags (and presumably broadcast, in
// elided lines) to release the parked threads.
336 if (called == needed) {
342 std::fill(waiting.begin(), waiting.end(), 0);
// Non-final arrivals sleep on the condition variable until their flag is
// cleared ('c' and 'm' are the barrier's conditional/mutex members; the
// while-loop guards against spurious wakeups).
347 while(waiting[myid]) c.wait(m);
//! Hints the CPU to prefetch the byte range [addr, addr + len) for READING,
//! one cache line at a time (64-byte line size assumed).  Purely a
//! performance hint — has no observable effect; safe to call with len == 0.
inline void prefetch_range(void *addr, size_t len) {
  char *end = static_cast<char*>(addr) + len;
  // __builtin_prefetch(p, 0): second argument 0 = prefetch for read.
  for (char *cp = static_cast<char*>(addr); cp < end; cp += 64)
    __builtin_prefetch(cp, 0);
}
//! Hints the CPU to prefetch the byte range [addr, addr + len) for WRITING
//! (lines fetched in exclusive state), one 64-byte cache line at a time.
//! Purely a performance hint — has no observable effect; safe with len == 0.
inline void prefetch_range_write(void *addr, size_t len) {
  char *end = static_cast<char*>(addr) + len;
  // __builtin_prefetch(p, 1): second argument 1 = prefetch for write.
  for (char *cp = static_cast<char*>(addr); cp < end; cp += 64)
    __builtin_prefetch(cp, 1);
}