Notes on Locks
Spinlock
A non-scalable implementation in C++
# if defined __GNUC__
# include <pthread.h>

class Spinlock {
# if (__GNUC__ >= 4)
private:
    pthread_spinlock_t _spinlock;

public:
    Spinlock()  { pthread_spin_init(&_spinlock, PTHREAD_PROCESS_PRIVATE); }
    ~Spinlock() { pthread_spin_destroy(&_spinlock); }

    void lock()   { pthread_spin_lock(&_spinlock); }
    void unlock() { pthread_spin_unlock(&_spinlock); }
# else
private:
    volatile int _inter_lock;

public:
    Spinlock() : _inter_lock(0) { }

    void lock() {
        register int temp;
        __asm__ __volatile__ (
            "1:               \n"
            "  cmp $0, %0     \n"   // cmp(0, _inter_lock)
            "  je 2f          \n"   // if _inter_lock == 0, try to take it
            "  pause          \n"
            "  jmp 1b         \n"   // otherwise keep spinning
            "2:               \n"
            "  movl $1, %1    \n"   // temp = 1
            "  xchg %0, %1    \n"   // atomically exchange(_inter_lock, temp)
            "  test %1, %1    \n"   // test(temp, temp), do AND
            "  jne 1b         \n"   // if temp != 0, another thread won; retry
            : "+m"(_inter_lock), "=&r"(temp)
            :
            : "memory", "cc" );
        // A note on `pause`:
        // `pause` gives a hint to the processor that improves the performance
        // (and power consumption) of spin-wait loops.
    }

    void unlock() {
        // _inter_lock = 0;
        __asm__ __volatile__ (
            "  movl $0, %0    \n"
            : "=m"(_inter_lock)
            :
            : "memory" );
    }
# endif  // __GNUC__ >= 4

private:
    // non-copyable
    Spinlock(const Spinlock&);
    Spinlock& operator=(const Spinlock&);
};
# endif  // __GNUC__
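For comparison, a minimal sketch of the same test-and-test-and-set idea in portable C++11 (the name SpinlockAtomic and the spin structure are my illustration, not part of the original class): spin on a plain relaxed load, which stays within the local cache, and only attempt the expensive atomic exchange once the lock looks free.

#include <atomic>

class SpinlockAtomic {
public:
    void lock() {
        for (;;) {
            // test: wait until the lock looks free (cheap, cache-local read)
            while (_locked.load(std::memory_order_relaxed))
                ;  // a pause/yield hint could go here
            // test-and-set: try to grab it; retry if another thread won
            if (!_locked.exchange(true, std::memory_order_acquire))
                return;
        }
    }
    void unlock() { _locked.store(false, std::memory_order_release); }

private:
    std::atomic<bool> _locked{false};
};

The acquire exchange and release store give the usual lock/unlock ordering; the relaxed inner loop avoids bouncing the cache line between cores on every probe.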
Pseudocode for ticket locks in Linux (the ticket lock has been the default spinlock implementation since kernel version 2.6.25, released in April 2008)
struct spinlock_t {
    int current_ticket;
    int next_ticket;
};

void spin_lock(spinlock_t *lock)
{
    int t = atomic_fetch_and_inc(&lock->next_ticket);
    while (t != lock->current_ticket)
        ;  /* spin */
}

void spin_unlock(spinlock_t *lock)
{
    lock->current_ticket++;
}
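atomic_fetch_and_inc above is pseudocode; a runnable C++11 equivalent (TicketLock is my name for it, with fetch_add standing in for atomic_fetch_and_inc) might look like this:

#include <atomic>

struct TicketLock {
    std::atomic<int> current_ticket{0};
    std::atomic<int> next_ticket{0};

    void lock() {
        // take a ticket; fetch_add serializes arriving threads in FIFO order
        int t = next_ticket.fetch_add(1, std::memory_order_relaxed);
        // wait until our number is served; acquire pairs with the release below
        while (current_ticket.load(std::memory_order_acquire) != t)
            ;  /* spin */
    }

    void unlock() {
        // publish the next ticket number, waking the next waiter
        current_ticket.fetch_add(1, std::memory_order_release);
    }
};

The point of the ticket lock is fairness: waiters are served in arrival order. The weakness is that every waiter polls the same current_ticket cache line, which is exactly why it does not scale to many cores.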
MCS spinlock (scalable; used in implementing the Linux kernel mutex)
#include <stdint.h>

// helpers assumed by the code below:
#define CACHELINE 64
static inline void nop_pause(void) { __asm__ volatile("pause"); }

// atomic exchange:
//   *ptr, val = val, *ptr
//   return val (the old *ptr)
static inline long xchg(long *ptr, long val)
{
    __asm__ volatile(
        "lock; xchgq %0, %1\n\t"
        : "+m" (*ptr), "+r" (val)
        :
        : "memory", "cc");
    return val;
}

// cmpxchg:
//   if (*ptr == old) { *ptr = val; return old; }
//   else              return *ptr;
static inline long cmpxchg(long *ptr, long old, long val)
{
    uint64_t out;
    __asm__ volatile(
        "lock; cmpxchgq %2, %1"
        : "=a" (out), "+m" (*ptr)
        : "q" (val), "0" (old)
        : "memory");
    return out;
}

struct qnode {
    volatile void *next;
    volatile char locked;    // 1 if the lock is taken (or being waited for)
    char __pad[0] __attribute__((aligned(CACHELINE)));
};

typedef struct {
    // tail of the queue of threads holding or waiting for the lock
    struct qnode *tail __attribute__((aligned(64)));
    int lock_idx __attribute__((aligned(64)));
} mcslock_t;

// initialize the main lock
static inline void
mcs_init(mcslock_t *l)
{
    l->tail = NULL;
}

static inline void
mcs_lock(mcslock_t *l, volatile struct qnode *mynode)
{
    struct qnode *predecessor;

    mynode->next = NULL;
    predecessor = (struct qnode *)xchg((long *)&l->tail, (long)mynode);
    if (predecessor) {
        mynode->locked = 1;                 // mark the lock as taken
        __asm__ volatile("" ::: "memory");  // compiler barrier
        predecessor->next = mynode;
        while (mynode->locked)              // busy-wait on our own cache line
            nop_pause();
    }
}

static inline void
mcs_unlock(mcslock_t *l, volatile struct qnode *mynode)
{
    if (!mynode->next) {  // no cores waiting?
        // atomic lock-free pop:
        // if we are the only node in the queue, reset l->tail and return
        if (cmpxchg((long *)&l->tail, (long)mynode, 0) == (long)mynode)
            return;
        // a successor is in the middle of mcs_lock(); wait for it to link in
        while (!mynode->next)
            nop_pause();
    }
    ((struct qnode *)mynode->next)->locked = 0;  // hand off the lock to the successor
}
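A hypothetical usage sketch, assuming the definitions above are in scope (worker and shared_counter are illustrative): each acquisition supplies its own qnode, which must stay alive until mcs_unlock returns.

mcslock_t lock;
long shared_counter = 0;

void worker(void)
{
    struct qnode me;          // one node per acquisition, e.g. on the stack
    mcs_lock(&lock, &me);     // enqueue ourselves; spin only on me.locked
    shared_counter++;         // critical section
    mcs_unlock(&lock, &me);   // hand off directly to our successor, if any
}

This is what makes MCS scalable: each waiter spins on its own node's locked flag in its own cache line, so a release invalidates exactly one other core's cache instead of all of them.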
For more on scalable locks, see:
Non-scalable locks are dangerous
my notes on scalable spinlocks
Mutex
Linux kernel bug: A surprise with mutexes and reference counts
Russ Cox on debugging an interesting concurrency bug
Adaptive mutex: spin for a short, bounded time in the hope that the holder releases the lock soon (typically only worthwhile while the owner is running on another CPU), then fall back to blocking; a sketch of the idea follows below.
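A minimal illustrative sketch of that idea in C++11 (AdaptiveLock, kSpinLimit, and the yield fallback are all mine; a real adaptive mutex blocks in the kernel, e.g. on a futex, rather than yielding):

#include <atomic>
#include <thread>

class AdaptiveLock {
public:
    void lock() {
        for (int spins = 0;
             _locked.exchange(true, std::memory_order_acquire); ) {
            if (++spins < kSpinLimit)
                continue;                  // spin briefly: cheap if the holder
                                           // releases within a short window
            std::this_thread::yield();     // then stop burning CPU; a real
                                           // adaptive mutex sleeps here
        }
    }
    void unlock() { _locked.store(false, std::memory_order_release); }

private:
    static const int kSpinLimit = 100;     // tuning knob: how long to spin
    std::atomic<bool> _locked{false};
};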