usermode/library/common/spinlock.h

#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  The kernel original has two variants, one
 * that clears IRQs on the local processor and one that does not; only the
 * latter makes sense in user mode and is provided here.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the arch_spinlock_t type is defined below)
 */

typedef struct arch_spinlock {
        unsigned int slock;
} arch_spinlock_t;

# define LOCK_PTR_REG "D"
# define REG_PTR_MODE "q"

/*
 * __always_inline normally comes from the kernel/toolchain headers; provide
 * a guarded fallback so this header also builds when it has not been
 * defined yet.
 */
#ifndef __always_inline
# define __always_inline inline __attribute__((always_inline))
#endif

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
#define TICKET_SHIFT 8

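/*
 * With TICKET_SHIFT == 8 the lock word splits as follows (illustration only,
 * not used by the code below):
 *
 *   head (now serving)  =  lock->slock & 0xff;                   // low byte
 *   tail (next ticket)  = (lock->slock >> TICKET_SHIFT) & 0xff;  // high byte
 *
 * The lock is free when head == tail.
 */
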
static __always_inline void __ticket_spin_lock(volatile arch_spinlock_t *lock)
{
        short inc = 0x0100;

        asm volatile (
                /* atomically bump the tail; the old (head, ticket) pair lands in %0 */
                "lock; xaddw %w0, %1\n"
                "1:\t"
                /* acquired once the head byte (%b0) equals our ticket (%h0) */
                "cmpb %h0, %b0\n\t"
                "je 2f\n\t"
                "rep ; nop\n\t"         /* pause hint while spinning */
                "movb %1, %b0\n\t"      /* re-read the current head byte */
                /* don't need lfence here, because loads are in-order */
                "jmp 1b\n"
                "2:"
                : "+Q" (inc), "+m" (lock->slock)
                :
                : "memory", "cc");
}

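/*
 * Rough C equivalent of the assembly above (sketch only; plain C cannot
 * express the atomic xaddw, so fetch_and_add()/cpu_relax() below are just
 * placeholders used to document the control flow):
 *
 *   unsigned short old = fetch_and_add(&lock->slock, 0x0100);  // take a ticket
 *   unsigned char my_ticket = old >> TICKET_SHIFT;             // old tail
 *   while (my_ticket != (unsigned char) lock->slock)           // spin until head == ticket
 *           cpu_relax();                                       // the "rep; nop" (pause) hint
 */

/*
 * The trylock variant below is left commented out; it relies on LOCK_PREFIX,
 * which this header does not define.  When the lock looks free (head == tail)
 * it attempts a single cmpxchgw that bumps the tail byte, and returns nonzero
 * only if that compare-and-swap succeeded.
 */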
/*
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
        int tmp, new;

        asm volatile("movzwl %2, %0\n\t"
                     "cmpb %h0,%b0\n\t"
                     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
                     "jne 1f\n\t"
                     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
                     "1:"
                     "sete %b1\n\t"
                     "movzbl %b1,%0\n\t"
                     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
                     :
                     : "memory", "cc");

        return tmp;
}
*/

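/*
 * Release: bump the head (now-serving) byte so that the waiter holding the
 * matching ticket can proceed.  Only the lock owner writes this byte, so a
 * plain incb (no lock prefix) is sufficient on x86; the "memory" clobber
 * keeps the compiler from reordering the critical section past the release.
 */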
static __always_inline void __ticket_spin_unlock(volatile arch_spinlock_t *lock)
{
        asm volatile("incb %0"
                     : "+m" (lock->slock)
                     :
                     : "memory", "cc");
}

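/*
 * The status helpers below are likewise commented out; they depend on
 * ACCESS_ONCE, which this header does not provide.  __ticket_spin_is_locked
 * reports head != tail, and __ticket_spin_is_contended reports more than one
 * outstanding ticket (i.e. at least one waiter behind the owner).
 */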
/*
static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
        int tmp = ACCESS_ONCE(lock->slock);

        return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
        int tmp = ACCESS_ONCE(lock->slock);

        return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
}

*/

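/*
 * Usage sketch (illustrative only; the variables and function below are not
 * part of this header):
 *
 *   static arch_spinlock_t counter_lock = { 0 };   // head == tail == 0: unlocked
 *   static int counter;
 *
 *   void increment(void)
 *   {
 *           __ticket_spin_lock(&counter_lock);
 *           counter++;
 *           __ticket_spin_unlock(&counter_lock);
 *   }
 */
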
#endif /* _ASM_X86_SPINLOCK_H */
