usermode/library/atomic_ops/x86_64.h

/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
 *
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Some of the machine specific code was borrowed from our GC distribution.
 */

#include "./aligned_atomic_load_store.h"

/* Real X86 implementations appear                                      */
/* to enforce ordering between memory operations, EXCEPT that a later   */
/* read can pass earlier writes, presumably due to the visible          */
/* presence of store buffers.                                           */
/* We ignore the fact that the official specs                           */
/* seem to be much weaker (and arguably too weak to be usable).         */

#include "./ordered_except_wr.h"

#include "./test_and_set_t_is_char.h"

#include "./standard_ao_double_t.h"
AO_INLINE void
AO_nop_full(void)
{
  /* Note: "mfence" (SSE2) is supported on all x86_64/amd64 chips.      */
  __asm__ __volatile__("mfence" : : : "memory");
}

#define AO_HAVE_nop_full
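
/* Illustrative usage sketch, not part of the original header: the
 * store-buffer reordering described above, fenced with AO_nop_full.
 * Assumes the AO_load/AO_store primitives from
 * aligned_atomic_load_store.h; the variable and function names below
 * are hypothetical. */
static volatile AO_t example_x = 0, example_y = 0;

static AO_t example_thread_one(void)
{
  AO_store(&example_x, 1);
  AO_nop_full();              /* drain the store buffer before reading */
  return AO_load(&example_y);
}

static AO_t example_thread_two(void)
{
  AO_store(&example_y, 1);
  AO_nop_full();
  return AO_load(&example_x);
}
/* Without the two fences, both functions may return 0 because the later
 * read can pass the earlier write; with them, at least one thread
 * observes the other's store. */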

/* As far as we can tell, the lfence and sfence instructions are not    */
/* currently needed or useful for cached memory accesses.               */

AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
  AO_t result;

  __asm__ __volatile__ ("lock; xaddq %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}

#define AO_HAVE_fetch_and_add_full
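
/* Illustrative usage sketch, not part of the original header: a shared
 * ticket counter.  AO_fetch_and_add_full returns the value held before
 * the addition, so concurrent callers obtain distinct tickets.
 * next_ticket and take_ticket are hypothetical names. */
static volatile AO_t next_ticket = 0;

static AO_t take_ticket(void)
{
  return AO_fetch_and_add_full(&next_ticket, 1);
}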

AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
{
  unsigned char result;

  __asm__ __volatile__ ("lock; xaddb %0, %1" :
                        "=q" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}

#define AO_HAVE_char_fetch_and_add_full

AO_INLINE unsigned short
AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
{
  unsigned short result;

  __asm__ __volatile__ ("lock; xaddw %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}

#define AO_HAVE_short_fetch_and_add_full

AO_INLINE unsigned int
AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr)
{
  unsigned int result;

  __asm__ __volatile__ ("lock; xaddl %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}

#define AO_HAVE_int_fetch_and_add_full

AO_INLINE void
AO_or_full (volatile AO_t *p, AO_t incr)
{
  __asm__ __volatile__ ("lock; orq %1, %0" :
                        "=m" (*p) : "r" (incr), "m" (*p) : "memory");
}

#define AO_HAVE_or_full
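
/* Illustrative usage sketch, not part of the original header: atomically
 * setting a flag bit in a word that other threads update concurrently.
 * The flag and variable names are hypothetical. */
#define EXAMPLE_FLAG_DIRTY ((AO_t)1 << 0)
static volatile AO_t example_flags = 0;

static void example_mark_dirty(void)
{
  AO_or_full(&example_flags, EXAMPLE_FLAG_DIRTY);
}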

AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
  unsigned char oldval;
  /* Note: the "xchg" instruction does not need a "lock" prefix */
  __asm__ __volatile__("xchgb %0, %1"
                : "=q"(oldval), "=m"(*addr)
                : "0"(0xff), "m"(*addr) : "memory");
  return (AO_TS_VAL_t)oldval;
}

#define AO_HAVE_test_and_set_full
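
/* Illustrative usage sketch, not part of the original header: a
 * test-and-set spinlock.  Assumes the AO_TS_INITIALIZER, AO_TS_SET and
 * AO_CLEAR definitions that atomic_ops.h layers on top of this
 * primitive; the lock variable and functions are hypothetical names. */
static volatile AO_TS_t example_lock = AO_TS_INITIALIZER;

static void example_lock_acquire(void)
{
  while (AO_test_and_set_full(&example_lock) == AO_TS_SET)
    ;   /* the byte was already set, i.e. another thread holds the lock */
}

static void example_lock_release(void)
{
  AO_CLEAR(&example_lock);    /* release store of AO_TS_CLEAR */
}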

/* Returns nonzero if the comparison succeeded. */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
{
# ifdef AO_USE_SYNC_CAS_BUILTIN
    return (int)__sync_bool_compare_and_swap(addr, old, new_val);
# else
    char result;
    __asm__ __volatile__("lock; cmpxchgq %3, %0; setz %1"
                         : "=m" (*addr), "=a" (result)
                         : "m" (*addr), "r" (new_val), "a" (old) : "memory");
    return (int) result;
# endif
}

#define AO_HAVE_compare_and_swap_full
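
/* Illustrative usage sketch, not part of the original header: a CAS
 * retry loop that maintains a running maximum.  Assumes AO_load from
 * aligned_atomic_load_store.h; update_max is a hypothetical name. */
static void update_max(volatile AO_t *max_so_far, AO_t candidate)
{
  AO_t cur;
  do {
    cur = AO_load(max_so_far);
    if (candidate <= cur)
      return;                 /* current maximum is already large enough */
  } while (!AO_compare_and_swap_full(max_so_far, cur, candidate));
}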

#ifdef AO_CMPXCHG16B_AVAILABLE
/* NEC LE-IT: older AMD Opterons are missing this instruction.
 * On these machines SIGILL will be raised.
 * Define AO_WEAK_DOUBLE_CAS_EMULATION to have an emulated
 * (lock-based) version available. */
/* HB: Changed this to not define either by default.  There are
 * enough machines and tool chains around on which cmpxchg16b
 * doesn't work.  And the emulation is unsafe by our usual rules.
 * However, both are clearly useful in certain cases.
 */
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
                                       AO_t old_val1, AO_t old_val2,
                                       AO_t new_val1, AO_t new_val2)
{
  char result;
  __asm__ __volatile__("lock; cmpxchg16b %0; setz %1"
                       : "=m"(*addr), "=a"(result)
                       : "m"(*addr), "d" (old_val2), "a" (old_val1),
                         "c" (new_val2), "b" (new_val1) : "memory");
  return (int) result;
}
#define AO_HAVE_compare_double_and_swap_double_full
#else
/* This one provides spinlock-based emulation of double CAS, implemented in */
/* atomic_ops.c.  We probably do not want to do this here, since it is      */
/* not atomic with respect to other kinds of updates of *addr.  On the      */
/* other hand, this may be a useful facility on occasion.                   */
#ifdef AO_WEAK_DOUBLE_CAS_EMULATION
int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *addr,
                                                AO_t old_val1, AO_t old_val2,
                                                AO_t new_val1, AO_t new_val2);

AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
                                       AO_t old_val1, AO_t old_val2,
                                       AO_t new_val1, AO_t new_val2)
{
        return AO_compare_double_and_swap_double_emulation(addr,
                                                           old_val1, old_val2,
                                                           new_val1, new_val2);
}
#define AO_HAVE_compare_double_and_swap_double_full
#endif /* AO_WEAK_DOUBLE_CAS_EMULATION */
#endif /* AO_CMPXCHG16B_AVAILABLE */
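
/* Illustrative usage sketch, not part of the original header: advancing a
 * (pointer, version) pair in one step with the double-width CAS above, so
 * the version counter guards against the ABA problem of single-word CAS.
 * The caller supplies the pair it expects to find; publish_node is a
 * hypothetical name. */
#ifdef AO_HAVE_compare_double_and_swap_double_full
static int publish_node(volatile AO_double_t *slot,
                        AO_t old_ptr, AO_t old_version, AO_t new_ptr)
{
  return AO_compare_double_and_swap_double_full(slot,
                                                old_ptr, old_version,
                                                new_ptr, old_version + 1);
}
#endif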
