00001 /* -*- mode:C++; c-basic-offset:4 -*- 00002 Shore-MT -- Multi-threaded port of the SHORE storage manager 00003 00004 Copyright (c) 2007-2009 00005 Data Intensive Applications and Systems Labaratory (DIAS) 00006 Ecole Polytechnique Federale de Lausanne 00007 00008 All Rights Reserved. 00009 00010 Permission to use, copy, modify and distribute this software and 00011 its documentation is hereby granted, provided that both the 00012 copyright notice and this permission notice appear in all copies of 00013 the software, derivative works or modified versions, and any 00014 portions thereof, and that both notices appear in supporting 00015 documentation. 00016 00017 This code is distributed in the hope that it will be useful, but 00018 WITHOUT ANY WARRANTY; without even the implied warranty of 00019 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. THE AUTHORS 00020 DISCLAIM ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER 00021 RESULTING FROM THE USE OF THIS SOFTWARE. 00022 */ 00023 /*<std-header orig-src='shore' incl-file-exclusion='ATOMIC_CONTAINER_H'> 00024 00025 $Id: atomic_container.h,v 1.6 2012/01/02 17:02:13 nhall Exp $ 00026 00027 SHORE -- Scalable Heterogeneous Object REpository 00028 00029 Copyright (c) 1994-99 Computer Sciences Department, University of 00030 Wisconsin -- Madison 00031 All Rights Reserved. 00032 00033 Permission to use, copy, modify and distribute this software and its 00034 documentation is hereby granted, provided that both the copyright 00035 notice and this permission notice appear in all copies of the 00036 software, derivative works or modified versions, and any portions 00037 thereof, and that both notices appear in supporting documentation. 
00038 00039 THE AUTHORS AND THE COMPUTER SCIENCES DEPARTMENT OF THE UNIVERSITY 00040 OF WISCONSIN - MADISON ALLOW FREE USE OF THIS SOFTWARE IN ITS 00041 "AS IS" CONDITION, AND THEY DISCLAIM ANY LIABILITY OF ANY KIND 00042 FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 00043 00044 This software was developed with support by the Advanced Research 00045 Project Agency, ARPA order number 018 (formerly 8230), monitored by 00046 the U.S. Army Research Laboratory under contract DAAB07-91-C-Q518. 00047 Further funding for this work was provided by DARPA through 00048 Rome Research Laboratory Contract No. F30602-97-2-0247. 00049 00050 */ 00051 00052 #ifndef ATOMIC_CONTAINER_H 00053 #define ATOMIC_CONTAINER_H 00054 00055 #include "atomic_templates.h" 00056 00057 // for placement new support, which users need 00058 #include <new> 00059 #include <cassert> 00060 #include <stdlib.h> 00061 00062 /** \brief A thread-safe, lock-free, almost wait-free atomic 00063 * container for untyped items. 00064 * 00065 * This class takes care of pushing and 00066 * popping elements from the container for multiple concurrent threads. 00067 * 00068 * It is up to the user (client code) to determine what is in the 00069 * container (including, if apropos, allocate and deallocate the 00070 * storage for items pushed on this container). In the SM, the 00071 * buffer manager uses this as a list of free buffer frames (pointers 00072 * into the buffer pool). 00073 * 00074 * The objects being stored here must have an embedded next pointer. 00075 * The offset given in the constructor tells the container the offset 00076 * of the "next" pointer in the objects being stored here. The offset 00077 * can be + or - from the pointer being given in push(). 00078 * 00079 * WARNING: in order to avoid the so-called "ABA" problem, the 00080 * container must begin with and maintain a reasonably large pool. 
 * There is the possibility of recently-freed objects being reused
 * very quickly, in turn
 * enabling internal corruption from a possible race where a thread
 * begins to allocate an object, but other threads do enough
 * pops and pushes to cycle through 8 version numbers, and all this
 * happens before the first thread finishes. It's unlikely but
 * possible.
 *
 * Design summary (from the code below):
 * - Two singly-linked lists are kept: _active (consumed by pop) and
 *   _backup (fed by push).  Keeping them separate means pushers and
 *   poppers normally CAS on different words and do not interfere.
 * - The low VERSION_MASK bits of the _active head pointer hold a small
 *   version counter, bumped each time slow_pop() installs a new list,
 *   to reduce the window for the ABA problem described above.
 * - _backup holds plain (unversioned) node pointers; versions live
 *   only in _active.
 */
class atomic_container {
/// \cond skipdoc
protected:
    /// Intrusive link: each stored object embeds one of these at
    /// byte offset _offset from the pointer the client pushes.
    struct ptr { ptr* next; };
    /// Unions for punning, i.e., type conversion between the raw
    /// client pointer (v), the intrusive link view (p), and an
    /// integer (n) for offset/version-bit arithmetic.
    union vpn { void* v; ptr* p; long n; };
    union pvn { ptr* p; void* v; long n; };
/// \endcond skipdoc

public:
    /// Signed byte distance from the client's pointer to the
    /// embedded link header; may be positive or negative.
    typedef long int offset_typ;

    atomic_container(offset_typ offset)
        : _offset(offset), _locked(0), _active(0), _backup(0)
    { }

    /// Pop an item off the stack.
    /// If we don't find any to pop, return a null ptr.
    /// We do not go to the global heap. The client must do that.
    ///
    /// Fast path: CAS-loop that unlinks the head of _active while
    /// preserving the version bits embedded in its low bits.  When
    /// _active is empty (its pointer part is null), fall through to
    /// slow_pop(), which swaps the _backup list in.
    void* pop() {
        pvn old_value = {*&_active};   // *& forces a volatile re-read
        while(pointer(old_value)) {
            // swap if the head's pointer and version are unchanged
            pvn new_value = next_pointer(old_value);
            void* cur_value = atomic_cas_ptr(&_active, old_value.v, new_value.v);
            if(old_value.v == cur_value)
                break;

            // try again... CAS returned the value actually seen,
            // so reuse it rather than re-reading _active.
            old_value.v = cur_value;
        }

        // slow alloc?  prepare() converts the internal (versioned,
        // offset-adjusted) head back into the client's pointer.
        return pointer(old_value)? prepare(old_value) : slow_pop();
    }

    /// Push an item onto the stack.
    ///
    /// Items are prepended to _backup, not _active, so pushers do not
    /// contend with the pop() fast path; slow_pop() later migrates the
    /// whole _backup list into _active in one swap.
    void push(void* v) {
        // back up to the real start of this allocation, i.e. the
        // embedded link header at v + _offset
        vpn u = {v};
        u.n += _offset;

        // enqueue it
        pvn old_value = { _backup };
        while(1) {
            u.p->next = old_value.p;
            // make the node's next-link globally visible before the
            // CAS can publish the node itself
            membar_producer();
            void* cur_value = atomic_cas_ptr(&_backup, old_value.v, u.v);
            if(old_value.v == cur_value)
                break;

            // try again... with the head value the CAS reported
            old_value.v = cur_value;
        }
    }
    /// Only for debugging.
    offset_typ offset() const { return _offset; }

    /// NOTE: does not free list nodes -- storage is owned by the
    /// client (see class comment).
    virtual ~atomic_container() { // for shutdown/restart purposes:
        _locked = 0; _active = 0; _backup = 0;
    }

protected:
    /// Strip off the pointer's version number and hide the header:
    /// mask away the low version bits, then subtract _offset to undo
    /// the adjustment push() applied, yielding the client's pointer.
    template<class Union>
    void* prepare(Union rval) {
        rval.n = pointer(rval) - _offset;
        return rval.v;
    }

    /// Return a null pointer (i.e., it contains the offset only).
    /// Feeding this through prepare() subtracts _offset back out, so
    /// the caller of pop() sees a true NULL (assuming _offset has no
    /// bits inside VERSION_MASK, i.e. is alignment-sized -- NOTE(review):
    /// not enforced here; confirm against client usage).
    void* null() {
        union {
            offset_typ i;
            void *v;
        } _pun = { _offset };
        return _pun.v;
    }

    offset_typ const _offset;

private:
    /// Test-and-set spinlock guarding the slow path (list migration).
    unsigned volatile _locked;
    /// The list of active stuff.
    /// Pop will pull things off this list until it's empty.
    /// Low VERSION_MASK bits of this pointer hold the version counter.
    ptr* volatile _active;
    /// The list of things recently pushed.
    /// Push uses this list to avoid interference with pop.
    ptr* volatile _backup;

#ifdef ARCH_LP64
    enum { VERSION_MASK=0x7 };  // 8-byte alignment -> 3 version bits
#else
    enum { VERSION_MASK=0x3 }; //alas. few versions (2 bits on 32-bit)
#endif

    ///Return the pointer with the version mask removed.
    template<class Union>
    static long pointer(Union value) { return value.n &~VERSION_MASK; }

    ///Return the version mask that's embedded in the pointer.
    static long version(long value) { return value & VERSION_MASK; }

    ///Return a non-dereferencable pointer to the next item after the given one.
    /// The given value might have the version number embedded (or not).
    /// The returned ptr will have the same version as that in the ptr.
    static pvn next_pointer(pvn value) {
        long ver = version(value.n); // save the version bits
        value.n -= ver;              // subtract out the version
        value.p = value.p->next;     // get ptr to the next item
        value.n += ver;              // add back in the version
        return value;
    }

    /// Spin until acquiring the lock is free or noticing that _active
    /// has become non-null.
    ///
    /// Returns true only if this thread holds the lock AND _active is
    /// still empty; returns false (without the lock) if another thread
    /// has already refilled _active, in which case the caller should
    /// just retry the fast path.
    bool attempt_lock() {
        bool mine = false;
        pvn active = { *&_active };
        while(!mine) {
            // spin while someone else holds the lock, bailing out as
            // soon as _active has items again
            while(*&_locked && !pointer(active)) active.p = *&_active;
            if(pointer(active)) return false;
            // test-and-set: swap returns the previous value, so 0
            // (previously unlocked) means we got it
            mine = !atomic_swap_32(&_locked, true);
        }
        if(mine) { // necessarily true here; kept as written
            membar_enter();  // acquire barrier for the critical section
            // re-check under the lock: another slow_pop may have
            // repopulated _active before we acquired
            active.p = *&_active;
            if(!pointer(active))
                return true;

            release_lock();
        }
        return false;
    }
    ///Release the lock.
    void release_lock() {
        membar_exit();  // release barrier: critical section completes first
        _locked = false;
    }

    /// Grab a lock, swap active and backup lists,
    /// and try again to pop.
    ///
    /// Returns the first node of the migrated _backup list (converted
    /// to a client pointer), or null() if both lists are empty.
    void* slow_pop() {
        if(!attempt_lock())
            return pop(); // try again -- _active was refilled meanwhile

        /* At this point (holding the lock) the _active list will
           not change to non-null on us. The _backup list may
           continue to grow so we atomically cut it loose
        */
        vpn rval = { atomic_swap_ptr(&_backup, NULL) };
        if(rval.p) {
            // keep head for ourselves, rest becomes new _active,
            // stamped with the old head's version + 1 (masked) to
            // mitigate ABA
            pvn old_list = { _active };
            pvn new_list = {rval.p->next};
            new_list.n += version(old_list.n+1);
            _active = new_list.p;
        }
        else {
            rval.v = null();  // both lists empty: report "no item"
        }

        release_lock();
        return prepare(rval);
    }

};

#endif