#ifndef _RWSET_H
#define _RWSET_H

#include <assert.h>    /* assert */
#include <stdio.h>     /* perror, fprintf */
#include <stdlib.h>    /* malloc, realloc, posix_memalign, exit */

/*
 * Transaction, lock, and read-/write-set types (mtm_tx_t, mode_data_t,
 * r_entry_t, w_entry_t, mtm_word_t) as well as the ATOMIC_LOAD, LOCK_*,
 * PRINT_DEBUG* and configuration macros (DESIGN, ALIGNMENT, TX_ACTIVE, ...)
 * are expected to be provided by the including translation unit.
 */
/*
 * Check whether the word guarded by the given lock has already been read by
 * the transaction.  Returns the matching read-set entry, or NULL if it has
 * not been read.
 */
static inline
r_entry_t *
mtm_has_read(mtm_tx_t *tx, mode_data_t *modedata, volatile mtm_word_t *lock)
{
	r_entry_t *r;
	int i;

	PRINT_DEBUG("==> mtm_has_read(%p[%lu-%lu],%p)\n", tx,
	            (unsigned long)modedata->start, (unsigned long)modedata->end, lock);

	/* Only active transactions may query their read set. */
	assert(tx->status == TX_ACTIVE);

	/* Look for the lock among the recorded reads. */
	r = modedata->r_set.entries;
	for (i = modedata->r_set.nb_entries; i > 0; i--, r++) {
		if (r->lock == lock) {
			/* Already read: return the existing entry. */
			return r;
		}
	}
	return NULL;
}
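/*
 * Illustrative sketch (not part of the original header): a transactional
 * load path would typically consult mtm_has_read() before appending a new
 * read-set entry, reusing the existing entry when the lock has already been
 * read.  The surrounding logic (reading the lock word and the value) is
 * assumed and elided here.
 *
 *     r_entry_t *r = mtm_has_read(tx, modedata, lock);
 *     if (r != NULL) {
 *         // Lock already in the read set: no new entry is needed; the
 *         // caller would only re-check the recorded version.
 *     } else {
 *         // Append a new entry recording `lock` and its current version.
 *     }
 */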

/*
 * Validate the read set of the transaction: every lock that was read must
 * either still carry the version recorded at read time, or be owned by this
 * transaction itself.  Returns 1 if the read set is consistent, 0 otherwise.
 */
static inline
int
mtm_validate(mtm_tx_t *tx, mode_data_t *modedata)
{
	r_entry_t *r;
	int i;
	mtm_word_t l;

	PRINT_DEBUG("==> mtm_validate(%p[%lu-%lu])\n", tx,
	            (unsigned long)modedata->start, (unsigned long)modedata->end);

	/* Only active transactions may validate. */
	assert(tx->status == TX_ACTIVE);

	/* Validate each read-set entry. */
	r = modedata->r_set.entries;
	for (i = modedata->r_set.nb_entries; i > 0; i--, r++) {
		/* Read the current value of the lock word. */
		l = ATOMIC_LOAD(r->lock);
		/* Unlocked and still the same version? */
		if (LOCK_GET_OWNED(l)) {
			/* Locked: it must be owned by this very transaction. */
#if DESIGN == WRITE_THROUGH
			if ((mtm_tx_t *)LOCK_GET_ADDR(l) != tx)
#else
			w_entry_t *w = (w_entry_t *)LOCK_GET_ADDR(l);
			/* The lock stores a write-set entry: check that it falls
			 * inside our own write set. */
			if (!(modedata->w_set.entries <= w &&
			      w < modedata->w_set.entries + modedata->w_set.nb_entries))
#endif
			{
				/* Locked by another transaction: cannot validate. */
				return 0;
			}
			/* We own the lock: OK. */
		} else {
			if (LOCK_GET_TIMESTAMP(l) != r->version) {
				/* Version changed since we read it: cannot validate. */
				return 0;
			}
			/* Same version: OK. */
		}
	}
	return 1;
}
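/*
 * Illustrative sketch (assumption, not taken from this file): callers such
 * as the commit path or a snapshot-extension routine would typically use
 * the boolean result of mtm_validate() to decide between proceeding and
 * restarting the transaction:
 *
 *     if (!mtm_validate(tx, modedata)) {
 *         // At least one read is stale or locked by another transaction;
 *         // the caller is expected to abort/restart the transaction here.
 *     }
 */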

/*
 * (Re)allocate the read-set entries of the transaction.  With extend != 0
 * the current array is doubled in size, otherwise a fresh array of
 * r_set.size entries is allocated.
 */
static inline
void
mtm_allocate_rs_entries(mtm_tx_t *tx, mode_data_t *data, int extend)
{
	if (extend) {
		/* Extend the read set: double the size of the array. */
		data->r_set.size *= 2;
		PRINT_DEBUG2("==> reallocate read set (%p[%lu-%lu],%d)\n", tx,
		             (unsigned long)data->start,
		             (unsigned long)data->end,
		             data->r_set.size);
		if ((data->r_set.entries =
		     (r_entry_t *)realloc(data->r_set.entries,
		                          data->r_set.size * sizeof(r_entry_t))) == NULL)
		{
			perror("realloc");
			exit(1);
		}
	} else {
		/* Initial allocation of the read set. */
		if ((data->r_set.entries =
		     (r_entry_t *)malloc(data->r_set.size * sizeof(r_entry_t))) == NULL)
		{
			perror("malloc");
			exit(1);
		}
	}
}
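/*
 * Illustrative sketch (assumption): the read set is typically grown lazily.
 * When recording a new read would overflow the current capacity, the caller
 * doubles it via mtm_allocate_rs_entries() with extend != 0 before appending
 * the entry (`version` here stands for the version observed in the lock
 * word and is not defined in this header):
 *
 *     if (modedata->r_set.nb_entries == modedata->r_set.size) {
 *         mtm_allocate_rs_entries(tx, modedata, 1);   // doubles r_set.size
 *     }
 *     r = &modedata->r_set.entries[modedata->r_set.nb_entries++];
 *     r->version = version;
 *     r->lock    = lock;
 */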

/*
 * (Re)allocate the write-set entries of the transaction.  With extend != 0
 * the current array is doubled in size, otherwise a fresh (optionally
 * aligned) array of w_set.size entries is allocated.  When conflict tracking
 * or locked-data reads are enabled, each new entry is tagged with a
 * back-pointer to its owning transaction.
 */
static inline
void
mtm_allocate_ws_entries(mtm_tx_t *tx, mode_data_t *data, int extend)
{
#if defined(READ_LOCKED_DATA) || defined(CONFLICT_TRACKING)
	int i;
	int first = (extend ? data->w_set.size : 0);
#endif

	if (extend) {
		/* Extend the write set: double the size of the array. */
		data->w_set.size *= 2;
		PRINT_DEBUG("==> reallocate write set (%p[%lu-%lu],%d)\n", tx,
		            (unsigned long)data->start, (unsigned long)data->end, data->w_set.size);
		if ((data->w_set.entries =
		     (w_entry_t *)realloc(data->w_set.entries,
		                          data->w_set.size * sizeof(w_entry_t))) == NULL)
		{
			perror("realloc");
			exit(1);
		}
	} else {
		/* Initial allocation of the write set. */
#if ALIGNMENT == 1
		if ((data->w_set.entries =
		     (w_entry_t *)malloc(data->w_set.size * sizeof(w_entry_t))) == NULL)
		{
			perror("malloc");
			exit(1);
		}
#else
		if (posix_memalign((void **)&data->w_set.entries,
		                   ALIGNMENT,
		                   data->w_set.size * sizeof(w_entry_t)) != 0)
		{
			fprintf(stderr, "Error: cannot allocate aligned memory\n");
			exit(1);
		}
#endif
	}

#if defined(READ_LOCKED_DATA) || defined(CONFLICT_TRACKING)
	/* Point the newly allocated entries back to their owning transaction. */
	for (i = first; i < data->w_set.size; i++) {
		data->w_set.entries[i].tx = tx;
	}
#endif
}

#endif /* _RWSET_H */