00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042
00043
00044
00045
00046
00047
00048
00049
00050
00051
00052
00053
00054
00055
00056
00057
00058 #include <assert.h>
00059 #include <stdio.h>
00060 #include <stdint.h>
00061
00062 #include <pthread.h>
00063
00064 #include "gc.h"
00065
00066 #include "atomic.h"
00067 #include "mtm_i.h"
00068
00069
00070
00071
00072
00073
00074
00075
/* Maximum number of threads that can concurrently use the GC. */
#define MAX_THREADS 1024
/* Sentinel epoch (all bits set): no live thread can ever publish it. */
#define EPOCH_MAX (~(gc_word_t)0)

#ifndef NO_PERIODIC_CLEANUP
/* Run gc_cleanup() every CLEANUP_FREQUENCY calls to gc_free(). */
# ifndef CLEANUP_FREQUENCY
# define CLEANUP_FREQUENCY 1
# endif
#endif
00084
00085
00086
00087
00088
00089
/* Lifecycle states of a slot in the threads[] table. */
enum {
GC_NULL = 0,       /* never used; marks the end of the active prefix */
GC_BUSY = 1,       /* owned by a live thread (or a cleaner, transiently) */
GC_FREE_EMPTY = 2, /* released, no pending regions to reclaim */
GC_FREE_FULL = 3   /* released, still holds regions awaiting reclamation */
};
00096
/* One memory block awaiting reclamation (singly-linked list node). */
typedef struct mem_block {
void *addr;              /* address to hand back to free() */
struct mem_block *next;  /* next block freed in the same region */
} mem_block_t;
00101
/* All blocks logically freed at the same epoch, kept in a list ordered
 * from oldest (head) to youngest (tail) per thread slot. */
typedef struct mem_region {
struct mem_block *blocks; /* blocks freed at timestamp 'ts' */
gc_word_t ts;             /* epoch at which the blocks were freed */
struct mem_region *next;  /* next (younger) region */
} mem_region_t;
00107
/* Per-slot GC state for one (possibly already exited) thread. */
typedef struct tm_thread {
gc_word_t used;     /* slot state: GC_NULL/GC_BUSY/GC_FREE_EMPTY/GC_FREE_FULL */
pthread_t thread;   /* NOTE(review): never read or written in this file — confirm use */
gc_word_t ts;       /* thread's current epoch; EPOCH_MAX when inactive */
mem_region_t *head; /* oldest pending region */
mem_region_t *tail; /* youngest pending region */
#ifndef NO_PERIODIC_CLEANUP
unsigned int frees; /* gc_free() call counter driving periodic cleanup */
#endif
} tm_thread_t;
00118
/* Global table of thread slots, allocated in gc_init(). */
static volatile tm_thread_t *threads;
/* Number of threads currently registered with the GC. */
static volatile gc_word_t nb_threads;

/* Callback returning the current global epoch (set in gc_init()). */
static gc_word_t (*current_epoch)();

/* Calling thread's index into threads[], kept in TLS. */
#ifdef TLS
static __thread int thread_idx;
#else
static pthread_key_t thread_idx;
#endif
00129
00130
00131
00132
00133
00134
00135
00136
00137 static inline int gc_get_idx()
00138 {
00139 #ifdef TLS
00140 return thread_idx;
00141 #else
00142 return (int)pthread_getspecific(thread_idx);
00143 #endif
00144 }
00145
00146
00147
00148
00149 static inline gc_word_t gc_compute_min(gc_word_t now)
00150 {
00151 int i;
00152 gc_word_t min, ts;
00153 mtm_word_t used;
00154
00155 PRINT_DEBUG("==> gc_compute_min(%d)\n", gc_get_idx());
00156
00157 min = now;
00158 for (i = 0; i < MAX_THREADS; i++) {
00159 used = (gc_word_t)ATOMIC_LOAD(&threads[i].used);
00160 if (used == GC_NULL)
00161 break;
00162 if (used != GC_BUSY)
00163 continue;
00164
00165 ts = (gc_word_t)ATOMIC_LOAD(&threads[i].ts);
00166 if (ts < min)
00167 min = ts;
00168 }
00169
00170 PRINT_DEBUG("==> gc_compute_min(%d,m=%lu)\n", gc_get_idx(), (unsigned long)min);
00171
00172 return min;
00173 }
00174
00175
00176
00177
00178 static inline void gc_clean_blocks(mem_block_t *mb)
00179 {
00180 mem_block_t *next_mb;
00181
00182 while (mb != NULL) {
00183 PRINT_DEBUG("==> free(%d,a=%p)\n", gc_get_idx(), mb->addr);
00184 free(mb->addr);
00185 next_mb = mb->next;
00186 free(mb);
00187 mb = next_mb;
00188 }
00189 }
00190
00191
00192
00193
00194 static inline void gc_clean_regions(mem_region_t *mr)
00195 {
00196 mem_region_t *next_mr;
00197
00198 while (mr != NULL) {
00199 gc_clean_blocks(mr->blocks);
00200 next_mr = mr->next;
00201 free(mr);
00202 mr = next_mr;
00203 }
00204 }
00205
00206
00207
00208
/*
 * Reclaim all regions of slot 'idx' whose timestamp is strictly older than
 * 'min' — the oldest epoch any active thread may still observe.  Regions
 * are ordered oldest (head) to youngest (tail), so the scan stops at the
 * first region that is still too young to reclaim.
 */
void gc_cleanup_thread(int idx, gc_word_t min)
{
  mem_region_t *mr;

  PRINT_DEBUG("==> gc_cleanup_thread(%d,m=%lu)\n", idx, (unsigned long)min);

  if (threads[idx].head == NULL) {
    /* Nothing pending for this slot. */
    return;
  }

  /* Pop and free regions from the head while they predate 'min'. */
  while (min > threads[idx].head->ts) {
    gc_clean_blocks(threads[idx].head->blocks);
    mr = threads[idx].head->next;
    free(threads[idx].head);
    threads[idx].head = mr;
    if(mr == NULL) {
      /* List exhausted: reset the tail as well. */
      threads[idx].tail = NULL;
      break;
    }
  }
}
00232
00233
00234
00235
00236
00237
00238
00239
00240 void gc_init(gc_word_t (*epoch)())
00241 {
00242 int i;
00243
00244 PRINT_DEBUG("==> gc_init()\n");
00245
00246 current_epoch = epoch;
00247 if ((threads = (tm_thread_t *)malloc(MAX_THREADS * sizeof(tm_thread_t))) == NULL) {
00248 perror("malloc");
00249 exit(1);
00250 }
00251 for (i = 0; i < MAX_THREADS; i++) {
00252 threads[i].used = GC_NULL;
00253 threads[i].ts = EPOCH_MAX;
00254 threads[i].head = threads[i].tail = NULL;
00255 #ifndef NO_PERIODIC_CLEANUP
00256 threads[i].frees = 0;
00257 #endif
00258 }
00259 nb_threads = 0;
00260 #ifndef TLS
00261 if (pthread_key_create(&thread_idx, NULL) != 0) {
00262 fprintf(stderr, "Error creating thread local\n");
00263 exit(1);
00264 }
00265 #endif
00266 }
00267
00268
00269
00270
00271 void gc_exit()
00272 {
00273 int i;
00274
00275 PRINT_DEBUG("==> gc_exit()\n");
00276
00277
00278 if (ATOMIC_LOAD(&nb_threads) != 0) {
00279 fprintf(stderr, "Error: some threads have not been cleaned up\n");
00280 exit(1);
00281 }
00282
00283 for (i = 0; i < MAX_THREADS; i++)
00284 gc_clean_regions(threads[i].head);
00285
00286 free((void *)threads);
00287 }
00288
00289
00290
00291
00292 void gc_init_thread()
00293 {
00294 int i, idx;
00295 gc_word_t used;
00296
00297 PRINT_DEBUG("==> gc_init_thread()\n");
00298
00299 if (ATOMIC_FETCH_INC_FULL(&nb_threads) >= MAX_THREADS) {
00300 fprintf(stderr, "Error: too many concurrent threads created\n");
00301 exit(1);
00302 }
00303
00304 i = 0;
00305
00306 while (1) {
00307 used = (gc_word_t)ATOMIC_LOAD(&threads[i].used);
00308 if (used != GC_BUSY) {
00309 if (ATOMIC_CAS_FULL(&threads[i].used, used, GC_BUSY) != 0) {
00310 idx = i;
00311
00312 ATOMIC_STORE(&threads[idx].ts, current_epoch());
00313 break;
00314 }
00315
00316 assert (threads[i].used != GC_NULL);
00317 }
00318 if (++i >= MAX_THREADS)
00319 i = 0;
00320 }
00321 #ifdef TLS
00322 thread_idx = idx;
00323 #else
00324 pthread_setspecific(thread_idx, (void *)idx);
00325 #endif
00326
00327 PRINT_DEBUG("==> gc_init_thread(i=%d)\n", idx);
00328 }
00329
00330
00331
00332
00333 void gc_exit_thread()
00334 {
00335 int idx = gc_get_idx();
00336
00337 PRINT_DEBUG("==> gc_exit_thread(%d)\n", idx);
00338
00339
00340 ATOMIC_STORE(&threads[idx].ts, EPOCH_MAX);
00341
00342 ATOMIC_STORE(&threads[idx].used, threads[idx].head == NULL ? GC_FREE_EMPTY : GC_FREE_FULL);
00343 ATOMIC_FETCH_DEC_FULL(&nb_threads);
00344
00345 }
00346
00347
00348
00349
00350
00351 void gc_set_epoch(gc_word_t epoch)
00352 {
00353 int idx = gc_get_idx();
00354
00355 PRINT_DEBUG("==> gc_set_epoch(%d,%lu)\n", idx, (unsigned long)epoch);
00356
00357 if (epoch >= EPOCH_MAX) {
00358 fprintf(stderr, "Exceeded maximum epoch number: 0x%lx\n", (unsigned long)epoch);
00359
00360 return;
00361 }
00362
00363
00364 ATOMIC_STORE(&threads[idx].ts, epoch);
00365 }
00366
00367
00368
00369
00370 void gc_free(void *addr, gc_word_t epoch)
00371 {
00372 mem_region_t *mr;
00373 mem_block_t *mb;
00374 int idx = gc_get_idx();
00375
00376 PRINT_DEBUG("==> gc_free(%d,%lu)\n", idx, (unsigned long)epoch);
00377
00378 if (threads[idx].head == NULL || threads[idx].head->ts < epoch) {
00379
00380 if ((mr = (mem_region_t *)malloc(sizeof(mem_region_t))) == NULL) {
00381 perror("malloc");
00382 exit(1);
00383 }
00384 mr->ts = epoch;
00385 mr->blocks = NULL;
00386 mr->next = NULL;
00387 if (threads[idx].head == NULL) {
00388 threads[idx].head = threads[idx].tail = mr;
00389 } else {
00390 threads[idx].tail->next = mr;
00391 threads[idx].tail = mr;
00392 }
00393 } else {
00394
00395 mr = threads[idx].head;
00396 }
00397
00398
00399 if ((mb = (mem_block_t *)malloc(sizeof(mem_block_t))) == NULL) {
00400 perror("malloc");
00401 exit(1);
00402 }
00403 mb->addr = addr;
00404 mb->next = mr->blocks;
00405 mr->blocks = mb;
00406
00407 #ifndef NO_PERIODIC_CLEANUP
00408 threads[idx].frees++;
00409 if (threads[idx].frees % CLEANUP_FREQUENCY == 0)
00410 gc_cleanup();
00411 #endif
00412 }
00413
00414
00415
00416
00417
00418 void gc_cleanup()
00419 {
00420 gc_word_t min;
00421 int idx = gc_get_idx();
00422
00423 PRINT_DEBUG("==> gc_cleanup(%d)\n", idx);
00424
00425 if (threads[idx].head == NULL) {
00426
00427 return;
00428 }
00429
00430 min = gc_compute_min(current_epoch());
00431
00432 gc_cleanup_thread(idx, min);
00433 }
00434
00435
00436
00437
00438
/*
 * Reclaim pending memory of all *exited* threads (slots in GC_FREE_FULL).
 * A slot is temporarily CAS'ed to GC_BUSY so no other cleaner works on it
 * concurrently (its ts stays EPOCH_MAX, so it cannot lower the minimum),
 * then restored to the appropriate free state.  May be called from any
 * registered thread.
 */
void gc_cleanup_all()
{
  int i;
  gc_word_t min = EPOCH_MAX;

  PRINT_DEBUG("==> gc_cleanup_all()\n");

  for (i = 0; i < MAX_THREADS; i++) {
    if ((gc_word_t)ATOMIC_LOAD(&threads[i].used) == GC_NULL)
      break; /* end of the ever-used prefix */
    if ((gc_word_t)ATOMIC_LOAD(&threads[i].used) == GC_FREE_FULL) {
      if (ATOMIC_CAS_FULL(&threads[i].used, GC_FREE_FULL, GC_BUSY) != 0) {
        /* Compute the minimum epoch lazily, at most once per call. */
        if (min == EPOCH_MAX)
          min = gc_compute_min(current_epoch());
        gc_cleanup_thread(i, min);
        ATOMIC_STORE(&threads[i].used, threads[i].head == NULL ? GC_FREE_EMPTY : GC_FREE_FULL);
      }
    }
  }
}
00459
00460
00461
00462
00463
00464 void gc_reset()
00465 {
00466 int i;
00467
00468 PRINT_DEBUG("==> gc_reset()\n");
00469
00470 assert(nb_threads == 0);
00471
00472 for (i = 0; i < MAX_THREADS; i++) {
00473 if (threads[i].used == GC_NULL)
00474 break;
00475 gc_clean_regions(threads[i].head);
00476 threads[i].ts = EPOCH_MAX;
00477 threads[i].head = threads[i].tail = NULL;
00478 #ifndef NO_PERIODIC_CLEANUP
00479 threads[i].frees = 0;
00480 #endif
00481 }
00482 }