usermode/library/mtm/src/gc.c

/*
    Copyright (C) 2011 Computer Sciences Department,
    University of Wisconsin -- Madison

    ----------------------------------------------------------------------

    This file is part of Mnemosyne: Lightweight Persistent Memory,
    originally developed at the University of Wisconsin -- Madison.

    Mnemosyne was originally developed primarily by Haris Volos
    with contributions from Andres Jaan Tack.

    ----------------------------------------------------------------------

    Mnemosyne is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License
    as published by the Free Software Foundation, version 2
    of the License.

    Mnemosyne is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin Street, Fifth Floor,
    Boston, MA  02110-1301, USA.

### END HEADER ###
*/

/*
 * Source code is partially derived from TinySTM (license is attached)
 *
 *
 * File:
 *   gc.c
 * Author(s):
 *   Pascal Felber <pascal.felber@unine.ch>
 *   Patrick Marlier <patrick.marlier@unine.ch>
 * Description:
 *   Epoch-based garbage collector.
 *
 * Copyright (c) 2007-2009.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2
 * of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>                     /* malloc(), free(), exit() */

#include <pthread.h>

#include "gc.h"

#include "atomic.h"
#include "mtm_i.h"


/* TODO: could be made much more efficient by allocating large chunks. */

/* ################################################################### *
 * DEFINES
 * ################################################################### */

#define MAX_THREADS                     1024
#define EPOCH_MAX                       (~(gc_word_t)0)

#ifndef NO_PERIODIC_CLEANUP
# ifndef CLEANUP_FREQUENCY
#  define CLEANUP_FREQUENCY             1
# endif /* ! CLEANUP_FREQUENCY */
#endif /* ! NO_PERIODIC_CLEANUP */


/* ################################################################### *
 * TYPES
 * ################################################################### */

enum {                                  /* Descriptor status */
  GC_NULL = 0,
  GC_BUSY = 1,
  GC_FREE_EMPTY = 2,
  GC_FREE_FULL = 3
};
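
/*
 * Lifecycle of a descriptor slot: GC_NULL marks a slot that has never been
 * used; gc_init_thread() claims a slot by switching it to GC_BUSY;
 * gc_exit_thread() releases it as GC_FREE_EMPTY (no deferred memory left)
 * or GC_FREE_FULL (regions still pending).  gc_cleanup_all() may briefly
 * re-acquire a GC_FREE_FULL slot to reclaim memory left by an exited thread.
 */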

typedef struct mem_block {              /* Block of allocated memory */
  void *addr;                           /* Address of memory */
  struct mem_block *next;               /* Next block */
} mem_block_t;

typedef struct mem_region {             /* A list of allocated memory blocks */
  struct mem_block *blocks;             /* Memory blocks */
  gc_word_t ts;                         /* Allocation timestamp */
  struct mem_region *next;              /* Next region */
} mem_region_t;

typedef struct tm_thread {              /* Descriptor of an active thread */
  gc_word_t used;                       /* Is this entry used? */
  pthread_t thread;                     /* Thread descriptor */
  gc_word_t ts;                         /* Start timestamp */
  mem_region_t *head;                   /* Oldest memory region assigned to the thread */
  mem_region_t *tail;                   /* Newest memory region assigned to the thread */
#ifndef NO_PERIODIC_CLEANUP
  unsigned int frees;                   /* How many blocks have been freed? */
#endif /* ! NO_PERIODIC_CLEANUP */
} tm_thread_t;

static volatile tm_thread_t *threads;   /* Array of active threads */
static volatile gc_word_t nb_threads;   /* Number of active threads */

static gc_word_t (*current_epoch)();    /* Read the value of the current epoch */

#ifdef TLS
static __thread int thread_idx;
#else /* ! TLS */
static pthread_key_t thread_idx;
#endif /* ! TLS */

/* ################################################################### *
 * STATIC
 * ################################################################### */

/*
 * Returns the index of the CURRENT thread.
 */
static inline int gc_get_idx()
{
#ifdef TLS
  return thread_idx;
#else /* ! TLS */
  return (int)(intptr_t)pthread_getspecific(thread_idx);
#endif /* ! TLS */
}

/*
 * Compute a lower bound on the minimum start time of all active transactions.
 */
static inline gc_word_t gc_compute_min(gc_word_t now)
{
  int i;
  gc_word_t min, ts;
  mtm_word_t used;

  PRINT_DEBUG("==> gc_compute_min(%d)\n", gc_get_idx());

  min = now;
  for (i = 0; i < MAX_THREADS; i++) {
    used = (gc_word_t)ATOMIC_LOAD(&threads[i].used);
    if (used == GC_NULL)
      break;
    if (used != GC_BUSY)
      continue;
    /* Used entry */
    ts = (gc_word_t)ATOMIC_LOAD(&threads[i].ts);
    if (ts < min)
      min = ts;
  }

  PRINT_DEBUG("==> gc_compute_min(%d,m=%lu)\n", gc_get_idx(), (unsigned long)min);

  return min;
}

/*
 * Free block list.
 */
static inline void gc_clean_blocks(mem_block_t *mb)
{
  mem_block_t *next_mb;

  while (mb != NULL) {
    PRINT_DEBUG("==> free(%d,a=%p)\n", gc_get_idx(), mb->addr);
    free(mb->addr);
    next_mb = mb->next;
    free(mb);
    mb = next_mb;
  }
}

/*
 * Free region list.
 */
static inline void gc_clean_regions(mem_region_t *mr)
{
  mem_region_t *next_mr;

  while (mr != NULL) {
    gc_clean_blocks(mr->blocks);
    next_mr = mr->next;
    free(mr);
    mr = next_mr;
  }
}

/*
 * Garbage-collect old data associated with a thread.
 */
void gc_cleanup_thread(int idx, gc_word_t min)
{
  mem_region_t *mr;

  PRINT_DEBUG("==> gc_cleanup_thread(%d,m=%lu)\n", idx, (unsigned long)min);

  if (threads[idx].head == NULL) {
    /* Nothing to clean up */
    return;
  }

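  /*
   * Regions are appended at the tail as epochs advance, so the list is
   * ordered from oldest (head) to newest (tail).  A region can be released
   * once min, the lower bound on the start timestamp of all active
   * transactions, is strictly greater than the region's timestamp: no live
   * transaction can still observe its blocks.
   */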
  while (min > threads[idx].head->ts) {
    gc_clean_blocks(threads[idx].head->blocks);
    mr = threads[idx].head->next;
    free(threads[idx].head);
    threads[idx].head = mr;
    if (mr == NULL) {
      /* All memory regions deleted */
      threads[idx].tail = NULL;
      break;
    }
  }
}

/* ################################################################### *
 * FUNCTIONS
 * ################################################################### */

/*
 * Initialize GC library (to be called from main thread).
 */
void gc_init(gc_word_t (*epoch)())
{
  int i;

  PRINT_DEBUG("==> gc_init()\n");

  current_epoch = epoch;
  if ((threads = (tm_thread_t *)malloc(MAX_THREADS * sizeof(tm_thread_t))) == NULL) {
    perror("malloc");
    exit(1);
  }
  for (i = 0; i < MAX_THREADS; i++) {
    threads[i].used = GC_NULL;
    threads[i].ts = EPOCH_MAX;
    threads[i].head = threads[i].tail = NULL;
#ifndef NO_PERIODIC_CLEANUP
    threads[i].frees = 0;
#endif /* ! NO_PERIODIC_CLEANUP */
  }
  nb_threads = 0;
#ifndef TLS
  if (pthread_key_create(&thread_idx, NULL) != 0) {
    fprintf(stderr, "Error creating thread local\n");
    exit(1);
  }
#endif /* ! TLS */
}

/*
 * Clean up GC library (to be called from main thread).
 */
void gc_exit()
{
  int i;

  PRINT_DEBUG("==> gc_exit()\n");

  /* Make sure that all threads have been stopped */
  if (ATOMIC_LOAD(&nb_threads) != 0) {
    fprintf(stderr, "Error: some threads have not been cleaned up\n");
    exit(1);
  }
  /* Clean up memory */
  for (i = 0; i < MAX_THREADS; i++)
    gc_clean_regions(threads[i].head);

  free((void *)threads);
}

/*
 * Initialize thread-specific GC resources (to be called once by each thread).
 */
void gc_init_thread()
{
  int i, idx;
  gc_word_t used;

  PRINT_DEBUG("==> gc_init_thread()\n");

  if (ATOMIC_FETCH_INC_FULL(&nb_threads) >= MAX_THREADS) {
    fprintf(stderr, "Error: too many concurrent threads created\n");
    exit(1);
  }
  /* Find entry in threads array */
  i = 0;
  /* TODO: not wait-free */
  while (1) {
    used = (gc_word_t)ATOMIC_LOAD(&threads[i].used);
    if (used != GC_BUSY) {
      if (ATOMIC_CAS_FULL(&threads[i].used, used, GC_BUSY) != 0) {
        idx = i;
        /* Set the lower bound to the current time (transactions by this thread cannot start earlier) */
        ATOMIC_STORE(&threads[idx].ts, current_epoch());
        break;
      }
      /* CAS failed: another thread must have acquired the slot */
      assert(threads[i].used != GC_NULL);
    }
    if (++i >= MAX_THREADS)
      i = 0;
  }
#ifdef TLS
  thread_idx = idx;
#else /* ! TLS */
  pthread_setspecific(thread_idx, (void *)(intptr_t)idx);
#endif /* ! TLS */

  PRINT_DEBUG("==> gc_init_thread(i=%d)\n", idx);
}

/*
 * Clean up thread-specific GC resources (to be called once by each thread).
 */
void gc_exit_thread()
{
  int idx = gc_get_idx();

  PRINT_DEBUG("==> gc_exit_thread(%d)\n", idx);

  /* No more lower bound for this thread */
  ATOMIC_STORE(&threads[idx].ts, EPOCH_MAX);
  /* Release slot */
  ATOMIC_STORE(&threads[idx].used, threads[idx].head == NULL ? GC_FREE_EMPTY : GC_FREE_FULL);
  ATOMIC_FETCH_DEC_FULL(&nb_threads);
  /* Leave any remaining memory for another thread to clean up */
}

/*
 * Set new epoch (to be called by each thread, typically when starting
 * new transactions to indicate their start timestamp).
 */
void gc_set_epoch(gc_word_t epoch)
{
  int idx = gc_get_idx();

  PRINT_DEBUG("==> gc_set_epoch(%d,%lu)\n", idx, (unsigned long)epoch);

  if (epoch >= EPOCH_MAX) {
    fprintf(stderr, "Exceeded maximum epoch number: 0x%lx\n", (unsigned long)epoch);
    /* Do nothing (will prevent data from being garbage collected) */
    return;
  }

  /* Do not need a barrier as we only compute lower bounds */
  ATOMIC_STORE(&threads[idx].ts, epoch);
}

/*
 * Free memory (the thread must indicate the current timestamp).
 */
void gc_free(void *addr, gc_word_t epoch)
{
  mem_region_t *mr;
  mem_block_t *mb;
  int idx = gc_get_idx();

  PRINT_DEBUG("==> gc_free(%d,%lu)\n", idx, (unsigned long)epoch);

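  /*
   * Defer reclamation: the block is linked into a region whose timestamp is
   * at least the freeing epoch.  gc_cleanup()/gc_cleanup_all() release the
   * region only once the minimum start timestamp of all active transactions
   * exceeds that timestamp, so no live transaction can still reach the block.
   */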
  if (threads[idx].head == NULL || threads[idx].head->ts < epoch) {
    /* Allocate a new region */
    if ((mr = (mem_region_t *)malloc(sizeof(mem_region_t))) == NULL) {
      perror("malloc");
      exit(1);
    }
    mr->ts = epoch;
    mr->blocks = NULL;
    mr->next = NULL;
    if (threads[idx].head == NULL) {
      threads[idx].head = threads[idx].tail = mr;
    } else {
      threads[idx].tail->next = mr;
      threads[idx].tail = mr;
    }
  } else {
    /* Add to current region */
    mr = threads[idx].head;
  }

  /* Allocate block */
  if ((mb = (mem_block_t *)malloc(sizeof(mem_block_t))) == NULL) {
    perror("malloc");
    exit(1);
  }
  mb->addr = addr;
  mb->next = mr->blocks;
  mr->blocks = mb;

#ifndef NO_PERIODIC_CLEANUP
  threads[idx].frees++;
  if (threads[idx].frees % CLEANUP_FREQUENCY == 0)
    gc_cleanup();
#endif /* ! NO_PERIODIC_CLEANUP */
}

/*
 * Garbage-collect old data associated with the current thread (should
 * be called periodically).
 */
void gc_cleanup()
{
  gc_word_t min;
  int idx = gc_get_idx();

  PRINT_DEBUG("==> gc_cleanup(%d)\n", idx);

  if (threads[idx].head == NULL) {
    /* Nothing to clean up */
    return;
  }

  min = gc_compute_min(current_epoch());

  gc_cleanup_thread(idx, min);
}

/*
 * Garbage-collect old data associated with all threads (should be
 * called periodically).
 */
void gc_cleanup_all()
{
  int i;
  gc_word_t min = EPOCH_MAX;

  PRINT_DEBUG("==> gc_cleanup_all()\n");

  for (i = 0; i < MAX_THREADS; i++) {
    if ((gc_word_t)ATOMIC_LOAD(&threads[i].used) == GC_NULL)
      break;
    if ((gc_word_t)ATOMIC_LOAD(&threads[i].used) == GC_FREE_FULL) {
      if (ATOMIC_CAS_FULL(&threads[i].used, GC_FREE_FULL, GC_BUSY) != 0) {
        if (min == EPOCH_MAX)
          min = gc_compute_min(current_epoch());
        gc_cleanup_thread(i, min);
        ATOMIC_STORE(&threads[i].used, threads[i].head == NULL ? GC_FREE_EMPTY : GC_FREE_FULL);
      }
    }
  }
}

/*
 * Reset all epochs for all threads (must be called with all threads
 * stopped and out of transactions, e.g., upon roll-over).
 */
void gc_reset()
{
  int i;

  PRINT_DEBUG("==> gc_reset()\n");

  assert(nb_threads == 0);

  for (i = 0; i < MAX_THREADS; i++) {
    if (threads[i].used == GC_NULL)
      break;
    gc_clean_regions(threads[i].head);
    threads[i].ts = EPOCH_MAX;
    threads[i].head = threads[i].tail = NULL;
#ifndef NO_PERIODIC_CLEANUP
    threads[i].frees = 0;
#endif /* ! NO_PERIODIC_CLEANUP */
  }
}
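
/*
 * Illustrative usage sketch (guarded by #if 0, not part of the build).  It
 * assumes a trivial global counter as the epoch source; in Mnemosyne/TinySTM
 * the epoch would come from the STM's global clock.  All names below
 * (my_clock, my_epoch, worker) are hypothetical.
 */
#if 0
static volatile gc_word_t my_clock = 0;

static gc_word_t my_epoch()
{
  return (gc_word_t)ATOMIC_LOAD(&my_clock);
}

static void *worker(void *arg)
{
  gc_init_thread();                  /* Claim a descriptor slot */
  gc_set_epoch(my_epoch());          /* Publish this thread's start timestamp */
  gc_free(malloc(64), my_epoch());   /* Defer reclamation of a block */
  gc_cleanup();                      /* Reclaim regions no longer visible */
  gc_exit_thread();                  /* Release the slot */
  return NULL;
}

int main()
{
  gc_init(my_epoch);
  /* ... create worker threads with pthread_create() and join them ... */
  gc_exit();
  return 0;
}
#endif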
