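/*
  Memory allocator based on Doug Lea's malloc (dlmalloc). Allocated
  chunks carry boundary tags; free chunks live in size-ordered bins
  plus an "unsorted" list and singly linked fastbins; requests above
  mmap_threshold are served directly by mmap. When compiled for WIN32,
  the Unix primitives the allocator relies on (sbrk, mmap, munmap) are
  emulated by the functions declared below.
*/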
#ifdef WIN32

#define WIN32_LEAN_AND_MEAN
#include <windows.h>

/* Win32 lacks these headers */
#define LACKS_UNISTD_H
#define LACKS_SYS_PARAM_H
#define LACKS_SYS_MMAN_H

/* Use the sbrk emulation declared below */
#define MORECORE sbrk
#define MORECORE_CONTIGUOUS 1
#define MORECORE_FAILURE ((void*)(-1))

/* Use the mmap/munmap emulation declared below */
#define HAVE_MMAP 1
#define MUNMAP_FAILURE (-1)
#define MMAP_CLEARS 1

/* Arbitrary flag values for the mmap emulation */
#define MAP_PRIVATE 1
#define MAP_ANONYMOUS 2
#define PROT_READ 1
#define PROT_WRITE 2

#define USE_MALLOC_LOCK

/* Spin-lock helpers used when USE_MALLOC_LOCK is defined */
#ifdef USE_MALLOC_LOCK
static int slwait(int *sl);
static int slrelease(int *sl);
#endif

/* Win32 emulations of the Unix memory primitives */
static long getpagesize(void);
static long getregionsize(void);
void *sbrk(long size);
static void *mmap(void *ptr, long size, long prot, long type, long handle, long arg);
static long munmap(void *ptr, long size);

static void vminfo(unsigned long *free, unsigned long *reserved, unsigned long *committed);
static int cpuinfo(int whole, unsigned long *kernel, unsigned long *user);

#endif /* WIN32 */

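/*
  __STD_C is 1 when compiling under ANSI C or C++, selecting real
  prototypes below; Void_t is the pointer-target type used by the
  allocation routines (void under ANSI C, char for pre-ANSI compilers).
*/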
#ifndef __STD_C
#if defined(__STDC__) || defined(__cplusplus)
#define __STD_C 1
#else
#define __STD_C 0
#endif
#endif

#ifndef Void_t
#if (__STD_C || defined(WIN32))
#define Void_t void
#else
#define Void_t char
#endif
#endif

#if __STD_C
#include <stddef.h>   /* for size_t */
#else
#include <sys/types.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

#ifndef LACKS_UNISTD_H
#include <unistd.h>
#endif

#include <stdio.h>    /* needed for malloc_stats */
#include <errno.h>    /* needed for MALLOC_FAILURE_ACTION */

/* Assertions compile away unless DEBUG is defined */
#if DEBUG
#include <assert.h>
#else
#define assert(x) ((void)0)
#endif

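/*
  INTERNAL_SIZE_T is the type used for chunk header fields; SIZE_SZ is
  its width in bytes. All returned pointers are aligned to
  MALLOC_ALIGNMENT (two size words, i.e. 8 bytes on 32-bit systems),
  and MALLOC_ALIGN_MASK is the corresponding mask used in the
  alignment arithmetic below.
*/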
#ifndef INTERNAL_SIZE_T
#define INTERNAL_SIZE_T size_t
#endif

#define SIZE_SZ (sizeof(INTERNAL_SIZE_T))

#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT (2 * SIZE_SZ)
#endif

#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)

/*
  TRIM_FASTBINS controls whether free() of a very small chunk that
  borders the top of memory can trigger trimming; 0 keeps such chunks
  in fastbins.
*/
#ifndef TRIM_FASTBINS
#define TRIM_FASTBINS 0
#endif

/* Export entry points with a "dl" prefix (dlmalloc, dlfree, ...) */
#define USE_DL_PREFIX

#ifdef USE_MALLOC_LOCK
#define USE_PUBLIC_MALLOC_WRAPPERS
#endif

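/*
  Naming scheme: the implementation uses internal names such as mALLOc
  and fREe. Without USE_PUBLIC_MALLOC_WRAPPERS those map directly onto
  the public_* names; with wrappers, the public_* functions acquire a
  lock and call the internal versions. The public_* names in turn
  become either dl-prefixed (dlmalloc, ...) or the standard C names,
  depending on USE_DL_PREFIX.
*/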
#ifndef USE_PUBLIC_MALLOC_WRAPPERS
#define cALLOc      public_cALLOc
#define fREe        public_fREe
#define cFREe       public_cFREe
#define mALLOc      public_mALLOc
#define mEMALIGn    public_mEMALIGn
#define rEALLOc     public_rEALLOc
#define vALLOc      public_vALLOc
#define pVALLOc     public_pVALLOc
#define mALLINFo    public_mALLINFo
#define mALLOPt     public_mALLOPt
#define mTRIm       public_mTRIm
#define mSTATs      public_mSTATs
#define mUSABLe     public_mUSABLe
#define iCALLOc     public_iCALLOc
#define iCOMALLOc   public_iCOMALLOc
#endif

#ifdef USE_DL_PREFIX
#define public_cALLOc    dlcalloc
#define public_fREe      dlfree
#define public_cFREe     dlcfree
#define public_mALLOc    dlmalloc
#define public_mEMALIGn  dlmemalign
#define public_rEALLOc   dlrealloc
#define public_vALLOc    dlvalloc
#define public_pVALLOc   dlpvalloc
#define public_mALLINFo  dlmallinfo
#define public_mALLOPt   dlmallopt
#define public_mTRIm     dlmalloc_trim
#define public_mSTATs    dlmalloc_stats
#define public_mUSABLe   dlmalloc_usable_size
#define public_iCALLOc   dlindependent_calloc
#define public_iCOMALLOc dlindependent_comalloc
#else
#define public_cALLOc    calloc
#define public_fREe      free
#define public_cFREe     cfree
#define public_mALLOc    malloc
#define public_mEMALIGn  memalign
#define public_rEALLOc   realloc
#define public_vALLOc    valloc
#define public_pVALLOc   pvalloc
#define public_mALLINFo  mallinfo
#define public_mALLOPt   mallopt
#define public_mTRIm     malloc_trim
#define public_mSTATs    malloc_stats
#define public_mUSABLe   malloc_usable_size
#define public_iCALLOc   independent_calloc
#define public_iCOMALLOc independent_comalloc
#endif

/* HAVE_MEMCPY should be defined when memcpy/memset are available and fast */
#ifndef USE_MEMCPY
#ifdef HAVE_MEMCPY
#define USE_MEMCPY 1
#else
#define USE_MEMCPY 0
#endif
#endif

#if (__STD_C || defined(HAVE_MEMCPY))

#ifdef WIN32
/* On Win32, memset and memcpy are already declared in windows.h */
#else
#if __STD_C
void* memset(void*, int, size_t);
void* memcpy(void*, const void*, size_t);
#else
Void_t* memset();
Void_t* memcpy();
#endif
#endif
#endif

/* Action taken on allocation failure: set errno under ANSI C */
#ifndef MALLOC_FAILURE_ACTION
#if __STD_C
#define MALLOC_FAILURE_ACTION \
   errno = ENOMEM;

#else
#define MALLOC_FAILURE_ACTION
#endif
#endif

/* Declare sbrk ourselves if unistd.h is unavailable (except on BSDs) */
#ifdef LACKS_UNISTD_H
#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
#if __STD_C
extern Void_t* sbrk(ptrdiff_t);
#else
extern Void_t* sbrk();
#endif
#endif
#endif

/* MORECORE is the routine used to obtain more memory from the system */
#ifndef MORECORE
#define MORECORE sbrk
#endif

/* Value returned by MORECORE on failure */
#ifndef MORECORE_FAILURE
#define MORECORE_FAILURE (-1)
#endif

/* Nonzero if consecutive MORECORE calls return contiguous memory */
#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#endif

/*
  HAVE_MMAP enables use of mmap both for very large requests and as a
  backup when MORECORE fails. MMAP_CLEARS says whether mmapped memory
  arrives zeroed (anonymous mappings normally do), which lets calloc
  skip clearing it.
*/
#ifndef HAVE_MMAP
#define HAVE_MMAP 1

#ifndef MMAP_CLEARS
#define MMAP_CLEARS 1
#endif

#else
#ifndef MMAP_CLEARS
#define MMAP_CLEARS 0
#endif
#endif

/* Minimum size for an mmap region used in place of MORECORE */
#ifndef MMAP_AS_MORECORE_SIZE
#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
#endif

/* mremap (Linux) allows cheap in-place resizing of mmapped regions */
#ifndef HAVE_MREMAP
#ifdef linux
#define HAVE_MREMAP 1
#else
#define HAVE_MREMAP 0
#endif
#endif

/*
  malloc_getpagesize: try, in order, sysconf, a system getpagesize(),
  and various historical constants, falling back to 4096.
*/
#ifndef malloc_getpagesize

#ifndef LACKS_UNISTD_H
#  include <unistd.h>
#endif

#  ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
#    ifndef _SC_PAGE_SIZE
#      define _SC_PAGE_SIZE _SC_PAGESIZE
#    endif
#  endif

#  ifdef _SC_PAGE_SIZE
#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
#  else
#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
       extern size_t getpagesize();
#      define malloc_getpagesize getpagesize()
#    else
#      ifdef WIN32 /* use supplied emulation of getpagesize */
#        define malloc_getpagesize getpagesize()
#      else
#        ifndef LACKS_SYS_PARAM_H
#          include <sys/param.h>
#        endif
#        ifdef EXEC_PAGESIZE
#          define malloc_getpagesize EXEC_PAGESIZE
#        else
#          ifdef NBPG
#            ifndef CLSIZE
#              define malloc_getpagesize NBPG
#            else
#              define malloc_getpagesize (NBPG * CLSIZE)
#            endif
#          else
#            ifdef NBPC
#              define malloc_getpagesize NBPC
#            else
#              ifdef PAGESIZE
#                define malloc_getpagesize PAGESIZE
#              else /* just guess */
#                define malloc_getpagesize (4096)
#              endif
#            endif
#          endif
#        endif
#      endif
#    endif
#  endif
#endif

/*
  mallinfo: SVID/XPG-style summary of malloc state. If a system
  malloc.h exists it is used (define HAVE_USR_INCLUDE_MALLOC_H);
  otherwise the struct is declared here.
*/
#ifdef HAVE_USR_INCLUDE_MALLOC_H
#include "/usr/include/malloc.h"
#else

struct mallinfo {
  int arena;    /* non-mmapped space allocated from system */
  int ordblks;  /* number of free chunks */
  int smblks;   /* number of fastbin blocks */
  int hblks;    /* number of mmapped regions */
  int hblkhd;   /* space in mmapped regions */
  int usmblks;  /* maximum total allocated space */
  int fsmblks;  /* space available in freed fastbin blocks */
  int uordblks; /* total allocated space */
  int fordblks; /* total free space */
  int keepcost; /* top-most, releasable (via malloc_trim) space */
};

#endif

/* malloc(n): return a pointer to a newly allocated chunk of at least n bytes */
#if __STD_C
Void_t* public_mALLOc(size_t);
#else
Void_t* public_mALLOc();
#endif

/* free(p): release chunk p, which must have come from malloc/realloc (or be null) */
#if __STD_C
void public_fREe(Void_t*);
#else
void public_fREe();
#endif

/* calloc(n, size): allocate n*size bytes, zero-filled */
#if __STD_C
Void_t* public_cALLOc(size_t, size_t);
#else
Void_t* public_cALLOc();
#endif

/* realloc(p, n): resize chunk p to at least n bytes, copying contents as needed */
#if __STD_C
Void_t* public_rEALLOc(Void_t*, size_t);
#else
Void_t* public_rEALLOc();
#endif

/* memalign(alignment, n): allocate n bytes aligned to the given power of two */
#if __STD_C
Void_t* public_mEMALIGn(size_t, size_t);
#else
Void_t* public_mEMALIGn();
#endif

/* valloc(n): allocate n bytes on a page boundary */
#if __STD_C
Void_t* public_vALLOc(size_t);
#else
Void_t* public_vALLOc();
#endif

/* mallopt(param, value): tune the M_* parameters defined below */
#if __STD_C
int public_mALLOPt(int, int);
#else
int public_mALLOPt();
#endif

/* mallinfo(): report current malloc statistics */
#if __STD_C
struct mallinfo public_mALLINFo(void);
#else
struct mallinfo public_mALLINFo();
#endif

/* independent_calloc(n, size, chunks): allocate n same-sized zeroed chunks at once */
#if __STD_C
Void_t** public_iCALLOc(size_t, size_t, Void_t**);
#else
Void_t** public_iCALLOc();
#endif

/* independent_comalloc(n, sizes, chunks): allocate n chunks of the given sizes at once */
#if __STD_C
Void_t** public_iCOMALLOc(size_t, size_t*, Void_t**);
#else
Void_t** public_iCOMALLOc();
#endif

/* pvalloc(n): allocate n bytes rounded up to a whole number of pages */
#if __STD_C
Void_t* public_pVALLOc(size_t);
#else
Void_t* public_pVALLOc();
#endif

/* cfree(p): equivalent to free(p); provided for non-Unix systems that need it */
#if __STD_C
void public_cFREe(Void_t*);
#else
void public_cFREe();
#endif

/* malloc_trim(pad): release unused top memory back to the system, keeping pad bytes */
#if __STD_C
int public_mTRIm(size_t);
#else
int public_mTRIm();
#endif

/* malloc_usable_size(p): number of usable bytes in the chunk holding p */
#if __STD_C
size_t public_mUSABLe(Void_t*);
#else
size_t public_mUSABLe();
#endif
/* malloc_stats(): print brief statistics on stderr */
#if __STD_C
void public_mSTATs(void);
#else
void public_mSTATs();
#endif

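/*
  Tunable parameters for mallopt. Each M_* constant names a parameter;
  the DEFAULT_* macros give its initial value: max_fast (largest
  request served from fastbins), trim_threshold (amount of free top
  memory that triggers releasing it back via MORECORE), top_pad (extra
  padding requested on each system allocation), mmap_threshold
  (requests at least this large use mmap directly), and mmap_max
  (maximum number of simultaneous mmapped regions).
*/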
#ifndef M_MXFAST
#define M_MXFAST 1
#endif

#ifndef DEFAULT_MXFAST
#define DEFAULT_MXFAST 64
#endif

#define M_TRIM_THRESHOLD -1

#ifndef DEFAULT_TRIM_THRESHOLD
#define DEFAULT_TRIM_THRESHOLD (1024 * 1024 * 1024)
#endif

#define M_TOP_PAD -2

#ifndef DEFAULT_TOP_PAD
#define DEFAULT_TOP_PAD (0)
#endif

#define M_MMAP_THRESHOLD -3

#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD (128 * 1024)
#endif

#define M_MMAP_MAX -4

#ifndef DEFAULT_MMAP_MAX
#if HAVE_MMAP
#define DEFAULT_MMAP_MAX (65536)
#else
#define DEFAULT_MMAP_MAX (0)
#endif
#endif

#ifdef __cplusplus
}; /* end of extern "C" */
#endif

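/*
  When USE_PUBLIC_MALLOC_WRAPPERS is set, the core routines are
  declared static and each public_* entry point brackets its internal
  counterpart with MALLOC_PREACTION (acquire lock) and
  MALLOC_POSTACTION (release lock), returning failure if the lock
  cannot be taken.
*/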
#ifdef USE_PUBLIC_MALLOC_WRAPPERS

#if __STD_C
static Void_t*  mALLOc(size_t);
static void     fREe(Void_t*);
static Void_t*  rEALLOc(Void_t*, size_t);
static Void_t*  mEMALIGn(size_t, size_t);
static Void_t*  vALLOc(size_t);
static Void_t*  pVALLOc(size_t);
static Void_t*  cALLOc(size_t, size_t);
static Void_t** iCALLOc(size_t, size_t, Void_t**);
static Void_t** iCOMALLOc(size_t, size_t*, Void_t**);
static void     cFREe(Void_t*);
static int      mTRIm(size_t);
static size_t   mUSABLe(Void_t*);
static void     mSTATs();
static int      mALLOPt(int, int);
static struct mallinfo mALLINFo(void);
#else
static Void_t*  mALLOc();
static void     fREe();
static Void_t*  rEALLOc();
static Void_t*  mEMALIGn();
static Void_t*  vALLOc();
static Void_t*  pVALLOc();
static Void_t*  cALLOc();
static Void_t** iCALLOc();
static Void_t** iCOMALLOc();
static void     cFREe();
static int      mTRIm();
static size_t   mUSABLe();
static void     mSTATs();
static int      mALLOPt();
static struct mallinfo mALLINFo();
#endif

#ifdef USE_MALLOC_LOCK

#ifdef WIN32
/* Win32: spin lock on a shared integer */
static int mALLOC_MUTEx;
#define MALLOC_PREACTION  slwait(&mALLOC_MUTEx)
#define MALLOC_POSTACTION slrelease(&mALLOC_MUTEx)

#else
/* POSIX: a single global pthread mutex */
#include <pthread.h>

static pthread_mutex_t mALLOC_MUTEx = PTHREAD_MUTEX_INITIALIZER;

#define MALLOC_PREACTION  pthread_mutex_lock(&mALLOC_MUTEx)
#define MALLOC_POSTACTION pthread_mutex_unlock(&mALLOC_MUTEx)

#endif

#else

/* No locking: pre/post actions are no-ops */
#define MALLOC_PREACTION  (0)
#define MALLOC_POSTACTION (0)

#endif

Void_t* public_mALLOc(size_t bytes) {
  Void_t* m;
  if (MALLOC_PREACTION != 0) {
    return 0;
  }
  m = mALLOc(bytes);
  if (MALLOC_POSTACTION != 0) {
  }
  return m;
}

void public_fREe(Void_t* m) {
  if (MALLOC_PREACTION != 0) {
    return;
  }
  fREe(m);
  if (MALLOC_POSTACTION != 0) {
  }
}

Void_t* public_rEALLOc(Void_t* m, size_t bytes) {
  if (MALLOC_PREACTION != 0) {
    return 0;
  }
  m = rEALLOc(m, bytes);
  if (MALLOC_POSTACTION != 0) {
  }
  return m;
}

Void_t* public_mEMALIGn(size_t alignment, size_t bytes) {
  Void_t* m;
  if (MALLOC_PREACTION != 0) {
    return 0;
  }
  m = mEMALIGn(alignment, bytes);
  if (MALLOC_POSTACTION != 0) {
  }
  return m;
}

Void_t* public_vALLOc(size_t bytes) {
  Void_t* m;
  if (MALLOC_PREACTION != 0) {
    return 0;
  }
  m = vALLOc(bytes);
  if (MALLOC_POSTACTION != 0) {
  }
  return m;
}

Void_t* public_pVALLOc(size_t bytes) {
  Void_t* m;
  if (MALLOC_PREACTION != 0) {
    return 0;
  }
  m = pVALLOc(bytes);
  if (MALLOC_POSTACTION != 0) {
  }
  return m;
}

Void_t* public_cALLOc(size_t n, size_t elem_size) {
  Void_t* m;
  if (MALLOC_PREACTION != 0) {
    return 0;
  }
  m = cALLOc(n, elem_size);
  if (MALLOC_POSTACTION != 0) {
  }
  return m;
}

Void_t** public_iCALLOc(size_t n, size_t elem_size, Void_t** chunks) {
  Void_t** m;
  if (MALLOC_PREACTION != 0) {
    return 0;
  }
  m = iCALLOc(n, elem_size, chunks);
  if (MALLOC_POSTACTION != 0) {
  }
  return m;
}

Void_t** public_iCOMALLOc(size_t n, size_t sizes[], Void_t** chunks) {
  Void_t** m;
  if (MALLOC_PREACTION != 0) {
    return 0;
  }
  m = iCOMALLOc(n, sizes, chunks);
  if (MALLOC_POSTACTION != 0) {
  }
  return m;
}

void public_cFREe(Void_t* m) {
  if (MALLOC_PREACTION != 0) {
    return;
  }
  cFREe(m);
  if (MALLOC_POSTACTION != 0) {
  }
}

int public_mTRIm(size_t s) {
  int result;
  if (MALLOC_PREACTION != 0) {
    return 0;
  }
  result = mTRIm(s);
  if (MALLOC_POSTACTION != 0) {
  }
  return result;
}

size_t public_mUSABLe(Void_t* m) {
  size_t result;
  if (MALLOC_PREACTION != 0) {
    return 0;
  }
  result = mUSABLe(m);
  if (MALLOC_POSTACTION != 0) {
  }
  return result;
}

void public_mSTATs() {
  if (MALLOC_PREACTION != 0) {
    return;
  }
  mSTATs();
  if (MALLOC_POSTACTION != 0) {
  }
}

struct mallinfo public_mALLINFo() {
  struct mallinfo m;
  if (MALLOC_PREACTION != 0) {
    struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    return nm;
  }
  m = mALLINFo();
  if (MALLOC_POSTACTION != 0) {
  }
  return m;
}

int public_mALLOPt(int p, int v) {
  int result;
  if (MALLOC_PREACTION != 0) {
    return 0;
  }
  result = mALLOPt(p, v);
  if (MALLOC_POSTACTION != 0) {
  }
  return result;
}

#endif

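/*
  MALLOC_COPY and MALLOC_ZERO: when a real memcpy/memset is available
  they are used directly; otherwise the fallbacks below copy or clear
  word-by-word with an unrolled, Duff's-device-style loop (the switch
  dispatches into the middle of the loop to handle the remainder words,
  then the loop body handles eight words per iteration).
*/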
#if USE_MEMCPY

#define MALLOC_COPY(dest, src, nbytes) memcpy(dest, src, nbytes)
#define MALLOC_ZERO(dest, nbytes)      memset(dest, 0, nbytes)

#else

#define MALLOC_ZERO(charp, nbytes)                                    \
do {                                                                  \
  INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                   \
  unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);             \
  long mcn;                                                           \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }     \
  switch (mctmp) {                                                    \
    case 0: for(;;) { *mzp++ = 0;                                     \
    case 7:           *mzp++ = 0;                                     \
    case 6:           *mzp++ = 0;                                     \
    case 5:           *mzp++ = 0;                                     \
    case 4:           *mzp++ = 0;                                     \
    case 3:           *mzp++ = 0;                                     \
    case 2:           *mzp++ = 0;                                     \
    case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }        \
  }                                                                   \
} while(0)

#define MALLOC_COPY(dest,src,nbytes)                                  \
do {                                                                  \
  INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                    \
  INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                   \
  unsigned long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T);             \
  long mcn;                                                           \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }     \
  switch (mctmp) {                                                    \
    case 0: for(;;) { *mcdst++ = *mcsrc++;                            \
    case 7:           *mcdst++ = *mcsrc++;                            \
    case 6:           *mcdst++ = *mcsrc++;                            \
    case 5:           *mcdst++ = *mcsrc++;                            \
    case 4:           *mcdst++ = *mcsrc++;                            \
    case 3:           *mcdst++ = *mcsrc++;                            \
    case 2:           *mcdst++ = *mcsrc++;                            \
    case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
  }                                                                   \
} while(0)

#endif

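/*
  MMAP wrapper: prefer anonymous mappings; on systems without
  MAP_ANONYMOUS (or its MAP_ANON synonym), map /dev/zero instead,
  opening it once and caching the descriptor.
*/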
#if HAVE_MMAP

#include <fcntl.h>
#ifndef LACKS_SYS_MMAN_H
#include <sys/mman.h>
#endif

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif

#ifndef MAP_ANONYMOUS

static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */

#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
 (dev_zero_fd = open("/dev/zero", O_RDWR), \
  mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
  mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))

#else

#define MMAP(addr, size, prot, flags) \
 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))

#endif

#endif /* HAVE_MMAP */

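/*
  Chunk representation. An in-use chunk looks like:

    prev_size | size of previous chunk, if the previous chunk is free
    size      | chunk size in bytes, with PREV_INUSE/IS_MMAPPED low bits
    mem ->    | user data starts here
    ...       | (the prev_size field of the *next* chunk is usable by
              |  this one while it is in use, so net overhead is one
              |  size word)

  Free chunks additionally store forward/back list pointers (fd/bk) in
  the first two words of what was user data, and replicate their size
  in the prev_size field of the following chunk (the boundary tag),
  which is what makes coalescing with either neighbor O(1).
*/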
struct malloc_chunk {

  INTERNAL_SIZE_T      prev_size;  /* Size of previous chunk (if free). */
  INTERNAL_SIZE_T      size;       /* Size in bytes, including overhead. */

  struct malloc_chunk* fd;         /* double links -- used only if free. */
  struct malloc_chunk* bk;
};

typedef struct malloc_chunk* mchunkptr;

/* conversion from malloc headers to user pointers, and back */
#define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))

/* The smallest possible chunk */
#define MIN_CHUNK_SIZE (sizeof(struct malloc_chunk))

/* The smallest size we can malloc is an aligned minimal chunk */
#define MINSIZE \
  (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))

/* Check if m has acceptable alignment */
#define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)

/*
  Check if a request is so large that it would wrap around zero when
  padded and aligned. The bound is made low enough that adding MINSIZE
  will also not wrap around zero.
*/
#define REQUEST_OUT_OF_RANGE(req) \
  ((unsigned long)(req) >= \
   (unsigned long)(INTERNAL_SIZE_T)(-2 * MINSIZE))

/* pad request bytes into a usable size -- internal version */
#define request2size(req) \
  (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
   MINSIZE : \
   ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)

/* Same, except also perform argument check */
#define checked_request2size(req, sz) \
  if (REQUEST_OUT_OF_RANGE(req)) { \
    MALLOC_FAILURE_ACTION; \
    return 0; \
  } \
  (sz) = request2size(req);

/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
#define PREV_INUSE 0x1

/* extract inuse bit of previous chunk */
#define prev_inuse(p) ((p)->size & PREV_INUSE)

/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap */
#define IS_MMAPPED 0x2

/* check for mmap()'ed chunk */
#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)

/* Bits to mask off when extracting size */
#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)

/* Get size, ignoring use bits */
#define chunksize(p) ((p)->size & ~(SIZE_BITS))

/* Ptr to next physical malloc_chunk */
#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))

/* Ptr to previous physical malloc_chunk */
#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))

/* Treat space at ptr + offset as a chunk */
#define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))

/* extract p's inuse bit (stored in the following chunk's header) */
#define inuse(p)\
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)

/* set/clear chunk as being inuse without otherwise disturbing */
#define set_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE

#define clear_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)

/* check/set/clear inuse bits in known places */
#define inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)

#define set_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)

#define clear_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))

/* Set size at head, without disturbing its use bit */
#define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s)))

/* Set size/use field */
#define set_head(p, s) ((p)->size = (s))

/* Set size at footer (only when chunk is not in use) */
#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))

/*
  Bins: an array of chunk lists, each circular and doubly linked.
  Large bins are kept sorted by decreasing size (smallbins hold a
  single size each, so need no sorting); within a size, chunks are
  reused in approximately least-recently-used order. Because each bin
  header uses only its fd/bk fields, headers are packed into the bins
  array at half-chunk spacing, and bin_at compensates for the two
  missing size fields.
*/
typedef struct malloc_chunk* mbinptr;

/* addressing -- note that bin_at(0) does not exist */
#define bin_at(m, i) ((mbinptr)((char*)&((m)->bins[(i)<<1]) - (SIZE_SZ<<1)))

/* analog of ++bin */
#define next_bin(b) ((mbinptr)((char*)(b) + (sizeof(mchunkptr)<<1)))

/* Reminders about list directionality within bins */
#define first(b) ((b)->fd)
#define last(b)  ((b)->bk)

/* Take a chunk off a bin list */
#define unlink(P, BK, FD) { \
  FD = P->fd;               \
  BK = P->bk;               \
  FD->bk = BK;              \
  BK->fd = FD;              \
}

/*
  Indexing: bins for sizes below 512 bytes (the "smallbins") each hold
  chunks of exactly one size, spaced 8 bytes apart. Larger bins cover
  ranges of sizes, approximately logarithmically spaced.
*/
#define NBINS         128
#define NSMALLBINS     64
#define SMALLBIN_WIDTH  8
#define MIN_LARGE_SIZE 512

#define in_smallbin_range(sz) \
  ((unsigned long)(sz) < (unsigned long)MIN_LARGE_SIZE)

#define smallbin_index(sz) (((unsigned)(sz)) >> 3)

#define largebin_index(sz)                                                   \
(((((unsigned long)(sz)) >>  6) <= 32)?  56 + (((unsigned long)(sz)) >>  6): \
 ((((unsigned long)(sz)) >>  9) <= 20)?  91 + (((unsigned long)(sz)) >>  9): \
 ((((unsigned long)(sz)) >> 12) <= 10)? 110 + (((unsigned long)(sz)) >> 12): \
 ((((unsigned long)(sz)) >> 15) <=  4)? 119 + (((unsigned long)(sz)) >> 15): \
 ((((unsigned long)(sz)) >> 18) <=  2)? 124 + (((unsigned long)(sz)) >> 18): \
 126)

#define bin_index(sz) \
  ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))

/*
  Unsorted chunks: remainders from splits and all freshly freed
  (non-fastbin) chunks are first placed on the "unsorted" list (bin 1).
  malloc gives each such chunk one chance to be reused before moving it
  to its proper bin, which amortizes the cost of binning.
*/
#define unsorted_chunks(M) (bin_at(M, 1))

/*
  Top: the topmost free chunk, bordering the end of available memory.
  It is never placed in any bin. Initially top points at its own bin
  header, a legal but unusable "chunk" whose size field reads as zero
  (static memory starts zeroed), forcing the first malloc to obtain
  memory from the system.
*/
#define initial_top(M) (unsorted_chunks(M))

/*
  Binmap: one bit per bin recording whether the bin is possibly
  non-empty, so bin-by-bin searching can skip runs of empty bins.
  Bits are cleared lazily, when a bin is found empty during search.
*/
#define BINMAPSHIFT 5
#define BITSPERMAP  (1U << BINMAPSHIFT)
#define BINMAPSIZE  (NBINS / BITSPERMAP)

#define idx2block(i) ((i) >> BINMAPSHIFT)
#define idx2bit(i)   ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))

#define mark_bin(m,i)   ((m)->binmap[idx2block(i)] |=  idx2bit(i))
#define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
#define get_binmap(m,i) ((m)->binmap[idx2block(i)] &   idx2bit(i))

/*
  Fastbins: lists of recently freed small chunks, singly linked through
  fd. Fastbin chunks keep their inuse bits set, so they are never
  coalesced when freed; malloc_consolidate later sweeps them into
  normal bins, coalescing as it goes.
*/
typedef struct malloc_chunk* mfastbinptr;

/* offset 2 to use otherwise unindexable first 2 bins */
#define fastbin_index(sz) ((((unsigned int)(sz)) >> 3) - 2)

/* The maximum fastbin request size we support */
#define MAX_FAST_SIZE 80

#define NFASTBINS (fastbin_index(request2size(MAX_FAST_SIZE))+1)

/*
  Freeing a chunk at least this large triggers consolidation of all
  fastbin chunks (and possibly trimming), keeping fastbin space from
  accumulating while large blocks are being released.
*/
#define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)

/*
  Two status flags are packed into the low bits of max_fast, whose real
  value is always a multiple of 8, leaving those bits free.

  FASTCHUNKS_BIT is held clear while any fastbins might be non-empty;
  note the inverted sense in have_fastchunks.
*/
#define FASTCHUNKS_BIT (1U)

#define have_fastchunks(M)  (((M)->max_fast & FASTCHUNKS_BIT) == 0)
#define clear_fastchunks(M) ((M)->max_fast |= FASTCHUNKS_BIT)
#define set_fastchunks(M)   ((M)->max_fast &= ~FASTCHUNKS_BIT)

/*
  NONCONTIGUOUS_BIT records that MORECORE does not return contiguous
  regions, in which case sYSMALLOc skips the contiguity-dependent
  alignment adjustments.
*/
#define NONCONTIGUOUS_BIT (2U)

#define contiguous(M)        (((M)->max_fast & NONCONTIGUOUS_BIT) == 0)
#define noncontiguous(M)     (((M)->max_fast & NONCONTIGUOUS_BIT) != 0)
#define set_noncontiguous(M) ((M)->max_fast |= NONCONTIGUOUS_BIT)
#define set_contiguous(M)    ((M)->max_fast &= ~NONCONTIGUOUS_BIT)

/* Set the padded maximum fastbin size while preserving the flag bits */
#define set_max_fast(M, s) \
  (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
  FASTCHUNKS_BIT | \
  ((M)->max_fast & NONCONTIGUOUS_BIT)

struct malloc_state {

  /* The maximum chunk size eligible for fastbins (low 2 bits are flags) */
  INTERNAL_SIZE_T max_fast;

  /* Fastbins */
  mfastbinptr fastbins[NFASTBINS];

  /* Base of the topmost chunk -- not otherwise kept in a bin */
  mchunkptr top;

  /* The remainder from the most recent split of a small request */
  mchunkptr last_remainder;

  /* Normal bins, packed as described above */
  mchunkptr bins[NBINS * 2];

  /* Bitmap of (possibly) non-empty bins */
  unsigned int binmap[BINMAPSIZE];

  /* Tunable parameters */
  unsigned long   trim_threshold;
  INTERNAL_SIZE_T top_pad;
  INTERNAL_SIZE_T mmap_threshold;

  /* Memory map support */
  int n_mmaps;
  int n_mmaps_max;
  int max_n_mmaps;

  /* Cache of malloc_getpagesize */
  unsigned int pagesize;

  /* Statistics */
  INTERNAL_SIZE_T mmapped_mem;
  INTERNAL_SIZE_T sbrked_mem;
  INTERNAL_SIZE_T max_sbrked_mem;
  INTERNAL_SIZE_T max_mmapped_mem;
  INTERNAL_SIZE_T max_total_mem;
};

typedef struct malloc_state *mstate;

/*
  There is exactly one malloc_state in this implementation, statically
  allocated so the allocator needs no setup call. All fields start at
  zero, which malloc_init_state and the initial_top trick rely on.
*/
static struct malloc_state av_;

#define get_malloc_state() (&(av_))

/*
  malloc_init_state: initialize the given malloc_state. Called only
  from within malloc_consolidate, which runs first on any path that
  needs real state.
*/
#if __STD_C
static void malloc_init_state(mstate av)
#else
static void malloc_init_state(av) mstate av;
#endif
{
  int i;
  mbinptr bin;

  /* Establish circular links for normal bins */
  for (i = 1; i < NBINS; ++i) {
    bin = bin_at(av,i);
    bin->fd = bin->bk = bin;
  }

  av->top_pad = DEFAULT_TOP_PAD;
  av->n_mmaps_max = DEFAULT_MMAP_MAX;
  av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
  av->trim_threshold = DEFAULT_TRIM_THRESHOLD;

#if !MORECORE_CONTIGUOUS
  set_noncontiguous(av);
#endif

  set_max_fast(av, DEFAULT_MXFAST);

  av->top = initial_top(av);
  av->pagesize = malloc_getpagesize;
}

/* Other internal utilities operating on mstates */
#if __STD_C
static Void_t*  sYSMALLOc(INTERNAL_SIZE_T, mstate);
static int      sYSTRIm(size_t, mstate);
static void     malloc_consolidate(mstate);
static Void_t** iALLOc(size_t, size_t*, int, Void_t**);
#else
static Void_t*  sYSMALLOc();
static int      sYSTRIm();
static void     malloc_consolidate();
static Void_t** iALLOc();
#endif

/*
  Debugging support: when DEBUG is off the check_* hooks compile to
  nothing; when on, they run the exhaustive consistency checks below.
*/
#if ! DEBUG

#define check_chunk(P)
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_remalloced_chunk(P,N)
#define check_malloced_chunk(P,N)
#define check_malloc_state()

#else
#define check_chunk(P)              do_check_chunk(P)
#define check_free_chunk(P)         do_check_free_chunk(P)
#define check_inuse_chunk(P)        do_check_inuse_chunk(P)
#define check_remalloced_chunk(P,N) do_check_remalloced_chunk(P,N)
#define check_malloced_chunk(P,N)   do_check_malloced_chunk(P,N)
#define check_malloc_state()        do_check_malloc_state()

/* Properties of all chunks */
#if __STD_C
static void do_check_chunk(mchunkptr p)
#else
static void do_check_chunk(p) mchunkptr p;
#endif
{
  mstate av = get_malloc_state();
  unsigned long sz = chunksize(p);
  /* min and max possible addresses assuming contiguous allocation */
  char* max_address = (char*)(av->top) + chunksize(av->top);
  char* min_address = max_address - av->sbrked_mem;

  if (!chunk_is_mmapped(p)) {

    /* Has legal address ... */
    if (p != av->top) {
      if (contiguous(av)) {
        assert(((char*)p) >= min_address);
        assert(((char*)p + sz) <= ((char*)(av->top)));
      }
    }
    else {
      /* top size is always at least MINSIZE */
      assert((unsigned long)(sz) >= MINSIZE);
      /* top predecessor always marked inuse */
      assert(prev_inuse(p));
    }

  }
  else {
#if HAVE_MMAP
    /* address is outside main heap */
    if (contiguous(av) && av->top != initial_top(av)) {
      assert(((char*)p) < min_address || ((char*)p) > max_address);
    }
    /* chunk is page-aligned */
    assert(((p->prev_size + sz) & (av->pagesize-1)) == 0);
    /* mem is aligned */
    assert(aligned_OK(chunk2mem(p)));
#else
    /* force an appropriate assert violation if debug set */
    assert(!chunk_is_mmapped(p));
#endif
  }
}

/* Properties of free chunks */
#if __STD_C
static void do_check_free_chunk(mchunkptr p)
#else
static void do_check_free_chunk(p) mchunkptr p;
#endif
{
  mstate av = get_malloc_state();

  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  mchunkptr next = chunk_at_offset(p, sz);

  do_check_chunk(p);

  /* Chunk must claim to be free ... */
  assert(!inuse(p));
  assert(!chunk_is_mmapped(p));

  /* Unless a special marker, must have OK fields */
  if ((unsigned long)(sz) >= MINSIZE)
  {
    assert((sz & MALLOC_ALIGN_MASK) == 0);
    assert(aligned_OK(chunk2mem(p)));
    /* ... matching footer field */
    assert(next->prev_size == sz);
    /* ... and is fully consolidated */
    assert(prev_inuse(p));
    assert(next == av->top || inuse(next));

    /* ... and has minimally sane links */
    assert(p->fd->bk == p);
    assert(p->bk->fd == p);
  }
  else /* markers are always of size SIZE_SZ */
    assert(sz == SIZE_SZ);
}

/* Properties of inuse chunks */
#if __STD_C
static void do_check_inuse_chunk(mchunkptr p)
#else
static void do_check_inuse_chunk(p) mchunkptr p;
#endif
{
  mstate av = get_malloc_state();
  mchunkptr next;
  do_check_chunk(p);

  if (chunk_is_mmapped(p))
    return; /* mmapped chunks have no next/prev */

  /* Check whether it claims to be in use ... */
  assert(inuse(p));

  next = next_chunk(p);

  /*
    ... and is surrounded by OK chunks. Since more things can be
    checked with free chunks than inuse ones, if an inuse chunk
    borders them and debug is on, it's worth doing them.
  */
  if (!prev_inuse(p)) {
    /* Note that we cannot even look at prev unless it is not inuse */
    mchunkptr prv = prev_chunk(p);
    assert(next_chunk(prv) == p);
    do_check_free_chunk(prv);
  }

  if (next == av->top) {
    assert(prev_inuse(next));
    assert(chunksize(next) >= MINSIZE);
  }
  else if (!inuse(next))
    do_check_free_chunk(next);
}

/* Properties of chunks recycled from fastbins */
#if __STD_C
static void do_check_remalloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
#else
static void do_check_remalloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;

  do_check_inuse_chunk(p);

  /* Legal size ... */
  assert((sz & MALLOC_ALIGN_MASK) == 0);
  assert((unsigned long)(sz) >= MINSIZE);
  /* ... and alignment */
  assert(aligned_OK(chunk2mem(p)));
  /* chunk is less than MINSIZE more than request */
  assert((long)(sz) - (long)(s) >= 0);
  assert((long)(sz) - (long)(s + MINSIZE) < 0);
}

/* Properties of nonrecycled chunks at the point they are malloced */
#if __STD_C
static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
#else
static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
#endif
{
  /* same as recycled case ... */
  do_check_remalloced_chunk(p, s);

  /*
    ... plus, must obey implementation invariant that prev_inuse is
    always true of any allocated chunk; i.e., that each allocated
    chunk borders either a previously allocated and still in-use
    chunk, or the base of its memory arena.
  */
  assert(prev_inuse(p));
}

/*
  Properties of malloc_state. This may be useful for debugging malloc,
  as well as detecting user programmer errors that somehow write into
  malloc_state.
*/
static void do_check_malloc_state()
{
  mstate av = get_malloc_state();
  int i;
  mchunkptr p;
  mchunkptr q;
  mbinptr b;
  unsigned int binbit;
  int empty;
  unsigned int idx;
  INTERNAL_SIZE_T size;
  unsigned long total = 0;
  int max_fast_bin;

  /* internal size_t must be no wider than pointer type */
  assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));

  /* alignment is a power of 2 */
  assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);

  /* cannot run remaining checks until fully initialized */
  if (av->top == 0 || av->top == initial_top(av))
    return;

  /* pagesize is a power of 2 */
  assert((av->pagesize & (av->pagesize-1)) == 0);

  /* properties of fastbins */

  /* max_fast is in allowed range */
  assert((av->max_fast & ~1) <= request2size(MAX_FAST_SIZE));

  max_fast_bin = fastbin_index(av->max_fast);

  for (i = 0; i < NFASTBINS; ++i) {
    p = av->fastbins[i];

    /* all bins past max_fast are empty */
    if (i > max_fast_bin)
      assert(p == 0);

    while (p != 0) {
      /* each chunk claims to be inuse */
      do_check_inuse_chunk(p);
      total += chunksize(p);
      /* chunk belongs in this bin */
      assert(fastbin_index(chunksize(p)) == i);
      p = p->fd;
    }
  }

  if (total != 0)
    assert(have_fastchunks(av));
  else if (!have_fastchunks(av))
    assert(total == 0);

  /* check normal bins */
  for (i = 1; i < NBINS; ++i) {
    b = bin_at(av,i);

    /* binmap is accurate (except for bin 1 == unsorted_chunks) */
    if (i >= 2) {
      binbit = get_binmap(av,i);
      empty = last(b) == b;
      if (!binbit)
        assert(empty);
      else if (!empty)
        assert(binbit);
    }

    for (p = last(b); p != b; p = p->bk) {
      /* each chunk claims to be free */
      do_check_free_chunk(p);
      size = chunksize(p);
      total += size;
      if (i >= 2) {
        /* chunk belongs in bin */
        idx = bin_index(size);
        assert(idx == i);
        /* lists are sorted */
        assert(p->bk == b ||
               (unsigned long)chunksize(p->bk) >= (unsigned long)chunksize(p));
      }
      /* chunk is followed by a legal chain of inuse chunks */
      for (q = next_chunk(p);
           (q != av->top && inuse(q) &&
             (unsigned long)(chunksize(q)) >= MINSIZE);
           q = next_chunk(q))
        do_check_inuse_chunk(q);
    }
  }

  /* top chunk is OK */
  check_chunk(av->top);

  /* sanity checks for statistics */

  assert(total <= (unsigned long)(av->max_total_mem));
  assert(av->n_mmaps >= 0);
  assert(av->n_mmaps <= av->n_mmaps_max);
  assert(av->n_mmaps <= av->max_n_mmaps);

  assert((unsigned long)(av->sbrked_mem) <=
         (unsigned long)(av->max_sbrked_mem));

  assert((unsigned long)(av->mmapped_mem) <=
         (unsigned long)(av->max_mmapped_mem));

  assert((unsigned long)(av->max_total_mem) >=
         (unsigned long)(av->mmapped_mem) + (unsigned long)(av->sbrked_mem));
}
#endif /* DEBUG */

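/*
  sYSMALLOc: handle a malloc(nb) request when the top chunk cannot
  satisfy it. Either extend top via MORECORE (with mmap as a backup),
  or, for requests at or above mmap_threshold while mmap slots remain,
  allocate a dedicated mmapped region.
*/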
#if __STD_C
static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
#else
static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
#endif
{
  mchunkptr       old_top;        /* incoming value of av->top */
  INTERNAL_SIZE_T old_size;       /* its size */
  char*           old_end;        /* its end address */

  long            size;           /* arg to first MORECORE or mmap call */
  char*           brk;            /* return value from MORECORE */

  long            correction;     /* arg to 2nd MORECORE call */
  char*           snd_brk;        /* 2nd return value */

  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
  INTERNAL_SIZE_T end_misalign;   /* partial page left at end of new space */
  char*           aligned_brk;    /* aligned offset into brk */

  mchunkptr       p;              /* the allocated/returned chunk */
  mchunkptr       remainder;      /* remainder from allocation */
  unsigned long   remainder_size; /* its size */

  unsigned long   sum;            /* for updating stats */

  size_t          pagemask = av->pagesize - 1;

#if HAVE_MMAP
  /*
    If the request is big enough and mmap slots remain, serve it with a
    region of its own. This keeps large blocks from fragmenting the
    main arena and lets them be returned to the system individually.
  */
  if ((unsigned long)(nb) >= (unsigned long)(av->mmap_threshold) &&
      (av->n_mmaps < av->n_mmaps_max)) {

    char* mm; /* return value from mmap call */

    /* Round up to a whole number of pages, leaving room for a size field */
    size = (nb + SIZE_SZ + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;

    /* Don't try if size wraps around 0 */
    if ((unsigned long)(size) > (unsigned long)(nb)) {

#if linux
      /* This variant hints a fixed mapping address on Linux */
      mm = (char*)(MMAP((void *) 0x10000000, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
#else
      mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
#endif

      if (mm != (char*)(MORECORE_FAILURE)) {

        /*
          The offset to the start of the mmapped region is stored in
          the prev_size field of the chunk. This allows us to adjust
          the returned start address to meet alignment requirements
          here and in memalign(), and still be able to compute the
          proper address argument for later munmap in free() and
          realloc().
        */
        front_misalign = (INTERNAL_SIZE_T)chunk2mem(mm) & MALLOC_ALIGN_MASK;
        if (front_misalign > 0) {
          correction = MALLOC_ALIGNMENT - front_misalign;
          p = (mchunkptr)(mm + correction);
          p->prev_size = correction;
          set_head(p, (size - correction) | IS_MMAPPED);
        }
        else {
          p = (mchunkptr)mm;
          set_head(p, size | IS_MMAPPED);
        }

        /* update statistics */

        if (++av->n_mmaps > av->max_n_mmaps)
          av->max_n_mmaps = av->n_mmaps;

        sum = av->mmapped_mem += size;
        if (sum > (unsigned long)(av->max_mmapped_mem))
          av->max_mmapped_mem = sum;
        sum += av->sbrked_mem;
        if (sum > (unsigned long)(av->max_total_mem))
          av->max_total_mem = sum;

        check_chunk(p);

        return chunk2mem(p);
      }
    }
  }
#endif

  /* Record incoming configuration of top */

  old_top  = av->top;
  old_size = chunksize(old_top);
  old_end  = (char*)(chunk_at_offset(old_top, old_size));

  brk = snd_brk = (char*)(MORECORE_FAILURE);

  /*
    If not the first time through, we require old_size to be at least
    MINSIZE and to have prev_inuse set.
  */
  assert((old_top == initial_top(av) && old_size == 0) ||
         ((unsigned long) (old_size) >= MINSIZE &&
          prev_inuse(old_top)));

  /* Precondition: not enough current space to satisfy nb request */
  assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE));

  /* Precondition: all fastbins are consolidated */
  assert(!have_fastchunks(av));

  /* Request enough space for nb + pad + overhead */
  size = nb + av->top_pad + MINSIZE;

  /*
    If contiguous, we can subtract out existing space that we hope to
    combine with new space. We add it back later only if we don't
    actually get contiguous space.
  */
  if (contiguous(av))
    size -= old_size;

  /*
    Round to a multiple of page size. If MORECORE is not contiguous,
    this ensures that we only call it with whole-page arguments. And if
    MORECORE is contiguous and this is not the first time through, this
    preserves page-alignment of previous calls.
  */
  size = (size + pagemask) & ~pagemask;

  /* Don't call MORECORE if the argument is so big as to appear negative */
  if (size > 0)
    brk = (char*)(MORECORE(size));

  /*
    If mmap is available, try using it as a backup when MORECORE fails.
    This is worth doing on systems that have "holes" in address space,
    where sbrk cannot extend to give contiguous space, but space is
    available elsewhere.
  */
#if HAVE_MMAP
  if (brk == (char*)(MORECORE_FAILURE)) {

    /* Cannot merge with old top, so add its size back in */
    if (contiguous(av))
      size = (size + old_size + pagemask) & ~pagemask;

    /* If we are relying on mmap as backup, then use larger units */
    if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
      size = MMAP_AS_MORECORE_SIZE;

    /* Don't try if size wraps around 0 */
    if ((unsigned long)(size) > (unsigned long)(nb)) {

#if linux
      brk = (char*)(MMAP((void *) 0x10000000, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
#else
      brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
#endif

      if (brk != (char*)(MORECORE_FAILURE)) {

        /* We do not need, and cannot use, another sbrk call to find end */
        snd_brk = brk + size;

        /*
          Record that we no longer have a contiguous sbrk region. After
          the first time mmap is used as backup, we never again rely on
          contiguous space, since that could incorrectly bridge regions.
        */
        set_noncontiguous(av);
      }
    }
  }
#endif

  if (brk != (char*)(MORECORE_FAILURE)) {
    av->sbrked_mem += size;

    /* If MORECORE extends previous space, we can likewise extend top */
    if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
      set_head(old_top, (size + old_size) | PREV_INUSE);
    }

    /*
      Otherwise, make adjustments:

      * If the first time through or noncontiguous, we need to call
        sbrk just to find out where the end of memory lies.

      * We need to ensure that all returned chunks from malloc will
        meet MALLOC_ALIGNMENT.

      * If there was an intervening foreign sbrk, we will not be able
        to combine new space with existing space in old_top.

      * Almost all systems internally allocate whole pages at a time,
        so we might as well use the whole last page of the request:
        allocate enough more memory to hit a page boundary now, which
        in turn causes future contiguous calls to be page-aligned.
    */
    else {
      front_misalign = 0;
      end_misalign = 0;
      correction = 0;
      aligned_brk = brk;

      /* handle contiguous cases */
      if (contiguous(av)) {

        /* Guarantee alignment of the first new chunk made from this space */
        front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
        if (front_misalign > 0) {

          /*
            Skip over some bytes to arrive at an aligned position. We
            don't need to specially mark these wasted front bytes; they
            will never be accessed anyway because prev_inuse of av->top
            (and any chunk created from its start) is always true after
            initialization.
          */
          correction = MALLOC_ALIGNMENT - front_misalign;
          aligned_brk += correction;
        }

        /*
          If this isn't adjacent to existing space, then we will not be
          able to merge with old_top space, so must add to 2nd request.
        */
        correction += old_size;

        /* Extend the end address to hit a page boundary */
        end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
        correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;

        assert(correction >= 0);
        snd_brk = (char*)(MORECORE(correction));

        /*
          If we can't allocate correction, try to at least find out the
          current brk. It might be enough to proceed without failing.
          Note that if the second sbrk did NOT fail, we assume that
          space is contiguous with the first sbrk; this is safe unless
          the program is multithreaded without locks and a foreign sbrk
          occurred between our first and second calls.
        */
        if (snd_brk == (char*)(MORECORE_FAILURE)) {
          correction = 0;
          snd_brk = (char*)(MORECORE(0));
        }
      }

      /* handle non-contiguous cases */
      else {
        /* MORECORE/mmap must correctly align */
        assert(((unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK) == 0);

        /* Find out current end of memory if we haven't yet */
        if (snd_brk == (char*)(MORECORE_FAILURE)) {
          snd_brk = (char*)(MORECORE(0));
        }
      }

      /* Adjust top based on results of second sbrk */
      if (snd_brk != (char*)(MORECORE_FAILURE)) {
        av->top = (mchunkptr)aligned_brk;
        set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
        av->sbrked_mem += correction;

        /*
          If not the first time through, we either have a gap due to a
          foreign sbrk or a non-contiguous region. Insert a double
          fencepost at old_top to prevent consolidation with space we
          don't own. The fenceposts are artificial chunks marked as
          inuse and in any case too small to use.
        */
        if (old_size != 0) {
          /*
            Shrink old_top to insert fenceposts, keeping size a
            multiple of MALLOC_ALIGNMENT. We know there is at least
            enough space in old_top to do this.
          */
          old_size = (old_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
          set_head(old_top, old_size | PREV_INUSE);

          /*
            Note that the following assignments completely overwrite
            old_top when old_size was previously MINSIZE. This is
            intentional: we need the fencepost even if old_top
            otherwise gets lost.
          */
          chunk_at_offset(old_top, old_size          )->size =
            SIZE_SZ|PREV_INUSE;

          chunk_at_offset(old_top, old_size + SIZE_SZ)->size =
            SIZE_SZ|PREV_INUSE;

          /* If possible, release the rest */
          if (old_size >= MINSIZE) {
            fREe(chunk2mem(old_top));
          }

        }
      }
    }

    /* Update statistics */
    sum = av->sbrked_mem;
    if (sum > (unsigned long)(av->max_sbrked_mem))
      av->max_sbrked_mem = sum;

    sum += av->mmapped_mem;
    if (sum > (unsigned long)(av->max_total_mem))
      av->max_total_mem = sum;

    check_malloc_state();

    /* finally, do the allocation */
    p = av->top;
    size = chunksize(p);

    /* check that one of the above allocation paths succeeded */
    if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
      remainder_size = size - nb;
      remainder = chunk_at_offset(p, nb);
      av->top = remainder;
      set_head(p, nb | PREV_INUSE);
      set_head(remainder, remainder_size | PREV_INUSE);
      check_malloced_chunk(p, nb);
      return chunk2mem(p);
    }
  }

  /* catch all failure paths */
  MALLOC_FAILURE_ACTION;
  return 0;
}

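/*
  sYSTRIm: the inverse of sYSMALLOc. Give back, via a negative sbrk
  argument, whole pages of unused memory at the top of the heap,
  provided no foreign sbrk call has moved the break. Invoked from
  free() when the top chunk grows past trim_threshold.
*/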
#if __STD_C
static int sYSTRIm(size_t pad, mstate av)
#else
static int sYSTRIm(pad, av) size_t pad; mstate av;
#endif
{
  long  top_size;     /* Amount of top-most memory */
  long  extra;        /* Amount to release */
  long  released;     /* Amount actually released */
  char* current_brk;  /* address returned by pre-check sbrk call */
  char* new_brk;      /* address returned by post-check sbrk call */
  size_t pagesz;

  pagesz = av->pagesize;
  top_size = chunksize(av->top);

  /* Release in pagesize units, keeping at least one page */
  extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;

  if (extra > 0) {

    /*
      Only proceed if end of memory is where we last set it. This
      avoids problems if there were foreign sbrk calls.
    */
    current_brk = (char*)(MORECORE(0));
    if (current_brk == (char*)(av->top) + top_size) {

      /*
        Attempt to release memory. We ignore the return value of
        MORECORE and instead call again to find out where the new end
        of memory is. This avoids problems if the first call releases
        less than we asked, or if failure somehow altered the brk
        value. (We could still encounter problems if it altered brk in
        some very bad way, but the only thing we can do is adjust
        anyway, which will cause some downstream failure.)
      */
      MORECORE(-extra);
      new_brk = (char*)(MORECORE(0));

      if (new_brk != (char*)MORECORE_FAILURE) {
        released = (long)(current_brk - new_brk);

        if (released != 0) {
          /* Success. Adjust top. */
          av->sbrked_mem -= released;
          set_head(av->top, (top_size - released) | PREV_INUSE);
          check_malloc_state();
          return 1;
        }
      }
    }
  }
  return 0;
}

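/*
  mALLOc: the main allocation routine. Strategy, in order:
    1. serve tiny requests straight from the matching fastbin;
    2. serve small requests from the exact-size smallbin;
    3. scan the unsorted list, reusing an exact fit (or splitting the
       cached last_remainder) and binning everything else;
    4. best-fit search of the large bins, guided by the binmap;
    5. split the top chunk, or consolidate fastbins and retry, or fall
       back to sYSMALLOc.
*/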
#if __STD_C
Void_t* mALLOc(size_t bytes)
#else
Void_t* mALLOc(bytes) size_t bytes;
#endif
{
  mstate av = get_malloc_state();

  INTERNAL_SIZE_T nb;               /* normalized request size */
  unsigned int    idx;              /* associated bin index */
  mbinptr         bin;              /* associated bin */
  mfastbinptr*    fb;               /* associated fastbin */

  mchunkptr       victim;           /* inspected/selected chunk */
  INTERNAL_SIZE_T size;             /* its size */
  int             victim_index;     /* its bin index */

  mchunkptr       remainder;        /* remainder from a split */
  unsigned long   remainder_size;   /* its size */

  unsigned int    block;            /* bit map traverser */
  unsigned int    bit;              /* bit map traverser */
  unsigned int    map;              /* current word of binmap */

  mchunkptr       fwd;              /* misc temp for linking */
  mchunkptr       bck;              /* misc temp for linking */

  /*
    Convert the request size to internal form by adding SIZE_SZ bytes
    of overhead plus possibly more to obtain necessary alignment
    and/or a size of at least MINSIZE, the smallest allocatable size.
    checked_request2size traps (returning 0) request sizes that are so
    large that they wrap around zero when padded and aligned.
  */
  checked_request2size(bytes, nb);

  /*
    If the size qualifies as a fastbin, first check the corresponding
    bin. This code is safe to execute even if av is not yet
    initialized, so we can try it without checking, which saves time
    on this fast path.
  */
  if ((unsigned long)(nb) <= (unsigned long)(av->max_fast)) {
    fb = &(av->fastbins[(fastbin_index(nb))]);
    if ( (victim = *fb) != 0) {
      *fb = victim->fd;
      check_remalloced_chunk(victim, nb);
      return chunk2mem(victim);
    }
  }

  /*
    If a small request, check the regular bin. Since these "smallbins"
    hold one size each, no searching within bins is necessary. (For a
    large request, we need to wait until unsorted chunks are processed
    to find a best fit. But for small ones, fits are exact anyway, so
    we can check now, which is faster.)
  */
  if (in_smallbin_range(nb)) {
    idx = smallbin_index(nb);
    bin = bin_at(av,idx);

    if ( (victim = last(bin)) != bin) {
      if (victim == 0) /* initialization check */
        malloc_consolidate(av);
      else {
        bck = victim->bk;
        set_inuse_bit_at_offset(victim, nb);
        bin->bk = bck;
        bck->fd = bin;

        check_malloced_chunk(victim, nb);
        return chunk2mem(victim);
      }
    }
  }

  /*
    If a large request, consolidate fastbins before continuing. While
    it might look excessive to kill all fastbins before even seeing if
    there is space available, this avoids the fragmentation problems
    normally associated with fastbins. Also, in practice, programs
    tend to have runs of either small or large requests, but less
    often mixtures, so consolidation is not invoked all that often.
  */
  else {
    idx = largebin_index(nb);
    if (have_fastchunks(av))
      malloc_consolidate(av);
  }

  /*
    Process recently freed or remaindered chunks, taking one only if
    it is an exact fit, or, if this is a small request, the chunk is a
    remainder from the most recent non-exact fit. Place other
    traversed chunks in bins. Note that this step is the only place in
    any routine where chunks are placed in bins.
  */
  for(;;) {

    while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
      bck = victim->bk;
      size = chunksize(victim);

      /*
        If a small request, try to use the last remainder if it is the
        only chunk in the unsorted bin. This helps promote locality
        for runs of consecutive small requests. This is the only
        exception to best-fit, and applies only when there is no exact
        fit for a small chunk.
      */
      if (in_smallbin_range(nb) &&
          bck == unsorted_chunks(av) &&
          victim == av->last_remainder &&
          (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {

        /* split and reattach remainder */
        remainder_size = size - nb;
        remainder = chunk_at_offset(victim, nb);
        unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
        av->last_remainder = remainder;
        remainder->bk = remainder->fd = unsorted_chunks(av);

        set_head(victim, nb | PREV_INUSE);
        set_head(remainder, remainder_size | PREV_INUSE);
        set_foot(remainder, remainder_size);

        check_malloced_chunk(victim, nb);
        return chunk2mem(victim);
      }

      /* remove from unsorted list */
      unsorted_chunks(av)->bk = bck;
      bck->fd = unsorted_chunks(av);

      /* Take now instead of binning if exact fit */
      if (size == nb) {
        set_inuse_bit_at_offset(victim, size);
        check_malloced_chunk(victim, nb);
        return chunk2mem(victim);
      }

      /* place chunk in bin */
      if (in_smallbin_range(size)) {
        victim_index = smallbin_index(size);
        bck = bin_at(av, victim_index);
        fwd = bck->fd;
      }
      else {
        victim_index = largebin_index(size);
        bck = bin_at(av, victim_index);
        fwd = bck->fd;

        /* maintain large bins in sorted order */
        if (fwd != bck) {
          size |= PREV_INUSE; /* Or with inuse bit to speed comparisons */
          /* if smaller than smallest, bypass loop below */
          if ((unsigned long)(size) <= (unsigned long)(bck->bk->size)) {
            fwd = bck;
            bck = bck->bk;
          }
          else {
            while ((unsigned long)(size) < (unsigned long)(fwd->size))
              fwd = fwd->fd;
            bck = fwd->bk;
          }
        }
      }

      mark_bin(av, victim_index);
      victim->bk = bck;
      victim->fd = fwd;
      fwd->bk = victim;
      bck->fd = victim;
    }

    /*
      If a large request, scan through the chunks of the current bin
      in sorted order to find the smallest that fits. This is the only
      step where an unbounded number of chunks might be scanned
      without doing anything useful with them. However the lists tend
      to be short.
    */
    if (!in_smallbin_range(nb)) {
      bin = bin_at(av, idx);

      /* skip scan if empty or largest chunk is too small */
      if ((victim = last(bin)) != bin &&
          (unsigned long)(first(bin)->size) >= (unsigned long)(nb)) {

        while (((unsigned long)(size = chunksize(victim)) <
                (unsigned long)(nb)))
          victim = victim->bk;

        remainder_size = size - nb;
        unlink(victim, bck, fwd);

        /* Exhaust */
        if (remainder_size < MINSIZE) {
          set_inuse_bit_at_offset(victim, size);
          check_malloced_chunk(victim, nb);
          return chunk2mem(victim);
        }
        /* Split */
        else {
          remainder = chunk_at_offset(victim, nb);
          unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
          remainder->bk = remainder->fd = unsorted_chunks(av);
          set_head(victim, nb | PREV_INUSE);
          set_head(remainder, remainder_size | PREV_INUSE);
          set_foot(remainder, remainder_size);
          check_malloced_chunk(victim, nb);
          return chunk2mem(victim);
        }
      }
    }

    /*
      Search for a chunk by scanning bins, starting with the next
      largest bin. This search is strictly best-fit: the smallest
      (with ties going to approximately the least recently used) chunk
      that fits is selected. The bitmap avoids needing to check
      whether most blocks are nonempty.
    */
    ++idx;
    bin = bin_at(av,idx);
    block = idx2block(idx);
    map = av->binmap[block];
    bit = idx2bit(idx);

    for (;;) {

      /* Skip rest of block if there are no more set bits in this block */
      if (bit > map || bit == 0) {
        do {
          if (++block >= BINMAPSIZE) /* out of bins */
            goto use_top;
        } while ( (map = av->binmap[block]) == 0);

        bin = bin_at(av, (block << BINMAPSHIFT));
        bit = 1;
      }

      /* Advance to bin with set bit. There must be one. */
      while ((bit & map) == 0) {
        bin = next_bin(bin);
        bit <<= 1;
        assert(bit != 0);
      }

      /* Inspect the bin. It is likely to be non-empty */
      victim = last(bin);

      /* If a false alarm (empty bin), clear the bit */
      if (victim == bin) {
        av->binmap[block] = map &= ~bit; /* Write through */
        bin = next_bin(bin);
        bit <<= 1;
      }

      else {
        size = chunksize(victim);

        /* We know the first chunk in this bin is big enough to use */
        assert((unsigned long)(size) >= (unsigned long)(nb));

        remainder_size = size - nb;

        /* unlink */
        bck = victim->bk;
        bin->bk = bck;
        bck->fd = bin;

        /* Exhaust */
        if (remainder_size < MINSIZE) {
          set_inuse_bit_at_offset(victim, size);
          check_malloced_chunk(victim, nb);
          return chunk2mem(victim);
        }

        /* Split */
        else {
          remainder = chunk_at_offset(victim, nb);

          unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
          remainder->bk = remainder->fd = unsorted_chunks(av);

          /* advertise as last remainder */
          if (in_smallbin_range(nb))
            av->last_remainder = remainder;

          set_head(victim, nb | PREV_INUSE);
          set_head(remainder, remainder_size | PREV_INUSE);
          set_foot(remainder, remainder_size);
          check_malloced_chunk(victim, nb);
          return chunk2mem(victim);
        }
      }
    }

  use_top:
    /*
      If large enough, split off the chunk bordering the end of memory
      (held in av->top). Note that this is in accord with the best-fit
      search rule: av->top is treated as larger (and thus less well
      fitting) than any other available chunk, since it can be extended
      to be as large as necessary (up to system limitations).

      We require that av->top always exists (i.e., has size >= MINSIZE)
      after initialization, so if it would otherwise be exhausted by
      the current request, it is replenished. (The main reason for
      ensuring it exists is that we may need MINSIZE space to put in
      fenceposts in sysmalloc.)
    */
    victim = av->top;
    size = chunksize(victim);

    if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
      remainder_size = size - nb;
      remainder = chunk_at_offset(victim, nb);
      av->top = remainder;
      set_head(victim, nb | PREV_INUSE);
      set_head(remainder, remainder_size | PREV_INUSE);

      check_malloced_chunk(victim, nb);
      return chunk2mem(victim);
    }

    /*
      If there is space available in fastbins, consolidate and retry,
      to possibly avoid expanding memory. This can occur only if nb is
      in smallbin range, so we didn't consolidate upon entry.
    */
    else if (have_fastchunks(av)) {
      assert(in_smallbin_range(nb));
      malloc_consolidate(av);
      idx = smallbin_index(nb); /* restore original bin index */
    }

    /* Otherwise, relay to handle system-dependent cases */
    else
      return sYSMALLOc(nb, av);
  }
}

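/*
  fREe: release a chunk. Tiny chunks go onto a fastbin unconsolidated;
  other non-mmapped chunks are coalesced with free neighbors and placed
  on the unsorted list (or merged into top). Large frees trigger
  fastbin consolidation and possibly trimming. Mmapped chunks are
  simply munmapped.
*/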
03588 #if __STD_C
03589 void fREe(Void_t* mem)
03590 #else
03591 void fREe(mem) Void_t* mem;
03592 #endif
03593 {
03594 mstate av = get_malloc_state();
03595
03596 mchunkptr p;
03597 INTERNAL_SIZE_T size;
03598 mfastbinptr* fb;
03599 mchunkptr nextchunk;
03600 INTERNAL_SIZE_T nextsize;
03601 int nextinuse;
03602 INTERNAL_SIZE_T prevsize;
03603 mchunkptr bck;
03604 mchunkptr fwd;
03605
03606
03607
03608 if (mem != 0) {
03609 p = mem2chunk(mem);
03610 size = chunksize(p);
03611
03612 check_inuse_chunk(p);
03613
03614
03615
03616
03617
03618
03619 if ((unsigned long)(size) <= (unsigned long)(av->max_fast)
03620
03621 #if TRIM_FASTBINS
03622
03623
03624
03625
03626 && (chunk_at_offset(p, size) != av->top)
03627 #endif
03628 ) {
03629
03630 set_fastchunks(av);
03631 fb = &(av->fastbins[fastbin_index(size)]);
03632 p->fd = *fb;
03633 *fb = p;
03634 }
03635
03636
03637
03638
03639
03640 else if (!chunk_is_mmapped(p)) {
03641 nextchunk = chunk_at_offset(p, size);
03642 nextsize = chunksize(nextchunk);
03643
03644
03645 if (!prev_inuse(p)) {
03646 prevsize = p->prev_size;
03647 size += prevsize;
03648 p = chunk_at_offset(p, -((long) prevsize));
03649 unlink(p, bck, fwd);
03650 }
03651
03652 if (nextchunk != av->top) {
03653
03654 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
03655 set_head(nextchunk, nextsize);
03656
03657
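      /* consolidate forward */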
03658 if (!nextinuse) {
03659 unlink(nextchunk, bck, fwd);
03660 size += nextsize;
03661 }
03662
03663
03664
03665
03666
03667
03668
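      /*
        Place the chunk in the unsorted chunk list. Chunks are not placed
        into regular bins until after they have been given one chance to
        be used in malloc.
      */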
03669 bck = unsorted_chunks(av);
03670 fwd = bck->fd;
03671 p->bk = bck;
03672 p->fd = fwd;
03673 bck->fd = p;
03674 fwd->bk = p;
03675
03676 set_head(p, size | PREV_INUSE);
03677 set_foot(p, size);
03678
03679 check_free_chunk(p);
03680 }
03681
03682
03683
03684
03685
03686
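      /* If the chunk borders the current high end of memory, consolidate into top */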
03687 else {
03688 size += nextsize;
03689 set_head(p, size | PREV_INUSE);
03690 av->top = p;
03691 check_chunk(p);
03692 }
03693
03694
03695
03696
03697
03698
03699
03700
03701
03702
03703
03704
03705
03706
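      /*
        If freeing a large space, consolidate possibly-surrounding chunks,
        then, if the total unused topmost memory exceeds the trim
        threshold, ask malloc_trim to reduce top. Unless max_fast is 0, we
        don't know whether there are fastbins bordering top, so we cannot
        tell for sure that the threshold has been reached unless fastbins
        are consolidated. But we don't want to consolidate on each free.
        As a compromise, consolidation is performed once
        FASTBIN_CONSOLIDATION_THRESHOLD is reached.
      */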
03707 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
03708 if (have_fastchunks(av))
03709 malloc_consolidate(av);
03710
03711 #ifndef MORECORE_CANNOT_TRIM
03712 if ((unsigned long)(chunksize(av->top)) >=
03713 (unsigned long)(av->trim_threshold))
03714 sYSTRIm(av->top_pad, av);
03715 #endif
03716 }
03717
03718 }
03719
03720
03721
03722
03723
03724
03725
03726
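      /*
        If the chunk was allocated via mmap, release via munmap(). Note
        that if HAVE_MMAP is false but chunk_is_mmapped is true, the user
        must have overwritten memory; there is nothing we can do to catch
        this error unless DEBUG is set, in which case check_inuse_chunk
        (above) will have triggered an error.
      */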
03727 else {
03728 #if HAVE_MMAP
03729 int ret;
03730 INTERNAL_SIZE_T offset = p->prev_size;
03731 av->n_mmaps--;
03732 av->mmapped_mem -= (size + offset);
03733 ret = munmap((char*)p - offset, size + offset);
03734
03735 assert(ret == 0);
03736 #endif
03737 }
03738 }
03739 }
03740
03741
03742
03743
03744
03745
03746
03747
03748
03749
03750
03751
03752
03753
03754
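      /*
        ------------------------- malloc_consolidate -------------------------

        malloc_consolidate is a specialized version of free() that tears
        down chunks held in fastbins. Free itself cannot be used for this
        purpose since, among other things, it might place chunks back onto
        fastbins. So, instead, we need to use a minor variant of the same
        code. Also, because this routine needs to be called the first time
        through malloc anyway, it turns out to be the perfect place to
        trigger initialization.
      */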
03755 #if __STD_C
03756 static void malloc_consolidate(mstate av)
03757 #else
03758 static void malloc_consolidate(av) mstate av;
03759 #endif
03760 {
03761 mfastbinptr* fb;
03762 mfastbinptr* maxfb;
03763 mchunkptr p;
03764 mchunkptr nextp;
03765 mchunkptr unsorted_bin;
03766 mchunkptr first_unsorted;
03767
03768
03769 mchunkptr nextchunk;
03770 INTERNAL_SIZE_T size;
03771 INTERNAL_SIZE_T nextsize;
03772 INTERNAL_SIZE_T prevsize;
03773 int nextinuse;
03774 mchunkptr bck;
03775 mchunkptr fwd;
03776
03777
03778
03779
03780
03781
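      /* If max_fast is 0, av hasn't yet been initialized; do so below. */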
03782 if (av->max_fast != 0) {
03783 clear_fastchunks(av);
03784
03785 unsorted_bin = unsorted_chunks(av);
03786
03787
03788
03789
03790
03791
03792
03793
03794
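      /*
        Remove each chunk from its fast bin and consolidate it, then place
        it in the unsorted bin. Among other reasons for doing this,
        placing in the unsorted bin avoids needing to calculate actual
        bins until malloc is sure the chunks aren't immediately going to
        be reused anyway.
      */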
03795 maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
03796 fb = &(av->fastbins[0]);
03797 do {
03798 if ( (p = *fb) != 0) {
03799 *fb = 0;
03800
03801 do {
03802 check_inuse_chunk(p);
03803 nextp = p->fd;
03804
03805
03806 size = p->size & ~PREV_INUSE;
03807 nextchunk = chunk_at_offset(p, size);
03808 nextsize = chunksize(nextchunk);
03809
03810 if (!prev_inuse(p)) {
03811 prevsize = p->prev_size;
03812 size += prevsize;
03813 p = chunk_at_offset(p, -((long) prevsize));
03814 unlink(p, bck, fwd);
03815 }
03816
03817 if (nextchunk != av->top) {
03818 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
03819 set_head(nextchunk, nextsize);
03820
03821 if (!nextinuse) {
03822 size += nextsize;
03823 unlink(nextchunk, bck, fwd);
03824 }
03825
03826 first_unsorted = unsorted_bin->fd;
03827 unsorted_bin->fd = p;
03828 first_unsorted->bk = p;
03829
03830 set_head(p, size | PREV_INUSE);
03831 p->bk = unsorted_bin;
03832 p->fd = first_unsorted;
03833 set_foot(p, size);
03834 }
03835
03836 else {
03837 size += nextsize;
03838 set_head(p, size | PREV_INUSE);
03839 av->top = p;
03840 }
03841
03842 } while ( (p = nextp) != 0);
03843
03844 }
03845 } while (fb++ != maxfb);
03846 }
03847 else {
03848 malloc_init_state(av);
03849 check_malloc_state();
03850 }
03851 }
03852
03853
03854
03855
03856
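      /*
        ------------------------------ realloc ------------------------------
      */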
03857
03858 #if __STD_C
03859 Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
03860 #else
03861 Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
03862 #endif
03863 {
03864 mstate av = get_malloc_state();
03865
03866 INTERNAL_SIZE_T nb;
03867
03868 mchunkptr oldp;
03869 INTERNAL_SIZE_T oldsize;
03870
03871 mchunkptr newp;
03872 INTERNAL_SIZE_T newsize;
03873 Void_t* newmem;
03874
03875 mchunkptr next;
03876
03877 mchunkptr remainder;
03878 unsigned long remainder_size;
03879
03880 mchunkptr bck;
03881 mchunkptr fwd;
03882
03883 unsigned long copysize;
03884 unsigned int ncopies;
03885 INTERNAL_SIZE_T* s;
03886 INTERNAL_SIZE_T* d;
03887
03888
03889 #ifdef REALLOC_ZERO_BYTES_FREES
03890 if (bytes == 0) {
03891 fREe(oldmem);
03892 return 0;
03893 }
03894 #endif
03895
03896
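      /* realloc of null is supposed to be the same as malloc */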
03897 if (oldmem == 0) return mALLOc(bytes);
03898
03899 checked_request2size(bytes, nb);
03900
03901 oldp = mem2chunk(oldmem);
03902 oldsize = chunksize(oldp);
03903
03904 check_inuse_chunk(oldp);
03905
03906 if (!chunk_is_mmapped(oldp)) {
03907
03908 if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
03909
03910 newp = oldp;
03911 newsize = oldsize;
03912 }
03913
03914 else {
03915 next = chunk_at_offset(oldp, oldsize);
03916
03917
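      /* Try to expand forward into top */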
03918 if (next == av->top &&
03919 (unsigned long)(newsize = oldsize + chunksize(next)) >=
03920 (unsigned long)(nb + MINSIZE)) {
03921 set_head_size(oldp, nb);
03922 av->top = chunk_at_offset(oldp, nb);
03923 set_head(av->top, (newsize - nb) | PREV_INUSE);
03924 return chunk2mem(oldp);
03925 }
03926
03927
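      /* Try to expand forward into the next chunk, unlinking it if free */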
03928 else if (next != av->top &&
03929 !inuse(next) &&
03930 (unsigned long)(newsize = oldsize + chunksize(next)) >=
03931 (unsigned long)(nb)) {
03932 newp = oldp;
03933 unlink(next, bck, fwd);
03934 }
03935
03936
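      /* allocate, copy, free */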
03937 else {
03938 newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
03939 if (newmem == 0)
03940 return 0;
03941
03942 newp = mem2chunk(newmem);
03943 newsize = chunksize(newp);
03944
03945
03946
03947
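      /* Avoid copy if newp is the next chunk after oldp. */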
03948 if (newp == next) {
03949 newsize += oldsize;
03950 newp = oldp;
03951 }
03952 else {
03953
03954
03955
03956
03957
03958
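      /*
        Unroll copy of <= 36 bytes (72 if 8-byte sizes). We know that the
        contents have an odd number of INTERNAL_SIZE_T-sized words,
        minimally 3.
      */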
03959 copysize = oldsize - SIZE_SZ;
03960 s = (INTERNAL_SIZE_T*)(oldmem);
03961 d = (INTERNAL_SIZE_T*)(newmem);
03962 ncopies = copysize / sizeof(INTERNAL_SIZE_T);
03963 assert(ncopies >= 3);
03964
03965 if (ncopies > 9)
03966 MALLOC_COPY(d, s, copysize);
03967
03968 else {
03969 *(d+0) = *(s+0);
03970 *(d+1) = *(s+1);
03971 *(d+2) = *(s+2);
03972 if (ncopies > 4) {
03973 *(d+3) = *(s+3);
03974 *(d+4) = *(s+4);
03975 if (ncopies > 6) {
03976 *(d+5) = *(s+5);
03977 *(d+6) = *(s+6);
03978 if (ncopies > 8) {
03979 *(d+7) = *(s+7);
03980 *(d+8) = *(s+8);
03981 }
03982 }
03983 }
03984 }
03985
03986 fREe(oldmem);
03987 check_inuse_chunk(newp);
03988 return chunk2mem(newp);
03989 }
03990 }
03991 }
03992
03993
03994
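      /* If possible, free extra space in the old or extended chunk */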
03995 assert((unsigned long)(newsize) >= (unsigned long)(nb));
03996
03997 remainder_size = newsize - nb;
03998
03999 if (remainder_size < MINSIZE) {
04000 set_head_size(newp, newsize);
04001 set_inuse_bit_at_offset(newp, newsize);
04002 }
04003 else {
04004 remainder = chunk_at_offset(newp, nb);
04005 set_head_size(newp, nb);
04006 set_head(remainder, remainder_size | PREV_INUSE);
04007
04008 set_inuse_bit_at_offset(remainder, remainder_size);
04009 fREe(chunk2mem(remainder));
04010 }
04011
04012 check_inuse_chunk(newp);
04013 return chunk2mem(newp);
04014 }
04015
04016
04017
04018
04019
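      /* Handle mmap cases */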
04020 else {
04021 #if HAVE_MMAP
04022
04023 #if HAVE_MREMAP
04024 INTERNAL_SIZE_T offset = oldp->prev_size;
04025 size_t pagemask = av->pagesize - 1;
04026 char *cp;
04027 unsigned long sum;
04028
04029
04030 newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
04031
04032
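      /* don't need to remap if still within the same page */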
04033 if (oldsize == newsize - offset)
04034 return oldmem;
04035
04036 cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
04037
04038 if (cp != (char*)MORECORE_FAILURE) {
04039
04040 newp = (mchunkptr)(cp + offset);
04041 set_head(newp, (newsize - offset)|IS_MMAPPED);
04042
04043 assert(aligned_OK(chunk2mem(newp)));
04044 assert((newp->prev_size == offset));
04045
04046
04047 sum = av->mmapped_mem += newsize - oldsize;
04048 if (sum > (unsigned long)(av->max_mmapped_mem))
04049 av->max_mmapped_mem = sum;
04050 sum += av->sbrked_mem;
04051 if (sum > (unsigned long)(av->max_total_mem))
04052 av->max_total_mem = sum;
04053
04054 return chunk2mem(newp);
04055 }
04056 #endif
04057
04058
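      /* Can the existing mmapped region hold the request? Note the extra SIZE_SZ overhead. */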
04059 if ((unsigned long)(oldsize) >= (unsigned long)(nb + SIZE_SZ))
04060 newmem = oldmem;
04061 else {
04062
04063 newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
04064 if (newmem != 0) {
04065 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
04066 fREe(oldmem);
04067 }
04068 }
04069 return newmem;
04070
04071 #else
04072
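      /* If !HAVE_MMAP, but chunk_is_mmapped, the user must have overwritten memory */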
04073 check_malloc_state();
04074 MALLOC_FAILURE_ACTION;
04075 return 0;
04076 #endif
04077 }
04078 }
04079
04080
04081
04082
04083
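      /*
        ------------------------------ memalign ------------------------------
      */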
04084 #if __STD_C
04085 Void_t* mEMALIGn(size_t alignment, size_t bytes)
04086 #else
04087 Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
04088 #endif
04089 {
04090 INTERNAL_SIZE_T nb;
04091 char* m;
04092 mchunkptr p;
04093 char* brk;
04094 mchunkptr newp;
04095 INTERNAL_SIZE_T newsize;
04096 INTERNAL_SIZE_T leadsize;
04097 mchunkptr remainder;
04098 unsigned long remainder_size;
04099 INTERNAL_SIZE_T size;
04100
04101
04102
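      /* If we need less alignment than we give anyway, just relay to malloc */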
04103 if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);
04104
04105
04106
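      /* Otherwise, ensure that it is at least a minimum chunk size */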
04107 if (alignment < MINSIZE) alignment = MINSIZE;
04108
04109
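      /* Make sure alignment is a power of 2 (in case MINSIZE is not). */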
04110 if ((alignment & (alignment - 1)) != 0) {
04111 size_t a = MALLOC_ALIGNMENT * 2;
04112 while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
04113 alignment = a;
04114 }
04115
04116 checked_request2size(bytes, nb);
04117
04118
04119
04120
04121
04122
04123
04124
04125
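      /* Call malloc with worst-case padding to hit alignment. */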
04126 m = (char*)(mALLOc(nb + alignment + MINSIZE));
04127
04128 if (m == 0) return 0;
04129
04130 p = mem2chunk(m);
04131
04132 if ((((unsigned long)(m)) % alignment) != 0) {
04133
04134
04135
04136
04137
04138
04139
04140
04141
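      /*
        Find an aligned spot inside the chunk. Since we need to give back
        leading space in a chunk of at least MINSIZE, if the first
        calculation places us at a spot with less than MINSIZE leader, we
        can move to the next aligned spot: we've allocated enough total
        room so that this is always possible.
      */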
04142 brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) &
04143 -((signed long) alignment));
04144 if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
04145 brk += alignment;
04146
04147 newp = (mchunkptr)brk;
04148 leadsize = brk - (char*)(p);
04149 newsize = chunksize(p) - leadsize;
04150
04151
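      /* For mmapped chunks, just adjust the offset */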
04152 if (chunk_is_mmapped(p)) {
04153 newp->prev_size = p->prev_size + leadsize;
04154 set_head(newp, newsize|IS_MMAPPED);
04155 return chunk2mem(newp);
04156 }
04157
04158
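      /* Otherwise, give back the leader, use the rest */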
04159 set_head(newp, newsize | PREV_INUSE);
04160 set_inuse_bit_at_offset(newp, newsize);
04161 set_head_size(p, leadsize);
04162 fREe(chunk2mem(p));
04163 p = newp;
04164
04165 assert (newsize >= nb &&
04166 (((unsigned long)(chunk2mem(p))) % alignment) == 0);
04167 }
04168
04169
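      /* Also give back spare room at the end */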
04170 if (!chunk_is_mmapped(p)) {
04171 size = chunksize(p);
04172 if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
04173 remainder_size = size - nb;
04174 remainder = chunk_at_offset(p, nb);
04175 set_head(remainder, remainder_size | PREV_INUSE);
04176 set_head_size(p, nb);
04177 fREe(chunk2mem(remainder));
04178 }
04179 }
04180
04181 check_inuse_chunk(p);
04182 return chunk2mem(p);
04183 }
04184
04185
04186
04187
04188
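      /*
        ------------------------------ calloc ------------------------------
      */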
04189 #if __STD_C
04190 Void_t* cALLOc(size_t n_elements, size_t elem_size)
04191 #else
04192 Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
04193 #endif
04194 {
04195 mchunkptr p;
04196 unsigned long clearsize;
04197 unsigned long nclears;
04198 INTERNAL_SIZE_T* d;
04199 Void_t* mem;
04200 
      /* Guard against overflow in computing n_elements * elem_size; a huge
         element count could otherwise wrap around to a small request. */
      if (elem_size != 0 && n_elements > ((size_t) -1) / elem_size) {
      MALLOC_FAILURE_ACTION;
      return 0;
      }
      mem = mALLOc(n_elements * elem_size);
04201 
04202 if (mem != 0) {
04203 p = mem2chunk(mem);
04204
04205 #if MMAP_CLEARS
04206 if (!chunk_is_mmapped(p))
04207 #endif
04208 {
04209
04210
04211
04212
04213
04214
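      /*
        Unroll clear of <= 36 bytes (72 if 8-byte sizes). We know that the
        contents have an odd number of INTERNAL_SIZE_T-sized words,
        minimally 3.
      */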
04215 d = (INTERNAL_SIZE_T*)mem;
04216 clearsize = chunksize(p) - SIZE_SZ;
04217 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
04218 assert(nclears >= 3);
04219
04220 if (nclears > 9)
04221 MALLOC_ZERO(d, clearsize);
04222
04223 else {
04224 *(d+0) = 0;
04225 *(d+1) = 0;
04226 *(d+2) = 0;
04227 if (nclears > 4) {
04228 *(d+3) = 0;
04229 *(d+4) = 0;
04230 if (nclears > 6) {
04231 *(d+5) = 0;
04232 *(d+6) = 0;
04233 if (nclears > 8) {
04234 *(d+7) = 0;
04235 *(d+8) = 0;
04236 }
04237 }
04238 }
04239 }
04240 }
04241 }
04242 return mem;
04243 }
04244
04245
04246
04247
04248
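      /*
        ------------------------------ cfree ------------------------------
      */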
04249 #if __STD_C
04250 void cFREe(Void_t *mem)
04251 #else
04252 void cFREe(mem) Void_t *mem;
04253 #endif
04254 {
04255 fREe(mem);
04256 }
04257
04258
04259
04260
04261
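      /*
        ------------------------- independent_calloc -------------------------
      */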
04262 #if __STD_C
04263 Void_t** iCALLOc(size_t n_elements, size_t elem_size, Void_t* chunks[])
04264 #else
04265 Void_t** iCALLOc(n_elements, elem_size, chunks) size_t n_elements; size_t elem_size; Void_t* chunks[];
04266 #endif
04267 {
04268 size_t sz = elem_size;
04269
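      /* opts value 3 = 0x1 | 0x2: all elements are the same size, and should be cleared */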
04270 return iALLOc(n_elements, &sz, 3, chunks);
04271 }
04272
04273
04274
04275
04276
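      /*
        ------------------------- independent_comalloc -------------------------
      */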
04277 #if __STD_C
04278 Void_t** iCOMALLOc(size_t n_elements, size_t sizes[], Void_t* chunks[])
04279 #else
04280 Void_t** iCOMALLOc(n_elements, sizes, chunks) size_t n_elements; size_t sizes[]; Void_t* chunks[];
04281 #endif
04282 {
04283 return iALLOc(n_elements, sizes, 0, chunks);
04284 }
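      /*
        Illustrative usage sketch (non-normative; it assumes the public
        names independent_comalloc and free are mapped onto iCOMALLOc and
        fREe by the usual public-name macros):

          struct header { size_t len; char* body; };

          struct header* make_record(size_t body_bytes) {
            void* chunks[2];
            size_t sizes[2];
            struct header* h;
            sizes[0] = sizeof(struct header);
            sizes[1] = body_bytes;
            if (independent_comalloc(2, sizes, chunks) == 0)
              return 0;
            h = (struct header*) chunks[0];
            h->len = body_bytes;
            h->body = (char*) chunks[1];
            return h;
          }

        Each element is an ordinary chunk carved from one contiguous
        block, so each may later be released with an ordinary free() call.
      */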
04285
04286
04287
04288
04289
04290
04291
04292
04293
04294
04295
04296
04297
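      /*
        ------------------------------ ialloc ------------------------------

        ialloc provides common support for the independent_X routines,
        handling all of the combinations that can result. The opts arg has
        bit 0 set if all elements are the same size (using sizes[0]), and
        bit 1 set if elements should be zeroed.
      */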
04298 #if __STD_C
04299 static Void_t** iALLOc(size_t n_elements,
04300 size_t* sizes,
04301 int opts,
04302 Void_t* chunks[])
04303 #else
04304 static Void_t** iALLOc(n_elements, sizes, opts, chunks) size_t n_elements; size_t* sizes; int opts; Void_t* chunks[];
04305 #endif
04306 {
04307 mstate av = get_malloc_state();
04308 INTERNAL_SIZE_T element_size;
04309 INTERNAL_SIZE_T contents_size;
04310 INTERNAL_SIZE_T array_size;
04311 Void_t* mem;
04312 mchunkptr p;
04313 INTERNAL_SIZE_T remainder_size;
04314 Void_t** marray;
04315 mchunkptr array_chunk;
04316 int mmx;
04317 INTERNAL_SIZE_T size;
04318 size_t i;
04319
04320
04321 if (have_fastchunks(av)) malloc_consolidate(av);
04322
04323
04324 if (chunks != 0) {
04325 if (n_elements == 0)
04326 return chunks;
04327 marray = chunks;
04328 array_size = 0;
04329 }
04330 else {
04331
04332 if (n_elements == 0)
04333 return (Void_t**) mALLOc(0);
04334 marray = 0;
04335 array_size = request2size(n_elements * (sizeof(Void_t*)));
04336 }
04337
04338
04339 if (opts & 0x1) {
04340 element_size = request2size(*sizes);
04341 contents_size = n_elements * element_size;
04342 }
04343 else {
04344 element_size = 0;
04345 contents_size = 0;
04346 for (i = 0; i != n_elements; ++i)
04347 contents_size += request2size(sizes[i]);
04348 }
04349
04350
04351 size = contents_size + array_size - MALLOC_ALIGN_MASK;
04352
04353
04354
04355
04356
04357
04358
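      /*
        Allocate the aggregate chunk. But first disable mmap so malloc
        won't use it, since we would not be able to later divide the chunk
        into pieces!
      */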
04359 mmx = av->n_mmaps_max;
04360 av->n_mmaps_max = 0;
04361 mem = mALLOc(size);
04362 av->n_mmaps_max = mmx;
04363 if (mem == 0)
04364 return 0;
04365
04366 p = mem2chunk(mem);
04367 assert(!chunk_is_mmapped(p));
04368 remainder_size = chunksize(p);
04369
04370 if (opts & 0x2) {
04371 MALLOC_ZERO(mem, remainder_size - SIZE_SZ - array_size);
04372 }
04373
04374
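      /* If not provided, allocate the pointer array as the final part of the chunk */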
04375 if (marray == 0) {
04376 array_chunk = chunk_at_offset(p, contents_size);
04377 marray = (Void_t**) (chunk2mem(array_chunk));
04378 set_head(array_chunk, (remainder_size - contents_size) | PREV_INUSE);
04379 remainder_size = contents_size;
04380 }
04381
04382
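      /* split out elements */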
04383 for (i = 0; ; ++i) {
04384 marray[i] = chunk2mem(p);
04385 if (i != n_elements-1) {
04386 if (element_size != 0)
04387 size = element_size;
04388 else
04389 size = request2size(sizes[i]);
04390 remainder_size -= size;
04391 set_head(p, size | PREV_INUSE);
04392 p = chunk_at_offset(p, size);
04393 }
04394 else {
04395 set_head(p, remainder_size | PREV_INUSE);
04396 break;
04397 }
04398 }
04399
04400 #if DEBUG
04401 if (marray != chunks) {
04402
04403 if (element_size != 0)
04404 assert(remainder_size == element_size);
04405 else
04406 assert(remainder_size == request2size(sizes[i]));
04407 check_inuse_chunk(mem2chunk(marray));
04408 }
04409
04410 for (i = 0; i != n_elements; ++i)
04411 check_inuse_chunk(mem2chunk(marray[i]));
04412 #endif
04413
04414 return marray;
04415 }
04416
04417
04418
04419
04420
04421
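      /*
        ------------------------------ valloc ------------------------------
      */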
04422 #if __STD_C
04423 Void_t* vALLOc(size_t bytes)
04424 #else
04425 Void_t* vALLOc(bytes) size_t bytes;
04426 #endif
04427 {
04428
04429 mstate av = get_malloc_state();
04430 if (have_fastchunks(av)) malloc_consolidate(av);
04431 return mEMALIGn(av->pagesize, bytes);
04432 }
04433
04434
04435
04436
04437
04438
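      /*
        ------------------------------ pvalloc ------------------------------
      */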
04439 #if __STD_C
04440 Void_t* pVALLOc(size_t bytes)
04441 #else
04442 Void_t* pVALLOc(bytes) size_t bytes;
04443 #endif
04444 {
04445 mstate av = get_malloc_state();
04446 size_t pagesz;
04447
04448
04449 if (have_fastchunks(av)) malloc_consolidate(av);
04450 pagesz = av->pagesize;
04451 return mEMALIGn(pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
04452 }
04453
04454
04455
04456
04457
04458
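      /*
        ------------------------------ malloc_trim ------------------------------
      */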
04459 #if __STD_C
04460 int mTRIm(size_t pad)
04461 #else
04462 int mTRIm(pad) size_t pad;
04463 #endif
04464 {
04465 mstate av = get_malloc_state();
04466
04467 malloc_consolidate(av);
04468
04469 #ifndef MORECORE_CANNOT_TRIM
04470 return sYSTRIm(pad, av);
04471 #else
04472 return 0;
04473 #endif
04474 }
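      /*
        Illustrative sketch of a caller (non-normative; assumes the public
        name malloc_trim is mapped onto mTRIm by the usual public-name
        macros). After releasing a large working set, a program can ask
        the allocator to return unused top-of-heap memory to the system,
        keeping a pad of slack for future requests:

          void after_big_release(void) {
            malloc_trim(64 * 1024);
          }

        The result is 1 if memory was actually released back to the
        system, 0 otherwise (including when MORECORE_CANNOT_TRIM is set).
      */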
04475
04476
04477
04478
04479
04480
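      /*
        ------------------------- malloc_usable_size -------------------------
      */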
04481 #if __STD_C
04482 size_t mUSABLe(Void_t* mem)
04483 #else
04484 size_t mUSABLe(mem) Void_t* mem;
04485 #endif
04486 {
04487 mchunkptr p;
04488 if (mem != 0) {
04489 p = mem2chunk(mem);
04490 if (chunk_is_mmapped(p))
04491 return chunksize(p) - 2*SIZE_SZ;
04492 else if (inuse(p))
04493 return chunksize(p) - SIZE_SZ;
04494 }
04495 return 0;
04496 }
04497
04498
04499
04500
04501
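      /*
        ------------------------------ mallinfo ------------------------------
      */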
04502 struct mallinfo mALLINFo()
04503 {
04504 mstate av = get_malloc_state();
04505 struct mallinfo mi;
04506 int i;
04507 mbinptr b;
04508 mchunkptr p;
04509 INTERNAL_SIZE_T avail;
04510 INTERNAL_SIZE_T fastavail;
04511 int nblocks;
04512 int nfastblocks;
04513
04514
04515 if (av->top == 0) malloc_consolidate(av);
04516
04517 check_malloc_state();
04518
04519
04520 avail = chunksize(av->top);
04521 nblocks = 1;
04522
04523
04524 nfastblocks = 0;
04525 fastavail = 0;
04526
04527 for (i = 0; i < NFASTBINS; ++i) {
04528 for (p = av->fastbins[i]; p != 0; p = p->fd) {
04529 ++nfastblocks;
04530 fastavail += chunksize(p);
04531 }
04532 }
04533
04534 avail += fastavail;
04535
04536
04537 for (i = 1; i < NBINS; ++i) {
04538 b = bin_at(av, i);
04539 for (p = last(b); p != b; p = p->bk) {
04540 ++nblocks;
04541 avail += chunksize(p);
04542 }
04543 }
04544
04545 mi.smblks = nfastblocks;
04546 mi.ordblks = nblocks;
04547 mi.fordblks = avail;
04548 mi.uordblks = av->sbrked_mem - avail;
04549 mi.arena = av->sbrked_mem;
04550 mi.hblks = av->n_mmaps;
04551 mi.hblkhd = av->mmapped_mem;
04552 mi.fsmblks = fastavail;
04553 mi.keepcost = chunksize(av->top);
04554 mi.usmblks = av->max_total_mem;
04555 return mi;
04556 }
04557
04558
04559
04560
04561
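      /*
        ------------------------------ malloc_stats ------------------------------
      */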
04562 void mSTATs()
04563 {
04564 struct mallinfo mi = mALLINFo();
04565
04566
04567
04569
04570 #ifdef WIN32
04571 {
04572 unsigned long free, reserved, committed;
04573 vminfo (&free, &reserved, &committed);
04574 fprintf(stderr, "free bytes = %10lu\n",
04575 free);
04576 fprintf(stderr, "reserved bytes = %10lu\n",
04577 reserved);
04578 fprintf(stderr, "committed bytes = %10lu\n",
04579 committed);
04580 }
04581 #endif
04582
04583
04584 fprintf(stderr, "max system bytes = %10lu\n",
04585 (unsigned long)(mi.usmblks));
04586 fprintf(stderr, "system bytes = %10lu\n",
04587 (unsigned long)(mi.arena + mi.hblkhd));
04588 fprintf(stderr, "in use bytes = %10lu\n",
04589 (unsigned long)(mi.uordblks + mi.hblkhd));
04590
04591
04592 #ifdef WIN32
04593 {
04594 unsigned long kernel, user;
04595 if (cpuinfo (TRUE, &kernel, &user)) {
04596 fprintf(stderr, "kernel ms = %10lu\n",
04597 kernel);
04598 fprintf(stderr, "user ms = %10lu\n",
04599 user);
04600 }
04601 }
04602 #endif
04604
04605 }
04606
04607
04608
04609
04610
04611
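      /*
        ------------------------------ mallopt ------------------------------
      */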
04612 #if __STD_C
04613 int mALLOPt(int param_number, int value)
04614 #else
04615 int mALLOPt(param_number, value) int param_number; int value;
04616 #endif
04617 {
04618 mstate av = get_malloc_state();
04619
04620 malloc_consolidate(av);
04621
04622 switch(param_number) {
04623 case M_MXFAST:
04624 if (value >= 0 && value <= MAX_FAST_SIZE) {
04625 set_max_fast(av, value);
04626 return 1;
04627 }
04628 else
04629 return 0;
04630
04631 case M_TRIM_THRESHOLD:
04632 av->trim_threshold = value;
04633 return 1;
04634
04635 case M_TOP_PAD:
04636 av->top_pad = value;
04637 return 1;
04638
04639 case M_MMAP_THRESHOLD:
04640 av->mmap_threshold = value;
04641 return 1;
04642
04643 case M_MMAP_MAX:
04644 #if !HAVE_MMAP
04645 if (value != 0)
04646 return 0;
04647 #endif
04648 av->n_mmaps_max = value;
04649 return 1;
04650
04651 default:
04652 return 0;
04653 }
04654 }
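      /*
        Illustrative tuning sketch (non-normative; assumes the public name
        mallopt is mapped onto mALLOPt by the usual public-name macros,
        and uses the M_* symbols handled in the switch above). A
        long-running server might raise the trim and mmap thresholds so
        the allocator caches more memory between bursts:

          int tune_for_server(void) {
            int ok = 1;
            ok &= mallopt(M_TRIM_THRESHOLD, 256 * 1024);
            ok &= mallopt(M_MMAP_THRESHOLD, 1024 * 1024);
            return ok;
          }

        As implemented above, mallopt returns 1 on success and 0 if the
        parameter number or value is not supported.
      */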
04655
04656
04657
04658
04659
04660
04661
04662
04663
04664
04665
04666
04667
04668
04669
04670
04671
04672
04673
04674
04675
04676
04677
04678
04679
04680
04681
04682
04683
04684
04685
04686
04687
04688
04689
04690
04691
04692
04693
04694
04695
04696
04697
04698
04699
04700
04701
04702
04703
04704
04705
04706
04707
04708
04709
04710
04711
04712
04713
04714
04715
04716
04717
04718
04719
04720
04721
04722
04723
04724
04725
04726
04727
04728
04729
04730
04731
04732
04733
04734
04735
04736
04737
04738
04739
04740
04741
04742
04743
04744
04745
04746
04747
04748
04749
04750
04751
04752
04753
04754
04755
04756
04757
04758
04759
04760
04761
04762
04763
04764
04765
04766
04767
04768
04769
04770
04771
04772
04773
04774
04775
04776
04777
04778
04779
04780
04781
04782
04783
04784
04785
04786
04787
04788
04789
04790
04791
04792
04793
04794
04795
04796
04797
04798
04799
04800
04801
04802
04803
04804
04805
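      /*
        -------------------- WIN32 emulation of sbrk/mmap --------------------

        The routines below emulate sbrk, mmap, and munmap on top of
        VirtualAlloc/VirtualFree, and provide the vminfo/cpuinfo helpers
        used by malloc_stats.
      */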
04806 #ifdef WIN32
04807
04808 #ifdef _DEBUG
04809
04810 #endif
04811
04812
04813 #ifdef USE_MALLOC_LOCK
04814
04815
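      /* Wait for spin lock. Note: this relies on the legacy PVOID-based
         InterlockedCompareExchange signature. */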
04816 static int slwait (int *sl) {
04817 while (InterlockedCompareExchange ((void **) sl, (void *) 1, (void *) 0) != 0)
04818 Sleep (0);
04819 return 0;
04820 }
04821
04822
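      /* Release spin lock */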
04823 static int slrelease (int *sl) {
04824 InterlockedExchange (sl, 0);
04825 return 0;
04826 }
04827
04828 #ifdef NEEDED
04829
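      /* Spin lock for emulation code */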
04830 static int g_sl;
04831 #endif
04832
04833 #endif
04834
04835
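      /* getpagesize for windows */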
04836 static long getpagesize (void) {
04837 static long g_pagesize = 0;
04838 if (! g_pagesize) {
04839 SYSTEM_INFO system_info;
04840 GetSystemInfo (&system_info);
04841 g_pagesize = system_info.dwPageSize;
04842 }
04843 return g_pagesize;
04844 }
04845 static long getregionsize (void) {
04846 static long g_regionsize = 0;
04847 if (! g_regionsize) {
04848 SYSTEM_INFO system_info;
04849 GetSystemInfo (&system_info);
04850 g_regionsize = system_info.dwAllocationGranularity;
04851 }
04852 return g_regionsize;
04853 }
04854
04855
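      /* A region list entry for the windows sbrk emulation */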
04856 typedef struct _region_list_entry {
04857 void *top_allocated;
04858 void *top_committed;
04859 void *top_reserved;
04860 long reserve_size;
04861 struct _region_list_entry *previous;
04862 } region_list_entry;
04863
04864
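      /* Append a new entry to the region list */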
04865 static int region_list_append (region_list_entry **last, void *base_reserved, long reserve_size) {
04866 region_list_entry *next = HeapAlloc (GetProcessHeap (), 0, sizeof (region_list_entry));
04867 if (! next)
04868 return FALSE;
04869 next->top_allocated = (char *) base_reserved;
04870 next->top_committed = (char *) base_reserved;
04871 next->top_reserved = (char *) base_reserved + reserve_size;
04872 next->reserve_size = reserve_size;
04873 next->previous = *last;
04874 *last = next;
04875 return TRUE;
04876 }
04877
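      /* Remove the last entry from the region list. Note that HeapFree
         takes flags, not a size, as its second argument. */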
04878 static int region_list_remove (region_list_entry **last) {
04879 region_list_entry *previous = (*last)->previous;
04880 if (! HeapFree (GetProcessHeap (), 0, *last))
04881 return FALSE;
04882 *last = previous;
04883 return TRUE;
04884 }
04885
04886 #define CEIL(size,to) (((size)+(to)-1)&~((to)-1))
04887 #define FLOOR(size,to) ((size)&~((to)-1))
04888
04889 #define SBRK_SCALE 0
04890
04891
04892
04893
04894
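      /* sbrk for windows: grows (size > 0) or shrinks (size < 0) an
         emulated program break built from VirtualAlloc regions */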
04895 static void *sbrk (long size) {
04896 static long g_pagesize, g_my_pagesize;
04897 static long g_regionsize, g_my_regionsize;
04898 static region_list_entry *g_last;
04899 void *result = (void *) MORECORE_FAILURE;
04900 #ifdef TRACE
04901 printf ("sbrk %d\n", size);
04902 #endif
04903 #if defined (USE_MALLOC_LOCK) && defined (NEEDED)
04904
04905 slwait (&g_sl);
04906 #endif
04907
04908 if (! g_pagesize) {
04909 g_pagesize = getpagesize ();
04910 g_my_pagesize = g_pagesize << SBRK_SCALE;
04911 }
04912 if (! g_regionsize) {
04913 g_regionsize = getregionsize ();
04914 g_my_regionsize = g_regionsize << SBRK_SCALE;
04915 }
04916 if (! g_last) {
04917 if (! region_list_append (&g_last, 0, 0)) {
04918 goto sbrk_exit;
04919 }
04920 }
04921
04922 assert (g_last);
04923 assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated &&
04924 g_last->top_allocated <= g_last->top_committed);
04925 assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed &&
04926 g_last->top_committed <= g_last->top_reserved &&
04927 (unsigned) g_last->top_committed % g_pagesize == 0);
04928 assert ((unsigned) g_last->top_reserved % g_regionsize == 0);
04929 assert ((unsigned) g_last->reserve_size % g_regionsize == 0);
04930
04931 if (size >= 0) {
04932
04933 long allocate_size = size;
04934
04935 long to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
04936
04937 if (to_commit > 0) {
04938
04939 long commit_size = CEIL (to_commit, g_my_pagesize);
04940
04941 long to_reserve = (char *) g_last->top_committed + commit_size - (char *) g_last->top_reserved;
04942
04943 if (to_reserve > 0) {
04944
04945 long remaining_commit_size = (char *) g_last->top_reserved - (char *) g_last->top_committed;
04946 if (remaining_commit_size > 0) {
04947
04948 assert ((unsigned) g_last->top_committed % g_pagesize == 0);
04949 assert (0 < remaining_commit_size && remaining_commit_size % g_pagesize == 0); {
04950
04951 void *base_committed = VirtualAlloc (g_last->top_committed, remaining_commit_size,
04952 MEM_COMMIT, PAGE_READWRITE);
04953
04954 if (base_committed != g_last->top_committed) {
04955 goto sbrk_exit;
04956 }
04957
04958 assert ((unsigned) base_committed % g_pagesize == 0);
04959 #ifdef TRACE
04960 printf ("Commit %p %d\n", base_committed, remaining_commit_size);
04961 #endif
04962
04963 g_last->top_committed = (char *) base_committed + remaining_commit_size;
04964 }
04965 } {
04966
04967 int contiguous = -1;
04968 int found = FALSE;
04969 MEMORY_BASIC_INFORMATION memory_info;
04970 void *base_reserved;
04971 long reserve_size;
04972 do {
04973
04974 contiguous = TRUE;
04975
04976 reserve_size = CEIL (to_reserve, g_my_regionsize);
04977
04978 memory_info.BaseAddress = g_last->top_reserved;
04979
04980 assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
04981 assert (0 < reserve_size && reserve_size % g_regionsize == 0);
04982 while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
04983
04984 assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
04985 #ifdef TRACE
04986 printf ("Query %p %d %s\n", memory_info.BaseAddress, memory_info.RegionSize,
04987 memory_info.State == MEM_FREE ? "FREE":
04988 (memory_info.State == MEM_RESERVE ? "RESERVED":
04989 (memory_info.State == MEM_COMMIT ? "COMMITTED": "?")));
04990 #endif
04991
04992 if (memory_info.State == MEM_FREE &&
04993 (unsigned) memory_info.BaseAddress % g_regionsize == 0 &&
04994 memory_info.RegionSize >= (unsigned) reserve_size) {
04995 found = TRUE;
04996 break;
04997 }
04998
04999 contiguous = FALSE;
05000
05001 reserve_size = CEIL (allocate_size, g_my_regionsize);
05002 memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
05003
05004 assert ((unsigned) memory_info.BaseAddress % g_pagesize == 0);
05005 assert (0 < reserve_size && reserve_size % g_regionsize == 0);
05006 }
05007
05008 if (! found) {
05009 goto sbrk_exit;
05010 }
05011
05012 assert ((unsigned) memory_info.BaseAddress % g_regionsize == 0);
05013 assert (0 < reserve_size && reserve_size % g_regionsize == 0);
05014
05015 base_reserved = VirtualAlloc (memory_info.BaseAddress, reserve_size,
05016 MEM_RESERVE, PAGE_NOACCESS);
05017 if (! base_reserved) {
05018 int rc = GetLastError ();
05019 if (rc != ERROR_INVALID_ADDRESS) {
05020 goto sbrk_exit;
05021 }
05022 }
05023
05024
05025 } while (! base_reserved);
05026
05027 if (memory_info.BaseAddress && base_reserved != memory_info.BaseAddress) {
05028 goto sbrk_exit;
05029 }
05030
05031 assert ((unsigned) base_reserved % g_regionsize == 0);
05032 #ifdef TRACE
05033 printf ("Reserve %p %d\n", base_reserved, reserve_size);
05034 #endif
05035
05036 if (contiguous) {
05037 long start_size = (char *) g_last->top_committed - (char *) g_last->top_allocated;
05038
05039 allocate_size -= start_size;
05040
05041 g_last->top_allocated = g_last->top_committed;
05042
05043 to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
05044
05045 commit_size = CEIL (to_commit, g_my_pagesize);
05046 }
05047
05048 if (! region_list_append (&g_last, base_reserved, reserve_size)) {
05049 goto sbrk_exit;
05050 }
05051
05052 if (! contiguous) {
05053
05054 to_commit = (char *) g_last->top_allocated + allocate_size - (char *) g_last->top_committed;
05055
05056 commit_size = CEIL (to_commit, g_my_pagesize);
05057 }
05058 }
05059 }
05060
05061 assert ((unsigned) g_last->top_committed % g_pagesize == 0);
05062 assert (0 < commit_size && commit_size % g_pagesize == 0); {
05063
05064 void *base_committed = VirtualAlloc (g_last->top_committed, commit_size,
05065 MEM_COMMIT, PAGE_READWRITE);
05066
05067 if (base_committed != g_last->top_committed) {
05068 goto sbrk_exit;
05069 }
05070
05071 assert ((unsigned) base_committed % g_pagesize == 0);
05072 #ifdef TRACE
05073 printf ("Commit %p %d\n", base_committed, commit_size);
05074 #endif
05075
05076 g_last->top_committed = (char *) base_committed + commit_size;
05077 }
05078 }
05079
05080 g_last->top_allocated = (char *) g_last->top_allocated + allocate_size;
05081 result = (char *) g_last->top_allocated - size;
05082
05083 } else if (size < 0) {
05084 long deallocate_size = - size;
05085
05086 while ((char *) g_last->top_allocated - deallocate_size < (char *) g_last->top_reserved - g_last->reserve_size) {
05087
05088 long release_size = g_last->reserve_size;
05089
05090 void *base_reserved = (char *) g_last->top_reserved - release_size;
05091
05092 assert ((unsigned) base_reserved % g_regionsize == 0);
05093 assert (0 < release_size && release_size % g_regionsize == 0); {
05094
05095 int rc = VirtualFree (base_reserved, 0,
05096 MEM_RELEASE);
05097
05098 if (! rc) {
05099 goto sbrk_exit;
05100 }
05101 #ifdef TRACE
05102 printf ("Release %p %d\n", base_reserved, release_size);
05103 #endif
05104 }
05105
05106 deallocate_size -= (char *) g_last->top_allocated - (char *) base_reserved;
05107
05108 if (! region_list_remove (&g_last)) {
05109 goto sbrk_exit;
05110 }
05111 } {
05112
05113 long to_decommit = (char *) g_last->top_committed - ((char *) g_last->top_allocated - deallocate_size);
05114 if (to_decommit >= g_my_pagesize) {
05115
05116 long decommit_size = FLOOR (to_decommit, g_my_pagesize);
05117
05118 void *base_committed = (char *) g_last->top_committed - decommit_size;
05119
05120 assert ((unsigned) base_committed % g_pagesize == 0);
05121 assert (0 < decommit_size && decommit_size % g_pagesize == 0); {
05122
05123 int rc = VirtualFree ((char *) base_committed, decommit_size,
05124 MEM_DECOMMIT);
05125
05126 if (! rc) {
05127 goto sbrk_exit;
05128 }
05129 #ifdef TRACE
05130 printf ("Decommit %p %d\n", base_committed, decommit_size);
05131 #endif
05132 }
05133
05134 deallocate_size -= (char *) g_last->top_allocated - (char *) base_committed;
05135 g_last->top_committed = base_committed;
05136 g_last->top_allocated = base_committed;
05137 }
05138 }
05139
05140 g_last->top_allocated = (char *) g_last->top_allocated - deallocate_size;
05141
05142 if ((char *) g_last->top_reserved - g_last->reserve_size > (char *) g_last->top_allocated ||
05143 g_last->top_allocated > g_last->top_committed) {
05144
05145 g_last->top_allocated = (char *) g_last->top_reserved - g_last->reserve_size;
05146 goto sbrk_exit;
05147 }
05148 result = g_last->top_allocated;
05149 }
05150
05151 assert (g_last);
05152 assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_allocated &&
05153 g_last->top_allocated <= g_last->top_committed);
05154 assert ((char *) g_last->top_reserved - g_last->reserve_size <= (char *) g_last->top_committed &&
05155 g_last->top_committed <= g_last->top_reserved &&
05156 (unsigned) g_last->top_committed % g_pagesize == 0);
05157 assert ((unsigned) g_last->top_reserved % g_regionsize == 0);
05158 assert ((unsigned) g_last->reserve_size % g_regionsize == 0);
05159
05160 sbrk_exit:
05161 #if defined (USE_MALLOC_LOCK) && defined (NEEDED)
05162
05163 slrelease (&g_sl);
05164 #endif
05165 return result;
05166 }
05167
05168
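      /* mmap for windows: reserves and commits a fresh region; the
         prot/type/handle/arg parameters are accepted for interface
         compatibility and ignored */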
05169 static void *mmap (void *ptr, long size, long prot, long type, long handle, long arg) {
05170 static long g_pagesize;
05171 static long g_regionsize;
05172 #ifdef TRACE
05173 printf ("mmap %d\n", size);
05174 #endif
05175 #if defined (USE_MALLOC_LOCK) && defined (NEEDED)
05176
05177 slwait (&g_sl);
05178 #endif
05179
05180 if (! g_pagesize)
05181 g_pagesize = getpagesize ();
05182 if (! g_regionsize)
05183 g_regionsize = getregionsize ();
05184
05185 assert ((unsigned) ptr % g_regionsize == 0);
05186 assert (size % g_pagesize == 0);
05187
05188 ptr = VirtualAlloc (ptr, size,
05189 MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, PAGE_READWRITE);
05190 if (! ptr) {
05191 ptr = (void *) MORECORE_FAILURE;
05192 goto mmap_exit;
05193 }
05194
05195 assert ((unsigned) ptr % g_regionsize == 0);
05196 #ifdef TRACE
05197 printf ("Commit %p %d\n", ptr, size);
05198 #endif
05199 mmap_exit:
05200 #if defined (USE_MALLOC_LOCK) && defined (NEEDED)
05201
05202 slrelease (&g_sl);
05203 #endif
05204 return ptr;
05205 }
05206
05207
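      /* munmap for windows: releases a region previously obtained from mmap */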
05208 static long munmap (void *ptr, long size) {
05209 static long g_pagesize;
05210 static long g_regionsize;
05211 int rc = MUNMAP_FAILURE;
05212 #ifdef TRACE
05213 printf ("munmap %p %d\n", ptr, size);
05214 #endif
05215 #if defined (USE_MALLOC_LOCK) && defined (NEEDED)
05216
05217 slwait (&g_sl);
05218 #endif
05219
05220 if (! g_pagesize)
05221 g_pagesize = getpagesize ();
05222 if (! g_regionsize)
05223 g_regionsize = getregionsize ();
05224
05225 assert ((unsigned) ptr % g_regionsize == 0);
05226 assert (size % g_pagesize == 0);
05227
05228 if (! VirtualFree (ptr, 0,
05229 MEM_RELEASE))
05230 goto munmap_exit;
05231 rc = 0;
05232 #ifdef TRACE
05233 printf ("Release %p %d\n", ptr, size);
05234 #endif
05235 munmap_exit:
05236 #if defined (USE_MALLOC_LOCK) && defined (NEEDED)
05237
05238 slrelease (&g_sl);
05239 #endif
05240 return rc;
05241 }
05242
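      /* vminfo: tally free, reserved, and committed bytes in the process
         address space */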
05243 static void vminfo (unsigned long *free, unsigned long *reserved, unsigned long *committed) {
05244 MEMORY_BASIC_INFORMATION memory_info;
05245 memory_info.BaseAddress = 0;
05246 *free = *reserved = *committed = 0;
05247 while (VirtualQuery (memory_info.BaseAddress, &memory_info, sizeof (memory_info))) {
05248 switch (memory_info.State) {
05249 case MEM_FREE:
05250 *free += memory_info.RegionSize;
05251 break;
05252 case MEM_RESERVE:
05253 *reserved += memory_info.RegionSize;
05254 break;
05255 case MEM_COMMIT:
05256 *committed += memory_info.RegionSize;
05257 break;
05258 }
05259 memory_info.BaseAddress = (char *) memory_info.BaseAddress + memory_info.RegionSize;
05260 }
05261 }
05262
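      /* cpuinfo: return kernel and user CPU time, in milliseconds, for the
         whole process (whole != 0) or the current thread */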
05263 static int cpuinfo (int whole, unsigned long *kernel, unsigned long *user) {
05264 if (whole) {
05265 __int64 creation64, exit64, kernel64, user64;
05266 int rc = GetProcessTimes (GetCurrentProcess (),
05267 (FILETIME *) &creation64,
05268 (FILETIME *) &exit64,
05269 (FILETIME *) &kernel64,
05270 (FILETIME *) &user64);
05271 if (! rc) {
05272 *kernel = 0;
05273 *user = 0;
05274 return FALSE;
05275 }
05276 *kernel = (unsigned long) (kernel64 / 10000);
05277 *user = (unsigned long) (user64 / 10000);
05278 return TRUE;
05279 } else {
05280 __int64 creation64, exit64, kernel64, user64;
05281 int rc = GetThreadTimes (GetCurrentThread (),
05282 (FILETIME *) &creation64,
05283 (FILETIME *) &exit64,
05284 (FILETIME *) &kernel64,
05285 (FILETIME *) &user64);
05286 if (! rc) {
05287 *kernel = 0;
05288 *user = 0;
05289 return FALSE;
05290 }
05291 *kernel = (unsigned long) (kernel64 / 10000);
05292 *user = (unsigned long) (user64 / 10000);
05293 return TRUE;
05294 }
05295 }
05296
05297 #endif
05298
05299
05300
05301
05302
05303
05304
05305
05306
05307
05308
05309
05310
05311
05312
05313
05314
05315
05316
05317
05318
05319
05320
05321
05322
05323
05324
05325
05326
05327
05328
05329
05330
05331
05332
05333
05334
05335
05336
05337
05338
05339
05340
05341
05342
05343
05344
05345
05346
05347
05348
05349
05350
05351
05352
05353
05354
05355
05356
05357
05358
05359
05360
05361
05362
05363
05364
05365
05366
05367
05368
05369
05370
05371
05372
05373
05374
05375
05376
05377
05378
05379
05380
05381
05382
05383
05384
05385
05386
05387
05388
05389
05390
05391
05392
05393
05394
05395
05396
05397
05398
05399
05400
05401
05402
05403
05404
05405
05406
05407
05408
05409
05410
05411
05412
05413
05414
05415
05416
05417
05418
05419
05420
05421
05422
05423
05424
05425
05426
05427