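/**********************************************************************

  Garbage collector and object allocator of the Ruby (MRI) virtual
  machine: heap page management, RGenGC bookkeeping, finalizers and
  GC profiling.

**********************************************************************/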
00014 #include "ruby/ruby.h"
00015 #include "ruby/st.h"
00016 #include "ruby/re.h"
00017 #include "ruby/io.h"
00018 #include "ruby/thread.h"
00019 #include "ruby/util.h"
00020 #include "ruby/debug.h"
00021 #include "eval_intern.h"
00022 #include "vm_core.h"
00023 #include "internal.h"
00024 #include "gc.h"
00025 #include "constant.h"
00026 #include "ruby_atomic.h"
00027 #include "probes.h"
00028 #include <stdio.h>
00029 #include <stdarg.h>
00030 #include <setjmp.h>
00031 #include <sys/types.h>
00032 #include <assert.h>
00033
00034 #ifndef __has_feature
00035 # define __has_feature(x) 0
00036 #endif
00037
00038 #ifndef HAVE_MALLOC_USABLE_SIZE
00039 # ifdef _WIN32
00040 # define HAVE_MALLOC_USABLE_SIZE
00041 # define malloc_usable_size(a) _msize(a)
00042 # elif defined HAVE_MALLOC_SIZE
00043 # define HAVE_MALLOC_USABLE_SIZE
00044 # define malloc_usable_size(a) malloc_size(a)
00045 # endif
00046 #endif
00047 #ifdef HAVE_MALLOC_USABLE_SIZE
00048 # ifdef HAVE_MALLOC_H
00049 # include <malloc.h>
00050 # elif defined(HAVE_MALLOC_NP_H)
00051 # include <malloc_np.h>
00052 # elif defined(HAVE_MALLOC_MALLOC_H)
00053 # include <malloc/malloc.h>
00054 # endif
00055 #endif
00056
00057 #if \
00058 __has_feature(address_sanitizer) || \
00059 defined(__SANITIZE_ADDRESS__)
00060 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
00061 __attribute__((no_address_safety_analysis)) \
00062 __attribute__((noinline))
00063 #else
00064 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
00065 #endif
00066
00067 #ifdef HAVE_SYS_TIME_H
00068 #include <sys/time.h>
00069 #endif
00070
00071 #ifdef HAVE_SYS_RESOURCE_H
00072 #include <sys/resource.h>
00073 #endif
00074 #if defined(__native_client__) && defined(NACL_NEWLIB)
00075 # include "nacl/resource.h"
00076 # undef HAVE_POSIX_MEMALIGN
00077 # undef HAVE_MEMALIGN
00078
00079 #endif
00080
00081 #if defined _WIN32 || defined __CYGWIN__
00082 #include <windows.h>
00083 #elif defined(HAVE_POSIX_MEMALIGN)
00084 #elif defined(HAVE_MEMALIGN)
00085 #include <malloc.h>
00086 #endif
00087
00088 #define rb_setjmp(env) RUBY_SETJMP(env)
00089 #define rb_jmp_buf rb_jmpbuf_t
00090
00091 #if defined(HAVE_RB_GC_GUARDED_PTR) && HAVE_RB_GC_GUARDED_PTR
00092 volatile VALUE *
00093 rb_gc_guarded_ptr(volatile VALUE *ptr)
00094 {
00095 return ptr;
00096 }
00097 #endif
00098
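/* Compile-time defaults for the GC tuning parameters gathered in
 * ruby_gc_params_t below; each #ifndef lets a build override a value by
 * defining the macro beforehand. */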
00099 #ifndef GC_HEAP_FREE_SLOTS
00100 #define GC_HEAP_FREE_SLOTS 4096
00101 #endif
00102 #ifndef GC_HEAP_INIT_SLOTS
00103 #define GC_HEAP_INIT_SLOTS 10000
00104 #endif
00105 #ifndef GC_HEAP_GROWTH_FACTOR
00106 #define GC_HEAP_GROWTH_FACTOR 1.8
00107 #endif
00108 #ifndef GC_HEAP_GROWTH_MAX_SLOTS
00109 #define GC_HEAP_GROWTH_MAX_SLOTS 0
00110 #endif
00111 #ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
00112 #define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
00113 #endif
00114
00115 #ifndef GC_MALLOC_LIMIT_MIN
00116 #define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
00117 #endif
00118 #ifndef GC_MALLOC_LIMIT_MAX
00119 #define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
00120 #endif
00121 #ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
00122 #define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
00123 #endif
00124
00125 #ifndef GC_OLDMALLOC_LIMIT_MIN
00126 #define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
00127 #endif
00128 #ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
00129 #define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
00130 #endif
00131 #ifndef GC_OLDMALLOC_LIMIT_MAX
00132 #define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)
00133 #endif
00134
00135 typedef struct {
00136 unsigned int heap_init_slots;
00137 unsigned int heap_free_slots;
00138 double growth_factor;
00139 unsigned int growth_max_slots;
00140 double oldobject_limit_factor;
00141 unsigned int malloc_limit_min;
00142 unsigned int malloc_limit_max;
00143 double malloc_limit_growth_factor;
00144 unsigned int oldmalloc_limit_min;
00145 unsigned int oldmalloc_limit_max;
00146 double oldmalloc_limit_growth_factor;
00147 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
00148 VALUE gc_stress;
00149 #endif
00150 } ruby_gc_params_t;
00151
00152 static ruby_gc_params_t gc_params = {
00153 GC_HEAP_INIT_SLOTS,
00154 GC_HEAP_FREE_SLOTS,
00155 GC_HEAP_GROWTH_FACTOR,
00156 GC_HEAP_GROWTH_MAX_SLOTS,
00157 GC_HEAP_OLDOBJECT_LIMIT_FACTOR,
00158 GC_MALLOC_LIMIT_MIN,
00159 GC_MALLOC_LIMIT_MAX,
00160 GC_MALLOC_LIMIT_GROWTH_FACTOR,
00161 GC_OLDMALLOC_LIMIT_MIN,
00162 GC_OLDMALLOC_LIMIT_MAX,
00163 GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
00164 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
00165 FALSE,
00166 #endif
00167 };
00168
/* GC_DEBUG: if nonzero, every RVALUE additionally records the file and line
 * of its allocation site (see the GC_DEBUG fields of struct RVALUE and
 * newobj_of). */
00172 #ifndef GC_DEBUG
00173 #define GC_DEBUG 0
00174 #endif
00175
00176 #if USE_RGENGC

/* RGENGC_DEBUG: if nonzero, enables the rgengc_report() debugging output
 * used throughout this file; the first argument of each call is a
 * verbosity level (see the rgengc_report macro below). */
00184 #ifndef RGENGC_DEBUG
00185 #define RGENGC_DEBUG 0
00186 #endif

/* RGENGC_CHECK_MODE: if nonzero, enables expensive consistency assertions
 * such as check_gen_consistency() and the SPECIAL_CONST checks in
 * FL_TEST2/FL_SET2/FL_UNSET2; a value of 2 or more additionally maintains
 * the allrefs table for reference checking. */
00195 #ifndef RGENGC_CHECK_MODE
00196 #define RGENGC_CHECK_MODE 0
00197 #endif

/* RGENGC_PROFILE: if nonzero, counts generational events (generated normal
 * and shady objects, shading, promotions, remember-set insertions); a value
 * of 2 or more also records the counts per object type. */
00204 #ifndef RGENGC_PROFILE
00205 #define RGENGC_PROFILE 0
00206 #endif

/* RGENGC_THREEGEN: selects the three-generation variant (infant -> young ->
 * old); when 0, objects are promoted directly from infant to old. */
00213 #ifndef RGENGC_THREEGEN
00214 #define RGENGC_THREEGEN 0
00215 #endif

/* RGENGC_ESTIMATE_OLDMALLOC: if nonzero, tracks malloc'ed memory attributed
 * to old objects (rgengc.oldmalloc_increase) so that a major GC can be
 * requested with GPR_FLAG_MAJOR_BY_OLDMALLOC once the estimate passes
 * oldmalloc_increase_limit. */
00223 #ifndef RGENGC_ESTIMATE_OLDMALLOC
00224 #define RGENGC_ESTIMATE_OLDMALLOC 1
00225 #endif
00226
00227 #else
00228
00229 #define RGENGC_DEBUG 0
00230 #define RGENGC_CHECK_MODE 0
00231 #define RGENGC_PROFILE 0
00232 #define RGENGC_THREEGEN 0
00233 #define RGENGC_ESTIMATE_OLDMALLOC 0
00234
00235 #endif
00236
00237 #ifndef GC_PROFILE_MORE_DETAIL
00238 #define GC_PROFILE_MORE_DETAIL 0
00239 #endif
00240 #ifndef GC_PROFILE_DETAIL_MEMORY
00241 #define GC_PROFILE_DETAIL_MEMORY 0
00242 #endif
00243 #ifndef GC_ENABLE_LAZY_SWEEP
00244 #define GC_ENABLE_LAZY_SWEEP 1
00245 #endif
00246 #ifndef CALC_EXACT_MALLOC_SIZE
00247 #define CALC_EXACT_MALLOC_SIZE 0
00248 #endif
00249 #if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
00250 #ifndef MALLOC_ALLOCATED_SIZE
00251 #define MALLOC_ALLOCATED_SIZE 0
00252 #endif
00253 #else
00254 #define MALLOC_ALLOCATED_SIZE 0
00255 #endif
00256 #ifndef MALLOC_ALLOCATED_SIZE_CHECK
00257 #define MALLOC_ALLOCATED_SIZE_CHECK 0
00258 #endif
00259
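/* Flags recorded for every GC run: the GPR_FLAG_MAJOR_BY_* bits give the
 * reason a major GC was chosen, the NEWOBJ/MALLOC/METHOD/CAPI/STRESS bits
 * record what triggered the collection, and the remaining bits record sweep
 * details.  A combination of these is stored in each profile record and in
 * profile.latest_gc_info. */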
00260 typedef enum {
00261 GPR_FLAG_NONE = 0x000,
00262
00263 GPR_FLAG_MAJOR_BY_NOFREE = 0x001,
00264 GPR_FLAG_MAJOR_BY_OLDGEN = 0x002,
00265 GPR_FLAG_MAJOR_BY_SHADY = 0x004,
00266 GPR_FLAG_MAJOR_BY_RESCAN = 0x008,
00267 GPR_FLAG_MAJOR_BY_STRESS = 0x010,
00268 #if RGENGC_ESTIMATE_OLDMALLOC
00269 GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
00270 #endif
00271 GPR_FLAG_MAJOR_MASK = 0x0ff,
00272
00273
00274 GPR_FLAG_NEWOBJ = 0x100,
00275 GPR_FLAG_MALLOC = 0x200,
00276 GPR_FLAG_METHOD = 0x400,
00277 GPR_FLAG_CAPI = 0x800,
00278 GPR_FLAG_STRESS = 0x1000,
00279
00280
00281 GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
00282 GPR_FLAG_HAVE_FINALIZE = 0x4000
00283 } gc_profile_record_flag;
00284
00285 typedef struct gc_profile_record {
00286 int flags;
00287
00288 double gc_time;
00289 double gc_invoke_time;
00290
00291 size_t heap_total_objects;
00292 size_t heap_use_size;
00293 size_t heap_total_size;
00294
00295 #if GC_PROFILE_MORE_DETAIL
00296 double gc_mark_time;
00297 double gc_sweep_time;
00298
00299 size_t heap_use_pages;
00300 size_t heap_live_objects;
00301 size_t heap_free_objects;
00302
00303 size_t allocate_increase;
00304 size_t allocate_limit;
00305
00306 double prepare_time;
00307 size_t removing_objects;
00308 size_t empty_objects;
00309 #if GC_PROFILE_DETAIL_MEMORY
00310 long maxrss;
00311 long minflt;
00312 long majflt;
00313 #endif
00314 #endif
00315 #if MALLOC_ALLOCATED_SIZE
00316 size_t allocated_size;
00317 #endif
00318
00319 #if RGENGC_PROFILE > 0
00320 size_t old_objects;
00321 size_t remembered_normal_objects;
00322 size_t remembered_shady_objects;
00323 #endif
00324 } gc_profile_record;
00325
00326 #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
00327 #pragma pack(push, 1)
00328 #endif
00329
00330 typedef struct RVALUE {
00331 union {
00332 struct {
00333 VALUE flags;
00334 struct RVALUE *next;
00335 } free;
00336 struct RBasic basic;
00337 struct RObject object;
00338 struct RClass klass;
00339 struct RFloat flonum;
00340 struct RString string;
00341 struct RArray array;
00342 struct RRegexp regexp;
00343 struct RHash hash;
00344 struct RData data;
00345 struct RTypedData typeddata;
00346 struct RStruct rstruct;
00347 struct RBignum bignum;
00348 struct RFile file;
00349 struct RNode node;
00350 struct RMatch match;
00351 struct RRational rational;
00352 struct RComplex complex;
00353 struct {
00354 struct RBasic basic;
00355 VALUE v1;
00356 VALUE v2;
00357 VALUE v3;
00358 } values;
00359 } as;
00360 #if GC_DEBUG
00361 const char *file;
00362 VALUE line;
00363 #endif
00364 } RVALUE;
00365
00366 #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
00367 #pragma pack(pop)
00368 #endif
00369
00370 typedef uintptr_t bits_t;
00371 enum {
00372 BITS_SIZE = sizeof(bits_t),
00373 BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
00374 };
00375
00376 struct heap_page_header {
00377 struct heap_page *page;
00378 };
00379
00380 struct heap_page_body {
00381 struct heap_page_header header;
/* the page's RVALUE slots follow this header within the aligned block,
 * possibly after an alignment gap (see heap_page_allocate) */
00384 };
00385
00386 struct gc_list {
00387 VALUE *varptr;
00388 struct gc_list *next;
00389 };
00390
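/* Mark stack: objects waiting to be scanned during marking are pushed onto
 * a chain of fixed-size chunks; spent chunks are kept on a small cache list
 * for reuse. */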
00391 #define STACK_CHUNK_SIZE 500
00392
00393 typedef struct stack_chunk {
00394 VALUE data[STACK_CHUNK_SIZE];
00395 struct stack_chunk *next;
00396 } stack_chunk_t;
00397
00398 typedef struct mark_stack {
00399 stack_chunk_t *chunk;
00400 stack_chunk_t *cache;
00401 size_t index;
00402 size_t limit;
00403 size_t cache_size;
00404 size_t unused_cache_size;
00405 } mark_stack_t;
00406
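/* A heap is a doubly linked list of pages plus the subset of pages that
 * still contain free slots.  eden_heap serves new allocations; tomb_heap
 * collects emptied pages so they can be reused or released back to the
 * system (see heap_page_resurrect and heap_pages_free_unused_pages). */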
00407 typedef struct rb_heap_struct {
00408 struct heap_page *pages;
00409 struct heap_page *free_pages;
00410 struct heap_page *using_page;
00411 struct heap_page *sweep_pages;
00412 RVALUE *freelist;
00413 size_t page_length;
00414 size_t total_slots;
00415 } rb_heap_t;
00416
00417 typedef struct rb_objspace {
00418 struct {
00419 size_t limit;
00420 size_t increase;
00421 #if MALLOC_ALLOCATED_SIZE
00422 size_t allocated_size;
00423 size_t allocations;
00424 #endif
00425 } malloc_params;
00426
00427 rb_heap_t eden_heap;
00428 rb_heap_t tomb_heap;
00429
00430 struct {
00431 struct heap_page **sorted;
00432 size_t used;
00433 size_t length;
00434 RVALUE *range[2];
00435
00436 size_t limit;
00437 size_t increment;
00438
00439 size_t swept_slots;
00440 size_t min_free_slots;
00441 size_t max_free_slots;
00442
00443
00444 size_t final_slots;
00445 RVALUE *deferred_final;
00446 } heap_pages;
00447
00448 struct {
00449 int dont_gc;
00450 int dont_lazy_sweep;
00451 int during_gc;
00452 rb_atomic_t finalizing;
00453 } flags;
00454 st_table *finalizer_table;
00455 mark_stack_t mark_stack;
00456 struct {
00457 int run;
00458 gc_profile_record *records;
00459 gc_profile_record *current_record;
00460 size_t next_index;
00461 size_t size;
00462
00463 #if GC_PROFILE_MORE_DETAIL
00464 double prepare_time;
00465 #endif
00466 double invoke_time;
00467
00468 #if USE_RGENGC
00469 size_t minor_gc_count;
00470 size_t major_gc_count;
00471 #if RGENGC_PROFILE > 0
00472 size_t generated_normal_object_count;
00473 size_t generated_shady_object_count;
00474 size_t shade_operation_count;
00475 size_t promote_infant_count;
00476 #if RGENGC_THREEGEN
00477 size_t promote_young_count;
00478 #endif
00479 size_t remembered_normal_object_count;
00480 size_t remembered_shady_object_count;
00481
00482 #if RGENGC_PROFILE >= 2
00483 size_t generated_normal_object_count_types[RUBY_T_MASK];
00484 size_t generated_shady_object_count_types[RUBY_T_MASK];
00485 size_t shade_operation_count_types[RUBY_T_MASK];
00486 size_t promote_infant_types[RUBY_T_MASK];
00487 #if RGENGC_THREEGEN
00488 size_t promote_young_types[RUBY_T_MASK];
00489 #endif
00490 size_t remembered_normal_object_count_types[RUBY_T_MASK];
00491 size_t remembered_shady_object_count_types[RUBY_T_MASK];
00492 #endif
00493 #endif
00494 #endif
00495
00496
00497 double gc_sweep_start_time;
00498 size_t total_allocated_object_num_at_gc_start;
00499 size_t heap_used_at_gc_start;
00500
00501
00502 size_t count;
00503 size_t total_allocated_object_num;
00504 size_t total_freed_object_num;
00505 int latest_gc_info;
00506 } profile;
00507 struct gc_list *global_list;
00508 rb_event_flag_t hook_events;
00509 VALUE gc_stress;
00510
00511 struct mark_func_data_struct {
00512 void *data;
00513 void (*mark_func)(VALUE v, void *data);
00514 } *mark_func_data;
00515
00516 #if USE_RGENGC
00517 struct {
00518 int during_minor_gc;
00519 int parent_object_is_old;
00520
00521 int need_major_gc;
00522
00523 size_t last_major_gc;
00524
00525 size_t remembered_shady_object_count;
00526 size_t remembered_shady_object_limit;
00527 size_t old_object_count;
00528 size_t old_object_limit;
00529 #if RGENGC_THREEGEN
00530 size_t young_object_count;
00531 #endif
00532
00533 #if RGENGC_ESTIMATE_OLDMALLOC
00534 size_t oldmalloc_increase;
00535 size_t oldmalloc_increase_limit;
00536 #endif
00537
00538 #if RGENGC_CHECK_MODE >= 2
00539 struct st_table *allrefs_table;
00540 size_t error_count;
00541 #endif
00542 } rgengc;
00543 #endif
00544 } rb_objspace_t;
00545
00546
00547 #ifndef HEAP_ALIGN_LOG
/* default heap page alignment: 1 << 14 = 16KB */
00549 #define HEAP_ALIGN_LOG 14
00550 #endif
00551 #define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
00552 enum {
00553 HEAP_ALIGN = (1UL << HEAP_ALIGN_LOG),
00554 HEAP_ALIGN_MASK = (~(~0UL << HEAP_ALIGN_LOG)),
00555 REQUIRED_SIZE_BY_MALLOC = (sizeof(size_t) * 5),
00556 HEAP_SIZE = (HEAP_ALIGN - REQUIRED_SIZE_BY_MALLOC),
00557 HEAP_OBJ_LIMIT = (unsigned int)((HEAP_SIZE - sizeof(struct heap_page_header))/sizeof(struct RVALUE)),
00558 HEAP_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_SIZE, sizeof(struct RVALUE)), BITS_BITLENGTH),
00559 HEAP_BITMAP_SIZE = ( BITS_SIZE * HEAP_BITMAP_LIMIT),
00560 HEAP_BITMAP_PLANES = USE_RGENGC ? 3 : 1
00561 };
00562
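/* Per-page management data.  The page body itself is a HEAP_ALIGN-aligned
 * block of HEAP_SIZE bytes holding up to HEAP_OBJ_LIMIT RVALUE slots; mark
 * bits (and, with RGenGC, remember-set and old-generation bits) are kept in
 * per-page bitmaps rather than in the objects themselves. */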
00563 struct heap_page {
00564 struct heap_page_body *body;
00565 RVALUE *freelist;
00566 RVALUE *start;
00567 size_t final_slots;
00568 size_t limit;
00569 struct heap_page *next;
00570 struct heap_page *prev;
00571 struct heap_page *free_next;
00572 rb_heap_t *heap;
00573 int before_sweep;
00574
00575 bits_t mark_bits[HEAP_BITMAP_LIMIT];
00576 #if USE_RGENGC
00577 bits_t rememberset_bits[HEAP_BITMAP_LIMIT];
00578 bits_t oldgen_bits[HEAP_BITMAP_LIMIT];
00579 #endif
00580 };
00581
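/* Because page bodies are allocated HEAP_ALIGN-aligned (see aligned_malloc
 * in heap_page_allocate), an object's page is recovered by masking its
 * address down to the alignment boundary. */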
00582 #define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_ALIGN_MASK)))
00583 #define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
00584 #define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
00585 #define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
00586 #define GET_HEAP_REMEMBERSET_BITS(x) (&GET_HEAP_PAGE(x)->rememberset_bits[0])
00587 #define GET_HEAP_OLDGEN_BITS(x) (&GET_HEAP_PAGE(x)->oldgen_bits[0])
00588 #define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_ALIGN_MASK)/sizeof(RVALUE))
00589 #define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
00590 #define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
00591 #define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
00592
00593 #define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
00594 #define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
00595 #define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
00596
00597
00598 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
00599 #define rb_objspace (*GET_VM()->objspace)
00600 #define ruby_initial_gc_stress gc_params.gc_stress
00601 VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
00602 #else
00603 static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT_MIN}};
00604 VALUE *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
00605 #endif
00606
00607 #define malloc_limit objspace->malloc_params.limit
00608 #define malloc_increase objspace->malloc_params.increase
00609 #define malloc_allocated_size objspace->malloc_params.allocated_size
00610 #define heap_pages_sorted objspace->heap_pages.sorted
00611 #define heap_pages_used objspace->heap_pages.used
00612 #define heap_pages_length objspace->heap_pages.length
00613 #define heap_pages_lomem objspace->heap_pages.range[0]
00614 #define heap_pages_himem objspace->heap_pages.range[1]
00615 #define heap_pages_swept_slots objspace->heap_pages.swept_slots
00616 #define heap_pages_increment objspace->heap_pages.increment
00617 #define heap_pages_min_free_slots objspace->heap_pages.min_free_slots
00618 #define heap_pages_max_free_slots objspace->heap_pages.max_free_slots
00619 #define heap_pages_final_slots objspace->heap_pages.final_slots
00620 #define heap_pages_deferred_final objspace->heap_pages.deferred_final
00621 #define heap_eden (&objspace->eden_heap)
00622 #define heap_tomb (&objspace->tomb_heap)
00623 #define dont_gc objspace->flags.dont_gc
00624 #define during_gc objspace->flags.during_gc
00625 #define finalizing objspace->flags.finalizing
00626 #define finalizer_table objspace->finalizer_table
00627 #define global_List objspace->global_list
00628 #define ruby_gc_stress objspace->gc_stress
00629 #define monitor_level objspace->rgengc.monitor_level
00630 #define monitored_object_table objspace->rgengc.monitored_object_table
00631
00632 #define is_lazy_sweeping(heap) ((heap)->sweep_pages != 0)
00633 #if SIZEOF_LONG == SIZEOF_VOIDP
00634 # define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
00635 # define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG)
00636 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
00637 # define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
00638 # define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
00639 ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
00640 #else
00641 # error not supported
00642 #endif
00643
00644 #define RANY(o) ((RVALUE*)(o))
00645
00646 #define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
00647
00648 int ruby_gc_debug_indent = 0;
00649 VALUE rb_mGC;
00650 int ruby_disable_gc_stress = 0;
00651
00652 void rb_gcdebug_print_obj_condition(VALUE obj);
00653
00654 static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
00655 static VALUE define_final0(VALUE obj, VALUE block);
00656
00657 static void negative_size_allocation_error(const char *);
00658 static void *aligned_malloc(size_t, size_t);
00659 static void aligned_free(void *);
00660
00661 static void init_mark_stack(mark_stack_t *stack);
00662
00663 static VALUE lazy_sweep_enable(void);
00664 static int ready_to_gc(rb_objspace_t *objspace);
00665 static int heap_ready_to_gc(rb_objspace_t *objspace, rb_heap_t *heap);
00666 static int garbage_collect(rb_objspace_t *, int full_mark, int immediate_sweep, int reason);
00667 static int garbage_collect_body(rb_objspace_t *, int full_mark, int immediate_sweep, int reason);
00668 static int gc_heap_lazy_sweep(rb_objspace_t *objspace, rb_heap_t *heap);
00669 static void gc_rest_sweep(rb_objspace_t *objspace);
00670 static void gc_heap_rest_sweep(rb_objspace_t *objspace, rb_heap_t *heap);
00671
00672 static void gc_mark_stacked_objects(rb_objspace_t *);
00673 static void gc_mark(rb_objspace_t *objspace, VALUE ptr);
00674 static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr);
00675 static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);
00676
00677 static size_t obj_memsize_of(VALUE obj, int use_tdata);
00678
00679 static double getrusage_time(void);
00680 static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, int reason);
00681 static inline void gc_prof_timer_start(rb_objspace_t *);
00682 static inline void gc_prof_timer_stop(rb_objspace_t *);
00683 static inline void gc_prof_mark_timer_start(rb_objspace_t *);
00684 static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
00685 static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
00686 static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
00687 static inline void gc_prof_set_malloc_info(rb_objspace_t *);
00688 static inline void gc_prof_set_heap_info(rb_objspace_t *);
00689
00690 #define gc_prof_record(objspace) (objspace)->profile.current_record
00691 #define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
00692
00693 #define rgengc_report if (RGENGC_DEBUG) rgengc_report_body
00694 static void rgengc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...);
00695 static const char * type_name(int type, VALUE obj);
00696 static const char *obj_type_name(VALUE obj);
00697
00698 #if USE_RGENGC
00699 static int rgengc_remembered(rb_objspace_t *objspace, VALUE obj);
00700 static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
00701 static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap);
00702 static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap);
00703
00704 #define FL_TEST2(x,f) ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? (rb_bug("FL_TEST2: SPECIAL_CONST"), 0) : FL_TEST_RAW((x),(f)) != 0)
00705 #define FL_SET2(x,f) do {if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) rb_bug("FL_SET2: SPECIAL_CONST"); RBASIC(x)->flags |= (f);} while (0)
00706 #define FL_UNSET2(x,f) do {if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) rb_bug("FL_UNSET2: SPECIAL_CONST"); RBASIC(x)->flags &= ~(f);} while (0)
00707
00708 #define RVALUE_WB_PROTECTED_RAW(obj) FL_TEST2((obj), FL_WB_PROTECTED)
00709 #define RVALUE_WB_PROTECTED(obj) RVALUE_WB_PROTECTED_RAW(check_gen_consistency((VALUE)obj))
00710
00711 #define RVALUE_OLDGEN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_OLDGEN_BITS(obj), (obj))
00712
00713 static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);
00714 static inline int gc_marked(rb_objspace_t *objspace, VALUE ptr);
00715
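/* Only does work when RGENGC_CHECK_MODE > 0: verifies that an object's
 * FL_PROMOTED flag, its old-generation bitmap bit and its
 * write-barrier-protected status agree, then returns obj unchanged so
 * callers can wrap uses inline. */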
00716 static inline VALUE
00717 check_gen_consistency(VALUE obj)
00718 {
00719 if (RGENGC_CHECK_MODE > 0) {
00720 int old_flag = RVALUE_OLDGEN_BITMAP(obj) != 0;
00721 int promoted_flag = FL_TEST2(obj, FL_PROMOTED);
00722 rb_objspace_t *objspace = &rb_objspace;
00723
00724 obj_memsize_of((VALUE)obj, FALSE);
00725
00726 if (!is_pointer_to_heap(objspace, (void *)obj)) {
00727 rb_bug("check_gen_consistency: %p (%s) is not Ruby object.", (void *)obj, obj_type_name(obj));
00728 }
00729
00730 if (promoted_flag) {
00731 if (!RVALUE_WB_PROTECTED_RAW(obj)) {
00732 const char *type = old_flag ? "old" : "young";
00733 rb_bug("check_gen_consistency: %p (%s) is not WB protected, but %s object.", (void *)obj, obj_type_name(obj), type);
00734 }
00735
00736 #if !RGENGC_THREEGEN
00737 if (!old_flag) {
00738 rb_bug("check_gen_consistency: %p (%s) is not infant, but is not old (on 2gen).", (void *)obj, obj_type_name(obj));
00739 }
00740 #endif
00741
00742 if (old_flag && objspace->rgengc.during_minor_gc && !gc_marked(objspace, obj)) {
00743 rb_bug("check_gen_consistency: %p (%s) is old, but is not marked while minor marking.", (void *)obj, obj_type_name(obj));
00744 }
00745 }
00746 else {
00747 if (old_flag) {
00748 rb_bug("check_gen_consistency: %p (%s) is not infant, but is old.", (void *)obj, obj_type_name(obj));
00749 }
00750 }
00751 }
00752 return obj;
00753 }
00754
00755 static inline VALUE
00756 RVALUE_INFANT_P(VALUE obj)
00757 {
00758 check_gen_consistency(obj);
00759 return !FL_TEST2(obj, FL_PROMOTED);
00760 }
00761
00762 static inline VALUE
00763 RVALUE_OLD_BITMAP_P(VALUE obj)
00764 {
00765 check_gen_consistency(obj);
00766 return (RVALUE_OLDGEN_BITMAP(obj) != 0);
00767 }
00768
00769 static inline VALUE
00770 RVALUE_OLD_P(VALUE obj)
00771 {
00772 check_gen_consistency(obj);
00773 #if RGENGC_THREEGEN
00774 return FL_TEST2(obj, FL_PROMOTED) && RVALUE_OLD_BITMAP_P(obj);
00775 #else
00776 return FL_TEST2(obj, FL_PROMOTED);
00777 #endif
00778 }
00779
00780 static inline VALUE
00781 RVALUE_PROMOTED_P(VALUE obj)
00782 {
00783 check_gen_consistency(obj);
00784 return FL_TEST2(obj, FL_PROMOTED);
00785 }
00786
00787 static inline void
00788 RVALUE_PROMOTE_INFANT(VALUE obj)
00789 {
00790 check_gen_consistency(obj);
00791 if (RGENGC_CHECK_MODE && !RVALUE_INFANT_P(obj)) rb_bug("RVALUE_PROMOTE_INFANT: %p (%s) is not infant object.", (void *)obj, obj_type_name(obj));
00792 FL_SET2(obj, FL_PROMOTED);
00793 #if !RGENGC_THREEGEN
00794 MARK_IN_BITMAP(GET_HEAP_OLDGEN_BITS(obj), obj);
00795 #endif
00796 check_gen_consistency(obj);
00797
00798 #if RGENGC_PROFILE >= 1
00799 {
00800 rb_objspace_t *objspace = &rb_objspace;
00801 objspace->profile.promote_infant_count++;
00802
00803 #if RGENGC_PROFILE >= 2
00804 objspace->profile.promote_infant_types[BUILTIN_TYPE(obj)]++;
00805 #endif
00806 }
00807 #endif
00808 }
00809
00810 #if RGENGC_THREEGEN
00811
00812
00813
00814
00815 static inline VALUE
00816 RVALUE_YOUNG_P(VALUE obj)
00817 {
00818 check_gen_consistency(obj);
00819 return FL_TEST2(obj, FL_PROMOTED) && (RVALUE_OLDGEN_BITMAP(obj) == 0);
00820 }
00821
00822 static inline void
00823 RVALUE_PROMOTE_YOUNG(VALUE obj)
00824 {
00825 check_gen_consistency(obj);
00826 if (RGENGC_CHECK_MODE && !RVALUE_YOUNG_P(obj)) rb_bug("RVALUE_PROMOTE_YOUNG: %p (%s) is not young object.", (void *)obj, obj_type_name(obj));
00827 MARK_IN_BITMAP(GET_HEAP_OLDGEN_BITS(obj), obj);
00828 check_gen_consistency(obj);
00829
00830 #if RGENGC_PROFILE >= 1
00831 {
00832 rb_objspace_t *objspace = &rb_objspace;
00833 objspace->profile.promote_young_count++;
00834 #if RGENGC_PROFILE >= 2
00835 objspace->profile.promote_young_types[BUILTIN_TYPE(obj)]++;
00836 #endif
00837 }
00838 #endif
00839 }
00840
00841 static inline void
00842 RVALUE_DEMOTE_FROM_YOUNG(VALUE obj)
00843 {
00844 if (RGENGC_CHECK_MODE && !RVALUE_YOUNG_P(obj))
00845 rb_bug("RVALUE_DEMOTE_FROM_YOUNG: %p (%s) is not young object.", (void *)obj, obj_type_name(obj));
00846
00847 check_gen_consistency(obj);
00848 FL_UNSET2(obj, FL_PROMOTED);
00849 check_gen_consistency(obj);
00850 }
00851 #endif
00852
00853 static inline void
00854 RVALUE_DEMOTE_FROM_OLD(VALUE obj)
00855 {
00856 if (RGENGC_CHECK_MODE && !RVALUE_OLD_P(obj))
00857 rb_bug("RVALUE_DEMOTE_FROM_OLD: %p (%s) is not old object.", (void *)obj, obj_type_name(obj));
00858
00859 check_gen_consistency(obj);
00860 FL_UNSET2(obj, FL_PROMOTED);
00861 CLEAR_IN_BITMAP(GET_HEAP_OLDGEN_BITS(obj), obj);
00862 check_gen_consistency(obj);
00863 }
00864
00865 #endif
00866
00867
00868
00869
00870
00871 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
00872 rb_objspace_t *
00873 rb_objspace_alloc(void)
00874 {
00875 rb_objspace_t *objspace = malloc(sizeof(rb_objspace_t));
00876 memset(objspace, 0, sizeof(*objspace));
00877 ruby_gc_stress = ruby_initial_gc_stress;
00878
00879 malloc_limit = gc_params.malloc_limit_min;
00880
00881 return objspace;
00882 }
00883 #endif
00884
00885 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
00886 static void free_stack_chunks(mark_stack_t *);
00887 static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);
00888
00889 void
00890 rb_objspace_free(rb_objspace_t *objspace)
00891 {
00892 gc_rest_sweep(objspace);
00893
00894 if (objspace->profile.records) {
00895 free(objspace->profile.records);
00896 objspace->profile.records = 0;
00897 }
00898
00899 if (global_List) {
00900 struct gc_list *list, *next;
00901 for (list = global_List; list; list = next) {
00902 next = list->next;
00903 xfree(list);
00904 }
00905 }
00906 if (heap_pages_sorted) {
00907 size_t i;
00908 for (i = 0; i < heap_pages_used; ++i) {
00909 heap_page_free(objspace, heap_pages_sorted[i]);
00910 }
00911 free(heap_pages_sorted);
00912 heap_pages_used = 0;
00913 heap_pages_length = 0;
00914 heap_pages_lomem = 0;
00915 heap_pages_himem = 0;
00916
00917 objspace->eden_heap.page_length = 0;
00918 objspace->eden_heap.total_slots = 0;
00919 objspace->eden_heap.pages = NULL;
00920 }
00921 free_stack_chunks(&objspace->mark_stack);
00922 free(objspace);
00923 }
00924 #endif
00925
00926 static void
00927 heap_pages_expand_sorted(rb_objspace_t *objspace)
00928 {
00929 size_t next_length = heap_pages_increment;
00930 next_length += heap_eden->page_length;
00931 next_length += heap_tomb->page_length;
00932
00933 if (next_length > heap_pages_length) {
00934 struct heap_page **sorted;
00935 size_t size = next_length * sizeof(struct heap_page *);
00936
00937 rgengc_report(3, objspace, "heap_pages_expand_sorted: next_length: %d, size: %d\n", (int)next_length, (int)size);
00938
00939 if (heap_pages_length > 0) {
00940 sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
00941 if (sorted) heap_pages_sorted = sorted;
00942 }
00943 else {
00944 sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
00945 }
00946
00947 if (sorted == 0) {
00948 during_gc = 0;
00949 rb_memerror();
00950 }
00951
00952 heap_pages_length = next_length;
00953 }
00954 }
00955
00956 static inline void
00957 heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
00958 {
00959 RVALUE *p = (RVALUE *)obj;
00960 p->as.free.flags = 0;
00961 p->as.free.next = page->freelist;
00962 page->freelist = p;
00963 rgengc_report(3, objspace, "heap_page_add_freeobj: %p (%s) is added to freelist\n", p, obj_type_name(obj));
00964 }
00965
00966 static inline void
00967 heap_add_freepage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
00968 {
00969 if (page->freelist) {
00970 page->free_next = heap->free_pages;
00971 heap->free_pages = page;
00972 }
00973 }
00974
00975 static void
00976 heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
00977 {
00978 if (page->prev) page->prev->next = page->next;
00979 if (page->next) page->next->prev = page->prev;
00980 if (heap->pages == page) heap->pages = page->next;
00981 page->prev = NULL;
00982 page->next = NULL;
00983 page->heap = NULL;
00984 heap->page_length--;
00985 heap->total_slots -= page->limit;
00986 }
00987
00988 static void
00989 heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
00990 {
00991 heap_pages_used--;
00992 aligned_free(page->body);
00993 free(page);
00994 }
00995
00996 static void
00997 heap_pages_free_unused_pages(rb_objspace_t *objspace)
00998 {
00999 size_t i, j;
01000
01001 for (i = j = 1; j < heap_pages_used; i++) {
01002 struct heap_page *page = heap_pages_sorted[i];
01003
01004 if (page->heap == heap_tomb && page->final_slots == 0) {
01005 if (heap_pages_swept_slots - page->limit > heap_pages_max_free_slots) {
01006 if (0) fprintf(stderr, "heap_pages_free_unused_pages: %d free page %p, heap_pages_swept_slots: %d, heap_pages_max_free_slots: %d\n",
01007 (int)i, page, (int)heap_pages_swept_slots, (int)heap_pages_max_free_slots);
01008 heap_pages_swept_slots -= page->limit;
01009 heap_unlink_page(objspace, heap_tomb, page);
01010 heap_page_free(objspace, page);
01011 continue;
01012 }
01013 else {
01014
01015 }
01016 }
01017 if (i != j) {
01018 heap_pages_sorted[j] = page;
01019 }
01020 j++;
01021 }
01022 assert(j == heap_pages_used);
01023 }
01024
01025 static struct heap_page *
01026 heap_page_allocate(rb_objspace_t *objspace)
01027 {
01028 RVALUE *start, *end, *p;
01029 struct heap_page *page;
01030 struct heap_page_body *page_body = 0;
01031 size_t hi, lo, mid;
01032 size_t limit = HEAP_OBJ_LIMIT;
01033
01034
01035 page_body = (struct heap_page_body *)aligned_malloc(HEAP_ALIGN, HEAP_SIZE);
01036 if (page_body == 0) {
01037 during_gc = 0;
01038 rb_memerror();
01039 }
01040
01041
01042 page = (struct heap_page *)malloc(sizeof(struct heap_page));
01043 if (page == 0) {
01044 aligned_free(page_body);
01045 during_gc = 0;
01046 rb_memerror();
01047 }
01048 MEMZERO((void*)page, struct heap_page, 1);
01049
01050 page->body = page_body;
01051
01052
01053 lo = 0;
01054 hi = heap_pages_used;
01055 while (lo < hi) {
01056 struct heap_page *mid_page;
01057
01058 mid = (lo + hi) / 2;
01059 mid_page = heap_pages_sorted[mid];
01060 if (mid_page->body < page_body) {
01061 lo = mid + 1;
01062 }
01063 else if (mid_page->body > page_body) {
01064 hi = mid;
01065 }
01066 else {
01067 rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
01068 }
01069 }
01070 if (hi < heap_pages_used) {
01071 MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi], struct heap_page_header*, heap_pages_used - hi);
01072 }
01073
01074 heap_pages_sorted[hi] = page;
01075
01076 heap_pages_used++;
01077 assert(heap_pages_used <= heap_pages_length);
01078
01079
01080 start = (RVALUE*)((VALUE)page_body + sizeof(struct heap_page_header));
01081 if ((VALUE)start % sizeof(RVALUE) != 0) {
01082 int delta = (int)(sizeof(RVALUE) - ((VALUE)start % sizeof(RVALUE)));
01083 start = (RVALUE*)((VALUE)start + delta);
01084 limit = (HEAP_SIZE - (size_t)((VALUE)start - (VALUE)page_body))/sizeof(RVALUE);
01085 }
01086 end = start + limit;
01087
01088 if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
01089 if (heap_pages_himem < end) heap_pages_himem = end;
01090
01091 page->start = start;
01092 page->limit = limit;
01093 page_body->header.page = page;
01094
01095 for (p = start; p != end; p++) {
01096 rgengc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", p);
01097 heap_page_add_freeobj(objspace, page, (VALUE)p);
01098 }
01099
01100 return page;
01101 }
01102
01103 static struct heap_page *
01104 heap_page_resurrect(rb_objspace_t *objspace)
01105 {
01106 struct heap_page *page;
01107
01108 if ((page = heap_tomb->pages) != NULL) {
01109 heap_unlink_page(objspace, heap_tomb, page);
01110 return page;
01111 }
01112 return NULL;
01113 }
01114
01115 static struct heap_page *
01116 heap_page_create(rb_objspace_t *objspace)
01117 {
01118 struct heap_page *page = heap_page_resurrect(objspace);
01119 const char *method = "recycle";
01120 if (page == NULL) {
01121 page = heap_page_allocate(objspace);
01122 method = "allocate";
01123 }
01124 if (0) fprintf(stderr, "heap_page_create: %s - %p, heap_pages_length: %d, heap_pages_used: %d, tomb->page_length: %d\n",
01125 method, page, (int)heap_pages_length, (int)heap_pages_used, (int)heap_tomb->page_length);
01126 return page;
01127 }
01128
01129 static void
01130 heap_add_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
01131 {
01132 page->heap = heap;
01133 page->next = heap->pages;
01134 if (heap->pages) heap->pages->prev = page;
01135 heap->pages = page;
01136 heap->page_length++;
01137 heap->total_slots += page->limit;
01138 }
01139
01140 static void
01141 heap_assign_page(rb_objspace_t *objspace, rb_heap_t *heap)
01142 {
01143 struct heap_page *page = heap_page_create(objspace);
01144 heap_add_page(objspace, heap, page);
01145 heap_add_freepage(objspace, heap, page);
01146 }
01147
01148 static void
01149 heap_add_pages(rb_objspace_t *objspace, rb_heap_t *heap, size_t add)
01150 {
01151 size_t i;
01152
01153 heap_pages_increment = add;
01154 heap_pages_expand_sorted(objspace);
01155 for (i = 0; i < add; i++) {
01156 heap_assign_page(objspace, heap);
01157 }
01158 heap_pages_increment = 0;
01159 }
01160
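/* Decide how much to grow the heap: the target is the current non-tomb page
 * count scaled by gc_params.growth_factor, capped via growth_max_slots and
 * floored at minimum_limit; the difference is stored in heap_pages_increment
 * for heap_increment() to consume. */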
01161 static void
01162 heap_set_increment(rb_objspace_t *objspace, size_t minimum_limit)
01163 {
01164 size_t used = heap_pages_used - heap_tomb->page_length;
01165 size_t next_used_limit = (size_t)(used * gc_params.growth_factor);
01166 if (gc_params.growth_max_slots > 0) {
01167 size_t max_used_limit = (size_t)(used + gc_params.growth_max_slots/HEAP_OBJ_LIMIT);
01168 if (next_used_limit > max_used_limit) next_used_limit = max_used_limit;
01169 }
01170 if (next_used_limit == heap_pages_used) next_used_limit++;
01171
01172 if (next_used_limit < minimum_limit) {
01173 next_used_limit = minimum_limit;
01174 }
01175
01176 heap_pages_increment = next_used_limit - used;
01177 heap_pages_expand_sorted(objspace);
01178
01179 if (0) fprintf(stderr, "heap_set_increment: heap_pages_length: %d, heap_pages_used: %d, heap_pages_increment: %d, next_used_limit: %d\n",
01180 (int)heap_pages_length, (int)heap_pages_used, (int)heap_pages_increment, (int)next_used_limit);
01181 }
01182
01183 static int
01184 heap_increment(rb_objspace_t *objspace, rb_heap_t *heap)
01185 {
01186 rgengc_report(5, objspace, "heap_increment: heap_pages_length: %d, heap_pages_inc: %d, heap->page_length: %d\n",
01187 (int)heap_pages_length, (int)heap_pages_increment, (int)heap->page_length);
01188
01189 if (heap_pages_increment > 0) {
01190 heap_pages_increment--;
01191 heap_assign_page(objspace, heap);
01192 return TRUE;
01193 }
01194 return FALSE;
01195 }
01196
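/* Called when a heap has no page with free slots left: tries lazy sweeping,
 * adding an already-scheduled page, or a full collection, and calls
 * rb_memerror() if it cannot make progress; the caller loops until a free
 * page shows up. */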
01197 static struct heap_page *
01198 heap_prepare_freepage(rb_objspace_t *objspace, rb_heap_t *heap)
01199 {
01200 if (!GC_ENABLE_LAZY_SWEEP && objspace->flags.dont_lazy_sweep) {
01201 if (heap_increment(objspace, heap) == 0 &&
01202 garbage_collect(objspace, FALSE, TRUE, GPR_FLAG_NEWOBJ) == 0) {
01203 goto err;
01204 }
01205 goto ok;
01206 }
01207
01208 if (!heap_ready_to_gc(objspace, heap)) return heap->free_pages;
01209
01210 during_gc++;
01211
01212 if ((is_lazy_sweeping(heap) && gc_heap_lazy_sweep(objspace, heap)) || heap_increment(objspace, heap)) {
01213 goto ok;
01214 }
01215
01216 #if GC_PROFILE_MORE_DETAIL
01217 objspace->profile.prepare_time = 0;
01218 #endif
01219 if (garbage_collect_body(objspace, 0, 0, GPR_FLAG_NEWOBJ) == 0) {
01220 err:
01221 during_gc = 0;
01222 rb_memerror();
01223 }
01224 ok:
01225 during_gc = 0;
01226 return heap->free_pages;
01227 }
01228
01229 static RVALUE *
01230 heap_get_freeobj_from_next_freepage(rb_objspace_t *objspace, rb_heap_t *heap)
01231 {
01232 struct heap_page *page;
01233 RVALUE *p;
01234
01235 page = heap->free_pages;
01236 while (page == NULL) {
01237 page = heap_prepare_freepage(objspace, heap);
01238 }
01239 heap->free_pages = page->free_next;
01240 heap->using_page = page;
01241
01242 p = page->freelist;
01243 page->freelist = NULL;
01244
01245 return p;
01246 }
01247
01248 static inline VALUE
01249 heap_get_freeobj(rb_objspace_t *objspace, rb_heap_t *heap)
01250 {
01251 RVALUE *p = heap->freelist;
01252
01253 while (1) {
01254 if (p) {
01255 heap->freelist = p->as.free.next;
01256 return (VALUE)p;
01257 }
01258 else {
01259 p = heap_get_freeobj_from_next_freepage(objspace, heap);
01260 }
01261 }
01262 }
01263
01264 void
01265 rb_objspace_set_event_hook(const rb_event_flag_t event)
01266 {
01267 rb_objspace_t *objspace = &rb_objspace;
01268 objspace->hook_events = event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK;
01269 }
01270
01271 static void
01272 gc_event_hook_body(rb_objspace_t *objspace, const rb_event_flag_t event, VALUE data)
01273 {
01274 rb_thread_t *th = GET_THREAD();
01275 EXEC_EVENT_HOOK(th, event, th->cfp->self, 0, 0, data);
01276 }
01277
01278 #define gc_event_hook(objspace, event, data) do { \
01279 if (UNLIKELY((objspace)->hook_events & (event))) { \
01280 gc_event_hook_body((objspace), (event), (data)); \
01281 } \
01282 } while (0)
01283
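/* Core allocator: take a free slot from the eden heap, install the flags,
 * class and the first three VALUE fields, then fire the internal NEWOBJ
 * event hook.  Under gc_stress a collection is forced before allocating. */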
01284 static VALUE
01285 newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3)
01286 {
01287 rb_objspace_t *objspace = &rb_objspace;
01288 VALUE obj;
01289
01290 if (UNLIKELY(during_gc)) {
01291 dont_gc = 1;
01292 during_gc = 0;
01293 rb_bug("object allocation during garbage collection phase");
01294 }
01295
01296 if (UNLIKELY(ruby_gc_stress && !ruby_disable_gc_stress)) {
01297 if (!garbage_collect(objspace, FALSE, FALSE, GPR_FLAG_NEWOBJ)) {
01298 during_gc = 0;
01299 rb_memerror();
01300 }
01301 }
01302
01303 obj = heap_get_freeobj(objspace, heap_eden);
01304
01305
01306 RBASIC(obj)->flags = flags;
01307 RBASIC_SET_CLASS_RAW(obj, klass);
01308 if (rb_safe_level() >= 3) FL_SET((obj), FL_TAINT);
01309 RANY(obj)->as.values.v1 = v1;
01310 RANY(obj)->as.values.v2 = v2;
01311 RANY(obj)->as.values.v3 = v3;
01312
01313 #if GC_DEBUG
01314 RANY(obj)->file = rb_sourcefile();
01315 RANY(obj)->line = rb_sourceline();
01316 assert(!SPECIAL_CONST_P(obj));
01317 #endif
01318
01319 #if RGENGC_PROFILE
01320 if (flags & FL_WB_PROTECTED) {
01321 objspace->profile.generated_normal_object_count++;
01322 #if RGENGC_PROFILE >= 2
01323 objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
01324 #endif
01325 }
01326 else {
01327 objspace->profile.generated_shady_object_count++;
01328 #if RGENGC_PROFILE >= 2
01329 objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
01330 #endif
01331 }
01332 #endif
01333
01334 rgengc_report(5, objspace, "newobj: %p (%s)\n", (void *)obj, obj_type_name(obj));
01335
01336 #if USE_RGENGC && RGENGC_CHECK_MODE
01337 if (RVALUE_PROMOTED_P(obj)) rb_bug("newobj: %p (%s) is promoted.\n", (void *)obj, obj_type_name(obj));
01338 if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %p (%s) is remembered.\n", (void *)obj, obj_type_name(obj));
01339 #endif
01340
01341 objspace->profile.total_allocated_object_num++;
01342 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj);
01343
01344 return obj;
01345 }
01346
01347 VALUE
01348 rb_newobj(void)
01349 {
01350 return newobj_of(0, T_NONE, 0, 0, 0);
01351 }
01352
01353 VALUE
01354 rb_newobj_of(VALUE klass, VALUE flags)
01355 {
01356 return newobj_of(klass, flags, 0, 0, 0);
01357 }
01358
01359 NODE*
01360 rb_node_newnode(enum node_type type, VALUE a0, VALUE a1, VALUE a2)
01361 {
01362 VALUE flags = (RGENGC_WB_PROTECTED_NODE_CREF && type == NODE_CREF ? FL_WB_PROTECTED : 0);
01363 NODE *n = (NODE *)newobj_of(0, T_NODE | flags, a0, a1, a2);
01364 nd_set_type(n, type);
01365 return n;
01366 }
01367
01368 VALUE
01369 rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
01370 {
01371 if (klass) Check_Type(klass, T_CLASS);
01372 return newobj_of(klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap);
01373 }
01374
01375 VALUE
01376 rb_data_typed_object_alloc(VALUE klass, void *datap, const rb_data_type_t *type)
01377 {
01378 if (klass) Check_Type(klass, T_CLASS);
01379 return newobj_of(klass, T_DATA | (type->flags & ~T_MASK), (VALUE)type, (VALUE)1, (VALUE)datap);
01380 }
01381
01382 size_t
01383 rb_objspace_data_type_memsize(VALUE obj)
01384 {
01385 if (RTYPEDDATA_P(obj) && RTYPEDDATA_TYPE(obj)->function.dsize) {
01386 return RTYPEDDATA_TYPE(obj)->function.dsize(RTYPEDDATA_DATA(obj));
01387 }
01388 else {
01389 return 0;
01390 }
01391 }
01392
01393 const char *
01394 rb_objspace_data_type_name(VALUE obj)
01395 {
01396 if (RTYPEDDATA_P(obj)) {
01397 return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
01398 }
01399 else {
01400 return 0;
01401 }
01402 }
01403
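/* Returns TRUE only if ptr is RVALUE-aligned and lies within one of the
 * allocated heap pages (binary search over heap_pages_sorted), i.e. it may
 * safely be treated as a pointer to an object slot. */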
01404 static inline int
01405 is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
01406 {
01407 register RVALUE *p = RANY(ptr);
01408 register struct heap_page *page;
01409 register size_t hi, lo, mid;
01410
01411 if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
01412 if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
01413
01414
01415 lo = 0;
01416 hi = heap_pages_used;
01417 while (lo < hi) {
01418 mid = (lo + hi) / 2;
01419 page = heap_pages_sorted[mid];
01420 if (page->start <= p) {
01421 if (p < page->start + page->limit) {
01422 return TRUE;
01423 }
01424 lo = mid + 1;
01425 }
01426 else {
01427 hi = mid;
01428 }
01429 }
01430 return FALSE;
01431 }
01432
01433 static int
01434 free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data)
01435 {
01436 if (!me->mark) {
01437 rb_free_method_entry(me);
01438 }
01439 return ST_CONTINUE;
01440 }
01441
01442 void
01443 rb_free_m_tbl(st_table *tbl)
01444 {
01445 st_foreach(tbl, free_method_entry_i, 0);
01446 st_free_table(tbl);
01447 }
01448
01449 void
01450 rb_free_m_tbl_wrapper(struct method_table_wrapper *wrapper)
01451 {
01452 if (wrapper->tbl) {
01453 rb_free_m_tbl(wrapper->tbl);
01454 }
01455 xfree(wrapper);
01456 }
01457
01458 static int
01459 free_const_entry_i(ID key, rb_const_entry_t *ce, st_data_t data)
01460 {
01461 xfree(ce);
01462 return ST_CONTINUE;
01463 }
01464
01465 void
01466 rb_free_const_table(st_table *tbl)
01467 {
01468 st_foreach(tbl, free_const_entry_i, 0);
01469 st_free_table(tbl);
01470 }
01471
01472 static inline void
01473 make_deferred(rb_objspace_t *objspace,RVALUE *p)
01474 {
01475 p->as.basic.flags = T_ZOMBIE;
01476 p->as.free.next = heap_pages_deferred_final;
01477 heap_pages_deferred_final = p;
01478 }
01479
01480 static inline void
01481 make_io_deferred(rb_objspace_t *objspace,RVALUE *p)
01482 {
01483 rb_io_t *fptr = p->as.file.fptr;
01484 make_deferred(objspace, p);
01485 p->as.data.dfree = (void (*)(void*))rb_io_fptr_finalize;
01486 p->as.data.data = fptr;
01487 }
01488
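/* Release the resources owned by obj, per built-in type.  Returns 1 when the
 * object became a zombie whose dfree/IO finalization must run later (see
 * make_deferred), 0 when the slot can be reused immediately. */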
01489 static int
01490 obj_free(rb_objspace_t *objspace, VALUE obj)
01491 {
01492 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_FREEOBJ, obj);
01493
01494 switch (BUILTIN_TYPE(obj)) {
01495 case T_NIL:
01496 case T_FIXNUM:
01497 case T_TRUE:
01498 case T_FALSE:
01499 rb_bug("obj_free() called for broken object");
01500 break;
01501 }
01502
01503 if (FL_TEST(obj, FL_EXIVAR)) {
01504 rb_free_generic_ivar((VALUE)obj);
01505 FL_UNSET(obj, FL_EXIVAR);
01506 }
01507
01508 #if USE_RGENGC
01509 if (MARKED_IN_BITMAP(GET_HEAP_OLDGEN_BITS(obj),obj))
01510 CLEAR_IN_BITMAP(GET_HEAP_OLDGEN_BITS(obj),obj);
01511 #endif
01512
01513 switch (BUILTIN_TYPE(obj)) {
01514 case T_OBJECT:
01515 if (!(RANY(obj)->as.basic.flags & ROBJECT_EMBED) &&
01516 RANY(obj)->as.object.as.heap.ivptr) {
01517 xfree(RANY(obj)->as.object.as.heap.ivptr);
01518 }
01519 break;
01520 case T_MODULE:
01521 case T_CLASS:
01522 if (RCLASS_M_TBL_WRAPPER(obj)) {
01523 rb_free_m_tbl_wrapper(RCLASS_M_TBL_WRAPPER(obj));
01524 }
01525 if (RCLASS_IV_TBL(obj)) {
01526 st_free_table(RCLASS_IV_TBL(obj));
01527 }
01528 if (RCLASS_CONST_TBL(obj)) {
01529 rb_free_const_table(RCLASS_CONST_TBL(obj));
01530 }
01531 if (RCLASS_IV_INDEX_TBL(obj)) {
01532 st_free_table(RCLASS_IV_INDEX_TBL(obj));
01533 }
01534 if (RCLASS_EXT(obj)->subclasses) {
01535 if (BUILTIN_TYPE(obj) == T_MODULE) {
01536 rb_class_detach_module_subclasses(obj);
01537 }
01538 else {
01539 rb_class_detach_subclasses(obj);
01540 }
01541 RCLASS_EXT(obj)->subclasses = NULL;
01542 }
01543 rb_class_remove_from_module_subclasses(obj);
01544 rb_class_remove_from_super_subclasses(obj);
01545 if (RANY(obj)->as.klass.ptr)
01546 xfree(RANY(obj)->as.klass.ptr);
01547 RANY(obj)->as.klass.ptr = NULL;
01548 break;
01549 case T_STRING:
01550 rb_str_free(obj);
01551 break;
01552 case T_ARRAY:
01553 rb_ary_free(obj);
01554 break;
01555 case T_HASH:
01556 if (RANY(obj)->as.hash.ntbl) {
01557 st_free_table(RANY(obj)->as.hash.ntbl);
01558 }
01559 break;
01560 case T_REGEXP:
01561 if (RANY(obj)->as.regexp.ptr) {
01562 onig_free(RANY(obj)->as.regexp.ptr);
01563 }
01564 break;
01565 case T_DATA:
01566 if (DATA_PTR(obj)) {
01567 int free_immediately = FALSE;
01568
01569 if (RTYPEDDATA_P(obj)) {
01570 free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
01571 RDATA(obj)->dfree = RANY(obj)->as.typeddata.type->function.dfree;
01572 if (0 && free_immediately == 0)
01573 fprintf(stderr, "not immediate -> %s\n", RANY(obj)->as.typeddata.type->wrap_struct_name);
01574 }
01575 if (RANY(obj)->as.data.dfree == RUBY_DEFAULT_FREE) {
01576 xfree(DATA_PTR(obj));
01577 }
01578 else if (RANY(obj)->as.data.dfree) {
01579 if (free_immediately) {
01580 (RDATA(obj)->dfree)(DATA_PTR(obj));
01581 }
01582 else {
01583 make_deferred(objspace, RANY(obj));
01584 return 1;
01585 }
01586 }
01587 }
01588 break;
01589 case T_MATCH:
01590 if (RANY(obj)->as.match.rmatch) {
01591 struct rmatch *rm = RANY(obj)->as.match.rmatch;
01592 onig_region_free(&rm->regs, 0);
01593 if (rm->char_offset)
01594 xfree(rm->char_offset);
01595 xfree(rm);
01596 }
01597 break;
01598 case T_FILE:
01599 if (RANY(obj)->as.file.fptr) {
01600 make_io_deferred(objspace, RANY(obj));
01601 return 1;
01602 }
01603 break;
01604 case T_RATIONAL:
01605 case T_COMPLEX:
01606 break;
01607 case T_ICLASS:
01608
01609 if (RCLASS_EXT(obj)->subclasses) {
01610 rb_class_detach_subclasses(obj);
01611 RCLASS_EXT(obj)->subclasses = NULL;
01612 }
01613 rb_class_remove_from_module_subclasses(obj);
01614 rb_class_remove_from_super_subclasses(obj);
01615 xfree(RANY(obj)->as.klass.ptr);
01616 RANY(obj)->as.klass.ptr = NULL;
01617 break;
01618
01619 case T_FLOAT:
01620 break;
01621
01622 case T_BIGNUM:
01623 if (!(RBASIC(obj)->flags & RBIGNUM_EMBED_FLAG) && RBIGNUM_DIGITS(obj)) {
01624 xfree(RBIGNUM_DIGITS(obj));
01625 }
01626 break;
01627 case T_NODE:
01628 switch (nd_type(obj)) {
01629 case NODE_SCOPE:
01630 if (RANY(obj)->as.node.u1.tbl) {
01631 xfree(RANY(obj)->as.node.u1.tbl);
01632 }
01633 break;
01634 case NODE_ARGS:
01635 if (RANY(obj)->as.node.u3.args) {
01636 xfree(RANY(obj)->as.node.u3.args);
01637 }
01638 break;
01639 case NODE_ALLOCA:
01640 xfree(RANY(obj)->as.node.u1.node);
01641 break;
01642 }
01643 break;
01644
01645 case T_STRUCT:
01646 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
01647 RANY(obj)->as.rstruct.as.heap.ptr) {
01648 xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
01649 }
01650 break;
01651
01652 default:
01653 rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
01654 BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
01655 }
01656
01657 return 0;
01658 }
01659
01660 void
01661 Init_heap(void)
01662 {
01663 rb_objspace_t *objspace = &rb_objspace;
01664
01665 #if RGENGC_ESTIMATE_OLDMALLOC
01666 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
01667 #endif
01668
01669 heap_add_pages(objspace, heap_eden, gc_params.heap_init_slots / HEAP_OBJ_LIMIT);
01670
01671 init_mark_stack(&objspace->mark_stack);
01672
01673 #ifdef USE_SIGALTSTACK
01674 {
01675
01676 rb_thread_t *th = GET_THREAD();
01677 void *tmp = th->altstack;
01678 th->altstack = malloc(rb_sigaltstack_size());
01679 free(tmp);
01680 }
01681 #endif
01682
01683 objspace->profile.invoke_time = getrusage_time();
01684 finalizer_table = st_init_numtable();
01685 }
01686
01687 typedef int each_obj_callback(void *, void *, size_t, void *);
01688
01689 struct each_obj_args {
01690 each_obj_callback *callback;
01691 void *data;
01692 };
01693
01694 static VALUE
01695 objspace_each_objects(VALUE arg)
01696 {
01697 size_t i;
01698 struct heap_page_body *last_body = 0;
01699 struct heap_page *page;
01700 RVALUE *pstart, *pend;
01701 rb_objspace_t *objspace = &rb_objspace;
01702 struct each_obj_args *args = (struct each_obj_args *)arg;
01703
01704 i = 0;
01705 while (i < heap_pages_used) {
01706 while (0 < i && last_body < heap_pages_sorted[i-1]->body) i--;
01707 while (i < heap_pages_used && heap_pages_sorted[i]->body <= last_body) i++;
01708 if (heap_pages_used <= i) break;
01709
01710 page = heap_pages_sorted[i];
01711 last_body = page->body;
01712
01713 pstart = page->start;
01714 pend = pstart + page->limit;
01715
01716 if ((*args->callback)(pstart, pend, sizeof(RVALUE), args->data)) {
01717 break;
01718 }
01719 }
01720
01721 return Qnil;
01722 }
01723

/*
 * rb_objspace_each_objects() iterates over every slot of every heap page.
 * The callback is invoked once per page as callback(start, end, stride, data)
 * where [start, end) is the page's RVALUE range and stride is sizeof(RVALUE);
 * returning a non-zero value from the callback stops the iteration.  Any
 * pending lazy sweep is completed first and lazy sweeping stays disabled
 * (flags.dont_lazy_sweep) while the callback runs, so the page set remains
 * stable during the walk.
 */
01760 void
01761 rb_objspace_each_objects(each_obj_callback *callback, void *data)
01762 {
01763 struct each_obj_args args;
01764 rb_objspace_t *objspace = &rb_objspace;
01765 int prev_dont_lazy_sweep = objspace->flags.dont_lazy_sweep;
01766
01767 gc_rest_sweep(objspace);
01768 objspace->flags.dont_lazy_sweep = TRUE;
01769
01770 args.callback = callback;
01771 args.data = data;
01772
01773 if (prev_dont_lazy_sweep) {
01774 objspace_each_objects((VALUE)&args);
01775 }
01776 else {
01777 rb_ensure(objspace_each_objects, (VALUE)&args, lazy_sweep_enable, Qnil);
01778 }
01779 }
01780
01781 struct os_each_struct {
01782 size_t num;
01783 VALUE of;
01784 };
01785
01786 static int
01787 internal_object_p(VALUE obj)
01788 {
01789 RVALUE *p = (RVALUE *)obj;
01790
01791 if (p->as.basic.flags) {
01792 switch (BUILTIN_TYPE(p)) {
01793 case T_NONE:
01794 case T_ICLASS:
01795 case T_NODE:
01796 case T_ZOMBIE:
01797 break;
01798 case T_CLASS:
01799 if (FL_TEST(p, FL_SINGLETON))
01800 break;
01801 default:
01802 if (!p->as.basic.klass) break;
01803 return 0;
01804 }
01805 }
01806 return 1;
01807 }
01808
01809 int
01810 rb_objspace_internal_object_p(VALUE obj)
01811 {
01812 return internal_object_p(obj);
01813 }
01814
01815 static int
01816 os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
01817 {
01818 struct os_each_struct *oes = (struct os_each_struct *)data;
01819 RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend;
01820
01821 for (; p != pend; p++) {
01822 volatile VALUE v = (VALUE)p;
01823 if (!internal_object_p(v)) {
01824 if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
01825 rb_yield(v);
01826 oes->num++;
01827 }
01828 }
01829 }
01830
01831 return 0;
01832 }
01833
01834 static VALUE
01835 os_obj_of(VALUE of)
01836 {
01837 struct os_each_struct oes;
01838
01839 oes.num = 0;
01840 oes.of = of;
01841 rb_objspace_each_objects(os_obj_of_i, &oes);
01842 return SIZET2NUM(oes.num);
01843 }
01844

/*
 *  call-seq:
 *     ObjectSpace.each_object([module]) {|obj| ... } -> integer
 *     ObjectSpace.each_object([module])              -> an_enumerator
 *
 *  Calls the block once for each living, non-immediate object in this
 *  Ruby process.  If <i>module</i> is specified, the block is called only
 *  for objects that are instances of <i>module</i> or one of its
 *  subclasses.  Internal objects (T_NONE, T_ICLASS, T_NODE, T_ZOMBIE,
 *  hidden objects and singleton classes) are skipped.  Returns the number
 *  of objects yielded, or an enumerator when no block is given.
 */
01881 static VALUE
01882 os_each_obj(int argc, VALUE *argv, VALUE os)
01883 {
01884 VALUE of;
01885
01886 if (argc == 0) {
01887 of = 0;
01888 }
01889 else {
01890 rb_scan_args(argc, argv, "01", &of);
01891 }
01892 RETURN_ENUMERATOR(os, 1, &of);
01893 return os_obj_of(of);
01894 }
01895
/*
 *  call-seq:
 *     ObjectSpace.undefine_finalizer(obj)
 *
 *  Removes all finalizers for <i>obj</i>.
 */
01904 static VALUE
01905 undefine_final(VALUE os, VALUE obj)
01906 {
01907 return rb_undefine_finalizer(obj);
01908 }
01909
01910 VALUE
01911 rb_undefine_finalizer(VALUE obj)
01912 {
01913 rb_objspace_t *objspace = &rb_objspace;
01914 st_data_t data = obj;
01915 rb_check_frozen(obj);
01916 st_delete(finalizer_table, &data, 0);
01917 FL_UNSET(obj, FL_FINALIZE);
01918 return obj;
01919 }
01920
01921 static void
01922 should_be_callable(VALUE block)
01923 {
01924 if (!rb_obj_respond_to(block, rb_intern("call"), TRUE)) {
01925 rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
01926 rb_obj_classname(block));
01927 }
01928 }
01929 static void
01930 should_be_finalizable(VALUE obj)
01931 {
01932 rb_check_frozen(obj);
01933 if (!FL_ABLE(obj)) {
01934 rb_raise(rb_eArgError, "cannot define finalizer for %s",
01935 rb_obj_classname(obj));
01936 }
01937 }
01938
/*
 *  call-seq:
 *     ObjectSpace.define_finalizer(obj, aProc=proc())
 *
 *  Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i> was
 *  destroyed.  When it runs, the object id of <i>obj</i> is passed to it
 *  in a one-element array.
 */
01948 static VALUE
01949 define_final(int argc, VALUE *argv, VALUE os)
01950 {
01951 VALUE obj, block;
01952
01953 rb_scan_args(argc, argv, "11", &obj, &block);
01954 should_be_finalizable(obj);
01955 if (argc == 1) {
01956 block = rb_block_proc();
01957 }
01958 else {
01959 should_be_callable(block);
01960 }
01961
01962 return define_final0(obj, block);
01963 }
01964
01965 static VALUE
01966 define_final0(VALUE obj, VALUE block)
01967 {
01968 rb_objspace_t *objspace = &rb_objspace;
01969 VALUE table;
01970 st_data_t data;
01971
01972 RBASIC(obj)->flags |= FL_FINALIZE;
01973
01974 block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block);
01975 OBJ_FREEZE(block);
01976
01977 if (st_lookup(finalizer_table, obj, &data)) {
01978 table = (VALUE)data;
01979 rb_ary_push(table, block);
01980 }
01981 else {
01982 table = rb_ary_new3(1, block);
01983 RBASIC_CLEAR_CLASS(table);
01984 st_add_direct(finalizer_table, obj, table);
01985 }
01986 return block;
01987 }
01988
01989 VALUE
01990 rb_define_finalizer(VALUE obj, VALUE block)
01991 {
01992 should_be_finalizable(obj);
01993 should_be_callable(block);
01994 return define_final0(obj, block);
01995 }
01996
01997 void
01998 rb_gc_copy_finalizer(VALUE dest, VALUE obj)
01999 {
02000 rb_objspace_t *objspace = &rb_objspace;
02001 VALUE table;
02002 st_data_t data;
02003
02004 if (!FL_TEST(obj, FL_FINALIZE)) return;
02005 if (st_lookup(finalizer_table, obj, &data)) {
02006 table = (VALUE)data;
02007 st_insert(finalizer_table, dest, table);
02008 }
02009 FL_SET(dest, FL_FINALIZE);
02010 }
02011
02012 static VALUE
02013 run_single_final(VALUE arg)
02014 {
02015 VALUE *args = (VALUE *)arg;
02016 rb_eval_cmd(args[0], args[1], (int)args[2]);
02017 return Qnil;
02018 }
02019
02020 static void
02021 run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
02022 {
02023 long i;
02024 int status;
02025 VALUE args[3];
02026 VALUE objid = nonspecial_obj_id(obj);
02027
02028 if (RARRAY_LEN(table) > 0) {
02029 args[1] = rb_obj_freeze(rb_ary_new3(1, objid));
02030 }
02031 else {
02032 args[1] = 0;
02033 }
02034
02035 args[2] = (VALUE)rb_safe_level();
02036 for (i=0; i<RARRAY_LEN(table); i++) {
02037 VALUE final = RARRAY_AREF(table, i);
02038 args[0] = RARRAY_AREF(final, 1);
02039 args[2] = FIX2INT(RARRAY_AREF(final, 0));
02040 status = 0;
02041 rb_protect(run_single_final, (VALUE)args, &status);
02042 if (status)
02043 rb_set_errinfo(Qnil);
02044 }
02045 }
02046
02047 static void
02048 run_final(rb_objspace_t *objspace, VALUE obj)
02049 {
02050 RUBY_DATA_FUNC free_func = 0;
02051 st_data_t key, table;
02052
02053 heap_pages_final_slots--;
02054
02055 RBASIC_CLEAR_CLASS(obj);
02056
02057 if (RTYPEDDATA_P(obj)) {
02058 free_func = RTYPEDDATA_TYPE(obj)->function.dfree;
02059 }
02060 else {
02061 free_func = RDATA(obj)->dfree;
02062 }
02063 if (free_func) {
02064 (*free_func)(DATA_PTR(obj));
02065 }
02066
02067 key = (st_data_t)obj;
02068 if (st_delete(finalizer_table, &key, &table)) {
02069 run_finalizer(objspace, obj, (VALUE)table);
02070 }
02071 }
02072
02073 static void
02074 finalize_list(rb_objspace_t *objspace, RVALUE *p)
02075 {
02076 while (p) {
02077 RVALUE *tmp = p->as.free.next;
02078 struct heap_page *page = GET_HEAP_PAGE(p);
02079
02080 run_final(objspace, (VALUE)p);
02081 objspace->profile.total_freed_object_num++;
02082
02083 page->final_slots--;
02084 heap_page_add_freeobj(objspace, GET_HEAP_PAGE(p), (VALUE)p);
02085 heap_pages_swept_slots++;
02086
02087 p = tmp;
02088 }
02089 }
02090
02091 static void
02092 finalize_deferred(rb_objspace_t *objspace)
02093 {
02094 RVALUE *p;
02095
02096 while ((p = ATOMIC_PTR_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
02097 finalize_list(objspace, p);
02098 }
02099 }
02100
02101 static void
02102 gc_finalize_deferred(void *dmy)
02103 {
02104 rb_objspace_t *objspace = &rb_objspace;
02105 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
02106 finalize_deferred(objspace);
02107 ATOMIC_SET(finalizing, 0);
02108 }
02109
02110
02111 void
02112 rb_gc_finalize_deferred(void)
02113 {
02114 gc_finalize_deferred(0);
02115 }
02116
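/* Finalizers cannot run safely in the middle of GC, so they are queued as a
 * postponed job and executed later from a safe point in the VM. */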
02117 static void
02118 gc_finalize_deferred_register(void)
02119 {
02120 if (rb_postponed_job_register_one(0, gc_finalize_deferred, 0) == 0) {
02121 rb_bug("gc_finalize_deferred_register: can't register finalizer.");
02122 }
02123 }
02124
02125 struct force_finalize_list {
02126 VALUE obj;
02127 VALUE table;
02128 struct force_finalize_list *next;
02129 };
02130
02131 static int
02132 force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
02133 {
02134 struct force_finalize_list **prev = (struct force_finalize_list **)arg;
02135 struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
02136 curr->obj = key;
02137 curr->table = val;
02138 curr->next = *prev;
02139 *prev = curr;
02140 return ST_CONTINUE;
02141 }
02142
02143 void
02144 rb_gc_call_finalizer_at_exit(void)
02145 {
02146 rb_objspace_call_finalizer(&rb_objspace);
02147 }
02148
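/* Finalization at interpreter shutdown: finish any pending lazy sweep, drain
 * deferred finalizers, force-run everything left in finalizer_table, then
 * free or defer the remaining T_DATA/T_FILE objects. */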
02149 static void
02150 rb_objspace_call_finalizer(rb_objspace_t *objspace)
02151 {
02152 RVALUE *p, *pend;
02153 size_t i;
02154
02155 gc_rest_sweep(objspace);
02156
02157 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
02158
02159
02160 finalize_deferred(objspace);
02161 assert(heap_pages_deferred_final == 0);
02162
02163
02164 while (finalizer_table->num_entries) {
02165 struct force_finalize_list *list = 0;
02166 st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
02167 while (list) {
02168 struct force_finalize_list *curr = list;
02169 st_data_t obj = (st_data_t)curr->obj;
02170 run_finalizer(objspace, curr->obj, curr->table);
02171 st_delete(finalizer_table, &obj, 0);
02172 list = curr->next;
02173 xfree(curr);
02174 }
02175 }
02176
02177
02178 during_gc++;
02179
02180
02181 for (i = 0; i < heap_pages_used; i++) {
02182 p = heap_pages_sorted[i]->start; pend = p + heap_pages_sorted[i]->limit;
02183 while (p < pend) {
02184 switch (BUILTIN_TYPE(p)) {
02185 case T_DATA:
02186 if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
02187 if (rb_obj_is_thread((VALUE)p)) break;
02188 if (rb_obj_is_mutex((VALUE)p)) break;
02189 if (rb_obj_is_fiber((VALUE)p)) break;
02190 p->as.free.flags = 0;
02191 if (RTYPEDDATA_P(p)) {
02192 RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
02193 }
02194 if (RANY(p)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
02195 xfree(DATA_PTR(p));
02196 }
02197 else if (RANY(p)->as.data.dfree) {
02198 make_deferred(objspace, RANY(p));
02199 }
02200 break;
02201 case T_FILE:
02202 if (RANY(p)->as.file.fptr) {
02203 make_io_deferred(objspace, RANY(p));
02204 }
02205 break;
02206 }
02207 p++;
02208 }
02209 }
02210 during_gc = 0;
02211 if (heap_pages_deferred_final) {
02212 finalize_list(objspace, heap_pages_deferred_final);
02213 }
02214
02215 st_free_table(finalizer_table);
02216 finalizer_table = 0;
02217 ATOMIC_SET(finalizing, 0);
02218 }
02219
02220 static inline int
02221 is_id_value(rb_objspace_t *objspace, VALUE ptr)
02222 {
02223 if (!is_pointer_to_heap(objspace, (void *)ptr)) return FALSE;
02224 if (BUILTIN_TYPE(ptr) > T_FIXNUM) return FALSE;
02225 if (BUILTIN_TYPE(ptr) == T_ICLASS) return FALSE;
02226 return TRUE;
02227 }
02228
02229 static inline int
02230 heap_is_swept_object(rb_objspace_t *objspace, rb_heap_t *heap, VALUE ptr)
02231 {
02232 struct heap_page *page = GET_HEAP_PAGE(ptr);
02233 return page->before_sweep ? FALSE : TRUE;
02234 }
02235
02236 static inline int
02237 is_swept_object(rb_objspace_t *objspace, VALUE ptr)
02238 {
02239 if (heap_is_swept_object(objspace, heap_eden, ptr)) {
02240 return TRUE;
02241 }
02242 else {
02243 return FALSE;
02244 }
02245 }
02246
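/* An object is considered dead if lazy sweeping is still in progress, the
 * object is unmarked, and its page has not been swept yet (the sweep will
 * reclaim it when it gets there). */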
02247 static inline int
02248 is_dead_object(rb_objspace_t *objspace, VALUE ptr)
02249 {
02250 if (!is_lazy_sweeping(heap_eden) || MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) return FALSE;
02251 if (!is_swept_object(objspace, ptr)) return TRUE;
02252 return FALSE;
02253 }
02254
02255 static inline int
02256 is_live_object(rb_objspace_t *objspace, VALUE ptr)
02257 {
02258 switch (BUILTIN_TYPE(ptr)) {
02259 case 0: case T_ZOMBIE:
02260 return FALSE;
02261 }
02262 if (is_dead_object(objspace, ptr)) return FALSE;
02263 return TRUE;
02264 }
02265
02266 static inline int
02267 is_markable_object(rb_objspace_t *objspace, VALUE obj)
02268 {
02269 if (rb_special_const_p(obj)) return 0;
02270
02271 if (RGENGC_CHECK_MODE) {
02272 if (!is_pointer_to_heap(objspace, (void *)obj)) rb_bug("is_markable_object: %p is not pointer to heap", (void *)obj);
02273 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("is_markable_object: %p is T_NONE", (void *)obj);
02274 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("is_markable_object: %p is T_ZOMBIE", (void *)obj);
02275 }
02276
02277 return 1;
02278 }
02279
02280 int
02281 rb_objspace_markable_object_p(VALUE obj)
02282 {
02283 rb_objspace_t *objspace = &rb_objspace;
02284 return is_markable_object(objspace, obj) && is_live_object(objspace, obj);
02285 }
02286
02287
02288
02289
02290
02291
02292
02293
02294
02295
02296
02297
02298
02299
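/*
 *  call-seq:
 *     ObjectSpace._id2ref(object_id) -> an_object
 *
 *  Converts an object id back into a reference to the object, raising
 *  RangeError for ids that do not correspond to a live, non-internal object.
 *
 *  Illustrative usage (example only):
 *
 *     s = "hello"
 *     ObjectSpace._id2ref(s.object_id).equal?(s)   #=> true
 */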
02300 static VALUE
02301 id2ref(VALUE obj, VALUE objid)
02302 {
02303 #if SIZEOF_LONG == SIZEOF_VOIDP
02304 #define NUM2PTR(x) NUM2ULONG(x)
02305 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
02306 #define NUM2PTR(x) NUM2ULL(x)
02307 #endif
02308 rb_objspace_t *objspace = &rb_objspace;
02309 VALUE ptr;
02310 void *p0;
02311
02312 ptr = NUM2PTR(objid);
02313 p0 = (void *)ptr;
02314
02315 if (ptr == Qtrue) return Qtrue;
02316 if (ptr == Qfalse) return Qfalse;
02317 if (ptr == Qnil) return Qnil;
02318 if (FIXNUM_P(ptr)) return (VALUE)ptr;
02319 if (FLONUM_P(ptr)) return (VALUE)ptr;
02320 ptr = obj_id_to_ref(objid);
02321
02322 if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
02323 ID symid = ptr / sizeof(RVALUE);
02324 if (rb_id2name(symid) == 0)
02325 rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
02326 return ID2SYM(symid);
02327 }
02328
02329 if (!is_id_value(objspace, ptr)) {
02330 rb_raise(rb_eRangeError, "%p is not id value", p0);
02331 }
02332 if (!is_live_object(objspace, ptr)) {
02333 rb_raise(rb_eRangeError, "%p is recycled object", p0);
02334 }
02335 if (RBASIC(ptr)->klass == 0) {
02336 rb_raise(rb_eRangeError, "%p is internal object", p0);
02337 }
02338 return (VALUE)ptr;
02339 }
02340
02341
02342
02343
02344
02345
02346
02347
02348
02349
02350
02351
02352
02353
02354
02355
02356
02357
02358
02359
02360
02361
02362
02363
02364
02365
02366
02367
02368
02369
02370
02371
02372
02373
02374
02375 VALUE
02376 rb_obj_id(VALUE obj)
02377 {
02378
02379
02380
02381
02382
02383
02384
02385
02386
02387
02388
02389
02390
02391
02392
02393
02394
02395
02396
02397
02398
02399
02400
02401
02402
02403
02404
02405
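/* Object ids are derived from the VALUE representation: symbols map to
 * (symbol id * sizeof(RVALUE)) + (4 << 2), flonums and the other special
 * constants map to their own VALUE bits, and heap objects fall through to
 * nonspecial_obj_id().  id2ref() above reverses this mapping. */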
02406 if (SYMBOL_P(obj)) {
02407 return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
02408 }
02409 else if (FLONUM_P(obj)) {
02410 #if SIZEOF_LONG == SIZEOF_VOIDP
02411 return LONG2NUM((SIGNED_VALUE)obj);
02412 #else
02413 return LL2NUM((SIGNED_VALUE)obj);
02414 #endif
02415 }
02416 else if (SPECIAL_CONST_P(obj)) {
02417 return LONG2NUM((SIGNED_VALUE)obj);
02418 }
02419 return nonspecial_obj_id(obj);
02420 }
02421
02422 size_t rb_str_memsize(VALUE);
02423 size_t rb_ary_memsize(VALUE);
02424 size_t rb_io_memsize(const rb_io_t *);
02425 size_t rb_generic_ivar_memsize(VALUE);
02426 #include "regint.h"
02427
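/* Return the auxiliary memory attributed to obj (ivar tables, string/array
 * buffers, st_tables, ...), not counting the RVALUE slot itself.  When
 * use_tdata is true, typed T_DATA objects are asked for their size via
 * rb_objspace_data_type_memsize(). */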
02428 static size_t
02429 obj_memsize_of(VALUE obj, int use_tdata)
02430 {
02431 size_t size = 0;
02432
02433 if (SPECIAL_CONST_P(obj)) {
02434 return 0;
02435 }
02436
02437 if (FL_TEST(obj, FL_EXIVAR)) {
02438 size += rb_generic_ivar_memsize(obj);
02439 }
02440
02441 switch (BUILTIN_TYPE(obj)) {
02442 case T_OBJECT:
02443 if (!(RBASIC(obj)->flags & ROBJECT_EMBED) &&
02444 ROBJECT(obj)->as.heap.ivptr) {
02445 size += ROBJECT(obj)->as.heap.numiv * sizeof(VALUE);
02446 }
02447 break;
02448 case T_MODULE:
02449 case T_CLASS:
02450 if (RCLASS_M_TBL_WRAPPER(obj)) {
02451 size += sizeof(struct method_table_wrapper);
02452 }
02453 if (RCLASS_M_TBL(obj)) {
02454 size += st_memsize(RCLASS_M_TBL(obj));
02455 }
02456 if (RCLASS_EXT(obj)) {
02457 if (RCLASS_IV_TBL(obj)) {
02458 size += st_memsize(RCLASS_IV_TBL(obj));
02459 }
02460 if (RCLASS_IV_INDEX_TBL(obj)) {
02461 size += st_memsize(RCLASS_IV_INDEX_TBL(obj));
02462 }
02463 if (RCLASS(obj)->ptr->iv_tbl) {
02464 size += st_memsize(RCLASS(obj)->ptr->iv_tbl);
02465 }
02466 if (RCLASS(obj)->ptr->const_tbl) {
02467 size += st_memsize(RCLASS(obj)->ptr->const_tbl);
02468 }
02469 size += sizeof(rb_classext_t);
02470 }
02471 break;
02472 case T_STRING:
02473 size += rb_str_memsize(obj);
02474 break;
02475 case T_ARRAY:
02476 size += rb_ary_memsize(obj);
02477 break;
02478 case T_HASH:
02479 if (RHASH(obj)->ntbl) {
02480 size += st_memsize(RHASH(obj)->ntbl);
02481 }
02482 break;
02483 case T_REGEXP:
02484 if (RREGEXP(obj)->ptr) {
02485 size += onig_memsize(RREGEXP(obj)->ptr);
02486 }
02487 break;
02488 case T_DATA:
02489 if (use_tdata) size += rb_objspace_data_type_memsize(obj);
02490 break;
02491 case T_MATCH:
02492 if (RMATCH(obj)->rmatch) {
02493 struct rmatch *rm = RMATCH(obj)->rmatch;
02494 size += onig_region_memsize(&rm->regs);
02495 size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
02496 size += sizeof(struct rmatch);
02497 }
02498 break;
02499 case T_FILE:
02500 if (RFILE(obj)->fptr) {
02501 size += rb_io_memsize(RFILE(obj)->fptr);
02502 }
02503 break;
02504 case T_RATIONAL:
02505 case T_COMPLEX:
02506 break;
02507 case T_ICLASS:
02508
02509 break;
02510
02511 case T_FLOAT:
02512 break;
02513
02514 case T_BIGNUM:
02515 if (!(RBASIC(obj)->flags & RBIGNUM_EMBED_FLAG) && RBIGNUM_DIGITS(obj)) {
02516 size += RBIGNUM_LEN(obj) * sizeof(BDIGIT);
02517 }
02518 break;
02519 case T_NODE:
02520 switch (nd_type(obj)) {
02521 case NODE_SCOPE:
02522 if (RNODE(obj)->u1.tbl) {
02523
02524 }
02525 break;
02526 case NODE_ALLOCA:
02527
02528 ;
02529 }
02530 break;
02531
02532 case T_STRUCT:
02533 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
02534 RSTRUCT(obj)->as.heap.ptr) {
02535 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
02536 }
02537 break;
02538
02539 case T_ZOMBIE:
02540 break;
02541
02542 default:
02543 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
02544 BUILTIN_TYPE(obj), (void*)obj);
02545 }
02546
02547 return size;
02548 }
02549
02550 size_t
02551 rb_obj_memsize_of(VALUE obj)
02552 {
02553 return obj_memsize_of(obj, TRUE);
02554 }
02555
02556 static int
02557 set_zero(st_data_t key, st_data_t val, st_data_t arg)
02558 {
02559 VALUE k = (VALUE)key;
02560 VALUE hash = (VALUE)arg;
02561 rb_hash_aset(hash, k, INT2FIX(0));
02562 return ST_CONTINUE;
02563 }
02564
02565
02566
02567
02568
02569
02570
02571
02572
02573
02574
02575
02576
02577
02578
02579
02580
02581
02582
02583
02584
02585
02586
02587
02588
02589
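/*
 *  call-seq:
 *     ObjectSpace.count_objects([result_hash]) -> hash
 *
 *  Counts all heap slots grouped by BUILTIN_TYPE, plus :TOTAL and :FREE
 *  entries.  If a hash is given it is reused (its entries are reset to zero
 *  first); otherwise a new hash is returned.
 *
 *  Illustrative result (counts are example values only):
 *
 *     ObjectSpace.count_objects
 *     #=> {:TOTAL=>10000, :FREE=>3011, :T_STRING=>1508, ...}
 */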
02590 static VALUE
02591 count_objects(int argc, VALUE *argv, VALUE os)
02592 {
02593 rb_objspace_t *objspace = &rb_objspace;
02594 size_t counts[T_MASK+1];
02595 size_t freed = 0;
02596 size_t total = 0;
02597 size_t i;
02598 VALUE hash;
02599
02600 if (rb_scan_args(argc, argv, "01", &hash) == 1) {
02601 if (!RB_TYPE_P(hash, T_HASH))
02602 rb_raise(rb_eTypeError, "non-hash given");
02603 }
02604
02605 for (i = 0; i <= T_MASK; i++) {
02606 counts[i] = 0;
02607 }
02608
02609 for (i = 0; i < heap_pages_used; i++) {
02610 struct heap_page *page = heap_pages_sorted[i];
02611 RVALUE *p, *pend;
02612
02613 p = page->start; pend = p + page->limit;
02614 for (;p < pend; p++) {
02615 if (p->as.basic.flags) {
02616 counts[BUILTIN_TYPE(p)]++;
02617 }
02618 else {
02619 freed++;
02620 }
02621 }
02622 total += page->limit;
02623 }
02624
02625 if (hash == Qnil) {
02626 hash = rb_hash_new();
02627 }
02628 else if (!RHASH_EMPTY_P(hash)) {
02629 st_foreach(RHASH_TBL_RAW(hash), set_zero, hash);
02630 }
02631 rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
02632 rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
02633
02634 for (i = 0; i <= T_MASK; i++) {
02635 VALUE type;
02636 switch (i) {
02637 #define COUNT_TYPE(t) case (t): type = ID2SYM(rb_intern(#t)); break;
02638 COUNT_TYPE(T_NONE);
02639 COUNT_TYPE(T_OBJECT);
02640 COUNT_TYPE(T_CLASS);
02641 COUNT_TYPE(T_MODULE);
02642 COUNT_TYPE(T_FLOAT);
02643 COUNT_TYPE(T_STRING);
02644 COUNT_TYPE(T_REGEXP);
02645 COUNT_TYPE(T_ARRAY);
02646 COUNT_TYPE(T_HASH);
02647 COUNT_TYPE(T_STRUCT);
02648 COUNT_TYPE(T_BIGNUM);
02649 COUNT_TYPE(T_FILE);
02650 COUNT_TYPE(T_DATA);
02651 COUNT_TYPE(T_MATCH);
02652 COUNT_TYPE(T_COMPLEX);
02653 COUNT_TYPE(T_RATIONAL);
02654 COUNT_TYPE(T_NIL);
02655 COUNT_TYPE(T_TRUE);
02656 COUNT_TYPE(T_FALSE);
02657 COUNT_TYPE(T_SYMBOL);
02658 COUNT_TYPE(T_FIXNUM);
02659 COUNT_TYPE(T_UNDEF);
02660 COUNT_TYPE(T_NODE);
02661 COUNT_TYPE(T_ICLASS);
02662 COUNT_TYPE(T_ZOMBIE);
02663 #undef COUNT_TYPE
02664 default: type = INT2NUM(i); break;
02665 }
02666 if (counts[i])
02667 rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
02668 }
02669
02670 return hash;
02671 }
02672
02673
02674
02675
02676
02677
02678
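/* Clear the dont_lazy_sweep flag, re-enabling lazy (incremental) sweeping. */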
02679 static VALUE
02680 lazy_sweep_enable(void)
02681 {
02682 rb_objspace_t *objspace = &rb_objspace;
02683
02684 objspace->flags.dont_lazy_sweep = FALSE;
02685 return Qnil;
02686 }
02687
02688 static size_t
02689 objspace_live_slot(rb_objspace_t *objspace)
02690 {
02691 return objspace->profile.total_allocated_object_num - objspace->profile.total_freed_object_num;
02692 }
02693
02694 static size_t
02695 objspace_total_slot(rb_objspace_t *objspace)
02696 {
02697 return heap_eden->total_slots + heap_tomb->total_slots;
02698 }
02699
02700 static size_t
02701 objspace_free_slot(rb_objspace_t *objspace)
02702 {
02703 return objspace_total_slot(objspace) - (objspace_live_slot(objspace) - heap_pages_final_slots);
02704 }
02705
02706 static void
02707 gc_setup_mark_bits(struct heap_page *page)
02708 {
02709 #if USE_RGENGC
02710
02711 memcpy(&page->mark_bits[0], &page->oldgen_bits[0], HEAP_BITMAP_SIZE);
02712 #else
02713
02714 memset(&page->mark_bits[0], 0, HEAP_BITMAP_SIZE);
02715 #endif
02716 }
02717
02718 static inline void
02719 gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page)
02720 {
02721 int i;
02722 size_t empty_slots = 0, freed_slots = 0, final_slots = 0;
02723 RVALUE *p, *pend,*offset;
02724 bits_t *bits, bitset;
02725
02726 rgengc_report(1, objspace, "page_sweep: start.\n");
02727
02728 sweep_page->before_sweep = 0;
02729
02730 p = sweep_page->start; pend = p + sweep_page->limit;
02731 offset = p - NUM_IN_PAGE(p);
02732 bits = sweep_page->mark_bits;
02733
02734
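/* Force the bits outside [start, start + limit) to 1 so out-of-range slots
 * look marked and are never freed by the loop below. */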
02735 bits[BITMAP_INDEX(p)] |= BITMAP_BIT(p)-1;
02736 bits[BITMAP_INDEX(pend)] |= ~(BITMAP_BIT(pend) - 1);
02737
02738 for (i=0; i < HEAP_BITMAP_LIMIT; i++) {
02739 bitset = ~bits[i];
02740 if (bitset) {
02741 p = offset + i * BITS_BITLENGTH;
02742 do {
02743 if ((bitset & 1) && BUILTIN_TYPE(p) != T_ZOMBIE) {
02744 if (p->as.basic.flags) {
02745 rgengc_report(3, objspace, "page_sweep: free %p (%s)\n", p, obj_type_name((VALUE)p));
02746 #if USE_RGENGC && RGENGC_CHECK_MODE
02747 if (objspace->rgengc.during_minor_gc && RVALUE_OLD_P((VALUE)p)) rb_bug("page_sweep: %p (%s) is old while minor GC.\n", p, obj_type_name((VALUE)p));
02748 if (rgengc_remembered(objspace, (VALUE)p)) rb_bug("page_sweep: %p (%s) is remembered.\n", p, obj_type_name((VALUE)p));
02749 #endif
02750 if (obj_free(objspace, (VALUE)p)) {
02751 final_slots++;
02752 }
02753 else if (FL_TEST(p, FL_FINALIZE)) {
02754 RDATA(p)->dfree = 0;
02755 make_deferred(objspace,p);
02756 final_slots++;
02757 }
02758 else {
02759 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
02760 heap_page_add_freeobj(objspace, sweep_page, (VALUE)p);
02761 rgengc_report(3, objspace, "page_sweep: %p (%s) is added to freelist\n", p, obj_type_name((VALUE)p));
02762 freed_slots++;
02763 }
02764 }
02765 else {
02766 empty_slots++;
02767 }
02768 }
02769 p++;
02770 bitset >>= 1;
02771 } while (bitset);
02772 }
02773 }
02774
02775 gc_setup_mark_bits(sweep_page);
02776
02777 #if GC_PROFILE_MORE_DETAIL
02778 if (gc_prof_enabled(objspace)) {
02779 gc_profile_record *record = gc_prof_record(objspace);
02780 record->removing_objects += final_slots + freed_slots;
02781 record->empty_objects += empty_slots;
02782 }
02783 #endif
02784
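/* If every slot on this page is now free, retire the page to the tomb heap;
 * otherwise keep it here and, if it gained free slots, link it onto the
 * heap's free-page list. */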
02785 if (final_slots + freed_slots + empty_slots == sweep_page->limit) {
02786
02787 heap_unlink_page(objspace, heap, sweep_page);
02788 heap_add_page(objspace, heap_tomb, sweep_page);
02789 }
02790 else {
02791 if (freed_slots + empty_slots > 0) {
02792 heap_add_freepage(objspace, heap, sweep_page);
02793 }
02794 else {
02795 sweep_page->free_next = NULL;
02796 }
02797 }
02798 heap_pages_swept_slots += freed_slots + empty_slots;
02799 objspace->profile.total_freed_object_num += freed_slots;
02800 heap_pages_final_slots += final_slots;
02801 sweep_page->final_slots = final_slots;
02802
02803 if (0) fprintf(stderr, "gc_page_sweep(%d): freed?: %d, limit: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
02804 (int)rb_gc_count(),
02805 final_slots + freed_slots + empty_slots == sweep_page->limit,
02806 (int)sweep_page->limit, (int)freed_slots, (int)empty_slots, (int)final_slots);
02807
02808 if (heap_pages_deferred_final && !finalizing) {
02809 rb_thread_t *th = GET_THREAD();
02810 if (th) {
02811 gc_finalize_deferred_register();
02812 }
02813 }
02814
02815 rgengc_report(1, objspace, "page_sweep: end.\n");
02816 }
02817
02818
02819 static void
02820 gc_heap_prepare_minimum_pages(rb_objspace_t *objspace, rb_heap_t *heap)
02821 {
02822 if (!heap->free_pages) {
02823
02824 heap_set_increment(objspace, 0);
02825 if (!heap_increment(objspace, heap)) {
02826 during_gc = 0;
02827 rb_memerror();
02828 }
02829 }
02830 }
02831
02832 static void
02833 gc_before_heap_sweep(rb_objspace_t *objspace, rb_heap_t *heap)
02834 {
02835 heap->sweep_pages = heap->pages;
02836 heap->free_pages = NULL;
02837
02838 if (heap->using_page) {
02839 RVALUE **p = &heap->using_page->freelist;
02840 while (*p) {
02841 p = &(*p)->as.free.next;
02842 }
02843 *p = heap->freelist;
02844 heap->using_page = NULL;
02845 }
02846 heap->freelist = NULL;
02847 }
02848
02849 #if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
02850 __attribute__((noinline))
02851 #endif
02852 static void
02853 gc_before_sweep(rb_objspace_t *objspace)
02854 {
02855 rb_heap_t *heap;
02856 size_t total_limit_slot;
02857
02858 rgengc_report(1, objspace, "gc_before_sweep\n");
02859
02860
02861 if (GET_VM()->unlinked_method_entry_list) {
02862 rb_sweep_method_entry(GET_VM());
02863 }
02864
02865 heap_pages_swept_slots = 0;
02866 total_limit_slot = objspace_total_slot(objspace);
02867
02868 heap_pages_min_free_slots = (size_t)(total_limit_slot * 0.30);
02869 if (heap_pages_min_free_slots < gc_params.heap_free_slots) {
02870 heap_pages_min_free_slots = gc_params.heap_free_slots;
02871 }
02872 heap_pages_max_free_slots = (size_t)(total_limit_slot * 0.80);
02873 if (heap_pages_max_free_slots < gc_params.heap_init_slots) {
02874 heap_pages_max_free_slots = gc_params.heap_init_slots;
02875 }
02876 if (0) fprintf(stderr, "heap_pages_min_free_slots: %d, heap_pages_max_free_slots: %d\n",
02877 (int)heap_pages_min_free_slots, (int)heap_pages_max_free_slots);
02878
02879 heap = heap_eden;
02880 gc_before_heap_sweep(objspace, heap);
02881
02882 gc_prof_set_malloc_info(objspace);
02883
02884
02885 if (0) fprintf(stderr, "%d\t%d\t%d\n", (int)rb_gc_count(), (int)malloc_increase, (int)malloc_limit);
02886
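/* Adjust malloc_limit for the next cycle: grow it (up to malloc_limit_max)
 * when the malloc increase exceeded the current limit, otherwise let it decay
 * slowly towards malloc_limit_min. */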
02887 {
02888 size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
02889 size_t old_limit = malloc_limit;
02890
02891 if (inc > malloc_limit) {
02892 malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
02893 if (gc_params.malloc_limit_max > 0 &&
02894 malloc_limit > gc_params.malloc_limit_max) {
02895 malloc_limit = gc_params.malloc_limit_max;
02896 }
02897 }
02898 else {
02899 malloc_limit = (size_t)(malloc_limit * 0.98);
02900 if (malloc_limit < gc_params.malloc_limit_min) {
02901 malloc_limit = gc_params.malloc_limit_min;
02902 }
02903 }
02904
02905 if (0) {
02906 if (old_limit != malloc_limit) {
02907 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
02908 rb_gc_count(), old_limit, malloc_limit);
02909 }
02910 else {
02911 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
02912 rb_gc_count(), malloc_limit);
02913 }
02914 }
02915 }
02916
02917
02918 #if RGENGC_ESTIMATE_OLDMALLOC
02919 if (objspace->rgengc.during_minor_gc) {
02920 if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
02921 objspace->rgengc.need_major_gc = GPR_FLAG_MAJOR_BY_OLDMALLOC;
02922 objspace->rgengc.oldmalloc_increase_limit =
02923 (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
02924
02925 if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
02926 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
02927 }
02928 }
02929
02930 if (0) fprintf(stderr, "%d\t%d\t%u\t%u\t%d\n",
02931 (int)rb_gc_count(),
02932 (int)objspace->rgengc.need_major_gc,
02933 (unsigned int)objspace->rgengc.oldmalloc_increase,
02934 (unsigned int)objspace->rgengc.oldmalloc_increase_limit,
02935 (unsigned int)gc_params.oldmalloc_limit_max);
02936 }
02937 else {
02938
02939 objspace->rgengc.oldmalloc_increase = 0;
02940
02941 if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
02942 objspace->rgengc.oldmalloc_increase_limit =
02943 (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
02944 if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
02945 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
02946 }
02947 }
02948 }
02949
02950 #endif
02951
02952 }
02953
02954 static void
02955 gc_after_sweep(rb_objspace_t *objspace)
02956 {
02957 rb_heap_t *heap = heap_eden;
02958
02959 rgengc_report(1, objspace, "after_gc_sweep: heap->total_slots: %d, heap->swept_slots: %d, min_free_slots: %d\n",
02960 (int)heap->total_slots, (int)heap_pages_swept_slots, (int)heap_pages_min_free_slots);
02961
02962 if (heap_pages_swept_slots < heap_pages_min_free_slots) {
02963 #if USE_RGENGC
02964 if (objspace->rgengc.during_minor_gc && objspace->profile.count - objspace->rgengc.last_major_gc > 2 ) {
02965 objspace->rgengc.need_major_gc = GPR_FLAG_MAJOR_BY_NOFREE;
02966 }
02967 else {
02968 heap_set_increment(objspace, (heap_pages_min_free_slots - heap_pages_swept_slots) / HEAP_OBJ_LIMIT);
02969 heap_increment(objspace, heap);
02970 }
02971 #else
02972 heap_set_increment(objspace, (heap_pages_min_free_slots - heap_pages_swept_slots) / HEAP_OBJ_LIMIT);
02973 heap_increment(objspace, heap);
02974 #endif
02975 }
02976
02977 gc_prof_set_heap_info(objspace);
02978
02979 heap_pages_free_unused_pages(objspace);
02980
02981
02982 if (heap_pages_increment < heap_tomb->page_length) {
02983 heap_pages_increment = heap_tomb->page_length;
02984 }
02985
02986 #if RGENGC_PROFILE > 0
02987 if (0) {
02988 fprintf(stderr, "%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
02989 (int)rb_gc_count(),
02990 (int)objspace->profile.major_gc_count,
02991 (int)objspace->profile.minor_gc_count,
02992 (int)objspace->profile.promote_infant_count,
02993 #if RGENGC_THREEGEN
02994 (int)objspace->profile.promote_young_count,
02995 #else
02996 0,
02997 #endif
02998 (int)objspace->profile.remembered_normal_object_count,
02999 (int)objspace->rgengc.remembered_shady_object_count);
03000 }
03001 #endif
03002
03003 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_SWEEP, 0);
03004 }
03005
03006 static int
03007 gc_heap_lazy_sweep(rb_objspace_t *objspace, rb_heap_t *heap)
03008 {
03009 struct heap_page *page = heap->sweep_pages, *next;
03010 int result = FALSE;
03011
03012 if (page == NULL) return FALSE;
03013
03014 #if GC_ENABLE_LAZY_SWEEP
03015 gc_prof_sweep_timer_start(objspace);
03016 #endif
03017
03018 while (page) {
03019 heap->sweep_pages = next = page->next;
03020
03021 gc_page_sweep(objspace, heap, page);
03022
03023 if (!next) gc_after_sweep(objspace);
03024
03025 if (heap->free_pages) {
03026 result = TRUE;
03027 break;
03028 }
03029
03030 page = next;
03031 }
03032
03033 #if GC_ENABLE_LAZY_SWEEP
03034 gc_prof_sweep_timer_stop(objspace);
03035 #endif
03036
03037 return result;
03038 }
03039
03040 static void
03041 gc_heap_rest_sweep(rb_objspace_t *objspace, rb_heap_t *heap)
03042 {
03043 if (is_lazy_sweeping(heap)) {
03044 during_gc++;
03045 while (is_lazy_sweeping(heap)) {
03046 gc_heap_lazy_sweep(objspace, heap);
03047 }
03048 during_gc = 0;
03049 }
03050 }
03051
03052 static void
03053 gc_rest_sweep(rb_objspace_t *objspace)
03054 {
03055 rb_heap_t *heap = heap_eden;
03056 gc_heap_rest_sweep(objspace, heap);
03057 }
03058
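/* Sweep entry point: either sweep every page immediately, or flag all pages
 * as before_sweep and sweep them lazily, one page at a time, as free slots
 * are needed. */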
03059 static void
03060 gc_sweep(rb_objspace_t *objspace, int immediate_sweep)
03061 {
03062 if (immediate_sweep) {
03063 #if !GC_ENABLE_LAZY_SWEEP
03064 gc_prof_sweep_timer_start(objspace);
03065 #endif
03066 gc_before_sweep(objspace);
03067 gc_heap_rest_sweep(objspace, heap_eden);
03068 #if !GC_ENABLE_LAZY_SWEEP
03069 gc_prof_sweep_timer_stop(objspace);
03070 #endif
03071 }
03072 else {
03073 struct heap_page *page;
03074 gc_before_sweep(objspace);
03075 page = heap_eden->sweep_pages;
03076 while (page) {
03077 page->before_sweep = 1;
03078 page = page->next;
03079 }
03080 gc_heap_lazy_sweep(objspace, heap_eden);
03081 }
03082
03083 gc_heap_prepare_minimum_pages(objspace, heap_eden);
03084 }
03085
03086
03087
03088 static void push_mark_stack(mark_stack_t *, VALUE);
03089 static int pop_mark_stack(mark_stack_t *, VALUE *);
03090 static void shrink_stack_chunk_cache(mark_stack_t *stack);
03091
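/* The mark stack is a linked list of fixed-size chunks; retired chunks are
 * kept in a small cache (add_stack_chunk_cache/shrink_stack_chunk_cache) to
 * avoid repeated malloc/free during marking. */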
03092 static stack_chunk_t *
03093 stack_chunk_alloc(void)
03094 {
03095 stack_chunk_t *res;
03096
03097 res = malloc(sizeof(stack_chunk_t));
03098 if (!res)
03099 rb_memerror();
03100
03101 return res;
03102 }
03103
03104 static inline int
03105 is_mark_stack_empty(mark_stack_t *stack)
03106 {
03107 return stack->chunk == NULL;
03108 }
03109
03110 static void
03111 add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
03112 {
03113 chunk->next = stack->cache;
03114 stack->cache = chunk;
03115 stack->cache_size++;
03116 }
03117
03118 static void
03119 shrink_stack_chunk_cache(mark_stack_t *stack)
03120 {
03121 stack_chunk_t *chunk;
03122
03123 if (stack->unused_cache_size > (stack->cache_size/2)) {
03124 chunk = stack->cache;
03125 stack->cache = stack->cache->next;
03126 stack->cache_size--;
03127 free(chunk);
03128 }
03129 stack->unused_cache_size = stack->cache_size;
03130 }
03131
03132 static void
03133 push_mark_stack_chunk(mark_stack_t *stack)
03134 {
03135 stack_chunk_t *next;
03136
03137 assert(stack->index == stack->limit);
03138 if (stack->cache_size > 0) {
03139 next = stack->cache;
03140 stack->cache = stack->cache->next;
03141 stack->cache_size--;
03142 if (stack->unused_cache_size > stack->cache_size)
03143 stack->unused_cache_size = stack->cache_size;
03144 }
03145 else {
03146 next = stack_chunk_alloc();
03147 }
03148 next->next = stack->chunk;
03149 stack->chunk = next;
03150 stack->index = 0;
03151 }
03152
03153 static void
03154 pop_mark_stack_chunk(mark_stack_t *stack)
03155 {
03156 stack_chunk_t *prev;
03157
03158 prev = stack->chunk->next;
03159 assert(stack->index == 0);
03160 add_stack_chunk_cache(stack, stack->chunk);
03161 stack->chunk = prev;
03162 stack->index = stack->limit;
03163 }
03164
03165 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
03166 static void
03167 free_stack_chunks(mark_stack_t *stack)
03168 {
03169 stack_chunk_t *chunk = stack->chunk;
03170 stack_chunk_t *next = NULL;
03171
03172 while (chunk != NULL) {
03173 next = chunk->next;
03174 free(chunk);
03175 chunk = next;
03176 }
03177 }
03178 #endif
03179
03180 static void
03181 push_mark_stack(mark_stack_t *stack, VALUE data)
03182 {
03183 if (stack->index == stack->limit) {
03184 push_mark_stack_chunk(stack);
03185 }
03186 stack->chunk->data[stack->index++] = data;
03187 }
03188
03189 static int
03190 pop_mark_stack(mark_stack_t *stack, VALUE *data)
03191 {
03192 if (is_mark_stack_empty(stack)) {
03193 return FALSE;
03194 }
03195 if (stack->index == 1) {
03196 *data = stack->chunk->data[--stack->index];
03197 pop_mark_stack_chunk(stack);
03198 }
03199 else {
03200 *data = stack->chunk->data[--stack->index];
03201 }
03202 return TRUE;
03203 }
03204
03205 static void
03206 init_mark_stack(mark_stack_t *stack)
03207 {
03208 int i;
03209
03210 if (0) push_mark_stack_chunk(stack);
03211 stack->index = stack->limit = STACK_CHUNK_SIZE;
03212
03213 for (i=0; i < 4; i++) {
03214 add_stack_chunk_cache(stack, stack_chunk_alloc());
03215 }
03216 stack->unused_cache_size = stack->cache_size;
03217 }
03218
03219
03220
03221 #ifdef __ia64
03222 #define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine.stack_end), th->machine.register_stack_end = rb_ia64_bsp())
03223 #else
03224 #define SET_STACK_END SET_MACHINE_STACK_END(&th->machine.stack_end)
03225 #endif
03226
03227 #define STACK_START (th->machine.stack_start)
03228 #define STACK_END (th->machine.stack_end)
03229 #define STACK_LEVEL_MAX (th->machine.stack_maxsize/sizeof(VALUE))
03230
03231 #if STACK_GROW_DIRECTION < 0
03232 # define STACK_LENGTH (size_t)(STACK_START - STACK_END)
03233 #elif STACK_GROW_DIRECTION > 0
03234 # define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
03235 #else
03236 # define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
03237 : (size_t)(STACK_END - STACK_START + 1))
03238 #endif
03239 #if !STACK_GROW_DIRECTION
03240 int ruby_stack_grow_direction;
03241 int
03242 ruby_get_stack_grow_direction(volatile VALUE *addr)
03243 {
03244 VALUE *end;
03245 SET_MACHINE_STACK_END(&end);
03246
03247 if (end > addr) return ruby_stack_grow_direction = 1;
03248 return ruby_stack_grow_direction = -1;
03249 }
03250 #endif
03251
03252 size_t
03253 ruby_stack_length(VALUE **p)
03254 {
03255 rb_thread_t *th = GET_THREAD();
03256 SET_STACK_END;
03257 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
03258 return STACK_LENGTH;
03259 }
03260
03261 #if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
03262 static int
03263 stack_check(int water_mark)
03264 {
03265 int ret;
03266 rb_thread_t *th = GET_THREAD();
03267 SET_STACK_END;
03268 ret = STACK_LENGTH > STACK_LEVEL_MAX - water_mark;
03269 #ifdef __ia64
03270 if (!ret) {
03271 ret = (VALUE*)rb_ia64_bsp() - th->machine.register_stack_start >
03272 th->machine.register_stack_maxsize/sizeof(VALUE) - water_mark;
03273 }
03274 #endif
03275 return ret;
03276 }
03277 #endif
03278
03279 #define STACKFRAME_FOR_CALL_CFUNC 512
03280
03281 int
03282 ruby_stack_check(void)
03283 {
03284 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
03285 return 0;
03286 #else
03287 return stack_check(STACKFRAME_FOR_CALL_CFUNC);
03288 #endif
03289 }
03290
03291 ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
03292 static void
03293 mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n)
03294 {
03295 VALUE v;
03296 while (n--) {
03297 v = *x;
03298 gc_mark_maybe(objspace, v);
03299 x++;
03300 }
03301 }
03302
03303 static void
03304 gc_mark_locations(rb_objspace_t *objspace, VALUE *start, VALUE *end)
03305 {
03306 long n;
03307
03308 if (end <= start) return;
03309 n = end - start;
03310 mark_locations_array(objspace, start, n);
03311 }
03312
03313 void
03314 rb_gc_mark_locations(VALUE *start, VALUE *end)
03315 {
03316 gc_mark_locations(&rb_objspace, start, end);
03317 }
03318
03319 #define rb_gc_mark_locations(start, end) gc_mark_locations(objspace, (start), (end))
03320
03321 struct mark_tbl_arg {
03322 rb_objspace_t *objspace;
03323 };
03324
03325 static int
03326 mark_entry(st_data_t key, st_data_t value, st_data_t data)
03327 {
03328 struct mark_tbl_arg *arg = (void*)data;
03329 gc_mark(arg->objspace, (VALUE)value);
03330 return ST_CONTINUE;
03331 }
03332
03333 static void
03334 mark_tbl(rb_objspace_t *objspace, st_table *tbl)
03335 {
03336 struct mark_tbl_arg arg;
03337 if (!tbl || tbl->num_entries == 0) return;
03338 arg.objspace = objspace;
03339 st_foreach(tbl, mark_entry, (st_data_t)&arg);
03340 }
03341
03342 static int
03343 mark_key(st_data_t key, st_data_t value, st_data_t data)
03344 {
03345 struct mark_tbl_arg *arg = (void*)data;
03346 gc_mark(arg->objspace, (VALUE)key);
03347 return ST_CONTINUE;
03348 }
03349
03350 static void
03351 mark_set(rb_objspace_t *objspace, st_table *tbl)
03352 {
03353 struct mark_tbl_arg arg;
03354 if (!tbl) return;
03355 arg.objspace = objspace;
03356 st_foreach(tbl, mark_key, (st_data_t)&arg);
03357 }
03358
03359 void
03360 rb_mark_set(st_table *tbl)
03361 {
03362 mark_set(&rb_objspace, tbl);
03363 }
03364
03365 static int
03366 mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
03367 {
03368 struct mark_tbl_arg *arg = (void*)data;
03369 gc_mark(arg->objspace, (VALUE)key);
03370 gc_mark(arg->objspace, (VALUE)value);
03371 return ST_CONTINUE;
03372 }
03373
03374 static void
03375 mark_hash(rb_objspace_t *objspace, st_table *tbl)
03376 {
03377 struct mark_tbl_arg arg;
03378 if (!tbl) return;
03379 arg.objspace = objspace;
03380 st_foreach(tbl, mark_keyvalue, (st_data_t)&arg);
03381 }
03382
03383 void
03384 rb_mark_hash(st_table *tbl)
03385 {
03386 mark_hash(&rb_objspace, tbl);
03387 }
03388
03389 static void
03390 mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
03391 {
03392 const rb_method_definition_t *def = me->def;
03393
03394 gc_mark(objspace, me->klass);
03395 again:
03396 if (!def) return;
03397 switch (def->type) {
03398 case VM_METHOD_TYPE_ISEQ:
03399 gc_mark(objspace, def->body.iseq->self);
03400 break;
03401 case VM_METHOD_TYPE_BMETHOD:
03402 gc_mark(objspace, def->body.proc);
03403 break;
03404 case VM_METHOD_TYPE_ATTRSET:
03405 case VM_METHOD_TYPE_IVAR:
03406 gc_mark(objspace, def->body.attr.location);
03407 break;
03408 case VM_METHOD_TYPE_REFINED:
03409 if (def->body.orig_me) {
03410 def = def->body.orig_me->def;
03411 goto again;
03412 }
03413 break;
03414 default:
03415 break;
03416 }
03417 }
03418
03419 void
03420 rb_mark_method_entry(const rb_method_entry_t *me)
03421 {
03422 mark_method_entry(&rb_objspace, me);
03423 }
03424
03425 static int
03426 mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data)
03427 {
03428 struct mark_tbl_arg *arg = (void*)data;
03429 mark_method_entry(arg->objspace, me);
03430 return ST_CONTINUE;
03431 }
03432
03433 static void
03434 mark_m_tbl_wrapper(rb_objspace_t *objspace, struct method_table_wrapper *wrapper)
03435 {
03436 struct mark_tbl_arg arg;
03437 if (!wrapper || !wrapper->tbl) return;
03438 if (LIKELY(objspace->mark_func_data == 0)) {
03439
03440
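/* A method table can be shared (e.g. with T_ICLASS), so remember the GC
 * count in wrapper->serial to avoid marking the same table twice in one GC. */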
03441 size_t serial = rb_gc_count();
03442 if (wrapper->serial == serial) return;
03443 wrapper->serial = serial;
03444 }
03445 arg.objspace = objspace;
03446 st_foreach(wrapper->tbl, mark_method_entry_i, (st_data_t)&arg);
03447 }
03448
03449 static int
03450 mark_const_entry_i(ID key, const rb_const_entry_t *ce, st_data_t data)
03451 {
03452 struct mark_tbl_arg *arg = (void*)data;
03453 gc_mark(arg->objspace, ce->value);
03454 gc_mark(arg->objspace, ce->file);
03455 return ST_CONTINUE;
03456 }
03457
03458 static void
03459 mark_const_tbl(rb_objspace_t *objspace, st_table *tbl)
03460 {
03461 struct mark_tbl_arg arg;
03462 if (!tbl) return;
03463 arg.objspace = objspace;
03464 st_foreach(tbl, mark_const_entry_i, (st_data_t)&arg);
03465 }
03466
03467 #if STACK_GROW_DIRECTION < 0
03468 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
03469 #elif STACK_GROW_DIRECTION > 0
03470 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
03471 #else
03472 #define GET_STACK_BOUNDS(start, end, appendix) \
03473 ((STACK_END < STACK_START) ? \
03474 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
03475 #endif
03476
03477 static void
03478 mark_current_machine_context(rb_objspace_t *objspace, rb_thread_t *th)
03479 {
03480 union {
03481 rb_jmp_buf j;
03482 VALUE v[sizeof(rb_jmp_buf) / sizeof(VALUE)];
03483 } save_regs_gc_mark;
03484 VALUE *stack_start, *stack_end;
03485
03486 FLUSH_REGISTER_WINDOWS;
03487
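/* rb_setjmp() spills the callee-saved registers into save_regs_gc_mark so
 * they can be scanned conservatively along with the machine stack below. */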
03488 rb_setjmp(save_regs_gc_mark.j);
03489
03490
03491
03492
03493 SET_STACK_END;
03494 GET_STACK_BOUNDS(stack_start, stack_end, 1);
03495
03496 mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v));
03497
03498 rb_gc_mark_locations(stack_start, stack_end);
03499 #ifdef __ia64
03500 rb_gc_mark_locations(th->machine.register_stack_start, th->machine.register_stack_end);
03501 #endif
03502 #if defined(__mc68000__)
03503 mark_locations_array(objspace, (VALUE*)((char*)STACK_END + 2),
03504 (STACK_START - STACK_END));
03505 #endif
03506 }
03507
03508 void
03509 rb_gc_mark_machine_stack(rb_thread_t *th)
03510 {
03511 rb_objspace_t *objspace = &rb_objspace;
03512 VALUE *stack_start, *stack_end;
03513
03514 GET_STACK_BOUNDS(stack_start, stack_end, 0);
03515 rb_gc_mark_locations(stack_start, stack_end);
03516 #ifdef __ia64
03517 rb_gc_mark_locations(th->machine.register_stack_start, th->machine.register_stack_end);
03518 #endif
03519 }
03520
03521 void
03522 rb_mark_tbl(st_table *tbl)
03523 {
03524 mark_tbl(&rb_objspace, tbl);
03525 }
03526
03527 static void
03528 gc_mark_maybe(rb_objspace_t *objspace, VALUE obj)
03529 {
03530 (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
03531 if (is_pointer_to_heap(objspace, (void *)obj)) {
03532 int type = BUILTIN_TYPE(obj);
03533 if (type != T_ZOMBIE && type != T_NONE) {
03534 gc_mark(objspace, obj);
03535 }
03536 }
03537 }
03538
03539 void
03540 rb_gc_mark_maybe(VALUE obj)
03541 {
03542 gc_mark_maybe(&rb_objspace, obj);
03543 }
03544
03545 static inline int
03546 gc_marked(rb_objspace_t *objspace, VALUE ptr)
03547 {
03548 register bits_t *bits = GET_HEAP_MARK_BITS(ptr);
03549 if (MARKED_IN_BITMAP(bits, ptr)) return 1;
03550 return 0;
03551 }
03552
03553 static inline int
03554 gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr)
03555 {
03556 register bits_t *bits = GET_HEAP_MARK_BITS(ptr);
03557 if (gc_marked(objspace, ptr)) return 0;
03558 MARK_IN_BITMAP(bits, ptr);
03559 return 1;
03560 }
03561
03562 static void
03563 rgengc_check_relation(rb_objspace_t *objspace, VALUE obj)
03564 {
03565 #if USE_RGENGC
03566 if (objspace->rgengc.parent_object_is_old) {
03567 if (!RVALUE_WB_PROTECTED(obj)) {
03568 if (rgengc_remember(objspace, obj)) {
03569 objspace->rgengc.remembered_shady_object_count++;
03570 }
03571 }
03572 #if RGENGC_THREEGEN
03573 else {
03574 if (gc_marked(objspace, obj)) {
03575 if (!RVALUE_OLD_P(obj)) {
03576
03577 rgengc_remember(objspace, obj);
03578 }
03579 }
03580 else {
03581 if (RVALUE_INFANT_P(obj)) {
03582 RVALUE_PROMOTE_INFANT(obj);
03583 }
03584 }
03585 }
03586 #endif
03587 }
03588 #endif
03589 }
03590
03591 static void
03592 gc_mark(rb_objspace_t *objspace, VALUE ptr)
03593 {
03594 if (!is_markable_object(objspace, ptr)) return;
03595
03596 if (LIKELY(objspace->mark_func_data == 0)) {
03597 rgengc_check_relation(objspace, ptr);
03598 if (!gc_mark_ptr(objspace, ptr)) return;
03599 push_mark_stack(&objspace->mark_stack, ptr);
03600 }
03601 else {
03602 objspace->mark_func_data->mark_func(ptr, objspace->mark_func_data->data);
03603 }
03604 }
03605
03606 void
03607 rb_gc_mark(VALUE ptr)
03608 {
03609 gc_mark(&rb_objspace, ptr);
03610 }
03611
03612
03613
03614 void
03615 rb_gc_resurrect(VALUE obj)
03616 {
03617 rb_objspace_t *objspace = &rb_objspace;
03618
03619 if (is_lazy_sweeping(heap_eden) &&
03620 !gc_marked(objspace, obj) &&
03621 !is_swept_object(objspace, obj)) {
03622 gc_mark_ptr(objspace, obj);
03623 }
03624 }
03625
03626 static void
03627 gc_mark_children(rb_objspace_t *objspace, VALUE ptr)
03628 {
03629 register RVALUE *obj = RANY(ptr);
03630
03631 goto marking;
03632
03633 again:
03634 if (LIKELY(objspace->mark_func_data == 0)) {
03635 obj = RANY(ptr);
03636 if (!is_markable_object(objspace, ptr)) return;
03637 rgengc_check_relation(objspace, ptr);
03638 if (!gc_mark_ptr(objspace, ptr)) return;
03639 }
03640 else {
03641 gc_mark(objspace, ptr);
03642 return;
03643 }
03644
03645 marking:
03646
03647 #if USE_RGENGC
03648 check_gen_consistency((VALUE)obj);
03649
03650 if (LIKELY(objspace->mark_func_data == 0)) {
03651
03652 if (RVALUE_WB_PROTECTED(obj)) {
03653 if (RVALUE_INFANT_P((VALUE)obj)) {
03654
03655 RVALUE_PROMOTE_INFANT((VALUE)obj);
03656 #if RGENGC_THREEGEN
03657
03658 objspace->rgengc.young_object_count++;
03659 objspace->rgengc.parent_object_is_old = FALSE;
03660 #else
03661
03662 objspace->rgengc.old_object_count++;
03663 objspace->rgengc.parent_object_is_old = TRUE;
03664 #endif
03665 rgengc_report(3, objspace, "gc_mark_children: promote infant -> young %p (%s).\n", (void *)obj, obj_type_name((VALUE)obj));
03666 }
03667 else {
03668 objspace->rgengc.parent_object_is_old = TRUE;
03669
03670 #if RGENGC_THREEGEN
03671 if (RVALUE_YOUNG_P((VALUE)obj)) {
03672
03673 RVALUE_PROMOTE_YOUNG((VALUE)obj);
03674 objspace->rgengc.old_object_count++;
03675 rgengc_report(3, objspace, "gc_mark_children: promote young -> old %p (%s).\n", (void *)obj, obj_type_name((VALUE)obj));
03676 }
03677 else {
03678 #endif
03679 if (!objspace->rgengc.during_minor_gc) {
03680
03681 objspace->rgengc.old_object_count++;
03682 }
03683 #if RGENGC_THREEGEN
03684 }
03685 #endif
03686 }
03687 }
03688 else {
03689 rgengc_report(3, objspace, "gc_mark_children: do not promote non-WB-protected %p (%s).\n", (void *)obj, obj_type_name((VALUE)obj));
03690 objspace->rgengc.parent_object_is_old = FALSE;
03691 }
03692 }
03693
03694 check_gen_consistency((VALUE)obj);
03695 #endif
03696
03697 if (FL_TEST(obj, FL_EXIVAR)) {
03698 rb_mark_generic_ivar(ptr);
03699 }
03700
03701 switch (BUILTIN_TYPE(obj)) {
03702 case T_NIL:
03703 case T_FIXNUM:
03704 rb_bug("rb_gc_mark() called for broken object");
03705 break;
03706
03707 case T_NODE:
03708 switch (nd_type(obj)) {
03709 case NODE_IF:
03710 case NODE_FOR:
03711 case NODE_ITER:
03712 case NODE_WHEN:
03713 case NODE_MASGN:
03714 case NODE_RESCUE:
03715 case NODE_RESBODY:
03716 case NODE_CLASS:
03717 case NODE_BLOCK_PASS:
03718 gc_mark(objspace, (VALUE)obj->as.node.u2.node);
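/* fall through */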
03719
03720 case NODE_BLOCK:
03721 case NODE_ARRAY:
03722 case NODE_DSTR:
03723 case NODE_DXSTR:
03724 case NODE_DREGX:
03725 case NODE_DREGX_ONCE:
03726 case NODE_ENSURE:
03727 case NODE_CALL:
03728 case NODE_DEFS:
03729 case NODE_OP_ASGN1:
03730 gc_mark(objspace, (VALUE)obj->as.node.u1.node);
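/* fall through */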
03731
03732 case NODE_SUPER:
03733 case NODE_FCALL:
03734 case NODE_DEFN:
03735 case NODE_ARGS_AUX:
03736 ptr = (VALUE)obj->as.node.u3.node;
03737 goto again;
03738
03739 case NODE_WHILE:
03740 case NODE_UNTIL:
03741 case NODE_AND:
03742 case NODE_OR:
03743 case NODE_CASE:
03744 case NODE_SCLASS:
03745 case NODE_DOT2:
03746 case NODE_DOT3:
03747 case NODE_FLIP2:
03748 case NODE_FLIP3:
03749 case NODE_MATCH2:
03750 case NODE_MATCH3:
03751 case NODE_OP_ASGN_OR:
03752 case NODE_OP_ASGN_AND:
03753 case NODE_MODULE:
03754 case NODE_ALIAS:
03755 case NODE_VALIAS:
03756 case NODE_ARGSCAT:
03757 gc_mark(objspace, (VALUE)obj->as.node.u1.node);
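/* fall through */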
03758
03759 case NODE_GASGN:
03760 case NODE_LASGN:
03761 case NODE_DASGN:
03762 case NODE_DASGN_CURR:
03763 case NODE_IASGN:
03764 case NODE_IASGN2:
03765 case NODE_CVASGN:
03766 case NODE_COLON3:
03767 case NODE_OPT_N:
03768 case NODE_EVSTR:
03769 case NODE_UNDEF:
03770 case NODE_POSTEXE:
03771 ptr = (VALUE)obj->as.node.u2.node;
03772 goto again;
03773
03774 case NODE_HASH:
03775 case NODE_LIT:
03776 case NODE_STR:
03777 case NODE_XSTR:
03778 case NODE_DEFINED:
03779 case NODE_MATCH:
03780 case NODE_RETURN:
03781 case NODE_BREAK:
03782 case NODE_NEXT:
03783 case NODE_YIELD:
03784 case NODE_COLON2:
03785 case NODE_SPLAT:
03786 case NODE_TO_ARY:
03787 ptr = (VALUE)obj->as.node.u1.node;
03788 goto again;
03789
03790 case NODE_SCOPE:
03791 case NODE_CDECL:
03792 case NODE_OPT_ARG:
03793 gc_mark(objspace, (VALUE)obj->as.node.u3.node);
03794 ptr = (VALUE)obj->as.node.u2.node;
03795 goto again;
03796
03797 case NODE_ARGS:
03798 {
03799 struct rb_args_info *args = obj->as.node.u3.args;
03800 if (args) {
03801 if (args->pre_init) gc_mark(objspace, (VALUE)args->pre_init);
03802 if (args->post_init) gc_mark(objspace, (VALUE)args->post_init);
03803 if (args->opt_args) gc_mark(objspace, (VALUE)args->opt_args);
03804 if (args->kw_args) gc_mark(objspace, (VALUE)args->kw_args);
03805 if (args->kw_rest_arg) gc_mark(objspace, (VALUE)args->kw_rest_arg);
03806 }
03807 }
03808 ptr = (VALUE)obj->as.node.u2.node;
03809 goto again;
03810
03811 case NODE_ZARRAY:
03812 case NODE_ZSUPER:
03813 case NODE_VCALL:
03814 case NODE_GVAR:
03815 case NODE_LVAR:
03816 case NODE_DVAR:
03817 case NODE_IVAR:
03818 case NODE_CVAR:
03819 case NODE_NTH_REF:
03820 case NODE_BACK_REF:
03821 case NODE_REDO:
03822 case NODE_RETRY:
03823 case NODE_SELF:
03824 case NODE_NIL:
03825 case NODE_TRUE:
03826 case NODE_FALSE:
03827 case NODE_ERRINFO:
03828 case NODE_BLOCK_ARG:
03829 break;
03830 case NODE_ALLOCA:
03831 mark_locations_array(objspace,
03832 (VALUE*)obj->as.node.u1.value,
03833 obj->as.node.u3.cnt);
03834 gc_mark(objspace, (VALUE)obj->as.node.u2.node);
03835 break;
03836
03837 case NODE_CREF:
03838 gc_mark(objspace, obj->as.node.nd_refinements);
03839 gc_mark(objspace, (VALUE)obj->as.node.nd_clss);
03840 ptr = (VALUE)obj->as.node.nd_next;
03841 goto again;
03842
03843 default:
03844 gc_mark_maybe(objspace, (VALUE)obj->as.node.u1.node);
03845 gc_mark_maybe(objspace, (VALUE)obj->as.node.u2.node);
03846 gc_mark_maybe(objspace, (VALUE)obj->as.node.u3.node);
03847 }
03848 return;
03849 }
03850
03851 gc_mark(objspace, obj->as.basic.klass);
03852 switch (BUILTIN_TYPE(obj)) {
03853 case T_ICLASS:
03854 case T_CLASS:
03855 case T_MODULE:
03856 mark_m_tbl_wrapper(objspace, RCLASS_M_TBL_WRAPPER(obj));
03857 if (!RCLASS_EXT(obj)) break;
03858 mark_tbl(objspace, RCLASS_IV_TBL(obj));
03859 mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
03860 ptr = RCLASS_SUPER((VALUE)obj);
03861 goto again;
03862
03863 case T_ARRAY:
03864 if (FL_TEST(obj, ELTS_SHARED)) {
03865 ptr = obj->as.array.as.heap.aux.shared;
03866 goto again;
03867 }
03868 else {
03869 long i, len = RARRAY_LEN(obj);
03870 const VALUE *ptr = RARRAY_CONST_PTR(obj);
03871 for (i=0; i < len; i++) {
03872 gc_mark(objspace, *ptr++);
03873 }
03874 }
03875 break;
03876
03877 case T_HASH:
03878 mark_hash(objspace, obj->as.hash.ntbl);
03879 ptr = obj->as.hash.ifnone;
03880 goto again;
03881
03882 case T_STRING:
03883 #define STR_ASSOC FL_USER3
03884 if (FL_TEST(obj, RSTRING_NOEMBED) && FL_ANY(obj, ELTS_SHARED|STR_ASSOC)) {
03885 ptr = obj->as.string.as.heap.aux.shared;
03886 goto again;
03887 }
03888 break;
03889
03890 case T_DATA:
03891 if (RTYPEDDATA_P(obj)) {
03892 RUBY_DATA_FUNC mark_func = obj->as.typeddata.type->function.dmark;
03893 if (mark_func) (*mark_func)(DATA_PTR(obj));
03894 }
03895 else {
03896 if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
03897 }
03898 break;
03899
03900 case T_OBJECT:
03901 {
03902 long i, len = ROBJECT_NUMIV(obj);
03903 VALUE *ptr = ROBJECT_IVPTR(obj);
03904 for (i = 0; i < len; i++) {
03905 gc_mark(objspace, *ptr++);
03906 }
03907 }
03908 break;
03909
03910 case T_FILE:
03911 if (obj->as.file.fptr) {
03912 gc_mark(objspace, obj->as.file.fptr->pathv);
03913 gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing);
03914 gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat);
03915 gc_mark(objspace, obj->as.file.fptr->writeconv_pre_ecopts);
03916 gc_mark(objspace, obj->as.file.fptr->encs.ecopts);
03917 gc_mark(objspace, obj->as.file.fptr->write_lock);
03918 }
03919 break;
03920
03921 case T_REGEXP:
03922 ptr = obj->as.regexp.src;
03923 goto again;
03924
03925 case T_FLOAT:
03926 case T_BIGNUM:
03927 break;
03928
03929 case T_MATCH:
03930 gc_mark(objspace, obj->as.match.regexp);
03931 if (obj->as.match.str) {
03932 ptr = obj->as.match.str;
03933 goto again;
03934 }
03935 break;
03936
03937 case T_RATIONAL:
03938 gc_mark(objspace, obj->as.rational.num);
03939 ptr = obj->as.rational.den;
03940 goto again;
03941
03942 case T_COMPLEX:
03943 gc_mark(objspace, obj->as.complex.real);
03944 ptr = obj->as.complex.imag;
03945 goto again;
03946
03947 case T_STRUCT:
03948 {
03949 long len = RSTRUCT_LEN(obj);
03950 const VALUE *ptr = RSTRUCT_CONST_PTR(obj);
03951
03952 while (len--) {
03953 gc_mark(objspace, *ptr++);
03954 }
03955 }
03956 break;
03957
03958 default:
03959 #if GC_DEBUG
03960 rb_gcdebug_print_obj_condition((VALUE)obj);
03961 #endif
03962 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
03963 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
03964 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
03965 BUILTIN_TYPE(obj), (void *)obj,
03966 is_pointer_to_heap(objspace, obj) ? "corrupted object" : "non object");
03967 }
03968 }
03969
03970 static void
03971 gc_mark_stacked_objects(rb_objspace_t *objspace)
03972 {
03973 mark_stack_t *mstack = &objspace->mark_stack;
03974 VALUE obj = 0;
03975
03976 if (!mstack->index) return;
03977 while (pop_mark_stack(mstack, &obj)) {
03978 if (RGENGC_CHECK_MODE > 0 && !gc_marked(objspace, obj)) {
03979 rb_bug("gc_mark_stacked_objects: %p (%s) is infant, but not marked.", (void *)obj, obj_type_name(obj));
03980 }
03981 gc_mark_children(objspace, obj);
03982 }
03983 shrink_stack_chunk_cache(mstack);
03984 }
03985
03986 #ifndef RGENGC_PRINT_TICK
03987 #define RGENGC_PRINT_TICK 0
03988 #endif
03989
03990
03991
03992
03993
03994
03995
03996 #if RGENGC_PRINT_TICK
03997 #if defined(__GNUC__) && defined(__i386__)
03998 typedef unsigned long long tick_t;
03999
04000 static inline tick_t
04001 tick(void)
04002 {
04003 unsigned long long int x;
04004 __asm__ __volatile__ ("rdtsc" : "=A" (x));
04005 return x;
04006 }
04007
04008 #elif defined(__GNUC__) && defined(__x86_64__)
04009 typedef unsigned long long tick_t;
04010
04011 static __inline__ tick_t
04012 tick(void)
04013 {
04014 unsigned long hi, lo;
04015 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
04016 return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
04017 }
04018
04019 #elif defined(_WIN32) && defined(_MSC_VER)
04020 #include <intrin.h>
04021 typedef unsigned __int64 tick_t;
04022
04023 static inline tick_t
04024 tick(void)
04025 {
04026 return __rdtsc();
04027 }
04028
04029 #else
04030 typedef clock_t tick_t;
04031 static inline tick_t
04032 tick(void)
04033 {
04034 return clock();
04035 }
04036 #endif
04037
04038 #define MAX_TICKS 0x100
04039 static tick_t mark_ticks[MAX_TICKS];
04040 static const char *mark_ticks_categories[MAX_TICKS];
04041
04042 static void
04043 show_mark_ticks(void)
04044 {
04045 int i;
04046 fprintf(stderr, "mark ticks result:\n");
04047 for (i=0; i<MAX_TICKS; i++) {
04048 const char *category = mark_ticks_categories[i];
04049 if (category) {
04050 fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
04051 }
04052 else {
04053 break;
04054 }
04055 }
04056 }
04057
04058 #endif
04059
04060 static void
04061 gc_mark_roots(rb_objspace_t *objspace, int full_mark, const char **categoryp)
04062 {
04063 struct gc_list *list;
04064 rb_thread_t *th = GET_THREAD();
04065 if (categoryp) *categoryp = "xxx";
04066
04067 #if RGENGC_PRINT_TICK
04068 tick_t start_tick = tick();
04069 int tick_count = 0;
04070 const char *prev_category = 0;
04071
04072 if (mark_ticks_categories[0] == 0) {
04073 atexit(show_mark_ticks);
04074 }
04075 #endif
04076
04077 #if RGENGC_PRINT_TICK
04078 #define MARK_CHECKPOINT_PRINT_TICK(category) do { \
04079 if (prev_category) { \
04080 tick_t t = tick(); \
04081 mark_ticks[tick_count] = t - start_tick; \
04082 mark_ticks_categories[tick_count] = prev_category; \
04083 tick_count++; \
04084 } \
04085 prev_category = category; \
04086 start_tick = tick(); \
04087 } while (0)
04088 #else
04089 #define MARK_CHECKPOINT_PRINT_TICK(category)
04090 #endif
04091
04092 #define MARK_CHECKPOINT(category) do { \
04093 if (categoryp) *categoryp = category; \
04094 MARK_CHECKPOINT_PRINT_TICK(category); \
04095 } while (0)
04096
04097 MARK_CHECKPOINT("vm");
04098 SET_STACK_END;
04099 th->vm->self ? rb_gc_mark(th->vm->self) : rb_vm_mark(th->vm);
04100
04101 MARK_CHECKPOINT("finalizers");
04102 mark_tbl(objspace, finalizer_table);
04103
04104 MARK_CHECKPOINT("machine_context");
04105 mark_current_machine_context(objspace, th);
04106
04107 MARK_CHECKPOINT("symbols");
04108 #if USE_RGENGC
04109 objspace->rgengc.parent_object_is_old = TRUE;
04110 rb_gc_mark_symbols(full_mark);
04111 objspace->rgengc.parent_object_is_old = FALSE;
04112 #else
04113 rb_gc_mark_symbols(full_mark);
04114 #endif
04115
04116 MARK_CHECKPOINT("encodings");
04117 rb_gc_mark_encodings();
04118
04119
04120 MARK_CHECKPOINT("global_list");
04121 for (list = global_List; list; list = list->next) {
04122 rb_gc_mark_maybe(*list->varptr);
04123 }
04124
04125 MARK_CHECKPOINT("end_proc");
04126 rb_mark_end_proc();
04127
04128 MARK_CHECKPOINT("global_tbl");
04129 rb_gc_mark_global_tbl();
04130
04131
04132 MARK_CHECKPOINT("generic_ivars");
04133 rb_mark_generic_ivar_tbl();
04134
04135 MARK_CHECKPOINT("parser");
04136 rb_gc_mark_parser();
04137
04138 MARK_CHECKPOINT("live_method_entries");
04139 rb_gc_mark_unlinked_live_method_entries(th->vm);
04140
04141 MARK_CHECKPOINT("finish");
04142 #undef MARK_CHECKPOINT
04143 }
04144
04145 static void
04146 gc_marks_body(rb_objspace_t *objspace, int full_mark)
04147 {
04148
04149 rgengc_report(1, objspace, "gc_marks_body: start (%s)\n", full_mark ? "full" : "minor");
04150
04151 #if USE_RGENGC
04152 objspace->rgengc.parent_object_is_old = FALSE;
04153 objspace->rgengc.during_minor_gc = full_mark ? FALSE : TRUE;
04154
04155 if (objspace->rgengc.during_minor_gc) {
04156 objspace->profile.minor_gc_count++;
04157 rgengc_rememberset_mark(objspace, heap_eden);
04158 }
04159 else {
04160 objspace->profile.major_gc_count++;
04161 rgengc_mark_and_rememberset_clear(objspace, heap_eden);
04162 }
04163 #endif
04164 gc_mark_roots(objspace, full_mark, 0);
04165 gc_mark_stacked_objects(objspace);
04166
04167 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_MARK, 0);
04168 rgengc_report(1, objspace, "gc_marks_body: end (%s)\n", full_mark ? "full" : "minor");
04169 }
04170
04171 struct verify_internal_consistency_struct {
04172 rb_objspace_t *objspace;
04173 int err_count;
04174 VALUE parent;
04175 };
04176
04177 #if USE_RGENGC
04178 static void
04179 verify_internal_consistency_reachable_i(VALUE child, void *ptr)
04180 {
04181 struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
04182
04183 assert(RVALUE_OLD_P(data->parent));
04184
04185 if (!RVALUE_OLD_P(child)) {
04186 if (!MARKED_IN_BITMAP(GET_HEAP_PAGE(data->parent)->rememberset_bits, data->parent) &&
04187 !MARKED_IN_BITMAP(GET_HEAP_PAGE(child)->rememberset_bits, child)) {
04188 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss %p (%s) -> %p (%s)\n",
04189 (void *)data->parent, obj_type_name(data->parent),
04190 (void *)child, obj_type_name(child));
04191 data->err_count++;
04192 }
04193 }
04194 }
04195
04196 static int
04197 verify_internal_consistency_i(void *page_start, void *page_end, size_t stride, void *ptr)
04198 {
04199 struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
04200 VALUE v;
04201
04202 for (v = (VALUE)page_start; v != (VALUE)page_end; v += stride) {
04203 if (is_live_object(data->objspace, v)) {
04204 if (RVALUE_OLD_P(v)) {
04205 data->parent = v;
04206
04207 rb_objspace_reachable_objects_from(v, verify_internal_consistency_reachable_i, (void *)data);
04208 }
04209 }
04210 }
04211
04212 return 0;
04213 }
04214 #endif
04215
04216
04217
04218
04219
04220
04221
04222
04223
04224
04225
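/*
 * Consistency checker (exposed to Ruby, presumably as
 * GC.verify_internal_consistency in Init_GC).  Walks every live object and,
 * for each old object, checks that references to non-old objects are covered
 * by the remembered set, i.e. that no write barrier was missed.  Calls
 * rb_bug() if any inconsistency is found.  Only effective with USE_RGENGC.
 */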
04226 static VALUE
04227 gc_verify_internal_consistency(VALUE self)
04228 {
04229 struct verify_internal_consistency_struct data;
04230 data.objspace = &rb_objspace;
04231 data.err_count = 0;
04232
04233 #if USE_RGENGC
04234 {
04235 struct each_obj_args eo_args;
04236 eo_args.callback = verify_internal_consistency_i;
04237 eo_args.data = (void *)&data;
04238 objspace_each_objects((VALUE)&eo_args);
04239 }
04240 #endif
04241 if (data.err_count != 0) {
04242 rb_bug("gc_verify_internal_consistency: found internal consistency.\n");
04243 }
04244 return Qnil;
04245 }
04246
04247 #if RGENGC_CHECK_MODE >= 3
04248
04249 #define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
04250 #define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
04251 #define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
04252
04253 struct reflist {
04254 VALUE *list;
04255 int pos;
04256 int size;
04257 };
04258
04259 static struct reflist *
04260 reflist_create(VALUE obj)
04261 {
04262 struct reflist *refs = xmalloc(sizeof(struct reflist));
04263 refs->size = 1;
04264 refs->list = ALLOC_N(VALUE, refs->size);
04265 refs->list[0] = obj;
04266 refs->pos = 1;
04267 return refs;
04268 }
04269
04270 static void
04271 reflist_destruct(struct reflist *refs)
04272 {
04273 xfree(refs->list);
04274 xfree(refs);
04275 }
04276
04277 static void
04278 reflist_add(struct reflist *refs, VALUE obj)
04279 {
04280 if (refs->pos == refs->size) {
04281 refs->size *= 2;
04282 SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
04283 }
04284
04285 refs->list[refs->pos++] = obj;
04286 }
04287
04288 static void
04289 reflist_dump(struct reflist *refs)
04290 {
04291 int i;
04292 for (i=0; i<refs->pos; i++) {
04293 VALUE obj = refs->list[i];
04294 if (IS_ROOTSIG(obj)) {
04295 fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
04296 }
04297 else {
04298 fprintf(stderr, "<%p@%s>", (void *)obj, obj_type_name(obj));
04299 }
04300 if (i+1 < refs->pos) fprintf(stderr, ", ");
04301 }
04302 }
04303
04304 #if RGENGC_CHECK_MODE >= 3
04305 static int
04306 reflist_referred_from_machine_context(struct reflist *refs)
04307 {
04308 int i;
04309 for (i=0; i<refs->pos; i++) {
04310 VALUE obj = refs->list[i];
04311 if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
04312 }
04313 return 0;
04314 }
04315 #endif
04316
04317 struct allrefs {
04318 rb_objspace_t *objspace;
04319
04320
04321
04322
04323
04324
04325
04326 struct st_table *references;
04327 const char *category;
04328 VALUE root_obj;
04329 };
04330
04331 static int
04332 allrefs_add(struct allrefs *data, VALUE obj)
04333 {
04334 struct reflist *refs;
04335
04336 if (st_lookup(data->references, obj, (st_data_t *)&refs)) {
04337 reflist_add(refs, data->root_obj);
04338 return 0;
04339 }
04340 else {
04341 refs = reflist_create(data->root_obj);
04342 st_insert(data->references, obj, (st_data_t)refs);
04343 return 1;
04344 }
04345 }
04346
04347 static void
04348 allrefs_i(VALUE obj, void *ptr)
04349 {
04350 struct allrefs *data = (struct allrefs *)ptr;
04351
04352 if (allrefs_add(data, obj)) {
04353 push_mark_stack(&data->objspace->mark_stack, obj);
04354 }
04355 }
04356
04357 static void
04358 allrefs_roots_i(VALUE obj, void *ptr)
04359 {
04360 struct allrefs *data = (struct allrefs *)ptr;
04361 if (strlen(data->category) == 0) rb_bug("allrefs_roots_i: empty root category");
04362 data->root_obj = MAKE_ROOTSIG(data->category);
04363
04364 if (allrefs_add(data, obj)) {
04365 push_mark_stack(&data->objspace->mark_stack, obj);
04366 }
04367 }
04368
04369 static st_table *
04370 objspace_allrefs(rb_objspace_t *objspace)
04371 {
04372 struct allrefs data;
04373 struct mark_func_data_struct mfd;
04374 VALUE obj;
04375
04376 data.objspace = objspace;
04377 data.references = st_init_numtable();
04378
04379 mfd.mark_func = allrefs_roots_i;
04380 mfd.data = &data;
04381
04382
04383 objspace->mark_func_data = &mfd;
04384 gc_mark_roots(objspace, TRUE, &data.category);
04385 objspace->mark_func_data = 0;
04386
04387
04388 while (pop_mark_stack(&objspace->mark_stack, &obj)) {
04389 rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
04390 }
04391 shrink_stack_chunk_cache(&objspace->mark_stack);
04392
04393 return data.references;
04394 }
04395
04396 static int
04397 objspace_allrefs_destruct_i(st_data_t key, st_data_t value, void *ptr)
04398 {
04399 struct reflist *refs = (struct reflist *)value;
04400 reflist_destruct(refs);
04401 return ST_CONTINUE;
04402 }
04403
04404 static void
04405 objspace_allrefs_destruct(struct st_table *refs)
04406 {
04407 st_foreach(refs, objspace_allrefs_destruct_i, 0);
04408 st_free_table(refs);
04409 }
04410
04411 #if RGENGC_CHECK_MODE >= 4
04412 static int
04413 allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
04414 {
04415 VALUE obj = (VALUE)k;
04416 struct reflist *refs = (struct reflist *)v;
04417 fprintf(stderr, "[allrefs_dump_i] %p (%s%s%s%s) <- ",
04418 (void *)obj, obj_type_name(obj),
04419 RVALUE_OLD_P(obj) ? "[O]" : "[Y]",
04420 RVALUE_WB_PROTECTED(obj) ? "[W]" : "",
04421 MARKED_IN_BITMAP(GET_HEAP_REMEMBERSET_BITS(obj), obj) ? "[R]" : "");
04422 reflist_dump(refs);
04423 fprintf(stderr, "\n");
04424 return ST_CONTINUE;
04425 }
04426
04427 static void
04428 allrefs_dump(rb_objspace_t *objspace)
04429 {
04430 fprintf(stderr, "[all refs] (size: %d)\n", (int)objspace->rgengc.allrefs_table->num_entries);
04431 st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
04432 }
04433 #endif
04434
04435 #if RGENGC_CHECK_MODE >= 3
04436 static int
04437 gc_check_after_marks_i(st_data_t k, st_data_t v, void *ptr)
04438 {
04439 VALUE obj = k;
04440 struct reflist *refs = (struct reflist *)v;
04441 rb_objspace_t *objspace = (rb_objspace_t *)ptr;
04442
04443
04444 if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)) {
04445 fprintf(stderr, "gc_check_after_marks_i: %p (%s) is not marked and not oldgen.\n", (void *)obj, obj_type_name(obj));
04446 fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
04447 reflist_dump(refs);
04448
04449 if (reflist_referred_from_machine_context(refs)) {
04450 fprintf(stderr, " (marked from machine stack).\n");
04451
04452 }
04453 else {
04454 objspace->rgengc.error_count++;
04455 fprintf(stderr, "\n");
04456 }
04457 }
04458 return ST_CONTINUE;
04459 }
04460 #endif
04461
04462 static void
04463 gc_marks_check(rb_objspace_t *objspace, int (*checker_func)(ANYARGS), const char *checker_name)
04464 {
04465
04466 size_t saved_malloc_increase = objspace->malloc_params.increase;
04467 #if RGENGC_ESTIMATE_OLDMALLOC
04468 size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
04469 #endif
04470 VALUE already_disabled = rb_gc_disable();
04471
04472 objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
04473 st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
04474
04475 if (objspace->rgengc.error_count > 0) {
04476 #if RGENGC_CHECK_MODE >= 4
04477 allrefs_dump(objspace);
04478 #endif
04479 rb_bug("%s: GC has problem.", checker_name);
04480 }
04481
04482 objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
04483 objspace->rgengc.allrefs_table = 0;
04484
04485 if (already_disabled == Qfalse) rb_gc_enable();
04486 objspace->malloc_params.increase = saved_malloc_increase;
04487 #if RGENGC_ESTIMATE_OLDMALLOC
04488 objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
04489 #endif
04490 }
04491
04492 #endif
04493
04494 static void
04495 gc_marks(rb_objspace_t *objspace, int full_mark)
04496 {
04497 struct mark_func_data_struct *prev_mark_func_data;
04498
04499 gc_prof_mark_timer_start(objspace);
04500 {
04501
04502 prev_mark_func_data = objspace->mark_func_data;
04503 objspace->mark_func_data = 0;
04504
04505 #if USE_RGENGC
04506
04507 #if RGENGC_CHECK_MODE >= 2
04508 gc_verify_internal_consistency(Qnil);
04509 #endif
04510 if (full_mark == TRUE) {
04511 objspace->rgengc.remembered_shady_object_count = 0;
04512 objspace->rgengc.old_object_count = 0;
04513 #if RGENGC_THREEGEN
04514 objspace->rgengc.young_object_count = 0;
04515 #endif
04516
04517 gc_marks_body(objspace, TRUE);
04518 {
04519
04520 const double r = gc_params.oldobject_limit_factor;
04521 objspace->rgengc.remembered_shady_object_limit = (size_t)(objspace->rgengc.remembered_shady_object_count * r);
04522 objspace->rgengc.old_object_limit = (size_t)(objspace->rgengc.old_object_count * r);
04523 }
04524 }
04525 else {
04526 gc_marks_body(objspace, FALSE);
04527 }
04528
04529 #if RGENGC_PROFILE > 0
04530 if (gc_prof_record(objspace)) {
04531 gc_profile_record *record = gc_prof_record(objspace);
04532 record->old_objects = objspace->rgengc.old_object_count;
04533 }
04534 #endif
04535
04536 #if RGENGC_CHECK_MODE >= 3
04537 gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
04538 #endif
04539
04540 #else
04541 gc_marks_body(objspace, TRUE);
04542 #endif
04543
04544 objspace->mark_func_data = prev_mark_func_data;
04545 }
04546 gc_prof_mark_timer_stop(objspace);
04547 }
04548
04549
04550
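/*
 * Backend for the rgengc_report() debug logging used throughout this file.
 * Messages are printed to stderr only when level <= RGENGC_DEBUG; the line
 * prefix is "-" during a minor GC and "+" during a major GC.
 */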
04551 static void
04552 rgengc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
04553 {
04554 if (level <= RGENGC_DEBUG) {
04555 char buf[1024];
04556 FILE *out = stderr;
04557 va_list args;
04558 const char *status = " ";
04559
04560 #if USE_RGENGC
04561 if (during_gc) {
04562 status = objspace->rgengc.during_minor_gc ? "-" : "+";
04563 }
04564 #endif
04565
04566 va_start(args, fmt);
04567 vsnprintf(buf, 1024, fmt, args);
04568 va_end(args);
04569
04570 fprintf(out, "%s|", status);
04571 fputs(buf, out);
04572 }
04573 }
04574
04575 #if USE_RGENGC
04576
04577
04578
04579 static int
04580 rgengc_remembersetbits_get(rb_objspace_t *objspace, VALUE obj)
04581 {
04582 bits_t *bits = GET_HEAP_REMEMBERSET_BITS(obj);
04583 return MARKED_IN_BITMAP(bits, obj) ? 1 : 0;
04584 }
04585
04586 static int
04587 rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
04588 {
04589 bits_t *bits = GET_HEAP_REMEMBERSET_BITS(obj);
04590 if (MARKED_IN_BITMAP(bits, obj)) {
04591 return FALSE;
04592 }
04593 else {
04594 MARK_IN_BITMAP(bits, obj);
04595 return TRUE;
04596 }
04597 }
04598
04599
04600
04601
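/*
 * Add obj to the remembered set of its heap page.  Returns TRUE if the
 * object was newly remembered, FALSE if its rememberset bit was already set.
 */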
04602 static int
04603 rgengc_remember(rb_objspace_t *objspace, VALUE obj)
04604 {
04605 rgengc_report(2, objspace, "rgengc_remember: %p (%s, %s) %s\n", (void *)obj, obj_type_name(obj),
04606 RVALUE_WB_PROTECTED(obj) ? "WB-protected" : "non-WB-protected",
04607 rgengc_remembersetbits_get(objspace, obj) ? "was already remembered" : "is remembered now");
04608
04609 #if RGENGC_CHECK_MODE > 0
04610 {
04611 switch (BUILTIN_TYPE(obj)) {
04612 case T_NONE:
04613 case T_ZOMBIE:
04614 rb_bug("rgengc_remember: should not remember %p (%s)\n",
04615 (void *)obj, obj_type_name(obj));
04616 default:
04617 ;
04618 }
04619 }
04620 #endif
04621
04622 if (RGENGC_PROFILE) {
04623 if (!rgengc_remembered(objspace, obj)) {
04624 #if RGENGC_PROFILE > 0
04625 if (RVALUE_WB_PROTECTED(obj)) {
04626 objspace->profile.remembered_normal_object_count++;
04627 #if RGENGC_PROFILE >= 2
04628 objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
04629 #endif
04630 }
04631 else {
04632 objspace->profile.remembered_shady_object_count++;
04633 #if RGENGC_PROFILE >= 2
04634 objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
04635 #endif
04636 }
04637 #endif
04638 }
04639 }
04640
04641 return rgengc_remembersetbits_set(objspace, obj);
04642 }
04643
04644 static int
04645 rgengc_remembered(rb_objspace_t *objspace, VALUE obj)
04646 {
04647 int result = rgengc_remembersetbits_get(objspace, obj);
04648 check_gen_consistency(obj);
04649 rgengc_report(6, objspace, "gc_remembered: %p (%s) => %d\n", (void *)obj, obj_type_name(obj), result);
04650 return result;
04651 }
04652
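/*
 * Minor-GC marking of the remembered set: scan the rememberset bitmap of
 * every page in the heap, marking each remembered object and its children.
 * WB-protected entries are cleared from the set once scanned; shady
 * (non-WB-protected) objects stay remembered for the next minor GC.
 */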
04653 static void
04654 rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
04655 {
04656 size_t j;
04657 RVALUE *p, *offset;
04658 bits_t *bits, bitset;
04659 struct heap_page *page = heap->pages;
04660
04661 #if RGENGC_PROFILE > 0
04662 size_t shady_object_count = 0, clear_count = 0;
04663 #endif
04664
04665 while (page) {
04666 p = page->start;
04667 bits = page->rememberset_bits;
04668 offset = p - NUM_IN_PAGE(p);
04669
04670 for (j=0; j < HEAP_BITMAP_LIMIT; j++) {
04671 if (bits[j]) {
04672 p = offset + j * BITS_BITLENGTH;
04673 bitset = bits[j];
04674 do {
04675 if (bitset & 1) {
04676
04677 gc_mark_ptr(objspace, (VALUE)p);
04678
04679 if (RVALUE_WB_PROTECTED(p)) {
04680 rgengc_report(2, objspace, "rgengc_rememberset_mark: clear %p (%s)\n", p, obj_type_name((VALUE)p));
04681 #if RGENGC_THREEGEN
04682 if (RVALUE_INFANT_P((VALUE)p)) RVALUE_PROMOTE_INFANT((VALUE)p);
04683 if (RVALUE_YOUNG_P((VALUE)p)) RVALUE_PROMOTE_YOUNG((VALUE)p);
04684 #endif
04685 CLEAR_IN_BITMAP(bits, p);
04686 #if RGENGC_PROFILE > 0
04687 clear_count++;
04688 #endif
04689 }
04690 else {
04691 #if RGENGC_PROFILE > 0
04692 shady_object_count++;
04693 #endif
04694 }
04695
04696 rgengc_report(2, objspace, "rgengc_rememberset_mark: mark %p (%s)\n", p, obj_type_name((VALUE)p));
04697 gc_mark_children(objspace, (VALUE) p);
04698 }
04699 p++;
04700 bitset >>= 1;
04701 } while (bitset);
04702 }
04703 }
04704 page = page->next;
04705 }
04706
04707 rgengc_report(2, objspace, "rgengc_rememberset_mark: finished\n");
04708
04709 #if RGENGC_PROFILE > 0
04710 rgengc_report(2, objspace, "rgengc_rememberset_mark: clear_count: %"PRIdSIZE", shady_object_count: %"PRIdSIZE"\n", clear_count, shady_object_count);
04711 if (gc_prof_record(objspace)) {
04712 gc_profile_record *record = gc_prof_record(objspace);
04713 record->remembered_normal_objects = clear_count;
04714 record->remembered_shady_objects = shady_object_count;
04715 }
04716 #endif
04717 }
04718
04719 static void
04720 rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
04721 {
04722 struct heap_page *page = heap->pages;
04723
04724 while (page) {
04725 memset(&page->mark_bits[0], 0, HEAP_BITMAP_SIZE);
04726 memset(&page->rememberset_bits[0], 0, HEAP_BITMAP_SIZE);
04727 page = page->next;
04728 }
04729 }
04730
04731
04732
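/*
 * Write barrier: called when a reference from `a` to `b` is stored.  If `a`
 * is already old but `b` is not, `a` is added to the remembered set so that
 * the next minor GC re-scans it and keeps `b` alive.
 */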
04733 void
04734 rb_gc_writebarrier(VALUE a, VALUE b)
04735 {
04736 if (RGENGC_CHECK_MODE) {
04737 if (!RVALUE_PROMOTED_P(a)) rb_bug("rb_gc_writebarrier: referer object %p (%s) is not promoted.\n", (void *)a, obj_type_name(a));
04738 }
04739
04740 if (!RVALUE_OLD_P(b) && RVALUE_OLD_BITMAP_P(a)) {
04741 rb_objspace_t *objspace = &rb_objspace;
04742
04743 if (!rgengc_remembered(objspace, a)) {
04744 rgengc_report(2, objspace, "rb_gc_wb: %p (%s) -> %p (%s)\n",
04745 (void *)a, obj_type_name(a), (void *)b, obj_type_name(b));
04746 rgengc_remember(objspace, a);
04747 }
04748 }
04749 }
04750
04751 void
04752 rb_gc_writebarrier_unprotect_promoted(VALUE obj)
04753 {
04754 rb_objspace_t *objspace = &rb_objspace;
04755
04756 if (RGENGC_CHECK_MODE) {
04757 if (!RVALUE_PROMOTED_P(obj)) rb_bug("rb_gc_writebarrier_unprotect_promoted: called on non-promoted object");
04758 if (!RVALUE_WB_PROTECTED(obj)) rb_bug("rb_gc_writebarrier_unprotect_promoted: called on shady object");
04759 }
04760
04761 rgengc_report(0, objspace, "rb_gc_writebarrier_unprotect_promoted: %p (%s)%s\n", (void *)obj, obj_type_name(obj),
04762 rgengc_remembered(objspace, obj) ? " (already remembered)" : "");
04763
04764 if (RVALUE_OLD_P(obj)) {
04765 RVALUE_DEMOTE_FROM_OLD(obj);
04766
04767 rgengc_remember(objspace, obj);
04768 objspace->rgengc.remembered_shady_object_count++;
04769
04770 #if RGENGC_PROFILE
04771 objspace->profile.shade_operation_count++;
04772 #if RGENGC_PROFILE >= 2
04773 objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
04774 #endif
04775 #endif
04776 }
04777 #if RGENGC_THREEGEN
04778 else {
04779 RVALUE_DEMOTE_FROM_YOUNG(obj);
04780 }
04781 #endif
04782 }
04783
04784 void
04785 rb_gc_writebarrier_remember_promoted(VALUE obj)
04786 {
04787 rb_objspace_t *objspace = &rb_objspace;
04788 rgengc_remember(objspace, obj);
04789 }
04790
04791 static st_table *rgengc_unprotect_logging_table;
04792
04793 static int
04794 rgengc_unprotect_logging_exit_func_i(st_data_t key, st_data_t val, st_data_t arg)
04795 {
04796 fprintf(stderr, "%s\t%d\n", (char *)key, (int)val);
04797 return ST_CONTINUE;
04798 }
04799
04800 static void
04801 rgengc_unprotect_logging_exit_func(void)
04802 {
04803 st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);
04804 }
04805
04806 void
04807 rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
04808 {
04809 VALUE obj = (VALUE)objptr;
04810
04811 if (rgengc_unprotect_logging_table == 0) {
04812 rgengc_unprotect_logging_table = st_init_strtable();
04813 atexit(rgengc_unprotect_logging_exit_func);
04814 }
04815
04816 if (OBJ_WB_PROTECTED(obj)) {
04817 char buff[0x100];
04818 st_data_t cnt = 1;
04819 char *ptr = buff;
04820
04821 snprintf(ptr, 0x100 - 1, "%s|%s:%d", obj_type_name(obj), filename, line);
04822
04823 if (st_lookup(rgengc_unprotect_logging_table, (st_data_t)ptr, &cnt)) {
04824 cnt++;
04825 }
04826 else {
04827 ptr = (char *)malloc(strlen(buff) + 1);
04828 strcpy(ptr, buff);
04829 }
04830 st_insert(rgengc_unprotect_logging_table, (st_data_t)ptr, cnt);
04831 }
04832 }
04833
04834 #endif
04835
04836
04837
04838 VALUE
04839 rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
04840 {
04841 return OBJ_WB_PROTECTED(obj) ? Qtrue : Qfalse;
04842 }
04843
04844 VALUE
04845 rb_obj_rgengc_promoted_p(VALUE obj)
04846 {
04847 return OBJ_PROMOTED(obj) ? Qtrue : Qfalse;
04848 }
04849
04850 size_t
04851 rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
04852 {
04853 size_t n = 0;
04854 static ID ID_marked;
04855 #if USE_RGENGC
04856 static ID ID_wb_protected, ID_old, ID_remembered;
04857 #if RGENGC_THREEGEN
04858 static ID ID_young, ID_infant;
04859 #endif
04860 #endif
04861
04862 if (!ID_marked) {
04863 #define I(s) ID_##s = rb_intern(#s);
04864 I(marked);
04865 #if USE_RGENGC
04866 I(wb_protected);
04867 I(old);
04868 I(remembered);
04869 #if RGENGC_THREEGEN
04870 I(young);
04871 I(infant);
04872 #endif
04873 #endif
04874 #undef I
04875 }
04876
04877 #if USE_RGENGC
04878 if (OBJ_WB_PROTECTED(obj) && n<max)
04879 flags[n++] = ID_wb_protected;
04880 if (RVALUE_OLD_P(obj) && n<max)
04881 flags[n++] = ID_old;
04882 #if RGENGC_THREEGEN
04883 if (RVALUE_YOUNG_P(obj) && n<max)
04884 flags[n++] = ID_young;
04885 if (RVALUE_INFANT_P(obj) && n<max)
04886 flags[n++] = ID_infant;
04887 #endif
04888 if (MARKED_IN_BITMAP(GET_HEAP_REMEMBERSET_BITS(obj), obj) && n<max)
04889 flags[n++] = ID_remembered;
04890 #endif
04891 if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max)
04892 flags[n++] = ID_marked;
04893
04894 return n;
04895 }
04896
04897
04898
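/*
 * Immediately return the slot `p` to the freelist of its page, clearing its
 * rememberset/oldgen bits (and, unless the page is still waiting to be
 * swept, its mark bit).  Intended for objects the caller knows are no
 * longer referenced.
 */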
04899 void
04900 rb_gc_force_recycle(VALUE p)
04901 {
04902 rb_objspace_t *objspace = &rb_objspace;
04903
04904 #if USE_RGENGC
04905 CLEAR_IN_BITMAP(GET_HEAP_REMEMBERSET_BITS(p), p);
04906 CLEAR_IN_BITMAP(GET_HEAP_OLDGEN_BITS(p), p);
04907 if (!GET_HEAP_PAGE(p)->before_sweep) {
04908 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(p), p);
04909 }
04910 #endif
04911
04912 objspace->profile.total_freed_object_num++;
04913 heap_page_add_freeobj(objspace, GET_HEAP_PAGE(p), p);
04914
04915
04916
04917
04918
04919
04920 }
04921
04922 void
04923 rb_gc_register_mark_object(VALUE obj)
04924 {
04925 VALUE ary = GET_THREAD()->vm->mark_object_ary;
04926 rb_ary_push(ary, obj);
04927 }
04928
04929 void
04930 rb_gc_register_address(VALUE *addr)
04931 {
04932 rb_objspace_t *objspace = &rb_objspace;
04933 struct gc_list *tmp;
04934
04935 tmp = ALLOC(struct gc_list);
04936 tmp->next = global_List;
04937 tmp->varptr = addr;
04938 global_List = tmp;
04939 }
04940
04941 void
04942 rb_gc_unregister_address(VALUE *addr)
04943 {
04944 rb_objspace_t *objspace = &rb_objspace;
04945 struct gc_list *tmp = global_List;
04946
04947 if (tmp->varptr == addr) {
04948 global_List = tmp->next;
04949 xfree(tmp);
04950 return;
04951 }
04952 while (tmp->next) {
04953 if (tmp->next->varptr == addr) {
04954 struct gc_list *t = tmp->next;
04955
04956 tmp->next = tmp->next->next;
04957 xfree(t);
04958 break;
04959 }
04960 tmp = tmp->next;
04961 }
04962 }
04963
04964 void
04965 rb_global_variable(VALUE *var)
04966 {
04967 rb_gc_register_address(var);
04968 }
04969
04970 #define GC_NOTIFY 0
04971
04972 static int
04973 garbage_collect_body(rb_objspace_t *objspace, int full_mark, int immediate_sweep, int reason)
04974 {
04975 if (ruby_gc_stress && !ruby_disable_gc_stress) {
04976 int flag = FIXNUM_P(ruby_gc_stress) ? FIX2INT(ruby_gc_stress) : 0;
04977
04978 if (flag & 0x01)
04979 reason &= ~GPR_FLAG_MAJOR_MASK;
04980 else
04981 reason |= GPR_FLAG_MAJOR_BY_STRESS;
04982 immediate_sweep = !(flag & 0x02);
04983 }
04984 else {
04985 if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_lazy_sweep) {
04986 immediate_sweep = TRUE;
04987 }
04988 #if USE_RGENGC
04989 if (full_mark) {
04990 reason |= GPR_FLAG_MAJOR_BY_NOFREE;
04991 }
04992 if (objspace->rgengc.need_major_gc) {
04993 reason |= objspace->rgengc.need_major_gc;
04994 objspace->rgengc.need_major_gc = GPR_FLAG_NONE;
04995 }
04996 if (objspace->rgengc.remembered_shady_object_count > objspace->rgengc.remembered_shady_object_limit) {
04997 reason |= GPR_FLAG_MAJOR_BY_SHADY;
04998 }
04999 if (objspace->rgengc.old_object_count > objspace->rgengc.old_object_limit) {
05000 reason |= GPR_FLAG_MAJOR_BY_OLDGEN;
05001 }
05002 #endif
05003 }
05004
05005 if (immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
05006 full_mark = (reason & GPR_FLAG_MAJOR_MASK) ? TRUE : FALSE;
05007
05008 if (GC_NOTIFY) fprintf(stderr, "start garbage_collect(%d, %d, %d)\n", full_mark, immediate_sweep, reason);
05009
05010 objspace->profile.count++;
05011 objspace->profile.latest_gc_info = reason;
05012
05013 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 );
05014
05015 objspace->profile.total_allocated_object_num_at_gc_start = objspace->profile.total_allocated_object_num;
05016 objspace->profile.heap_used_at_gc_start = heap_pages_used;
05017
05018 gc_prof_setup_new_record(objspace, reason);
05019 gc_prof_timer_start(objspace);
05020 {
05021 if (during_gc == 0) {
05022 rb_bug("during_gc should not be 0. RUBY_INTERNAL_EVENT_GC_START user should not cause GC in events.");
05023 }
05024 gc_marks(objspace, full_mark);
05025 gc_sweep(objspace, immediate_sweep);
05026 during_gc = 0;
05027 }
05028 gc_prof_timer_stop(objspace);
05029
05030 if (GC_NOTIFY) fprintf(stderr, "end garbage_collect()\n");
05031 return TRUE;
05032 }
05033
05034 static int
05035 heap_ready_to_gc(rb_objspace_t *objspace, rb_heap_t *heap)
05036 {
05037 if (dont_gc || during_gc) {
05038 if (!heap->freelist && !heap->free_pages) {
05039 if (!heap_increment(objspace, heap)) {
05040 heap_set_increment(objspace, 0);
05041 heap_increment(objspace, heap);
05042 }
05043 }
05044 return FALSE;
05045 }
05046 return TRUE;
05047 }
05048
05049 static int
05050 ready_to_gc(rb_objspace_t *objspace)
05051 {
05052 return heap_ready_to_gc(objspace, heap_eden);
05053 }
05054
05055 static int
05056 garbage_collect(rb_objspace_t *objspace, int full_mark, int immediate_sweep, int reason)
05057 {
05058 if (!heap_pages_used) {
05059 during_gc = 0;
05060 return FALSE;
05061 }
05062 if (!ready_to_gc(objspace)) {
05063 during_gc = 0;
05064 return TRUE;
05065 }
05066
05067 #if GC_PROFILE_MORE_DETAIL
05068 objspace->profile.prepare_time = getrusage_time();
05069 #endif
05070 gc_rest_sweep(objspace);
05071 #if GC_PROFILE_MORE_DETAIL
05072 objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
05073 #endif
05074
05075 during_gc++;
05076
05077 return garbage_collect_body(objspace, full_mark, immediate_sweep, reason);
05078 }
05079
05080 struct objspace_and_reason {
05081 rb_objspace_t *objspace;
05082 int reason;
05083 int full_mark;
05084 int immediate_sweep;
05085 };
05086
05087 static void *
05088 gc_with_gvl(void *ptr)
05089 {
05090 struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
05091 return (void *)(VALUE)garbage_collect(oar->objspace, oar->full_mark, oar->immediate_sweep, oar->reason);
05092 }
05093
05094 static int
05095 garbage_collect_with_gvl(rb_objspace_t *objspace, int full_mark, int immediate_sweep, int reason)
05096 {
05097 if (dont_gc) return TRUE;
05098 if (ruby_thread_has_gvl_p()) {
05099 return garbage_collect(objspace, full_mark, immediate_sweep, reason);
05100 }
05101 else {
05102 if (ruby_native_thread_p()) {
05103 struct objspace_and_reason oar;
05104 oar.objspace = objspace;
05105 oar.reason = reason;
05106 oar.full_mark = full_mark;
05107 oar.immediate_sweep = immediate_sweep;
05108 return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
05109 }
05110 else {
05111
05112 fprintf(stderr, "[FATAL] failed to allocate memory\n");
05113 exit(EXIT_FAILURE);
05114 }
05115 }
05116 }
05117
05118 int
05119 rb_garbage_collect(void)
05120 {
05121 return garbage_collect(&rb_objspace, TRUE, TRUE, GPR_FLAG_CAPI);
05122 }
05123
05124 #undef Init_stack
05125
05126 void
05127 Init_stack(volatile VALUE *addr)
05128 {
05129 ruby_init_stack(addr);
05130 }
05131
05132
05152
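/*
 * call-seq:
 *   GC.start(full_mark: true, immediate_sweep: true) -> nil
 *
 * Triggers a garbage collection run.  The full_mark keyword selects a major
 * (true) or minor (false) collection; immediate_sweep selects immediate
 * (true) or lazy (false) sweeping.  Deferred finalizers are run afterwards
 * unless finalization is already in progress.
 */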
05153 static VALUE
05154 gc_start_internal(int argc, VALUE *argv, VALUE self)
05155 {
05156 rb_objspace_t *objspace = &rb_objspace;
05157 int full_mark = TRUE, immediate_sweep = TRUE;
05158 VALUE opt = Qnil;
05159 static ID keyword_ids[2];
05160
05161 rb_scan_args(argc, argv, "0:", &opt);
05162
05163 if (!NIL_P(opt)) {
05164 VALUE kwvals[2];
05165
05166 if (!keyword_ids[0]) {
05167 keyword_ids[0] = rb_intern("full_mark");
05168 keyword_ids[1] = rb_intern("immediate_sweep");
05169 }
05170
05171 rb_get_kwargs(opt, keyword_ids, 0, 2, kwvals);
05172
05173 if (kwvals[0] != Qundef)
05174 full_mark = RTEST(kwvals[0]);
05175 if (kwvals[1] != Qundef)
05176 immediate_sweep = RTEST(kwvals[1]);
05177 }
05178
05179 garbage_collect(objspace, full_mark, immediate_sweep, GPR_FLAG_METHOD);
05180 if (!finalizing) finalize_deferred(objspace);
05181
05182 return Qnil;
05183 }
05184
05185 VALUE
05186 rb_gc_start(void)
05187 {
05188 rb_gc();
05189 return Qnil;
05190 }
05191
05192 void
05193 rb_gc(void)
05194 {
05195 rb_objspace_t *objspace = &rb_objspace;
05196 garbage_collect(objspace, TRUE, TRUE, GPR_FLAG_CAPI);
05197 if (!finalizing) finalize_deferred(objspace);
05198 }
05199
05200 int
05201 rb_during_gc(void)
05202 {
05203 rb_objspace_t *objspace = &rb_objspace;
05204 return during_gc;
05205 }
05206
05207 #if RGENGC_PROFILE >= 2
05208 static void
05209 gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
05210 {
05211 VALUE result = rb_hash_new();
05212 int i;
05213 for (i=0; i<T_MASK; i++) {
05214 const char *type = type_name(i, 0);
05215 rb_hash_aset(result, ID2SYM(rb_intern(type)), SIZET2NUM(types[i]));
05216 }
05217 rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
05218 }
05219 #endif
05220
05221 size_t
05222 rb_gc_count(void)
05223 {
05224 return rb_objspace.profile.count;
05225 }
05226
05227
05228
05229
05230
05231
05232
05233
05234
05235
05236
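/*
 * call-seq:
 *   GC.count -> Integer
 *
 * Returns the number of GC runs performed since the process started
 * (minor and major collections combined).
 */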
05237 static VALUE
05238 gc_count(VALUE self)
05239 {
05240 return SIZET2NUM(rb_gc_count());
05241 }
05242
05243 static VALUE
05244 gc_info_decode(int flags, VALUE hash_or_key)
05245 {
05246 static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer;
05247 static VALUE sym_nofree, sym_oldgen, sym_shady, sym_rescan, sym_stress;
05248 #if RGENGC_ESTIMATE_OLDMALLOC
05249 static VALUE sym_oldmalloc;
05250 #endif
05251 static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
05252 VALUE hash = Qnil, key = Qnil;
05253 VALUE major_by;
05254
05255 if (SYMBOL_P(hash_or_key))
05256 key = hash_or_key;
05257 else if (RB_TYPE_P(hash_or_key, T_HASH))
05258 hash = hash_or_key;
05259 else
05260 rb_raise(rb_eTypeError, "non-hash or symbol given");
05261
05262 if (sym_major_by == Qnil) {
05263 #define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
05264 S(major_by);
05265 S(gc_by);
05266 S(immediate_sweep);
05267 S(have_finalizer);
05268 S(nofree);
05269 S(oldgen);
05270 S(shady);
05271 S(rescan);
05272 S(stress);
05273 #if RGENGC_ESTIMATE_OLDMALLOC
05274 S(oldmalloc);
05275 #endif
05276 S(newobj);
05277 S(malloc);
05278 S(method);
05279 S(capi);
05280 #undef S
05281 }
05282
05283 #define SET(name, attr) \
05284 if (key == sym_##name) \
05285 return (attr); \
05286 else if (hash != Qnil) \
05287 rb_hash_aset(hash, sym_##name, (attr));
05288
05289 major_by =
05290 (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
05291 (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
05292 (flags & GPR_FLAG_MAJOR_BY_RESCAN) ? sym_rescan :
05293 (flags & GPR_FLAG_MAJOR_BY_STRESS) ? sym_stress :
05294 #if RGENGC_ESTIMATE_OLDMALLOC
05295 (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
05296 #endif
05297 (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
05298 Qnil;
05299 SET(major_by, major_by);
05300
05301 SET(gc_by,
05302 (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
05303 (flags & GPR_FLAG_MALLOC) ? sym_malloc :
05304 (flags & GPR_FLAG_METHOD) ? sym_method :
05305 (flags & GPR_FLAG_CAPI) ? sym_capi :
05306 (flags & GPR_FLAG_STRESS) ? sym_stress :
05307 Qnil
05308 );
05309
05310 SET(have_finalizer, (flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
05311 SET(immediate_sweep, (flags & GPR_FLAG_IMMEDIATE_SWEEP) ? Qtrue : Qfalse);
05312 #undef SET
05313
05314 if (key != Qnil)
05315 rb_raise(rb_eArgError, "unknown key: %s", RSTRING_PTR(rb_id2str(SYM2ID(key))));
05316
05317 return hash;
05318 }
05319
05320 VALUE
05321 rb_gc_latest_gc_info(VALUE key)
05322 {
05323 rb_objspace_t *objspace = &rb_objspace;
05324 return gc_info_decode(objspace->profile.latest_gc_info, key);
05325 }
05326
05327
05328
05329
05330
05331
05332
05333
05334
05335
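/*
 * call-seq:
 *   GC.latest_gc_info       -> Hash
 *   GC.latest_gc_info(hash) -> hash
 *   GC.latest_gc_info(key)  -> value
 *
 * Returns information about the most recent GC.  With a Symbol argument the
 * single corresponding value is returned; with a Hash argument the hash is
 * filled in and returned.
 */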
05336 static VALUE
05337 gc_latest_gc_info(int argc, VALUE *argv, VALUE self)
05338 {
05339 rb_objspace_t *objspace = &rb_objspace;
05340 VALUE arg = Qnil;
05341
05342 if (rb_scan_args(argc, argv, "01", &arg) == 1) {
05343 if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
05344 rb_raise(rb_eTypeError, "non-hash or symbol given");
05345 }
05346 }
05347
05348 if (arg == Qnil)
05349 arg = rb_hash_new();
05350
05351 return gc_info_decode(objspace->profile.latest_gc_info, arg);
05352 }
05353
05354 static VALUE
05355 gc_stat_internal(VALUE hash_or_sym, size_t *out)
05356 {
05357 static VALUE sym_count;
05358 static VALUE sym_heap_used, sym_heap_length, sym_heap_increment;
05359 static VALUE sym_heap_live_slot, sym_heap_free_slot, sym_heap_final_slot, sym_heap_swept_slot;
05360 static VALUE sym_heap_eden_page_length, sym_heap_tomb_page_length;
05361 static VALUE sym_total_allocated_object, sym_total_freed_object;
05362 static VALUE sym_malloc_increase, sym_malloc_limit;
05363 #if USE_RGENGC
05364 static VALUE sym_minor_gc_count, sym_major_gc_count;
05365 static VALUE sym_remembered_shady_object, sym_remembered_shady_object_limit;
05366 static VALUE sym_old_object, sym_old_object_limit;
05367 #if RGENGC_ESTIMATE_OLDMALLOC
05368 static VALUE sym_oldmalloc_increase, sym_oldmalloc_limit;
05369 #endif
05370 #if RGENGC_PROFILE
05371 static VALUE sym_generated_normal_object_count, sym_generated_shady_object_count;
05372 static VALUE sym_shade_operation_count, sym_promote_infant_count, sym_promote_young_count;
05373 static VALUE sym_remembered_normal_object_count, sym_remembered_shady_object_count;
05374 #endif
05375 #endif
05376
05377 rb_objspace_t *objspace = &rb_objspace;
05378 VALUE hash = Qnil, key = Qnil;
05379
05380 if (RB_TYPE_P(hash_or_sym, T_HASH))
05381 hash = hash_or_sym;
05382 else if (SYMBOL_P(hash_or_sym) && out)
05383 key = hash_or_sym;
05384 else
05385 rb_raise(rb_eTypeError, "non-hash or symbol argument");
05386
05387 if (sym_count == 0) {
05388 #define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
05389 S(count);
05390 S(heap_used);
05391 S(heap_length);
05392 S(heap_increment);
05393 S(heap_live_slot);
05394 S(heap_free_slot);
05395 S(heap_final_slot);
05396 S(heap_swept_slot);
05397 S(heap_eden_page_length);
05398 S(heap_tomb_page_length);
05399 S(total_allocated_object);
05400 S(total_freed_object);
05401 S(malloc_increase);
05402 S(malloc_limit);
05403 #if USE_RGENGC
05404 S(minor_gc_count);
05405 S(major_gc_count);
05406 S(remembered_shady_object);
05407 S(remembered_shady_object_limit);
05408 S(old_object);
05409 S(old_object_limit);
05410 #if RGENGC_ESTIMATE_OLDMALLOC
05411 S(oldmalloc_increase);
05412 S(oldmalloc_limit);
05413 #endif
05414 #if RGENGC_PROFILE
05415 S(generated_normal_object_count);
05416 S(generated_shady_object_count);
05417 S(shade_operation_count);
05418 S(promote_infant_count);
05419 S(promote_young_count);
05420 S(remembered_normal_object_count);
05421 S(remembered_shady_object_count);
05422 #endif
05423 #endif
05424 #undef S
05425 }
05426
05427 #define SET(name, attr) \
05428 if (key == sym_##name) \
05429 return (*out = attr, Qnil); \
05430 else if (hash != Qnil) \
05431 rb_hash_aset(hash, sym_##name, SIZET2NUM(attr));
05432
05433 SET(count, objspace->profile.count);
05434
05435
05436 SET(heap_used, heap_pages_used);
05437 SET(heap_length, heap_pages_length);
05438 SET(heap_increment, heap_pages_increment);
05439 SET(heap_live_slot, objspace_live_slot(objspace));
05440 SET(heap_free_slot, objspace_free_slot(objspace));
05441 SET(heap_final_slot, heap_pages_final_slots);
05442 SET(heap_swept_slot, heap_pages_swept_slots);
05443 SET(heap_eden_page_length, heap_eden->page_length);
05444 SET(heap_tomb_page_length, heap_tomb->page_length);
05445 SET(total_allocated_object, objspace->profile.total_allocated_object_num);
05446 SET(total_freed_object, objspace->profile.total_freed_object_num);
05447 SET(malloc_increase, malloc_increase);
05448 SET(malloc_limit, malloc_limit);
05449 #if USE_RGENGC
05450 SET(minor_gc_count, objspace->profile.minor_gc_count);
05451 SET(major_gc_count, objspace->profile.major_gc_count);
05452 SET(remembered_shady_object, objspace->rgengc.remembered_shady_object_count);
05453 SET(remembered_shady_object_limit, objspace->rgengc.remembered_shady_object_limit);
05454 SET(old_object, objspace->rgengc.old_object_count);
05455 SET(old_object_limit, objspace->rgengc.old_object_limit);
05456 #if RGENGC_ESTIMATE_OLDMALLOC
05457 SET(oldmalloc_increase, objspace->rgengc.oldmalloc_increase);
05458 SET(oldmalloc_limit, objspace->rgengc.oldmalloc_increase_limit);
05459 #endif
05460
05461 #if RGENGC_PROFILE
05462 SET(generated_normal_object_count, objspace->profile.generated_normal_object_count);
05463 SET(generated_shady_object_count, objspace->profile.generated_shady_object_count);
05464 SET(shade_operation_count, objspace->profile.shade_operation_count);
05465 SET(promote_infant_count, objspace->profile.promote_infant_count);
05466 #if RGENGC_THREEGEN
05467 SET(promote_young_count, objspace->profile.promote_young_count);
05468 #endif
05469 SET(remembered_normal_object_count, objspace->profile.remembered_normal_object_count);
05470 SET(remembered_shady_object_count, objspace->profile.remembered_shady_object_count);
05471 #endif
05472 #endif
05473 #undef SET
05474
05475 if (key != Qnil)
05476 rb_raise(rb_eArgError, "unknown key: %s", RSTRING_PTR(rb_id2str(SYM2ID(key))));
05477
05478 #if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
05479 if (hash != Qnil) {
05480 gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
05481 gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
05482 gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
05483 gc_count_add_each_types(hash, "promote_infant_types", objspace->profile.promote_infant_types);
05484 #if RGENGC_THREEGEN
05485 gc_count_add_each_types(hash, "promote_young_types", objspace->profile.promote_young_types);
05486 #endif
05487 gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
05488 gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
05489 }
05490 #endif
05491
05492 return hash;
05493 }
05494
05495
05534
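/*
 * call-seq:
 *   GC.stat       -> Hash
 *   GC.stat(hash) -> hash
 *   GC.stat(key)  -> Integer
 *
 * Returns internal GC statistics such as :count, :heap_used, :heap_length,
 * :heap_live_slot, :total_allocated_object and :malloc_increase (see
 * gc_stat_internal above for the full key list).  With a Symbol argument
 * only that single value is returned.  The contents are implementation
 * specific and may change between versions.
 */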
05535 static VALUE
05536 gc_stat(int argc, VALUE *argv, VALUE self)
05537 {
05538 VALUE arg = Qnil;
05539
05540 if (rb_scan_args(argc, argv, "01", &arg) == 1) {
05541 if (SYMBOL_P(arg)) {
05542 size_t value = 0;
05543 gc_stat_internal(arg, &value);
05544 return SIZET2NUM(value);
05545 } else if (!RB_TYPE_P(arg, T_HASH)) {
05546 rb_raise(rb_eTypeError, "non-hash or symbol given");
05547 }
05548 }
05549
05550 if (arg == Qnil) {
05551 arg = rb_hash_new();
05552 }
05553 gc_stat_internal(arg, 0);
05554 return arg;
05555 }
05556
05557 size_t
05558 rb_gc_stat(VALUE key)
05559 {
05560 if (SYMBOL_P(key)) {
05561 size_t value = 0;
05562 gc_stat_internal(key, &value);
05563 return value;
05564 } else {
05565 gc_stat_internal(key, 0);
05566 return 0;
05567 }
05568 }
05569
05570
05571
05572
05573
05574
05575
05576
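/*
 * call-seq:
 *   GC.stress -> true, false or Fixnum
 *
 * Returns the current GC stress-mode flag.
 */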
05577 static VALUE
05578 gc_stress_get(VALUE self)
05579 {
05580 rb_objspace_t *objspace = &rb_objspace;
05581 return ruby_gc_stress;
05582 }
05583
05584
05585
05586
05587
05588
05589
05590
05591
05592
05593
05594
05595
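/*
 * call-seq:
 *   GC.stress = flag -> flag
 *
 * Enables or disables GC stress mode.  While enabled, GC is invoked on every
 * possible occasion, which helps reproduce GC bugs but is very slow.  A
 * Fixnum flag is stored as-is and its low bits select finer behaviour (see
 * garbage_collect_body: 0x01 forces a minor GC, 0x02 disables immediate
 * sweep).
 */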
05596 static VALUE
05597 gc_stress_set(VALUE self, VALUE flag)
05598 {
05599 rb_objspace_t *objspace = &rb_objspace;
05600 rb_secure(2);
05601 ruby_gc_stress = FIXNUM_P(flag) ? flag : (RTEST(flag) ? Qtrue : Qfalse);
05602 return flag;
05603 }
05604
05605
05606
05607
05608
05609
05610
05611
05612
05613
05614
05615
05616
05617
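/*
 * call-seq:
 *   GC.enable -> true or false
 *
 * Re-enables garbage collection.  Returns true if GC was previously
 * disabled, false otherwise.
 */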
05618 VALUE
05619 rb_gc_enable(void)
05620 {
05621 rb_objspace_t *objspace = &rb_objspace;
05622 int old = dont_gc;
05623
05624 dont_gc = FALSE;
05625 return old ? Qtrue : Qfalse;
05626 }
05627
05628
05629
05630
05631
05632
05633
05634
05635
05636
05637
05638
05639
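/*
 * call-seq:
 *   GC.disable -> true or false
 *
 * Disables garbage collection (after finishing any lazy sweep in progress).
 * Returns true if GC was already disabled, false otherwise.
 */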
05640 VALUE
05641 rb_gc_disable(void)
05642 {
05643 rb_objspace_t *objspace = &rb_objspace;
05644 int old = dont_gc;
05645
05646 gc_rest_sweep(objspace);
05647
05648 dont_gc = TRUE;
05649 return old ? Qtrue : Qfalse;
05650 }
05651
05652 static int
05653 get_envparam_int(const char *name, unsigned int *default_value, int lower_bound)
05654 {
05655 char *ptr = getenv(name);
05656 int val;
05657
05658 if (ptr != NULL) {
05659 val = atoi(ptr);
05660 if (val > lower_bound) {
05661 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%d (default value: %d)\n", name, val, *default_value);
05662 *default_value = val;
05663 return 1;
05664 }
05665 else {
05666 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%d (default value: %d) is ignored because it must be greater than %d.\n", name, val, *default_value, lower_bound);
05667 }
05668 }
05669 return 0;
05670 }
05671
05672 static int
05673 get_envparam_double(const char *name, double *default_value, double lower_bound)
05674 {
05675 char *ptr = getenv(name);
05676 double val;
05677
05678 if (ptr != NULL) {
05679 val = strtod(ptr, NULL);
05680 if (val > lower_bound) {
05681 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (%f)\n", name, val, *default_value);
05682 *default_value = val;
05683 return 1;
05684 }
05685 else {
05686 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n", name, val, *default_value, lower_bound);
05687 }
05688 }
05689 return 0;
05690 }
05691
05692 static void
05693 gc_set_initial_pages(void)
05694 {
05695 size_t min_pages;
05696 rb_objspace_t *objspace = &rb_objspace;
05697
05698 min_pages = gc_params.heap_init_slots / HEAP_OBJ_LIMIT;
05699 if (min_pages > heap_eden->page_length) {
05700 heap_add_pages(objspace, heap_eden, min_pages - heap_eden->page_length);
05701 }
05702 }
05703
05704
05734
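/*
 * Read the RUBY_GC_* tuning variables from the environment and store them
 * into gc_params.  Does nothing when safe_level > 0.  The obsolete names
 * RUBY_FREE_MIN and RUBY_HEAP_MIN_SLOTS are still honoured with a warning.
 * See the get_envparam_* calls below for the full variable list.
 */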
05735 void
05736 ruby_gc_set_params(int safe_level)
05737 {
05738 if (safe_level > 0) return;
05739
05740
05741 if (get_envparam_int("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
05742
05743 }
05744 else if (get_envparam_int("RUBY_FREE_MIN", &gc_params.heap_free_slots, 0)) {
05745 rb_warn("RUBY_FREE_MIN is obsolete. Use RUBY_GC_HEAP_FREE_SLOTS instead.");
05746 }
05747
05748
05749 if (get_envparam_int("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
05750 gc_set_initial_pages();
05751 }
05752 else if (get_envparam_int("RUBY_HEAP_MIN_SLOTS", &gc_params.heap_init_slots, 0)) {
05753 rb_warn("RUBY_HEAP_MIN_SLOTS is obsolete. Use RUBY_GC_HEAP_INIT_SLOTS instead.");
05754 gc_set_initial_pages();
05755 }
05756
05757 get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0);
05758 get_envparam_int ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
05759 get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0);
05760
05761 get_envparam_int("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0);
05762 get_envparam_int("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
05763 get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0);
05764
05765 #if RGENGC_ESTIMATE_OLDMALLOC
05766 if (get_envparam_int("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
05767 rb_objspace_t *objspace = &rb_objspace;
05768 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
05769 }
05770 get_envparam_int("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
05771 get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0);
05772 #endif
05773 }
05774
05775 void
05776 rb_gc_set_params(void)
05777 {
05778 ruby_gc_set_params(rb_safe_level());
05779 }
05780
05781 void
05782 rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
05783 {
05784 rb_objspace_t *objspace = &rb_objspace;
05785
05786 if (is_markable_object(objspace, obj)) {
05787 struct mark_func_data_struct mfd;
05788 mfd.mark_func = func;
05789 mfd.data = data;
05790 objspace->mark_func_data = &mfd;
05791 gc_mark_children(objspace, obj);
05792 objspace->mark_func_data = 0;
05793 }
05794 }
05795
05796 struct root_objects_data {
05797 const char *category;
05798 void (*func)(const char *category, VALUE, void *);
05799 void *data;
05800 };
05801
05802 static void
05803 root_objects_from(VALUE obj, void *ptr)
05804 {
05805 const struct root_objects_data *data = (struct root_objects_data *)ptr;
05806 (*data->func)(data->category, obj, data->data);
05807 }
05808
05809 void
05810 rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
05811 {
05812 rb_objspace_t *objspace = &rb_objspace;
05813 struct root_objects_data data;
05814 struct mark_func_data_struct mfd;
05815
05816 data.func = func;
05817 data.data = passing_data;
05818
05819 mfd.mark_func = root_objects_from;
05820 mfd.data = &data;
05821
05822 objspace->mark_func_data = &mfd;
05823 {
05824 gc_mark_roots(objspace, TRUE, &data.category);
05825 }
05826 objspace->mark_func_data = 0;
05827 }
05828
05829
05830
05831
05832
05833 static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size);
05834
05835 static void *
05836 negative_size_allocation_error_with_gvl(void *ptr)
05837 {
05838 rb_raise(rb_eNoMemError, "%s", (const char *)ptr);
05839 return 0;
05840 }
05841
05842 static void
05843 negative_size_allocation_error(const char *msg)
05844 {
05845 if (ruby_thread_has_gvl_p()) {
05846 rb_raise(rb_eNoMemError, "%s", msg);
05847 }
05848 else {
05849 if (ruby_native_thread_p()) {
05850 rb_thread_call_with_gvl(negative_size_allocation_error_with_gvl, (void *)msg);
05851 }
05852 else {
05853 fprintf(stderr, "[FATAL] %s\n", msg);
05854 exit(EXIT_FAILURE);
05855 }
05856 }
05857 }
05858
05859 static void *
05860 ruby_memerror_body(void *dummy)
05861 {
05862 rb_memerror();
05863 return 0;
05864 }
05865
05866 static void
05867 ruby_memerror(void)
05868 {
05869 if (ruby_thread_has_gvl_p()) {
05870 rb_memerror();
05871 }
05872 else {
05873 if (ruby_native_thread_p()) {
05874 rb_thread_call_with_gvl(ruby_memerror_body, 0);
05875 }
05876 else {
05877
05878 fprintf(stderr, "[FATAL] failed to allocate memory\n");
05879 exit(EXIT_FAILURE);
05880 }
05881 }
05882 }
05883
05884 void
05885 rb_memerror(void)
05886 {
05887 rb_thread_t *th = GET_THREAD();
05888 if (!nomem_error ||
05889 rb_thread_raised_p(th, RAISED_NOMEMORY)) {
05890 fprintf(stderr, "[FATAL] failed to allocate memory\n");
05891 exit(EXIT_FAILURE);
05892 }
05893 if (rb_thread_raised_p(th, RAISED_NOMEMORY)) {
05894 rb_thread_raised_clear(th);
05895 GET_THREAD()->errinfo = nomem_error;
05896 JUMP_TAG(TAG_RAISE);
05897 }
05898 rb_thread_raised_set(th, RAISED_NOMEMORY);
05899 rb_exc_raise(nomem_error);
05900 }
05901
05902 static void *
05903 aligned_malloc(size_t alignment, size_t size)
05904 {
05905 void *res;
05906
05907 #if defined __MINGW32__
05908 res = __mingw_aligned_malloc(size, alignment);
05909 #elif defined _WIN32 && !defined __CYGWIN__
05910 void *_aligned_malloc(size_t, size_t);
05911 res = _aligned_malloc(size, alignment);
05912 #elif defined(HAVE_POSIX_MEMALIGN)
05913 if (posix_memalign(&res, alignment, size) == 0) {
05914 return res;
05915 }
05916 else {
05917 return NULL;
05918 }
05919 #elif defined(HAVE_MEMALIGN)
05920 res = memalign(alignment, size);
05921 #else
05922 char* aligned;
05923 res = malloc(alignment + size + sizeof(void*));
05924 aligned = (char*)res + alignment + sizeof(void*);
05925 aligned -= ((VALUE)aligned & (alignment - 1));
05926 ((void**)aligned)[-1] = res;
05927 res = (void*)aligned;
05928 #endif
05929
05930 #if defined(_DEBUG) || GC_DEBUG
05931
05932 assert(((alignment - 1) & alignment) == 0);
05933 assert(alignment % sizeof(void*) == 0);
05934 #endif
05935 return res;
05936 }
05937
05938 static void
05939 aligned_free(void *ptr)
05940 {
05941 #if defined __MINGW32__
05942 __mingw_aligned_free(ptr);
05943 #elif defined _WIN32 && !defined __CYGWIN__
05944 _aligned_free(ptr);
05945 #elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)
05946 free(ptr);
05947 #else
05948 free(((void**)ptr)[-1]);
05949 #endif
05950 }
05951
05952 static inline size_t
05953 objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
05954 {
05955 #ifdef HAVE_MALLOC_USABLE_SIZE
05956 return malloc_usable_size(ptr);
05957 #else
05958 return hint;
05959 #endif
05960 }
05961
05962 enum memop_type {
05963 MEMOP_TYPE_MALLOC = 1,
05964 MEMOP_TYPE_FREE = 2,
05965 MEMOP_TYPE_REALLOC = 3
05966 };
05967
05968 static inline void
05969 atomic_sub_nounderflow(size_t *var, size_t sub)
05970 {
05971 if (sub == 0) return;
05972
05973 while (1) {
05974 size_t val = *var;
05975 if (val < sub) sub = val;
05976 if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
05977 }
05978 }
05979
05980 static void
05981 objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
05982 {
05983 if (new_size > old_size) {
05984 ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
05985 #if RGENGC_ESTIMATE_OLDMALLOC
05986 ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
05987 #endif
05988 }
05989 else {
05990 atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
05991 #if RGENGC_ESTIMATE_OLDMALLOC
05992 atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
05993 #endif
05994 }
05995
05996 if (type == MEMOP_TYPE_MALLOC) {
05997 if (ruby_gc_stress && !ruby_disable_gc_stress) {
05998 garbage_collect_with_gvl(objspace, FALSE, TRUE, GPR_FLAG_MALLOC);
05999 }
06000 else {
06001 retry:
06002 if (malloc_increase > malloc_limit) {
06003 if (ruby_thread_has_gvl_p() && is_lazy_sweeping(heap_eden)) {
06004 gc_rest_sweep(objspace);
06005 goto retry;
06006 }
06007 garbage_collect_with_gvl(objspace, FALSE, TRUE, GPR_FLAG_MALLOC);
06008 }
06009 }
06010 }
06011
06012 #if MALLOC_ALLOCATED_SIZE
06013 if (new_size >= old_size) {
06014 ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
06015 }
06016 else {
06017 size_t dec_size = old_size - new_size;
06018 size_t allocated_size = objspace->malloc_params.allocated_size;
06019
06020 #if MALLOC_ALLOCATED_SIZE_CHECK
06021 if (allocated_size < dec_size) {
06022 rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
06023 }
06024 #endif
06025 atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
06026 }
06027
06028 if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %d, old_size: %d\n",
06029 mem,
06030 type == MEMOP_TYPE_MALLOC ? "malloc" :
06031 type == MEMOP_TYPE_FREE ? "free " :
06032 type == MEMOP_TYPE_REALLOC ? "realloc": "error",
06033 (int)new_size, (int)old_size);
06034
06035 switch (type) {
06036 case MEMOP_TYPE_MALLOC:
06037 ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
06038 break;
06039 case MEMOP_TYPE_FREE:
06040 {
06041 size_t allocations = objspace->malloc_params.allocations;
06042 if (allocations > 0) {
06043 atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
06044 }
06045 #if MALLOC_ALLOCATED_SIZE_CHECK
06046 else {
06047 assert(objspace->malloc_params.allocations > 0);
06048 }
06049 #endif
06050 }
06051 break;
06052 case MEMOP_TYPE_REALLOC: break;
06053 }
06054 #endif
06055 }
06056
06057 static inline size_t
06058 objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
06059 {
06060 if ((ssize_t)size < 0) {
06061 negative_size_allocation_error("negative allocation size (or too big)");
06062 }
06063 if (size == 0) size = 1;
06064
06065 #if CALC_EXACT_MALLOC_SIZE
06066 size += sizeof(size_t);
06067 #endif
06068
06069 return size;
06070 }
06071
06072 static inline void *
06073 objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
06074 {
06075 #if CALC_EXACT_MALLOC_SIZE
06076 ((size_t *)mem)[0] = size;
06077 mem = (size_t *)mem + 1;
06078 #endif
06079
06080 return mem;
06081 }
06082
06083 #define TRY_WITH_GC(alloc) do { \
06084 if (!(alloc) && \
06085 (!garbage_collect_with_gvl(objspace, 1, 1, GPR_FLAG_MALLOC) || \
06086 !(alloc))) { \
06087 ruby_memerror(); \
06088 } \
06089 } while (0)
06090
06091 static void *
06092 objspace_xmalloc(rb_objspace_t *objspace, size_t size)
06093 {
06094 void *mem;
06095
06096 size = objspace_malloc_prepare(objspace, size);
06097 TRY_WITH_GC(mem = malloc(size));
06098 size = objspace_malloc_size(objspace, mem, size);
06099 objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
06100 return objspace_malloc_fixup(objspace, mem, size);
06101 }
06102
06103 static void *
06104 objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
06105 {
06106 void *mem;
06107
06108 if ((ssize_t)new_size < 0) {
06109 negative_size_allocation_error("negative re-allocation size");
06110 }
06111
06112 if (!ptr) return objspace_xmalloc(objspace, new_size);
06113
06114
06115
06116
06117
06118
06119 if (new_size == 0) {
06120 objspace_xfree(objspace, ptr, old_size);
06121 return 0;
06122 }
06123
06124 #if CALC_EXACT_MALLOC_SIZE
06125 new_size += sizeof(size_t);
06126 ptr = (size_t *)ptr - 1;
06127 old_size = ((size_t *)ptr)[0];
06128 #endif
06129
06130 old_size = objspace_malloc_size(objspace, ptr, old_size);
06131 TRY_WITH_GC(mem = realloc(ptr, new_size));
06132 new_size = objspace_malloc_size(objspace, mem, new_size);
06133
06134 #if CALC_EXACT_MALLOC_SIZE
06135 ((size_t *)mem)[0] = new_size;
06136 mem = (size_t *)mem + 1;
06137 #endif
06138
06139 objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
06140
06141 return mem;
06142 }
06143
06144 static void
06145 objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
06146 {
06147 #if CALC_EXACT_MALLOC_SIZE
06148 ptr = ((size_t *)ptr) - 1;
06149 old_size = ((size_t *)ptr)[0];
06150 #endif
06151 old_size = objspace_malloc_size(objspace, ptr, old_size);
06152
06153 free(ptr);
06154
06155 objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE);
06156 }
06157
06158 void *
06159 ruby_xmalloc(size_t size)
06160 {
06161 return objspace_xmalloc(&rb_objspace, size);
06162 }
06163
06164 static inline size_t
06165 xmalloc2_size(size_t n, size_t size)
06166 {
06167 size_t len = size * n;
06168 if (n != 0 && size != len / n) {
06169 rb_raise(rb_eArgError, "malloc: possible integer overflow");
06170 }
06171 return len;
06172 }
06173
06174 void *
06175 ruby_xmalloc2(size_t n, size_t size)
06176 {
06177 return objspace_xmalloc(&rb_objspace, xmalloc2_size(n, size));
06178 }
06179
06180 static void *
06181 objspace_xcalloc(rb_objspace_t *objspace, size_t count, size_t elsize)
06182 {
06183 void *mem;
06184 size_t size;
06185
06186 size = xmalloc2_size(count, elsize);
06187 size = objspace_malloc_prepare(objspace, size);
06188
06189 TRY_WITH_GC(mem = calloc(1, size));
06190 return objspace_malloc_fixup(objspace, mem, size);
06191 }
06192
06193 void *
06194 ruby_xcalloc(size_t n, size_t size)
06195 {
06196 return objspace_xcalloc(&rb_objspace, n, size);
06197 }
06198
06199 #ifdef ruby_sized_xrealloc
06200 #undef ruby_sized_xrealloc
06201 #endif
06202 void *
06203 ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
06204 {
06205 return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
06206 }
06207
06208 void *
06209 ruby_xrealloc(void *ptr, size_t new_size)
06210 {
06211 return ruby_sized_xrealloc(ptr, new_size, 0);
06212 }
06213
06214 #ifdef ruby_sized_xrealloc2
06215 #undef ruby_sized_xrealloc2
06216 #endif
06217 void *
06218 ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
06219 {
06220 size_t len = size * n;
06221 if (n != 0 && size != len / n) {
06222 rb_raise(rb_eArgError, "realloc: possible integer overflow");
06223 }
06224 return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
06225 }
06226
06227 void *
06228 ruby_xrealloc2(void *ptr, size_t n, size_t size)
06229 {
06230 return ruby_sized_xrealloc2(ptr, n, size, 0);
06231 }
06232
06233 #ifdef ruby_sized_xfree
06234 #undef ruby_sized_xfree
06235 #endif
06236 void
06237 ruby_sized_xfree(void *x, size_t size)
06238 {
06239 if (x) {
06240 objspace_xfree(&rb_objspace, x, size);
06241 }
06242 }
06243
06244 void
06245 ruby_xfree(void *x)
06246 {
06247 ruby_sized_xfree(x, 0);
06248 }
06249
06250
06251
06252
06253 void *
06254 ruby_mimmalloc(size_t size)
06255 {
06256 void *mem;
06257 #if CALC_EXACT_MALLOC_SIZE
06258 size += sizeof(size_t);
06259 #endif
06260 mem = malloc(size);
06261 #if CALC_EXACT_MALLOC_SIZE
06262
06263 ((size_t *)mem)[0] = 0;
06264 mem = (size_t *)mem + 1;
06265 #endif
06266 return mem;
06267 }
06268
06269 void
06270 ruby_mimfree(void *ptr)
06271 {
06272 size_t *mem = (size_t *)ptr;
06273 #if CALC_EXACT_MALLOC_SIZE
06274 mem = mem - 1;
06275 #endif
06276 free(mem);
06277 }
06278
06279 #if MALLOC_ALLOCATED_SIZE
06280
06281
06282
06283
06284
06285
06286
06287
06288
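/*
 * call-seq:
 *   GC.malloc_allocated_size -> Integer
 *
 * Returns the size of memory allocated by malloc().  Only available when
 * ruby was built with MALLOC_ALLOCATED_SIZE.
 */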
06289 static VALUE
06290 gc_malloc_allocated_size(VALUE self)
06291 {
06292 return UINT2NUM(rb_objspace.malloc_params.allocated_size);
06293 }
06294
06295
06296
06297
06298
06299
06300
06301
06302
06303
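/*
 * call-seq:
 *   GC.malloc_allocations -> Integer
 *
 * Returns the number of malloc() allocations.  Only available when ruby was
 * built with MALLOC_ALLOCATED_SIZE.
 */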
06304 static VALUE
06305 gc_malloc_allocations(VALUE self)
06306 {
06307 return UINT2NUM(rb_objspace.malloc_params.allocations);
06308 }
06309 #endif
06310
06311
06312
06313
06314
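/*
 * ObjectSpace::WeakMap support.  obj2wmap maps a referenced object to the
 * list of entries pointing at it, wmap2obj maps each key to its object, and
 * final holds the finalizer Method (WeakMap#finalize) used to drop dead
 * entries when a referenced object is collected.
 */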
06315 struct weakmap {
06316 st_table *obj2wmap;
06317 st_table *wmap2obj;
06318 VALUE final;
06319 };
06320
06321 #define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0
06322
06323 #if WMAP_DELETE_DEAD_OBJECT_IN_MARK
06324 static int
06325 wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
06326 {
06327 rb_objspace_t *objspace = (rb_objspace_t *)arg;
06328 VALUE obj = (VALUE)val;
06329 if (!is_live_object(objspace, obj)) return ST_DELETE;
06330 return ST_CONTINUE;
06331 }
06332 #endif
06333
06334 static void
06335 wmap_mark(void *ptr)
06336 {
06337 struct weakmap *w = ptr;
06338 #if WMAP_DELETE_DEAD_OBJECT_IN_MARK
06339 if (w->obj2wmap) st_foreach(w->obj2wmap, wmap_mark_map, (st_data_t)&rb_objspace);
06340 #endif
06341 rb_gc_mark(w->final);
06342 }
06343
06344 static int
06345 wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
06346 {
06347 VALUE *ptr = (VALUE *)val;
06348 ruby_sized_xfree(ptr, (ptr[0] + 1) * sizeof(VALUE));
06349 return ST_CONTINUE;
06350 }
06351
06352 static void
06353 wmap_free(void *ptr)
06354 {
06355 struct weakmap *w = ptr;
06356 st_foreach(w->obj2wmap, wmap_free_map, 0);
06357 st_free_table(w->obj2wmap);
06358 st_free_table(w->wmap2obj);
06359 }
06360
06361 static int
06362 wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
06363 {
06364 VALUE *ptr = (VALUE *)val;
06365 *(size_t *)arg += (ptr[0] + 1) * sizeof(VALUE);
06366 return ST_CONTINUE;
06367 }
06368
06369 static size_t
06370 wmap_memsize(const void *ptr)
06371 {
06372 size_t size;
06373 const struct weakmap *w = ptr;
06374 if (!w) return 0;
06375 size = sizeof(*w);
06376 size += st_memsize(w->obj2wmap);
06377 size += st_memsize(w->wmap2obj);
06378 st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
06379 return size;
06380 }
06381
06382 static const rb_data_type_t weakmap_type = {
06383 "weakmap",
06384 {
06385 wmap_mark,
06386 wmap_free,
06387 wmap_memsize,
06388 },
06389 NULL, NULL, RUBY_TYPED_FREE_IMMEDIATELY
06390 };
06391
06392 static VALUE
06393 wmap_allocate(VALUE klass)
06394 {
06395 struct weakmap *w;
06396 VALUE obj = TypedData_Make_Struct(klass, struct weakmap, &weakmap_type, w);
06397 w->obj2wmap = st_init_numtable();
06398 w->wmap2obj = st_init_numtable();
06399 w->final = rb_obj_method(obj, ID2SYM(rb_intern("finalize")));
06400 return obj;
06401 }
06402
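/* st_update() callback used by wmap_finalize(): drops the dead key passed in
 * arg from the value's key array, shrinking the array or deleting the
 * obj2wmap entry entirely when no keys remain. */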
06403 static int
06404 wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
06405 {
06406 VALUE wmap, *ptr, size, i, j;
06407 if (!existing) return ST_STOP;
06408 wmap = (VALUE)arg, ptr = (VALUE *)*value;
06409 for (i = j = 1, size = ptr[0]; i <= size; ++i) {
06410 if (ptr[i] != wmap) {
06411 ptr[j++] = ptr[i];
06412 }
06413 }
06414 if (j == 1) {
06415 ruby_sized_xfree(ptr, i * sizeof(VALUE));
06416 return ST_DELETE;
06417 }
06418 if (j < i) {
06419 ptr = ruby_sized_xrealloc2(ptr, j + 1, sizeof(VALUE), i);
06420 ptr[0] = j;
06421 *value = (st_data_t)ptr;
06422 }
06423 return ST_CONTINUE;
06424 }
06425
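/* Private WeakMap#finalize(object_id) callback: when a registered object is
 * collected, remove it both as a referenced object (dropping every key that
 * pointed at it) and as a key. */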
06426 static VALUE
06427 wmap_finalize(VALUE self, VALUE objid)
06428 {
06429 st_data_t orig, wmap, data;
06430 VALUE obj, *rids, i, size;
06431 struct weakmap *w;
06432
06433 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
06434
06435 obj = obj_id_to_ref(objid);
06436
06437
06438 orig = (st_data_t)obj;
06439 if (st_delete(w->obj2wmap, &orig, &data)) {
06440 rids = (VALUE *)data;
06441 size = *rids++;
06442 for (i = 0; i < size; ++i) {
06443 wmap = (st_data_t)rids[i];
06444 st_delete(w->wmap2obj, &wmap, NULL);
06445 }
06446 ruby_sized_xfree((VALUE *)data, (size + 1) * sizeof(VALUE));
06447 }
06448
06449 wmap = (st_data_t)obj;
06450 if (st_delete(w->wmap2obj, &wmap, &orig)) {
06451 wmap = (st_data_t)obj;
06452 st_update(w->obj2wmap, orig, wmap_final_func, wmap);
06453 }
06454 return self;
06455 }
06456
06457 struct wmap_iter_arg {
06458 rb_objspace_t *objspace;
06459 VALUE value;
06460 };
06461
06462 static int
06463 wmap_inspect_i(st_data_t key, st_data_t val, st_data_t arg)
06464 {
06465 VALUE str = (VALUE)arg;
06466 VALUE k = (VALUE)key, v = (VALUE)val;
06467
06468 if (RSTRING_PTR(str)[0] == '#') {
06469 rb_str_cat2(str, ", ");
06470 }
06471 else {
06472 rb_str_cat2(str, ": ");
06473 RSTRING_PTR(str)[0] = '#';
06474 }
06475 k = SPECIAL_CONST_P(k) ? rb_inspect(k) : rb_any_to_s(k);
06476 rb_str_append(str, k);
06477 rb_str_cat2(str, " => ");
06478 v = SPECIAL_CONST_P(v) ? rb_inspect(v) : rb_any_to_s(v);
06479 rb_str_append(str, v);
06480 OBJ_INFECT(str, k);
06481 OBJ_INFECT(str, v);
06482
06483 return ST_CONTINUE;
06484 }
06485
06486 static VALUE
06487 wmap_inspect(VALUE self)
06488 {
06489 VALUE str;
06490 VALUE c = rb_class_name(CLASS_OF(self));
06491 struct weakmap *w;
06492
06493 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
06494 str = rb_sprintf("-<%"PRIsVALUE":%p", c, (void *)self);
06495 if (w->wmap2obj) {
06496 st_foreach(w->wmap2obj, wmap_inspect_i, str);
06497 }
06498 RSTRING_PTR(str)[0] = '#';
06499 rb_str_cat2(str, ">");
06500 return str;
06501 }
06502
06503 static int
06504 wmap_each_i(st_data_t key, st_data_t val, st_data_t arg)
06505 {
06506 rb_objspace_t *objspace = (rb_objspace_t *)arg;
06507 VALUE obj = (VALUE)val;
06508 if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
06509 rb_yield_values(2, (VALUE)key, obj);
06510 }
06511 return ST_CONTINUE;
06512 }
06513
06514
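/* WeakMap#each / #each_pair: yields each key/value pair whose value is still
 * a live object. */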
06515 static VALUE
06516 wmap_each(VALUE self)
06517 {
06518 struct weakmap *w;
06519 rb_objspace_t *objspace = &rb_objspace;
06520
06521 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
06522 st_foreach(w->wmap2obj, wmap_each_i, (st_data_t)objspace);
06523 return self;
06524 }
06525
06526 static int
06527 wmap_each_key_i(st_data_t key, st_data_t val, st_data_t arg)
06528 {
06529 rb_objspace_t *objspace = (rb_objspace_t *)arg;
06530 VALUE obj = (VALUE)val;
06531 if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
06532 rb_yield((VALUE)key);
06533 }
06534 return ST_CONTINUE;
06535 }
06536
06537
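/* WeakMap#each_key: yields each key whose referenced value is still live. */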
06538 static VALUE
06539 wmap_each_key(VALUE self)
06540 {
06541 struct weakmap *w;
06542 rb_objspace_t *objspace = &rb_objspace;
06543
06544 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
06545 st_foreach(w->wmap2obj, wmap_each_key_i, (st_data_t)objspace);
06546 return self;
06547 }
06548
06549 static int
06550 wmap_each_value_i(st_data_t key, st_data_t val, st_data_t arg)
06551 {
06552 rb_objspace_t *objspace = (rb_objspace_t *)arg;
06553 VALUE obj = (VALUE)val;
06554 if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
06555 rb_yield(obj);
06556 }
06557 return ST_CONTINUE;
06558 }
06559
06560
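/* WeakMap#each_value: yields each still-live referenced value. */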
06561 static VALUE
06562 wmap_each_value(VALUE self)
06563 {
06564 struct weakmap *w;
06565 rb_objspace_t *objspace = &rb_objspace;
06566
06567 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
06568 st_foreach(w->wmap2obj, wmap_each_value_i, (st_data_t)objspace);
06569 return self;
06570 }
06571
06572 static int
06573 wmap_keys_i(st_data_t key, st_data_t val, st_data_t arg)
06574 {
06575 struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
06576 rb_objspace_t *objspace = argp->objspace;
06577 VALUE ary = argp->value;
06578 VALUE obj = (VALUE)val;
06579 if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
06580 rb_ary_push(ary, (VALUE)key);
06581 }
06582 return ST_CONTINUE;
06583 }
06584
06585
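/* WeakMap#keys: returns an Array of the keys whose values are still live. */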
06586 static VALUE
06587 wmap_keys(VALUE self)
06588 {
06589 struct weakmap *w;
06590 struct wmap_iter_arg args;
06591
06592 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
06593 args.objspace = &rb_objspace;
06594 args.value = rb_ary_new();
06595 st_foreach(w->wmap2obj, wmap_keys_i, (st_data_t)&args);
06596 return args.value;
06597 }
06598
06599 static int
06600 wmap_values_i(st_data_t key, st_data_t val, st_data_t arg)
06601 {
06602 struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
06603 rb_objspace_t *objspace = argp->objspace;
06604 VALUE ary = argp->value;
06605 VALUE obj = (VALUE)val;
06606 if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
06607 rb_ary_push(ary, obj);
06608 }
06609 return ST_CONTINUE;
06610 }
06611
06612
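/* WeakMap#values: returns an Array of the still-live referenced values. */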
06613 static VALUE
06614 wmap_values(VALUE self)
06615 {
06616 struct weakmap *w;
06617 struct wmap_iter_arg args;
06618
06619 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
06620 args.objspace = &rb_objspace;
06621 args.value = rb_ary_new();
06622 st_foreach(w->wmap2obj, wmap_values_i, (st_data_t)&args);
06623 return args.value;
06624 }
06625
06626 static int
06627 wmap_aset_update(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
06628 {
06629 VALUE size, *ptr, *optr;
06630 if (existing) {
06631 size = (ptr = optr = (VALUE *)*val)[0];
06632 ++size;
06633 ptr = ruby_sized_xrealloc2(ptr, size + 1, sizeof(VALUE), size);
06634 }
06635 else {
06636 optr = 0;
06637 size = 1;
06638 ptr = ruby_xmalloc2(2, sizeof(VALUE));
06639 }
06640 ptr[0] = size;
06641 ptr[size] = (VALUE)arg;
06642 if (ptr == optr) return ST_STOP;
06643 *val = (st_data_t)ptr;
06644 return ST_CONTINUE;
06645 }
06646
06647
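/*
 * WeakMap#[]= (wmap = key, orig = value): registers key -> value (the map
 * marks neither object, so both remain collectible), installs the map's
 * finalizer on both, and returns the value's object id.
 * Usage sketch (names illustrative only):
 *
 *   map = ObjectSpace::WeakMap.new
 *   map[some_key] = some_value     # some_value may still be GC'ed later
 *   map[some_key]                  #=> some_value, or nil once collected
 */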
06648 static VALUE
06649 wmap_aset(VALUE self, VALUE wmap, VALUE orig)
06650 {
06651 struct weakmap *w;
06652
06653 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
06654 should_be_finalizable(orig);
06655 should_be_finalizable(wmap);
06656 define_final0(orig, w->final);
06657 define_final0(wmap, w->final);
06658 st_update(w->obj2wmap, (st_data_t)orig, wmap_aset_update, wmap);
06659 st_insert(w->wmap2obj, (st_data_t)wmap, (st_data_t)orig);
06660 return nonspecial_obj_id(orig);
06661 }
06662
06663
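/* WeakMap#[]: looks the key up in wmap2obj and returns the referenced
 * object, or nil if the key is unknown or the object has already been
 * collected. */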
06664 static VALUE
06665 wmap_aref(VALUE self, VALUE wmap)
06666 {
06667 st_data_t data;
06668 VALUE obj;
06669 struct weakmap *w;
06670 rb_objspace_t *objspace = &rb_objspace;
06671
06672 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
06673 if (!st_lookup(w->wmap2obj, (st_data_t)wmap, &data)) return Qnil;
06674 obj = (VALUE)data;
06675 if (!is_id_value(objspace, obj)) return Qnil;
06676 if (!is_live_object(objspace, obj)) return Qnil;
06677 return obj;
06678 }
06679
06680
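/* WeakMap#include?/#member?/#key?: true if the key still refers to a live
 * object. */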
06681 static VALUE
06682 wmap_has_key(VALUE self, VALUE key)
06683 {
06684 return NIL_P(wmap_aref(self, key)) ? Qfalse : Qtrue;
06685 }
06686
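/* WeakMap#size/#length: the number of entries currently in wmap2obj; this
 * may still count entries whose referents are dead but not yet finalized. */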
06687 static VALUE
06688 wmap_size(VALUE self)
06689 {
06690 struct weakmap *w;
06691 st_index_t n;
06692
06693 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
06694 n = w->wmap2obj->num_entries;
06695 #if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
06696 return ULONG2NUM(n);
06697 #else
06698 return ULL2NUM(n);
06699 #endif
06700 }
06701
06702
06703
06704
06705
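/*
  ------------------------------ GC profiler ------------------------------
  getrusage_time() below returns per-process CPU time in seconds, trying
  clock_gettime(CLOCK_PROCESS_CPUTIME_ID) first, then getrusage(RUSAGE_SELF),
  then GetProcessTimes() on Windows, and 0.0 if none are available.
*/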
06706 #define GC_PROFILE_RECORD_DEFAULT_SIZE 100
06707
06708 static double
06709 getrusage_time(void)
06710 {
06711 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
06712 {
06713 static int try_clock_gettime = 1;
06714 struct timespec ts;
06715 if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
06716 return ts.tv_sec + ts.tv_nsec * 1e-9;
06717 }
06718 else {
06719 try_clock_gettime = 0;
06720 }
06721 }
06722 #endif
06723
06724 #ifdef RUSAGE_SELF
06725 {
06726 struct rusage usage;
06727 struct timeval time;
06728 if (getrusage(RUSAGE_SELF, &usage) == 0) {
06729 time = usage.ru_utime;
06730 return time.tv_sec + time.tv_usec * 1e-6;
06731 }
06732 }
06733 #endif
06734
06735 #ifdef _WIN32
06736 {
06737 FILETIME creation_time, exit_time, kernel_time, user_time;
06738 ULARGE_INTEGER ui;
06739 LONG_LONG q;
06740 double t;
06741
06742 if (GetProcessTimes(GetCurrentProcess(),
06743 &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
06744 memcpy(&ui, &user_time, sizeof(FILETIME));
06745 q = ui.QuadPart / 10L;
06746 t = (DWORD)(q % 1000000L) * 1e-6;
06747 q /= 1000000L;
06748 #ifdef __GNUC__
06749 t += q;
06750 #else
06751 t += (double)(DWORD)(q >> 16) * (1 << 16);
06752 t += (DWORD)q & ~(~0 << 16);
06753 #endif
06754 return t;
06755 }
06756 }
06757 #endif
06758
06759 return 0.0;
06760 }
06761
06762 static inline void
06763 gc_prof_setup_new_record(rb_objspace_t *objspace, int reason)
06764 {
06765 if (objspace->profile.run) {
06766 size_t index = objspace->profile.next_index;
06767 gc_profile_record *record;
06768
06769
06770 objspace->profile.next_index++;
06771
06772 if (!objspace->profile.records) {
06773 objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
06774 objspace->profile.records = malloc(sizeof(gc_profile_record) * objspace->profile.size);
06775 }
06776 if (index >= objspace->profile.size) {
06777 objspace->profile.size += 1000;
06778 objspace->profile.records = realloc(objspace->profile.records, sizeof(gc_profile_record) * objspace->profile.size);
06779 }
06780 if (!objspace->profile.records) {
06781 rb_bug("gc_profile malloc or realloc miss");
06782 }
06783 record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
06784 MEMZERO(record, gc_profile_record, 1);
06785
06786
06787 record->flags = reason | ((ruby_gc_stress && !ruby_disable_gc_stress) ? GPR_FLAG_STRESS : 0);
06788 #if MALLOC_ALLOCATED_SIZE
06789 record->allocated_size = malloc_allocated_size;
06790 #endif
06791 #if GC_PROFILE_DETAIL_MEMORY
06792 #ifdef RUSAGE_SELF
06793 {
06794 struct rusage usage;
06795 if (getrusage(RUSAGE_SELF, &usage) == 0) {
06796 record->maxrss = usage.ru_maxrss;
06797 record->minflt = usage.ru_minflt;
06798 record->majflt = usage.ru_majflt;
06799 }
06800 }
06801 #endif
06802 #endif
06803 }
06804 }
06805
06806 static inline void
06807 gc_prof_timer_start(rb_objspace_t *objspace)
06808 {
06809 if (gc_prof_enabled(objspace)) {
06810 gc_profile_record *record = gc_prof_record(objspace);
06811 #if GC_PROFILE_MORE_DETAIL
06812 record->prepare_time = objspace->profile.prepare_time;
06813 #endif
06814 record->gc_time = 0;
06815 record->gc_invoke_time = getrusage_time();
06816 }
06817 }
06818
06819 static double
06820 elapsed_time_from(double time)
06821 {
06822 double now = getrusage_time();
06823 if (now > time) {
06824 return now - time;
06825 }
06826 else {
06827 return 0;
06828 }
06829 }
06830
06831 static inline void
06832 gc_prof_timer_stop(rb_objspace_t *objspace)
06833 {
06834 if (gc_prof_enabled(objspace)) {
06835 gc_profile_record *record = gc_prof_record(objspace);
06836 record->gc_time = elapsed_time_from(record->gc_invoke_time);
06837 record->gc_invoke_time -= objspace->profile.invoke_time;
06838 }
06839 }
06840
06841 static inline void
06842 gc_prof_mark_timer_start(rb_objspace_t *objspace)
06843 {
06844 if (RUBY_DTRACE_GC_MARK_BEGIN_ENABLED()) {
06845 RUBY_DTRACE_GC_MARK_BEGIN();
06846 }
06847 #if GC_PROFILE_MORE_DETAIL
06848 if (gc_prof_enabled(objspace)) {
06849 gc_prof_record(objspace)->gc_mark_time = getrusage_time();
06850 }
06851 #endif
06852 }
06853
06854 static inline void
06855 gc_prof_mark_timer_stop(rb_objspace_t *objspace)
06856 {
06857 if (RUBY_DTRACE_GC_MARK_END_ENABLED()) {
06858 RUBY_DTRACE_GC_MARK_END();
06859 }
06860 #if GC_PROFILE_MORE_DETAIL
06861 if (gc_prof_enabled(objspace)) {
06862 gc_profile_record *record = gc_prof_record(objspace);
06863 record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
06864 }
06865 #endif
06866 }
06867
06868 static inline void
06869 gc_prof_sweep_timer_start(rb_objspace_t *objspace)
06870 {
06871 if (RUBY_DTRACE_GC_SWEEP_BEGIN_ENABLED()) {
06872 RUBY_DTRACE_GC_SWEEP_BEGIN();
06873 }
06874 if (gc_prof_enabled(objspace)) {
06875 gc_profile_record *record = gc_prof_record(objspace);
06876
06877 if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
06878 objspace->profile.gc_sweep_start_time = getrusage_time();
06879 }
06880 }
06881 }
06882
06883 static inline void
06884 gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
06885 {
06886 if (RUBY_DTRACE_GC_SWEEP_END_ENABLED()) {
06887 RUBY_DTRACE_GC_SWEEP_END();
06888 }
06889
06890 if (gc_prof_enabled(objspace)) {
06891 double sweep_time;
06892 gc_profile_record *record = gc_prof_record(objspace);
06893
06894 if (record->gc_time > 0) {
06895 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
06896
06897 record->gc_time += sweep_time;
06898 }
06899 else if (GC_PROFILE_MORE_DETAIL) {
06900 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
06901 }
06902
06903 #if GC_PROFILE_MORE_DETAIL
06904 record->gc_sweep_time += sweep_time;
06905 if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
06906 #endif
06907 if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
06908 }
06909 }
06910
06911 static inline void
06912 gc_prof_set_malloc_info(rb_objspace_t *objspace)
06913 {
06914 #if GC_PROFILE_MORE_DETAIL
06915 if (gc_prof_enabled(objspace)) {
06916 gc_profile_record *record = gc_prof_record(objspace);
06917 record->allocate_increase = malloc_increase;
06918 record->allocate_limit = malloc_limit;
06919 }
06920 #endif
06921 }
06922
06923 static inline void
06924 gc_prof_set_heap_info(rb_objspace_t *objspace)
06925 {
06926 if (gc_prof_enabled(objspace)) {
06927 gc_profile_record *record = gc_prof_record(objspace);
06928 size_t live = objspace->profile.total_allocated_object_num_at_gc_start - objspace->profile.total_freed_object_num;
06929 size_t total = objspace->profile.heap_used_at_gc_start * HEAP_OBJ_LIMIT;
06930
06931 #if GC_PROFILE_MORE_DETAIL
06932 record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
06933 record->heap_live_objects = live;
06934 record->heap_free_objects = total - live;
06935 #endif
06936
06937 record->heap_total_objects = total;
06938 record->heap_use_size = live * sizeof(RVALUE);
06939 record->heap_total_size = total * sizeof(RVALUE);
06940 }
06941 }
06942
06943
06944
06945
06946
06947
06948
06949
06950
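/*
 * call-seq:
 *    GC::Profiler.clear -> nil
 *
 * Discards all collected profile records and shrinks the record buffer back
 * toward its default size.
 */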
06951 static VALUE
06952 gc_profile_clear(void)
06953 {
06954 rb_objspace_t *objspace = &rb_objspace;
06955 if (GC_PROFILE_RECORD_DEFAULT_SIZE * 2 < objspace->profile.size) {
06956 objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE * 2;
06957 objspace->profile.records = realloc(objspace->profile.records, sizeof(gc_profile_record) * objspace->profile.size);
06958 if (!objspace->profile.records) {
06959 rb_memerror();
06960 }
06961 }
06962 MEMZERO(objspace->profile.records, gc_profile_record, objspace->profile.size);
06963 objspace->profile.next_index = 0;
06964 objspace->profile.current_record = 0;
06965 return Qnil;
06966 }
06967
06968
07017
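/*
 * call-seq:
 *    GC::Profiler.raw_data -> [Hash, ...] or nil
 *
 * Returns nil while the profiler is disabled; otherwise an Array with one
 * Hash per recorded GC run.  A sketch of the shape (values illustrative;
 * extra keys appear when GC_PROFILE_MORE_DETAIL or RGENGC_PROFILE are
 * compiled in):
 *
 *   GC::Profiler.enable
 *   GC.start
 *   GC::Profiler.raw_data.first
 *   #=> {:GC_FLAGS=>{...}, :GC_TIME=>1.2e-05, :GC_INVOKE_TIME=>0.01,
 *   #    :HEAP_USE_SIZE=>..., :HEAP_TOTAL_SIZE=>..., :HEAP_TOTAL_OBJECTS=>...,
 *   #    :GC_IS_MARKED=>true}
 */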
07018 static VALUE
07019 gc_profile_record_get(void)
07020 {
07021 VALUE prof;
07022 VALUE gc_profile = rb_ary_new();
07023 size_t i;
07024 rb_objspace_t *objspace = (&rb_objspace);
07025
07026 if (!objspace->profile.run) {
07027 return Qnil;
07028 }
07029
07030 for (i = 0; i < objspace->profile.next_index; i++) {
07031 gc_profile_record *record = &objspace->profile.records[i];
07032
07033 prof = rb_hash_new();
07034 rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(record->flags, rb_hash_new()));
07035 rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
07036 rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
07037 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
07038 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
07039 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
07040 rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
07041 #if GC_PROFILE_MORE_DETAIL
07042 rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
07043 rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
07044 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
07045 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
07046 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
07047 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
07048 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));
07049
07050 rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
07051 rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));
07052
07053 rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), (record->flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
07054 #endif
07055
07056 #if RGENGC_PROFILE > 0
07057 rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
07058 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
07059 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
07060 #endif
07061 rb_ary_push(gc_profile, prof);
07062 }
07063
07064 return gc_profile;
07065 }
07066
07067 #if GC_PROFILE_MORE_DETAIL
07068 #define MAJOR_REASON_MAX 0x10
07069
07070 static char *
07071 gc_profile_dump_major_reason(int flags, char *buff)
07072 {
07073 int reason = flags & GPR_FLAG_MAJOR_MASK;
07074 int i = 0;
07075
07076 if (reason == GPR_FLAG_NONE) {
07077 buff[0] = '-';
07078 buff[1] = 0;
07079 }
07080 else {
07081 #define C(x, s) \
07082 if (reason & GPR_FLAG_MAJOR_BY_##x) { \
07083 buff[i++] = #x[0]; \
07084 if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
07085 buff[i] = 0; \
07086 }
07087 C(NOFREE, N);
07088 C(OLDGEN, O);
07089 C(SHADY, S);
07090 C(RESCAN, R);
07091 C(STRESS, T);
07092 #if RGENGC_ESTIMATE_OLDMALLOC
07093 C(OLDMALLOC, M);
07094 #endif
07095 #undef C
07096 }
07097 return buff;
07098 }
07099 #endif
07100
07101 static void
07102 gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
07103 {
07104 rb_objspace_t *objspace = &rb_objspace;
07105 size_t count = objspace->profile.next_index;
07106 #ifdef MAJOR_REASON_MAX
07107 char reason_str[MAJOR_REASON_MAX];
07108 #endif
07109
07110 if (objspace->profile.run && count) {
07111 size_t i;
07112 const gc_profile_record *record;
07113
07114 append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
07115 append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
07116
07117 for (i = 0; i < count; i++) {
07118 record = &objspace->profile.records[i];
07119 append(out, rb_sprintf("%5"PRIdSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
07120 i+1, record->gc_invoke_time, record->heap_use_size,
07121 record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
07122 }
07123
07124 #if GC_PROFILE_MORE_DETAIL
07125 append(out, rb_str_new_cstr("\n\n" \
07126 "More detail.\n" \
07127 "Prepare Time = Previously GC's rest sweep time\n"
07128 "Index Flags Allocate Inc. Allocate Limit"
07129 #if CALC_EXACT_MALLOC_SIZE
07130 " Allocated Size"
07131 #endif
07132 " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
07133 #if RGENGC_PROFILE
07134 " OldgenObj RemNormObj RemShadObj"
07135 #endif
07136 #if GC_PROFILE_DETAIL_MEMORY
07137 " MaxRSS(KB) MinorFLT MajorFLT"
07138 #endif
07139 "\n"));
07140
07141 for (i = 0; i < count; i++) {
07142 record = &objspace->profile.records[i];
07143 append(out, rb_sprintf("%5"PRIdSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
07144 #if CALC_EXACT_MALLOC_SIZE
07145 " %15"PRIuSIZE
07146 #endif
07147 " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
07148 #if RGENGC_PROFILE
07149 "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
07150 #endif
07151 #if GC_PROFILE_DETAIL_MEMORY
07152 "%11ld %8ld %8ld"
07153 #endif
07154
07155 "\n",
07156 i+1,
07157 gc_profile_dump_major_reason(record->flags, reason_str),
07158 (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
07159 (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
07160 (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
07161 (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
07162 (record->flags & GPR_FLAG_CAPI) ? "CAPI__" : "??????",
07163 (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
07164 record->allocate_increase, record->allocate_limit,
07165 #if CALC_EXACT_MALLOC_SIZE
07166 record->allocated_size,
07167 #endif
07168 record->heap_use_pages,
07169 record->gc_mark_time*1000,
07170 record->gc_sweep_time*1000,
07171 record->prepare_time*1000,
07172
07173 record->heap_live_objects,
07174 record->heap_free_objects,
07175 record->removing_objects,
07176 record->empty_objects
07177 #if RGENGC_PROFILE
07178 ,
07179 record->old_objects,
07180 record->remembered_normal_objects,
07181 record->remembered_shady_objects
07182 #endif
07183 #if GC_PROFILE_DETAIL_MEMORY
07184 ,
07185 record->maxrss / 1024,
07186 record->minflt,
07187 record->majflt
07188 #endif
07189
07190 ));
07191 }
07192 #endif
07193 }
07194 }
07195
07196
07197
07198
07199
07200
07201
07202
07203
07204
07205
07206
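/*
 * call-seq:
 *    GC::Profiler.result -> String
 *
 * Returns the profile report produced by gc_profile_dump_on() as a String.
 */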
07207 static VALUE
07208 gc_profile_result(void)
07209 {
07210 VALUE str = rb_str_buf_new(0);
07211 gc_profile_dump_on(str, rb_str_buf_append);
07212 return str;
07213 }
07214
07215
07216
07217
07218
07219
07220
07221
07222
07223
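/*
 * call-seq:
 *    GC::Profiler.report
 *    GC::Profiler.report(io)
 *
 * Writes the same report as GC::Profiler.result to +io+, defaulting to
 * $stdout.
 */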
07224 static VALUE
07225 gc_profile_report(int argc, VALUE *argv, VALUE self)
07226 {
07227 VALUE out;
07228
07229 if (argc == 0) {
07230 out = rb_stdout;
07231 }
07232 else {
07233 rb_scan_args(argc, argv, "01", &out);
07234 }
07235 gc_profile_dump_on(out, rb_io_write);
07236
07237 return Qnil;
07238 }
07239
07240
07241
07242
07243
07244
07245
07246
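/*
 * call-seq:
 *    GC::Profiler.total_time -> Float
 *
 * Returns the sum of the GC time of every recorded run, in seconds.
 */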
07247 static VALUE
07248 gc_profile_total_time(VALUE self)
07249 {
07250 double time = 0;
07251 rb_objspace_t *objspace = &rb_objspace;
07252
07253 if (objspace->profile.run && objspace->profile.next_index > 0) {
07254 size_t i;
07255 size_t count = objspace->profile.next_index;
07256
07257 for (i = 0; i < count; i++) {
07258 time += objspace->profile.records[i].gc_time;
07259 }
07260 }
07261 return DBL2NUM(time);
07262 }
07263
07264
07265
07266
07267
07268
07269
07270
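/*
 * call-seq:
 *    GC::Profiler.enabled? -> true or false
 *
 * Returns the current status of GC profiling.
 */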
07271 static VALUE
07272 gc_profile_enable_get(VALUE self)
07273 {
07274 rb_objspace_t *objspace = &rb_objspace;
07275 return objspace->profile.run ? Qtrue : Qfalse;
07276 }
07277
07278
07279
07280
07281
07282
07283
07284
07285
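/*
 * call-seq:
 *    GC::Profiler.enable -> nil
 *
 * Starts recording GC profile data.
 */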
07286 static VALUE
07287 gc_profile_enable(void)
07288 {
07289 rb_objspace_t *objspace = &rb_objspace;
07290 objspace->profile.run = TRUE;
07291 objspace->profile.current_record = 0;
07292 return Qnil;
07293 }
07294
07295
07296
07297
07298
07299
07300
07301
07302
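/*
 * call-seq:
 *    GC::Profiler.disable -> nil
 *
 * Stops recording GC profile data; already collected records are kept until
 * GC::Profiler.clear is called.
 */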
07303 static VALUE
07304 gc_profile_disable(void)
07305 {
07306 rb_objspace_t *objspace = &rb_objspace;
07307
07308 objspace->profile.run = FALSE;
07309 objspace->profile.current_record = 0;
07310 return Qnil;
07311 }
07312
07313
07314
07315
07316
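/* Debugging helpers: type_name()/obj_type_name() turn a T_xxx tag (or a
 * T_DATA object's typed-data name) into a printable string. */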
07317 static const char *
07318 type_name(int type, VALUE obj)
07319 {
07320 switch (type) {
07321 #define TYPE_NAME(t) case (t): return #t;
07322 TYPE_NAME(T_NONE);
07323 TYPE_NAME(T_OBJECT);
07324 TYPE_NAME(T_CLASS);
07325 TYPE_NAME(T_MODULE);
07326 TYPE_NAME(T_FLOAT);
07327 TYPE_NAME(T_STRING);
07328 TYPE_NAME(T_REGEXP);
07329 TYPE_NAME(T_ARRAY);
07330 TYPE_NAME(T_HASH);
07331 TYPE_NAME(T_STRUCT);
07332 TYPE_NAME(T_BIGNUM);
07333 TYPE_NAME(T_FILE);
07334 TYPE_NAME(T_MATCH);
07335 TYPE_NAME(T_COMPLEX);
07336 TYPE_NAME(T_RATIONAL);
07337 TYPE_NAME(T_NIL);
07338 TYPE_NAME(T_TRUE);
07339 TYPE_NAME(T_FALSE);
07340 TYPE_NAME(T_SYMBOL);
07341 TYPE_NAME(T_FIXNUM);
07342 TYPE_NAME(T_UNDEF);
07343 TYPE_NAME(T_NODE);
07344 TYPE_NAME(T_ICLASS);
07345 TYPE_NAME(T_ZOMBIE);
07346 case T_DATA:
07347 if (obj && rb_objspace_data_type_name(obj)) {
07348 return rb_objspace_data_type_name(obj);
07349 }
07350 return "T_DATA";
07351 #undef TYPE_NAME
07352 }
07353 return "unknown";
07354 }
07355
07356 static const char *
07357 obj_type_name(VALUE obj)
07358 {
07359 return type_name(TYPE(obj), obj);
07360 }
07361
07362 #if GC_DEBUG
07363
07364 void
07365 rb_gcdebug_print_obj_condition(VALUE obj)
07366 {
07367 rb_objspace_t *objspace = &rb_objspace;
07368
07369 fprintf(stderr, "created at: %s:%d\n", RSTRING_PTR(RANY(obj)->file), FIX2INT(RANY(obj)->line));
07370
07371 if (is_pointer_to_heap(objspace, (void *)obj)) {
07372 fprintf(stderr, "pointer to heap?: true\n");
07373 }
07374 else {
07375 fprintf(stderr, "pointer to heap?: false\n");
07376 return;
07377 }
07378
07379 fprintf(stderr, "marked? : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
07380 #if USE_RGENGC
07381 #if RGENGC_THREEGEN
07382 fprintf(stderr, "young? : %s\n", RVALUE_YOUNG_P(obj) ? "true" : "false");
07383 #endif
07384 fprintf(stderr, "old? : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
07385 fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_PROTECTED(obj) ? "true" : "false");
07386 fprintf(stderr, "remembered? : %s\n", MARKED_IN_BITMAP(GET_HEAP_REMEMBERSET_BITS(obj), obj) ? "true" : "false");
07387 #endif
07388
07389 if (is_lazy_sweeping(heap_eden)) {
07390 fprintf(stderr, "lazy sweeping?: true\n");
07391 fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
07392 }
07393 else {
07394 fprintf(stderr, "lazy sweeping?: false\n");
07395 }
07396 }
07397
07398 static VALUE
07399 gcdebug_sentinel(VALUE obj, VALUE name)
07400 {
07401 fprintf(stderr, "WARNING: object %s(%p) was inadvertently collected\n", (char *)name, (void *)obj);
07402 return Qnil;
07403 }
07404
07405 void
07406 rb_gcdebug_sentinel(VALUE obj, const char *name)
07407 {
07408 rb_define_finalizer(obj, rb_proc_new(gcdebug_sentinel, (VALUE)name));
07409 }
07410 #endif
07411
07412
07473
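/*
 * Init_GC() wires up the Ruby-visible side of this file: the GC module
 * (start, enable, disable, stress, count, stat, latest_gc_info, plus the
 * INTERNAL_CONSTANTS and OPTS constants), the GC::Profiler module, the
 * ObjectSpace module functions (each_object, define_finalizer,
 * undefine_finalizer, _id2ref, count_objects), and ObjectSpace::WeakMap.
 * Usage sketch (illustrative):
 *
 *   GC.count                              #=> number of GC runs so far
 *   GC::Profiler.enable
 *   ObjectSpace.each_object(Class) { |c| ... }
 */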
07474 void
07475 Init_GC(void)
07476 {
07477 VALUE rb_mObjSpace;
07478 VALUE rb_mProfiler;
07479 VALUE gc_constants;
07480
07481 rb_mGC = rb_define_module("GC");
07482 rb_define_singleton_method(rb_mGC, "start", gc_start_internal, -1);
07483 rb_define_singleton_method(rb_mGC, "enable", rb_gc_enable, 0);
07484 rb_define_singleton_method(rb_mGC, "disable", rb_gc_disable, 0);
07485 rb_define_singleton_method(rb_mGC, "stress", gc_stress_get, 0);
07486 rb_define_singleton_method(rb_mGC, "stress=", gc_stress_set, 1);
07487 rb_define_singleton_method(rb_mGC, "count", gc_count, 0);
07488 rb_define_singleton_method(rb_mGC, "stat", gc_stat, -1);
07489 rb_define_singleton_method(rb_mGC, "latest_gc_info", gc_latest_gc_info, -1);
07490 rb_define_method(rb_mGC, "garbage_collect", gc_start_internal, -1);
07491
07492 gc_constants = rb_hash_new();
07493 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(sizeof(RVALUE)));
07494 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_OBJ_LIMIT")), SIZET2NUM(HEAP_OBJ_LIMIT));
07495 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_BITMAP_SIZE")), SIZET2NUM(HEAP_BITMAP_SIZE));
07496 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_BITMAP_PLANES")), SIZET2NUM(HEAP_BITMAP_PLANES));
07497 OBJ_FREEZE(gc_constants);
07498 rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);
07499
07500 rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
07501 rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
07502 rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
07503 rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
07504 rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
07505 rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
07506 rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
07507 rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
07508 rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
07509
07510 rb_mObjSpace = rb_define_module("ObjectSpace");
07511 rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
07512 rb_define_module_function(rb_mObjSpace, "garbage_collect", gc_start_internal, -1);
07513
07514 rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
07515 rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
07516
07517 rb_define_module_function(rb_mObjSpace, "_id2ref", id2ref, 1);
07518
07519 nomem_error = rb_exc_new3(rb_eNoMemError,
07520 rb_obj_freeze(rb_str_new2("failed to allocate memory")));
07521 OBJ_TAINT(nomem_error);
07522 OBJ_FREEZE(nomem_error);
07523
07524 rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
07525 rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
07526
07527 rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
07528
07529 {
07530 VALUE rb_cWeakMap = rb_define_class_under(rb_mObjSpace, "WeakMap", rb_cObject);
07531 rb_define_alloc_func(rb_cWeakMap, wmap_allocate);
07532 rb_define_method(rb_cWeakMap, "[]=", wmap_aset, 2);
07533 rb_define_method(rb_cWeakMap, "[]", wmap_aref, 1);
07534 rb_define_method(rb_cWeakMap, "include?", wmap_has_key, 1);
07535 rb_define_method(rb_cWeakMap, "member?", wmap_has_key, 1);
07536 rb_define_method(rb_cWeakMap, "key?", wmap_has_key, 1);
07537 rb_define_method(rb_cWeakMap, "inspect", wmap_inspect, 0);
07538 rb_define_method(rb_cWeakMap, "each", wmap_each, 0);
07539 rb_define_method(rb_cWeakMap, "each_pair", wmap_each, 0);
07540 rb_define_method(rb_cWeakMap, "each_key", wmap_each_key, 0);
07541 rb_define_method(rb_cWeakMap, "each_value", wmap_each_value, 0);
07542 rb_define_method(rb_cWeakMap, "keys", wmap_keys, 0);
07543 rb_define_method(rb_cWeakMap, "values", wmap_values, 0);
07544 rb_define_method(rb_cWeakMap, "size", wmap_size, 0);
07545 rb_define_method(rb_cWeakMap, "length", wmap_size, 0);
07546 rb_define_private_method(rb_cWeakMap, "finalize", wmap_finalize, 1);
07547 rb_include_module(rb_cWeakMap, rb_mEnumerable);
07548 }
07549
07550
07551 rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency, 0);
07552 #if MALLOC_ALLOCATED_SIZE
07553 rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
07554 rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
07555 #endif
07556
07557
07558 {
07559 VALUE opts;
07560 rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
07561 #define OPT(o) if (o) rb_ary_push(opts, rb_str_new2(#o))
07562 OPT(GC_DEBUG);
07563 OPT(USE_RGENGC);
07564 OPT(RGENGC_DEBUG);
07565 OPT(RGENGC_CHECK_MODE);
07566 OPT(RGENGC_PROFILE);
07567 OPT(RGENGC_THREEGEN);
07568 OPT(RGENGC_ESTIMATE_OLDMALLOC);
07569 OPT(GC_PROFILE_MORE_DETAIL);
07570 OPT(GC_ENABLE_LAZY_SWEEP);
07571 OPT(CALC_EXACT_MALLOC_SIZE);
07572 OPT(MALLOC_ALLOCATED_SIZE);
07573 OPT(MALLOC_ALLOCATED_SIZE_CHECK);
07574 OPT(GC_PROFILE_DETAIL_MEMORY);
07575 #undef OPT
07576 }
07577 }
07578