00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011 #include "ruby/ruby.h"
00012 #include "ruby/vm.h"
00013 #include "ruby/st.h"
00014 #include "ruby/encoding.h"
00015 #include "internal.h"
00016
00017 #include "gc.h"
00018 #include "vm_core.h"
00019 #include "iseq.h"
00020 #include "eval_intern.h"
00021 #include "probes.h"
00022 #include "probes_helper.h"
00023
00024 static inline VALUE *
00025 VM_EP_LEP(VALUE *ep)
00026 {
00027 while (!VM_EP_LEP_P(ep)) {
00028 ep = VM_EP_PREV_EP(ep);
00029 }
00030 return ep;
00031 }
00032
00033 VALUE *
00034 rb_vm_ep_local_ep(VALUE *ep)
00035 {
00036 return VM_EP_LEP(ep);
00037 }
00038
00039 static inline VALUE *
00040 VM_CF_LEP(rb_control_frame_t *cfp)
00041 {
00042 return VM_EP_LEP(cfp->ep);
00043 }
00044
00045 static inline VALUE *
00046 VM_CF_PREV_EP(rb_control_frame_t * cfp)
00047 {
00048 return VM_EP_PREV_EP((cfp)->ep);
00049 }
00050
00051 static inline rb_block_t *
00052 VM_CF_BLOCK_PTR(rb_control_frame_t *cfp)
00053 {
00054 VALUE *ep = VM_CF_LEP(cfp);
00055 return VM_EP_BLOCK_PTR(ep);
00056 }
00057
00058 rb_block_t *
00059 rb_vm_control_frame_block_ptr(rb_control_frame_t *cfp)
00060 {
00061 return VM_CF_BLOCK_PTR(cfp);
00062 }
00063
#if VM_COLLECT_USAGE_DETAILS
/* instruction/operand/register usage statistics collectors (defined below) */
static void vm_collect_usage_operand(int insn, int n, VALUE op);
static void vm_collect_usage_insn(int insn);
static void vm_collect_usage_register(int reg, int isset);
#endif

static VALUE
vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self, VALUE defined_class,
	       int argc, const VALUE *argv, const rb_block_t *blockptr);

/* Monotonic serial counters; bumping them invalidates the caches that
 * snapshot their values (method cache, constant cache, class serials). */
static rb_serial_t ruby_vm_global_method_state = 1;
static rb_serial_t ruby_vm_global_constant_state = 1;
static rb_serial_t ruby_vm_class_serial = 1;
00077
00078 #include "vm_insnhelper.h"
00079 #include "vm_insnhelper.c"
00080 #include "vm_exec.h"
00081 #include "vm_exec.c"
00082
00083 #include "vm_method.c"
00084 #include "vm_eval.c"
00085
00086 #include <assert.h>
00087
00088 #define BUFSIZE 0x100
00089 #define PROCDEBUG 0
00090
/* Return a fresh class serial (used to tag classes for cache checks). */
rb_serial_t
rb_next_class_serial(void)
{
    return NEXT_CLASS_SERIAL();
}
00096
/* Core VM classes/modules exposed to Ruby code. */
VALUE rb_cRubyVM;
VALUE rb_cThread;
VALUE rb_cEnv;                  /* internal environment object class */
VALUE rb_mRubyVMFrozenCore;

/* Global VM state. */
VALUE ruby_vm_const_missing_count = 0;
short ruby_vm_redefined_flag[BOP_LAST_];   /* per-basic-operation redefinition flags */
rb_thread_t *ruby_current_thread = 0;
rb_vm_t *ruby_current_vm = 0;
rb_event_flag_t ruby_vm_event_flags;

static void thread_free(void *ptr);
00109
00110 void
00111 rb_vm_inc_const_missing_count(void)
00112 {
00113 ruby_vm_const_missing_count +=1;
00114 }
00115
00116
00117
00118
00119
00120
00121
00122
00123
00124
00125
00126
00127
00128
00129
00130
00131
00132
00133
00134
00135
00136
00137
/*
 * Implementation of RubyVM.stat: expose the VM's cache-invalidation
 * serial counters.  With no argument, returns a new Hash of all
 * counters; with a Symbol, returns that one counter (ArgumentError for
 * an unknown key); with a Hash, fills it in place and returns it.
 */
static VALUE
vm_stat(int argc, VALUE *argv, VALUE self)
{
    static VALUE sym_global_method_state, sym_global_constant_state, sym_class_serial;
    VALUE arg = Qnil;
    VALUE hash = Qnil, key = Qnil;

    if (rb_scan_args(argc, argv, "01", &arg) == 1) {
	if (SYMBOL_P(arg))
	    key = arg;
	else if (RB_TYPE_P(arg, T_HASH))
	    hash = arg;
	else
	    rb_raise(rb_eTypeError, "non-hash or symbol given");
    } else if (arg == Qnil) {
	hash = rb_hash_new();
    }

    /* Lazily intern the result-key symbols on first call. */
    if (sym_global_method_state == 0) {
#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
	S(global_method_state);
	S(global_constant_state);
	S(class_serial);
#undef S
    }

    /* For a Symbol key, return the matching counter immediately;
     * otherwise record each counter in the hash (if one exists). */
#define SET(name, attr) \
    if (key == sym_##name) \
	return SERIALT2NUM(attr); \
    else if (hash != Qnil) \
	rb_hash_aset(hash, sym_##name, SERIALT2NUM(attr));

    SET(global_method_state, ruby_vm_global_method_state);
    SET(global_constant_state, ruby_vm_global_constant_state);
    SET(class_serial, ruby_vm_class_serial);
#undef SET

    /* A Symbol was given but matched none of the counters. */
    if (key != Qnil)
	rb_raise(rb_eArgError, "unknown key: %s", RSTRING_PTR(rb_id2str(SYM2ID(key))));

    return hash;
}
00180
00181
00182
/* Push a TOP frame executing iseqval on th's stack.  Raises TypeError
 * unless the iseq was compiled as a toplevel sequence. */
static void
vm_set_top_stack(rb_thread_t * th, VALUE iseqval)
{
    rb_iseq_t *iseq;
    GetISeqPtr(iseqval, iseq);

    if (iseq->type != ISEQ_TYPE_TOP) {
	rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
    }

    /* FINISH flag: vm_exec returns when this frame is popped. */
    vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP | VM_FRAME_FLAG_FINISH,
		  th->top_self, rb_cObject, VM_ENVVAL_BLOCK_PTR(0),
		  iseq->iseq_encoded, th->cfp->sp, iseq->local_size, 0, iseq->stack_max);
}
00198
/* Push an EVAL frame running iseqval in the context of base_block
 * (its self, klass and ep).  A non-NULL cref is stored in the new
 * frame's ep[-1] slot to override the lexical scope. */
static void
vm_set_eval_stack(rb_thread_t * th, VALUE iseqval, const NODE *cref, rb_block_t *base_block)
{
    rb_iseq_t *iseq;
    GetISeqPtr(iseqval, iseq);

    vm_push_frame(th, iseq, VM_FRAME_MAGIC_EVAL | VM_FRAME_FLAG_FINISH,
		  base_block->self, base_block->klass,
		  VM_ENVVAL_PREV_EP_PTR(base_block->ep), iseq->iseq_encoded,
		  th->cfp->sp, iseq->local_size, 0, iseq->stack_max);

    if (cref) {
	th->cfp->ep[-1] = (VALUE)cref;
    }
}
00214
/* Push the frame for the main script, evaluated under TOPLEVEL_BINDING. */
static void
vm_set_main_stack(rb_thread_t *th, VALUE iseqval)
{
    VALUE toplevel_binding = rb_const_get(rb_cObject, rb_intern("TOPLEVEL_BINDING"));
    rb_binding_t *bind;
    rb_iseq_t *iseq;
    rb_env_t *env;

    GetBindingPtr(toplevel_binding, bind);
    GetEnvPtr(bind->env, env);
    vm_set_eval_stack(th, iseqval, 0, &env->block);

    /* If the script declares locals, rebuild the binding's env so those
     * locals become visible through TOPLEVEL_BINDING. */
    GetISeqPtr(iseqval, iseq);
    if (bind && iseq->local_size > 0) {
	bind->env = rb_vm_make_env_object(th, th->cfp);
    }
}
00233
00234 rb_control_frame_t *
00235 rb_vm_get_binding_creatable_next_cfp(rb_thread_t *th, const rb_control_frame_t *cfp)
00236 {
00237 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
00238 if (cfp->iseq) {
00239 return (rb_control_frame_t *)cfp;
00240 }
00241 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00242 }
00243 return 0;
00244 }
00245
00246 rb_control_frame_t *
00247 rb_vm_get_ruby_level_next_cfp(rb_thread_t *th, const rb_control_frame_t *cfp)
00248 {
00249 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
00250 if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
00251 return (rb_control_frame_t *)cfp;
00252 }
00253 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00254 }
00255 return 0;
00256 }
00257
/* Find the Ruby-level frame acting as the caller of cfp.  Non-Ruby
 * frames are skipped only while marked PASSED (control merely passed
 * through them); returns 0 when no such caller exists. */
static rb_control_frame_t *
vm_get_ruby_level_caller_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
{
    if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
	return cfp;
    }

    cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
	if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
	    return cfp;
	}

	/* A non-PASSED frame is a real caller boundary: stop searching. */
	if ((cfp->flag & VM_FRAME_FLAG_PASSED) == 0) {
	    break;
	}
	cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
    return 0;
}
00279
/* Pop the current C-function frame, firing the c-return event hook and
 * the dtrace method-return probe before the frame disappears. */
void
rb_vm_pop_cfunc_frame(void)
{
    rb_thread_t *th = GET_THREAD();
    const rb_method_entry_t *me = th->cfp->me;
    EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, th->cfp->self, me->called_id, me->klass, Qnil);
    RUBY_DTRACE_CMETHOD_RETURN_HOOK(th, me->klass, me->called_id);
    vm_pop_frame(th);
}
00289
/* Pop frames until th->cfp reaches cfp.  CFUNC frames are popped via
 * rb_vm_pop_cfunc_frame so their return hooks still fire. */
void
rb_vm_rewind_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
{
    while (th->cfp != cfp) {
#if VMDEBUG
	printf("skipped frame: %s\n", vm_frametype_name(th->cfp));
#endif
	if (VM_FRAME_TYPE(th->cfp) != VM_FRAME_MAGIC_CFUNC) {
	    vm_pop_frame(th);
	}
	else {
	    rb_vm_pop_cfunc_frame();
	}
    }
}
00306
00307
/* Pop the current thread's top control frame (no event hooks). */
void
rb_frame_pop(void)
{
    vm_pop_frame(GET_THREAD());
}
00314
00315
00316
/* Register func to run when the VM is destroyed.
 * NOTE(review): at_exit is pushed to through a (VALUE) cast of the
 * field's address, so it is presumably an array object embedded in
 * rb_vm_t — confirm against vm_core.h before changing this. */
void
ruby_vm_at_exit(void (*func)(rb_vm_t *))
{
    rb_ary_push((VALUE)&GET_VM()->at_exit, (VALUE)func);
}
00322
/* Run and consume the registered VM-at-exit hooks in LIFO order, then
 * free the backing array. */
static void
ruby_vm_run_at_exit_hooks(rb_vm_t *vm)
{
    VALUE hook = (VALUE)&vm->at_exit;

    while (RARRAY_LEN(hook) > 0) {
	typedef void rb_vm_at_exit_func(rb_vm_t*);
	rb_vm_at_exit_func *func = (rb_vm_at_exit_func*)rb_ary_pop(hook);
	(*func)(vm);
    }
    rb_ary_free(hook);
}
00335
00336
00337
00338
00339
00340
00341
00342
00343
00344
/* Env handling */

/* True when `env` (an ep) does NOT point into th's VM stack, i.e. the
 * environment has already been moved to the heap. */
#define ENV_IN_HEAP_P(th, env) \
    (!((th)->stack <= (env) && (env) < ((th)->stack + (th)->stack_size)))
/* The slot just above a heap ep holds the env object itself. */
#define ENV_VAL(env) ((env)[1])
00348
/* GC mark callback for env objects: mark the copied locals, the parent
 * env, and the captured block's self/proc/iseq. */
static void
env_mark(void * const ptr)
{
    RUBY_MARK_ENTER("env");
    if (ptr) {
	const rb_env_t * const env = ptr;

	if (env->env) {
	    RUBY_GC_INFO("env->env\n");
	    rb_gc_mark_locations(env->env, env->env + env->env_size);
	}

	RUBY_GC_INFO("env->prev_envval\n");
	RUBY_MARK_UNLESS_NULL(env->prev_envval);
	RUBY_MARK_UNLESS_NULL(env->block.self);
	RUBY_MARK_UNLESS_NULL(env->block.proc);

	if (env->block.iseq) {
	    /* The block's iseq slot may hold a NODE (ifunc block)
	     * instead of a real iseq; mark accordingly. */
	    if (BUILTIN_TYPE(env->block.iseq) == T_NODE) {
		RUBY_MARK_UNLESS_NULL((VALUE)env->block.iseq);
	    }
	    else {
		RUBY_MARK_UNLESS_NULL(env->block.iseq->self);
	    }
	}
    }
    RUBY_MARK_LEAVE("env");
}
00378
/* GC free callback: release the env's copied locals, then the struct. */
static void
env_free(void * const ptr)
{
    RUBY_FREE_ENTER("env");
    if (ptr) {
	rb_env_t *const env = ptr;
	RUBY_FREE_UNLESS_NULL(env->env);
	ruby_xfree(ptr);
    }
    RUBY_FREE_LEAVE("env");
}
00390
00391 static size_t
00392 env_memsize(const void *ptr)
00393 {
00394 if (ptr) {
00395 const rb_env_t * const env = ptr;
00396 size_t size = sizeof(rb_env_t);
00397 if (env->env) {
00398 size += env->env_size * sizeof(VALUE);
00399 }
00400 return size;
00401 }
00402 return 0;
00403 }
00404
/* TypedData definition for env objects. */
static const rb_data_type_t env_data_type = {
    "VM/env",
    {env_mark, env_free, env_memsize,},
    NULL, NULL, RUBY_TYPED_FREE_IMMEDIATELY
};
00410
00411 static VALUE
00412 env_alloc(void)
00413 {
00414 VALUE obj;
00415 rb_env_t *env;
00416 obj = TypedData_Make_Struct(rb_cEnv, rb_env_t, &env_data_type, env);
00417 env->env = 0;
00418 env->prev_envval = 0;
00419 env->block.iseq = 0;
00420 return obj;
00421 }
00422
static VALUE check_env_value(VALUE envval);

/* Debug helper: dump an env (and, recursively, its parent chain) to
 * stderr.  Always returns 1 ("looks valid"). */
static int
check_env(rb_env_t * const env)
{
    fprintf(stderr, "---\n");
    fprintf(stderr, "envptr: %p\n", (void *)&env->block.ep[0]);
    fprintf(stderr, "envval: %10p ", (void *)env->block.ep[1]);
    dp(env->block.ep[1]);
    fprintf(stderr, "ep: %10p\n", (void *)env->block.ep);
    if (env->prev_envval) {
	fprintf(stderr, ">>\n");
	check_env_value(env->prev_envval);
	fprintf(stderr, "<<\n");
    }
    return 1;
}
00440
00441 static VALUE
00442 check_env_value(VALUE envval)
00443 {
00444 rb_env_t *env;
00445 GetEnvPtr(envval, env);
00446
00447 if (check_env(env)) {
00448 return envval;
00449 }
00450 rb_bug("invalid env");
00451 return Qnil;
00452 }
00453
/* Move the environment at envptr (an ep on th's VM stack) into a heap
 * env object, first recursing to move any parent environments between
 * envptr and endptr.  Rewrites the stack slot and cfp->ep to point at
 * the heap copy, and returns the env VALUE. */
static VALUE
vm_make_env_each(rb_thread_t * const th, rb_control_frame_t * const cfp,
		 VALUE *envptr, VALUE * const endptr)
{
    VALUE envval, penvval = 0;
    rb_env_t *env;
    VALUE *nenvptr;
    int i, local_size;

    if (ENV_IN_HEAP_P(th, envptr)) {
	/* Already heapified: the env object sits just above the ep. */
	return ENV_VAL(envptr);
    }

    if (envptr != endptr) {
	/* Heapify the parent environment first. */
	VALUE *penvptr = GC_GUARDED_PTR_REF(*envptr);
	rb_control_frame_t *pcfp = cfp;

	if (ENV_IN_HEAP_P(th, penvptr)) {
	    penvval = ENV_VAL(penvptr);
	}
	else {
	    /* Locate the older control frame that owns the parent ep. */
	    while (pcfp->ep != penvptr) {
		pcfp++;
		if (pcfp->ep == 0) {
		    SDR();
		    rb_bug("invalid ep");
		}
	    }
	    penvval = vm_make_env_each(th, pcfp, penvptr, endptr);
	    *envptr = VM_ENVVAL_PREV_EP_PTR(pcfp->ep);
	}
    }

    /* Allocate the env object and copy the frame's locals into it. */
    envval = env_alloc();
    GetEnvPtr(envval, env);

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
	local_size = 2;
    }
    else {
	local_size = cfp->iseq->local_size;
    }

    env->env_size = local_size + 1 + 1;
    env->local_size = local_size;
    env->env = ALLOC_N(VALUE, env->env_size);
    env->prev_envval = penvval;

    for (i = 0; i <= local_size; i++) {
	env->env[i] = envptr[-local_size + i];
#if 0
	fprintf(stderr, "%2d ", &envptr[-local_size + i] - th->stack); dp(env->env[i]);
	if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
	    /* clear the stack slot so GC does not see stale values */
	    envptr[-local_size + i] = 0;
	}
#endif
    }

    /* Point the old stack slot and the frame's ep at the heap copy;
     * the slot above the new ep stores the env object itself. */
    *envptr = envval;
    nenvptr = &env->env[i - 1];
    nenvptr[1] = envval;

    cfp->ep = nenvptr;

    /* Capture the block that belongs to this frame. */
    env->block.self = cfp->self;
    env->block.ep = cfp->ep;
    env->block.iseq = cfp->iseq;

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
	/* Not a Ruby-level frame: there is no iseq worth keeping. */
	env->block.iseq = 0;
    }
    return envval;
}
00532
00533 static int
00534 collect_local_variables_in_iseq(rb_iseq_t *iseq, const VALUE ary)
00535 {
00536 int i;
00537 if (!iseq) return 0;
00538 for (i = 0; i < iseq->local_table_size; i++) {
00539 ID lid = iseq->local_table[i];
00540 if (rb_is_local_id(lid)) {
00541 rb_ary_push(ary, ID2SYM(lid));
00542 }
00543 }
00544 return 1;
00545 }
00546
/* Collect local variable names from env and every parent env.  The
 * comma expression collects the current env's names before testing
 * whether a parent exists, so the outermost env is included too. */
static int
collect_local_variables_in_env(rb_env_t * env, const VALUE ary)
{
    while (collect_local_variables_in_iseq(env->block.iseq, ary),
	   env->prev_envval) {
	GetEnvPtr(env->prev_envval, env);
    }
    return 0;
}
00557
00558 static int
00559 vm_collect_local_variables_in_heap(rb_thread_t *th, VALUE *ep, VALUE ary)
00560 {
00561 if (ENV_IN_HEAP_P(th, ep)) {
00562 rb_env_t *env;
00563 GetEnvPtr(ENV_VAL(ep), env);
00564 collect_local_variables_in_env(env, ary);
00565 return 1;
00566 }
00567 else {
00568 return 0;
00569 }
00570 }
00571
static void vm_rewrite_ep_in_errinfo(rb_thread_t *th);
static VALUE vm_make_proc_from_block(rb_thread_t *th, rb_block_t *block);
static VALUE vm_make_env_object(rb_thread_t * th, rb_control_frame_t *cfp, VALUE *blockprocptr);

/* Public entry: heapify cfp's environment, discarding the block-proc
 * out-parameter used by the internal version. */
VALUE
rb_vm_make_env_object(rb_thread_t * th, rb_control_frame_t *cfp)
{
    VALUE blockprocval;
    return vm_make_env_object(th, cfp, &blockprocval);
}
00582
/* Heapify the environment chain reachable from cfp.  If the frame's
 * local ep carries a block, wrap it in a Proc first (written to
 * *blockprocptr) so the block survives the move off the stack. */
static VALUE
vm_make_env_object(rb_thread_t *th, rb_control_frame_t *cfp, VALUE *blockprocptr)
{
    VALUE envval;
    VALUE *lep = VM_CF_LEP(cfp);
    rb_block_t *blockptr = VM_EP_BLOCK_PTR(lep);

    if (blockptr) {
	VALUE blockprocval = vm_make_proc_from_block(th, blockptr);
	rb_proc_t *p;
	GetProcPtr(blockprocval, p);
	/* Store the Proc's heap-resident block back into the ep slot. */
	lep[0] = VM_ENVVAL_BLOCK_PTR(&p->block);
	*blockprocptr = blockprocval;
    }

    envval = vm_make_env_each(th, cfp, cfp->ep, lep);
    vm_rewrite_ep_in_errinfo(th);

    if (PROCDEBUG) {
	check_env_value(envval);
    }

    return envval;
}
00607
/* After envs move to the heap, throw-objects held in rescue/ensure
 * frames may still carry catch-point eps pointing into the stack;
 * rewrite those to point at the heap env's locals instead. */
static void
vm_rewrite_ep_in_errinfo(rb_thread_t *th)
{
    rb_control_frame_t *cfp = th->cfp;
    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
	if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) &&
	    (cfp->iseq->type == ISEQ_TYPE_RESCUE ||
	     cfp->iseq->type == ISEQ_TYPE_ENSURE)) {
	    VALUE errinfo = cfp->ep[-2];
	    if (RB_TYPE_P(errinfo, T_NODE)) {	/* a throw object */
		VALUE *escape_ep = GET_THROWOBJ_CATCH_POINT(errinfo);
		if (! ENV_IN_HEAP_P(th, escape_ep)) {
		    VALUE epval = *escape_ep;
		    /* Only rewrite if the slot already holds an env object. */
		    if (!SPECIAL_CONST_P(epval) && RBASIC(epval)->klass == rb_cEnv) {
			rb_env_t *epenv;
			GetEnvPtr(epval, epenv);
			SET_THROWOBJ_CATCH_POINT(errinfo, (VALUE)(epenv->env + epenv->local_size));
		    }
		}
	    }
	}
	cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
}
00633
00634 void
00635 rb_vm_stack_to_heap(rb_thread_t *th)
00636 {
00637 rb_control_frame_t *cfp = th->cfp;
00638 while ((cfp = rb_vm_get_binding_creatable_next_cfp(th, cfp)) != 0) {
00639 rb_vm_make_env_object(th, cfp);
00640 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00641 }
00642 }
00643
00644
00645
00646 static VALUE
00647 vm_make_proc_from_block(rb_thread_t *th, rb_block_t *block)
00648 {
00649 if (!block->proc) {
00650 block->proc = rb_vm_make_proc(th, block, rb_cProc);
00651 }
00652 return block->proc;
00653 }
00654
/* Create a Proc of class klass wrapping block.  The surrounding
 * environment is heapified first so the Proc can outlive its frame.
 * It is a bug to call this when the block already has a Proc. */
VALUE
rb_vm_make_proc(rb_thread_t *th, const rb_block_t *block, VALUE klass)
{
    VALUE procval, envval, blockprocval = 0;
    rb_proc_t *proc;
    rb_control_frame_t *cfp = RUBY_VM_GET_CFP_FROM_BLOCK_PTR(block);

    if (block->proc) {
	rb_bug("rb_vm_make_proc: Proc value is already created.");
    }

    envval = vm_make_env_object(th, cfp, &blockprocval);

    if (PROCDEBUG) {
	check_env_value(envval);
    }
    procval = rb_proc_alloc(klass);
    GetProcPtr(procval, proc);
    proc->blockprocval = blockprocval;
    proc->block.self = block->self;
    proc->block.klass = block->klass;
    proc->block.ep = block->ep;
    proc->block.iseq = block->iseq;
    proc->block.proc = procval;
    proc->envval = envval;
    proc->safe_level = th->safe_level;

    if (VMDEBUG) {
	/* After heapification the block's ep must no longer point into
	 * the thread's VM stack. */
	if (th->stack < block->ep && block->ep < th->stack + th->stack_size) {
	    rb_bug("invalid ptr: block->ep");
	}
    }

    return procval;
}
00690
00691
00692
/* Build a Binding for src_cfp.  Every binding-capable frame down to the
 * nearest Ruby-level frame is heapified so the Binding stays valid
 * after those frames are popped.  Raises on top of a Fiber. */
VALUE
rb_vm_make_binding(rb_thread_t *th, const rb_control_frame_t *src_cfp)
{
    rb_control_frame_t *cfp = rb_vm_get_binding_creatable_next_cfp(th, src_cfp);
    rb_control_frame_t *ruby_level_cfp = rb_vm_get_ruby_level_next_cfp(th, src_cfp);
    VALUE bindval, envval;
    rb_binding_t *bind;
    VALUE blockprocval = 0;

    if (cfp == 0 || ruby_level_cfp == 0) {
	rb_raise(rb_eRuntimeError, "Can't create Binding Object on top of Fiber.");
    }

    /* Heapify each binding-capable frame down to the Ruby-level one;
     * the last envval produced belongs to that Ruby-level frame. */
    while (1) {
	envval = vm_make_env_object(th, cfp, &blockprocval);
	if (cfp == ruby_level_cfp) {
	    break;
	}
	cfp = rb_vm_get_binding_creatable_next_cfp(th, RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
    }

    bindval = rb_binding_alloc(rb_cBinding);
    GetBindingPtr(bindval, bind);
    bind->env = envval;
    bind->path = ruby_level_cfp->iseq->location.path;
    bind->blockprocval = blockprocval;
    bind->first_lineno = rb_vm_get_sourceline(ruby_level_cfp);

    return bindval;
}
00723
/* Add dyncount new local variables (named in dynvars) to bind's
 * environment.  Works by compiling a dummy EVAL iseq whose scope
 * declares the new names, pushing a frame for it on top of the
 * binding's block, and capturing the resulting env back into the
 * binding.  Returns the new env's variable array (0 on bad count). */
VALUE *
rb_binding_add_dynavars(rb_binding_t *bind, int dyncount, const ID *dynvars)
{
    VALUE envval = bind->env, path = bind->path, iseqval;
    rb_env_t *env;
    rb_block_t *base_block;
    rb_thread_t *th = GET_THREAD();
    rb_iseq_t *base_iseq;
    NODE *node = 0;
    ID minibuf[4], *dyns = minibuf;
    VALUE idtmp = 0;
    VALUE blockprocval = 0;

    if (dyncount < 0) return 0;

    GetEnvPtr(envval, env);

    base_block = &env->block;
    base_iseq = base_block->iseq;

    /* Build the scope's id table: [count, id0, id1, ...]. */
    if (dyncount >= numberof(minibuf)) dyns = ALLOCV_N(ID, idtmp, dyncount + 1);

    dyns[0] = dyncount;
    MEMCPY(dyns + 1, dynvars, ID, dyncount);
    node = NEW_NODE(NODE_SCOPE, dyns, 0, 0);

    iseqval = rb_iseq_new(node, base_iseq->location.label, path, path,
			  base_iseq->self, ISEQ_TYPE_EVAL);
    /* The id table was borrowed by the iseq; detach it from the node. */
    node->u1.tbl = 0;
    ALLOCV_END(idtmp);

    vm_set_eval_stack(th, iseqval, 0, base_block);
    bind->env = vm_make_env_object(th, th->cfp, &blockprocval);
    bind->blockprocval = blockprocval;
    vm_pop_frame(th);
    GetEnvPtr(bind->env, env);

    return env->env;
}
00763
00764
00765
/* Invoke a block from C code.  Dispatches on the kind of block:
 *   - special-const iseq: empty block, returns nil;
 *   - real iseq: push a BLOCK/LAMBDA frame and run the VM;
 *   - NODE iseq: an ifunc block, dispatched via vm_yield_with_cfunc. */
static inline VALUE
invoke_block_from_c(rb_thread_t *th, const rb_block_t *block,
		    VALUE self, int argc, const VALUE *argv,
		    const rb_block_t *blockptr, const NODE *cref,
		    VALUE defined_class)
{
    if (SPECIAL_CONST_P(block->iseq)) {
	return Qnil;
    }
    else if (BUILTIN_TYPE(block->iseq) != T_NODE) {
	VALUE ret;
	const rb_iseq_t *iseq = block->iseq;
	const rb_control_frame_t *cfp;
	int i, opt_pc, arg_size = iseq->arg_size;
	int type = block_proc_is_lambda(block->proc) ? VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK;
	const rb_method_entry_t *me = th->passed_bmethod_me;
	th->passed_bmethod_me = 0;	/* consume the one-shot bmethod marker */
	cfp = th->cfp;

	/* Copy the arguments onto the value stack. */
	for (i=0; i<argc; i++) {
	    cfp->sp[i] = argv[i];
	}

	opt_pc = vm_yield_setup_args(th, iseq, argc, cfp->sp, blockptr,
				     type == VM_FRAME_MAGIC_LAMBDA);

	if (me != 0) {
	    /* bmethod: the block is the body of a Method object, so the
	     * frame carries the method entry and fires call/return hooks. */
	    vm_push_frame(th, iseq, type | VM_FRAME_FLAG_FINISH | VM_FRAME_FLAG_BMETHOD,
			  self, defined_class,
			  VM_ENVVAL_PREV_EP_PTR(block->ep),
			  iseq->iseq_encoded + opt_pc,
			  cfp->sp + arg_size, iseq->local_size - arg_size,
			  me, iseq->stack_max);

	    RUBY_DTRACE_METHOD_ENTRY_HOOK(th, me->klass, me->called_id);
	    EXEC_EVENT_HOOK(th, RUBY_EVENT_CALL, self, me->called_id, me->klass, Qnil);
	}
	else {
	    vm_push_frame(th, iseq, type | VM_FRAME_FLAG_FINISH,
			  self, defined_class,
			  VM_ENVVAL_PREV_EP_PTR(block->ep),
			  iseq->iseq_encoded + opt_pc,
			  cfp->sp + arg_size, iseq->local_size - arg_size,
			  0, iseq->stack_max);
	}

	if (cref) {
	    th->cfp->ep[-1] = (VALUE)cref;
	}

	ret = vm_exec(th);

	if (me) {
	    /* bmethod: fire the matching return hooks. */
	    EXEC_EVENT_HOOK(th, RUBY_EVENT_RETURN, self, me->called_id, me->klass, ret);
	    RUBY_DTRACE_METHOD_RETURN_HOOK(th, me->klass, me->called_id);
	}

	return ret;
    }
    else {
	return vm_yield_with_cfunc(th, block, self, argc, argv, blockptr);
    }
}
00831
00832 static inline const rb_block_t *
00833 check_block(rb_thread_t *th)
00834 {
00835 const rb_block_t *blockptr = VM_CF_BLOCK_PTR(th->cfp);
00836
00837 if (blockptr == 0) {
00838 rb_vm_localjump_error("no block given", Qnil, 0);
00839 }
00840
00841 return blockptr;
00842 }
00843
00844 static inline VALUE
00845 vm_yield_with_cref(rb_thread_t *th, int argc, const VALUE *argv, const NODE *cref)
00846 {
00847 const rb_block_t *blockptr = check_block(th);
00848 return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, cref,
00849 blockptr->klass);
00850 }
00851
00852 static inline VALUE
00853 vm_yield(rb_thread_t *th, int argc, const VALUE *argv)
00854 {
00855 const rb_block_t *blockptr = check_block(th);
00856 return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, 0,
00857 blockptr->klass);
00858 }
00859
00860 static inline VALUE
00861 vm_yield_with_block(rb_thread_t *th, int argc, const VALUE *argv, const rb_block_t *blockargptr)
00862 {
00863 const rb_block_t *blockptr = check_block(th);
00864 return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, blockargptr, 0,
00865 blockptr->klass);
00866 }
00867
/* Invoke proc with explicit self/defined_class.  For procs not derived
 * from methods, the proc's captured $SAFE level is applied for the call
 * and restored afterwards; non-local exits are re-thrown after the tag
 * is popped and the safe level restored. */
static VALUE
vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self, VALUE defined_class,
	       int argc, const VALUE *argv, const rb_block_t *blockptr)
{
    VALUE val = Qundef;
    int state;
    volatile int stored_safe = th->safe_level;

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
	if (!proc->is_from_method) {
	    th->safe_level = proc->safe_level;
	}
	val = invoke_block_from_c(th, &proc->block, self, argc, argv, blockptr, 0,
				  defined_class);
    }
    TH_POP_TAG();

    if (!proc->is_from_method) {
	th->safe_level = stored_safe;
    }

    if (state) {
	JUMP_TAG(state);
    }
    return val;
}
00895
00896 VALUE
00897 rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc,
00898 int argc, const VALUE *argv, const rb_block_t *blockptr)
00899 {
00900 return vm_invoke_proc(th, proc, proc->block.self, proc->block.klass,
00901 argc, argv, blockptr);
00902 }
00903
00904
00905
/* Skip pc-less (non-executing) frames starting at cfp; returns the
 * first frame with a pc, or 0 when the whole stack is exhausted. */
static rb_control_frame_t *
vm_normal_frame(rb_thread_t *th, rb_control_frame_t *cfp)
{
    while (cfp->pc == 0) {
	cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
	if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
	    return 0;
	}
    }
    return cfp;
}
00917
00918 static VALUE
00919 vm_cfp_svar_get(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key)
00920 {
00921 cfp = vm_normal_frame(th, cfp);
00922 return lep_svar_get(th, cfp ? VM_CF_LEP(cfp) : 0, key);
00923 }
00924
00925 static void
00926 vm_cfp_svar_set(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key, const VALUE val)
00927 {
00928 cfp = vm_normal_frame(th, cfp);
00929 lep_svar_set(th, cfp ? VM_CF_LEP(cfp) : 0, key, val);
00930 }
00931
/* Special-variable read for the current thread's current frame. */
static VALUE
vm_svar_get(VALUE key)
{
    rb_thread_t *th = GET_THREAD();
    return vm_cfp_svar_get(th, th->cfp, key);
}
00938
/* Special-variable write for the current thread's current frame. */
static void
vm_svar_set(VALUE key, VALUE val)
{
    rb_thread_t *th = GET_THREAD();
    vm_cfp_svar_set(th, th->cfp, key, val);
}
00945
/* Get the regexp backref special variable (svar key 1). */
VALUE
rb_backref_get(void)
{
    return vm_svar_get(1);
}
00951
/* Set the regexp backref special variable (svar key 1). */
void
rb_backref_set(VALUE val)
{
    vm_svar_set(1, val);
}
00957
/* Get the last-read-line special variable (svar key 0). */
VALUE
rb_lastline_get(void)
{
    return vm_svar_get(0);
}
00963
/* Set the last-read-line special variable (svar key 0). */
void
rb_lastline_set(VALUE val)
{
    vm_svar_set(0, val);
}
00969
00970
00971
00972 VALUE
00973 rb_sourcefilename(void)
00974 {
00975 rb_thread_t *th = GET_THREAD();
00976 rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00977
00978 if (cfp) {
00979 return cfp->iseq->location.path;
00980 }
00981 else {
00982 return Qnil;
00983 }
00984 }
00985
00986 const char *
00987 rb_sourcefile(void)
00988 {
00989 rb_thread_t *th = GET_THREAD();
00990 rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00991
00992 if (cfp) {
00993 return RSTRING_PTR(cfp->iseq->location.path);
00994 }
00995 else {
00996 return 0;
00997 }
00998 }
00999
01000 int
01001 rb_sourceline(void)
01002 {
01003 rb_thread_t *th = GET_THREAD();
01004 rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
01005
01006 if (cfp) {
01007 return rb_vm_get_sourceline(cfp);
01008 }
01009 else {
01010 return 0;
01011 }
01012 }
01013
01014 NODE *
01015 rb_vm_cref(void)
01016 {
01017 rb_thread_t *th = GET_THREAD();
01018 rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
01019
01020 if (cfp == 0) {
01021 return NULL;
01022 }
01023 return rb_vm_get_cref(cfp->iseq, cfp->ep);
01024 }
01025
01026 NODE *
01027 rb_vm_cref_in_context(VALUE self)
01028 {
01029 rb_thread_t *th = GET_THREAD();
01030 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
01031 if (cfp->self != self) return NULL;
01032 return rb_vm_get_cref(cfp->iseq, cfp->ep);
01033 }
01034
#if 0
/* Disabled debug helper: print each cref's class and visibility. */
void
debug_cref(NODE *cref)
{
    while (cref) {
	dp(cref->nd_clss);
	printf("%ld\n", cref->nd_visi);
	cref = cref->nd_next;
    }
}
#endif
01046
01047 VALUE
01048 rb_vm_cbase(void)
01049 {
01050 rb_thread_t *th = GET_THREAD();
01051 rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
01052
01053 if (cfp == 0) {
01054 rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
01055 }
01056 return vm_get_cbase(cfp->iseq, cfp->ep);
01057 }
01058
01059
01060
01061 static VALUE
01062 make_localjump_error(const char *mesg, VALUE value, int reason)
01063 {
01064 extern VALUE rb_eLocalJumpError;
01065 VALUE exc = rb_exc_new2(rb_eLocalJumpError, mesg);
01066 ID id;
01067
01068 switch (reason) {
01069 case TAG_BREAK:
01070 CONST_ID(id, "break");
01071 break;
01072 case TAG_REDO:
01073 CONST_ID(id, "redo");
01074 break;
01075 case TAG_RETRY:
01076 CONST_ID(id, "retry");
01077 break;
01078 case TAG_NEXT:
01079 CONST_ID(id, "next");
01080 break;
01081 case TAG_RETURN:
01082 CONST_ID(id, "return");
01083 break;
01084 default:
01085 CONST_ID(id, "noreason");
01086 break;
01087 }
01088 rb_iv_set(exc, "@exit_value", value);
01089 rb_iv_set(exc, "@reason", ID2SYM(id));
01090 return exc;
01091 }
01092
01093 void
01094 rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
01095 {
01096 VALUE exc = make_localjump_error(mesg, value, reason);
01097 rb_exc_raise(exc);
01098 }
01099
01100 VALUE
01101 rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
01102 {
01103 VALUE result = Qnil;
01104
01105 if (val == Qundef) {
01106 val = GET_THREAD()->tag->retval;
01107 }
01108 switch (state) {
01109 case 0:
01110 break;
01111 case TAG_RETURN:
01112 result = make_localjump_error("unexpected return", val, state);
01113 break;
01114 case TAG_BREAK:
01115 result = make_localjump_error("unexpected break", val, state);
01116 break;
01117 case TAG_NEXT:
01118 result = make_localjump_error("unexpected next", val, state);
01119 break;
01120 case TAG_REDO:
01121 result = make_localjump_error("unexpected redo", Qnil, state);
01122 break;
01123 case TAG_RETRY:
01124 result = make_localjump_error("retry outside of rescue clause", Qnil, state);
01125 break;
01126 default:
01127 break;
01128 }
01129 return result;
01130 }
01131
/* Raise LocalJumpError when state maps to one; otherwise longjmp the
 * original tag (JUMP_TAG does not return). */
void
rb_vm_jump_tag_but_local_jump(int state)
{
    VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
    if (!NIL_P(exc)) rb_exc_raise(exc);
    JUMP_TAG(state);
}
01139
NORETURN(static void vm_iter_break(rb_thread_t *th, VALUE val));

/* Throw TAG_BREAK carrying val, targeted at the frame owning the
 * previous ep (the iterator caller).  Does not return. */
static void
vm_iter_break(rb_thread_t *th, VALUE val)
{
    rb_control_frame_t *cfp = th->cfp;
    VALUE *ep = VM_CF_PREV_EP(cfp);

    th->state = TAG_BREAK;
    th->errinfo = (VALUE)NEW_THROW_OBJECT(val, (VALUE)ep, TAG_BREAK);
    TH_JUMP_TAG(th, TAG_BREAK);
}
01152
/* Break out of the current iterator block with nil. */
void
rb_iter_break(void)
{
    vm_iter_break(GET_THREAD(), Qnil);
}
01158
/* Break out of the current iterator block returning val. */
void
rb_iter_break_value(VALUE val)
{
    vm_iter_break(GET_THREAD(), val);
}
01164
01165
01166
/* optimization: redefinition check of basic operations */

/* Maps tracked method entries (me) to their basic-operation id (bop). */
static st_table *vm_opt_method_table = 0;

/* Redefinition flag bit for klass, or 0 when klass is not one of the
 * classes whose basic operations the VM specializes. */
static int
vm_redefinition_check_flag(VALUE klass)
{
    if (klass == rb_cFixnum) return FIXNUM_REDEFINED_OP_FLAG;
    if (klass == rb_cFloat) return FLOAT_REDEFINED_OP_FLAG;
    if (klass == rb_cString) return STRING_REDEFINED_OP_FLAG;
    if (klass == rb_cArray) return ARRAY_REDEFINED_OP_FLAG;
    if (klass == rb_cHash) return HASH_REDEFINED_OP_FLAG;
    if (klass == rb_cBignum) return BIGNUM_REDEFINED_OP_FLAG;
    if (klass == rb_cSymbol) return SYMBOL_REDEFINED_OP_FLAG;
    if (klass == rb_cTime) return TIME_REDEFINED_OP_FLAG;
    if (klass == rb_cRegexp) return REGEXP_REDEFINED_OP_FLAG;
    return 0;
}
01183
/* If me is one of the tracked optimized C methods, record that klass
 * redefined it by setting the corresponding BOP flag. */
static void
rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VALUE klass)
{
    st_data_t bop;
    if (!me->def || me->def->type == VM_METHOD_TYPE_CFUNC) {
	if (st_lookup(vm_opt_method_table, (st_data_t)me, &bop)) {
	    int flag = vm_redefinition_check_flag(klass);

	    ruby_vm_redefined_flag[bop] |= flag;
	}
    }
}
01196
01197 static int
01198 check_redefined_method(st_data_t key, st_data_t value, st_data_t data)
01199 {
01200 ID mid = (ID)key;
01201 rb_method_entry_t *me = (rb_method_entry_t *)value;
01202 VALUE klass = (VALUE)data;
01203 rb_method_entry_t *newme = rb_method_entry(klass, mid, NULL);
01204
01205 if (newme != me)
01206 rb_vm_check_redefinition_opt_method(me, me->klass);
01207 return ST_CONTINUE;
01208 }
01209
/* After a prepend, method lookup may resolve differently: re-check
 * every method in klass's origin table for redefined optimized methods. */
void
rb_vm_check_redefinition_by_prepend(VALUE klass)
{
    if (!vm_redefinition_check_flag(klass)) return;
    st_foreach(RCLASS_M_TBL(RCLASS_ORIGIN(klass)), check_redefined_method,
	       (st_data_t)klass);
}
01217
/* Register klass#mid (which must currently be a C function) as the
 * baseline implementation of basic operation bop. */
static void
add_opt_method(VALUE klass, ID mid, VALUE bop)
{
    rb_method_entry_t *me;
    if (st_lookup(RCLASS_M_TBL(klass), mid, (void *)&me) && me->def &&
	me->def->type == VM_METHOD_TYPE_CFUNC) {
	st_insert(vm_opt_method_table, (st_data_t)me, (st_data_t)bop);
    }
    else {
	rb_bug("undefined optimized method: %s", rb_id2name(mid));
    }
}
01230
/* One-time VM boot step: build the table that maps each optimized
 * basic-operation method (Fixnum#+, Array#[], String#freeze, ...) to
 * its BOP_* id, and clear all redefinition flags.  The OP()/C() macro
 * pair keeps each line as "operation, then the classes it is
 * specialized for". */
static void
vm_init_redefined_flag(void)
{
    ID mid;
    VALUE bop;

    vm_opt_method_table = st_init_numtable();

/* OP(mid_, bop_): select operation mid_/BOP_bop_ and reset its flag.
 * C(k): register rb_ck's current `mid` method as implementing `bop`. */
#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
#define C(k) add_opt_method(rb_c##k, mid, bop)
    OP(PLUS, PLUS), (C(Fixnum), C(Float), C(String), C(Array));
    OP(MINUS, MINUS), (C(Fixnum), C(Float));
    OP(MULT, MULT), (C(Fixnum), C(Float));
    OP(DIV, DIV), (C(Fixnum), C(Float));
    OP(MOD, MOD), (C(Fixnum), C(Float));
    OP(Eq, EQ), (C(Fixnum), C(Float), C(String));
    OP(Eqq, EQQ), (C(Fixnum), C(Bignum), C(Float), C(Symbol), C(String));
    OP(LT, LT), (C(Fixnum), C(Float));
    OP(LE, LE), (C(Fixnum), C(Float));
    OP(GT, GT), (C(Fixnum), C(Float));
    OP(GE, GE), (C(Fixnum), C(Float));
    OP(LTLT, LTLT), (C(String), C(Array));
    OP(AREF, AREF), (C(Array), C(Hash));
    OP(ASET, ASET), (C(Array), C(Hash));
    OP(Length, LENGTH), (C(Array), C(String), C(Hash));
    OP(Size, SIZE), (C(Array), C(String), C(Hash));
    OP(EmptyP, EMPTY_P), (C(Array), C(String), C(Hash));
    OP(Succ, SUCC), (C(Fixnum), C(String), C(Time));
    OP(EqTilde, MATCH), (C(Regexp), C(String));
    OP(Freeze, FREEZE), (C(String));
#undef C
#undef OP
}
01264
01265
01266
#if VMDEBUG
/* Debug helper: map a control frame's VM_FRAME_MAGIC_* type to a short
 * human-readable name.  Aborts (rb_bug) on an unknown frame type, which
 * would indicate stack corruption. */
static const char *
vm_frametype_name(const rb_control_frame_t *cfp)
{
    switch (VM_FRAME_TYPE(cfp)) {
      case VM_FRAME_MAGIC_METHOD: return "method";
      case VM_FRAME_MAGIC_BLOCK: return "block";
      case VM_FRAME_MAGIC_CLASS: return "class";
      case VM_FRAME_MAGIC_TOP: return "top";
      case VM_FRAME_MAGIC_CFUNC: return "cfunc";
      case VM_FRAME_MAGIC_PROC: return "proc";
      case VM_FRAME_MAGIC_IFUNC: return "ifunc";
      case VM_FRAME_MAGIC_EVAL: return "eval";
      case VM_FRAME_MAGIC_LAMBDA: return "lambda";
      case VM_FRAME_MAGIC_RESCUE: return "rescue";
      default:
	rb_bug("unknown frame");
    }
}
#endif
01287
/* Fire the trace events that must be emitted before a frame is popped
 * during stack rewinding (exception propagation / non-local exit).
 * Operates on th->cfp; the `cfp` parameter is currently unused —
 * NOTE(review): presumably kept for symmetry with callers, confirm.
 * `will_finish_vm_exec` is true when vm_exec() is about to return, in
 * which case the outer :return event for a bmethod is left to the
 * caller.  Frame types not listed here get no hook on rewind. */
static void
hook_before_rewind(rb_thread_t *th, rb_control_frame_t *cfp, int will_finish_vm_exec)
{
    switch (VM_FRAME_TYPE(th->cfp)) {
      case VM_FRAME_MAGIC_METHOD:
	/* leaving a Ruby method: :return event */
	RUBY_DTRACE_METHOD_RETURN_HOOK(th, 0, 0);
	EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_RETURN, th->cfp->self, 0, 0, Qnil);
	break;
      case VM_FRAME_MAGIC_BLOCK:
      case VM_FRAME_MAGIC_LAMBDA:
	if (VM_FRAME_TYPE_BMETHOD_P(th->cfp)) {
	    /* a block running as a method (define_method): both
	     * :b_return for the block and :return for the method */
	    EXEC_EVENT_HOOK(th, RUBY_EVENT_B_RETURN, th->cfp->self, 0, 0, Qnil);

	    if (!will_finish_vm_exec) {
		/* kick :return event only when vm_exec() keeps running */
		EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_RETURN, th->cfp->self,
					      th->cfp->me->called_id,
					      th->cfp->me->klass, Qnil);
	    }
	}
	else {
	    EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_B_RETURN, th->cfp->self, 0, 0, Qnil);
	}
	break;
      case VM_FRAME_MAGIC_CLASS:
	/* leaving a class/module body: :end event */
	EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_END, th->cfp->self, 0, 0, Qnil);
	break;
    }
}
01317
01318
01319
01320
01321
01322
01323
01324
01325
01326
01327
01328
01329
01330
01331
01332
01333
01334
01335
01336
01337
01338
01339
01340
01341
01342
01343
01344
01345
01346
01347
01348
01349
01350
01351
01352
01353
01354
01355
01356
01357
01358
01359
01360
01361
01362
01363
01364
01365
01366
01367
01368
01369
01370
01371
01372
01373
01374
01375
01376
01377
01378
01379
01380
01381
01382
01383
01384
01385
01386
01387
01388
01389
01390
01391
01392
01393
01394
01395
01396
01397
01398
01399
01400
01401
01402
01403
01404
01405
01406
01407
01408
01409
01410
01411
01412
01413
01414
01415
/* Top-level VM execution driver.
 *
 * Runs vm_exec_core() under a tag (setjmp) and implements the unwind
 * loop for all non-local control flow: exceptions (TAG_RAISE) and
 * throws for break/return/retry/redo/next.  On a throw it walks the
 * control-frame stack, consults each iseq's catch table, and either
 *   - pushes a RESCUE frame and resumes the loop (rescue/ensure),
 *   - restarts execution at a continuation pc (break/redo/next/retry),
 *   - or pops frames and re-jumps the tag to the caller.
 * Returns the final result value of the executed iseq. */
static VALUE
vm_exec(rb_thread_t *th)
{
    int state;
    VALUE result, err;
    VALUE initial = 0;

    TH_PUSH_TAG(th);
    _tag.retval = Qnil;
    if ((state = EXEC_TAG()) == 0) {
      vm_loop_start:
	result = vm_exec_core(th, initial);
	if ((state = th->state) != 0) {
	    /* vm_exec_core() threw without longjmp: its return value is
	     * the throw object */
	    err = result;
	    th->state = 0;
	    goto exception_handler;
	}
    }
    else {
	/* arrived here via longjmp (raise or C-level throw) */
	int i;
	struct iseq_catch_table_entry *entry;
	unsigned long epc, cont_pc, cont_sp;
	VALUE catch_iseqval;
	rb_control_frame_t *cfp;
	VALUE type;
	VALUE *escape_ep;

	err = th->errinfo;

      exception_handler:
	cont_pc = cont_sp = catch_iseqval = 0;

	/* skip C frames (no pc/iseq) until a Ruby frame is on top,
	 * firing :c_return events for cfunc frames as they unwind */
	while (th->cfp->pc == 0 || th->cfp->iseq == 0) {
	    if (UNLIKELY(VM_FRAME_TYPE(th->cfp) == VM_FRAME_MAGIC_CFUNC)) {
		const rb_method_entry_t *me = th->cfp->me;
		EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, th->cfp->self, me->called_id, me->klass, Qnil);
		RUBY_DTRACE_METHOD_RETURN_HOOK(th, me->klass, me->called_id);
	    }
	    th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
	}

	cfp = th->cfp;
	/* offset of the faulting instruction within the iseq */
	epc = cfp->pc - cfp->iseq->iseq_encoded;

	escape_ep = NULL;
	if (state == TAG_BREAK || state == TAG_RETURN) {
	    escape_ep = GET_THROWOBJ_CATCH_POINT(err);

	    if (cfp->ep == escape_ep) {
		/* this frame is the throw's target */
		if (state == TAG_RETURN) {
		    if (!VM_FRAME_TYPE_FINISH_P(cfp)) {
			/* not at a FINISH frame yet: retarget the throw
			 * one frame up and continue as a break */
			SET_THROWOBJ_CATCH_POINT(err, (VALUE)(cfp + 1)->ep);
			SET_THROWOBJ_STATE(err, state = TAG_BREAK);
		    }
		    else {
			/* run any ensure clause covering epc first */
			for (i = 0; i < cfp->iseq->catch_table_size; i++) {
			    entry = &cfp->iseq->catch_table[i];
			    if (entry->start < epc && entry->end >= epc) {
				if (entry->type == CATCH_TYPE_ENSURE) {
				    catch_iseqval = entry->iseq;
				    cont_pc = entry->cont;
				    cont_sp = entry->sp;
				    break;
				}
			    }
			}
			if (!catch_iseqval) {
			    /* no ensure: the return completes here */
			    th->errinfo = Qnil;
			    result = GET_THROWOBJ_VAL(err);
			    hook_before_rewind(th, th->cfp, TRUE);
			    vm_pop_frame(th);
			    goto finish_vme;
			}
		    }
		    /* fall through to the catch-table dispatch below */
		}
		else {
		    /* TAG_BREAK reached its target: push the break value
		     * and resume execution */
#if OPT_STACK_CACHING
		    initial = (GET_THROWOBJ_VAL(err));
#else
		    *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
#endif
		    th->errinfo = Qnil;
		    goto vm_loop_start;
		}
	    }
	}

	if (state == TAG_RAISE) {
	    /* exception: first rescue or ensure covering epc wins */
	    for (i = 0; i < cfp->iseq->catch_table_size; i++) {
		entry = &cfp->iseq->catch_table[i];
		if (entry->start < epc && entry->end >= epc) {

		    if (entry->type == CATCH_TYPE_RESCUE ||
			entry->type == CATCH_TYPE_ENSURE) {
			catch_iseqval = entry->iseq;
			cont_pc = entry->cont;
			cont_sp = entry->sp;
			break;
		    }
		}
	    }
	}
	else if (state == TAG_RETRY) {
	    /* retry: run intervening ensure clauses; when the RETRY
	     * entry matching the throw's ep is found, restart there */
	    for (i = 0; i < cfp->iseq->catch_table_size; i++) {
		entry = &cfp->iseq->catch_table[i];
		if (entry->start < epc && entry->end >= epc) {

		    if (entry->type == CATCH_TYPE_ENSURE) {
			catch_iseqval = entry->iseq;
			cont_pc = entry->cont;
			cont_sp = entry->sp;
			break;
		    }
		    else if (entry->type == CATCH_TYPE_RETRY) {
			/* NOTE: intentionally shadows the outer escape_ep */
			VALUE *escape_ep;
			escape_ep = GET_THROWOBJ_CATCH_POINT(err);
			if (cfp->ep == escape_ep) {
			    cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
			    th->errinfo = Qnil;
			    goto vm_loop_start;
			}
		    }
		}
	    }
	}
	else if (state == TAG_BREAK && ((VALUE)escape_ep & ~0x03) == 0) {
	    /* break with an unlinked (tagged-immediate) catch point:
	     * resolve via the catch table instead of an ep match */
	    type = CATCH_TYPE_BREAK;

	  search_restart_point:
	    for (i = 0; i < cfp->iseq->catch_table_size; i++) {
		entry = &cfp->iseq->catch_table[i];

		if (entry->start < epc && entry->end >= epc) {
		    if (entry->type == CATCH_TYPE_ENSURE) {
			catch_iseqval = entry->iseq;
			cont_pc = entry->cont;
			cont_sp = entry->sp;
			break;
		    }
		    else if (entry->type == type) {
			/* restart at the continuation point, restoring
			 * the stack pointer recorded in the entry */
			cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
			cfp->sp = vm_base_ptr(cfp) + entry->sp;

			if (state != TAG_REDO) {
			    /* break/next carry a value; redo does not */
#if OPT_STACK_CACHING
			    initial = (GET_THROWOBJ_VAL(err));
#else
			    *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
#endif
			}
			th->errinfo = Qnil;
			th->state = 0;
			goto vm_loop_start;
		    }
		}
	    }
	}
	else if (state == TAG_REDO) {
	    type = CATCH_TYPE_REDO;
	    goto search_restart_point;
	}
	else if (state == TAG_NEXT) {
	    type = CATCH_TYPE_NEXT;
	    goto search_restart_point;
	}
	else {
	    /* any other throw: only ensure clauses run in this frame */
	    for (i = 0; i < cfp->iseq->catch_table_size; i++) {
		entry = &cfp->iseq->catch_table[i];
		if (entry->start < epc && entry->end >= epc) {

		    if (entry->type == CATCH_TYPE_ENSURE) {
			catch_iseqval = entry->iseq;
			cont_pc = entry->cont;
			cont_sp = entry->sp;
			break;
		    }
		}
	    }
	}

	if (catch_iseqval != 0) {
	    /* found a handler iseq: push a RESCUE frame with the throw
	     * object as its single local and resume the main loop */
	    rb_iseq_t *catch_iseq;

	    GetISeqPtr(catch_iseqval, catch_iseq);
	    cfp->sp = vm_base_ptr(cfp) + cont_sp;
	    cfp->pc = cfp->iseq->iseq_encoded + cont_pc;

	    /* push the exception/throw object as $! local */
	    cfp->sp[0] = err;
	    vm_push_frame(th, catch_iseq, VM_FRAME_MAGIC_RESCUE,
			  cfp->self, cfp->klass,
			  VM_ENVVAL_PREV_EP_PTR(cfp->ep),
			  catch_iseq->iseq_encoded,
			  cfp->sp + 1 /* push value */,
			  catch_iseq->local_size - 1,
			  cfp->me, catch_iseq->stack_max);

	    state = 0;
	    th->state = 0;
	    th->errinfo = Qnil;
	    goto vm_loop_start;
	}
	else {
	    /* no handler in this frame: pop it and either keep
	     * unwinding or re-throw to the enclosing tag */
	    hook_before_rewind(th, th->cfp, FALSE);

	    if (VM_FRAME_TYPE_FINISH_P(th->cfp)) {
		vm_pop_frame(th);
		th->errinfo = err;
		TH_POP_TAG2();
		JUMP_TAG(state);
	    }
	    else {
		th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
		goto exception_handler;
	    }
	}
    }
  finish_vme:
    TH_POP_TAG();
    return result;
}
01642
01643
01644
01645 VALUE
01646 rb_iseq_eval(VALUE iseqval)
01647 {
01648 rb_thread_t *th = GET_THREAD();
01649 VALUE val;
01650
01651 vm_set_top_stack(th, iseqval);
01652
01653 val = vm_exec(th);
01654 RB_GC_GUARD(iseqval);
01655 return val;
01656 }
01657
01658 VALUE
01659 rb_iseq_eval_main(VALUE iseqval)
01660 {
01661 rb_thread_t *th = GET_THREAD();
01662 VALUE val;
01663
01664 vm_set_main_stack(th, iseqval);
01665
01666 val = vm_exec(th);
01667 RB_GC_GUARD(iseqval);
01668 return val;
01669 }
01670
01671 int
01672 rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, VALUE *klassp)
01673 {
01674 rb_iseq_t *iseq = cfp->iseq;
01675 if (!iseq && cfp->me) {
01676 if (idp) *idp = cfp->me->def->original_id;
01677 if (klassp) *klassp = cfp->me->klass;
01678 return 1;
01679 }
01680 while (iseq) {
01681 if (RUBY_VM_IFUNC_P(iseq)) {
01682 if (idp) *idp = idIFUNC;
01683 if (klassp) *klassp = 0;
01684 return 1;
01685 }
01686 if (iseq->defined_method_id) {
01687 if (idp) *idp = iseq->defined_method_id;
01688 if (klassp) *klassp = iseq->klass;
01689 return 1;
01690 }
01691 if (iseq->local_iseq == iseq) {
01692 break;
01693 }
01694 iseq = iseq->parent_iseq;
01695 }
01696 return 0;
01697 }
01698
01699 int
01700 rb_thread_method_id_and_class(rb_thread_t *th, ID *idp, VALUE *klassp)
01701 {
01702 return rb_vm_control_frame_id_and_class(th->cfp, idp, klassp);
01703 }
01704
01705 int
01706 rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
01707 {
01708 return rb_thread_method_id_and_class(GET_THREAD(), idp, klassp);
01709 }
01710
01711 VALUE
01712 rb_thread_current_status(const rb_thread_t *th)
01713 {
01714 const rb_control_frame_t *cfp = th->cfp;
01715 VALUE str = Qnil;
01716
01717 if (cfp->iseq != 0) {
01718 if (cfp->pc != 0) {
01719 rb_iseq_t *iseq = cfp->iseq;
01720 int line_no = rb_vm_get_sourceline(cfp);
01721 char *file = RSTRING_PTR(iseq->location.path);
01722 str = rb_sprintf("%s:%d:in `%s'",
01723 file, line_no, RSTRING_PTR(iseq->location.label));
01724 }
01725 }
01726 else if (cfp->me->def->original_id) {
01727 str = rb_sprintf("`%s#%s' (cfunc)",
01728 rb_class2name(cfp->me->klass),
01729 rb_id2name(cfp->me->def->original_id));
01730 }
01731
01732 return str;
01733 }
01734
/* Invoke the C function `func(arg)` under a freshly pushed TOP frame so
 * that Ruby code it triggers sees `recv` as self and `filename` as the
 * script name.  `iseqval` is volatile so the dummy iseq object stays
 * GC-reachable for the duration of the call. */
VALUE
rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
		 const rb_block_t *blockptr, VALUE filename)
{
    rb_thread_t *th = GET_THREAD();
    const rb_control_frame_t *reg_cfp = th->cfp;
    volatile VALUE iseqval = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
    VALUE val;

    /* frame must be pushed before and popped after the call, in order */
    vm_push_frame(th, DATA_PTR(iseqval), VM_FRAME_MAGIC_TOP | VM_FRAME_FLAG_FINISH,
		  recv, CLASS_OF(recv), VM_ENVVAL_BLOCK_PTR(blockptr), 0, reg_cfp->sp, 1, 0, 0);

    val = (*func)(arg);

    vm_pop_frame(th);
    return val;
}
01752
01753
01754
01755 static int
01756 vm_mark_each_thread_func(st_data_t key, st_data_t value, st_data_t dummy)
01757 {
01758 VALUE thval = (VALUE)key;
01759 rb_gc_mark(thval);
01760 return ST_CONTINUE;
01761 }
01762
01763 void rb_vm_trace_mark_event_hooks(rb_hook_list_t *hooks);
01764
/* GC mark function for the rb_vm_t object: mark every VALUE the VM
 * structure holds so the collector does not reclaim them.  Any new
 * VALUE field added to rb_vm_t must be marked here. */
void
rb_vm_mark(void *ptr)
{
    int i;

    RUBY_MARK_ENTER("vm");
    RUBY_GC_INFO("-------------------------------------------------\n");
    if (ptr) {
	rb_vm_t *vm = ptr;
	/* all living threads (keys of the table are Thread VALUEs) */
	if (vm->living_threads) {
	    st_foreach(vm->living_threads, vm_mark_each_thread_func, 0);
	}
	RUBY_MARK_UNLESS_NULL(vm->thgroup_default);
	RUBY_MARK_UNLESS_NULL(vm->mark_object_ary);
	RUBY_MARK_UNLESS_NULL(vm->load_path);
	RUBY_MARK_UNLESS_NULL(vm->load_path_snapshot);
	RUBY_MARK_UNLESS_NULL(vm->load_path_check_cache);
	RUBY_MARK_UNLESS_NULL(vm->expanded_load_path);
	RUBY_MARK_UNLESS_NULL(vm->loaded_features);
	RUBY_MARK_UNLESS_NULL(vm->loaded_features_snapshot);
	RUBY_MARK_UNLESS_NULL(vm->top_self);
	RUBY_MARK_UNLESS_NULL(vm->coverages);
	RUBY_MARK_UNLESS_NULL(vm->defined_module_hash);
	/* preallocated special exception objects (NoMemoryError etc.) */
	rb_gc_mark_locations(vm->special_exceptions, vm->special_exceptions + ruby_special_error_count);

	if (vm->loading_table) {
	    rb_mark_tbl(vm->loading_table);
	}

	rb_vm_trace_mark_event_hooks(&vm->event_hooks);

	/* procs/strings registered as signal trap handlers */
	for (i = 0; i < RUBY_NSIG; i++) {
	    if (vm->trap_list[i].cmd)
		rb_gc_mark(vm->trap_list[i].cmd);
	}
	if (vm->defined_strings) {
	    rb_gc_mark_locations(vm->defined_strings, vm->defined_strings + DEFINED_EXPR);
	}
    }

    RUBY_MARK_LEAVE("vm");
}
01807
01808
01809 int
01810 rb_vm_add_root_module(ID id, VALUE module)
01811 {
01812 rb_vm_t *vm = GET_VM();
01813 if (vm->defined_module_hash) {
01814 rb_hash_aset(vm->defined_module_hash, ID2SYM(id), module);
01815 }
01816 return TRUE;
01817 }
01818
01819 #define vm_free 0
01820
/* Tear down a VM at interpreter shutdown: free the main thread, the
 * living-threads table, run at_exit hooks, destroy the GVL, free the
 * object space (when per-VM object spaces are enabled) and finally the
 * VM structure itself.  The ordering here is deliberate — e.g. the
 * objspace must outlive the threads that may touch it. */
int
ruby_vm_destruct(rb_vm_t *vm)
{
    RUBY_FREE_ENTER("vm");
    if (vm) {
	rb_thread_t *th = vm->main_thread;
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
	struct rb_objspace *objspace = vm->objspace;
#endif
	/* the VM wrapper object must not be finalized normally */
	rb_gc_force_recycle(vm->self);
	vm->main_thread = 0;
	if (th) {
	    rb_fiber_reset_root_local_storage(th->self);
	    thread_free(th);
	}
	if (vm->living_threads) {
	    st_free_table(vm->living_threads);
	    vm->living_threads = 0;
	}
	ruby_vm_run_at_exit_hooks(vm);
	rb_vm_gvl_destroy(vm);
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
	if (objspace) {
	    rb_objspace_free(objspace);
	}
#endif
	/* vm itself was allocated outside the GC heap */
	ruby_mimfree(vm);
	ruby_current_vm = 0;
    }
    RUBY_FREE_LEAVE("vm");
    return 0;
}
01854
01855 static size_t
01856 vm_memsize(const void *ptr)
01857 {
01858 if (ptr) {
01859 const rb_vm_t *vmobj = ptr;
01860 size_t size = sizeof(rb_vm_t);
01861 if (vmobj->living_threads) {
01862 size += st_memsize(vmobj->living_threads);
01863 }
01864 if (vmobj->defined_strings) {
01865 size += DEFINED_EXPR * sizeof(VALUE);
01866 }
01867 return size;
01868 }
01869 else {
01870 return 0;
01871 }
01872 }
01873
/* TypedData description for the VM wrapper object.  vm_free is 0: the
 * structure is released explicitly via ruby_vm_destruct(), not by GC. */
static const rb_data_type_t vm_data_type = {
    "VM",
    {rb_vm_mark, vm_free, vm_memsize,},
    NULL, NULL, RUBY_TYPED_FREE_IMMEDIATELY
};
01879
01880
01881 static VALUE
01882 vm_default_params(void)
01883 {
01884 rb_vm_t *vm = GET_VM();
01885 VALUE result = rb_hash_new();
01886 #define SET(name) rb_hash_aset(result, ID2SYM(rb_intern(#name)), SIZET2NUM(vm->default_params.name));
01887 SET(thread_vm_stack_size);
01888 SET(thread_machine_stack_size);
01889 SET(fiber_vm_stack_size);
01890 SET(fiber_machine_stack_size);
01891 #undef SET
01892 rb_obj_freeze(result);
01893 return result;
01894 }
01895
01896 static size_t
01897 get_param(const char *name, size_t default_value, size_t min_value)
01898 {
01899 const char *envval;
01900 size_t result = default_value;
01901 if ((envval = getenv(name)) != 0) {
01902 long val = atol(envval);
01903 if (val < (long)min_value) {
01904 val = (long)min_value;
01905 }
01906 result = (size_t)(((val -1 + RUBY_VM_SIZE_ALIGN) / RUBY_VM_SIZE_ALIGN) * RUBY_VM_SIZE_ALIGN);
01907 }
01908 if (0) fprintf(stderr, "%s: %"PRIdSIZE"\n", name, result);
01909
01910 return result;
01911 }
01912
/* Clamp a requested machine stack size to platform constraints:
 * Symbian gets a fixed 64KB, and anything below PTHREAD_STACK_MIN is
 * raised to twice that minimum to leave headroom. */
static void
check_machine_stack_size(size_t *sizep)
{
#ifdef PTHREAD_STACK_MIN
    size_t size = *sizep;
#endif

#ifdef __SYMBIAN32__
    *sizep = 64 * 1024;	/* 64KB: Symbian default */
#endif

#ifdef PTHREAD_STACK_MIN
    if (size < PTHREAD_STACK_MIN) {
	*sizep = PTHREAD_STACK_MIN * 2;
    }
#endif
}
01930
/* Populate vm->default_params from RUBY_*_STACK_SIZE environment
 * variables (falling back to compiled-in defaults/minimums), then
 * clamp the two machine stack sizes to platform limits. */
static void
vm_default_params_setup(rb_vm_t *vm)
{
    vm->default_params.thread_vm_stack_size =
      get_param("RUBY_THREAD_VM_STACK_SIZE",
		RUBY_VM_THREAD_VM_STACK_SIZE,
		RUBY_VM_THREAD_VM_STACK_SIZE_MIN);

    vm->default_params.thread_machine_stack_size =
      get_param("RUBY_THREAD_MACHINE_STACK_SIZE",
		RUBY_VM_THREAD_MACHINE_STACK_SIZE,
		RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN);

    vm->default_params.fiber_vm_stack_size =
      get_param("RUBY_FIBER_VM_STACK_SIZE",
		RUBY_VM_FIBER_VM_STACK_SIZE,
		RUBY_VM_FIBER_VM_STACK_SIZE_MIN);

    vm->default_params.fiber_machine_stack_size =
      get_param("RUBY_FIBER_MACHINE_STACK_SIZE",
		RUBY_VM_FIBER_MACHINE_STACK_SIZE,
		RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN);

    /* machine stack sizes must satisfy platform minimums */
    check_machine_stack_size(&vm->default_params.thread_machine_stack_size);
    check_machine_stack_size(&vm->default_params.fiber_machine_stack_size);
}
01958
/* Second-stage VM initialization: zero the structure and set the few
 * fields that must be valid before anything else runs.  at_exit is an
 * embedded, hidden, empty Array used to collect at_exit hooks without
 * a separate allocation. */
static void
vm_init2(rb_vm_t *vm)
{
    MEMZERO(vm, rb_vm_t, 1);
    vm->src_encoding_index = -1;	/* "not set yet" */
    vm->at_exit.basic.flags = (T_ARRAY | RARRAY_EMBED_FLAG) & ~RARRAY_EMBED_LEN_MASK; /* len = 0 */
    rb_obj_hide((VALUE)&vm->at_exit);

    vm_default_params_setup(vm);
}
01969
01970
01971
/* Thread VM-stack recycling: dead threads' stacks are kept in a small
 * free list (up to RECYCLE_MAX) and handed out to new threads instead
 * of re-allocating.  NOTE(review): the free list is not synchronized —
 * presumably callers hold the GVL; confirm. */
#define USE_THREAD_DATA_RECYCLE 1

#if USE_THREAD_DATA_RECYCLE
#define RECYCLE_MAX 64
static VALUE *thread_recycle_stack_slot[RECYCLE_MAX];
static int thread_recycle_stack_count = 0;

/* Return a VM stack of `size` VALUEs: reuse a recycled one if
 * available, otherwise allocate fresh.  Recycled stacks are assumed to
 * have been allocated with the same size. */
static VALUE *
thread_recycle_stack(size_t size)
{
    if (thread_recycle_stack_count) {
	/* pop the most recently released stack */
	return thread_recycle_stack_slot[--thread_recycle_stack_count];
    }
    else {
	return ALLOC_N(VALUE, size);
    }
}

#else
#define thread_recycle_stack(size) ALLOC_N(VALUE, (size))
#endif
01994
/* Release a thread's VM stack: stash it on the recycle list when there
 * is room, otherwise free it. */
void
rb_thread_recycle_stack_release(VALUE *stack)
{
#if USE_THREAD_DATA_RECYCLE
    if (thread_recycle_stack_count < RECYCLE_MAX) {
	thread_recycle_stack_slot[thread_recycle_stack_count++] = stack;
	return;
    }
#endif
    ruby_xfree(stack);
}
02006
#ifdef USE_THREAD_RECYCLE
/* Allocate a zeroed rb_thread_t.  Despite the name, no recycling is
 * implemented here — it always allocates fresh (the recycle hook point
 * only). */
static rb_thread_t *
thread_recycle_struct(void)
{
    void *p = ALLOC_N(rb_thread_t, 1);
    memset(p, 0, sizeof(rb_thread_t));
    return p;
}
#endif
02016
/* GC mark function for rb_thread_t: mark the live portion of the VM
 * stack, every control frame's VALUE fields, and all VALUE-typed
 * members of the thread structure.  For threads other than the one
 * running GC, also conservatively scan the machine stack and saved
 * registers. */
void
rb_thread_mark(void *ptr)
{
    rb_thread_t *th = NULL;
    RUBY_MARK_ENTER("thread");
    if (ptr) {
	th = ptr;
	if (th->stack) {
	    VALUE *p = th->stack;
	    VALUE *sp = th->cfp->sp;
	    rb_control_frame_t *cfp = th->cfp;
	    rb_control_frame_t *limit_cfp = (void *)(th->stack + th->stack_size);

	    /* mark the occupied part of the VM value stack */
	    while (p < sp) {
		rb_gc_mark(*p++);
	    }
	    /* plus any extra slots reserved during method dispatch */
	    rb_gc_mark_locations(p, p + th->mark_stack_len);

	    /* walk all control frames from newest to the stack top */
	    while (cfp != limit_cfp) {
		rb_iseq_t *iseq = cfp->iseq;
		rb_gc_mark(cfp->proc);
		rb_gc_mark(cfp->self);
		rb_gc_mark(cfp->klass);
		if (iseq) {
		    rb_gc_mark(RUBY_VM_NORMAL_ISEQ_P(iseq) ? iseq->self : (VALUE)iseq);
		}
		if (cfp->me) {
		    /* method entries are not GC objects; flag as live */
		    ((rb_method_entry_t *)cfp->me)->mark = 1;
		    rb_mark_method_entry(cfp->me);
		}
		cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
	    }
	}

	/* VALUE fields held directly in the thread structure */
	RUBY_MARK_UNLESS_NULL(th->first_proc);
	if (th->first_proc) RUBY_MARK_UNLESS_NULL(th->first_args);

	RUBY_MARK_UNLESS_NULL(th->thgroup);
	RUBY_MARK_UNLESS_NULL(th->value);
	RUBY_MARK_UNLESS_NULL(th->errinfo);
	RUBY_MARK_UNLESS_NULL(th->pending_interrupt_queue);
	RUBY_MARK_UNLESS_NULL(th->pending_interrupt_mask_stack);
	RUBY_MARK_UNLESS_NULL(th->root_svar);
	RUBY_MARK_UNLESS_NULL(th->top_self);
	RUBY_MARK_UNLESS_NULL(th->top_wrapper);
	RUBY_MARK_UNLESS_NULL(th->fiber);
	RUBY_MARK_UNLESS_NULL(th->root_fiber);
	RUBY_MARK_UNLESS_NULL(th->stat_insn_usage);
	RUBY_MARK_UNLESS_NULL(th->last_status);

	RUBY_MARK_UNLESS_NULL(th->locking_mutex);

	rb_mark_tbl(th->local_storage);

	/* conservative scan of another thread's machine stack/registers */
	if (GET_THREAD() != th && th->machine.stack_start && th->machine.stack_end) {
	    rb_gc_mark_machine_stack(th);
	    rb_gc_mark_locations((VALUE *)&th->machine.regs,
				 (VALUE *)(&th->machine.regs) +
				 sizeof(th->machine.regs) / sizeof(VALUE));
	}

	rb_vm_trace_mark_event_hooks(&th->event_hooks);
    }

    RUBY_MARK_LEAVE("thread");
}
02085
/* TypedData free function for rb_thread_t.  Releases the VM stack
 * (unless owned by a root fiber), the thread-local storage table, and
 * the structure itself — except for the VM's main thread, whose struct
 * is freed during VM destruction instead.  Holding a mutex at free
 * time indicates a bookkeeping bug, hence rb_bug(). */
static void
thread_free(void *ptr)
{
    rb_thread_t *th;
    RUBY_FREE_ENTER("thread");

    if (ptr) {
	th = ptr;

	if (!th->root_fiber) {
	    /* stack belongs to this thread, not to a fiber */
	    RUBY_FREE_UNLESS_NULL(th->stack);
	}

	if (th->locking_mutex != Qfalse) {
	    rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex);
	}
	if (th->keeping_mutexes != NULL) {
	    rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes);
	}

	if (th->local_storage) {
	    st_free_table(th->local_storage);
	}

	if (th->vm && th->vm->main_thread == th) {
	    /* main thread's struct is freed by ruby_vm_destruct() */
	    RUBY_GC_INFO("main thread\n");
	}
	else {
#ifdef USE_SIGALTSTACK
	    if (th->altstack) {
		free(th->altstack);
	    }
#endif
	    ruby_xfree(ptr);
	}
	if (ruby_current_thread == th)
	    ruby_current_thread = NULL;
    }
    RUBY_FREE_LEAVE("thread");
}
02126
02127 static size_t
02128 thread_memsize(const void *ptr)
02129 {
02130 if (ptr) {
02131 const rb_thread_t *th = ptr;
02132 size_t size = sizeof(rb_thread_t);
02133
02134 if (!th->root_fiber) {
02135 size += th->stack_size * sizeof(VALUE);
02136 }
02137 if (th->local_storage) {
02138 size += st_memsize(th->local_storage);
02139 }
02140 return size;
02141 }
02142 else {
02143 return 0;
02144 }
02145 }
02146
/* TypedData description for Thread objects; exported so other files
 * (e.g. thread.c) can type-check wrapped thread pointers. */
#define thread_data_type ruby_threadptr_data_type
const rb_data_type_t ruby_threadptr_data_type = {
    "VM/thread",
    {
	rb_thread_mark,
	thread_free,
	thread_memsize,
    },
    NULL, NULL, RUBY_TYPED_FREE_IMMEDIATELY
};
02157
02158 VALUE
02159 rb_obj_is_thread(VALUE obj)
02160 {
02161 if (rb_typeddata_is_kind_of(obj, &thread_data_type)) {
02162 return Qtrue;
02163 }
02164 else {
02165 return Qfalse;
02166 }
02167 }
02168
/* Allocate a Thread object of class `klass` with an embedded (zeroed)
 * rb_thread_t.  `obj` is volatile to keep it on the C stack and thus
 * GC-reachable until returned. */
static VALUE
thread_alloc(VALUE klass)
{
    VALUE volatile obj;
#ifdef USE_THREAD_RECYCLE
    rb_thread_t *th = thread_recycle_struct();
    obj = TypedData_Wrap_Struct(klass, &thread_data_type, th);
#else
    rb_thread_t *th;
    obj = TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
#endif
    return obj;
}
02182
/* Initialize a freshly allocated rb_thread_t: allocate its VM stack
 * (recycled when possible), push the initial dummy TOP frame, and set
 * default status fields.  th->vm must already be set by the caller so
 * the configured stack size can be read. */
static void
th_init(rb_thread_t *th, VALUE self)
{
    th->self = self;

    /* allocate thread stack */
#ifdef USE_SIGALTSTACK
    /* altstack of main thread is reallocated in another place */
    th->altstack = malloc(rb_sigaltstack_size());
#endif
    /* stack size is configured in VALUEs; default_params holds bytes */
    th->stack_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
    th->stack = thread_recycle_stack(th->stack_size);

    /* control frames grow downward from the top of the stack */
    th->cfp = (void *)(th->stack + th->stack_size);

    vm_push_frame(th, 0 /* dummy iseq */, VM_FRAME_MAGIC_TOP | VM_FRAME_FLAG_FINISH,
		  Qnil /* dummy self */, Qnil /* dummy klass */, VM_ENVVAL_BLOCK_PTR(0), 0 /* dummy pc */, th->stack, 1, 0, 0);

    th->status = THREAD_RUNNABLE;
    th->errinfo = Qnil;
    th->last_status = Qnil;
    th->waiting_fd = -1;
    th->root_svar = Qnil;

#if OPT_CALL_THREADED_CODE
    th->retval = Qundef;
#endif
}
02214
/* Ruby-level initializer backing for new Thread objects: attach the
 * current VM (required before th_init() reads stack parameters), set
 * up the thread struct and its `locals` ivar, and inherit the VM's
 * top self.  Returns `self`. */
static VALUE
ruby_thread_init(VALUE self)
{
    rb_thread_t *th;
    rb_vm_t *vm = GET_THREAD()->vm;
    GetThreadPtr(self, th);

    th->vm = vm;		/* must precede th_init() */
    th_init(th, self);
    rb_ivar_set(self, rb_intern("locals"), rb_hash_new());

    th->top_wrapper = 0;
    th->top_self = rb_vm_top_self();
    th->root_svar = Qnil;
    return self;
}
02231
02232 VALUE
02233 rb_thread_alloc(VALUE klass)
02234 {
02235 VALUE self = thread_alloc(klass);
02236 ruby_thread_init(self);
02237 return self;
02238 }
02239
/* Define the method `id` on the class taken from `cref` (or on the
 * singleton class of `obj` when `is_singleton`), backed by the iseq in
 * `iseqval`.  An iseq already bound to a class is cloned first so each
 * definition owns its own copy.  Also handles module_function by
 * additionally defining a public singleton method. */
static void
vm_define_method(rb_thread_t *th, VALUE obj, ID id, VALUE iseqval,
		 rb_num_t is_singleton, NODE *cref)
{
    VALUE klass = cref->nd_clss;
    int noex = (int)cref->nd_visi;
    rb_iseq_t *miseq;
    GetISeqPtr(iseqval, miseq);

    if (miseq->klass) {
	/* already attached elsewhere: work on a fresh clone */
	RB_GC_GUARD(iseqval) = rb_iseq_clone(iseqval, 0);
	GetISeqPtr(iseqval, miseq);
    }

    if (NIL_P(klass)) {
	rb_raise(rb_eTypeError, "no class/module to add method");
    }

    if (is_singleton) {
	klass = rb_singleton_class(obj);
	noex = NOEX_PUBLIC;
    }

    /* dup cref into the iseq; the body itself always runs as public */
    COPY_CREF(miseq->cref_stack, cref);
    miseq->cref_stack->nd_visi = NOEX_PUBLIC;
    /* write-barriered: miseq->klass may point into another generation */
    RB_OBJ_WRITE(miseq->self, &miseq->klass, klass);
    miseq->defined_method_id = id;
    rb_add_method(klass, id, VM_METHOD_TYPE_ISEQ, miseq, noex);

    if (!is_singleton && noex == NOEX_MODFUNC) {
	/* module_function: also expose a public singleton method */
	klass = rb_singleton_class(klass);
	rb_add_method(klass, id, VM_METHOD_TYPE_ISEQ, miseq, NOEX_PUBLIC);
    }
}
02275
/* Run `expr` as if the current (frozen-core method) frame had already
 * been popped: temporarily step th->cfp one frame up while borrowing
 * the current sp, then restore.  Used by the m_core_* methods so that
 * operations like method definition see the caller's frame. */
#define REWIND_CFP(expr) do { \
    rb_thread_t *th__ = GET_THREAD(); \
    VALUE *const curr_sp = (th__->cfp++)->sp; \
    VALUE *const saved_sp = th__->cfp->sp; \
    th__->cfp->sp = curr_sp; \
    expr; \
    (th__->cfp--)->sp = saved_sp; \
} while (0)
02284
/* RubyVM::FrozenCore#core_define_method: define instance method `sym`
 * on `cbase` from `iseqval`, in the caller's cref.  Returns the name
 * symbol. */
static VALUE
m_core_define_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
{
    REWIND_CFP({
	vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 0, rb_vm_cref());
    });
    return sym;
}
02293
/* RubyVM::FrozenCore#core_define_singleton_method: like
 * core_define_method but defines on the singleton class of `cbase`. */
static VALUE
m_core_define_singleton_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
{
    REWIND_CFP({
	vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 1, rb_vm_cref());
    });
    return sym;
}
02302
/* RubyVM::FrozenCore#core_set_method_alias: alias sym1 -> sym2 on
 * `cbase` (implements the `alias` keyword for methods). */
static VALUE
m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2)
{
    REWIND_CFP({
	rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
    });
    return Qnil;
}
02311
/* RubyVM::FrozenCore#core_set_variable_alias: alias one global
 * variable to another (implements `alias $a $b`). */
static VALUE
m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2)
{
    REWIND_CFP({
	rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
    });
    return Qnil;
}
02320
/* RubyVM::FrozenCore#core_undef_method: undefine method `sym` on
 * `cbase` (implements the `undef` keyword).
 * NOTE(review): the cache clear is keyed on `self` (the frozen core
 * object), not on `cbase` — looks suspicious; confirm intent. */
static VALUE
m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
{
    REWIND_CFP({
	rb_undef(cbase, SYM2ID(sym));
	rb_clear_method_cache_by_class(self);
    });
    return Qnil;
}
02330
/* RubyVM::FrozenCore#core_set_postexe: register the given block to run
 * at interpreter exit (implements END { ... }). */
static VALUE
m_core_set_postexe(VALUE self)
{
    rb_set_end_proc(rb_call_end_proc, rb_block_proc());
    return Qnil;
}
02337
02338 static VALUE core_hash_merge_ary(VALUE hash, VALUE ary);
02339 static VALUE core_hash_from_ary(VALUE ary);
02340 static VALUE core_hash_merge_kwd(int argc, VALUE *argv);
02341
02342 static VALUE
02343 core_hash_merge(VALUE hash, long argc, const VALUE *argv)
02344 {
02345 long i;
02346
02347 assert(argc % 2 == 0);
02348 for (i=0; i<argc; i+=2) {
02349 rb_hash_aset(hash, argv[i], argv[i+1]);
02350 }
02351 return hash;
02352 }
02353
/* RubyVM::FrozenCore#core_hash_from_ary: build a Hash from a flat
 * [k, v, k, v, ...] array, run in the caller's frame. */
static VALUE
m_core_hash_from_ary(VALUE self, VALUE ary)
{
    VALUE hash;
    REWIND_CFP(hash = core_hash_from_ary(ary));
    return hash;
}
02361
/* Create a new Hash populated from the flat key/value array `ary`,
 * firing the DTrace hash-create probe when enabled. */
static VALUE
core_hash_from_ary(VALUE ary)
{
    VALUE hash = rb_hash_new();

    if (RUBY_DTRACE_HASH_CREATE_ENABLED()) {
	RUBY_DTRACE_HASH_CREATE(RARRAY_LEN(ary), rb_sourcefile(), rb_sourceline());
    }

    return core_hash_merge_ary(hash, ary);
}
02373
/* RubyVM::FrozenCore#core_hash_merge_ary: merge a flat key/value array
 * into `hash`, run in the caller's frame.  Returns `hash`. */
static VALUE
m_core_hash_merge_ary(VALUE self, VALUE hash, VALUE ary)
{
    REWIND_CFP(core_hash_merge_ary(hash, ary));
    return hash;
}
02380
02381 static VALUE
02382 core_hash_merge_ary(VALUE hash, VALUE ary)
02383 {
02384 core_hash_merge(hash, RARRAY_LEN(ary), RARRAY_CONST_PTR(ary));
02385 return hash;
02386 }
02387
/* RubyVM::FrozenCore#core_hash_merge_ptr: argv[0] is the target hash,
 * the rest is a flat key/value list merged into it in the caller's
 * frame.  Returns the hash. */
static VALUE
m_core_hash_merge_ptr(int argc, VALUE *argv, VALUE recv)
{
    VALUE hash = argv[0];

    REWIND_CFP(core_hash_merge(hash, argc-1, argv+1));

    return hash;
}
02397
02398 static int
02399 kwmerge_ii(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
02400 {
02401 if (existing) return ST_STOP;
02402 *value = arg;
02403 return ST_CONTINUE;
02404 }
02405
/* rb_hash_foreach() callback merging keyword defaults into `hash`:
 * each key must be a Symbol (TypeError otherwise); only keys not
 * already present are inserted, with a GC write barrier on insert. */
static int
kwmerge_i(VALUE key, VALUE value, VALUE hash)
{
    if (!SYMBOL_P(key)) Check_Type(key, T_SYMBOL);
    if (st_update(RHASH_TBL_RAW(hash), key, kwmerge_ii, (st_data_t)value) == 0) { /* !existing */
	RB_OBJ_WRITTEN(hash, Qundef, value);
    }
    return ST_CONTINUE;
}
02415
02416 static int
02417 kwcheck_i(VALUE key, VALUE value, VALUE hash)
02418 {
02419 if (!SYMBOL_P(key)) Check_Type(key, T_SYMBOL);
02420 return ST_CONTINUE;
02421 }
02422
/* RubyVM::FrozenCore#core_hash_merge_kwd: merge/validate keyword
 * arguments in the caller's frame; see core_hash_merge_kwd(). */
static VALUE
m_core_hash_merge_kwd(int argc, VALUE *argv, VALUE recv)
{
    VALUE hash;
    REWIND_CFP(hash = core_hash_merge_kwd(argc, argv));
    return hash;
}
02430
/* Keyword-argument merge helper, 1 or 2 arguments:
 *   (kw)        — validate that every key of kw is a Symbol; return kw.
 *   (hash, kw)  — additionally merge kw's entries into hash without
 *                 overwriting existing keys; return hash.
 * The trailing argument is converted with #to_hash first. */
static VALUE
core_hash_merge_kwd(int argc, VALUE *argv)
{
    VALUE hash, kw;
    rb_check_arity(argc, 1, 2);
    hash = argv[0];
    kw = argv[argc-1];		/* same as hash when argc == 1 */
    kw = rb_convert_type(kw, T_HASH, "Hash", "to_hash");
    if (argc < 2) hash = kw;
    rb_hash_foreach(kw, argc < 2 ? kwcheck_i : kwmerge_i, hash);
    return hash;
}
02443
02444 extern VALUE *rb_gc_stack_start;
02445 extern size_t rb_gc_stack_maxsize;
02446 #ifdef __ia64
02447 extern VALUE *rb_gc_register_stack_start;
02448 #endif
02449
02450
02451
02452
/* Debug method (RubyVM::SDR): dump the VM bug report to stderr. */
static VALUE
sdr(void)
{
    rb_vm_bugreport();
    return Qnil;
}
02459
02460
/* Debug method (RubyVM::NSDR): return the native C backtrace as an
 * Array of Strings (empty when backtrace(3) is unavailable).  The
 * trace buffer is static, so this is not reentrant. */
static VALUE
nsdr(void)
{
    VALUE ary = rb_ary_new();
#if HAVE_BACKTRACE
#include <execinfo.h>
#define MAX_NATIVE_TRACE 1024
    static void *trace[MAX_NATIVE_TRACE];
    int n = backtrace(trace, MAX_NATIVE_TRACE);
    char **syms = backtrace_symbols(trace, n);
    int i;

    if (syms == 0) {
	rb_memerror();
    }

    for (i=0; i<n; i++) {
	rb_ary_push(ary, rb_str_new2(syms[i]));
    }
    free(syms); /* backtrace_symbols() result is a single malloc block */
#endif
    return ary;
}
02484
02485 #if VM_COLLECT_USAGE_DETAILS
02486 static VALUE usage_analysis_insn_stop(VALUE self);
02487 static VALUE usage_analysis_operand_stop(VALUE self);
02488 static VALUE usage_analysis_register_stop(VALUE self);
02489 #endif
02490
02491 void
02492 Init_VM(void)
02493 {
02494 VALUE opts;
02495 VALUE klass;
02496 VALUE fcore;
02497
02498
02499 rb_cRubyVM = rb_define_class("RubyVM", rb_cObject);
02500 rb_undef_alloc_func(rb_cRubyVM);
02501 rb_undef_method(CLASS_OF(rb_cRubyVM), "new");
02502 rb_define_singleton_method(rb_cRubyVM, "stat", vm_stat, -1);
02503
02504
02505 fcore = rb_class_new(rb_cBasicObject);
02506 RBASIC(fcore)->flags = T_ICLASS;
02507 klass = rb_singleton_class(fcore);
02508 rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
02509 rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
02510 rb_define_method_id(klass, id_core_undef_method, m_core_undef_method, 2);
02511 rb_define_method_id(klass, id_core_define_method, m_core_define_method, 3);
02512 rb_define_method_id(klass, id_core_define_singleton_method, m_core_define_singleton_method, 3);
02513 rb_define_method_id(klass, id_core_set_postexe, m_core_set_postexe, 0);
02514 rb_define_method_id(klass, id_core_hash_from_ary, m_core_hash_from_ary, 1);
02515 rb_define_method_id(klass, id_core_hash_merge_ary, m_core_hash_merge_ary, 2);
02516 rb_define_method_id(klass, id_core_hash_merge_ptr, m_core_hash_merge_ptr, -1);
02517 rb_define_method_id(klass, id_core_hash_merge_kwd, m_core_hash_merge_kwd, -1);
02518 rb_define_method_id(klass, idProc, rb_block_proc, 0);
02519 rb_define_method_id(klass, idLambda, rb_block_lambda, 0);
02520 rb_obj_freeze(fcore);
02521 RBASIC_CLEAR_CLASS(klass);
02522 RCLASS_SET_SUPER(klass, 0);
02523 rb_obj_freeze(klass);
02524 rb_gc_register_mark_object(fcore);
02525 rb_mRubyVMFrozenCore = fcore;
02526
02527
02528 rb_cEnv = rb_define_class_under(rb_cRubyVM, "Env", rb_cObject);
02529 rb_undef_alloc_func(rb_cEnv);
02530 rb_undef_method(CLASS_OF(rb_cEnv), "new");
02531
02532
02533
02534
02535
02536
02537
02538
02539
02540
02541
02542
02543
02544
02545
02546
02547
02548
02549
02550
02551
02552
02553
02554
02555
02556
02557
02558
02559
02560
02561
02562
02563
02564
02565
02566
02567
02568
02569
02570
02571
02572
02573
02574
02575
02576
02577
02578
02579
02580
02581
02582
02583
02584
02585
02586
02587
02588
02589
02590
02591
02592
02593
02594
02595
02596
02597
02598
02599
02600
02601
02602
02603
02604
02605
02606
02607
02608
02609
02610
02611
02612
02613
02614
02615
02616
02617
02618
02619
02620
02621
02622
02623
02624
02625
02626
02627
02628
02629
02630
02631
02632
02633
02634
02635
02636
02637
02638
02639
02640
02641
02642
02643
02644
02645
02646
02647
02648
02649
02650
02651
02652
02653
02654
02655
02656
02657
02658
02659
02660
02661
02662
02663
02664
02665
02666
02667
02668
02669
02670
02671
02672
02673
02674
02675
02676
02677
02678
02679 rb_cThread = rb_define_class("Thread", rb_cObject);
02680 rb_undef_alloc_func(rb_cThread);
02681
02682 #if VM_COLLECT_USAGE_DETAILS
02683
02684 #define define_usage_analysis_hash(name) \
02685 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_"#name, rb_hash_new())
02686 define_usage_analysis_hash("INSN");
02687 define_usage_analysis_hash("REGS");
02688 define_usage_analysis_hash("INSN_BIGRAM");
02689
02690 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_STOP", usage_analysis_insn_stop, 0);
02691 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_STOP", usage_analysis_operand_stop, 0);
02692 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_STOP", usage_analysis_register_stop, 0);
02693 #endif
02694
02695
02696 rb_define_const(rb_cRubyVM, "OPTS", opts = rb_ary_new());
02697
02698 #if OPT_DIRECT_THREADED_CODE
02699 rb_ary_push(opts, rb_str_new2("direct threaded code"));
02700 #elif OPT_TOKEN_THREADED_CODE
02701 rb_ary_push(opts, rb_str_new2("token threaded code"));
02702 #elif OPT_CALL_THREADED_CODE
02703 rb_ary_push(opts, rb_str_new2("call threaded code"));
02704 #endif
02705
02706 #if OPT_STACK_CACHING
02707 rb_ary_push(opts, rb_str_new2("stack caching"));
02708 #endif
02709 #if OPT_OPERANDS_UNIFICATION
02710 rb_ary_push(opts, rb_str_new2("operands unification]"));
02711 #endif
02712 #if OPT_INSTRUCTIONS_UNIFICATION
02713 rb_ary_push(opts, rb_str_new2("instructions unification"));
02714 #endif
02715 #if OPT_INLINE_METHOD_CACHE
02716 rb_ary_push(opts, rb_str_new2("inline method cache"));
02717 #endif
02718 #if OPT_BLOCKINLINING
02719 rb_ary_push(opts, rb_str_new2("block inlining"));
02720 #endif
02721
02722
02723 rb_define_const(rb_cRubyVM, "INSTRUCTION_NAMES", rb_insns_name_array());
02724
02725
02726
02727
02728
02729
02730
02731 rb_define_const(rb_cRubyVM, "DEFAULT_PARAMS", vm_default_params());
02732
02733
02734 #if VMDEBUG
02735 rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
02736 rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
02737 #else
02738 (void)sdr;
02739 (void)nsdr;
02740 #endif
02741
02742
02743 {
02744 rb_vm_t *vm = ruby_current_vm;
02745 rb_thread_t *th = GET_THREAD();
02746 VALUE filename = rb_str_new2("<main>");
02747 volatile VALUE iseqval = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
02748 volatile VALUE th_self;
02749 rb_iseq_t *iseq;
02750
02751
02752 vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);
02753
02754
02755 th_self = th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
02756 rb_iv_set(th_self, "locals", rb_hash_new());
02757 vm->main_thread = th;
02758 vm->running_thread = th;
02759 th->vm = vm;
02760 th->top_wrapper = 0;
02761 th->top_self = rb_vm_top_self();
02762 rb_thread_set_current(th);
02763
02764 vm->living_threads = st_init_numtable();
02765 st_insert(vm->living_threads, th_self, (st_data_t) th->thread_id);
02766
02767 rb_gc_register_mark_object(iseqval);
02768 GetISeqPtr(iseqval, iseq);
02769 th->cfp->iseq = iseq;
02770 th->cfp->pc = iseq->iseq_encoded;
02771 th->cfp->self = th->top_self;
02772 th->cfp->klass = Qnil;
02773
02774
02775
02776
02777 rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
02778 }
02779 vm_init_redefined_flag();
02780
02781
02782 Init_vm_backtrace();
02783 VM_PROFILE_ATEXIT();
02784 }
02785
02786 void
02787 rb_vm_set_progname(VALUE filename)
02788 {
02789 rb_thread_t *th = GET_VM()->main_thread;
02790 rb_control_frame_t *cfp = (void *)(th->stack + th->stack_size);
02791 --cfp;
02792 RB_OBJ_WRITE(cfp->iseq->self, &cfp->iseq->location.path, filename);
02793 }
02794
02795 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
02796 struct rb_objspace *rb_objspace_alloc(void);
02797 #endif
02798
02799 void
02800 Init_BareVM(void)
02801 {
02802
02803 rb_vm_t * vm = ruby_mimmalloc(sizeof(*vm));
02804 rb_thread_t * th = ruby_mimmalloc(sizeof(*th));
02805 if (!vm || !th) {
02806 fprintf(stderr, "[FATAL] failed to allocate memory\n");
02807 exit(EXIT_FAILURE);
02808 }
02809 MEMZERO(th, rb_thread_t, 1);
02810 rb_thread_set_current_raw(th);
02811
02812 vm_init2(vm);
02813 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
02814 vm->objspace = rb_objspace_alloc();
02815 #endif
02816 ruby_current_vm = vm;
02817
02818 Init_native_thread();
02819 th->vm = vm;
02820 th_init(th, 0);
02821 ruby_thread_init_stack(th);
02822 vm->defined_module_hash = rb_hash_new();
02823 }
02824
02825
02826
02827 static VALUE
02828 main_to_s(VALUE obj)
02829 {
02830 return rb_str_new2("main");
02831 }
02832
02833 VALUE
02834 rb_vm_top_self(void)
02835 {
02836 return GET_VM()->top_self;
02837 }
02838
02839 void
02840 Init_top_self(void)
02841 {
02842 rb_vm_t *vm = GET_VM();
02843
02844 vm->top_self = rb_obj_alloc(rb_cObject);
02845 rb_define_singleton_method(rb_vm_top_self(), "to_s", main_to_s, 0);
02846 rb_define_alias(rb_singleton_class(rb_vm_top_self()), "inspect", "to_s");
02847
02848
02849 vm->mark_object_ary = rb_ary_tmp_new(1);
02850 }
02851
02852 VALUE *
02853 ruby_vm_verbose_ptr(rb_vm_t *vm)
02854 {
02855 return &vm->verbose;
02856 }
02857
02858 VALUE *
02859 ruby_vm_debug_ptr(rb_vm_t *vm)
02860 {
02861 return &vm->debug;
02862 }
02863
02864 VALUE *
02865 rb_ruby_verbose_ptr(void)
02866 {
02867 return ruby_vm_verbose_ptr(GET_VM());
02868 }
02869
02870 VALUE *
02871 rb_ruby_debug_ptr(void)
02872 {
02873 return ruby_vm_debug_ptr(GET_VM());
02874 }
02875
02876
02877 VALUE rb_insn_operand_intern(rb_iseq_t *iseq,
02878 VALUE insn, int op_no, VALUE op,
02879 int len, size_t pos, VALUE *pnop, VALUE child);
02880
02881 #if VM_COLLECT_USAGE_DETAILS
02882
02883 #define HASH_ASET(h, k, v) rb_hash_aset((h), (st_data_t)(k), (st_data_t)(v))
02884
02885
02886
02887
02888
02889
02890
02891
02892
02893
02894
02895
02896 static void
02897 vm_analysis_insn(int insn)
02898 {
02899 ID usage_hash;
02900 ID bigram_hash;
02901 static int prev_insn = -1;
02902
02903 VALUE uh;
02904 VALUE ihash;
02905 VALUE cv;
02906
02907 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
02908 CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");
02909 uh = rb_const_get(rb_cRubyVM, usage_hash);
02910 if ((ihash = rb_hash_aref(uh, INT2FIX(insn))) == Qnil) {
02911 ihash = rb_hash_new();
02912 HASH_ASET(uh, INT2FIX(insn), ihash);
02913 }
02914 if ((cv = rb_hash_aref(ihash, INT2FIX(-1))) == Qnil) {
02915 cv = INT2FIX(0);
02916 }
02917 HASH_ASET(ihash, INT2FIX(-1), INT2FIX(FIX2INT(cv) + 1));
02918
02919
02920 if (prev_insn != -1) {
02921 VALUE bi;
02922 VALUE ary[2];
02923 VALUE cv;
02924
02925 ary[0] = INT2FIX(prev_insn);
02926 ary[1] = INT2FIX(insn);
02927 bi = rb_ary_new4(2, &ary[0]);
02928
02929 uh = rb_const_get(rb_cRubyVM, bigram_hash);
02930 if ((cv = rb_hash_aref(uh, bi)) == Qnil) {
02931 cv = INT2FIX(0);
02932 }
02933 HASH_ASET(uh, bi, INT2FIX(FIX2INT(cv) + 1));
02934 }
02935 prev_insn = insn;
02936 }
02937
02938 static void
02939 vm_analysis_operand(int insn, int n, VALUE op)
02940 {
02941 ID usage_hash;
02942
02943 VALUE uh;
02944 VALUE ihash;
02945 VALUE ophash;
02946 VALUE valstr;
02947 VALUE cv;
02948
02949 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
02950
02951 uh = rb_const_get(rb_cRubyVM, usage_hash);
02952 if ((ihash = rb_hash_aref(uh, INT2FIX(insn))) == Qnil) {
02953 ihash = rb_hash_new();
02954 HASH_ASET(uh, INT2FIX(insn), ihash);
02955 }
02956 if ((ophash = rb_hash_aref(ihash, INT2FIX(n))) == Qnil) {
02957 ophash = rb_hash_new();
02958 HASH_ASET(ihash, INT2FIX(n), ophash);
02959 }
02960
02961 valstr = rb_insn_operand_intern(GET_THREAD()->cfp->iseq, insn, n, op, 0, 0, 0, 0);
02962
02963
02964 if ((cv = rb_hash_aref(ophash, valstr)) == Qnil) {
02965 cv = INT2FIX(0);
02966 }
02967 HASH_ASET(ophash, valstr, INT2FIX(FIX2INT(cv) + 1));
02968 }
02969
02970 static void
02971 vm_analysis_register(int reg, int isset)
02972 {
02973 ID usage_hash;
02974 VALUE uh;
02975 VALUE valstr;
02976 static const char regstrs[][5] = {
02977 "pc",
02978 "sp",
02979 "ep",
02980 "cfp",
02981 "self",
02982 "iseq",
02983 };
02984 static const char getsetstr[][4] = {
02985 "get",
02986 "set",
02987 };
02988 static VALUE syms[sizeof(regstrs) / sizeof(regstrs[0])][2];
02989
02990 VALUE cv;
02991
02992 CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
02993 if (syms[0] == 0) {
02994 char buff[0x10];
02995 int i;
02996
02997 for (i = 0; i < (int)(sizeof(regstrs) / sizeof(regstrs[0])); i++) {
02998 int j;
02999 for (j = 0; j < 2; j++) {
03000 snprintf(buff, 0x10, "%d %s %-4s", i, getsetstr[j], regstrs[i]);
03001 syms[i][j] = ID2SYM(rb_intern(buff));
03002 }
03003 }
03004 }
03005 valstr = syms[reg][isset];
03006
03007 uh = rb_const_get(rb_cRubyVM, usage_hash);
03008 if ((cv = rb_hash_aref(uh, valstr)) == Qnil) {
03009 cv = INT2FIX(0);
03010 }
03011 HASH_ASET(uh, valstr, INT2FIX(FIX2INT(cv) + 1));
03012 }
03013
03014 #undef HASH_ASET
03015
/* Usage-collection hook points, called from vm_collect_usage_*() below.
 * The USAGE_ANALYSIS_*_STOP singleton methods reset these to 0. */
void (*ruby_vm_collect_usage_func_insn)(int insn) = vm_analysis_insn;
void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op) = vm_analysis_operand;
void (*ruby_vm_collect_usage_func_register)(int reg, int isset) = vm_analysis_register;
03019
03020
03021 static VALUE
03022 usage_analysis_insn_stop(VALUE self)
03023 {
03024 ruby_vm_collect_usage_func_insn = 0;
03025 return Qnil;
03026 }
03027
03028
03029 static VALUE
03030 usage_analysis_operand_stop(VALUE self)
03031 {
03032 ruby_vm_collect_usage_func_operand = 0;
03033 return Qnil;
03034 }
03035
03036
03037 static VALUE
03038 usage_analysis_register_stop(VALUE self)
03039 {
03040 ruby_vm_collect_usage_func_register = 0;
03041 return Qnil;
03042 }
03043
03044 #else
03045
/* Usage analysis compiled out: keep NULL definitions so the symbols
 * still exist for code that tests them. */
void (*ruby_vm_collect_usage_func_insn)(int insn) = NULL;
void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op) = NULL;
void (*ruby_vm_collect_usage_func_register)(int reg, int isset) = NULL;
03049
03050 #endif
03051
03052 #if VM_COLLECT_USAGE_DETAILS
03053
03054 static void
03055 vm_collect_usage_insn(int insn)
03056 {
03057 if (RUBY_DTRACE_INSN_ENABLED()) {
03058 RUBY_DTRACE_INSN(rb_insns_name(insn));
03059 }
03060 if (ruby_vm_collect_usage_func_insn)
03061 (*ruby_vm_collect_usage_func_insn)(insn);
03062 }
03063
03064
03065
03066
03067
03068 static void
03069 vm_collect_usage_operand(int insn, int n, VALUE op)
03070 {
03071 if (RUBY_DTRACE_INSN_OPERAND_ENABLED()) {
03072 VALUE valstr;
03073
03074 valstr = rb_insn_operand_intern(GET_THREAD()->cfp->iseq, insn, n, op, 0, 0, 0, 0);
03075
03076 RUBY_DTRACE_INSN_OPERAND(RSTRING_PTR(valstr), rb_insns_name(insn));
03077 RB_GC_GUARD(valstr);
03078 }
03079 if (ruby_vm_collect_usage_func_operand)
03080 (*ruby_vm_collect_usage_func_operand)(insn, n, op);
03081 }
03082
03083
03084
03085 static void
03086 vm_collect_usage_register(int reg, int isset)
03087 {
03088 if (ruby_vm_collect_usage_func_register)
03089 (*ruby_vm_collect_usage_func_register)(reg, isset);
03090 }
03091 #endif
03092
03093