00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011 #include "ruby/ruby.h"
00012 #include "ruby/vm.h"
00013 #include "ruby/st.h"
00014 #include "ruby/encoding.h"
00015 #include "internal.h"
00016
00017 #include "gc.h"
00018 #include "vm_core.h"
00019 #include "iseq.h"
00020 #include "eval_intern.h"
00021 #include "probes.h"
00022 #include "probes_helper.h"
00023
00024 static inline VALUE *
00025 VM_EP_LEP(VALUE *ep)
00026 {
00027 while (1) {
00028 if (VM_EP_LEP_P(ep)) {
00029 return ep;
00030 }
00031 ep = VM_EP_PREV_EP(ep);
00032 }
00033 }
00034
/* Public (non-static) entry point for VM_EP_LEP(): resolve ep to its
 * local environment pointer. */
VALUE *
rb_vm_ep_local_ep(VALUE *ep)
{
    return VM_EP_LEP(ep);
}
00040
/* Local environment pointer for a control frame (resolves cfp->ep). */
static inline VALUE *
VM_CF_LEP(rb_control_frame_t *cfp)
{
    return VM_EP_LEP(cfp->ep);
}
00046
00047 static inline VALUE *
00048 VM_CF_PREV_EP(rb_control_frame_t * cfp)
00049 {
00050 return VM_EP_PREV_EP((cfp)->ep);
00051 }
00052
00053 static inline rb_block_t *
00054 VM_CF_BLOCK_PTR(rb_control_frame_t *cfp)
00055 {
00056 VALUE *ep = VM_CF_LEP(cfp);
00057 return VM_EP_BLOCK_PTR(ep);
00058 }
00059
/* Public wrapper around VM_CF_BLOCK_PTR(); returns the block attached
 * to cfp's local environment, or 0 when no block was given. */
rb_block_t *
rb_vm_control_frame_block_ptr(rb_control_frame_t *cfp)
{
    return VM_CF_BLOCK_PTR(cfp);
}
00065
00066 #if VM_COLLECT_USAGE_DETAILS
00067 static void vm_collect_usage_operand(int insn, int n, VALUE op);
00068 static void vm_collect_usage_insn(int insn);
00069 static void vm_collect_usage_register(int reg, int isset);
00070 #endif
00071
00072 static VALUE
00073 vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self, VALUE defined_class,
00074 int argc, const VALUE *argv, const rb_block_t *blockptr);
00075
00076 #include "vm_insnhelper.h"
00077 #include "vm_insnhelper.c"
00078 #include "vm_exec.h"
00079 #include "vm_exec.c"
00080
00081 #include "vm_method.c"
00082 #include "vm_eval.c"
00083
00084 #include <assert.h>
00085
00086 #define BUFSIZE 0x100
00087 #define PROCDEBUG 0
00088
00089 VALUE rb_cRubyVM;
00090 VALUE rb_cThread;
00091 VALUE rb_cEnv;
00092 VALUE rb_mRubyVMFrozenCore;
00093
00094 VALUE ruby_vm_const_missing_count = 0;
00095 char ruby_vm_redefined_flag[BOP_LAST_];
00096 rb_thread_t *ruby_current_thread = 0;
00097 rb_vm_t *ruby_current_vm = 0;
00098 rb_event_flag_t ruby_vm_event_flags;
00099
00100 static void thread_free(void *ptr);
00101
/* Bump the global VM state version; used to invalidate caches that
 * key off the state version (see vm_clear_all_cache). */
void
rb_vm_change_state(void)
{
    INC_VM_STATE_VERSION();
}
00107
00108 static void vm_clear_global_method_cache(void);
00109
/* Invalidate all inline method caches embedded in iseqs.
 * NOTE(review): intentionally empty here — inline caches appear to be
 * invalidated by the global state version bump instead; confirm before
 * relying on this hook. */
static void
vm_clear_all_inline_method_cache(void)
{




}
00118
00119 static void
00120 vm_clear_all_cache()
00121 {
00122 vm_clear_global_method_cache();
00123 vm_clear_all_inline_method_cache();
00124 ruby_vm_global_state_version = 1;
00125 }
00126
00127 void
00128 rb_vm_inc_const_missing_count(void)
00129 {
00130 ruby_vm_const_missing_count +=1;
00131 }
00132
00133
00134
/* Push a TOP frame for a toplevel iseq onto th's VM stack so vm_exec
 * can run it. Raises TypeError unless the iseq is ISEQ_TYPE_TOP. */
static void
vm_set_top_stack(rb_thread_t * th, VALUE iseqval)
{
    rb_iseq_t *iseq;
    GetISeqPtr(iseqval, iseq);

    if (iseq->type != ISEQ_TYPE_TOP) {
        rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
    }

    /* Reserve room for locals plus the iseq's maximum operand stack. */
    CHECK_VM_STACK_OVERFLOW(th->cfp, iseq->local_size + iseq->stack_max);
    vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP | VM_FRAME_FLAG_FINISH,
                  th->top_self, rb_cObject, VM_ENVVAL_BLOCK_PTR(0),
                  iseq->iseq_encoded, th->cfp->sp, iseq->local_size, 0);
}
00151
/* Push an EVAL frame that executes iseqval within base_block's scope
 * (self, klass, and environment come from the block). If cref is given
 * it is stashed in the ep[-1] special slot of the new frame. */
static void
vm_set_eval_stack(rb_thread_t * th, VALUE iseqval, const NODE *cref, rb_block_t *base_block)
{
    rb_iseq_t *iseq;
    GetISeqPtr(iseqval, iseq);

    CHECK_VM_STACK_OVERFLOW(th->cfp, iseq->local_size + iseq->stack_max);
    vm_push_frame(th, iseq, VM_FRAME_MAGIC_EVAL | VM_FRAME_FLAG_FINISH,
                  base_block->self, base_block->klass,
                  VM_ENVVAL_PREV_EP_PTR(base_block->ep), iseq->iseq_encoded,
                  th->cfp->sp, iseq->local_size, 0);

    if (cref) {
        /* ep[-1] is the frame's cref (lexical scope) slot. */
        th->cfp->ep[-1] = (VALUE)cref;
    }
}
00168
/* Push the main-script frame, evaluating in the scope of
 * TOPLEVEL_BINDING so that locals are shared with that binding. */
static void
vm_set_main_stack(rb_thread_t *th, VALUE iseqval)
{
    VALUE toplevel_binding = rb_const_get(rb_cObject, rb_intern("TOPLEVEL_BINDING"));
    rb_binding_t *bind;
    rb_iseq_t *iseq;
    rb_env_t *env;

    GetBindingPtr(toplevel_binding, bind);
    GetEnvPtr(bind->env, env);
    vm_set_eval_stack(th, iseqval, 0, &env->block);

    /* If the script defines locals, rebuild the binding's env so they
     * become visible through TOPLEVEL_BINDING.
     * NOTE(review): `bind` was already dereferenced above, so the null
     * check here looks redundant — confirm whether GetBindingPtr can
     * yield NULL. */
    GetISeqPtr(iseqval, iseq);
    if (bind && iseq->local_size > 0) {
        bind->env = rb_vm_make_env_object(th, th->cfp);
    }
}
00187
00188 rb_control_frame_t *
00189 rb_vm_get_binding_creatable_next_cfp(rb_thread_t *th, const rb_control_frame_t *cfp)
00190 {
00191 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
00192 if (cfp->iseq) {
00193 return (rb_control_frame_t *)cfp;
00194 }
00195 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00196 }
00197 return 0;
00198 }
00199
00200 rb_control_frame_t *
00201 rb_vm_get_ruby_level_next_cfp(rb_thread_t *th, const rb_control_frame_t *cfp)
00202 {
00203 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
00204 if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
00205 return (rb_control_frame_t *)cfp;
00206 }
00207 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00208 }
00209 return 0;
00210 }
00211
/* Find the Ruby-level caller frame of cfp. Unlike
 * rb_vm_get_ruby_level_next_cfp, the walk stops early at a frame that
 * was not marked VM_FRAME_FLAG_PASSED (i.e. a real C boundary). */
static rb_control_frame_t *
vm_get_ruby_level_caller_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
{
    if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        return cfp;
    }

    cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            return cfp;
        }

        /* Only skip past frames explicitly flagged as "passed". */
        if ((cfp->flag & VM_FRAME_FLAG_PASSED) == 0) {
            break;
        }
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
    return 0;
}
00233
/* Pop the current C-function frame, firing the C_RETURN event hook and
 * the dtrace cmethod-return probe before removing the frame. */
void
rb_vm_pop_cfunc_frame(void)
{
    rb_thread_t *th = GET_THREAD();
    const rb_method_entry_t *me = th->cfp->me;
    EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, th->cfp->self, me->called_id, me->klass, Qnil);
    RUBY_DTRACE_CMETHOD_RETURN_HOOK(th, me->klass, me->called_id);
    vm_pop_frame(th);
}
00243
/* Pop frames until th->cfp equals cfp, emitting return events for any
 * intervening cfunc frames (via rb_vm_pop_cfunc_frame). Used when
 * unwinding after a non-local exit. */
void
rb_vm_rewind_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
{
    while (th->cfp != cfp) {
#if VMDEBUG
        printf("skipped frame: %s\n", vm_frametype_name(th->cfp));
#endif
        if (VM_FRAME_TYPE(th->cfp) != VM_FRAME_MAGIC_CFUNC) {
            vm_pop_frame(th);
        }
        else {
            /* cfunc frames need their C_RETURN hooks fired. */
            rb_vm_pop_cfunc_frame();
        }
    }
}
00260
00261
/* Pop the current thread's top control frame (no event hooks fired). */
void
rb_frame_pop(void)
{
    rb_thread_t *th = GET_THREAD();
    vm_pop_frame(th);
}
00268
00269
00270
/* Register a C function to run when the VM shuts down. The function
 * pointer is stored cast to VALUE in the vm->at_exit array — a hack
 * that relies on sizeof(VALUE) >= sizeof(func ptr). */
void
ruby_vm_at_exit(void (*func)(rb_vm_t *))
{
    rb_ary_push((VALUE)&GET_VM()->at_exit, (VALUE)func);
}
00276
/* Pop and invoke every registered at-exit hook (LIFO order), then free
 * the hook array. Counterpart of ruby_vm_at_exit(). */
static void
ruby_vm_run_at_exit_hooks(rb_vm_t *vm)
{
    VALUE hook = (VALUE)&vm->at_exit;

    while (RARRAY_LEN(hook) > 0) {
        typedef void rb_vm_at_exit_func(rb_vm_t*);
        /* Entries are function pointers stored as VALUEs; see
         * ruby_vm_at_exit(). */
        rb_vm_at_exit_func *func = (rb_vm_at_exit_func*)rb_ary_pop(hook);
        (*func)(vm);
    }
    rb_ary_free(hook);
}
00289
00290
00291
00292
00293
00294
00295
00296
00297
00298
00299 #define ENV_IN_HEAP_P(th, env) \
00300 (!((th)->stack <= (env) && (env) < ((th)->stack + (th)->stack_size)))
00301 #define ENV_VAL(env) ((env)[1])
00302
/* GC mark function for heap-allocated environments: marks the copied
 * local slots, the previous env in the chain, and the block's self,
 * proc, and iseq. */
static void
env_mark(void * const ptr)
{
    RUBY_MARK_ENTER("env");
    if (ptr) {
        const rb_env_t * const env = ptr;

        if (env->env) {
            /* env->env holds env_size VALUEs copied off the VM stack. */
            RUBY_GC_INFO("env->env\n");
            rb_gc_mark_locations(env->env, env->env + env->env_size);
        }

        RUBY_GC_INFO("env->prev_envval\n");
        RUBY_MARK_UNLESS_NULL(env->prev_envval);
        RUBY_MARK_UNLESS_NULL(env->block.self);
        RUBY_MARK_UNLESS_NULL(env->block.proc);

        if (env->block.iseq) {
            /* block.iseq may be either an rb_iseq_t or a NODE (for
             * ifunc blocks); mark whichever object it actually is. */
            if (BUILTIN_TYPE(env->block.iseq) == T_NODE) {
                RUBY_MARK_UNLESS_NULL((VALUE)env->block.iseq);
            }
            else {
                RUBY_MARK_UNLESS_NULL(env->block.iseq->self);
            }
        }
    }
    RUBY_MARK_LEAVE("env");
}
00332
/* GC free function for heap-allocated environments: releases the
 * copied local-slot array, then the struct itself. */
static void
env_free(void * const ptr)
{
    RUBY_FREE_ENTER("env");
    if (ptr) {
        rb_env_t *const env = ptr;
        RUBY_FREE_UNLESS_NULL(env->env);
        ruby_xfree(ptr);
    }
    RUBY_FREE_LEAVE("env");
}
00344
00345 static size_t
00346 env_memsize(const void *ptr)
00347 {
00348 if (ptr) {
00349 const rb_env_t * const env = ptr;
00350 size_t size = sizeof(rb_env_t);
00351 if (env->env) {
00352 size += env->env_size * sizeof(VALUE);
00353 }
00354 return size;
00355 }
00356 return 0;
00357 }
00358
/* TypedData descriptor binding the env GC callbacks above. */
static const rb_data_type_t env_data_type = {
    "VM/env",
    {env_mark, env_free, env_memsize,},
};
00363
00364 static VALUE
00365 env_alloc(void)
00366 {
00367 VALUE obj;
00368 rb_env_t *env;
00369 obj = TypedData_Make_Struct(rb_cEnv, rb_env_t, &env_data_type, env);
00370 env->env = 0;
00371 env->prev_envval = 0;
00372 env->block.iseq = 0;
00373 return obj;
00374 }
00375
00376 static VALUE check_env_value(VALUE envval);
00377
/* Debug helper: dump an environment (and, recursively, its previous
 * envs) to stderr. Always returns 1; used via check_env_value. */
static int
check_env(rb_env_t * const env)
{
    fprintf(stderr, "---\n");
    fprintf(stderr, "envptr: %p\n", (void *)&env->block.ep[0]);
    fprintf(stderr, "envval: %10p ", (void *)env->block.ep[1]);
    dp(env->block.ep[1]);
    fprintf(stderr, "ep:     %10p\n", (void *)env->block.ep);
    if (env->prev_envval) {
        fprintf(stderr, ">>\n");
        check_env_value(env->prev_envval);
        fprintf(stderr, "<<\n");
    }
    return 1;
}
00393
00394 static VALUE
00395 check_env_value(VALUE envval)
00396 {
00397 rb_env_t *env;
00398 GetEnvPtr(envval, env);
00399
00400 if (check_env(env)) {
00401 return envval;
00402 }
00403 rb_bug("invalid env");
00404 return Qnil;
00405 }
00406
/*
 * Move one stack-allocated environment (at envptr, belonging to cfp)
 * into the GC heap, recursively heapifying any previous environment it
 * links to, up to endptr. Returns the heap env's VALUE.
 *
 * On return the stack slot *envptr holds the heap envval, and cfp->ep
 * is redirected into the heap copy so later access goes through it.
 */
static VALUE
vm_make_env_each(rb_thread_t * const th, rb_control_frame_t * const cfp,
                 VALUE *envptr, VALUE * const endptr)
{
    VALUE envval, penvval = 0;
    rb_env_t *env;
    VALUE *nenvptr;
    int i, local_size;

    /* Already heapified: the env value lives at envptr[1]. */
    if (ENV_IN_HEAP_P(th, envptr)) {
        return ENV_VAL(envptr);
    }

    if (envptr != endptr) {
        /* Heapify the previous (enclosing) environment first. */
        VALUE *penvptr = GC_GUARDED_PTR_REF(*envptr);
        rb_control_frame_t *pcfp = cfp;

        if (ENV_IN_HEAP_P(th, penvptr)) {
            penvval = ENV_VAL(penvptr);
        }
        else {
            /* Locate the control frame owning the previous ep by
             * scanning toward older frames (higher addresses). */
            while (pcfp->ep != penvptr) {
                pcfp++;
                if (pcfp->ep == 0) {
                    SDR();
                    rb_bug("invalid ep");
                }
            }
            penvval = vm_make_env_each(th, pcfp, penvptr, endptr);
            /* Re-link this env's prev pointer to the (possibly moved)
             * previous ep. */
            *envptr = VM_ENVVAL_PREV_EP_PTR(pcfp->ep);
        }
    }

    /* Allocate the heap env and copy the locals plus special slots. */
    envval = env_alloc();
    GetEnvPtr(envval, env);

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        local_size = 2;         /* non-iseq frames: just special slots */
    }
    else {
        local_size = cfp->iseq->local_size;
    }

    /* +1 for the ep slot itself, +1 for the envval slot after it. */
    env->env_size = local_size + 1 + 1;
    env->local_size = local_size;
    env->env = ALLOC_N(VALUE, env->env_size);
    env->prev_envval = penvval;

    for (i = 0; i <= local_size; i++) {
        env->env[i] = envptr[-local_size + i];
#if 0
        fprintf(stderr, "%2d ", &envptr[-local_size + i] - th->stack); dp(env->env[i]);
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            /* clear value stack for GC */
            envptr[-local_size + i] = 0;
        }
#endif
    }

    /* Stack slot now holds the heap envval; cfp->ep points into the
     * heap copy, with the envval stored just past the ep slot. */
    *envptr = envval;
    nenvptr = &env->env[i - 1];
    nenvptr[1] = envval;

    cfp->ep = nenvptr;

    /* Cache the block description so the env can outlive the frame. */
    env->block.self = cfp->self;
    env->block.ep = cfp->ep;
    env->block.iseq = cfp->iseq;

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        /* Not a Ruby-level block; no iseq to expose. */
        env->block.iseq = 0;
    }
    return envval;
}
00485
00486 static int
00487 collect_local_variables_in_iseq(rb_iseq_t *iseq, const VALUE ary)
00488 {
00489 int i;
00490 if (!iseq) return 0;
00491 for (i = 0; i < iseq->local_table_size; i++) {
00492 ID lid = iseq->local_table[i];
00493 if (rb_is_local_id(lid)) {
00494 rb_ary_push(ary, ID2SYM(lid));
00495 }
00496 }
00497 return 1;
00498 }
00499
00500 static int
00501 collect_local_variables_in_env(rb_env_t * env, const VALUE ary)
00502 {
00503
00504 while (collect_local_variables_in_iseq(env->block.iseq, ary),
00505 env->prev_envval) {
00506 GetEnvPtr(env->prev_envval, env);
00507 }
00508 return 0;
00509 }
00510
00511 static int
00512 vm_collect_local_variables_in_heap(rb_thread_t *th, VALUE *ep, VALUE ary)
00513 {
00514 if (ENV_IN_HEAP_P(th, ep)) {
00515 rb_env_t *env;
00516 GetEnvPtr(ENV_VAL(ep), env);
00517 collect_local_variables_in_env(env, ary);
00518 return 1;
00519 }
00520 else {
00521 return 0;
00522 }
00523 }
00524
00525 static void vm_rewrite_ep_in_errinfo(rb_thread_t *th);
00526 static VALUE vm_make_proc_from_block(rb_thread_t *th, rb_block_t *block);
00527 static VALUE vm_make_env_object(rb_thread_t * th, rb_control_frame_t *cfp, VALUE *blockprocptr);
00528
/* Heapify cfp's environment chain and return the env object; the
 * block-proc created as a side effect (if any) is discarded. */
VALUE
rb_vm_make_env_object(rb_thread_t * th, rb_control_frame_t *cfp)
{
    VALUE blockprocval;
    return vm_make_env_object(th, cfp, &blockprocval);
}
00535
/* Heapify the environment chain from cfp->ep up to its local ep.
 * If a block is attached to the local env, it is first converted to a
 * Proc (stored through *blockprocptr) so it survives on the heap. */
static VALUE
vm_make_env_object(rb_thread_t *th, rb_control_frame_t *cfp, VALUE *blockprocptr)
{
    VALUE envval;
    VALUE *lep = VM_CF_LEP(cfp);
    rb_block_t *blockptr = VM_EP_BLOCK_PTR(lep);

    if (blockptr) {
        /* Replace the raw block pointer with the Proc's block so the
         * env references GC-managed data. */
        VALUE blockprocval = vm_make_proc_from_block(th, blockptr);
        rb_proc_t *p;
        GetProcPtr(blockprocval, p);
        lep[0] = VM_ENVVAL_BLOCK_PTR(&p->block);
        *blockprocptr = blockprocval;
    }

    envval = vm_make_env_each(th, cfp, cfp->ep, lep);
    /* Fix up any throw objects still pointing at the old stack eps. */
    vm_rewrite_ep_in_errinfo(th);

    if (PROCDEBUG) {
        check_env_value(envval);
    }

    return envval;
}
00560
/* After envs have moved to the heap, scan rescue/ensure frames for
 * throw objects whose catch-point ep still points at the VM stack and
 * redirect them into the corresponding heap env. */
static void
vm_rewrite_ep_in_errinfo(rb_thread_t *th)
{
    rb_control_frame_t *cfp = th->cfp;
    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
        /* Only rescue/ensure frames keep errinfo in ep[-2]. */
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) &&
            (cfp->iseq->type == ISEQ_TYPE_RESCUE ||
             cfp->iseq->type == ISEQ_TYPE_ENSURE)) {
            VALUE errinfo = cfp->ep[-2];
            if (RB_TYPE_P(errinfo, T_NODE)) {   /* a throw object */
                VALUE *escape_ep = GET_THROWOBJ_CATCH_POINT(errinfo);
                if (! ENV_IN_HEAP_P(th, escape_ep)) {
                    VALUE epval = *escape_ep;
                    /* The stack slot now holds the heap envval (set by
                     * vm_make_env_each); point the throw object at the
                     * heap env's ep instead. */
                    if (!SPECIAL_CONST_P(epval) && RBASIC(epval)->klass == rb_cEnv) {
                        rb_env_t *epenv;
                        GetEnvPtr(epval, epenv);
                        SET_THROWOBJ_CATCH_POINT(errinfo, (VALUE)(epenv->env + epenv->local_size));
                    }
                }
            }
        }
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
}
00586
00587 void
00588 rb_vm_stack_to_heap(rb_thread_t *th)
00589 {
00590 rb_control_frame_t *cfp = th->cfp;
00591 while ((cfp = rb_vm_get_binding_creatable_next_cfp(th, cfp)) != 0) {
00592 rb_vm_make_env_object(th, cfp);
00593 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00594 }
00595 }
00596
00597
00598
00599 static VALUE
00600 vm_make_proc_from_block(rb_thread_t *th, rb_block_t *block)
00601 {
00602 if (!block->proc) {
00603 block->proc = rb_vm_make_proc(th, block, rb_cProc);
00604 }
00605 return block->proc;
00606 }
00607
/* Create a Proc object of class klass from block. The block's frame
 * environments are heapified first so the Proc can outlive the stack.
 * It is a bug to call this when block->proc already exists. */
VALUE
rb_vm_make_proc(rb_thread_t *th, const rb_block_t *block, VALUE klass)
{
    VALUE procval, envval, blockprocval = 0;
    rb_proc_t *proc;
    rb_control_frame_t *cfp = RUBY_VM_GET_CFP_FROM_BLOCK_PTR(block);

    if (block->proc) {
        rb_bug("rb_vm_make_proc: Proc value is already created.");
    }

    envval = vm_make_env_object(th, cfp, &blockprocval);

    if (PROCDEBUG) {
        check_env_value(envval);
    }
    procval = rb_proc_alloc(klass);
    GetProcPtr(procval, proc);
    proc->blockprocval = blockprocval;
    proc->block.self = block->self;
    proc->block.klass = block->klass;
    proc->block.ep = block->ep;
    proc->block.iseq = block->iseq;
    proc->block.proc = procval;
    proc->envval = envval;
    proc->safe_level = th->safe_level;

    if (VMDEBUG) {
        /* After heapification block->ep must not point into the VM
         * stack any more. */
        if (th->stack < block->ep && block->ep < th->stack + th->stack_size) {
            rb_bug("invalid ptr: block->ep");
        }
    }

    return procval;
}
00643
00644
00645
/* Create a Binding for src_cfp. Environments of every frame between
 * the nearest binding-creatable frame and the nearest Ruby-level frame
 * are heapified so the binding stays valid after the frames pop.
 * Raises RuntimeError at the top of a Fiber (no Ruby-level frame). */
VALUE
rb_vm_make_binding(rb_thread_t *th, const rb_control_frame_t *src_cfp)
{
    rb_control_frame_t *cfp = rb_vm_get_binding_creatable_next_cfp(th, src_cfp);
    rb_control_frame_t *ruby_level_cfp = rb_vm_get_ruby_level_next_cfp(th, src_cfp);
    VALUE bindval, envval;
    rb_binding_t *bind;
    VALUE blockprocval = 0;

    if (cfp == 0 || ruby_level_cfp == 0) {
        rb_raise(rb_eRuntimeError, "Can't create Binding Object on top of Fiber.");
    }

    /* Heapify envs frame by frame until the Ruby-level frame; the last
     * envval produced is the one the binding captures. */
    while (1) {
        envval = vm_make_env_object(th, cfp, &blockprocval);
        if (cfp == ruby_level_cfp) {
            break;
        }
        cfp = rb_vm_get_binding_creatable_next_cfp(th, RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
    }

    bindval = rb_binding_alloc(rb_cBinding);
    GetBindingPtr(bindval, bind);
    bind->env = envval;
    bind->path = ruby_level_cfp->iseq->location.path;
    bind->blockprocval = blockprocval;
    bind->first_lineno = rb_vm_get_sourceline(ruby_level_cfp);

    return bindval;
}
00676
00677
00678
/*
 * Invoke block from C code with the given self/argv.
 *
 * Three cases by block->iseq:
 *  - special const (no body): return nil;
 *  - a real iseq: set up args, push a BLOCK or LAMBDA frame, and run
 *    the VM loop;
 *  - a NODE (ifunc): dispatch to the C-function yield path.
 *
 * cref, when non-NULL, overrides the lexical scope of the new frame.
 */
static inline VALUE
invoke_block_from_c(rb_thread_t *th, const rb_block_t *block,
                    VALUE self, int argc, const VALUE *argv,
                    const rb_block_t *blockptr, const NODE *cref,
                    VALUE defined_class)
{
    if (SPECIAL_CONST_P(block->iseq))
        return Qnil;
    else if (BUILTIN_TYPE(block->iseq) != T_NODE) {
        const rb_iseq_t *iseq = block->iseq;
        const rb_control_frame_t *cfp;
        int i, opt_pc, arg_size = iseq->arg_size;
        /* Lambdas use strict argument semantics; plain blocks do not. */
        int type = block_proc_is_lambda(block->proc) ?
          VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK;

        cfp = th->cfp;
        CHECK_VM_STACK_OVERFLOW(cfp, argc + iseq->stack_max);

        /* Copy arguments onto the VM stack for the callee. */
        for (i=0; i<argc; i++) {
            cfp->sp[i] = argv[i];
        }

        opt_pc = vm_yield_setup_args(th, iseq, argc, cfp->sp, blockptr,
                                     type == VM_FRAME_MAGIC_LAMBDA);

        vm_push_frame(th, iseq, type | VM_FRAME_FLAG_FINISH,
                      self, defined_class,
                      VM_ENVVAL_PREV_EP_PTR(block->ep),
                      iseq->iseq_encoded + opt_pc,
                      cfp->sp + arg_size, iseq->local_size - arg_size,
                      th->passed_me);
        /* passed_me is consumed by exactly one frame push. */
        th->passed_me = 0;

        if (cref) {
            th->cfp->ep[-1] = (VALUE)cref;
        }

        return vm_exec(th);
    }
    else {
        return vm_yield_with_cfunc(th, block, self, argc, argv, blockptr);
    }
}
00722
00723 static inline const rb_block_t *
00724 check_block(rb_thread_t *th)
00725 {
00726 const rb_block_t *blockptr = VM_CF_BLOCK_PTR(th->cfp);
00727
00728 if (blockptr == 0) {
00729 rb_vm_localjump_error("no block given", Qnil, 0);
00730 }
00731
00732 return blockptr;
00733 }
00734
/* Yield to the current block with an explicit cref (lexical scope). */
static inline VALUE
vm_yield_with_cref(rb_thread_t *th, int argc, const VALUE *argv, const NODE *cref)
{
    const rb_block_t *blockptr = check_block(th);
    return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, cref,
                               blockptr->klass);
}
00742
/* Yield argc/argv to the block of the current frame. */
static inline VALUE
vm_yield(rb_thread_t *th, int argc, const VALUE *argv)
{
    const rb_block_t *blockptr = check_block(th);
    return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, 0,
                               blockptr->klass);
}
00750
/* Invoke proc with self/defined_class and the given arguments under a
 * fresh tag, temporarily adopting the proc's $SAFE level (unless the
 * proc came from a method). Non-local exits captured by the tag are
 * re-thrown after the safe level is restored. */
static VALUE
vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self, VALUE defined_class,
               int argc, const VALUE *argv, const rb_block_t *blockptr)
{
    VALUE val = Qundef;
    int state;
    /* volatile: survives the longjmp back through TH_PUSH_TAG. */
    volatile int stored_safe = th->safe_level;

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
        if (!proc->is_from_method) {
            th->safe_level = proc->safe_level;
        }
        val = invoke_block_from_c(th, &proc->block, self, argc, argv, blockptr, 0,
                                  defined_class);
    }
    TH_POP_TAG();

    if (!proc->is_from_method) {
        th->safe_level = stored_safe;
    }

    if (state) {
        JUMP_TAG(state);        /* re-raise the non-local exit */
    }
    return val;
}
00778
/* Public proc invocation: self and klass come from the proc's block. */
VALUE
rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc,
                  int argc, const VALUE *argv, const rb_block_t *blockptr)
{
    return vm_invoke_proc(th, proc, proc->block.self, proc->block.klass,
                          argc, argv, blockptr);
}
00786
00787
00788
/* Skip frames with no pc (dummy/finish frames) toward older frames and
 * return the first normal frame, or 0 if the stack runs out. The
 * overflow check must follow the advance: it validates the new cfp. */
static rb_control_frame_t *
vm_normal_frame(rb_thread_t *th, rb_control_frame_t *cfp)
{
    while (cfp->pc == 0) {
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
        if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
            return 0;
        }
    }
    return cfp;
}
00800
/* Read a special variable (svar) slot for cfp's nearest normal frame. */
static VALUE
vm_cfp_svar_get(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key)
{
    cfp = vm_normal_frame(th, cfp);
    return lep_svar_get(th, cfp ? VM_CF_LEP(cfp) : 0, key);
}
00807
/* Write a special variable (svar) slot for cfp's nearest normal frame. */
static void
vm_cfp_svar_set(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key, const VALUE val)
{
    cfp = vm_normal_frame(th, cfp);
    lep_svar_set(th, cfp ? VM_CF_LEP(cfp) : 0, key, val);
}
00814
/* svar read for the current thread's current frame. */
static VALUE
vm_svar_get(VALUE key)
{
    rb_thread_t *th = GET_THREAD();
    return vm_cfp_svar_get(th, th->cfp, key);
}
00821
/* svar write for the current thread's current frame. */
static void
vm_svar_set(VALUE key, VALUE val)
{
    rb_thread_t *th = GET_THREAD();
    vm_cfp_svar_set(th, th->cfp, key, val);
}
00828
/* $~ (last MatchData): svar slot 1. */
VALUE
rb_backref_get(void)
{
    return vm_svar_get(1);
}
00834
/* Set $~ (last MatchData): svar slot 1. */
void
rb_backref_set(VALUE val)
{
    vm_svar_set(1, val);
}
00840
/* $_ (last read line): svar slot 0. */
VALUE
rb_lastline_get(void)
{
    return vm_svar_get(0);
}
00846
/* Set $_ (last read line): svar slot 0. */
void
rb_lastline_set(VALUE val)
{
    vm_svar_set(0, val);
}
00852
00853
00854
00855 VALUE
00856 rb_sourcefilename(void)
00857 {
00858 rb_thread_t *th = GET_THREAD();
00859 rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00860
00861 if (cfp) {
00862 return cfp->iseq->location.path;
00863 }
00864 else {
00865 return Qnil;
00866 }
00867 }
00868
00869 const char *
00870 rb_sourcefile(void)
00871 {
00872 rb_thread_t *th = GET_THREAD();
00873 rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00874
00875 if (cfp) {
00876 return RSTRING_PTR(cfp->iseq->location.path);
00877 }
00878 else {
00879 return 0;
00880 }
00881 }
00882
00883 int
00884 rb_sourceline(void)
00885 {
00886 rb_thread_t *th = GET_THREAD();
00887 rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00888
00889 if (cfp) {
00890 return rb_vm_get_sourceline(cfp);
00891 }
00892 else {
00893 return 0;
00894 }
00895 }
00896
00897 NODE *
00898 rb_vm_cref(void)
00899 {
00900 rb_thread_t *th = GET_THREAD();
00901 rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00902
00903 if (cfp == 0) {
00904 return NULL;
00905 }
00906 return rb_vm_get_cref(cfp->iseq, cfp->ep);
00907 }
00908
00909 #if 0
00910 void
00911 debug_cref(NODE *cref)
00912 {
00913 while (cref) {
00914 dp(cref->nd_clss);
00915 printf("%ld\n", cref->nd_visi);
00916 cref = cref->nd_next;
00917 }
00918 }
00919 #endif
00920
00921 VALUE
00922 rb_vm_cbase(void)
00923 {
00924 rb_thread_t *th = GET_THREAD();
00925 rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00926
00927 if (cfp == 0) {
00928 rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
00929 }
00930 return vm_get_cbase(cfp->iseq, cfp->ep);
00931 }
00932
00933
00934
/* Build a LocalJumpError with @exit_value set to value and @reason set
 * to a symbol derived from the TAG_* reason code. */
static VALUE
make_localjump_error(const char *mesg, VALUE value, int reason)
{
    extern VALUE rb_eLocalJumpError;
    VALUE exc = rb_exc_new2(rb_eLocalJumpError, mesg);
    ID id;

    switch (reason) {
      case TAG_BREAK:
        CONST_ID(id, "break");
        break;
      case TAG_REDO:
        CONST_ID(id, "redo");
        break;
      case TAG_RETRY:
        CONST_ID(id, "retry");
        break;
      case TAG_NEXT:
        CONST_ID(id, "next");
        break;
      case TAG_RETURN:
        CONST_ID(id, "return");
        break;
      default:
        CONST_ID(id, "noreason");
        break;
    }
    rb_iv_set(exc, "@exit_value", value);
    rb_iv_set(exc, "@reason", ID2SYM(id));
    return exc;
}
00966
00967 void
00968 rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
00969 {
00970 VALUE exc = make_localjump_error(mesg, value, reason);
00971 rb_exc_raise(exc);
00972 }
00973
/* Convert a tag state into a LocalJumpError exception object, or nil
 * for states that should propagate as plain jumps. Qundef val means
 * "use the current tag's return value". */
VALUE
rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
{
    VALUE result = Qnil;

    if (val == Qundef) {
        val = GET_THREAD()->tag->retval;
    }
    switch (state) {
      case 0:
        break;
      case TAG_RETURN:
        result = make_localjump_error("unexpected return", val, state);
        break;
      case TAG_BREAK:
        result = make_localjump_error("unexpected break", val, state);
        break;
      case TAG_NEXT:
        result = make_localjump_error("unexpected next", val, state);
        break;
      case TAG_REDO:
        result = make_localjump_error("unexpected redo", Qnil, state);
        break;
      case TAG_RETRY:
        result = make_localjump_error("retry outside of rescue clause", Qnil, state);
        break;
      default:
        break;
    }
    return result;
}
01005
/* Raise a LocalJumpError for local-jump states; otherwise re-throw the
 * tag state. Never returns normally. */
void
rb_vm_jump_tag_but_local_jump(int state)
{
    VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
    if (!NIL_P(exc)) rb_exc_raise(exc);
    JUMP_TAG(state);
}
01013
01014 NORETURN(static void vm_iter_break(rb_thread_t *th, VALUE val));
01015
/* Throw a break with value val out of the current iterator: build a
 * throw object targeting the enclosing scope's ep and longjmp with
 * TAG_BREAK. Declared NORETURN above. */
static void
vm_iter_break(rb_thread_t *th, VALUE val)
{
    rb_control_frame_t *cfp = th->cfp;
    VALUE *ep = VM_CF_PREV_EP(cfp);

    th->state = TAG_BREAK;
    th->errinfo = (VALUE)NEW_THROW_OBJECT(val, (VALUE)ep, TAG_BREAK);
    TH_JUMP_TAG(th, TAG_BREAK);
}
01026
/* Break out of the current iterator with nil. */
void
rb_iter_break(void)
{
    vm_iter_break(GET_THREAD(), Qnil);
}
01032
/* Break out of the current iterator with an explicit value. */
void
rb_iter_break_value(VALUE val)
{
    vm_iter_break(GET_THREAD(), val);
}
01038
01039
01040
01041 static st_table *vm_opt_method_table = 0;
01042
/* Map a core class to its *_REDEFINED_OP_FLAG bit, or 0 for classes
 * whose operators are not specially optimized. */
static int
vm_redefinition_check_flag(VALUE klass)
{
    if (klass == rb_cFixnum) return FIXNUM_REDEFINED_OP_FLAG;
    if (klass == rb_cFloat)  return FLOAT_REDEFINED_OP_FLAG;
    if (klass == rb_cString) return STRING_REDEFINED_OP_FLAG;
    if (klass == rb_cArray)  return ARRAY_REDEFINED_OP_FLAG;
    if (klass == rb_cHash)   return HASH_REDEFINED_OP_FLAG;
    if (klass == rb_cBignum) return BIGNUM_REDEFINED_OP_FLAG;
    if (klass == rb_cSymbol) return SYMBOL_REDEFINED_OP_FLAG;
    if (klass == rb_cTime)   return TIME_REDEFINED_OP_FLAG;
    return 0;
}
01056
/* If me is one of the specially-optimized C methods (registered in
 * vm_opt_method_table), mark its basic operation as redefined for
 * klass so the interpreter falls back to a real method call. */
static void
rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VALUE klass)
{
    st_data_t bop;
    if (!me->def || me->def->type == VM_METHOD_TYPE_CFUNC) {
        if (st_lookup(vm_opt_method_table, (st_data_t)me, &bop)) {
            int flag = vm_redefinition_check_flag(klass);

            ruby_vm_redefined_flag[bop] |= flag;
        }
    }
}
01069
/* st_foreach callback: for each method in a class's m_tbl, detect
 * whether lookup now resolves to a different entry (e.g. because of a
 * prepended module) and, if so, flag the optimized op as redefined. */
static int
check_redefined_method(st_data_t key, st_data_t value, st_data_t data)
{
    ID mid = (ID)key;
    rb_method_entry_t *me = (rb_method_entry_t *)value;
    VALUE klass = (VALUE)data;
    rb_method_entry_t *newme = rb_method_entry(klass, mid, NULL);

    if (newme != me)
        rb_vm_check_redefinition_opt_method(me, me->klass);
    return ST_CONTINUE;
}
01082
/* After a module is prepended to klass, re-check every method of the
 * origin class for redefinition of optimized basic operations. No-op
 * for classes without optimized ops. */
void
rb_vm_check_redefinition_by_prepend(VALUE klass)
{
    if (!vm_redefinition_check_flag(klass)) return;
    st_foreach(RCLASS_M_TBL(RCLASS_ORIGIN(klass)), check_redefined_method,
               (st_data_t)klass);
}
01090
/* Register klass#mid (which must exist and be a C function) in
 * vm_opt_method_table as implementing basic operation bop; aborts via
 * rb_bug when the expected method is missing. */
static void
add_opt_method(VALUE klass, ID mid, VALUE bop)
{
    rb_method_entry_t *me;
    if (st_lookup(RCLASS_M_TBL(klass), mid, (void *)&me) && me->def &&
        me->def->type == VM_METHOD_TYPE_CFUNC) {
        st_insert(vm_opt_method_table, (st_data_t)me, (st_data_t)bop);
    }
    else {
        rb_bug("undefined optimized method: %s", rb_id2name(mid));
    }
}
01103
/* Build the table mapping optimized C methods (+, -, [], length, ...)
 * on core classes to their basic-operation ids, and clear all
 * redefinition flags. OP selects the operator/bop pair; C registers
 * it for one core class. */
static void
vm_init_redefined_flag(void)
{
    ID mid;
    VALUE bop;

    vm_opt_method_table = st_init_numtable();

#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
#define C(k) add_opt_method(rb_c##k, mid, bop)
    OP(PLUS, PLUS), (C(Fixnum), C(Float), C(String), C(Array));
    OP(MINUS, MINUS), (C(Fixnum), C(Float));
    OP(MULT, MULT), (C(Fixnum), C(Float));
    OP(DIV, DIV), (C(Fixnum), C(Float));
    OP(MOD, MOD), (C(Fixnum), C(Float));
    OP(Eq, EQ), (C(Fixnum), C(Float), C(String));
    OP(Eqq, EQQ), (C(Fixnum), C(Bignum), C(Float), C(Symbol), C(String));
    OP(LT, LT), (C(Fixnum), C(Float));
    OP(LE, LE), (C(Fixnum), C(Float));
    OP(GT, GT), (C(Fixnum), C(Float));
    OP(GE, GE), (C(Fixnum), C(Float));
    OP(LTLT, LTLT), (C(String), C(Array));
    OP(AREF, AREF), (C(Array), C(Hash));
    OP(ASET, ASET), (C(Array), C(Hash));
    OP(Length, LENGTH), (C(Array), C(String), C(Hash));
    OP(Size, SIZE), (C(Array), C(String), C(Hash));
    OP(EmptyP, EMPTY_P), (C(Array), C(String), C(Hash));
    OP(Succ, SUCC), (C(Fixnum), C(String), C(Time));
#undef C
#undef OP
}
01135
01136
01137
#if VMDEBUG
/* Debug helper: human-readable name for a frame's type; aborts on an
 * unknown frame magic. */
static const char *
vm_frametype_name(const rb_control_frame_t *cfp)
{
    switch (VM_FRAME_TYPE(cfp)) {
      case VM_FRAME_MAGIC_METHOD: return "method";
      case VM_FRAME_MAGIC_BLOCK:  return "block";
      case VM_FRAME_MAGIC_CLASS:  return "class";
      case VM_FRAME_MAGIC_TOP:    return "top";
      case VM_FRAME_MAGIC_CFUNC:  return "cfunc";
      case VM_FRAME_MAGIC_PROC:   return "proc";
      case VM_FRAME_MAGIC_IFUNC:  return "ifunc";
      case VM_FRAME_MAGIC_EVAL:   return "eval";
      case VM_FRAME_MAGIC_LAMBDA: return "lambda";
      case VM_FRAME_MAGIC_RESCUE: return "rescue";
      default:
        rb_bug("unknown frame");
    }
}
#endif
01158
01159
01160
01161
01162
01163
01164
01165
01166
01167
01168
01169
01170
01171
01172
01173
01174
01175
01176
01177
01178
01179
01180
01181
01182
01183
01184
01185
01186
01187
01188
01189
01190
01191
01192
01193
01194
01195
01196
01197
01198
01199
01200
01201
01202
01203
01204
01205
01206
01207
01208
01209
01210
01211
01212
01213
01214
01215
01216
01217
01218
01219
01220
01221
01222
01223
01224
01225
01226
01227
01228
01229
01230
01231
01232
01233
01234
01235
01236
01237
01238
01239
01240
01241
01242
01243
01244
01245
01246
01247
01248
01249
01250
01251
01252
01253
01254
01255
01256
01257
/*
 * Main VM dispatch wrapper: runs vm_exec_core() under a tag and handles
 * all non-local control transfers (break/return/retry/redo/next/raise)
 * by searching each frame's catch table and either resuming execution,
 * pushing a RESCUE frame, or unwinding to the caller.
 */
static VALUE
vm_exec(rb_thread_t *th)
{
    int state;
    VALUE result, err;
    VALUE initial = 0;
    VALUE *escape_ep = NULL;

    TH_PUSH_TAG(th);
    _tag.retval = Qnil;
    if ((state = EXEC_TAG()) == 0) {
      vm_loop_start:
	result = vm_exec_core(th, initial);
	if ((state = th->state) != 0) {
	    /* throw signalled from inside vm_exec_core: treat the return
	     * value as the throw object and fall into the handler below */
	    err = result;
	    th->state = 0;
	    goto exception_handler;
	}
    }
    else {
	int i;
	struct iseq_catch_table_entry *entry;
	unsigned long epc, cont_pc, cont_sp;
	VALUE catch_iseqval;
	rb_control_frame_t *cfp;
	VALUE type;

	err = th->errinfo;

      exception_handler:
	cont_pc = cont_sp = catch_iseqval = 0;

	/* skip C frames (no pc/iseq): fire C_RETURN hooks as we pop them */
	while (th->cfp->pc == 0 || th->cfp->iseq == 0) {
	    if (UNLIKELY(VM_FRAME_TYPE(th->cfp) == VM_FRAME_MAGIC_CFUNC)) {
		const rb_method_entry_t *me = th->cfp->me;
		EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, th->cfp->self, me->called_id, me->klass, Qnil);
		RUBY_DTRACE_METHOD_RETURN_HOOK(th, me->klass, me->called_id);
	    }
	    th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
	}

	cfp = th->cfp;
	/* exception pc, relative to this iseq's encoded body */
	epc = cfp->pc - cfp->iseq->iseq_encoded;

	if (state == TAG_BREAK || state == TAG_RETURN) {
	    escape_ep = GET_THROWOBJ_CATCH_POINT(err);

	    if (cfp->ep == escape_ep) {
		if (state == TAG_RETURN) {
		    if (!VM_FRAME_TYPE_FINISH_P(cfp)) {
			/* not yet at the FINISH frame: re-target the throw
			 * one frame up and downgrade it to a break */
			SET_THROWOBJ_CATCH_POINT(err, (VALUE)(cfp + 1)->ep);
			SET_THROWOBJ_STATE(err, state = TAG_BREAK);
		    }
		    else {
			/* at the FINISH frame: run any pending ensure first */
			for (i = 0; i < cfp->iseq->catch_table_size; i++) {
			    entry = &cfp->iseq->catch_table[i];
			    if (entry->start < epc && entry->end >= epc) {
				if (entry->type == CATCH_TYPE_ENSURE) {
				    catch_iseqval = entry->iseq;
				    cont_pc = entry->cont;
				    cont_sp = entry->sp;
				    break;
				}
			    }
			}
			if (!catch_iseqval) {
			    /* no ensure: the return completes here */
			    result = GET_THROWOBJ_VAL(err);
			    th->errinfo = Qnil;
			    vm_pop_frame(th);
			    goto finish_vme;
			}
		    }
		    /* through: an ensure iseq will be pushed below */
		}
		else {
		    /* break caught at its target frame: push the value and
		     * resume the interpreter loop */
#if OPT_STACK_CACHING
		    initial = (GET_THROWOBJ_VAL(err));
#else
		    *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
#endif
		    th->errinfo = Qnil;
		    goto vm_loop_start;
		}
	    }
	}

	if (state == TAG_RAISE) {
	    /* rescue and ensure entries both handle raise */
	    for (i = 0; i < cfp->iseq->catch_table_size; i++) {
		entry = &cfp->iseq->catch_table[i];
		if (entry->start < epc && entry->end >= epc) {

		    if (entry->type == CATCH_TYPE_RESCUE ||
			entry->type == CATCH_TYPE_ENSURE) {
			catch_iseqval = entry->iseq;
			cont_pc = entry->cont;
			cont_sp = entry->sp;
			break;
		    }
		}
	    }
	}
	else if (state == TAG_RETRY) {
	    for (i = 0; i < cfp->iseq->catch_table_size; i++) {
		entry = &cfp->iseq->catch_table[i];
		if (entry->start < epc && entry->end >= epc) {

		    if (entry->type == CATCH_TYPE_ENSURE) {
			catch_iseqval = entry->iseq;
			cont_pc = entry->cont;
			cont_sp = entry->sp;
			break;
		    }
		    else if (entry->type == CATCH_TYPE_RETRY) {
			VALUE *escape_ep;
			escape_ep = GET_THROWOBJ_CATCH_POINT(err);
			if (cfp->ep == escape_ep) {
			    /* retry in-place: jump pc back to the begin */
			    cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
			    th->errinfo = Qnil;
			    goto vm_loop_start;
			}
		    }
		}
	    }
	}
	else if (state == TAG_BREAK && ((VALUE)escape_ep & ~0x03) == 0) {
	    /* break with a null-ish catch point: search by entry type */
	    type = CATCH_TYPE_BREAK;

	  search_restart_point:
	    for (i = 0; i < cfp->iseq->catch_table_size; i++) {
		entry = &cfp->iseq->catch_table[i];

		if (entry->start < epc && entry->end >= epc) {
		    if (entry->type == CATCH_TYPE_ENSURE) {
			catch_iseqval = entry->iseq;
			cont_pc = entry->cont;
			cont_sp = entry->sp;
			break;
		    }
		    else if (entry->type == type) {
			/* restart at the continuation point; redo keeps the
			 * stack as-is, break/next push the thrown value */
			cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
			cfp->sp = vm_base_ptr(cfp) + entry->sp;

			if (state != TAG_REDO) {
#if OPT_STACK_CACHING
			    initial = (GET_THROWOBJ_VAL(err));
#else
			    *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
#endif
			}
			th->errinfo = Qnil;
			th->state = 0;
			goto vm_loop_start;
		    }
		}
	    }
	}
	else if (state == TAG_REDO) {
	    type = CATCH_TYPE_REDO;
	    goto search_restart_point;
	}
	else if (state == TAG_NEXT) {
	    type = CATCH_TYPE_NEXT;
	    goto search_restart_point;
	}
	else {
	    /* any other tag (e.g. throw passing through): only ensure runs */
	    for (i = 0; i < cfp->iseq->catch_table_size; i++) {
		entry = &cfp->iseq->catch_table[i];
		if (entry->start < epc && entry->end >= epc) {

		    if (entry->type == CATCH_TYPE_ENSURE) {
			catch_iseqval = entry->iseq;
			cont_pc = entry->cont;
			cont_sp = entry->sp;
			break;
		    }
		}
	    }
	}

	if (catch_iseqval != 0) {
	    /* found a handler iseq: push a RESCUE frame and re-enter
	     * the interpreter loop with the error object on top */
	    rb_iseq_t *catch_iseq;

	    /* enter catch scope */
	    GetISeqPtr(catch_iseqval, catch_iseq);
	    cfp->sp = vm_base_ptr(cfp) + cont_sp;
	    cfp->pc = cfp->iseq->iseq_encoded + cont_pc;

	    /* the handler iseq receives the error as its first local */
	    cfp->sp[0] = err;
	    vm_push_frame(th, catch_iseq, VM_FRAME_MAGIC_RESCUE,
			  cfp->self, cfp->klass,
			  VM_ENVVAL_PREV_EP_PTR(cfp->ep),
			  catch_iseq->iseq_encoded,
			  cfp->sp + 1 /* push value */,
			  catch_iseq->local_size - 1,
			  cfp->me);

	    state = 0;
	    th->state = 0;
	    th->errinfo = Qnil;
	    goto vm_loop_start;
	}
	else {
	    /* no handler in this frame: fire RETURN-style hooks and
	     * either rethrow to the caller or unwind one frame */
	    switch (VM_FRAME_TYPE(th->cfp)) {
	      case VM_FRAME_MAGIC_METHOD:
		RUBY_DTRACE_METHOD_RETURN_HOOK(th, 0, 0);
		EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_RETURN, th->cfp->self, 0, 0, Qnil);
		break;
	      case VM_FRAME_MAGIC_BLOCK:
		EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_B_RETURN, th->cfp->self, 0, 0, Qnil);
		break;
	      case VM_FRAME_MAGIC_CLASS:
		EXEC_EVENT_HOOK_AND_POP_FRAME(th, RUBY_EVENT_END, th->cfp->self, 0, 0, Qnil);
		break;
	    }

	    if (VM_FRAME_TYPE_FINISH_P(th->cfp)) {
		vm_pop_frame(th);
		th->errinfo = err;
		TH_POP_TAG2();
		JUMP_TAG(state);
	    }
	    else {
		th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
		goto exception_handler;
	    }
	}
    }
  finish_vme:
    TH_POP_TAG();
    return result;
}
01494
01495
01496
01497 VALUE
01498 rb_iseq_eval(VALUE iseqval)
01499 {
01500 rb_thread_t *th = GET_THREAD();
01501 VALUE val;
01502
01503 vm_set_top_stack(th, iseqval);
01504
01505 val = vm_exec(th);
01506 RB_GC_GUARD(iseqval);
01507 return val;
01508 }
01509
01510 VALUE
01511 rb_iseq_eval_main(VALUE iseqval)
01512 {
01513 rb_thread_t *th = GET_THREAD();
01514 VALUE val;
01515
01516 vm_set_main_stack(th, iseqval);
01517
01518 val = vm_exec(th);
01519 RB_GC_GUARD(iseqval);
01520 return val;
01521 }
01522
01523 int
01524 rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, VALUE *klassp)
01525 {
01526 rb_iseq_t *iseq = cfp->iseq;
01527 if (!iseq && cfp->me) {
01528 if (idp) *idp = cfp->me->def->original_id;
01529 if (klassp) *klassp = cfp->me->klass;
01530 return 1;
01531 }
01532 while (iseq) {
01533 if (RUBY_VM_IFUNC_P(iseq)) {
01534 if (idp) CONST_ID(*idp, "<ifunc>");
01535 if (klassp) *klassp = 0;
01536 return 1;
01537 }
01538 if (iseq->defined_method_id) {
01539 if (idp) *idp = iseq->defined_method_id;
01540 if (klassp) *klassp = iseq->klass;
01541 return 1;
01542 }
01543 if (iseq->local_iseq == iseq) {
01544 break;
01545 }
01546 iseq = iseq->parent_iseq;
01547 }
01548 return 0;
01549 }
01550
/* Convenience wrapper: id/class of the given thread's current frame. */
int
rb_thread_method_id_and_class(rb_thread_t *th, ID *idp, VALUE *klassp)
{
    return rb_vm_control_frame_id_and_class(th->cfp, idp, klassp);
}
01556
/* Convenience wrapper: id/class of the *current* thread's current frame. */
int
rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
{
    return rb_thread_method_id_and_class(GET_THREAD(), idp, klassp);
}
01562
/*
 * Build a human-readable "file:line:in `label'" string describing what
 * the thread is currently executing, or Qnil if nothing identifiable.
 * NOTE(review): the cfunc branch dereferences cfp->me without a NULL
 * check — assumes C-function frames always carry a method entry; confirm.
 */
VALUE
rb_thread_current_status(const rb_thread_t *th)
{
    const rb_control_frame_t *cfp = th->cfp;
    VALUE str = Qnil;

    if (cfp->iseq != 0) {
	if (cfp->pc != 0) {
	    rb_iseq_t *iseq = cfp->iseq;
	    int line_no = rb_vm_get_sourceline(cfp);
	    char *file = RSTRING_PTR(iseq->location.path);
	    str = rb_sprintf("%s:%d:in `%s'",
			     file, line_no, RSTRING_PTR(iseq->location.label));
	}
    }
    else if (cfp->me->def->original_id) {
	/* C-function frame: describe it by class#method */
	str = rb_sprintf("`%s#%s' (cfunc)",
			 rb_class2name(cfp->me->klass),
			 rb_id2name(cfp->me->def->original_id));
    }

    return str;
}
01586
/*
 * Call func(arg) beneath a dummy top-level frame so that backtraces and
 * block lookups see `filename' as the executing location.  The frame is
 * popped before returning func's result.
 */
VALUE
rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
		 const rb_block_t *blockptr, VALUE filename)
{
    rb_thread_t *th = GET_THREAD();
    const rb_control_frame_t *reg_cfp = th->cfp;
    /* a fresh empty TOP iseq, kept alive by `volatile' across the call */
    volatile VALUE iseqval = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
    VALUE val;

    vm_push_frame(th, DATA_PTR(iseqval), VM_FRAME_MAGIC_TOP | VM_FRAME_FLAG_FINISH,
		  recv, CLASS_OF(recv), VM_ENVVAL_BLOCK_PTR(blockptr), 0, reg_cfp->sp, 1, 0);

    val = (*func)(arg);

    vm_pop_frame(th);
    return val;
}
01604
01605
01606
01607 static int
01608 vm_mark_each_thread_func(st_data_t key, st_data_t value, st_data_t dummy)
01609 {
01610 VALUE thval = (VALUE)key;
01611 rb_gc_mark(thval);
01612 return ST_CONTINUE;
01613 }
01614
01615 void vm_trace_mark_event_hooks(rb_hook_list_t *hooks);
01616
/*
 * GC mark function for the VM object: marks every VM-owned Ruby object
 * (living threads, load-path state, trap handlers, event hooks, ...).
 */
void
rb_vm_mark(void *ptr)
{
    int i;

    RUBY_MARK_ENTER("vm");
    RUBY_GC_INFO("-------------------------------------------------\n");
    if (ptr) {
	rb_vm_t *vm = ptr;
	/* every living thread object is a GC root */
	if (vm->living_threads) {
	    st_foreach(vm->living_threads, vm_mark_each_thread_func, 0);
	}
	RUBY_MARK_UNLESS_NULL(vm->thgroup_default);
	RUBY_MARK_UNLESS_NULL(vm->mark_object_ary);
	RUBY_MARK_UNLESS_NULL(vm->load_path);
	RUBY_MARK_UNLESS_NULL(vm->load_path_snapshot);
	RUBY_MARK_UNLESS_NULL(vm->load_path_check_cache);
	RUBY_MARK_UNLESS_NULL(vm->expanded_load_path);
	RUBY_MARK_UNLESS_NULL(vm->loaded_features);
	RUBY_MARK_UNLESS_NULL(vm->loaded_features_snapshot);
	RUBY_MARK_UNLESS_NULL(vm->top_self);
	RUBY_MARK_UNLESS_NULL(vm->coverages);
	/* pre-allocated special exception objects (NoMemoryError etc.) */
	rb_gc_mark_locations(vm->special_exceptions, vm->special_exceptions + ruby_special_error_count);

	if (vm->loading_table) {
	    rb_mark_tbl(vm->loading_table);
	}
	if (vm->loaded_features_index) {
	    rb_mark_tbl(vm->loaded_features_index);
	}

	vm_trace_mark_event_hooks(&vm->event_hooks);

	/* Signal trap handler procs/strings */
	for (i = 0; i < RUBY_NSIG; i++) {
	    if (vm->trap_list[i].cmd)
		rb_gc_mark(vm->trap_list[i].cmd);
	}
	if (vm->defined_strings) {
	    rb_gc_mark_locations(vm->defined_strings, vm->defined_strings + DEFINED_EXPR);
	}
    }

    RUBY_MARK_LEAVE("vm");
}
01661
01662 #define vm_free 0
01663
/*
 * Tear down a VM at process exit: free the main thread struct, the
 * living-threads table, the object space, run at_exit hooks, destroy
 * the GVL, and finally free the VM struct itself.  Always returns 0.
 */
int
ruby_vm_destruct(rb_vm_t *vm)
{
    RUBY_FREE_ENTER("vm");
    if (vm) {
	rb_thread_t *th = vm->main_thread;
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
	struct rb_objspace *objspace = vm->objspace;
#endif
	/* the VM wrapper object must not be finalized normally */
	rb_gc_force_recycle(vm->self);
	vm->main_thread = 0;
	if (th) {
	    rb_fiber_reset_root_local_storage(th->self);
	    thread_free(th);
	}
	if (vm->living_threads) {
	    st_free_table(vm->living_threads);
	    vm->living_threads = 0;
	}
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
	if (objspace) {
	    rb_objspace_free(objspace);
	}
#endif
	/* run registered at_exit hooks before the VM memory disappears */
	ruby_vm_run_at_exit_hooks(vm);
	rb_vm_gvl_destroy(vm);
	ruby_xfree(vm);
	ruby_current_vm = 0;
    }
    RUBY_FREE_LEAVE("vm");
    return 0;
}
01696
01697 static size_t
01698 vm_memsize(const void *ptr)
01699 {
01700 if (ptr) {
01701 const rb_vm_t *vmobj = ptr;
01702 size_t size = sizeof(rb_vm_t);
01703 size += st_memsize(vmobj->living_threads);
01704 if (vmobj->defined_strings) {
01705 size += DEFINED_EXPR * sizeof(VALUE);
01706 }
01707 return size;
01708 }
01709 else {
01710 return 0;
01711 }
01712 }
01713
/* Typed-data hooks for the VM wrapper object (vm_free is 0: never freed by GC). */
static const rb_data_type_t vm_data_type = {
    "VM",
    {rb_vm_mark, vm_free, vm_memsize,},
};
01718
01719
/*
 * Build the frozen Hash exposed as RubyVM::DEFAULT_PARAMS, mapping each
 * stack-size parameter name (as a Symbol) to its configured value.
 */
static VALUE
vm_default_params(void)
{
    rb_vm_t *vm = GET_VM();
    VALUE result = rb_hash_new();
#define SET(name) rb_hash_aset(result, ID2SYM(rb_intern(#name)), SIZET2NUM(vm->default_params.name));
    SET(thread_vm_stack_size);
    SET(thread_machine_stack_size);
    SET(fiber_vm_stack_size);
    SET(fiber_machine_stack_size);
#undef SET
    rb_obj_freeze(result);
    return result;
}
01734
01735 static size_t
01736 get_param(const char *name, size_t default_value, size_t min_value)
01737 {
01738 const char *envval;
01739 size_t result = default_value;
01740 if ((envval = getenv(name)) != 0) {
01741 long val = atol(envval);
01742 if (val < (long)min_value) {
01743 val = (long)min_value;
01744 }
01745 result = (size_t)(((val -1 + RUBY_VM_SIZE_ALIGN) / RUBY_VM_SIZE_ALIGN) * RUBY_VM_SIZE_ALIGN);
01746 }
01747 if (0) fprintf(stderr, "%s: %"PRIdSIZE"\n", name, result);
01748
01749 return result;
01750 }
01751
/*
 * Clamp a machine stack size to platform minimums: Symbian gets a fixed
 * 64KB stack; elsewhere, sizes below PTHREAD_STACK_MIN are raised to
 * twice that minimum.
 */
static void
check_machine_stack_size(size_t *sizep)
{
#ifdef PTHREAD_STACK_MIN
    size_t size = *sizep;
#endif

#ifdef __SYMBIAN32__
    *sizep = 64 * 1024; /* 64KB: fixed stack size on Symbian */
#endif

#ifdef PTHREAD_STACK_MIN
    /* NOTE(review): compares the value read before the Symbian override */
    if (size < PTHREAD_STACK_MIN) {
	*sizep = PTHREAD_STACK_MIN * 2;
    }
#endif
}
01769
/*
 * Populate vm->default_params from RUBY_*_STACK_SIZE environment
 * variables (with compiled-in defaults and minimums), then sanity-check
 * the machine stack sizes against platform limits.
 */
static void
vm_default_params_setup(rb_vm_t *vm)
{
    vm->default_params.thread_vm_stack_size =
      get_param("RUBY_THREAD_VM_STACK_SIZE",
		RUBY_VM_THREAD_VM_STACK_SIZE,
		RUBY_VM_THREAD_VM_STACK_SIZE_MIN);

    vm->default_params.thread_machine_stack_size =
      get_param("RUBY_THREAD_MACHINE_STACK_SIZE",
		RUBY_VM_THREAD_MACHINE_STACK_SIZE,
		RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN);

    vm->default_params.fiber_vm_stack_size =
      get_param("RUBY_FIBER_VM_STACK_SIZE",
		RUBY_VM_FIBER_VM_STACK_SIZE,
		RUBY_VM_FIBER_VM_STACK_SIZE_MIN);

    vm->default_params.fiber_machine_stack_size =
      get_param("RUBY_FIBER_MACHINE_STACK_SIZE",
		RUBY_VM_FIBER_MACHINE_STACK_SIZE,
		RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN);

    /* machine stack sizes must respect platform minimums */
    check_machine_stack_size(&vm->default_params.thread_machine_stack_size);
    check_machine_stack_size(&vm->default_params.fiber_machine_stack_size);
}
01797
/*
 * First-stage VM initialization: zero the struct and set fields that
 * must be valid before the object system is up.
 */
static void
vm_init2(rb_vm_t *vm)
{
    MEMZERO(vm, rb_vm_t, 1);
    vm->src_encoding_index = -1;
    /* at_exit is set up as an embedded, zero-length array header;
     * klass == 0 keeps it hidden from Ruby code */
    vm->at_exit.basic.flags = (T_ARRAY | RARRAY_EMBED_FLAG) & ~RARRAY_EMBED_LEN_MASK;
    vm->at_exit.basic.klass = 0;

    vm_default_params_setup(vm);
}
01808
01809
01810
01811 #define USE_THREAD_DATA_RECYCLE 1
01812
01813 #if USE_THREAD_DATA_RECYCLE
01814 #define RECYCLE_MAX 64
01815 static VALUE *thread_recycle_stack_slot[RECYCLE_MAX];
01816 static int thread_recycle_stack_count = 0;
01817
01818 static VALUE *
01819 thread_recycle_stack(size_t size)
01820 {
01821 if (thread_recycle_stack_count) {
01822
01823 return thread_recycle_stack_slot[--thread_recycle_stack_count];
01824 }
01825 else {
01826 return ALLOC_N(VALUE, size);
01827 }
01828 }
01829
01830 #else
01831 #define thread_recycle_stack(size) ALLOC_N(VALUE, (size))
01832 #endif
01833
01834 void
01835 rb_thread_recycle_stack_release(VALUE *stack)
01836 {
01837 #if USE_THREAD_DATA_RECYCLE
01838 if (thread_recycle_stack_count < RECYCLE_MAX) {
01839 thread_recycle_stack_slot[thread_recycle_stack_count++] = stack;
01840 return;
01841 }
01842 #endif
01843 ruby_xfree(stack);
01844 }
01845
01846 #ifdef USE_THREAD_RECYCLE
01847 static rb_thread_t *
01848 thread_recycle_struct(void)
01849 {
01850 void *p = ALLOC_N(rb_thread_t, 1);
01851 memset(p, 0, sizeof(rb_thread_t));
01852 return p;
01853 }
01854 #endif
01855
/*
 * GC mark function for a Thread object: marks everything reachable from
 * the VM stack, every control frame, thread-local state, and (for
 * non-current threads) the machine stack and saved registers.
 */
void
rb_thread_mark(void *ptr)
{
    rb_thread_t *th = NULL;
    RUBY_MARK_ENTER("thread");
    if (ptr) {
	th = ptr;
	if (th->stack) {
	    VALUE *p = th->stack;
	    VALUE *sp = th->cfp->sp;
	    rb_control_frame_t *cfp = th->cfp;
	    rb_control_frame_t *limit_cfp = (void *)(th->stack + th->stack_size);

	    /* live portion of the VM value stack */
	    while (p < sp) {
		rb_gc_mark(*p++);
	    }
	    /* conservatively mark the extra slots reserved above sp */
	    rb_gc_mark_locations(p, p + th->mark_stack_len);

	    /* walk every control frame up to the stack limit */
	    while (cfp != limit_cfp) {
		rb_iseq_t *iseq = cfp->iseq;
		rb_gc_mark(cfp->proc);
		rb_gc_mark(cfp->self);
		rb_gc_mark(cfp->klass);
		if (iseq) {
		    rb_gc_mark(RUBY_VM_NORMAL_ISEQ_P(iseq) ? iseq->self : (VALUE)iseq);
		}
		if (cfp->me) {
		    /* method entries on the stack are pinned via the mark flag */
		    ((rb_method_entry_t *)cfp->me)->mark = 1;
		    rb_mark_method_entry(cfp->me);
		}
		cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
	    }
	}

	/* mark ruby objects held directly in the thread struct */
	RUBY_MARK_UNLESS_NULL(th->first_proc);
	if (th->first_proc) RUBY_MARK_UNLESS_NULL(th->first_args);

	RUBY_MARK_UNLESS_NULL(th->thgroup);
	RUBY_MARK_UNLESS_NULL(th->value);
	RUBY_MARK_UNLESS_NULL(th->errinfo);
	RUBY_MARK_UNLESS_NULL(th->pending_interrupt_queue);
	RUBY_MARK_UNLESS_NULL(th->pending_interrupt_mask_stack);
	RUBY_MARK_UNLESS_NULL(th->root_svar);
	RUBY_MARK_UNLESS_NULL(th->top_self);
	RUBY_MARK_UNLESS_NULL(th->top_wrapper);
	RUBY_MARK_UNLESS_NULL(th->fiber);
	RUBY_MARK_UNLESS_NULL(th->root_fiber);
	RUBY_MARK_UNLESS_NULL(th->stat_insn_usage);
	RUBY_MARK_UNLESS_NULL(th->last_status);

	RUBY_MARK_UNLESS_NULL(th->locking_mutex);

	rb_mark_tbl(th->local_storage);

	/* the current thread's machine stack is scanned elsewhere */
	if (GET_THREAD() != th && th->machine_stack_start && th->machine_stack_end) {
	    rb_gc_mark_machine_stack(th);
	    rb_gc_mark_locations((VALUE *)&th->machine_regs,
				 (VALUE *)(&th->machine_regs) +
				 sizeof(th->machine_regs) / sizeof(VALUE));
	}

	vm_trace_mark_event_hooks(&th->event_hooks);
    }

    RUBY_MARK_LEAVE("thread");
}
01924
/*
 * Free function for a Thread object.  Frees the VM stack (unless a
 * fiber owns it), the local-storage table, and — except for the main
 * thread, whose struct outlives GC — the thread struct itself.
 */
static void
thread_free(void *ptr)
{
    rb_thread_t *th;
    RUBY_FREE_ENTER("thread");

    if (ptr) {
	th = ptr;

	/* when a root fiber exists, the fiber owns th->stack */
	if (!th->root_fiber) {
	    RUBY_FREE_UNLESS_NULL(th->stack);
	}

	/* a dying thread must not still hold or wait on mutexes */
	if (th->locking_mutex != Qfalse) {
	    rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex);
	}
	if (th->keeping_mutexes != NULL) {
	    rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes);
	}

	if (th->local_storage) {
	    st_free_table(th->local_storage);
	}

	if (th->vm && th->vm->main_thread == th) {
	    /* main thread struct is freed in ruby_vm_destruct() instead */
	    RUBY_GC_INFO("main thread\n");
	}
	else {
#ifdef USE_SIGALTSTACK
	    if (th->altstack) {
		free(th->altstack);
	    }
#endif
	    ruby_xfree(ptr);
	}
	if (ruby_current_thread == th)
	    ruby_current_thread = NULL;
    }
    RUBY_FREE_LEAVE("thread");
}
01965
01966 static size_t
01967 thread_memsize(const void *ptr)
01968 {
01969 if (ptr) {
01970 const rb_thread_t *th = ptr;
01971 size_t size = sizeof(rb_thread_t);
01972
01973 if (!th->root_fiber) {
01974 size += th->stack_size * sizeof(VALUE);
01975 }
01976 if (th->local_storage) {
01977 size += st_memsize(th->local_storage);
01978 }
01979 return size;
01980 }
01981 else {
01982 return 0;
01983 }
01984 }
01985
#define thread_data_type ruby_threadptr_data_type
/* Typed-data hooks for Thread objects. */
const rb_data_type_t ruby_threadptr_data_type = {
    "VM/thread",
    {
	rb_thread_mark,
	thread_free,
	thread_memsize,
    },
};
01995
01996 VALUE
01997 rb_obj_is_thread(VALUE obj)
01998 {
01999 if (rb_typeddata_is_kind_of(obj, &thread_data_type)) {
02000 return Qtrue;
02001 }
02002 else {
02003 return Qfalse;
02004 }
02005 }
02006
/*
 * Allocate a Thread object wrapping a zeroed rb_thread_t (recycled when
 * USE_THREAD_RECYCLE is enabled).  Initialization happens later in
 * th_init()/ruby_thread_init().
 */
static VALUE
thread_alloc(VALUE klass)
{
    VALUE volatile obj;
#ifdef USE_THREAD_RECYCLE
    rb_thread_t *th = thread_recycle_struct();
    obj = TypedData_Wrap_Struct(klass, &thread_data_type, th);
#else
    rb_thread_t *th;
    obj = TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
#endif
    return obj;
}
02020
/*
 * Initialize a freshly-allocated thread struct: allocate its VM stack,
 * push the initial dummy TOP frame, and set default field values.
 * `self' may be 0 for the bootstrap thread (see Init_BareVM).
 */
static void
th_init(rb_thread_t *th, VALUE self)
{
    th->self = self;

    /* allocate thread stack */
#ifdef USE_SIGALTSTACK
    /* altstack of main thread is reallocated in another place */
    th->altstack = malloc(rb_sigaltstack_size());
#endif

    /* th->stack_size is stack_size in words (VALUE-sized slots) */
    th->stack_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
    th->stack = thread_recycle_stack(th->stack_size);

    /* control frames grow downward from the top of the stack */
    th->cfp = (void *)(th->stack + th->stack_size);

    vm_push_frame(th, 0 /* dummy iseq */, VM_FRAME_MAGIC_TOP | VM_FRAME_FLAG_FINISH,
		  Qnil /* dummy self */, Qnil /* dummy klass */, VM_ENVVAL_BLOCK_PTR(0), 0 /* dummy pc */, th->stack, 1, 0);

    th->status = THREAD_RUNNABLE;
    th->errinfo = Qnil;
    th->last_status = Qnil;
    th->waiting_fd = -1;
    th->root_svar = Qnil;

#if OPT_CALL_THREADED_CODE
    th->retval = Qundef;
#endif
}
02052
02053 static VALUE
02054 ruby_thread_init(VALUE self)
02055 {
02056 rb_thread_t *th;
02057 rb_vm_t *vm = GET_THREAD()->vm;
02058 GetThreadPtr(self, th);
02059
02060 th->vm = vm;
02061 th_init(th, self);
02062 rb_iv_set(self, "locals", rb_hash_new());
02063
02064 th->top_wrapper = 0;
02065 th->top_self = rb_vm_top_self();
02066 th->root_svar = Qnil;
02067 return self;
02068 }
02069
02070 VALUE
02071 rb_thread_alloc(VALUE klass)
02072 {
02073 VALUE self = thread_alloc(klass);
02074 ruby_thread_init(self);
02075 return self;
02076 }
02077
/*
 * Implement `def`: bind the method iseq to a class (or to obj's
 * singleton class for `def obj.m`), copying the cref and registering
 * the method with the visibility taken from the cref.  module_function
 * additionally installs a public singleton copy.
 */
static void
vm_define_method(rb_thread_t *th, VALUE obj, ID id, VALUE iseqval,
		 rb_num_t is_singleton, NODE *cref)
{
    VALUE klass = cref->nd_clss;
    int noex = (int)cref->nd_visi;
    rb_iseq_t *miseq;
    GetISeqPtr(iseqval, miseq);

    if (miseq->klass) {
	/* iseq is already bound to a class: work on a fresh clone */
	RB_GC_GUARD(iseqval) = rb_iseq_clone(iseqval, 0);
	GetISeqPtr(iseqval, miseq);
    }

    if (NIL_P(klass)) {
	rb_raise(rb_eTypeError, "no class/module to add method");
    }

    if (is_singleton) {
	klass = rb_singleton_class(obj);
	noex = NOEX_PUBLIC;
    }

    /* dup cref and bind the iseq to its defining class/method id */
    COPY_CREF(miseq->cref_stack, cref);
    miseq->cref_stack->nd_visi = NOEX_PUBLIC;
    miseq->klass = klass;
    miseq->defined_method_id = id;
    rb_add_method(klass, id, VM_METHOD_TYPE_ISEQ, miseq, noex);

    if (!is_singleton && noex == NOEX_MODFUNC) {
	/* module_function: also define a public singleton version */
	rb_add_method(rb_singleton_class(klass), id, VM_METHOD_TYPE_ISEQ, miseq, NOEX_PUBLIC);
    }
    INC_VM_STATE_VERSION();
}
02113
/* Run `expr' with the current control frame temporarily popped, so the
 * expression observes the caller's frame instead of the FrozenCore one. */
#define REWIND_CFP(expr) do { \
    rb_thread_t *th__ = GET_THREAD(); \
    th__->cfp++; expr; th__->cfp--; \
} while (0)
02118
/* FrozenCore#core#define_method: define `sym' on `cbase' from `iseqval'. */
static VALUE
m_core_define_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
{
    REWIND_CFP({
	vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 0, rb_vm_cref());
    });
    return Qnil;
}
02127
/* FrozenCore#core#define_singleton_method: define `sym' on cbase's singleton. */
static VALUE
m_core_define_singleton_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
{
    REWIND_CFP({
	vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 1, rb_vm_cref());
    });
    return Qnil;
}
02136
/* FrozenCore#core#set_method_alias: implements `alias sym1 sym2'. */
static VALUE
m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2)
{
    REWIND_CFP({
	rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
    });
    return Qnil;
}
02145
/* FrozenCore#core#set_variable_alias: implements `alias $a $b'. */
static VALUE
m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2)
{
    REWIND_CFP({
	rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
    });
    return Qnil;
}
02154
/* FrozenCore#core#undef_method: implements `undef sym' on `cbase'. */
static VALUE
m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
{
    REWIND_CFP({
	rb_undef(cbase, SYM2ID(sym));
	INC_VM_STATE_VERSION(); /* undef invalidates method caches */
    });
    return Qnil;
}
02164
/*
 * FrozenCore#core#set_postexe: implements END { }.  Wraps the block
 * iseq in a Proc bound to the enclosing Ruby frame and registers it to
 * run at process exit.
 */
static VALUE
m_core_set_postexe(VALUE self, VALUE iseqval)
{
    REWIND_CFP({
	rb_iseq_t *blockiseq;
	rb_block_t *blockptr;
	rb_thread_t *th = GET_THREAD();
	rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
	VALUE proc;

	if (cfp == 0) {
	    rb_bug("m_core_set_postexe: unreachable");
	}

	GetISeqPtr(iseqval, blockiseq);

	/* fabricate a block in the caller's frame from the END iseq */
	blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
	blockptr->iseq = blockiseq;
	blockptr->proc = 0;

	proc = rb_vm_make_proc(th, blockptr, rb_cProc);
	rb_set_end_proc(rb_call_end_proc, proc);
    });
    return Qnil;
}
02190
02191 static VALUE m_core_hash_merge_ary(VALUE self, VALUE hash, VALUE ary);
02192
02193 static VALUE
02194 core_hash_merge(VALUE hash, long argc, const VALUE *argv)
02195 {
02196 long i;
02197 assert(argc % 2 == 0);
02198 for (i=0; i<argc; i+=2) {
02199 rb_hash_aset(hash, argv[i], argv[i+1]);
02200 }
02201 return hash;
02202 }
02203
/* FrozenCore#core#hash_from_ary: build a Hash from a flat k/v array
 * (used by compiled hash literals); fires the DTrace hash-create probe. */
static VALUE
m_core_hash_from_ary(VALUE self, VALUE ary)
{
    VALUE hash = rb_hash_new();

    if (RUBY_DTRACE_HASH_CREATE_ENABLED()) {
	RUBY_DTRACE_HASH_CREATE(RARRAY_LEN(ary), rb_sourcefile(), rb_sourceline());
    }

    return m_core_hash_merge_ary(self, hash, ary);
}
02215
/* FrozenCore#core#hash_merge_ary: merge a flat k/v array into `hash'. */
static VALUE
m_core_hash_merge_ary(VALUE self, VALUE hash, VALUE ary)
{
    core_hash_merge(hash, RARRAY_LEN(ary), RARRAY_PTR(ary));
    return hash;
}
02222
02223 static VALUE
02224 m_core_hash_merge_ptr(int argc, VALUE *argv, VALUE recv)
02225 {
02226 VALUE hash = argv[0];
02227
02228 core_hash_merge(hash, argc-1, argv+1);
02229
02230 return hash;
02231 }
02232
02233 static int
02234 kwmerge_ii(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
02235 {
02236 if (existing) return ST_STOP;
02237 *value = arg;
02238 return ST_CONTINUE;
02239 }
02240
/* rb_hash_foreach callback: merge one keyword pair into `hash' without
 * overwriting existing keys; keys must be Symbols (TypeError otherwise). */
static int
kwmerge_i(VALUE key, VALUE value, VALUE hash)
{
    if (!SYMBOL_P(key)) Check_Type(key, T_SYMBOL);
    st_update(RHASH_TBL(hash), key, kwmerge_ii, (st_data_t)value);
    return ST_CONTINUE;
}
02248
/* FrozenCore#core#hash_merge_kwd: merge keyword-argument hash `kw'
 * (converted via to_hash) into `hash'; existing keys in `hash' win. */
static VALUE
m_core_hash_merge_kwd(VALUE recv, VALUE hash, VALUE kw)
{
    kw = rb_convert_type(kw, T_HASH, "Hash", "to_hash");
    rb_hash_foreach(kw, kwmerge_i, hash);
    return hash;
}
02256
02257 extern VALUE *rb_gc_stack_start;
02258 extern size_t rb_gc_stack_maxsize;
02259 #ifdef __ia64
02260 extern VALUE *rb_gc_register_stack_start;
02261 #endif
02262
02263
02264
02265
/* RubyVM.SDR (VMDEBUG only): dump VM state via the bug-report printer. */
static VALUE
sdr(void)
{
    rb_vm_bugreport();
    return Qnil;
}
02272
02273
/*
 * RubyVM.NSDR (VMDEBUG only): return the native C backtrace as an Array
 * of Strings, using backtrace(3) where available; empty Array otherwise.
 * NOTE(review): `trace' is a shared static buffer, so concurrent calls
 * would race — acceptable for a debug-only helper, but confirm.
 */
static VALUE
nsdr(void)
{
    VALUE ary = rb_ary_new();
#if HAVE_BACKTRACE
#include <execinfo.h>
#define MAX_NATIVE_TRACE 1024
    static void *trace[MAX_NATIVE_TRACE];
    int n = backtrace(trace, MAX_NATIVE_TRACE);
    char **syms = backtrace_symbols(trace, n);
    int i;

    if (syms == 0) {
	rb_memerror();
    }

    for (i=0; i<n; i++) {
	rb_ary_push(ary, rb_str_new2(syms[i]));
    }
    free(syms); /* backtrace_symbols allocates one block for all strings */
#endif
    return ary;
}
02297
02298 #if VM_COLLECT_USAGE_DETAILS
02299 static VALUE usage_analysis_insn_stop(VALUE self);
02300 static VALUE usage_analysis_operand_stop(VALUE self);
02301 static VALUE usage_analysis_register_stop(VALUE self);
02302 #endif
02303
02304 void
02305 Init_VM(void)
02306 {
02307 VALUE opts;
02308 VALUE klass;
02309 VALUE fcore;
02310
02311
02312 rb_cRubyVM = rb_define_class("RubyVM", rb_cObject);
02313 rb_undef_alloc_func(rb_cRubyVM);
02314 rb_undef_method(CLASS_OF(rb_cRubyVM), "new");
02315
02316
02317 fcore = rb_class_new(rb_cBasicObject);
02318 RBASIC(fcore)->flags = T_ICLASS;
02319 klass = rb_singleton_class(fcore);
02320 rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
02321 rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
02322 rb_define_method_id(klass, id_core_undef_method, m_core_undef_method, 2);
02323 rb_define_method_id(klass, id_core_define_method, m_core_define_method, 3);
02324 rb_define_method_id(klass, id_core_define_singleton_method, m_core_define_singleton_method, 3);
02325 rb_define_method_id(klass, id_core_set_postexe, m_core_set_postexe, 1);
02326 rb_define_method_id(klass, id_core_hash_from_ary, m_core_hash_from_ary, 1);
02327 rb_define_method_id(klass, id_core_hash_merge_ary, m_core_hash_merge_ary, 2);
02328 rb_define_method_id(klass, id_core_hash_merge_ptr, m_core_hash_merge_ptr, -1);
02329 rb_define_method_id(klass, id_core_hash_merge_kwd, m_core_hash_merge_kwd, 2);
02330 rb_define_method_id(klass, idProc, rb_block_proc, 0);
02331 rb_define_method_id(klass, idLambda, rb_block_lambda, 0);
02332 rb_obj_freeze(fcore);
02333 rb_gc_register_mark_object(fcore);
02334 rb_mRubyVMFrozenCore = fcore;
02335
02336
02337 rb_cEnv = rb_define_class_under(rb_cRubyVM, "Env", rb_cObject);
02338 rb_undef_alloc_func(rb_cEnv);
02339 rb_undef_method(CLASS_OF(rb_cEnv), "new");
02340
02341
02342 rb_cThread = rb_define_class("Thread", rb_cObject);
02343 rb_undef_alloc_func(rb_cThread);
02344
02345 #if VM_COLLECT_USAGE_DETAILS
02346
02347 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN", rb_hash_new());
02348 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_REGS", rb_hash_new());
02349 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN_BIGRAM", rb_hash_new());
02350
02351 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_STOP", usage_analysis_insn_stop, 0);
02352 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_STOP", usage_analysis_operand_stop, 0);
02353 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_STOP", usage_analysis_register_stop, 0);
02354 #endif
02355
02356
02357 rb_define_const(rb_cRubyVM, "OPTS", opts = rb_ary_new());
02358
02359 #if OPT_DIRECT_THREADED_CODE
02360 rb_ary_push(opts, rb_str_new2("direct threaded code"));
02361 #elif OPT_TOKEN_THREADED_CODE
02362 rb_ary_push(opts, rb_str_new2("token threaded code"));
02363 #elif OPT_CALL_THREADED_CODE
02364 rb_ary_push(opts, rb_str_new2("call threaded code"));
02365 #endif
02366
02367 #if OPT_STACK_CACHING
02368 rb_ary_push(opts, rb_str_new2("stack caching"));
02369 #endif
02370 #if OPT_OPERANDS_UNIFICATION
02371 rb_ary_push(opts, rb_str_new2("operands unification]"));
02372 #endif
02373 #if OPT_INSTRUCTIONS_UNIFICATION
02374 rb_ary_push(opts, rb_str_new2("instructions unification"));
02375 #endif
02376 #if OPT_INLINE_METHOD_CACHE
02377 rb_ary_push(opts, rb_str_new2("inline method cache"));
02378 #endif
02379 #if OPT_BLOCKINLINING
02380 rb_ary_push(opts, rb_str_new2("block inlining"));
02381 #endif
02382
02383
02384 rb_define_const(rb_cRubyVM, "INSTRUCTION_NAMES", rb_insns_name_array());
02385
02386
02387
02388
02389
02390
02391
02392 rb_define_const(rb_cRubyVM, "DEFAULT_PARAMS", vm_default_params());
02393
02394
02395 #if VMDEBUG
02396 rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
02397 rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
02398 #else
02399 (void)sdr;
02400 (void)nsdr;
02401 #endif
02402
02403
02404 {
02405 rb_vm_t *vm = ruby_current_vm;
02406 rb_thread_t *th = GET_THREAD();
02407 VALUE filename = rb_str_new2("<main>");
02408 volatile VALUE iseqval = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
02409 volatile VALUE th_self;
02410 rb_iseq_t *iseq;
02411
02412
02413 vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);
02414
02415
02416 th_self = th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
02417 rb_iv_set(th_self, "locals", rb_hash_new());
02418 vm->main_thread = th;
02419 vm->running_thread = th;
02420 th->vm = vm;
02421 th->top_wrapper = 0;
02422 th->top_self = rb_vm_top_self();
02423 rb_thread_set_current(th);
02424
02425 vm->living_threads = st_init_numtable();
02426 st_insert(vm->living_threads, th_self, (st_data_t) th->thread_id);
02427
02428 rb_gc_register_mark_object(iseqval);
02429 GetISeqPtr(iseqval, iseq);
02430 th->cfp->iseq = iseq;
02431 th->cfp->pc = iseq->iseq_encoded;
02432 th->cfp->self = th->top_self;
02433 th->cfp->klass = Qnil;
02434
02435
02436
02437
02438 rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
02439 }
02440 vm_init_redefined_flag();
02441
02442
02443 Init_vm_backtrace();
02444 VM_PROFILE_ATEXIT();
02445 }
02446
/*
 * Rewrite the path of the main thread's bottom-most frame iseq so that
 * toplevel backtraces show the program name instead of "<main>".
 */
void
rb_vm_set_progname(VALUE filename)
{
    rb_thread_t *th = GET_VM()->main_thread;
    /* the first frame pushed lives just below the top of the stack */
    rb_control_frame_t *cfp = (void *)(th->stack + th->stack_size);
    --cfp;
    cfp->iseq->location.path = filename;
}
02455
02456 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
02457 struct rb_objspace *rb_objspace_alloc(void);
02458 #endif
02459
/*
 * Earliest VM bootstrap, before the object system exists: allocate the
 * VM and main-thread structs with raw malloc, wire them together, and
 * start native threading.  Object wrappers are created later in Init_VM().
 */
void
Init_BareVM(void)
{
    /* VM bootstrap: phase 1 */
    rb_vm_t * vm = ruby_mimmalloc(sizeof(*vm));
    rb_thread_t * th = ruby_mimmalloc(sizeof(*th));
    if (!vm || !th) {
	/* cannot raise: no exception machinery exists yet */
	fprintf(stderr, "[FATAL] failed to allocate memory\n");
	exit(EXIT_FAILURE);
    }
    MEMZERO(th, rb_thread_t, 1);
    rb_thread_set_current_raw(th);

    vm_init2(vm);
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
    vm->objspace = rb_objspace_alloc();
#endif
    ruby_current_vm = vm;

    Init_native_thread();
    th->vm = vm;
    th_init(th, 0);
    ruby_thread_init_stack(th);
}
02484
02485
02486
02487 static VALUE
02488 main_to_s(VALUE obj)
02489 {
02490 return rb_str_new2("main");
02491 }
02492
02493 VALUE
02494 rb_vm_top_self(void)
02495 {
02496 return GET_VM()->top_self;
02497 }
02498
02499 void
02500 Init_top_self(void)
02501 {
02502 rb_vm_t *vm = GET_VM();
02503
02504 vm->top_self = rb_obj_alloc(rb_cObject);
02505 rb_define_singleton_method(rb_vm_top_self(), "to_s", main_to_s, 0);
02506 rb_define_alias(rb_singleton_class(rb_vm_top_self()), "inspect", "to_s");
02507
02508
02509 vm->mark_object_ary = rb_ary_tmp_new(1);
02510 }
02511
/* Return the address of the given VM's $VERBOSE storage. */
VALUE *
ruby_vm_verbose_ptr(rb_vm_t *vm)
{
    return &vm->verbose;
}
02517
/* Return the address of the given VM's $DEBUG storage. */
VALUE *
ruby_vm_debug_ptr(rb_vm_t *vm)
{
    return &vm->debug;
}
02523
/* Convenience wrapper: $VERBOSE storage of the current VM. */
VALUE *
rb_ruby_verbose_ptr(void)
{
    return ruby_vm_verbose_ptr(GET_VM());
}
02529
/* Convenience wrapper: $DEBUG storage of the current VM. */
VALUE *
rb_ruby_debug_ptr(void)
{
    return ruby_vm_debug_ptr(GET_VM());
}
02535
02536
02537 VALUE insn_operand_intern(rb_iseq_t *iseq,
02538 VALUE insn, int op_no, VALUE op,
02539 int len, size_t pos, VALUE *pnop, VALUE child);
02540
02541 #if VM_COLLECT_USAGE_DETAILS
02542
02543 #define HASH_ASET(h, k, v) st_insert(RHASH_TBL(h), (st_data_t)(k), (st_data_t)(v))
02544
02545
02546
02547
02548
02549
02550
02551
02552
02553
02554
02555
02556 static void
02557 vm_analysis_insn(int insn)
02558 {
02559 ID usage_hash;
02560 ID bigram_hash;
02561 static int prev_insn = -1;
02562
02563 VALUE uh;
02564 VALUE ihash;
02565 VALUE cv;
02566
02567 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
02568 CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");
02569 uh = rb_const_get(rb_cRubyVM, usage_hash);
02570 if ((ihash = rb_hash_aref(uh, INT2FIX(insn))) == Qnil) {
02571 ihash = rb_hash_new();
02572 HASH_ASET(uh, INT2FIX(insn), ihash);
02573 }
02574 if ((cv = rb_hash_aref(ihash, INT2FIX(-1))) == Qnil) {
02575 cv = INT2FIX(0);
02576 }
02577 HASH_ASET(ihash, INT2FIX(-1), INT2FIX(FIX2INT(cv) + 1));
02578
02579
02580 if (prev_insn != -1) {
02581 VALUE bi;
02582 VALUE ary[2];
02583 VALUE cv;
02584
02585 ary[0] = INT2FIX(prev_insn);
02586 ary[1] = INT2FIX(insn);
02587 bi = rb_ary_new4(2, &ary[0]);
02588
02589 uh = rb_const_get(rb_cRubyVM, bigram_hash);
02590 if ((cv = rb_hash_aref(uh, bi)) == Qnil) {
02591 cv = INT2FIX(0);
02592 }
02593 HASH_ASET(uh, bi, INT2FIX(FIX2INT(cv) + 1));
02594 }
02595 prev_insn = insn;
02596 }
02597
02598 static void
02599 vm_analysis_operand(int insn, int n, VALUE op)
02600 {
02601 ID usage_hash;
02602
02603 VALUE uh;
02604 VALUE ihash;
02605 VALUE ophash;
02606 VALUE valstr;
02607 VALUE cv;
02608
02609 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
02610
02611 uh = rb_const_get(rb_cRubyVM, usage_hash);
02612 if ((ihash = rb_hash_aref(uh, INT2FIX(insn))) == Qnil) {
02613 ihash = rb_hash_new();
02614 HASH_ASET(uh, INT2FIX(insn), ihash);
02615 }
02616 if ((ophash = rb_hash_aref(ihash, INT2FIX(n))) == Qnil) {
02617 ophash = rb_hash_new();
02618 HASH_ASET(ihash, INT2FIX(n), ophash);
02619 }
02620
02621 valstr = insn_operand_intern(GET_THREAD()->cfp->iseq, insn, n, op, 0, 0, 0, 0);
02622
02623
02624 if ((cv = rb_hash_aref(ophash, valstr)) == Qnil) {
02625 cv = INT2FIX(0);
02626 }
02627 HASH_ASET(ophash, valstr, INT2FIX(FIX2INT(cv) + 1));
02628 }
02629
02630 static void
02631 vm_analysis_register(int reg, int isset)
02632 {
02633 ID usage_hash;
02634 VALUE uh;
02635 VALUE valstr;
02636 static const char regstrs[][5] = {
02637 "pc",
02638 "sp",
02639 "ep",
02640 "cfp",
02641 "self",
02642 "iseq",
02643 };
02644 static const char getsetstr[][4] = {
02645 "get",
02646 "set",
02647 };
02648 static VALUE syms[sizeof(regstrs) / sizeof(regstrs[0])][2];
02649
02650 VALUE cv;
02651
02652 CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
02653 if (syms[0] == 0) {
02654 char buff[0x10];
02655 int i;
02656
02657 for (i = 0; i < (int)(sizeof(regstrs) / sizeof(regstrs[0])); i++) {
02658 int j;
02659 for (j = 0; j < 2; j++) {
02660 snprintf(buff, 0x10, "%d %s %-4s", i, getsetstr[j], regstrs[i]);
02661 syms[i][j] = ID2SYM(rb_intern(buff));
02662 }
02663 }
02664 }
02665 valstr = syms[reg][isset];
02666
02667 uh = rb_const_get(rb_cRubyVM, usage_hash);
02668 if ((cv = rb_hash_aref(uh, valstr)) == Qnil) {
02669 cv = INT2FIX(0);
02670 }
02671 HASH_ASET(uh, valstr, INT2FIX(FIX2INT(cv) + 1));
02672 }
02673
02674 #undef HASH_ASET
02675
/* Usage-collection hooks: default to the analysis collectors above; the
 * usage_analysis_*_stop functions below reset them to 0 to disable
 * collection at runtime. */
void (*ruby_vm_collect_usage_func_insn)(int insn) = vm_analysis_insn;
void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op) = vm_analysis_operand;
void (*ruby_vm_collect_usage_func_register)(int reg, int isset) = vm_analysis_register;
02679
02680
02681 static VALUE
02682 usage_analysis_insn_stop(VALUE self)
02683 {
02684 ruby_vm_collect_usage_func_insn = 0;
02685 return Qnil;
02686 }
02687
02688
02689 static VALUE
02690 usage_analysis_operand_stop(VALUE self)
02691 {
02692 ruby_vm_collect_usage_func_operand = 0;
02693 return Qnil;
02694 }
02695
02696
02697 static VALUE
02698 usage_analysis_register_stop(VALUE self)
02699 {
02700 ruby_vm_collect_usage_func_register = 0;
02701 return Qnil;
02702 }
02703
02704 #else
02705
/* Usage collection compiled out: keep the hooks defined (other translation
 * units reference them) but permanently disabled. */
void (*ruby_vm_collect_usage_func_insn)(int insn) = NULL;
void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op) = NULL;
void (*ruby_vm_collect_usage_func_register)(int reg, int isset) = NULL;
02709
02710 #endif
02711
02712 #if VM_COLLECT_USAGE_DETAILS
02713
02714 static void
02715 vm_collect_usage_insn(int insn)
02716 {
02717 if (RUBY_DTRACE_INSN_ENABLED()) {
02718 RUBY_DTRACE_INSN(rb_insns_name(insn));
02719 }
02720 if (ruby_vm_collect_usage_func_insn)
02721 (*ruby_vm_collect_usage_func_insn)(insn);
02722 }
02723
02724
02725
02726
02727
02728 static void
02729 vm_collect_usage_operand(int insn, int n, VALUE op)
02730 {
02731 if (RUBY_DTRACE_INSN_OPERAND_ENABLED()) {
02732 VALUE valstr;
02733
02734 valstr = insn_operand_intern(GET_THREAD()->cfp->iseq, insn, n, op, 0, 0, 0, 0);
02735
02736 RUBY_DTRACE_INSN_OPERAND(RSTRING_PTR(valstr), rb_insns_name(insn));
02737 RB_GC_GUARD(valstr);
02738 }
02739 if (ruby_vm_collect_usage_func_operand)
02740 (*ruby_vm_collect_usage_func_operand)(insn, n, op);
02741 }
02742
02743
02744
02745 static void
02746 vm_collect_usage_register(int reg, int isset)
02747 {
02748 if (ruby_vm_collect_usage_func_register)
02749 (*ruby_vm_collect_usage_func_register)(reg, isset);
02750 }
02751 #endif
02752
02753