00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012 #include "insns.inc"
00013 #include <math.h>
00014 #include "constant.h"
00015 #include "internal.h"
00016 #include "probes.h"
00017 #include "probes_helper.h"
00018
00019
00020
00021 #ifndef INLINE
00022 #define INLINE inline
00023 #endif
00024
00025 static rb_control_frame_t *vm_get_ruby_level_caller_cfp(rb_thread_t *th, rb_control_frame_t *cfp);
00026
/* Raise the preallocated SystemStackError; avoids allocating while the
 * machine/VM stack is already exhausted. */
static void
vm_stackoverflow(void)
{
    rb_exc_raise(sysstack_error);
}
00032
/*
 * Push a new control frame onto th's VM stack and make it current.
 *
 * iseq/type/self/klass/specval/pc describe the new frame; sp points at the
 * first free stack slot for it.  local_size slots are nil-filled, then
 * specval (block pointer or previous-EP word) is stored; that slot becomes
 * the frame's EP.  stack_max is the additional stack the iseq may consume
 * and is checked up front.  Returns the newly pushed (current) frame.
 */
static inline rb_control_frame_t *
vm_push_frame(rb_thread_t *th,
              const rb_iseq_t *iseq,
              VALUE type,
              VALUE self,
              VALUE klass,
              VALUE specval,
              const VALUE *pc,
              VALUE *sp,
              int local_size,
              const rb_method_entry_t *me,
              size_t stack_max)
{
    rb_control_frame_t *const cfp = th->cfp - 1; /* control frames grow downward */
    int i;

    /* verify there is room for locals + iseq stack before writing anything */
    CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + (int)stack_max);

    th->cfp = cfp;

    /* initialize local variables to nil */
    for (i=0; i < local_size; i++) {
        *sp++ = Qnil;
    }

    /* store the special value (block ptr / prev EP); this slot is the EP */
    *sp = specval;

    /* initialize the remaining frame fields */
    cfp->pc = (VALUE *)pc;
    cfp->sp = sp + 1;
#if VM_DEBUG_BP_CHECK
    cfp->bp_check = sp + 1;
#endif
    cfp->ep = sp;
    cfp->iseq = (rb_iseq_t *) iseq;
    cfp->flag = type;
    cfp->self = self;
    cfp->block_iseq = 0;
    cfp->proc = 0;
    cfp->me = me;
    if (klass) {
        cfp->klass = klass;
    }
    else {
        /* no explicit klass: inherit from the previous frame when one exists */
        rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
        if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, prev_cfp)) {
            cfp->klass = Qnil;
        }
        else {
            cfp->klass = prev_cfp->klass;
        }
    }

    if (VMDEBUG == 2) {
        SDR();
    }

    return cfp;
}
00097
/* Pop the current control frame: the previous frame becomes current. */
static inline void
vm_pop_frame(rb_thread_t *th)
{
    th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);

    if (VMDEBUG == 2) {
        SDR();
    }
}
00107
00108
00109 static inline VALUE
00110 rb_arg_error_new(int argc, int min, int max)
00111 {
00112 VALUE err_mess = 0;
00113 if (min == max) {
00114 err_mess = rb_sprintf("wrong number of arguments (%d for %d)", argc, min);
00115 }
00116 else if (max == UNLIMITED_ARGUMENTS) {
00117 err_mess = rb_sprintf("wrong number of arguments (%d for %d+)", argc, min);
00118 }
00119 else {
00120 err_mess = rb_sprintf("wrong number of arguments (%d for %d..%d)", argc, min, max);
00121 }
00122 return rb_exc_new3(rb_eArgError, err_mess);
00123 }
00124
NORETURN(static void argument_error(const rb_iseq_t *iseq, int miss_argc, int min_argc, int max_argc));
/*
 * Raise an ArgumentError for an arity mismatch.  When an iseq is given, a
 * dummy method frame is pushed first so the captured backtrace points at
 * the called method rather than the caller, then popped again.
 */
static void
argument_error(const rb_iseq_t *iseq, int miss_argc, int min_argc, int max_argc)
{
    rb_thread_t *th = GET_THREAD();
    VALUE exc = rb_arg_error_new(miss_argc, min_argc, max_argc);
    VALUE at;

    if (iseq) {
        /* temporary frame only for backtrace generation */
        vm_push_frame(th, iseq, VM_FRAME_MAGIC_METHOD, Qnil , Qnil , Qnil ,
                      iseq->iseq_encoded, th->cfp->sp, 0 , 0 , 0 );
        at = rb_vm_backtrace_object();
        vm_pop_frame(th);
    }
    else {
        at = rb_vm_backtrace_object();
    }

    rb_iv_set(exc, "bt_locations", at);
    rb_funcall(exc, rb_intern("set_backtrace"), 1, at);
    rb_exc_raise(exc);
}
00147
00148 void
00149 rb_error_arity(int argc, int min, int max)
00150 {
00151 rb_exc_raise(rb_arg_error_new(argc, min, max));
00152 }
00153
00154
00155
/*
 * Return the svar node holding special variables for the local EP `lep`,
 * creating it lazily.  For the thread's root LEP (or a NULL lep) the node
 * lives in th->root_svar; otherwise it is stored at lep[-1].
 */
static inline NODE *
lep_svar_place(rb_thread_t *th, VALUE *lep)
{
    VALUE *svar;

    if (lep && th->root_lep != lep) {
        svar = &lep[-1];
    }
    else {
        svar = &th->root_svar;
    }
    if (NIL_P(*svar)) {
        /* allocate an empty node to hold the special-variable slots */
        *svar = (VALUE)NEW_IF(Qnil, Qnil, Qnil);
    }
    return (NODE *)*svar;
}
00172
00173 static VALUE
00174 lep_svar_get(rb_thread_t *th, VALUE *lep, rb_num_t key)
00175 {
00176 NODE *svar = lep_svar_place(th, lep);
00177
00178 switch (key) {
00179 case 0:
00180 return svar->u1.value;
00181 case 1:
00182 return svar->u2.value;
00183 default: {
00184 const VALUE ary = svar->u3.value;
00185
00186 if (NIL_P(ary)) {
00187 return Qnil;
00188 }
00189 else {
00190 return rb_ary_entry(ary, key - DEFAULT_SPECIAL_VAR_COUNT);
00191 }
00192 }
00193 }
00194 }
00195
/*
 * Write a special-variable slot of the svar node for `lep`.  Keys 0 and 1
 * are the fixed slots; larger keys go into an array kept in u3 (created on
 * demand).  All stores go through RB_OBJ_WRITE for GC write barriers.
 */
static void
lep_svar_set(rb_thread_t *th, VALUE *lep, rb_num_t key, VALUE val)
{
    NODE *svar = lep_svar_place(th, lep);

    switch (key) {
      case 0:
        RB_OBJ_WRITE(svar, &svar->u1.value, val);
        return;
      case 1:
        RB_OBJ_WRITE(svar, &svar->u2.value, val);
        return;
      default: {
        VALUE ary = svar->u3.value;

        if (NIL_P(ary)) {
            /* lazily create the extension array */
            RB_OBJ_WRITE(svar, &svar->u3.value, ary = rb_ary_new());
        }
        rb_ary_store(ary, key - DEFAULT_SPECIAL_VAR_COUNT, val);
      }
    }
}
00218
/*
 * Fetch a special variable.  type == 0 reads a plain svar slot by key.
 * Otherwise the back-ref (svar key 1) is consulted: an odd `type` encodes
 * a named group in (type >> 1) — '&', '`', '\'', '+' — and an even `type`
 * encodes the nth match group as (type >> 1).
 */
static inline VALUE
vm_getspecial(rb_thread_t *th, VALUE *lep, rb_num_t key, rb_num_t type)
{
    VALUE val;

    if (type == 0) {
        val = lep_svar_get(th, lep, key);
    }
    else {
        VALUE backref = lep_svar_get(th, lep, 1);

        if (type & 0x01) {
            /* named back-ref variable */
            switch (type >> 1) {
              case '&':
                val = rb_reg_last_match(backref);
                break;
              case '`':
                val = rb_reg_match_pre(backref);
                break;
              case '\'':
                val = rb_reg_match_post(backref);
                break;
              case '+':
                val = rb_reg_match_last(backref);
                break;
              default:
                rb_bug("unexpected back-ref");
            }
        }
        else {
            /* numbered match group */
            val = rb_reg_nth_match((int)(type >> 1), backref);
        }
    }
    return val;
}
00254
/*
 * Walk the EP chain outward looking for a cref.  At the local EP the
 * iseq's own cref_stack is used (NULL for non-normal iseqs); otherwise a
 * cref stored at ep[-1] wins if present.
 */
static NODE *
vm_get_cref0(const rb_iseq_t *iseq, const VALUE *ep)
{
    while (1) {
        if (VM_EP_LEP_P(ep)) {
            if (!RUBY_VM_NORMAL_ISEQ_P(iseq)) return NULL;
            return iseq->cref_stack;
        }
        else if (ep[-1] != Qnil) {
            return (NODE *)ep[-1];
        }
        ep = VM_EP_PREV_EP(ep);
    }
}
00269
00270 NODE *
00271 rb_vm_get_cref(const rb_iseq_t *iseq, const VALUE *ep)
00272 {
00273 NODE *cref = vm_get_cref0(iseq, ep);
00274
00275 if (cref == 0) {
00276 rb_bug("rb_vm_get_cref: unreachable");
00277 }
00278 return cref;
00279 }
00280
/*
 * Copy the cref chain starting at `node`, replacing the first entry whose
 * class is old_klass with new_klass; the remainder of the original chain
 * is shared from that point on.  The copy is written through
 * *new_cref_ptr, link by link.
 */
void
rb_vm_rewrite_cref_stack(NODE *node, VALUE old_klass, VALUE new_klass, NODE **new_cref_ptr)
{
    NODE *new_node;
    while (node) {
        if (node->nd_clss == old_klass) {
            /* found the entry to replace: share the tail and stop */
            new_node = NEW_CREF(new_klass);
            COPY_CREF_OMOD(new_node, node);
            RB_OBJ_WRITE(new_node, &new_node->nd_next, node->nd_next);
            *new_cref_ptr = new_node;
            return;
        }
        /* copy this entry and keep walking */
        new_node = NEW_CREF(node->nd_clss);
        COPY_CREF_OMOD(new_node, node);
        node = node->nd_next;
        *new_cref_ptr = new_node;
        new_cref_ptr = &new_node->nd_next;
    }
    *new_cref_ptr = NULL;
}
00301
/*
 * Push a new cref for `klass` with visibility `noex` on top of the current
 * lexical scope.  The parent cref comes from the given block, or else from
 * the nearest Ruby-level caller frame.  Refinements active in the parent
 * are propagated to the new cref.
 */
static NODE *
vm_cref_push(rb_thread_t *th, VALUE klass, int noex, rb_block_t *blockptr)
{
    rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(th, th->cfp);
    NODE *cref = NEW_CREF(klass);
    cref->nd_refinements = Qnil;
    cref->nd_visi = noex;

    if (blockptr) {
        RB_OBJ_WRITE(cref, &cref->nd_next, vm_get_cref0(blockptr->iseq, blockptr->ep));
    }
    else if (cfp) {
        RB_OBJ_WRITE(cref, &cref->nd_next, vm_get_cref0(cfp->iseq, cfp->ep));
    }

    /* inherit active refinements from the parent cref (skip sentinel 1) */
    if (cref->nd_next && cref->nd_next != (void *) 1 &&
        !NIL_P(cref->nd_next->nd_refinements)) {
        COPY_CREF_OMOD(cref, cref->nd_next);
    }

    return cref;
}
00324
00325 static inline VALUE
00326 vm_get_cbase(const rb_iseq_t *iseq, const VALUE *ep)
00327 {
00328 NODE *cref = rb_vm_get_cref(iseq, ep);
00329 VALUE klass = Qundef;
00330
00331 while (cref) {
00332 if ((klass = cref->nd_clss) != 0) {
00333 break;
00334 }
00335 cref = cref->nd_next;
00336 }
00337
00338 return klass;
00339 }
00340
00341 static inline VALUE
00342 vm_get_const_base(const rb_iseq_t *iseq, const VALUE *ep)
00343 {
00344 NODE *cref = rb_vm_get_cref(iseq, ep);
00345 VALUE klass = Qundef;
00346
00347 while (cref) {
00348 if (!(cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL) &&
00349 (klass = cref->nd_clss) != 0) {
00350 break;
00351 }
00352 cref = cref->nd_next;
00353 }
00354
00355 return klass;
00356 }
00357
00358 static inline void
00359 vm_check_if_namespace(VALUE klass)
00360 {
00361 VALUE str;
00362 if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
00363 str = rb_inspect(klass);
00364 rb_raise(rb_eTypeError, "%s is not a class/module",
00365 StringValuePtr(str));
00366 }
00367 }
00368
00369 static inline VALUE
00370 vm_get_iclass(rb_control_frame_t *cfp, VALUE klass)
00371 {
00372 if (RB_TYPE_P(klass, T_MODULE) &&
00373 FL_TEST(klass, RMODULE_IS_OVERLAID) &&
00374 RB_TYPE_P(cfp->klass, T_ICLASS) &&
00375 RBASIC(cfp->klass)->klass == klass) {
00376 return cfp->klass;
00377 }
00378 else {
00379 return klass;
00380 }
00381 }
00382
/*
 * Resolve a constant reference.  orig_klass == Qnil means a lexical
 * lookup: scan the cref chain (skipping eval-pushed entries), honoring
 * autoload, then fall back to rb_const_get/defined on the root cref's
 * class (or the receiver's class at toplevel).  Otherwise do a qualified
 * Klass::CONST lookup.  With is_defined != 0, only report existence.
 */
static inline VALUE
vm_get_ev_const(rb_thread_t *th, const rb_iseq_t *iseq,
                VALUE orig_klass, ID id, int is_defined)
{
    VALUE val;

    if (orig_klass == Qnil) {
        /* in current lexical scope */
        const NODE *root_cref = rb_vm_get_cref(iseq, th->cfp->ep);
        const NODE *cref;
        VALUE klass = orig_klass;

        while (root_cref && root_cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL) {
            root_cref = root_cref->nd_next;
        }
        cref = root_cref;
        while (cref && cref->nd_next) {
            if (cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL) {
                klass = Qnil;
            }
            else {
                klass = cref->nd_clss;
            }
            cref = cref->nd_next;

            if (!NIL_P(klass)) {
                VALUE av, am = 0;
                st_data_t data;
              search_continue:
                if (RCLASS_CONST_TBL(klass) &&
                    st_lookup(RCLASS_CONST_TBL(klass), id, &data)) {
                    val = ((rb_const_entry_t*)data)->value;
                    if (val == Qundef) {
                        /* Qundef marks an autoload stub; trigger it, but
                         * remember `am` so the same class isn't retried
                         * forever */
                        if (am == klass) break;
                        am = klass;
                        if (is_defined) return 1;
                        if (rb_autoloading_value(klass, id, &av)) return av;
                        rb_autoload_load(klass, id);
                        goto search_continue;
                    }
                    else {
                        if (is_defined) {
                            return 1;
                        }
                        else {
                            return val;
                        }
                    }
                }
            }
        }

        /* not found in lexical scope: search the ancestry */
        if (root_cref && !NIL_P(root_cref->nd_clss)) {
            klass = vm_get_iclass(th->cfp, root_cref->nd_clss);
        }
        else {
            klass = CLASS_OF(th->cfp->self);
        }

        if (is_defined) {
            return rb_const_defined(klass, id);
        }
        else {
            return rb_const_get(klass, id);
        }
    }
    else {
        /* qualified reference: Klass::CONST (public constants only) */
        vm_check_if_namespace(orig_klass);
        if (is_defined) {
            return rb_public_const_defined_from(orig_klass, id);
        }
        else {
            return rb_public_const_get_from(orig_klass, id);
        }
    }
}
00460
/*
 * Find the class used as the base for class-variable access: skip crefs
 * that are nil, singleton classes, or eval-pushed, warn when the search
 * reached the toplevel cref, and map overlaid modules through
 * vm_get_iclass().  Raises TypeError when no class is available.
 */
static inline VALUE
vm_get_cvar_base(NODE *cref, rb_control_frame_t *cfp)
{
    VALUE klass;

    if (!cref) {
        rb_bug("vm_get_cvar_base: no cref");
    }

    while (cref->nd_next &&
           (NIL_P(cref->nd_clss) || FL_TEST(cref->nd_clss, FL_SINGLETON) ||
            (cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL))) {
        cref = cref->nd_next;
    }
    if (!cref->nd_next) {
        rb_warn("class variable access from toplevel");
    }

    klass = vm_get_iclass(cfp, cref->nd_clss);

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class variables available");
    }
    return klass;
}
00486
00487 static VALUE
00488 vm_search_const_defined_class(const VALUE cbase, ID id)
00489 {
00490 if (rb_const_defined_at(cbase, id)) return cbase;
00491 if (cbase == rb_cObject) {
00492 VALUE tmp = RCLASS_SUPER(cbase);
00493 while (tmp) {
00494 if (rb_const_defined_at(tmp, id)) return tmp;
00495 tmp = RCLASS_SUPER(tmp);
00496 }
00497 }
00498 return 0;
00499 }
00500
00501 #ifndef USE_IC_FOR_IVAR
00502 #define USE_IC_FOR_IVAR 1
00503 #endif
00504
/*
 * Read instance variable `id` of obj, using an inline cache.
 *
 * For T_OBJECT receivers the ivar index is cached: in `ic` (validated by
 * the class serial) for the getivar instruction, or in ci->aux.index
 * (offset by +1, 0 meaning "unfilled") for attr_reader calls.  On a cache
 * miss the index table is consulted and the cache refilled.  An unset
 * ivar yields nil (with a warning for plain ivar reads, matching
 * rb_ivar_get).  Non-T_OBJECT receivers fall back to the generic API.
 */
static inline VALUE
vm_getivar(VALUE obj, ID id, IC ic, rb_call_info_t *ci, int is_attr)
{
#if USE_IC_FOR_IVAR
    if (RB_TYPE_P(obj, T_OBJECT)) {
        VALUE val = Qundef;
        VALUE klass = RBASIC(obj)->klass;

        if (LIKELY((!is_attr && ic->ic_serial == RCLASS_SERIAL(klass)) ||
                   (is_attr && ci->aux.index > 0))) {
            /* cache hit: use the cached ivar index */
            long index = !is_attr ? (long)ic->ic_value.index : ci->aux.index - 1;
            long len = ROBJECT_NUMIV(obj);
            VALUE *ptr = ROBJECT_IVPTR(obj);

            if (index < len) {
                val = ptr[index];
            }
        }
        else {
            /* cache miss: look up the index and refill the cache */
            st_data_t index;
            long len = ROBJECT_NUMIV(obj);
            VALUE *ptr = ROBJECT_IVPTR(obj);
            struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);

            if (iv_index_tbl) {
                if (st_lookup(iv_index_tbl, id, &index)) {
                    if ((long)index < len) {
                        val = ptr[index];
                    }
                    if (!is_attr) {
                        ic->ic_value.index = index;
                        ic->ic_serial = RCLASS_SERIAL(klass);
                    }
                    else { /* call_info cache stores index + 1 */
                        ci->aux.index = index + 1;
                    }
                }
            }
        }

        if (UNLIKELY(val == Qundef)) {
            if (!is_attr) rb_warning("instance variable %s not initialized", rb_id2name(id));
            val = Qnil;
        }
        return val;
    }
#endif
    if (is_attr)
        return rb_attr_get(obj, id);
    return rb_ivar_get(obj, id);
}
00556
/*
 * Write instance variable `id` of obj, using the same inline-cache scheme
 * as vm_getivar (ic for setivar, ci->aux.index for attr_writer).  A cached
 * in-bounds index stores through RB_OBJ_WRITE and returns; otherwise the
 * cache is refilled and the generic rb_ivar_set path (which can grow the
 * ivar table) is taken.  The object is checked for frozenness first.
 */
static inline VALUE
vm_setivar(VALUE obj, ID id, VALUE val, IC ic, rb_call_info_t *ci, int is_attr)
{
#if USE_IC_FOR_IVAR
    rb_check_frozen(obj);

    if (RB_TYPE_P(obj, T_OBJECT)) {
        VALUE klass = RBASIC(obj)->klass;
        st_data_t index;

        if (LIKELY(
                (!is_attr && ic->ic_serial == RCLASS_SERIAL(klass)) ||
                (is_attr && ci->aux.index > 0))) {
            /* cache hit */
            long index = !is_attr ? (long)ic->ic_value.index : ci->aux.index-1;
            long len = ROBJECT_NUMIV(obj);
            VALUE *ptr = ROBJECT_IVPTR(obj);

            if (index < len) {
                RB_OBJ_WRITE(obj, &ptr[index], val);
                return val; /* inline cache hit */
            }
        }
        else {
            /* cache miss: refill, then fall through to the generic path */
            struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);

            if (iv_index_tbl && st_lookup(iv_index_tbl, (st_data_t)id, &index)) {
                if (!is_attr) {
                    ic->ic_value.index = index;
                    ic->ic_serial = RCLASS_SERIAL(klass);
                }
                else {
                    ci->aux.index = index + 1;
                }
            }
            /* slow path follows */
        }
    }
#endif
    return rb_ivar_set(obj, id, val);
}
00597
/* getinstancevariable instruction body: cached ivar read (not attr). */
static VALUE
vm_getinstancevariable(VALUE obj, ID id, IC ic)
{
    return vm_getivar(obj, id, ic, 0, 0);
}
00603
/* setinstancevariable instruction body: cached ivar write (not attr). */
static void
vm_setinstancevariable(VALUE obj, ID id, VALUE val, IC ic)
{
    vm_setivar(obj, id, val, ic, 0, 0);
}
00609
/*
 * Implement the `throw` instruction (break / return / retry / raise ...).
 *
 * throw_state packs three fields: low byte = tag state (TAG_BREAK etc.),
 * bit 15 = flag, bits >=16 = lexical level.  With state != 0 a throw
 * object (NODE) carrying the catch point `pt` is built and th->state set;
 * with state == 0 an in-flight thrown object is propagated as-is.
 */
static VALUE
vm_throw(rb_thread_t *th, rb_control_frame_t *reg_cfp,
         rb_num_t throw_state, VALUE throwobj)
{
    int state = (int)(throw_state & 0xff);
    int flag = (int)(throw_state & 0x8000);
    rb_num_t level = throw_state >> 16;

    if (state != 0) {
        VALUE *pt = 0;
        if (flag != 0) {
            pt = (void *) 1;
        }
        else {
            if (state == TAG_BREAK) {
                rb_control_frame_t *cfp = GET_CFP();
                VALUE *ep = GET_EP();
                int is_orphan = 1;
                rb_iseq_t *base_iseq = GET_ISEQ();

                /* walk out of nested blocks to find the frame `break` targets */
              search_parent:
                if (cfp->iseq->type != ISEQ_TYPE_BLOCK) {
                    if (cfp->iseq->type == ISEQ_TYPE_CLASS) {
                        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
                        ep = cfp->ep;
                        goto search_parent;
                    }
                    ep = VM_EP_PREV_EP(ep);
                    base_iseq = base_iseq->parent_iseq;

                    /* find the live control frame owning that EP */
                    while ((VALUE *) cfp < th->stack + th->stack_size) {
                        if (cfp->ep == ep) {
                            goto search_parent;
                        }
                        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
                    }
                    rb_bug("VM (throw): can't find break base.");
                }

                if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_LAMBDA) {
                    /* `break` inside a lambda acts like `return` */
                    is_orphan = 0;
                    pt = cfp->ep;
                    state = TAG_RETURN;
                }
                else {
                    ep = VM_EP_PREV_EP(ep);

                    /* search for a live frame with a matching BREAK catch
                     * entry covering the current pc */
                    while ((VALUE *)cfp < th->stack + th->stack_size) {
                        if (cfp->ep == ep) {
                            VALUE epc = cfp->pc - cfp->iseq->iseq_encoded;
                            rb_iseq_t *iseq = cfp->iseq;
                            int i;

                            for (i=0; i<iseq->catch_table_size; i++) {
                                struct iseq_catch_table_entry *entry = &iseq->catch_table[i];

                                if (entry->type == CATCH_TYPE_BREAK &&
                                    entry->start < epc && entry->end >= epc) {
                                    if (entry->cont == epc) {
                                        goto found;
                                    }
                                    else {
                                        break;
                                    }
                                }
                            }
                            break;

                          found:
                            pt = ep;
                            is_orphan = 0;
                            break;
                        }
                        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
                    }
                }

                if (is_orphan) {
                    /* the block's home frame is gone */
                    rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
                }
            }
            else if (state == TAG_RETRY) {
                rb_num_t i;
                pt = VM_EP_PREV_EP(GET_EP());
                /* climb `level` additional EP links */
                for (i = 0; i < level; i++) {
                    pt = GC_GUARDED_PTR_REF((VALUE *) * pt);
                }
            }
            else if (state == TAG_RETURN) {
                rb_control_frame_t *cfp = GET_CFP();
                VALUE *ep = GET_EP();
                VALUE *target_lep = VM_CF_LEP(cfp);
                int in_class_frame = 0;

                /* check orphan and find the frame `return` escapes to */
                while ((VALUE *) cfp < th->stack + th->stack_size) {
                    VALUE *lep = VM_CF_LEP(cfp);

                    if (!target_lep) {
                        target_lep = lep;
                    }

                    if (lep == target_lep && cfp->iseq->type == ISEQ_TYPE_CLASS) {
                        in_class_frame = 1;
                        target_lep = 0;
                    }

                    if (lep == target_lep) {
                        if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_LAMBDA) {
                            VALUE *tep = ep;

                            if (in_class_frame) {
                                /* lambda inside a class frame returns from
                                 * the lambda itself */
                                ep = cfp->ep;
                                goto valid_return;
                            }

                            while (target_lep != tep) {
                                if (cfp->ep == tep) {
                                    /* in this lambda */
                                    ep = cfp->ep;
                                    goto valid_return;
                                }
                                tep = VM_EP_PREV_EP(tep);
                            }
                        }
                    }

                    if (cfp->ep == target_lep && cfp->iseq->type == ISEQ_TYPE_METHOD) {
                        ep = target_lep;
                        goto valid_return;
                    }

                    cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
                }

                rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);

              valid_return:
                pt = ep;
            }
            else {
                rb_bug("isns(throw): unsupport throw type");
            }
        }
        th->state = state;
        return (VALUE)NEW_THROW_OBJECT(throwobj, (VALUE) pt, state);
    }
    else {
        /* continue throw: re-propagate an already-thrown object */
        VALUE err = throwobj;

        if (FIXNUM_P(err)) {
            th->state = FIX2INT(err);
        }
        else if (SYMBOL_P(err)) {
            th->state = TAG_THROW;
        }
        else if (BUILTIN_TYPE(err) == T_NODE) {
            th->state = GET_THROWOBJ_STATE(err);
        }
        else {
            th->state = TAG_RAISE;
        }
        return err;
    }
}
00779
/*
 * expandarray instruction: explode `ary` into `num` stack slots (plus one
 * for the splat rest when flag bit 0 is set).  flag bit 1 selects
 * "post-args" order, which fills from the array's tail and pushes values
 * in reverse.  Missing elements become nil; the splat slot receives the
 * leftover elements as a new array.
 */
static inline void
vm_expandarray(rb_control_frame_t *cfp, VALUE ary, rb_num_t num, int flag)
{
    int is_splat = flag & 0x01;
    rb_num_t space_size = num + is_splat;
    VALUE *base = cfp->sp;
    const VALUE *ptr;
    rb_num_t len;

    if (!RB_TYPE_P(ary, T_ARRAY)) {
        ary = rb_ary_to_ary(ary);
    }

    cfp->sp += space_size;

    ptr = RARRAY_CONST_PTR(ary);
    len = (rb_num_t)RARRAY_LEN(ary);

    if (flag & 0x02) {
        /* post-args: fill from the tail of the array */
        rb_num_t i = 0, j;

        if (len < num) {
            /* pad the shortfall with nil first */
            for (i=0; i<num-len; i++) {
                *base++ = Qnil;
            }
        }
        for (j=0; i<num; i++, j++) {
            VALUE v = ptr[len - j - 1];
            *base++ = v;
        }
        if (is_splat) {
            /* rest = everything before the consumed tail */
            *base = rb_ary_new4(len - j, ptr);
        }
    }
    else {
        /* normal: fill from the head, pushing in reverse stack order */
        rb_num_t i;
        VALUE *bptr = &base[space_size - 1];

        for (i=0; i<num; i++) {
            if (len <= i) {
                for (; i<num; i++) {
                    *bptr-- = Qnil;
                }
                break;
            }
            *bptr-- = ptr[i];
        }
        if (is_splat) {
            if (num > len) {
                *bptr = rb_ary_new();
            }
            else {
                *bptr = rb_ary_new4(len - num, ptr + num);
            }
        }
    }
    RB_GC_GUARD(ary); /* keep ary alive while reading its raw pointer */
}
00840
00841 static VALUE vm_call_general(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci);
00842
/*
 * Resolve the method for a call site.  The inline cache in `ci` is valid
 * when both the global method state and the receiver class's serial
 * match; otherwise the method entry is looked up and the cache refilled,
 * resetting ci->call to the generic dispatcher.
 */
static void
vm_search_method(rb_call_info_t *ci, VALUE recv)
{
    VALUE klass = CLASS_OF(recv);

#if OPT_INLINE_METHOD_CACHE
    if (LIKELY(GET_GLOBAL_METHOD_STATE() == ci->method_state && RCLASS_SERIAL(klass) == ci->class_serial)) {
        /* cache hit */
        return;
    }
#endif

    ci->me = rb_method_entry(klass, ci->mid, &ci->defined_class);
    ci->klass = klass;
    ci->call = vm_call_general;
#if OPT_INLINE_METHOD_CACHE
    ci->method_state = GET_GLOBAL_METHOD_STATE();
    ci->class_serial = RCLASS_SERIAL(klass);
#endif
}
00863
00864 static inline int
00865 check_cfunc(const rb_method_entry_t *me, VALUE (*func)())
00866 {
00867 if (me && me->def->type == VM_METHOD_TYPE_CFUNC &&
00868 me->def->body.cfunc.func == func) {
00869 return 1;
00870 }
00871 else {
00872 return 0;
00873 }
00874 }
00875
/*
 * Fast path for `==`.  Handles Fixnum/Flonum identity, Float and String
 * comparison directly when BOP_EQ has not been redefined, and receivers
 * whose `==` is the default rb_obj_equal.  Returns Qundef when the fast
 * path cannot decide and a full method call is required.
 */
static
#ifndef NO_BIG_INLINE
inline
#endif
VALUE
opt_eq_func(VALUE recv, VALUE obj, CALL_INFO ci)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_EQ, FIXNUM_REDEFINED_OP_FLAG)) {
        return (recv == obj) ? Qtrue : Qfalse;
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_EQ, FLOAT_REDEFINED_OP_FLAG)) {
        return (recv == obj) ? Qtrue : Qfalse;
    }
    else if (!SPECIAL_CONST_P(recv) && !SPECIAL_CONST_P(obj)) {
        if (RBASIC_CLASS(recv) == rb_cFloat &&
            RBASIC_CLASS(obj) == rb_cFloat &&
            BASIC_OP_UNREDEFINED_P(BOP_EQ, FLOAT_REDEFINED_OP_FLAG)) {
            double a = RFLOAT_VALUE(recv);
            double b = RFLOAT_VALUE(obj);

            /* NaN is not equal to anything, including itself */
            if (isnan(a) || isnan(b)) {
                return Qfalse;
            }
            return (a == b) ? Qtrue : Qfalse;
        }
        else if (RBASIC_CLASS(recv) == rb_cString &&
                 RBASIC_CLASS(obj) == rb_cString &&
                 BASIC_OP_UNREDEFINED_P(BOP_EQ, STRING_REDEFINED_OP_FLAG)) {
            return rb_str_equal(recv, obj);
        }
    }

    {
        /* default Object#== is identity: avoid the full call */
        vm_search_method(ci, recv);

        if (check_cfunc(ci->me, rb_obj_equal)) {
            return recv == obj ? Qtrue : Qfalse;
        }
    }

    return Qundef;
}
00920
00921 VALUE
00922 rb_equal_opt(VALUE obj1, VALUE obj2)
00923 {
00924 rb_call_info_t ci;
00925 ci.mid = idEq;
00926 ci.klass = 0;
00927 ci.method_state = 0;
00928 ci.me = NULL;
00929 ci.defined_class = 0;
00930 return opt_eq_func(obj1, obj2, &ci);
00931 }
00932
00933 static VALUE
00934 vm_call0(rb_thread_t*, VALUE, ID, int, const VALUE*, const rb_method_entry_t*, VALUE);
00935
/*
 * Evaluate a pattern match for `when`, `rescue`, or `case`.  WHEN returns
 * the pattern's truthiness directly; RESCUE additionally requires the
 * pattern to be a class/module, then (deliberately) falls through to the
 * CASE logic, which dispatches pattern === target.
 */
static VALUE
check_match(VALUE pattern, VALUE target, enum vm_check_match_type type)
{
    switch (type) {
      case VM_CHECKMATCH_TYPE_WHEN:
        return pattern;
      case VM_CHECKMATCH_TYPE_RESCUE:
        if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
            rb_raise(rb_eTypeError, "class or module required for rescue clause");
        }
        /* fall through */
      case VM_CHECKMATCH_TYPE_CASE: {
        VALUE defined_class;
        rb_method_entry_t *me = rb_method_entry_with_refinements(CLASS_OF(pattern), idEqq, &defined_class);
        if (me) {
            return vm_call0(GET_THREAD(), pattern, idEqq, 1, &target, me, defined_class);
        }
        else {
            /* fallback to funcall (e.g. method_missing) */
            return rb_funcall2(pattern, idEqq, 1, &target);
        }
      }
      default:
        rb_bug("check_match: unreachable");
    }
}
00962
00963
00964 #if defined(_MSC_VER) && _MSC_VER < 1300
00965 #define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse;
00966 #else
00967 #define CHECK_CMP_NAN(a, b)
00968 #endif
00969
00970 static inline VALUE
00971 double_cmp_lt(double a, double b)
00972 {
00973 CHECK_CMP_NAN(a, b);
00974 return a < b ? Qtrue : Qfalse;
00975 }
00976
00977 static inline VALUE
00978 double_cmp_le(double a, double b)
00979 {
00980 CHECK_CMP_NAN(a, b);
00981 return a <= b ? Qtrue : Qfalse;
00982 }
00983
00984 static inline VALUE
00985 double_cmp_gt(double a, double b)
00986 {
00987 CHECK_CMP_NAN(a, b);
00988 return a > b ? Qtrue : Qfalse;
00989 }
00990
00991 static inline VALUE
00992 double_cmp_ge(double a, double b)
00993 {
00994 CHECK_CMP_NAN(a, b);
00995 return a >= b ? Qtrue : Qfalse;
00996 }
00997
/*
 * Compute the base pointer of `cfp`'s value stack region: just past the
 * previous frame's sp plus this frame's locals (and one more slot for
 * method frames).  With VM_DEBUG_BP_CHECK it is cross-checked against the
 * bp recorded at push time.
 */
static VALUE *
vm_base_ptr(rb_control_frame_t *cfp)
{
    rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    VALUE *bp = prev_cfp->sp + cfp->iseq->local_size + 1;

    if (cfp->iseq->type == ISEQ_TYPE_METHOD) {
        /* method frames carry one extra slot (cref/me) */
        bp += 1;
    }

#if VM_DEBUG_BP_CHECK
    if (bp != cfp->bp_check) {
        fprintf(stderr, "bp_check: %ld, bp: %ld\n",
                (long)(cfp->bp_check - GET_THREAD()->stack),
                (long)(bp - GET_THREAD()->stack));
        rb_bug("vm_base_ptr: unreachable");
    }
#endif

    return bp;
}
01020
01021
01022
/*
 * Caller-side argument preparation.  Handles an explicit block argument
 * (&blk: pops it, converting non-Proc values via to_proc) or an inline
 * block iseq, then expands a trailing splat (*ary) onto the stack.
 * SAVE_RESTORE_CI protects ci->argc/blockptr across to_proc/to_a calls,
 * which may reenter the VM and clobber the shared call-info record.
 */
static void
vm_caller_setup_args(const rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
{
#define SAVE_RESTORE_CI(expr, ci) do { \
    int saved_argc = (ci)->argc; rb_block_t *saved_blockptr = (ci)->blockptr; \
    expr; \
    (ci)->argc = saved_argc; (ci)->blockptr = saved_blockptr; \
} while (0)

    if (UNLIKELY(ci->flag & VM_CALL_ARGS_BLOCKARG)) {
        rb_proc_t *po;
        VALUE proc;

        proc = *(--cfp->sp); /* pop the &block argument */

        if (proc != Qnil) {
            if (!rb_obj_is_proc(proc)) {
                VALUE b;

                /* convert via to_proc; may run arbitrary Ruby code */
                SAVE_RESTORE_CI(b = rb_check_convert_type(proc, T_DATA, "Proc", "to_proc"), ci);

                if (NIL_P(b) || !rb_obj_is_proc(b)) {
                    rb_raise(rb_eTypeError,
                             "wrong argument type %s (expected Proc)",
                             rb_obj_classname(proc));
                }
                proc = b;
            }
            GetProcPtr(proc, po);
            ci->blockptr = &po->block;
            RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp)->proc = proc;
        }
    }
    else if (ci->blockiseq != 0) {
        /* literal block: record it in the frame's block slot */
        ci->blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
        ci->blockptr->iseq = ci->blockiseq;
        ci->blockptr->proc = 0;
    }

    /* expand a trailing splat argument onto the stack */
    if (UNLIKELY(ci->flag & VM_CALL_ARGS_SPLAT)) {
        VALUE ary = *(cfp->sp - 1);
        const VALUE *ptr;
        int i;
        VALUE tmp;

        SAVE_RESTORE_CI(tmp = rb_check_convert_type(ary, T_ARRAY, "Array", "to_a"), ci);

        if (NIL_P(tmp)) {
            /* not an array: leave the value as a single argument */
        }
        else {
            long len = RARRAY_LEN(tmp);
            ptr = RARRAY_CONST_PTR(tmp);
            cfp->sp -= 1; /* drop the array itself */

            CHECK_VM_STACK_OVERFLOW(cfp, len);

            for (i = 0; i < len; i++) {
                *cfp->sp++ = ptr[i];
            }
            ci->argc += i-1; /* the array became `len` args (replacing 1) */
        }
    }
}
01089
/*
 * Extract keyword arguments for an iseq that declares them.  When more
 * than `m` (mandatory) args were passed and the last one converts to a
 * hash, its symbol keys are split off via rb_extract_keywords; the arg is
 * dropped (or replaced by the non-keyword remainder).  rb_get_kwargs is
 * invoked for its validation side effects (unknown/missing-key errors).
 * cfp->sp / mark_stack_len are temporarily adjusted so the argv region
 * stays GC-visible during the conversions.  Returns the adjusted argc and
 * stores the keyword hash (never 0) through *kwd.
 */
static inline int
vm_callee_setup_keyword_arg(rb_thread_t *th, const rb_iseq_t *iseq, int argc, int m, VALUE *orig_argv, VALUE *kwd)
{
    VALUE keyword_hash = 0, orig_hash;
    int optional = iseq->arg_keywords - iseq->arg_keyword_required;
    VALUE *const sp = th->cfp->sp;
    const int mark_stack_len = th->mark_stack_len;

    /* keep the argument region protected while running conversions */
    th->cfp->sp += argc;
    th->mark_stack_len -= argc;

    if (argc > m &&
        !NIL_P(orig_hash = rb_check_hash_type(orig_argv[argc-1])) &&
        (keyword_hash = rb_extract_keywords(&orig_hash)) != 0) {
        if (!orig_hash) {
            /* the whole trailing hash was keywords: consume it */
            argc--;
        }
        else {
            orig_argv[argc-1] = orig_hash;
        }
    }
    /* validate keys; negative optional count means "no strict check" */
    rb_get_kwargs(keyword_hash, iseq->arg_keyword_table, iseq->arg_keyword_required,
                  (iseq->arg_keyword_check ? optional : -1-optional),
                  NULL);

    if (!keyword_hash) {
        keyword_hash = rb_hash_new();
    }

    th->cfp->sp = sp;
    th->mark_stack_len = mark_stack_len;

    *kwd = keyword_hash;

    return argc;
}
01126
/*
 * Full (non-simple) callee argument setup: distributes the caller's args
 * in orig_argv into the iseq's local layout — mandatory, post, optional,
 * rest, keyword, and block parameters, in that order.  Raises
 * ArgumentError on arity mismatch.  Returns the opt-table pc offset at
 * which execution of the iseq should start.
 */
static inline int
vm_callee_setup_arg_complex(rb_thread_t *th, rb_call_info_t *ci, const rb_iseq_t *iseq, VALUE *orig_argv)
{
    const int m = iseq->argc;                                    /* mandatory */
    const int opts = iseq->arg_opts - (iseq->arg_opts > 0);      /* optional count */
    const int min = m + iseq->arg_post_len;
    const int max = (iseq->arg_rest == -1) ? m + opts + iseq->arg_post_len : UNLIMITED_ARGUMENTS;
    const int orig_argc = ci->argc;
    int argc = orig_argc;
    VALUE *argv = orig_argv;
    VALUE keyword_hash = Qnil;
    rb_num_t opt_pc = 0;

    th->mark_stack_len = argc + iseq->arg_size;

    /* keyword arguments (may consume the trailing hash argument) */
    if (iseq->arg_keyword != -1) {
        argc = vm_callee_setup_keyword_arg(th, iseq, argc, min, orig_argv, &keyword_hash);
    }

    /* arity check */
    if ((argc < min) || (argc > max && max != UNLIMITED_ARGUMENTS)) {
        argument_error(iseq, argc, min, max);
    }

    argv += m;
    argc -= m;

    /* post (trailing mandatory) arguments */
    if (iseq->arg_post_len) {
        if (!(orig_argc < iseq->arg_post_start)) {
            /* copy out first: source and destination regions may overlap */
            VALUE *new_argv = ALLOCA_N(VALUE, argc);
            MEMCPY(new_argv, argv, VALUE, argc);
            argv = new_argv;
        }

        MEMCPY(&orig_argv[iseq->arg_post_start], &argv[argc -= iseq->arg_post_len],
               VALUE, iseq->arg_post_len);
    }

    /* optional arguments; opt_pc selects where execution starts */
    if (iseq->arg_opts) {
        if (argc > opts) {
            argc -= opts;
            argv += opts;
            opt_pc = iseq->arg_opt_table[opts]; /* all optionals supplied */
        }
        else {
            int i;
            for (i = argc; i<opts; i++) {
                orig_argv[i + m] = Qnil; /* unsupplied optionals start nil */
            }
            opt_pc = iseq->arg_opt_table[argc];
            argc = 0;
        }
    }

    /* rest (*args) parameter */
    if (iseq->arg_rest != -1) {
        orig_argv[iseq->arg_rest] = rb_ary_new4(argc, argv);
        argc = 0;
    }

    /* keyword parameter slots: clear locals, then store the kw hash */
    if (iseq->arg_keyword != -1) {
        int i;
        int arg_keywords_end = iseq->arg_keyword - (iseq->arg_block != -1);
        for (i = iseq->arg_keywords; 0 < i; i--) {
            orig_argv[arg_keywords_end - i] = Qnil;
        }
        orig_argv[iseq->arg_keyword] = keyword_hash;
    }

    /* block (&blk) parameter: materialize the block as a Proc */
    if (iseq->arg_block != -1) {
        VALUE blockval = Qnil;
        const rb_block_t *blockptr = ci->blockptr;

        if (blockptr) {
            /* make Proc if needed */
            if (blockptr->proc == 0) {
                rb_proc_t *proc;
                blockval = rb_vm_make_proc(th, blockptr, rb_cProc);
                GetProcPtr(blockval, proc);
                ci->blockptr = &proc->block;
            }
            else {
                blockval = blockptr->proc;
            }
        }

        orig_argv[iseq->arg_block] = blockval;
    }

    th->mark_stack_len = 0;
    return (int)opt_pc;
}
01224
01225 static VALUE vm_call_iseq_setup_2(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci);
01226 static inline VALUE vm_call_iseq_setup_normal(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci);
01227 static inline VALUE vm_call_iseq_setup_tailcall(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci);
01228
/*
 * Callee argument setup dispatcher.  For "simple" iseqs (fixed arity, no
 * opt/rest/post/keyword/block params) only an arity check is needed, and
 * the call site's fastpath can be pinned to the normal/tailcall setup
 * (unless the call is a lambda dispatch, uses a splat, or targets a
 * protected method, which all need per-call rechecking).  Everything else
 * goes through vm_callee_setup_arg_complex.
 */
static inline void
vm_callee_setup_arg(rb_thread_t *th, rb_call_info_t *ci, const rb_iseq_t *iseq,
                    VALUE *argv, int is_lambda)
{
    if (LIKELY(iseq->arg_simple & 0x01)) {
        /* simple check: exact argc match only */
        if (ci->argc != iseq->argc) {
            argument_error(iseq, ci->argc, iseq->argc, iseq->argc);
        }
        ci->aux.opt_pc = 0;
        CI_SET_FASTPATH(ci,
                        (UNLIKELY(ci->flag & VM_CALL_TAILCALL) ?
                         vm_call_iseq_setup_tailcall :
                         vm_call_iseq_setup_normal),
                        (!is_lambda &&
                         !(ci->flag & VM_CALL_ARGS_SPLAT) &&
                         !(ci->me->flag & NOEX_PROTECTED)));
    }
    else {
        ci->aux.opt_pc = vm_callee_setup_arg_complex(th, ci, iseq, argv);
    }
}
01251
/* Generic iseq call entry: set up callee arguments, then push the frame. */
static VALUE
vm_call_iseq_setup(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
{
    vm_callee_setup_arg(th, ci, ci->me->def->body.iseq, cfp->sp - ci->argc, 0);
    return vm_call_iseq_setup_2(th, cfp, ci);
}
01258
01259 static VALUE
01260 vm_call_iseq_setup_2(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
01261 {
01262 if (LIKELY(!(ci->flag & VM_CALL_TAILCALL))) {
01263 return vm_call_iseq_setup_normal(th, cfp, ci);
01264 }
01265 else {
01266 return vm_call_iseq_setup_tailcall(th, cfp, ci);
01267 }
01268 }
01269
/*
 * Push a method frame for an iseq call.  The arguments already sit on the
 * caller's stack; the remaining locals are nil-filled in place and the new
 * frame starts right after them.  The caller's sp is rolled back below the
 * receiver.  Returns Qundef to signal "frame pushed, resume in VM loop".
 */
static inline VALUE
vm_call_iseq_setup_normal(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
{
    int i, local_size;
    VALUE *argv = cfp->sp - ci->argc;
    rb_iseq_t *iseq = ci->me->def->body.iseq;
    VALUE *sp = argv + iseq->arg_size;

    /* clear local variables beyond the arguments */
    for (i = iseq->arg_size, local_size = iseq->local_size; i < local_size; i++) {
        *sp++ = Qnil;
    }

    vm_push_frame(th, iseq, VM_FRAME_MAGIC_METHOD, ci->recv, ci->defined_class,
                  VM_ENVVAL_BLOCK_PTR(ci->blockptr),
                  iseq->iseq_encoded + ci->aux.opt_pc, sp, 0, ci->me, iseq->stack_max);

    cfp->sp = argv - 1 /* recv */;
    return Qundef;
}
01290
/*
 * Tailcall variant of ISeq frame setup: instead of stacking a new
 * frame on top, the current frame is discarded (its FINISH flag is
 * preserved) and the callee is built in its place.  Receiver,
 * arguments and nil-initialized locals are copied/written starting at
 * the previous frame's sp.
 */
static inline VALUE
vm_call_iseq_setup_tailcall(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
{
    int i;
    VALUE *argv = cfp->sp - ci->argc;
    rb_iseq_t *iseq = ci->me->def->body.iseq;
    VALUE *src_argv = argv;
    VALUE *sp_orig, *sp;
    /* keep the FINISH flag so vm_exec still unwinds correctly */
    VALUE finish_flag = VM_FRAME_TYPE_FINISH_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;

    /* discard the current frame before building the replacement */
    cfp = th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);

    RUBY_VM_CHECK_INTS(th);

    sp_orig = sp = cfp->sp;

    /* push self */
    sp[0] = ci->recv;
    sp++;

    /* copy arguments (src_argv still points into the old frame's area) */
    for (i=0; i < iseq->arg_size; i++) {
        *sp++ = src_argv[i];
    }

    /* nil-fill the remaining locals */
    for (i = 0; i < iseq->local_size - iseq->arg_size; i++) {
        *sp++ = Qnil;
    }

    vm_push_frame(th, iseq, VM_FRAME_MAGIC_METHOD | finish_flag,
                  ci->recv, ci->defined_class, VM_ENVVAL_BLOCK_PTR(ci->blockptr),
                  iseq->iseq_encoded + ci->aux.opt_pc, sp, 0, ci->me, iseq->stack_max);

    cfp->sp = sp_orig;
    return Qundef;
}
01328
/*
 * Arity-specific invokers for C-implemented methods.  cfunc->invoker
 * is set to one of these based on the declared arity (cfunc->argc):
 *   call_cfunc_m2 : arity -2, func(recv, args_ary)
 *   call_cfunc_m1 : arity -1, func(argc, argv, recv)
 *   call_cfunc_N  : fixed arity N, func(recv, argv[0], ..., argv[N-1])
 * They exist so the variadic func pointer (declared with ANYARGS) is
 * always called with the exact argument list its real prototype
 * expects.  Kept as a mechanical family; do not "simplify" into a
 * loop — each call site fixes the calling convention at compile time.
 */
static VALUE
call_cfunc_m2(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, rb_ary_new4(argc, argv));
}

static VALUE
call_cfunc_m1(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(argc, argv, recv);
}

static VALUE
call_cfunc_0(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv);
}

static VALUE
call_cfunc_1(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0]);
}

static VALUE
call_cfunc_2(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1]);
}

static VALUE
call_cfunc_3(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2]);
}

static VALUE
call_cfunc_4(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3]);
}

static VALUE
call_cfunc_5(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
}

static VALUE
call_cfunc_6(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
}

static VALUE
call_cfunc_7(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
}

static VALUE
call_cfunc_8(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
}

static VALUE
call_cfunc_9(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
}

static VALUE
call_cfunc_10(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
}

static VALUE
call_cfunc_11(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
}

static VALUE
call_cfunc_12(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
}

static VALUE
call_cfunc_13(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
}

static VALUE
call_cfunc_14(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
}

static VALUE
call_cfunc_15(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
}
01436
01437 #ifndef VM_PROFILE
01438 #define VM_PROFILE 0
01439 #endif
01440
01441 #if VM_PROFILE
01442 static int vm_profile_counter[4];
01443 #define VM_PROFILE_UP(x) (vm_profile_counter[x]++)
01444 #define VM_PROFILE_ATEXIT() atexit(vm_profile_show_result)
01445 static void
01446 vm_profile_show_result(void)
01447 {
01448 fprintf(stderr, "VM Profile results: \n");
01449 fprintf(stderr, "r->c call: %d\n", vm_profile_counter[0]);
01450 fprintf(stderr, "r->c popf: %d\n", vm_profile_counter[1]);
01451 fprintf(stderr, "c->c call: %d\n", vm_profile_counter[2]);
01452 fprintf(stderr, "r->c popf: %d\n", vm_profile_counter[3]);
01453 }
01454 #else
01455 #define VM_PROFILE_UP(x)
01456 #define VM_PROFILE_ATEXIT()
01457 #endif
01458
/*
 * Return the cfunc descriptor of a method entry.  When
 * VM_DEBUG_VERIFY_METHOD_CACHE is enabled, first verify that the entry
 * really is a CFUNC/NOTIMPLEMENTED method and rb_bug() on any other
 * method type (catches stale method-cache entries).
 */
static inline
const rb_method_cfunc_t *
vm_method_cfunc_entry(const rb_method_entry_t *me)
{
#if VM_DEBUG_VERIFY_METHOD_CACHE
    switch (me->def->type) {
      case VM_METHOD_TYPE_CFUNC:
      case VM_METHOD_TYPE_NOTIMPLEMENTED:
        break;
        /* expand one rb_bug case per non-cfunc method type */
# define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
        METHOD_BUG(ISEQ);
        METHOD_BUG(ATTRSET);
        METHOD_BUG(IVAR);
        METHOD_BUG(BMETHOD);
        METHOD_BUG(ZSUPER);
        METHOD_BUG(UNDEF);
        METHOD_BUG(OPTIMIZED);
        METHOD_BUG(MISSING);
        METHOD_BUG(REFINED);
# undef METHOD_BUG
      default:
        rb_bug("wrong method type: %d", me->def->type);
    }
#endif
    return &me->def->body.cfunc;
}
01485
/*
 * Call a C-implemented method with a full CFUNC control frame.
 * Pushes the frame, checks arity, invokes the C function via the
 * arity-specific invoker, verifies the callee left the frame stack
 * balanced, then pops the frame.  C_CALL/C_RETURN event hooks and
 * dtrace probes are fired around the invocation.
 */
static VALUE
vm_call_cfunc_with_frame(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
{
    VALUE val;
    const rb_method_entry_t *me = ci->me;
    const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
    int len = cfunc->argc;  /* declared arity; negative means variadic */

    /* copy out the call-info fields that are used after the invocation */
    VALUE recv = ci->recv;
    VALUE defined_class = ci->defined_class;
    rb_block_t *blockptr = ci->blockptr;
    int argc = ci->argc;

    RUBY_DTRACE_CMETHOD_ENTRY_HOOK(th, me->klass, me->called_id);
    EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, me->called_id, me->klass, Qundef);

    /* cfunc frames carry no iseq/pc and need no stack headroom (0) */
    vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC, recv, defined_class,
                  VM_ENVVAL_BLOCK_PTR(blockptr), 0, th->cfp->sp, 1, me, 0);

    if (len >= 0) rb_check_arity(argc, len, len);

    reg_cfp->sp -= argc + 1;  /* pop receiver + arguments */
    VM_PROFILE_UP(0);
    val = (*cfunc->invoker)(cfunc->func, recv, argc, reg_cfp->sp + 1);

    /* the C function must leave the frame stack exactly as it found it */
    if (reg_cfp != th->cfp + 1) {
        rb_bug("vm_call_cfunc - cfp consistency error");
    }

    vm_pop_frame(th);

    EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, recv, me->called_id, me->klass, val);
    RUBY_DTRACE_CMETHOD_RETURN_HOOK(th, me->klass, me->called_id);

    return val;
}
01523
01524 #if OPT_CALL_CFUNC_WITHOUT_FRAME
/*
 * Frameless cfunc call (OPT_CALL_CFUNC_WITHOUT_FRAME fast path).
 * The CFUNC frame is NOT pushed up front; instead the call info is
 * parked in th->passed_ci so that vm_call_cfunc_push_frame() can
 * materialize the frame lazily if the callee needs one.  On return,
 * either no frame was created (passed_ci must still be ours) or one
 * was created and must now be popped.
 */
static VALUE
vm_call_cfunc_latter(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
{
    VALUE val;
    int argc = ci->argc;
    VALUE *argv = STACK_ADDR_FROM_TOP(argc);
    const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(ci->me);

    th->passed_ci = ci;
    reg_cfp->sp -= argc + 1;
    /* remember how far sp must be restored if a frame gets pushed later */
    ci->aux.inc_sp = argc + 1;
    VM_PROFILE_UP(0);
    /* NOTE: in this build the invoker takes (func, ci, argv) */
    val = (*cfunc->invoker)(cfunc->func, ci, argv);

    /* check consistency: was a frame pushed during the call? */
    if (reg_cfp == th->cfp) {
        /* no frame was pushed; our parked ci must be untouched */
        if (UNLIKELY(th->passed_ci != ci)) {
            rb_bug("vm_call_cfunc_latter: passed_ci error (ci: %p, passed_ci: %p)", ci, th->passed_ci);
        }
        th->passed_ci = 0;
    }
    else {
        /* a frame was pushed lazily; it must sit directly below ours */
        if (UNLIKELY(reg_cfp != RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp))) {
            rb_bug("vm_call_cfunc_latter: cfp consistency error (%p, %p)", reg_cfp, th->cfp+1);
        }
        vm_pop_frame(th);
        VM_PROFILE_UP(1);
    }

    return val;
}
01556
/*
 * Full cfunc call entry for the frameless build: arity check, event
 * hooks, fastpath installation, then delegation to
 * vm_call_cfunc_latter().  The fastpath skips straight to _latter on
 * subsequent calls when no per-call checks are required.
 */
static VALUE
vm_call_cfunc(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
{
    VALUE val;
    const rb_method_entry_t *me = ci->me;
    int len = vm_method_cfunc_entry(me)->argc;
    VALUE recv = ci->recv;

    if (len >= 0) rb_check_arity(ci->argc, len, len);

    RUBY_DTRACE_CMETHOD_ENTRY_HOOK(th, me->klass, me->called_id);
    /* NOTE(review): the with-frame variant passes Qundef as the hook
     * data here; confirm Qnil is intentional for this path. */
    EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, me->called_id, me->klass, Qnil);

    /* cache the frameless path unless protected/splat checks are needed */
    if (!(ci->me->flag & NOEX_PROTECTED) &&
        !(ci->flag & VM_CALL_ARGS_SPLAT)) {
        CI_SET_FASTPATH(ci, vm_call_cfunc_latter, 1);
    }
    val = vm_call_cfunc_latter(th, reg_cfp, ci);

    EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, recv, me->called_id, me->klass, val);
    RUBY_DTRACE_CMETHOD_RETURN_HOOK(th, me->klass, me->called_id);

    return val;
}
01581
01582 void
01583 vm_call_cfunc_push_frame(rb_thread_t *th)
01584 {
01585 rb_call_info_t *ci = th->passed_ci;
01586 const rb_method_entry_t *me = ci->me;
01587 th->passed_ci = 0;
01588
01589 vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC, ci->recv, ci->defined_class,
01590 VM_ENVVAL_BLOCK_PTR(ci->blockptr), 0, th->cfp->sp + ci->aux.inc_sp, 1, me);
01591
01592 if (ci->call != vm_call_general) {
01593 ci->call = vm_call_cfunc_with_frame;
01594 }
01595 }
01596 #else
/* Without OPT_CALL_CFUNC_WITHOUT_FRAME, every C-function call simply
 * goes through the frame-pushing implementation. */
static VALUE
vm_call_cfunc(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
{
    return vm_call_cfunc_with_frame(th, reg_cfp, ci);
}
01602 #endif
01603
01604 static VALUE
01605 vm_call_ivar(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
01606 {
01607 VALUE val = vm_getivar(ci->recv, ci->me->def->body.attr.id, 0, ci, 1);
01608 cfp->sp -= 1;
01609 return val;
01610 }
01611
01612 static VALUE
01613 vm_call_attrset(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
01614 {
01615 VALUE val = vm_setivar(ci->recv, ci->me->def->body.attr.id, *(cfp->sp - 1), 0, ci, 1);
01616 cfp->sp -= 2;
01617 return val;
01618 }
01619
/*
 * Invoke the proc backing a define_method-defined method (bmethod).
 * The method entry is handed to the proc invocation via
 * th->passed_bmethod_me so the callee frame is attributed to the
 * method rather than to a plain block.
 */
static inline VALUE
vm_call_bmethod_body(rb_thread_t *th, rb_call_info_t *ci, const VALUE *argv)
{
    rb_proc_t *proc;
    VALUE val;

    /* control frame management: pass the method entry through the thread */
    th->passed_bmethod_me = ci->me;
    GetProcPtr(ci->me->def->body.proc, proc);
    val = vm_invoke_proc(th, proc, ci->recv, ci->defined_class, ci->argc, argv, ci->blockptr);

    return val;
}
01633
01634 static VALUE
01635 vm_call_bmethod(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
01636 {
01637 VALUE *argv = ALLOCA_N(VALUE, ci->argc);
01638 MEMCPY(argv, cfp->sp - ci->argc, VALUE, ci->argc);
01639 cfp->sp += - ci->argc - 1;
01640
01641 return vm_call_bmethod_body(th, ci, argv);
01642 }
01643
01644 static
01645 #ifdef _MSC_VER
01646 __forceinline
01647 #else
01648 inline
01649 #endif
01650 VALUE vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci);
01651
/*
 * Optimized Kernel#send/__send__: resolve the target method from the
 * symbol that was passed as the first argument, shift the remaining
 * arguments down one stack slot, and re-dispatch through
 * vm_call_method.  Works on a local copy of the call info so the
 * cached call site is not clobbered.
 */
static VALUE
vm_call_opt_send(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
{
    int i = ci->argc - 1;  /* stack offset of the method-name argument */
    VALUE sym;
    rb_call_info_t ci_entry;

    if (ci->argc == 0) {
        rb_raise(rb_eArgError, "no method name given");
    }

    /* mutate a stack-local copy, not the shared call-site ci */
    ci_entry = *ci;
    ci = &ci_entry;

    sym = TOPN(i);

    if (SYMBOL_P(sym)) {
        ci->mid = SYM2ID(sym);
    }
    else if (!(ci->mid = rb_check_id(&sym))) {
        /* the name has no interned ID yet: raise NoMethodError directly
         * when method_missing is untouched, otherwise intern and go on */
        if (rb_method_basic_definition_p(CLASS_OF(ci->recv), idMethodMissing)) {
            VALUE exc = make_no_method_exception(rb_eNoMethodError, NULL, ci->recv, rb_long2int(ci->argc), &TOPN(i));
            rb_exc_raise(exc);
        }
        ci->mid = rb_to_id(sym);
    }

    /* shift the remaining arguments down over the consumed symbol */
    if (i > 0) {
        MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
    }
    ci->me =
        rb_method_entry_without_refinements(CLASS_OF(ci->recv),
                                            ci->mid, &ci->defined_class);
    ci->argc -= 1;
    DEC_SP(1);

    /* send ignores visibility, hence FCALL */
    ci->flag = VM_CALL_FCALL | VM_CALL_OPT_SEND;

    return vm_call_method(th, reg_cfp, ci);
}
01693
01694 static VALUE
01695 vm_call_opt_call(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
01696 {
01697 rb_proc_t *proc;
01698 int argc = ci->argc;
01699 VALUE *argv = ALLOCA_N(VALUE, argc);
01700 GetProcPtr(ci->recv, proc);
01701 MEMCPY(argv, cfp->sp - argc, VALUE, argc);
01702 cfp->sp -= argc + 1;
01703
01704 return rb_vm_invoke_proc(th, proc, argc, argv, ci->blockptr);
01705 }
01706
/*
 * Redirect a failed dispatch to method_missing: build a fresh call
 * info targeting idMethodMissing, push the original method name symbol
 * as the new first argument (shifting the existing arguments up one
 * slot), record why the lookup failed, and re-dispatch.
 */
static VALUE
vm_call_method_missing(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
{
    VALUE *argv = STACK_ADDR_FROM_TOP(ci->argc);
    rb_call_info_t ci_entry;

    ci_entry.flag = VM_CALL_FCALL | VM_CALL_OPT_SEND;
    ci_entry.argc = ci->argc+1;  /* +1 for the method-name symbol */
    ci_entry.mid = idMethodMissing;
    ci_entry.blockptr = ci->blockptr;
    ci_entry.recv = ci->recv;
    ci_entry.me = rb_method_entry(CLASS_OF(ci_entry.recv), idMethodMissing, &ci_entry.defined_class);

    /* make room for the extra leading argument */
    CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
    if (ci->argc > 0) {
        MEMMOVE(argv+1, argv, VALUE, ci->argc);
    }
    argv[0] = ID2SYM(ci->mid);
    INC_SP(1);

    th->method_missing_reason = ci->aux.missing_reason;
    return vm_call_method(th, reg_cfp, &ci_entry);
}
01731
01732 static inline VALUE
01733 find_refinement(VALUE refinements, VALUE klass)
01734 {
01735 if (NIL_P(refinements)) {
01736 return Qnil;
01737 }
01738 return rb_hash_lookup(refinements, klass);
01739 }
01740
01741 static int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
01742 static VALUE vm_call_super_method(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci);
01743
/*
 * Find the control frame of the method enclosing cfp.  For a block
 * frame, walk up the frame stack until the frame executing the
 * block's enclosing method iseq (local_iseq) is found; if the walk
 * runs off the stack, fall back to the original frame.
 */
static rb_control_frame_t *
current_method_entry(rb_thread_t *th, rb_control_frame_t *cfp)
{
    rb_control_frame_t *top_cfp = cfp;

    if (cfp->iseq && cfp->iseq->type == ISEQ_TYPE_BLOCK) {
        rb_iseq_t *local_iseq = cfp->iseq->local_iseq;
        do {
            cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
            if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
                /* no enclosing method frame on this stack */
                return top_cfp;
            }
        } while (cfp->iseq != local_iseq);
    }
    return cfp;
}
01761
static
#ifdef _MSC_VER
__forceinline
#else
inline
#endif
/*
 * Central method-dispatch routine.  Given a resolved (or unresolved)
 * method entry in ci->me, select and run the appropriate call
 * implementation, installing a fastpath on the call site where
 * per-call checks allow it.  The goto labels implement re-dispatch:
 *   start_method_dispatch  - restart after ci->me was replaced
 *   normal_method_dispatch - visibility checks already passed
 *   zsuper_method_dispatch - continue lookup in the superclass
 * Returns either the call result or Qundef ("frame pushed; resume in
 * the VM loop").
 */
VALUE
vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
{
    int enable_fastpath = 1;
    rb_call_info_t ci_temp;

  start_method_dispatch:
    if (ci->me != 0) {
        if ((ci->me->flag == 0)) {
            /* public method: dispatch by method-definition type */
            VALUE klass;

          normal_method_dispatch:
            switch (ci->me->def->type) {
              case VM_METHOD_TYPE_ISEQ:{
                CI_SET_FASTPATH(ci, vm_call_iseq_setup, enable_fastpath);
                return vm_call_iseq_setup(th, cfp, ci);
              }
              case VM_METHOD_TYPE_NOTIMPLEMENTED:
              case VM_METHOD_TYPE_CFUNC:
                CI_SET_FASTPATH(ci, vm_call_cfunc, enable_fastpath);
                return vm_call_cfunc(th, cfp, ci);
              case VM_METHOD_TYPE_ATTRSET:{
                rb_check_arity(ci->argc, 1, 1);
                ci->aux.index = 0;  /* ivar index cache, filled lazily */
                CI_SET_FASTPATH(ci, vm_call_attrset, enable_fastpath && !(ci->flag & VM_CALL_ARGS_SPLAT));
                return vm_call_attrset(th, cfp, ci);
              }
              case VM_METHOD_TYPE_IVAR:{
                rb_check_arity(ci->argc, 0, 0);
                ci->aux.index = 0;  /* ivar index cache, filled lazily */
                CI_SET_FASTPATH(ci, vm_call_ivar, enable_fastpath && !(ci->flag & VM_CALL_ARGS_SPLAT));
                return vm_call_ivar(th, cfp, ci);
              }
              case VM_METHOD_TYPE_MISSING:{
                ci->aux.missing_reason = 0;
                CI_SET_FASTPATH(ci, vm_call_method_missing, enable_fastpath);
                return vm_call_method_missing(th, cfp, ci);
              }
              case VM_METHOD_TYPE_BMETHOD:{
                CI_SET_FASTPATH(ci, vm_call_bmethod, enable_fastpath);
                return vm_call_bmethod(th, cfp, ci);
              }
              case VM_METHOD_TYPE_ZSUPER:{
                /* module_function alias etc.: redo lookup in superclass */
                klass = ci->me->klass;
                klass = RCLASS_ORIGIN(klass);
              zsuper_method_dispatch:
                klass = RCLASS_SUPER(klass);
                if (!klass) {
                    ci->me = 0;
                    goto start_method_dispatch;
                }
                /* work on a local copy so the cached ci stays intact */
                ci_temp = *ci;
                ci = &ci_temp;

                ci->me = rb_method_entry(klass, ci->mid, &ci->defined_class);

                if (ci->me != 0) {
                    goto normal_method_dispatch;
                }
                else {
                    goto start_method_dispatch;
                }
              }
              case VM_METHOD_TYPE_OPTIMIZED:{
                switch (ci->me->def->body.optimize_type) {
                  case OPTIMIZED_METHOD_TYPE_SEND:
                    CI_SET_FASTPATH(ci, vm_call_opt_send, enable_fastpath);
                    return vm_call_opt_send(th, cfp, ci);
                  case OPTIMIZED_METHOD_TYPE_CALL:
                    CI_SET_FASTPATH(ci, vm_call_opt_call, enable_fastpath);
                    return vm_call_opt_call(th, cfp, ci);
                  default:
                    rb_bug("vm_call_method: unsupported optimized method type (%d)",
                           ci->me->def->body.optimize_type);
                }
                break;
              }
              case VM_METHOD_TYPE_UNDEF:
                break;
              case VM_METHOD_TYPE_REFINED:{
                /* refined method: check whether an active refinement at
                 * this call site overrides it */
                NODE *cref = rb_vm_get_cref(cfp->iseq, cfp->ep);
                VALUE refinements = cref ? cref->nd_refinements : Qnil;
                VALUE refinement, defined_class;
                rb_method_entry_t *me;

                refinement = find_refinement(refinements,
                                             ci->defined_class);
                if (NIL_P(refinement)) {
                    goto no_refinement_dispatch;
                }
                me = rb_method_entry(refinement, ci->mid, &defined_class);
                if (me) {
                    if (ci->call == vm_call_super_method) {
                        /* super must not re-enter the very method that
                         * called it via the same refinement */
                        rb_control_frame_t *top_cfp = current_method_entry(th, cfp);
                        if (top_cfp->me &&
                            rb_method_definition_eq(me->def, top_cfp->me->def)) {
                            goto no_refinement_dispatch;
                        }
                    }
                    ci->me = me;
                    ci->defined_class = defined_class;
                    if (me->def->type != VM_METHOD_TYPE_REFINED) {
                        goto start_method_dispatch;
                    }
                }

              no_refinement_dispatch:
                if (ci->me->def->body.orig_me) {
                    /* fall back to the original (pre-refinement) method */
                    ci->me = ci->me->def->body.orig_me;
                    if (UNDEFINED_METHOD_ENTRY_P(ci->me)) {
                        ci->me = 0;
                    }
                    goto start_method_dispatch;
                }
                else {
                    /* no original method: continue lookup upward */
                    klass = ci->me->klass;
                    goto zsuper_method_dispatch;
                }
              }
            }
            rb_bug("vm_call_method: unsupported method type (%d)", ci->me->def->type);
        }
        else {
            /* method has visibility/safety flags: check them first */
            int noex_safe;
            if (!(ci->flag & VM_CALL_FCALL) && (ci->me->flag & NOEX_MASK) & NOEX_PRIVATE) {
                /* private method called with an explicit receiver */
                int stat = NOEX_PRIVATE;

                if (ci->flag & VM_CALL_VCALL) {
                    stat |= NOEX_VCALL;
                }
                ci->aux.missing_reason = stat;
                CI_SET_FASTPATH(ci, vm_call_method_missing, 1);
                return vm_call_method_missing(th, cfp, ci);
            }
            else if (!(ci->flag & VM_CALL_OPT_SEND) && (ci->me->flag & NOEX_MASK) & NOEX_PROTECTED) {
                /* protected: caller's self must be kind_of the defining
                 * class; this check is per-call, so no fastpath */
                enable_fastpath = 0;
                if (!rb_obj_is_kind_of(cfp->self, ci->defined_class)) {
                    ci->aux.missing_reason = NOEX_PROTECTED;
                    return vm_call_method_missing(th, cfp, ci);
                }
                else {
                    goto normal_method_dispatch;
                }
            }
            else if ((noex_safe = NOEX_SAFE(ci->me->flag)) > th->safe_level && (noex_safe > 2)) {
                rb_raise(rb_eSecurityError, "calling insecure method: %s", rb_id2name(ci->mid));
            }
            else {
                goto normal_method_dispatch;
            }
        }
    }
    else {
        /* no method found: raise or redirect to method_missing */
        int stat = 0;
        if (ci->flag & VM_CALL_VCALL) {
            stat |= NOEX_VCALL;
        }
        if (ci->flag & VM_CALL_SUPER) {
            stat |= NOEX_SUPER;
        }
        if (ci->mid == idMethodMissing) {
            /* method_missing itself is missing: raise directly */
            rb_control_frame_t *reg_cfp = cfp;
            VALUE *argv = STACK_ADDR_FROM_TOP(ci->argc);
            rb_raise_method_missing(th, ci->argc, argv, ci->recv, stat);
        }
        else {
            ci->aux.missing_reason = stat;
            CI_SET_FASTPATH(ci, vm_call_method_missing, 1);
            return vm_call_method_missing(th, cfp, ci);
        }
    }

    rb_bug("vm_call_method: unreachable");
}
01943
/* Default (uncached) call implementation stored in ci->call. */
static VALUE
vm_call_general(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
{
    return vm_call_method(th, reg_cfp, ci);
}
01949
/* Call implementation used for `super` sites.  Behaviorally identical
 * to vm_call_general, but kept as a distinct function: its address is
 * compared against ci->call in the refinement dispatch path to detect
 * a super call. */
static VALUE
vm_call_super_method(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
{
    return vm_call_method(th, reg_cfp, ci);
}
01955
01956
01957
01958 static inline VALUE
01959 vm_search_normal_superclass(VALUE klass)
01960 {
01961 if (BUILTIN_TYPE(klass) == T_ICLASS &&
01962 FL_TEST(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
01963 klass = RBASIC(klass)->klass;
01964 }
01965 klass = RCLASS_ORIGIN(klass);
01966 return RCLASS_SUPER(klass);
01967 }
01968
/* Raise NoMethodError for `super` used outside of a method.
 * Does not return (rb_raise raises). */
static void
vm_super_outside(void)
{
    rb_raise(rb_eNoMethodError, "super called outside of method");
}
01974
/*
 * Determine the method id and lookup class for a `super` call and
 * store them into ci->mid / ci->klass.
 * Returns 0 on success, -1 when super has no enclosing method, and -2
 * for an implicit (zsuper) call from a define_method body, which is
 * unsupported.
 */
static int
vm_search_superclass(rb_control_frame_t *reg_cfp, rb_iseq_t *iseq, VALUE sigval, rb_call_info_t *ci)
{
    /* climb to the nearest iseq that has a class context */
    while (iseq && !iseq->klass) {
        iseq = iseq->parent_iseq;
    }

    if (iseq == 0) {
        return -1;
    }

    ci->mid = iseq->defined_method_id;

    if (iseq != iseq->local_iseq) {
        /* super inside a block: locate the frame of the enclosing method */
        rb_control_frame_t *lcfp = GET_CFP();

        if (!sigval) {
            /* zsuper in a define_method body: can't reconstruct args */
            return -2;
        }

        /* follow the ep chain outward, matching each ep against a frame */
        while (lcfp->iseq != iseq) {
            rb_thread_t *th = GET_THREAD();
            VALUE *tep = VM_EP_PREV_EP(lcfp->ep);
            while (1) {
                lcfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(lcfp);
                if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, lcfp)) {
                    return -1;
                }
                if (lcfp->ep == tep) {
                    break;
                }
            }
        }

        /* the enclosing frame must belong to a method */
        if (!lcfp->me) {
            return -1;
        }

        ci->mid = lcfp->me->def->original_id;
        ci->klass = vm_search_normal_superclass(lcfp->klass);
    }
    else {
        ci->klass = vm_search_normal_superclass(reg_cfp->klass);
    }

    return 0;
}
02025
/*
 * Resolve the target of a `super` call: validate the calling context,
 * find the lookup class and method id, and store the resulting method
 * entry into ci.  On failure paths either raises or redirects the call
 * site to method_missing.
 */
static void
vm_search_super_method(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
{
    VALUE current_defined_class;
    rb_iseq_t *iseq = GET_ISEQ();
    VALUE sigval = TOPN(ci->argc);  /* signature marker pushed by the compiler */

    current_defined_class = GET_CFP()->klass;
    if (NIL_P(current_defined_class)) {
        vm_super_outside();
    }

    if (!NIL_P(RCLASS_REFINED_CLASS(current_defined_class))) {
        current_defined_class = RCLASS_REFINED_CLASS(current_defined_class);
    }

    /* self must be an instance of the class that defines the current
     * method, otherwise super would dispatch on the wrong hierarchy */
    if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
        BUILTIN_TYPE(current_defined_class) != T_ICLASS &&
        !FL_TEST(current_defined_class, RMODULE_INCLUDED_INTO_REFINEMENT) &&
        !rb_obj_is_kind_of(ci->recv, current_defined_class)) {
        VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
            RBASIC(current_defined_class)->klass : current_defined_class;

        rb_raise(rb_eTypeError,
                 "self has wrong type to call super in this context: "
                 "%"PRIsVALUE" (expected %"PRIsVALUE")",
                 rb_obj_class(ci->recv), m);
    }

    switch (vm_search_superclass(GET_CFP(), iseq, sigval, ci)) {
      case -1:
        /* vm_super_outside() raises, so this does not fall through */
        vm_super_outside();
      case -2:
        rb_raise(rb_eRuntimeError,
                 "implicit argument passing of super from method defined"
                 " by define_method() is not supported."
                 " Specify all arguments explicitly.");
    }
    if (!ci->klass) {
        /* no superclass: route the call to method_missing */
        ci->aux.missing_reason = NOEX_SUPER;
        CI_SET_FASTPATH(ci, vm_call_method_missing, 1);
        return;
    }

    /* lookup the target method */
    ci->me = rb_method_entry(ci->klass, ci->mid, &ci->defined_class);
    ci->call = vm_call_super_method;

    while (iseq && !iseq->klass) {
        iseq = iseq->parent_iseq;
    }

    /* super found the method we are currently in: step one level up */
    if (ci->me && ci->me->def->type == VM_METHOD_TYPE_ISEQ && ci->me->def->body.iseq == iseq) {
        ci->klass = RCLASS_SUPER(ci->defined_class);
        ci->me = rb_method_entry(ci->klass, ci->mid, &ci->defined_class);
    }
}
02084
02085
02086
02087 static inline int
02088 block_proc_is_lambda(const VALUE procval)
02089 {
02090 rb_proc_t *proc;
02091
02092 if (procval) {
02093 GetProcPtr(procval, proc);
02094 return proc->is_lambda;
02095 }
02096 else {
02097 return 0;
02098 }
02099 }
02100
/*
 * Yield to a C-implemented block (ifunc).  Packages the arguments
 * according to lambda/proc semantics, materializes the block argument
 * if one was supplied, pushes an IFUNC frame, calls the C function,
 * and drops the frame on return.
 */
static inline VALUE
vm_yield_with_cfunc(rb_thread_t *th, const rb_block_t *block,
                    VALUE self, int argc, const VALUE *argv,
                    const rb_block_t *blockargptr)
{
    NODE *ifunc = (NODE *) block->iseq;  /* ifunc blocks store a NODE here */
    VALUE val, arg, blockarg;
    int lambda = block_proc_is_lambda(block->proc);

    if (lambda) {
        /* lambda semantics: pass all arguments as one array */
        arg = rb_ary_new4(argc, argv);
    }
    else if (argc == 0) {
        arg = Qnil;
    }
    else {
        arg = argv[0];
    }

    if (blockargptr) {
        if (blockargptr->proc) {
            blockarg = blockargptr->proc;
        }
        else {
            blockarg = rb_vm_make_proc(th, blockargptr, rb_cProc);
        }
    }
    else {
        blockarg = Qnil;
    }

    vm_push_frame(th, (rb_iseq_t *)ifunc, VM_FRAME_MAGIC_IFUNC, self,
                  0, VM_ENVVAL_PREV_EP_PTR(block->ep), 0,
                  th->cfp->sp, 1, 0, 0);

    val = (*ifunc->nd_cfnc) (arg, ifunc->nd_tval, argc, argv, blockarg);

    th->cfp++;  /* pop the IFUNC frame */
    return val;
}
02141
02142
02143
02144
02145
02146
/*
 * Arrange block arguments for iseqs with optional/rest/post
 * parameters.  Splits the surplus (beyond the mandatory count m) into
 * post args, optional args and the rest array, moves post args to
 * their slots, and returns the opt_pc offset where execution should
 * start (skipping defaults for already-supplied optionals).
 */
static inline int
vm_yield_setup_block_args_complex(rb_thread_t *th, const rb_iseq_t *iseq,
                                  int argc, VALUE *argv)
{
    rb_num_t opt_pc = 0;
    int i;
    const int m = iseq->argc;           /* mandatory leading args */
    const int r = iseq->arg_rest;       /* rest slot index, -1 if none */
    int len = iseq->arg_post_len;       /* number of post args */
    int start = iseq->arg_post_start;   /* first post-arg slot */
    int rsize = argc > m ? argc - m : 0;    /* surplus beyond mandatory */
    int psize = rsize > len ? len : rsize;  /* surplus consumed by post */
    int osize = 0;                       /* surplus consumed by optionals */
    VALUE ary;

    /* reserve the post-arg portion of the surplus first */
    rsize -= psize;

    if (iseq->arg_opts) {
        const int opts = iseq->arg_opts - 1;
        if (rsize > opts) {
            osize = opts;
            opt_pc = iseq->arg_opt_table[opts];
        }
        else {
            osize = rsize;
            opt_pc = iseq->arg_opt_table[rsize];
        }
    }
    rsize -= osize;

    if (0) {
        printf(" argc: %d\n", argc);
        printf("  len: %d\n", len);
        printf("start: %d\n", start);
        printf("rsize: %d\n", rsize);
    }

    if (r == -1) {
        /* no rest argument: just slide the post args into place */
        MEMMOVE(&argv[start], &argv[m+osize], VALUE, psize);
    }
    else {
        ary = rb_ary_new4(rsize, &argv[r]);

        /* copy post args before overwriting the rest slot */
        MEMMOVE(&argv[start], &argv[m+rsize+osize], VALUE, psize);
        argv[r] = ary;
    }

    /* nil-fill post-arg slots that received no value */
    for (i=psize; i<len; i++) {
        argv[start + i] = Qnil;
    }

    return (int)opt_pc;
}
02203
/*
 * Arrange arguments for yielding to a non-lambda block: auto-splat a
 * single array argument where block semantics call for it, process
 * keyword arguments, nil-fill or discard to match the block's
 * parameter list, build the rest array, and bind the block parameter.
 * Returns the opt_pc at which execution of the block should start.
 */
static inline int
vm_yield_setup_block_args(rb_thread_t *th, const rb_iseq_t * iseq,
                          int orig_argc, VALUE *argv,
                          const rb_block_t *blockptr)
{
    int i;
    int argc = orig_argc;
    const int m = iseq->argc;
    const int min = m + iseq->arg_post_len;
    VALUE ary, arg0;
    VALUE keyword_hash = Qnil;
    int opt_pc = 0;

    /* keep the args visible to GC while we shuffle them */
    th->mark_stack_len = argc;

    /*
     * Auto-splat: yielding a single array to a block that takes more
     * than one value spreads the array across the parameters.  Only
     * applies to non-lambda blocks (arg_simple bit 1 clear) that can
     * actually use multiple values.
     */
    arg0 = argv[0];
    if (!(iseq->arg_simple & 0x02) &&          /* exclude argc == 0 */
        (min > 0 ||                             /* positional params, or */
         iseq->arg_opts > 2 ||                  /* multiple optionals, or */
         iseq->arg_keyword != -1 ||             /* keyword params */
         0) &&
        argc == 1 && !NIL_P(ary = rb_check_array_type(arg0))) {
        th->mark_stack_len = argc = RARRAY_LENINT(ary);

        CHECK_VM_STACK_OVERFLOW(th->cfp, argc);

        MEMCPY(argv, RARRAY_CONST_PTR(ary), VALUE, argc);
    }
    else {
        /* restore argv[0]: rb_check_array_type() may have triggered
         * to_ary which can run arbitrary code */
        argv[0] = arg0;
    }

    /* extract the keyword hash before positional fixing */
    if (iseq->arg_keyword != -1) {
        argc = vm_callee_setup_keyword_arg(th, iseq, argc, min, argv, &keyword_hash);
    }

    /* nil-fill missing mandatory parameters */
    for (i=argc; i<m; i++) {
        argv[i] = Qnil;
    }

    if (iseq->arg_rest == -1 && iseq->arg_opts == 0) {
        const int arg_size = iseq->arg_size;
        if (arg_size < argc) {
            /* block semantics: silently drop surplus arguments */
            th->mark_stack_len = argc = arg_size;
        }
    }
    else {
        int r = iseq->arg_rest;

        if (iseq->arg_post_len ||
            iseq->arg_opts) {
            opt_pc = vm_yield_setup_block_args_complex(th, iseq, argc, argv);
        }
        else {
            if (argc < r) {
                /* fewer args than the rest slot index:
                 * nil-fill up to it and give an empty rest array */
                for (i=argc; i<r; i++) {
                    argv[i] = Qnil;
                }
                argv[r] = rb_ary_new();
            }
            else {
                argv[r] = rb_ary_new4(argc-r, &argv[r]);
            }
        }

        th->mark_stack_len = iseq->arg_size;
    }

    /* bind the keyword hash and clear individual keyword slots */
    if (iseq->arg_keyword != -1) {
        int arg_keywords_end = iseq->arg_keyword - (iseq->arg_block != -1);
        for (i = iseq->arg_keywords; 0 < i; i--) {
            argv[arg_keywords_end - i] = Qnil;
        }
        argv[iseq->arg_keyword] = keyword_hash;
    }

    /* bind the block parameter (&blk), creating a proc if needed */
    if (iseq->arg_block != -1) {
        VALUE procval = Qnil;

        if (blockptr) {
            if (blockptr->proc == 0) {
                procval = rb_vm_make_proc(th, blockptr, rb_cProc);
            }
            else {
                procval = blockptr->proc;
            }
        }

        argv[iseq->arg_block] = procval;
    }

    th->mark_stack_len = 0;
    return opt_pc;
}
02319
/*
 * Set up arguments for invoking a block: lambda-style blocks use the
 * strict method-argument path, ordinary blocks use the lenient block
 * path (auto-splat, surplus dropping, nil-filling).
 * Returns the opt_pc where execution of the block body starts.
 */
static inline int
vm_yield_setup_args(rb_thread_t * const th, const rb_iseq_t *iseq,
                    int argc, VALUE *argv, const rb_block_t *blockptr, int lambda)
{
    /* debug dump, deliberately compiled out */
    if (0) {
        printf("     argc: %d\n", argc);
        printf("iseq argc: %d\n", iseq->argc);
        printf("iseq opts: %d\n", iseq->arg_opts);
        printf("iseq rest: %d\n", iseq->arg_rest);
        printf("iseq post: %d\n", iseq->arg_post_len);
        printf("iseq blck: %d\n", iseq->arg_block);
        printf("iseq smpl: %d\n", iseq->arg_simple);
        printf("   lambda: %s\n", lambda ? "true" : "false");
    }

    if (lambda) {
        /* call as a method: strict arity via a throwaway call info */
        rb_call_info_t ci_entry;
        ci_entry.flag = 0;
        ci_entry.argc = argc;
        ci_entry.blockptr = (rb_block_t *)blockptr;
        vm_callee_setup_arg(th, &ci_entry, iseq, argv, 1);
        return ci_entry.aux.opt_pc;
    }
    else {
        return vm_yield_setup_block_args(th, iseq, argc, argv, blockptr);
    }
}
02348
/*
 * Implement `yield`: find the block of the current method frame, set
 * up its arguments, and either push a BLOCK/LAMBDA frame (ISeq block;
 * returns Qundef to resume in the VM loop) or call the C function
 * directly (ifunc block; returns its value).
 * Raises LocalJumpError when no block was given.
 */
static VALUE
vm_invoke_block(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
{
    const rb_block_t *block = VM_CF_BLOCK_PTR(reg_cfp);
    rb_iseq_t *iseq;
    VALUE type = GET_ISEQ()->local_iseq->type;

    if ((type != ISEQ_TYPE_METHOD && type != ISEQ_TYPE_CLASS) || block == 0) {
        rb_vm_localjump_error("no block given (yield)", Qnil, 0);
    }
    iseq = block->iseq;

    if (UNLIKELY(ci->flag & VM_CALL_ARGS_SPLAT)) {
        vm_caller_setup_args(th, GET_CFP(), ci);
    }

    /* ifunc blocks store a NODE in the iseq slot; T_NODE tells them apart */
    if (BUILTIN_TYPE(iseq) != T_NODE) {
        int opt_pc;
        const int arg_size = iseq->arg_size;
        int is_lambda = block_proc_is_lambda(block->proc);
        VALUE * const rsp = GET_SP() - ci->argc;
        SET_SP(rsp);

        opt_pc = vm_yield_setup_args(th, iseq, ci->argc, rsp, 0, is_lambda);

        vm_push_frame(th, iseq,
                      is_lambda ? VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK,
                      block->self,
                      block->klass,
                      VM_ENVVAL_PREV_EP_PTR(block->ep),
                      iseq->iseq_encoded + opt_pc,
                      rsp + arg_size,
                      iseq->local_size - arg_size, 0, iseq->stack_max);

        return Qundef;
    }
    else {
        VALUE val = vm_yield_with_cfunc(th, block, block->self, ci->argc, STACK_ADDR_FROM_TOP(ci->argc), 0);
        POPN(ci->argc);
        return val;
    }
}
02391
/*
 * Wrap a block iseq in a Proc object, anchored to the nearest
 * Ruby-level control frame of the current thread.
 */
static VALUE
vm_make_proc_with_iseq(rb_iseq_t *blockiseq)
{
    rb_block_t *blockptr;
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);

    if (cfp == 0) {
        rb_bug("vm_make_proc_with_iseq: unreachable");
    }

    /* use the frame's own block slot as the proc's backing block */
    blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
    blockptr->iseq = blockiseq;
    blockptr->proc = 0;

    return rb_vm_make_proc(th, blockptr, rb_cProc);
}
02409
02410 static VALUE
02411 vm_once_exec(rb_iseq_t *iseq)
02412 {
02413 VALUE proc = vm_make_proc_with_iseq(iseq);
02414 return rb_proc_call_with_block(proc, 0, 0, Qnil);
02415 }
02416
02417 static VALUE
02418 vm_once_clear(VALUE data)
02419 {
02420 union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
02421 is->once.running_thread = NULL;
02422 return Qnil;
02423 }
02424