00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012 #include "insns.inc"
00013 #include <math.h>
00014 #include "constant.h"
00015 #include "internal.h"
00016 #include "probes.h"
00017 #include "probes_helper.h"
00018
00019
00020
00021 #ifndef INLINE
00022 #define INLINE inline
00023 #endif
00024
00025 static rb_control_frame_t *vm_get_ruby_level_caller_cfp(rb_thread_t *th, rb_control_frame_t *cfp);
00026
static void
vm_stackoverflow(void)
{
    /* Raise the pre-allocated stack-overflow exception object (sysstack_error). */
    rb_exc_raise(sysstack_error);
}
00032
/*
 * Push a new control frame onto th's VM stack and return it.
 *
 * Value-stack layout built here: `local_size` slots initialized to Qnil,
 * followed by `specval` (block pointer / previous-ep word).  cfp->ep points
 * at the specval slot and cfp->sp just past it.  Raises a stack-overflow
 * exception when the value stack (growing up via sp) would collide with the
 * control-frame region (growing down via cfp).
 */
static inline rb_control_frame_t *
vm_push_frame(rb_thread_t *th,
              const rb_iseq_t *iseq,
              VALUE type,
              VALUE self,
              VALUE klass,
              VALUE specval,
              const VALUE *pc,
              VALUE *sp,
              int local_size,
              const rb_method_entry_t *me)
{
    rb_control_frame_t *const cfp = th->cfp - 1;
    int i;

    /* check stack overflow: locals must fit before the next control frame */
    if ((void *)(sp + local_size) >= (void *)cfp) {
        vm_stackoverflow();
    }
    th->cfp = cfp;

    /* initialize local variables to nil */
    for (i=0; i < local_size; i++) {
        *sp++ = Qnil;
    }

    /* set special value (block pointer or previous-ep reference) */
    *sp = specval;

    /* setup the new control frame */
    cfp->pc = (VALUE *)pc;
    cfp->sp = sp + 1;
#if VM_DEBUG_BP_CHECK
    cfp->bp_check = sp + 1;
#endif
    cfp->ep = sp;
    cfp->iseq = (rb_iseq_t *) iseq;
    cfp->flag = type;
    cfp->self = self;
    cfp->block_iseq = 0;
    cfp->proc = 0;
    cfp->me = me;
    if (klass) {
        cfp->klass = klass;
    }
    else {
        /* no explicit klass: inherit from the previous frame when it is valid */
        rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
        if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, prev_cfp)) {
            cfp->klass = Qnil;
        }
        else {
            cfp->klass = prev_cfp->klass;
        }
    }

    if (VMDEBUG == 2) {
        SDR();
    }

    return cfp;
}
00097
00098 static inline void
00099 vm_pop_frame(rb_thread_t *th)
00100 {
00101 th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
00102
00103 if (VMDEBUG == 2) {
00104 SDR();
00105 }
00106 }
00107
00108
00109 static inline VALUE
00110 rb_arg_error_new(int argc, int min, int max)
00111 {
00112 VALUE err_mess = 0;
00113 if (min == max) {
00114 err_mess = rb_sprintf("wrong number of arguments (%d for %d)", argc, min);
00115 }
00116 else if (max == UNLIMITED_ARGUMENTS) {
00117 err_mess = rb_sprintf("wrong number of arguments (%d for %d+)", argc, min);
00118 }
00119 else {
00120 err_mess = rb_sprintf("wrong number of arguments (%d for %d..%d)", argc, min, max);
00121 }
00122 return rb_exc_new3(rb_eArgError, err_mess);
00123 }
00124
00125 NORETURN(static void argument_error(const rb_iseq_t *iseq, int miss_argc, int min_argc, int max_argc));
00126 static void
00127 argument_error(const rb_iseq_t *iseq, int miss_argc, int min_argc, int max_argc)
00128 {
00129 VALUE exc = rb_arg_error_new(miss_argc, min_argc, max_argc);
00130 VALUE bt = rb_make_backtrace();
00131 VALUE err_line = 0;
00132
00133 if (iseq) {
00134 int line_no = rb_iseq_first_lineno(iseq);
00135
00136 err_line = rb_sprintf("%s:%d:in `%s'",
00137 RSTRING_PTR(iseq->location.path),
00138 line_no, RSTRING_PTR(iseq->location.label));
00139 rb_funcall(bt, rb_intern("unshift"), 1, err_line);
00140 }
00141
00142 rb_funcall(exc, rb_intern("set_backtrace"), 1, bt);
00143 rb_exc_raise(exc);
00144 }
00145
00146 NORETURN(static void unknown_keyword_error(const rb_iseq_t *iseq, VALUE hash));
00147 static void
00148 unknown_keyword_error(const rb_iseq_t *iseq, VALUE hash)
00149 {
00150 VALUE sep = rb_usascii_str_new2(", "), keys;
00151 st_table *tbl = rb_hash_tbl(hash);
00152 const char *msg;
00153 int i;
00154 for (i = 0; i < iseq->arg_keywords; i++) {
00155 st_data_t key = ID2SYM(iseq->arg_keyword_table[i]);
00156 st_delete(tbl, &key, NULL);
00157 }
00158 keys = rb_funcall(hash, rb_intern("keys"), 0, 0);
00159 if (!RB_TYPE_P(keys, T_ARRAY)) rb_raise(rb_eArgError, "unknown keyword");
00160 msg = RARRAY_LEN(keys) == 1 ? "" : "s";
00161 keys = rb_funcall(keys, rb_intern("join"), 1, sep);
00162 rb_raise(rb_eArgError, "unknown keyword%s: %"PRIsVALUE, msg, keys);
00163 }
00164
/* Public entry point: raise ArgumentError for a wrong-arity call
 * (argc arguments given, min..max expected). */
void
rb_error_arity(int argc, int min, int max)
{
    rb_exc_raise(rb_arg_error_new(argc, min, max));
}
00170
00171
00172
00173 static inline NODE *
00174 lep_svar_place(rb_thread_t *th, VALUE *lep)
00175 {
00176 VALUE *svar;
00177
00178 if (lep && th->root_lep != lep) {
00179 svar = &lep[-1];
00180 }
00181 else {
00182 svar = &th->root_svar;
00183 }
00184 if (NIL_P(*svar)) {
00185 *svar = (VALUE)NEW_IF(Qnil, Qnil, Qnil);
00186 }
00187 return (NODE *)*svar;
00188 }
00189
00190 static VALUE
00191 lep_svar_get(rb_thread_t *th, VALUE *lep, rb_num_t key)
00192 {
00193 NODE *svar = lep_svar_place(th, lep);
00194
00195 switch (key) {
00196 case 0:
00197 return svar->u1.value;
00198 case 1:
00199 return svar->u2.value;
00200 default: {
00201 const VALUE ary = svar->u3.value;
00202
00203 if (NIL_P(ary)) {
00204 return Qnil;
00205 }
00206 else {
00207 return rb_ary_entry(ary, key - DEFAULT_SPECIAL_VAR_COUNT);
00208 }
00209 }
00210 }
00211 }
00212
00213 static void
00214 lep_svar_set(rb_thread_t *th, VALUE *lep, rb_num_t key, VALUE val)
00215 {
00216 NODE *svar = lep_svar_place(th, lep);
00217
00218 switch (key) {
00219 case 0:
00220 svar->u1.value = val;
00221 return;
00222 case 1:
00223 svar->u2.value = val;
00224 return;
00225 default: {
00226 VALUE ary = svar->u3.value;
00227
00228 if (NIL_P(ary)) {
00229 svar->u3.value = ary = rb_ary_new();
00230 }
00231 rb_ary_store(ary, key - DEFAULT_SPECIAL_VAR_COUNT, val);
00232 }
00233 }
00234 }
00235
00236 static inline VALUE
00237 vm_getspecial(rb_thread_t *th, VALUE *lep, rb_num_t key, rb_num_t type)
00238 {
00239 VALUE val;
00240
00241 if (type == 0) {
00242 val = lep_svar_get(th, lep, key);
00243 }
00244 else {
00245 VALUE backref = lep_svar_get(th, lep, 1);
00246
00247 if (type & 0x01) {
00248 switch (type >> 1) {
00249 case '&':
00250 val = rb_reg_last_match(backref);
00251 break;
00252 case '`':
00253 val = rb_reg_match_pre(backref);
00254 break;
00255 case '\'':
00256 val = rb_reg_match_post(backref);
00257 break;
00258 case '+':
00259 val = rb_reg_match_last(backref);
00260 break;
00261 default:
00262 rb_bug("unexpected back-ref");
00263 }
00264 }
00265 else {
00266 val = rb_reg_nth_match((int)(type >> 1), backref);
00267 }
00268 }
00269 return val;
00270 }
00271
00272 static NODE *
00273 vm_get_cref0(const rb_iseq_t *iseq, const VALUE *ep)
00274 {
00275 while (1) {
00276 if (VM_EP_LEP_P(ep)) {
00277 if (!RUBY_VM_NORMAL_ISEQ_P(iseq)) return NULL;
00278 return iseq->cref_stack;
00279 }
00280 else if (ep[-1] != Qnil) {
00281 return (NODE *)ep[-1];
00282 }
00283 ep = VM_EP_PREV_EP(ep);
00284 }
00285 }
00286
00287 NODE *
00288 rb_vm_get_cref(const rb_iseq_t *iseq, const VALUE *ep)
00289 {
00290 NODE *cref = vm_get_cref0(iseq, ep);
00291
00292 if (cref == 0) {
00293 rb_bug("rb_vm_get_cref: unreachable");
00294 }
00295 return cref;
00296 }
00297
/*
 * Copy the cref chain `node`, replacing the first entry whose class is
 * old_klass with a fresh cref for new_klass.  The rewritten chain is
 * written through new_cref_ptr: entries before the match are duplicated
 * (omod state copied), the tail after the match is shared unchanged.
 * If no entry matches, the whole chain is duplicated and NUL-terminated.
 */
void
rb_vm_rewrite_cref_stack(NODE *node, VALUE old_klass, VALUE new_klass, NODE **new_cref_ptr)
{
    NODE *new_node;
    while (node) {
        if (node->nd_clss == old_klass) {
            /* found: substitute this entry and reuse the rest of the chain */
            new_node = NEW_CREF(new_klass);
            COPY_CREF_OMOD(new_node, node);
            new_node->nd_next = node->nd_next;
            *new_cref_ptr = new_node;
            return;
        }
        /* not it: duplicate the entry and keep walking */
        new_node = NEW_CREF(node->nd_clss);
        COPY_CREF_OMOD(new_node, node);
        node = node->nd_next;
        *new_cref_ptr = new_node;
        new_cref_ptr = &new_node->nd_next;
    }
    *new_cref_ptr = NULL;
}
00318
/* Build a new cref for klass (with visibility noex) linked in front of the
 * cref of the given block, or failing that, of the Ruby-level caller frame. */
static NODE *
vm_cref_push(rb_thread_t *th, VALUE klass, int noex, rb_block_t *blockptr)
{
    rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(th, th->cfp);
    NODE *cref = NEW_CREF(klass);
    cref->nd_refinements = Qnil;
    cref->nd_visi = noex;

    if (blockptr) {
        cref->nd_next = vm_get_cref0(blockptr->iseq, blockptr->ep);
    }
    else if (cfp) {
        cref->nd_next = vm_get_cref0(cfp->iseq, cfp->ep);
    }
    /* copy the parent's refinement state when present; nd_next may also be
       the literal value 1 here, which is skipped (sentinel -- TODO confirm
       where it originates) */
    if (cref->nd_next && cref->nd_next != (void *) 1 &&
        !NIL_P(cref->nd_next->nd_refinements)) {
        COPY_CREF_OMOD(cref, cref->nd_next);
    }

    return cref;
}
00341
00342 static inline VALUE
00343 vm_get_cbase(const rb_iseq_t *iseq, const VALUE *ep)
00344 {
00345 NODE *cref = rb_vm_get_cref(iseq, ep);
00346 VALUE klass = Qundef;
00347
00348 while (cref) {
00349 if ((klass = cref->nd_clss) != 0) {
00350 break;
00351 }
00352 cref = cref->nd_next;
00353 }
00354
00355 return klass;
00356 }
00357
00358 static inline VALUE
00359 vm_get_const_base(const rb_iseq_t *iseq, const VALUE *ep)
00360 {
00361 NODE *cref = rb_vm_get_cref(iseq, ep);
00362 VALUE klass = Qundef;
00363
00364 while (cref) {
00365 if (!(cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL) &&
00366 (klass = cref->nd_clss) != 0) {
00367 break;
00368 }
00369 cref = cref->nd_next;
00370 }
00371
00372 return klass;
00373 }
00374
00375 static inline void
00376 vm_check_if_namespace(VALUE klass)
00377 {
00378 VALUE str;
00379 if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
00380 str = rb_inspect(klass);
00381 rb_raise(rb_eTypeError, "%s is not a class/module",
00382 StringValuePtr(str));
00383 }
00384 }
00385
00386 static inline VALUE
00387 vm_get_iclass(rb_control_frame_t *cfp, VALUE klass)
00388 {
00389 if (RB_TYPE_P(klass, T_MODULE) &&
00390 FL_TEST(klass, RMODULE_IS_OVERLAID) &&
00391 RB_TYPE_P(cfp->klass, T_ICLASS) &&
00392 RBASIC(cfp->klass)->klass == klass) {
00393 return cfp->klass;
00394 }
00395 else {
00396 return klass;
00397 }
00398 }
00399
/*
 * Resolve constant `id`.
 *
 * orig_klass == Qnil requests a lexically-scoped lookup: scan the cref
 * chain (skipping eval-pushed entries), honoring autoload stubs; if not
 * found there, fall back to an inheritance-based search starting at the
 * innermost cref class (or the receiver's class at toplevel).  Otherwise
 * look up `id` directly under orig_klass (public constants only).
 *
 * When is_defined is nonzero, return a defined?-style result instead of
 * the constant's value.
 */
static inline VALUE
vm_get_ev_const(rb_thread_t *th, const rb_iseq_t *iseq,
                VALUE orig_klass, ID id, int is_defined)
{
    VALUE val;

    if (orig_klass == Qnil) {
        /* in the current lexical scope */
        const NODE *root_cref = rb_vm_get_cref(iseq, th->cfp->ep);
        const NODE *cref;
        VALUE klass = orig_klass;

        /* skip crefs that were pushed by eval */
        while (root_cref && root_cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL) {
            root_cref = root_cref->nd_next;
        }
        cref = root_cref;
        while (cref && cref->nd_next) {
            if (cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL) {
                klass = Qnil;
            }
            else {
                klass = cref->nd_clss;
            }
            cref = cref->nd_next;

            if (!NIL_P(klass)) {
                VALUE av, am = 0;
                st_data_t data;
              search_continue:
                if (RCLASS_CONST_TBL(klass) &&
                    st_lookup(RCLASS_CONST_TBL(klass), id, &data)) {
                    val = ((rb_const_entry_t*)data)->value;
                    if (val == Qundef) {
                        /* Qundef marks an autoload stub; `am` prevents
                           retrying the same class forever */
                        if (am == klass) break;
                        am = klass;
                        if (is_defined) return 1;
                        if (rb_autoloading_value(klass, id, &av)) return av;
                        rb_autoload_load(klass, id);
                        goto search_continue;
                    }
                    else {
                        if (is_defined) {
                            return 1;
                        }
                        else {
                            return val;
                        }
                    }
                }
            }
        }

        /* not found lexically: search by inheritance */
        if (root_cref && !NIL_P(root_cref->nd_clss)) {
            klass = vm_get_iclass(th->cfp, root_cref->nd_clss);
        }
        else {
            klass = CLASS_OF(th->cfp->self);
        }

        if (is_defined) {
            return rb_const_defined(klass, id);
        }
        else {
            return rb_const_get(klass, id);
        }
    }
    else {
        vm_check_if_namespace(orig_klass);
        if (is_defined) {
            return rb_public_const_defined_from(orig_klass, id);
        }
        else {
            return rb_public_const_get_from(orig_klass, id);
        }
    }
}
00477
/*
 * Find the class/module that hosts class-variable access for the given
 * cref: walk outward past nil-class, singleton-class and eval-pushed
 * entries.  Warns when the search bottoms out at toplevel; raises
 * TypeError when no usable class is found.
 */
static inline VALUE
vm_get_cvar_base(NODE *cref, rb_control_frame_t *cfp)
{
    VALUE klass;

    if (!cref) {
        rb_bug("vm_get_cvar_base: no cref");
    }

    /* skip crefs that cannot host class variables */
    while (cref->nd_next &&
           (NIL_P(cref->nd_clss) || FL_TEST(cref->nd_clss, FL_SINGLETON) ||
            (cref->flags & NODE_FL_CREF_PUSHED_BY_EVAL))) {
        cref = cref->nd_next;
    }
    if (!cref->nd_next) {
        rb_warn("class variable access from toplevel");
    }

    klass = vm_get_iclass(cfp, cref->nd_clss);

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class variables available");
    }
    return klass;
}
00503
00504 static VALUE
00505 vm_search_const_defined_class(const VALUE cbase, ID id)
00506 {
00507 if (rb_const_defined_at(cbase, id)) return cbase;
00508 if (cbase == rb_cObject) {
00509 VALUE tmp = RCLASS_SUPER(cbase);
00510 while (tmp) {
00511 if (rb_const_defined_at(tmp, id)) return tmp;
00512 tmp = RCLASS_SUPER(tmp);
00513 }
00514 }
00515 return 0;
00516 }
00517
00518 #ifndef USE_IC_FOR_IVAR
00519 #define USE_IC_FOR_IVAR 1
00520 #endif
00521
/*
 * Read instance variable `id` from obj.
 *
 * For T_OBJECT receivers an inline cache is used: `ic` (getinstancevariable
 * path, is_attr == 0) or ci->aux.index (attr-reader path, is_attr != 0)
 * caches the ivar slot index, validated via the cached class pointer and
 * the global VM state version.  On a miss the index is resolved through the
 * object's iv index table and the cache is refilled.  Non-T_OBJECT
 * receivers fall back to rb_ivar_get / rb_attr_get.
 */
static inline VALUE
vm_getivar(VALUE obj, ID id, IC ic, rb_call_info_t *ci, int is_attr)
{
#if USE_IC_FOR_IVAR
    if (RB_TYPE_P(obj, T_OBJECT)) {
        VALUE val = Qundef;
        VALUE klass = RBASIC(obj)->klass;

        if (LIKELY((!is_attr && (ic->ic_class == klass && ic->ic_vmstat == GET_VM_STATE_VERSION())) ||
                   (is_attr && ci->aux.index > 0))) {
            /* cache hit: slot index is known */
            long index = !is_attr ? ic->ic_value.index : ci->aux.index - 1;
            long len = ROBJECT_NUMIV(obj);
            VALUE *ptr = ROBJECT_IVPTR(obj);

            if (index < len) {
                val = ptr[index];
            }
        }
        else {
            /* cache miss: resolve the slot index and refill the cache */
            st_data_t index;
            long len = ROBJECT_NUMIV(obj);
            VALUE *ptr = ROBJECT_IVPTR(obj);
            struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);

            if (iv_index_tbl) {
                if (st_lookup(iv_index_tbl, id, &index)) {
                    if ((long)index < len) {
                        val = ptr[index];
                    }
                    if (!is_attr) {
                        ic->ic_class = klass;
                        ic->ic_value.index = index;
                        ic->ic_vmstat = GET_VM_STATE_VERSION();
                    }
                    else {
                        /* attr path stores index+1 so 0 can mean "unset" */
                        ci->aux.index = index + 1;
                    }
                }
            }
        }

        if (UNLIKELY(val == Qundef)) {
            /* uninitialized ivar reads as nil (warn only on plain reads) */
            if (!is_attr) rb_warning("instance variable %s not initialized", rb_id2name(id));
            val = Qnil;
        }
        return val;
    }
#endif
    if (is_attr)
        return rb_attr_get(obj, id);
    return rb_ivar_get(obj, id);
}
00574
/*
 * Write instance variable `id` on obj, using the same inline-cache scheme
 * as vm_getivar.  Performs the $SAFE (level >= 4 cannot modify trusted
 * objects) and frozen checks before any write.  The fast path stores
 * directly only when the cached slot already exists in the object's ivar
 * buffer; otherwise rb_ivar_set handles (re)allocation.
 */
static inline VALUE
vm_setivar(VALUE obj, ID id, VALUE val, IC ic, rb_call_info_t *ci, int is_attr)
{
#if USE_IC_FOR_IVAR
    if (!OBJ_UNTRUSTED(obj) && rb_safe_level() >= 4) {
        rb_raise(rb_eSecurityError, "Insecure: can't modify instance variable");
    }

    rb_check_frozen(obj);

    if (RB_TYPE_P(obj, T_OBJECT)) {
        VALUE klass = RBASIC(obj)->klass;
        st_data_t index;

        if (LIKELY(
            (!is_attr && ic->ic_class == klass && ic->ic_vmstat == GET_VM_STATE_VERSION()) ||
            (is_attr && ci->aux.index > 0))) {
            /* cache hit: store directly when the slot already exists */
            long index = !is_attr ? ic->ic_value.index : ci->aux.index-1;
            long len = ROBJECT_NUMIV(obj);
            VALUE *ptr = ROBJECT_IVPTR(obj);

            if (index < len) {
                ptr[index] = val;
                return val;
            }
        }
        else {
            /* cache miss: remember the index, then fall through to slow path */
            struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);

            if (iv_index_tbl && st_lookup(iv_index_tbl, (st_data_t)id, &index)) {
                if (!is_attr) {
                    ic->ic_class = klass;
                    ic->ic_value.index = index;
                    ic->ic_vmstat = GET_VM_STATE_VERSION();
                }
                else {
                    ci->aux.index = index + 1;
                }
            }

        }
    }
#endif
    return rb_ivar_set(obj, id, val);
}
00620
/* getinstancevariable instruction body: cached ivar read (non-attr path). */
static VALUE
vm_getinstancevariable(VALUE obj, ID id, IC ic)
{
    return vm_getivar(obj, id, ic, 0, 0);
}
00626
/* setinstancevariable instruction body: cached ivar write (non-attr path). */
static void
vm_setinstancevariable(VALUE obj, ID id, VALUE val, IC ic)
{
    vm_setivar(obj, id, val, ic, 0, 0);
}
00632
00633 static VALUE
00634 vm_throw(rb_thread_t *th, rb_control_frame_t *reg_cfp,
00635 rb_num_t throw_state, VALUE throwobj)
00636 {
00637 int state = (int)(throw_state & 0xff);
00638 int flag = (int)(throw_state & 0x8000);
00639 rb_num_t level = throw_state >> 16;
00640
00641 if (state != 0) {
00642 VALUE *pt = 0;
00643 if (flag != 0) {
00644 pt = (void *) 1;
00645 }
00646 else {
00647 if (state == TAG_BREAK) {
00648 rb_control_frame_t *cfp = GET_CFP();
00649 VALUE *ep = GET_EP();
00650 int is_orphan = 1;
00651 rb_iseq_t *base_iseq = GET_ISEQ();
00652
00653 search_parent:
00654 if (cfp->iseq->type != ISEQ_TYPE_BLOCK) {
00655 if (cfp->iseq->type == ISEQ_TYPE_CLASS) {
00656 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00657 ep = cfp->ep;
00658 goto search_parent;
00659 }
00660 ep = VM_EP_PREV_EP(ep);
00661 base_iseq = base_iseq->parent_iseq;
00662
00663 while ((VALUE *) cfp < th->stack + th->stack_size) {
00664 if (cfp->ep == ep) {
00665 goto search_parent;
00666 }
00667 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00668 }
00669 rb_bug("VM (throw): can't find break base.");
00670 }
00671
00672 if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_LAMBDA) {
00673
00674 is_orphan = 0;
00675 pt = cfp->ep;
00676 state = TAG_RETURN;
00677 }
00678 else {
00679 ep = VM_EP_PREV_EP(ep);
00680
00681 while ((VALUE *)cfp < th->stack + th->stack_size) {
00682 if (cfp->ep == ep) {
00683 VALUE epc = cfp->pc - cfp->iseq->iseq_encoded;
00684 rb_iseq_t *iseq = cfp->iseq;
00685 int i;
00686
00687 for (i=0; i<iseq->catch_table_size; i++) {
00688 struct iseq_catch_table_entry *entry = &iseq->catch_table[i];
00689
00690 if (entry->type == CATCH_TYPE_BREAK &&
00691 entry->start < epc && entry->end >= epc) {
00692 if (entry->cont == epc) {
00693 goto found;
00694 }
00695 else {
00696 break;
00697 }
00698 }
00699 }
00700 break;
00701
00702 found:
00703 pt = ep;
00704 is_orphan = 0;
00705 break;
00706 }
00707 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00708 }
00709 }
00710
00711 if (is_orphan) {
00712 rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
00713 }
00714 }
00715 else if (state == TAG_RETRY) {
00716 rb_num_t i;
00717 pt = VM_EP_PREV_EP(GET_EP());
00718 for (i = 0; i < level; i++) {
00719 pt = GC_GUARDED_PTR_REF((VALUE *) * pt);
00720 }
00721 }
00722 else if (state == TAG_RETURN) {
00723 rb_control_frame_t *cfp = GET_CFP();
00724 VALUE *ep = GET_EP();
00725 VALUE *target_lep = VM_CF_LEP(cfp);
00726 int in_class_frame = 0;
00727
00728
00729 while ((VALUE *) cfp < th->stack + th->stack_size) {
00730 VALUE *lep = VM_CF_LEP(cfp);
00731
00732 if (!target_lep) {
00733 target_lep = lep;
00734 }
00735
00736 if (lep == target_lep && cfp->iseq->type == ISEQ_TYPE_CLASS) {
00737 in_class_frame = 1;
00738 target_lep = 0;
00739 }
00740
00741 if (lep == target_lep) {
00742 if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_LAMBDA) {
00743 VALUE *tep = ep;
00744
00745 if (in_class_frame) {
00746
00747 ep = cfp->ep;
00748 goto valid_return;
00749 }
00750
00751 while (target_lep != tep) {
00752 if (cfp->ep == tep) {
00753
00754 ep = cfp->ep;
00755 goto valid_return;
00756 }
00757 tep = VM_EP_PREV_EP(tep);
00758 }
00759 }
00760 }
00761
00762 if (cfp->ep == target_lep && cfp->iseq->type == ISEQ_TYPE_METHOD) {
00763 ep = target_lep;
00764 goto valid_return;
00765 }
00766
00767 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00768 }
00769
00770 rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);
00771
00772 valid_return:
00773 pt = ep;
00774 }
00775 else {
00776 rb_bug("isns(throw): unsupport throw type");
00777 }
00778 }
00779 th->state = state;
00780 return (VALUE)NEW_THROW_OBJECT(throwobj, (VALUE) pt, state);
00781 }
00782 else {
00783
00784 VALUE err = throwobj;
00785
00786 if (FIXNUM_P(err)) {
00787 th->state = FIX2INT(err);
00788 }
00789 else if (SYMBOL_P(err)) {
00790 th->state = TAG_THROW;
00791 }
00792 else if (BUILTIN_TYPE(err) == T_NODE) {
00793 th->state = GET_THROWOBJ_STATE(err);
00794 }
00795 else {
00796 th->state = TAG_RAISE;
00797
00798 }
00799 return err;
00800 }
00801 }
00802
/*
 * expandarray instruction body: push `num` destructured elements of ary
 * (coerced via to_ary when not already an Array) onto the value stack,
 * plus a rest array when flag bit 0 (splat) is set.  Flag bit 1 selects
 * "postarg" mode: values are taken from the tail of the array and pushed
 * in reverse, padding missing leading slots with nil.
 */
static inline void
vm_expandarray(rb_control_frame_t *cfp, VALUE ary, rb_num_t num, int flag)
{
    int is_splat = flag & 0x01;
    rb_num_t space_size = num + is_splat;
    VALUE *base = cfp->sp, *ptr;
    rb_num_t len;

    if (!RB_TYPE_P(ary, T_ARRAY)) {
        ary = rb_ary_to_ary(ary);
    }

    cfp->sp += space_size;

    ptr = RARRAY_PTR(ary);
    len = (rb_num_t)RARRAY_LEN(ary);

    if (flag & 0x02) {
        /* postarg mode: fill from the array tail, pushing in reverse */
        rb_num_t i = 0, j;

        if (len < num) {
            /* pad missing leading values with nil */
            for (i=0; i<num-len; i++) {
                *base++ = Qnil;
            }
        }
        for (j=0; i<num; i++, j++) {
            VALUE v = ptr[len - j - 1];
            *base++ = v;
        }
        if (is_splat) {
            /* remaining head of the array becomes the splat value */
            *base = rb_ary_new4(len - j, ptr);
        }
    }
    else {
        /* normal mode: first `num` elements, topmost slot gets element 0 */
        rb_num_t i;
        VALUE *bptr = &base[space_size - 1];

        for (i=0; i<num; i++) {
            if (len <= i) {
                /* array exhausted: nil-fill the rest */
                for (; i<num; i++) {
                    *bptr-- = Qnil;
                }
                break;
            }
            *bptr-- = ptr[i];
        }
        if (is_splat) {
            if (num > len) {
                *bptr = rb_ary_new();
            }
            else {
                *bptr = rb_ary_new4(len - num, ptr + num);
            }
        }
    }
    RB_GC_GUARD(ary);
}
00862
00863 static VALUE vm_call_general(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci);
00864
/*
 * Resolve the method entry for ci->mid on recv's class and cache it in ci.
 * With OPT_INLINE_METHOD_CACHE the lookup is skipped entirely while the
 * cached class pointer and global VM state version still match.
 */
static void
vm_search_method(rb_call_info_t *ci, VALUE recv)
{
    VALUE klass = CLASS_OF(recv);

#if OPT_INLINE_METHOD_CACHE
    if (LIKELY(GET_VM_STATE_VERSION() == ci->vmstat && klass == ci->klass)) {
        /* cache hit: ci->me and ci->call remain valid */
    }
    else {
        ci->me = rb_method_entry(klass, ci->mid, &ci->defined_class);
        ci->klass = klass;
        ci->vmstat = GET_VM_STATE_VERSION();
        ci->call = vm_call_general;
    }
#else
    ci->me = rb_method_entry(klass, ci->mid, &ci->defined_class);
    ci->call = vm_call_general;
    ci->klass = klass;
#endif
}
00886
00887 static inline int
00888 check_cfunc(const rb_method_entry_t *me, VALUE (*func)())
00889 {
00890 if (me && me->def->type == VM_METHOD_TYPE_CFUNC &&
00891 me->def->body.cfunc.func == func) {
00892 return 1;
00893 }
00894 else {
00895 return 0;
00896 }
00897 }
00898
/*
 * Optimized `==` dispatch: handle Fixnum, flonum, Float and String operands
 * inline when BOP_EQ has not been redefined for those classes; otherwise
 * consult the method cache and short-circuit to identity comparison when
 * the resolved #== is still rb_obj_equal.  Returns Qundef when the caller
 * must fall back to a full method call.
 */
static
#ifndef NO_BIG_INLINE
inline
#endif
VALUE
opt_eq_func(VALUE recv, VALUE obj, CALL_INFO ci)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_EQ, FIXNUM_REDEFINED_OP_FLAG)) {
        return (recv == obj) ? Qtrue : Qfalse;
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_EQ, FLOAT_REDEFINED_OP_FLAG)) {
        return (recv == obj) ? Qtrue : Qfalse;
    }
    else if (!SPECIAL_CONST_P(recv) && !SPECIAL_CONST_P(obj)) {
        if (HEAP_CLASS_OF(recv) == rb_cFloat &&
            HEAP_CLASS_OF(obj) == rb_cFloat &&
            BASIC_OP_UNREDEFINED_P(BOP_EQ, FLOAT_REDEFINED_OP_FLAG)) {
            double a = RFLOAT_VALUE(recv);
            double b = RFLOAT_VALUE(obj);

            /* NaN compares unequal to everything, including itself */
            if (isnan(a) || isnan(b)) {
                return Qfalse;
            }
            return (a == b) ? Qtrue : Qfalse;
        }
        else if (HEAP_CLASS_OF(recv) == rb_cString &&
                 HEAP_CLASS_OF(obj) == rb_cString &&
                 BASIC_OP_UNREDEFINED_P(BOP_EQ, STRING_REDEFINED_OP_FLAG)) {
            return rb_str_equal(recv, obj);
        }
    }

    {
        vm_search_method(ci, recv);

        if (check_cfunc(ci->me, rb_obj_equal)) {
            /* default Object#==: plain identity comparison */
            return recv == obj ? Qtrue : Qfalse;
        }
    }

    return Qundef;
}
00943
00944 static VALUE
00945 vm_call0(rb_thread_t*, VALUE, ID, int, const VALUE*, const rb_method_entry_t*, VALUE);
00946
/*
 * Evaluate a checkmatch operation.  WHEN returns the pattern itself (the
 * caller tests its truthiness); RESCUE validates that the pattern is a
 * class/module, then falls through to the CASE path, which invokes
 * pattern === target (directly through the method entry when available).
 */
static VALUE
check_match(VALUE pattern, VALUE target, enum vm_check_match_type type)
{
    switch (type) {
      case VM_CHECKMATCH_TYPE_WHEN:
        return pattern;
      case VM_CHECKMATCH_TYPE_RESCUE:
        if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
            rb_raise(rb_eTypeError, "class or module required for rescue clause");
        }
        /* fall through: rescue matching uses #=== just like case */
      case VM_CHECKMATCH_TYPE_CASE: {
        VALUE defined_class;
        rb_method_entry_t *me = rb_method_entry_with_refinements(CLASS_OF(pattern), idEqq, &defined_class);
        if (me) {
            /* invoke #=== directly through the resolved method entry */
            return vm_call0(GET_THREAD(), pattern, idEqq, 1, &target, me, defined_class);
        }
        else {
            /* no method entry found: dispatch via rb_funcall2 */
            return rb_funcall2(pattern, idEqq, 1, &target);
        }
      }
      default:
        rb_bug("check_match: unreachable");
    }
}
00973
00974
00975 #if defined(_MSC_VER) && _MSC_VER < 1300
00976 #define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse;
00977 #else
00978 #define CHECK_CMP_NAN(a, b)
00979 #endif
00980
00981 static inline VALUE
00982 double_cmp_lt(double a, double b)
00983 {
00984 CHECK_CMP_NAN(a, b);
00985 return a < b ? Qtrue : Qfalse;
00986 }
00987
00988 static inline VALUE
00989 double_cmp_le(double a, double b)
00990 {
00991 CHECK_CMP_NAN(a, b);
00992 return a <= b ? Qtrue : Qfalse;
00993 }
00994
00995 static inline VALUE
00996 double_cmp_gt(double a, double b)
00997 {
00998 CHECK_CMP_NAN(a, b);
00999 return a > b ? Qtrue : Qfalse;
01000 }
01001
01002 static inline VALUE
01003 double_cmp_ge(double a, double b)
01004 {
01005 CHECK_CMP_NAN(a, b);
01006 return a >= b ? Qtrue : Qfalse;
01007 }
01008
/*
 * Compute the base pointer (start of this frame's locals region) from the
 * previous frame's sp plus the iseq's local size.  Method frames add one
 * more slot (presumably the receiver slot -- TODO confirm).  Under
 * VM_DEBUG_BP_CHECK the result is cross-checked against cfp->bp_check.
 */
static VALUE *
vm_base_ptr(rb_control_frame_t *cfp)
{
    rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    VALUE *bp = prev_cfp->sp + cfp->iseq->local_size + 1;

    if (cfp->iseq->type == ISEQ_TYPE_METHOD) {
        /* method frames reserve one extra slot */
        bp += 1;
    }

#if VM_DEBUG_BP_CHECK
    if (bp != cfp->bp_check) {
        fprintf(stderr, "bp_check: %ld, bp: %ld\n",
                (long)(cfp->bp_check - GET_THREAD()->stack),
                (long)(bp - GET_THREAD()->stack));
        rb_bug("vm_base_ptr: unreachable");
    }
#endif

    return bp;
}
01031
01032
01033
/*
 * Prepare caller-side arguments before method dispatch:
 *  - VM_CALL_ARGS_BLOCKARG: pop the explicit &block value, coerce it to a
 *    Proc via to_proc when needed, and install it as ci->blockptr;
 *  - literal block (ci->blockiseq): install the block in the frame;
 *  - VM_CALL_ARGS_SPLAT: pop the trailing array and push its elements,
 *    adjusting ci->argc.
 * SAVE_RESTORE_CI shields ci->argc / ci->blockptr across to_proc / to_a,
 * which may run arbitrary Ruby code that reuses this call info.
 */
static void
vm_caller_setup_args(const rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
{
#define SAVE_RESTORE_CI(expr, ci) do { \
    int saved_argc = (ci)->argc; rb_block_t *saved_blockptr = (ci)->blockptr; \
    expr; \
    (ci)->argc = saved_argc; (ci)->blockptr = saved_blockptr; \
} while (0)

    if (UNLIKELY(ci->flag & VM_CALL_ARGS_BLOCKARG)) {
        rb_proc_t *po;
        VALUE proc;

        proc = *(--cfp->sp);

        if (proc != Qnil) {
            if (!rb_obj_is_proc(proc)) {
                VALUE b;

                SAVE_RESTORE_CI(b = rb_check_convert_type(proc, T_DATA, "Proc", "to_proc"), ci);

                if (NIL_P(b) || !rb_obj_is_proc(b)) {
                    rb_raise(rb_eTypeError,
                             "wrong argument type %s (expected Proc)",
                             rb_obj_classname(proc));
                }
                proc = b;
            }
            GetProcPtr(proc, po);
            ci->blockptr = &po->block;
            RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp)->proc = proc;
        }
    }
    else if (ci->blockiseq != 0) {
        /* literal block given at the call site */
        ci->blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
        ci->blockptr->iseq = ci->blockiseq;
        ci->blockptr->proc = 0;
    }

    /* expand a splatted trailing argument */
    if (UNLIKELY(ci->flag & VM_CALL_ARGS_SPLAT)) {
        VALUE ary = *(cfp->sp - 1);
        VALUE *ptr;
        int i;
        VALUE tmp;

        SAVE_RESTORE_CI(tmp = rb_check_convert_type(ary, T_ARRAY, "Array", "to_a"), ci);

        if (NIL_P(tmp)) {
            /* not splattable: leave the value as a single argument */
        }
        else {
            long len = RARRAY_LEN(tmp);
            ptr = RARRAY_PTR(tmp);
            cfp->sp -= 1;

            CHECK_VM_STACK_OVERFLOW(cfp, len);

            for (i = 0; i < len; i++) {
                *cfp->sp++ = ptr[i];
            }
            /* one stack slot was replaced by len elements */
            ci->argc += i-1;
        }
    }
}
01100
01101 static int
01102 separate_symbol(st_data_t key, st_data_t value, st_data_t arg)
01103 {
01104 VALUE *kwdhash = (VALUE *)arg;
01105
01106 if (!SYMBOL_P(key)) kwdhash++;
01107 if (!*kwdhash) *kwdhash = rb_hash_new();
01108 rb_hash_aset(*kwdhash, (VALUE)key, (VALUE)value);
01109 return ST_CONTINUE;
01110 }
01111
01112 static VALUE
01113 extract_keywords(VALUE *orighash)
01114 {
01115 VALUE parthash[2] = {0, 0};
01116 VALUE hash = *orighash;
01117
01118 if (RHASH_EMPTY_P(hash)) {
01119 *orighash = 0;
01120 return hash;
01121 }
01122 st_foreach(RHASH_TBL(hash), separate_symbol, (st_data_t)&parthash);
01123 *orighash = parthash[1];
01124 return parthash[0];
01125 }
01126
/*
 * Extract keyword arguments from the trailing positional hash (when argc
 * exceeds the mandatory count m).  Stores the keyword hash through *kwd
 * (a fresh empty hash when no keywords are present) and returns the
 * possibly-reduced positional argc.  With arg_keyword_check, raises
 * ArgumentError for any keyword not in the iseq's keyword table.
 */
static inline int
vm_callee_setup_keyword_arg(const rb_iseq_t *iseq, int argc, int m, VALUE *orig_argv, VALUE *kwd)
{
    VALUE keyword_hash, orig_hash;
    int i, j;

    if (argc > m &&
        !NIL_P(orig_hash = rb_check_hash_type(orig_argv[argc-1])) &&
        (keyword_hash = extract_keywords(&orig_hash)) != 0) {
        if (!orig_hash) {
            /* whole hash consumed as keywords: drop it from positionals */
            argc--;
        }
        else {
            /* non-symbol keys remain as a positional hash argument */
            orig_argv[argc-1] = orig_hash;
        }
        if (iseq->arg_keyword_check) {
            /* count known keywords present; any surplus key is unknown */
            for (i = j = 0; i < iseq->arg_keywords; i++) {
                if (st_lookup(RHASH_TBL(keyword_hash), ID2SYM(iseq->arg_keyword_table[i]), 0)) j++;
            }
            if (RHASH_TBL(keyword_hash)->num_entries > (unsigned int) j) {
                unknown_keyword_error(iseq, keyword_hash);
            }
        }
    }
    else {
        keyword_hash = rb_hash_new();
    }

    *kwd = keyword_hash;

    return argc;
}
01159
/*
 * Full argument setup for non-simple signatures: keyword extraction,
 * arity check, post arguments, optional arguments (which select the
 * bytecode entry offset), rest array, keyword-hash slot and block slot.
 * Returns the opt_pc offset at which execution should begin.
 */
static inline int
vm_callee_setup_arg_complex(rb_thread_t *th, rb_call_info_t *ci, const rb_iseq_t *iseq, VALUE *orig_argv)
{
    const int m = iseq->argc;
    const int opts = iseq->arg_opts - (iseq->arg_opts > 0);
    const int min = m + iseq->arg_post_len;
    const int max = (iseq->arg_rest == -1) ? m + opts + iseq->arg_post_len : UNLIMITED_ARGUMENTS;
    const int orig_argc = ci->argc;
    int argc = orig_argc;
    VALUE *argv = orig_argv;
    VALUE keyword_hash = Qnil;
    rb_num_t opt_pc = 0;

    /* keep arguments visible to the GC while we shuffle them */
    th->mark_stack_len = argc + iseq->arg_size;

    /* keyword arguments: may consume the trailing hash */
    if (iseq->arg_keyword != -1) {
        argc = vm_callee_setup_keyword_arg(iseq, argc, m, orig_argv, &keyword_hash);
    }

    /* arity check */
    if ((argc < min) || (argc > max && max != UNLIMITED_ARGUMENTS)) {
        argument_error(iseq, argc, min, max);
    }

    /* skip past the mandatory arguments */
    argv += m;
    argc -= m;

    /* post arguments: copied into their fixed slots from the tail */
    if (iseq->arg_post_len) {
        if (!(orig_argc < iseq->arg_post_start)) {
            VALUE *new_argv = ALLOCA_N(VALUE, argc);
            MEMCPY(new_argv, argv, VALUE, argc);
            argv = new_argv;
        }

        MEMCPY(&orig_argv[iseq->arg_post_start], &argv[argc -= iseq->arg_post_len],
               VALUE, iseq->arg_post_len);
    }

    /* optional arguments: pick the entry pc matching how many were given,
       nil-filling any optionals that were not supplied */
    if (iseq->arg_opts) {
        if (argc > opts) {
            argc -= opts;
            argv += opts;
            opt_pc = iseq->arg_opt_table[opts];
        }
        else {
            int i;
            for (i = argc; i<opts; i++) {
                orig_argv[i + m] = Qnil;
            }
            opt_pc = iseq->arg_opt_table[argc];
            argc = 0;
        }
    }

    /* rest argument: collect whatever is left into an array */
    if (iseq->arg_rest != -1) {
        orig_argv[iseq->arg_rest] = rb_ary_new4(argc, argv);
        argc = 0;
    }

    /* keyword-hash parameter slot */
    if (iseq->arg_keyword != -1) {
        orig_argv[iseq->arg_keyword] = keyword_hash;
    }

    /* block parameter: materialize a Proc for &block when needed */
    if (iseq->arg_block != -1) {
        VALUE blockval = Qnil;
        const rb_block_t *blockptr = ci->blockptr;

        if (blockptr) {
            /* make a Proc if the block has none yet */
            if (blockptr->proc == 0) {
                rb_proc_t *proc;
                blockval = rb_vm_make_proc(th, blockptr, rb_cProc);
                GetProcPtr(blockval, proc);
                ci->blockptr = &proc->block;
            }
            else {
                blockval = blockptr->proc;
            }
        }

        orig_argv[iseq->arg_block] = blockval;
    }

    th->mark_stack_len = 0;
    return (int)opt_pc;
}
01252
01253 static VALUE vm_call_iseq_setup_2(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci);
01254 static inline VALUE vm_call_iseq_setup_normal(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci);
01255 static inline VALUE vm_call_iseq_setup_tailcall(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci);
01256
/*
 * Set up callee arguments.  Simple signatures (arg_simple & 0x01) only
 * need an exact arity check, and the call site can be fast-pathed
 * (skipping this setup next time) unless it is a lambda call, uses a
 * splat, or targets a protected method.  Complex signatures go through
 * vm_callee_setup_arg_complex.
 */
static inline void
vm_callee_setup_arg(rb_thread_t *th, rb_call_info_t *ci, const rb_iseq_t *iseq,
                    VALUE *argv, int is_lambda)
{
    if (LIKELY(iseq->arg_simple & 0x01)) {
        /* simple check: exact argument count required */
        if (ci->argc != iseq->argc) {
            argument_error(iseq, ci->argc, iseq->argc, iseq->argc);
        }
        ci->aux.opt_pc = 0;
        CI_SET_FASTPATH(ci,
                        (UNLIKELY(ci->flag & VM_CALL_TAILCALL) ?
                         vm_call_iseq_setup_tailcall :
                         vm_call_iseq_setup_normal),
                        (!is_lambda &&
                         !(ci->flag & VM_CALL_ARGS_SPLAT) &&
                         !(ci->me->flag & NOEX_PROTECTED)));
    }
    else {
        ci->aux.opt_pc = vm_callee_setup_arg_complex(th, ci, iseq, argv);
    }
}
01279
01280 static VALUE
01281 vm_call_iseq_setup(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
01282 {
01283 vm_callee_setup_arg(th, ci, ci->me->def->body.iseq, cfp->sp - ci->argc, 0);
01284 return vm_call_iseq_setup_2(th, cfp, ci);
01285 }
01286
01287 static VALUE
01288 vm_call_iseq_setup_2(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
01289 {
01290 if (LIKELY(!(ci->flag & VM_CALL_TAILCALL))) {
01291 return vm_call_iseq_setup_normal(th, cfp, ci);
01292 }
01293 else {
01294 return vm_call_iseq_setup_tailcall(th, cfp, ci);
01295 }
01296 }
01297
/*
 * Push a fresh METHOD control frame for an ISeq-defined method.
 * The arguments are already on the VM stack; locals beyond the argument
 * area are nil-cleared, then the new frame is pushed starting at pc
 * iseq_encoded + opt_pc (past any already-satisfied optional-arg prologue).
 * Returns Qundef: the value is produced later by executing the new frame.
 */
static inline VALUE
vm_call_iseq_setup_normal(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
{
    int i;
    VALUE *argv = cfp->sp - ci->argc;          /* base of the argument area */
    rb_iseq_t *iseq = ci->me->def->body.iseq;
    VALUE *sp = argv + iseq->arg_size;         /* first slot after the args */

    CHECK_VM_STACK_OVERFLOW(cfp, iseq->stack_max);

    /* initialize non-argument locals to nil */
    for (i = 0; i < iseq->local_size - iseq->arg_size; i++) {
        *sp++ = Qnil;
    }

    vm_push_frame(th, iseq, VM_FRAME_MAGIC_METHOD, ci->recv, ci->defined_class,
                  VM_ENVVAL_BLOCK_PTR(ci->blockptr),
                  iseq->iseq_encoded + ci->aux.opt_pc, sp, 0, ci->me);

    /* caller's sp rolls back below the receiver slot */
    cfp->sp = argv - 1;
    return Qundef;
}
01320
/*
 * Tailcall variant: pop the caller's frame and rebuild the callee's frame in
 * its place, so the stack does not grow for self-recursive tail positions.
 * The receiver and arguments are copied down to the previous frame's sp, the
 * remaining locals are nil-cleared, and the caller's FINISH flag (if any) is
 * carried over onto the new frame.  Returns Qundef; execution continues in
 * the newly pushed frame.
 */
static inline VALUE
vm_call_iseq_setup_tailcall(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
{
    int i;
    VALUE *argv = cfp->sp - ci->argc;          /* args in the frame being replaced */
    rb_iseq_t *iseq = ci->me->def->body.iseq;
    VALUE *src_argv = argv;
    VALUE *sp_orig, *sp;
    VALUE finish_flag = VM_FRAME_TYPE_FINISH_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;

    /* discard the current frame; cfp now refers to the previous one */
    cfp = th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);

    CHECK_VM_STACK_OVERFLOW(cfp, iseq->stack_max);
    RUBY_VM_CHECK_INTS(th);

    sp_orig = sp = cfp->sp;

    /* receiver slot */
    sp[0] = ci->recv;
    sp++;

    /* copy the arguments down (regions may overlap; copy is ascending) */
    for (i=0; i < iseq->arg_size; i++) {
        *sp++ = src_argv[i];
    }

    /* initialize non-argument locals to nil */
    for (i = 0; i < iseq->local_size - iseq->arg_size; i++) {
        *sp++ = Qnil;
    }

    vm_push_frame(th, iseq, VM_FRAME_MAGIC_METHOD | finish_flag,
                  ci->recv, ci->defined_class, VM_ENVVAL_BLOCK_PTR(ci->blockptr),
                  iseq->iseq_encoded + ci->aux.opt_pc, sp, 0, ci->me);

    cfp->sp = sp_orig;
    return Qundef;
}
01359
/* cfunc invoker, arity -2: pass the receiver plus all args packed into one Ruby array */
static VALUE
call_cfunc_m2(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, rb_ary_new4(argc, argv));
}
01365
/* cfunc invoker, arity -1: (argc, argv, recv) calling convention */
static VALUE
call_cfunc_m1(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(argc, argv, recv);
}
01371
/* cfunc invoker, arity 0: receiver only (argc already validated by the caller) */
static VALUE
call_cfunc_0(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv);
}
01377
/* cfunc invoker, arity 1: argv spread into 1 positional parameter */
static VALUE
call_cfunc_1(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0]);
}
01383
/* cfunc invoker, arity 2: argv spread into 2 positional parameters */
static VALUE
call_cfunc_2(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1]);
}
01389
/* cfunc invoker, arity 3: argv spread into 3 positional parameters */
static VALUE
call_cfunc_3(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2]);
}
01395
/* cfunc invoker, arity 4: argv spread into 4 positional parameters */
static VALUE
call_cfunc_4(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3]);
}
01401
/* cfunc invoker, arity 5: argv spread into 5 positional parameters */
static VALUE
call_cfunc_5(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
}
01407
/* cfunc invoker, arity 6: argv spread into 6 positional parameters */
static VALUE
call_cfunc_6(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
}
01413
/* cfunc invoker, arity 7: argv spread into 7 positional parameters */
static VALUE
call_cfunc_7(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
}
01419
/* cfunc invoker, arity 8: argv spread into 8 positional parameters */
static VALUE
call_cfunc_8(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
}
01425
/* cfunc invoker, arity 9: argv spread into 9 positional parameters */
static VALUE
call_cfunc_9(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
}
01431
/* cfunc invoker, arity 10: argv spread into 10 positional parameters */
static VALUE
call_cfunc_10(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
}
01437
/* cfunc invoker, arity 11: argv spread into 11 positional parameters */
static VALUE
call_cfunc_11(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
}
01443
/* cfunc invoker, arity 12: argv spread into 12 positional parameters */
static VALUE
call_cfunc_12(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
}
01449
/* cfunc invoker, arity 13: argv spread into 13 positional parameters */
static VALUE
call_cfunc_13(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
}
01455
/* cfunc invoker, arity 14: argv spread into 14 positional parameters */
static VALUE
call_cfunc_14(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
}
01461
/* cfunc invoker, arity 15: argv spread into 15 positional parameters */
static VALUE
call_cfunc_15(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
{
    return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
}
01467
01468 #ifndef VM_PROFILE
01469 #define VM_PROFILE 0
01470 #endif
01471
01472 #if VM_PROFILE
01473 static int vm_profile_counter[4];
01474 #define VM_PROFILE_UP(x) (vm_profile_counter[x]++)
01475 #define VM_PROFILE_ATEXIT() atexit(vm_profile_show_result)
01476 static void vm_profile_show_result(void)
01477 {
01478 fprintf(stderr, "VM Profile results: \n");
01479 fprintf(stderr, "r->c call: %d\n", vm_profile_counter[0]);
01480 fprintf(stderr, "r->c popf: %d\n", vm_profile_counter[1]);
01481 fprintf(stderr, "c->c call: %d\n", vm_profile_counter[2]);
01482 fprintf(stderr, "r->c popf: %d\n", vm_profile_counter[3]);
01483 }
01484 #else
01485 #define VM_PROFILE_UP(x)
01486 #define VM_PROFILE_ATEXIT()
01487 #endif
01488
/*
 * Call a C-implemented method, bracketed by a CFUNC control frame so that
 * backtraces and frame bookkeeping stay consistent.  Fires DTrace and
 * c-call/c-return event hooks around the invocation and pops the args plus
 * receiver off the caller's stack.  Returns the cfunc's result directly.
 */
static VALUE
vm_call_cfunc_with_frame(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
{
    VALUE val;
    const rb_method_entry_t *me = ci->me;
    const rb_method_cfunc_t *cfunc = &me->def->body.cfunc;
    int len = cfunc->argc;  /* declared arity; negative means variadic */

    VALUE recv = ci->recv;
    VALUE defined_class = ci->defined_class;
    rb_block_t *blockptr = ci->blockptr;
    int argc = ci->argc;

    RUBY_DTRACE_CMETHOD_ENTRY_HOOK(th, me->klass, me->called_id);
    EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, me->called_id, me->klass, Qundef);

    /* dummy frame: no iseq, 1 local (the specval slot) */
    vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC, recv, defined_class,
                  VM_ENVVAL_BLOCK_PTR(blockptr), 0, th->cfp->sp, 1, me);

    if (len >= 0) rb_check_arity(argc, len, len);

    reg_cfp->sp -= argc + 1;  /* pop args + receiver; argv = sp + 1 below */
    VM_PROFILE_UP(0);
    val = (*cfunc->invoker)(cfunc->func, recv, argc, reg_cfp->sp + 1);

    /* the cfunc must leave the frame it was called under intact */
    if (reg_cfp != th->cfp + 1) {
        rb_bug("vm_call_cfunc - cfp consistency error");
    }

    vm_pop_frame(th);

    EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, recv, me->called_id, me->klass, val);
    RUBY_DTRACE_CMETHOD_RETURN_HOOK(th, me->klass, me->called_id);

    return val;
}
01526
01527 #if OPT_CALL_CFUNC_WITHOUT_FRAME
/*
 * Frameless cfunc call (OPT_CALL_CFUNC_WITHOUT_FRAME): invoke the C function
 * without pushing a CFUNC frame up front.  th->passed_ci lets the callee (via
 * vm_call_cfunc_push_frame) lazily materialize a frame only if it needs one;
 * afterwards we detect whether that happened and pop the frame if so.
 */
static VALUE
vm_call_cfunc_latter(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
{
    VALUE val;
    int argc = ci->argc;
    VALUE *argv = STACK_ADDR_FROM_TOP(argc);
    const rb_method_cfunc_t *cfunc = &ci->me->def->body.cfunc;

    th->passed_ci = ci;
    reg_cfp->sp -= argc + 1;      /* pop args + receiver */
    ci->aux.inc_sp = argc + 1;    /* how far to re-advance sp if a frame is pushed later */
    VM_PROFILE_UP(0);
    val = (*cfunc->invoker)(cfunc->func, ci, argv);

    if (reg_cfp == th->cfp) {
        /* no frame was pushed; passed_ci must be untouched */
        if (UNLIKELY(th->passed_ci != ci)) {
            rb_bug("vm_call_cfunc_latter: passed_ci error (ci: %p, passed_ci: %p)", ci, th->passed_ci);
        }
        th->passed_ci = 0;
    }
    else {
        /* a frame was pushed on demand; it must sit directly below ours */
        if (UNLIKELY(reg_cfp != RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp))) {
            rb_bug("vm_call_cfunc_latter: cfp consistency error (%p, %p)", reg_cfp, th->cfp+1);
        }
        vm_pop_frame(th);
        VM_PROFILE_UP(1);
    }

    return val;
}
01559
/*
 * Cfunc dispatch entry (frameless variant): arity-check, fire entry hooks,
 * optionally cache vm_call_cfunc_latter as the call-site fastpath (only when
 * no protected-visibility or splat re-checks are needed), then invoke.
 */
static VALUE
vm_call_cfunc(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
{
    VALUE val;
    const rb_method_entry_t *me = ci->me;
    int len = me->def->body.cfunc.argc;  /* declared arity; negative means variadic */
    VALUE recv = ci->recv;

    if (len >= 0) rb_check_arity(ci->argc, len, len);

    RUBY_DTRACE_CMETHOD_ENTRY_HOOK(th, me->klass, me->called_id);
    EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, me->called_id, me->klass, Qnil);

    if (!(ci->me->flag & NOEX_PROTECTED) &&
        !(ci->flag & VM_CALL_ARGS_SPLAT)) {
        CI_SET_FASTPATH(ci, vm_call_cfunc_latter, 1);
    }
    val = vm_call_cfunc_latter(th, reg_cfp, ci);

    EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, recv, me->called_id, me->klass, val);
    RUBY_DTRACE_CMETHOD_RETURN_HOOK(th, me->klass, me->called_id);

    return val;
}
01584
/*
 * Materialize the CFUNC control frame that vm_call_cfunc_latter deferred.
 * Called from inside a frameless cfunc when a frame becomes necessary
 * (th->passed_ci carries the pending call info).  Also demotes the call
 * site's cached fastpath to the always-framed variant so future calls skip
 * the deferral.  aux.inc_sp restores sp past the popped args + receiver.
 */
void
vm_call_cfunc_push_frame(rb_thread_t *th)
{
    rb_call_info_t *ci = th->passed_ci;
    const rb_method_entry_t *me = ci->me;
    th->passed_ci = 0;

    vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC, ci->recv, ci->defined_class,
                  VM_ENVVAL_BLOCK_PTR(ci->blockptr), 0, th->cfp->sp + ci->aux.inc_sp, 1, me);

    if (ci->call != vm_call_general) {
        ci->call = vm_call_cfunc_with_frame;
    }
}
01599 #else
/* Cfunc dispatch entry (default build): always call with a pushed frame. */
static VALUE
vm_call_cfunc(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
{
    return vm_call_cfunc_with_frame(th, reg_cfp, ci);
}
01605 #endif
01606
/*
 * Optimized attribute reader: fetch the instance variable named by the
 * method definition and pop the receiver off the stack.
 */
static VALUE
vm_call_ivar(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
{
    VALUE val = vm_getivar(ci->recv, ci->me->def->body.attr.id, 0, ci, 1);
    cfp->sp -= 1;  /* pop receiver (arity 0, checked by dispatcher) */
    return val;
}
01614
/*
 * Optimized attribute writer: read the value from the stack top, store it
 * into the instance variable, then pop value + receiver.  Returns the
 * assigned value (Ruby assignment semantics).
 */
static VALUE
vm_call_attrset(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
{
    /* *(cfp->sp - 1) must be read before the pop below */
    VALUE val = vm_setivar(ci->recv, ci->me->def->body.attr.id, *(cfp->sp - 1), 0, ci, 1);
    cfp->sp -= 2;  /* pop argument + receiver */
    return val;
}
01622
/*
 * Invoke a bmethod (a method defined from a Proc via define_method) by
 * running its underlying proc, bracketed by method-call event hooks.
 * th->passed_me hands the method entry to the proc-invocation machinery.
 */
static inline VALUE
vm_call_bmethod_body(rb_thread_t *th, rb_call_info_t *ci, const VALUE *argv)
{
    rb_proc_t *proc;
    VALUE val;

    RUBY_DTRACE_METHOD_ENTRY_HOOK(th, ci->me->klass, ci->me->called_id);
    EXEC_EVENT_HOOK(th, RUBY_EVENT_CALL, ci->recv, ci->me->called_id, ci->me->klass, Qnil);

    /* control frame management: run the proc with method semantics */
    th->passed_me = ci->me;
    GetProcPtr(ci->me->def->body.proc, proc);
    val = vm_invoke_proc(th, proc, ci->recv, ci->defined_class, ci->argc, argv, ci->blockptr);

    EXEC_EVENT_HOOK(th, RUBY_EVENT_RETURN, ci->recv, ci->me->called_id, ci->me->klass, val);
    RUBY_DTRACE_METHOD_RETURN_HOOK(th, ci->me->klass, ci->me->called_id);

    return val;
}
01642
01643 static VALUE
01644 vm_call_bmethod(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
01645 {
01646 VALUE *argv = ALLOCA_N(VALUE, ci->argc);
01647 MEMCPY(argv, cfp->sp - ci->argc, VALUE, ci->argc);
01648 cfp->sp += - ci->argc - 1;
01649
01650 return vm_call_bmethod_body(th, ci, argv);
01651 }
01652
01653 static
01654 #ifdef _MSC_VER
01655 __forceinline
01656 #else
01657 inline
01658 #endif
01659 VALUE vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci);
01660
/*
 * Optimized Kernel#send: take the first argument as the method name, shift
 * the remaining arguments down one stack slot, resolve the target method
 * (skipping refinements), and re-enter the generic dispatcher.  Works on a
 * stack copy of ci so the cached call info is not clobbered.
 */
static VALUE
vm_call_opt_send(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
{
    int i = ci->argc - 1;  /* stack index of the method-name argument */
    VALUE sym;
    rb_call_info_t ci_entry;

    if (ci->argc == 0) {
        rb_raise(rb_eArgError, "no method name given");
    }

    /* work on a copy; the inline cache must not be mutated */
    ci_entry = *ci;
    ci = &ci_entry;

    sym = TOPN(i);

    if (SYMBOL_P(sym)) {
        ci->mid = SYM2ID(sym);
    }
    else if (!(ci->mid = rb_check_id(&sym))) {
        /* name has no interned ID: either raise NoMethodError now, or intern
         * it so a user-defined method_missing can see it */
        if (rb_method_basic_definition_p(CLASS_OF(ci->recv), idMethodMissing)) {
            VALUE exc = make_no_method_exception(rb_eNoMethodError, NULL, ci->recv, rb_long2int(ci->argc), &TOPN(i));
            rb_exc_raise(exc);
        }
        ci->mid = rb_to_id(sym);
    }

    /* shift the remaining args down over the consumed name slot */
    if (i > 0) {
        MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
    }
    ci->me =
        rb_method_entry_without_refinements(CLASS_OF(ci->recv),
                                            ci->mid, &ci->defined_class);
    ci->argc -= 1;
    DEC_SP(1);

    /* send ignores visibility, like an fcall */
    ci->flag = VM_CALL_FCALL | VM_CALL_OPT_SEND;

    return vm_call_method(th, reg_cfp, ci);
}
01702
01703 static VALUE
01704 vm_call_opt_call(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
01705 {
01706 rb_proc_t *proc;
01707 int argc = ci->argc;
01708 VALUE *argv = ALLOCA_N(VALUE, argc);
01709 GetProcPtr(ci->recv, proc);
01710 MEMCPY(argv, cfp->sp - argc, VALUE, argc);
01711 cfp->sp -= argc + 1;
01712
01713 return rb_vm_invoke_proc(th, proc, argc, argv, ci->blockptr);
01714 }
01715
/*
 * Redirect a failed dispatch to recv.method_missing(mid, *args): build a new
 * call info targeting idMethodMissing, open one extra stack slot, shift the
 * existing arguments up, and prepend the original method name as a Symbol.
 */
static VALUE
vm_call_method_missing(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
{
    VALUE *argv = STACK_ADDR_FROM_TOP(ci->argc);
    rb_call_info_t ci_entry;

    ci_entry.flag = VM_CALL_FCALL | VM_CALL_OPT_SEND;
    ci_entry.argc = ci->argc+1;  /* original args + the method-name symbol */
    ci_entry.mid = idMethodMissing;
    ci_entry.blockptr = ci->blockptr;
    ci_entry.recv = ci->recv;
    ci_entry.me = rb_method_entry(CLASS_OF(ci_entry.recv), idMethodMissing, &ci_entry.defined_class);

    /* shift arguments up one slot: argv[0] becomes the method name */
    CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
    if (ci->argc > 0) {
        MEMMOVE(argv+1, argv, VALUE, ci->argc);
    }
    argv[0] = ID2SYM(ci->mid);
    INC_SP(1);

    th->method_missing_reason = ci->aux.missing_reason;
    return vm_call_method(th, reg_cfp, &ci_entry);
}
01740
01741 static inline VALUE
01742 find_refinement(VALUE refinements, VALUE klass)
01743 {
01744 if (NIL_P(refinements)) {
01745 return Qnil;
01746 }
01747 return rb_hash_lookup(refinements, klass);
01748 }
01749
01750 static int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
01751 static VALUE vm_call_super_method(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci);
01752
/*
 * Find the control frame of the method enclosing cfp.  If cfp is a block
 * frame, walk outward to the frame running the block's local (method-level)
 * iseq; otherwise cfp itself is returned.  Falls back to the starting frame
 * if the walk runs off the control-frame stack.
 */
static rb_control_frame_t *
current_method_entry(rb_thread_t *th, rb_control_frame_t *cfp)
{
    rb_control_frame_t *top_cfp = cfp;

    if (cfp->iseq && cfp->iseq->type == ISEQ_TYPE_BLOCK) {
        rb_iseq_t *local_iseq = cfp->iseq->local_iseq;
        do {
            cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
            if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
                /* enclosing method frame not found; keep the original */
                return top_cfp;
            }
        } while (cfp->iseq != local_iseq);
    }
    return cfp;
}
01770
/*
 * Core method dispatcher.  Routes a call (described by ci) to the handler
 * for the resolved method's definition type, enforcing visibility and $SAFE
 * rules, following zsuper/refinement redirections via gotos, and falling
 * back to method_missing when no callable method is found.  Handlers that
 * are safe to repeat may be cached on the call site via CI_SET_FASTPATH.
 */
static
#ifdef _MSC_VER
__forceinline
#else
inline
#endif
VALUE
vm_call_method(rb_thread_t *th, rb_control_frame_t *cfp, rb_call_info_t *ci)
{
    int enable_fastpath = 1;
    rb_call_info_t ci_temp;

  start_method_dispatch:
    if (ci->me != 0) {
        if ((ci->me->flag == 0)) {
            /* public method: dispatch directly on the definition type */
            VALUE klass;

          normal_method_dispatch:
            switch (ci->me->def->type) {
              case VM_METHOD_TYPE_ISEQ:{
                CI_SET_FASTPATH(ci, vm_call_iseq_setup, enable_fastpath);
                return vm_call_iseq_setup(th, cfp, ci);
              }
              case VM_METHOD_TYPE_NOTIMPLEMENTED:
              case VM_METHOD_TYPE_CFUNC:
                CI_SET_FASTPATH(ci, vm_call_cfunc, enable_fastpath);
                return vm_call_cfunc(th, cfp, ci);
              case VM_METHOD_TYPE_ATTRSET:{
                rb_check_arity(ci->argc, 1, 1);
                ci->aux.index = 0;
                /* splat calls must re-check arity, so don't cache them */
                CI_SET_FASTPATH(ci, vm_call_attrset, enable_fastpath && !(ci->flag & VM_CALL_ARGS_SPLAT));
                return vm_call_attrset(th, cfp, ci);
              }
              case VM_METHOD_TYPE_IVAR:{
                rb_check_arity(ci->argc, 0, 0);
                ci->aux.index = 0;
                CI_SET_FASTPATH(ci, vm_call_ivar, enable_fastpath && !(ci->flag & VM_CALL_ARGS_SPLAT));
                return vm_call_ivar(th, cfp, ci);
              }
              case VM_METHOD_TYPE_MISSING:{
                ci->aux.missing_reason = 0;
                CI_SET_FASTPATH(ci, vm_call_method_missing, enable_fastpath);
                return vm_call_method_missing(th, cfp, ci);
              }
              case VM_METHOD_TYPE_BMETHOD:{
                CI_SET_FASTPATH(ci, vm_call_bmethod, enable_fastpath);
                return vm_call_bmethod(th, cfp, ci);
              }
              case VM_METHOD_TYPE_ZSUPER:{
                /* module's super: restart the lookup from the superclass */
                klass = ci->me->klass;
                klass = RCLASS_ORIGIN(klass);
              zsuper_method_dispatch:
                klass = RCLASS_SUPER(klass);
                /* use a local copy so the cached call info stays intact */
                ci_temp = *ci;
                ci = &ci_temp;

                ci->me = rb_method_entry(klass, ci->mid, &ci->defined_class);

                if (ci->me != 0) {
                    goto normal_method_dispatch;
                }
                else {
                    goto start_method_dispatch;
                }
              }
              case VM_METHOD_TYPE_OPTIMIZED:{
                switch (ci->me->def->body.optimize_type) {
                  case OPTIMIZED_METHOD_TYPE_SEND:
                    CI_SET_FASTPATH(ci, vm_call_opt_send, enable_fastpath);
                    return vm_call_opt_send(th, cfp, ci);
                  case OPTIMIZED_METHOD_TYPE_CALL:
                    CI_SET_FASTPATH(ci, vm_call_opt_call, enable_fastpath);
                    return vm_call_opt_call(th, cfp, ci);
                  default:
                    rb_bug("vm_call_method: unsupported optimized method type (%d)",
                           ci->me->def->body.optimize_type);
                }
                break;
              }
              case VM_METHOD_TYPE_UNDEF:
                break;
              case VM_METHOD_TYPE_REFINED:{
                /* try the refinement active in the caller's cref first */
                NODE *cref = rb_vm_get_cref(cfp->iseq, cfp->ep);
                VALUE refinements = cref ? cref->nd_refinements : Qnil;
                VALUE refinement, defined_class;
                rb_method_entry_t *me;

                refinement = find_refinement(refinements,
                                             ci->defined_class);
                if (NIL_P(refinement)) {
                    goto no_refinement_dispatch;
                }
                me = rb_method_entry(refinement, ci->mid, &defined_class);
                if (me) {
                    if (ci->call == vm_call_super_method) {
                        /* don't let super re-enter the same refined method */
                        rb_control_frame_t *top_cfp = current_method_entry(th, cfp);
                        if (top_cfp->me &&
                            rb_method_definition_eq(me->def, top_cfp->me->def)) {
                            goto no_refinement_dispatch;
                        }
                    }
                    ci->me = me;
                    ci->defined_class = defined_class;
                    if (me->def->type != VM_METHOD_TYPE_REFINED) {
                        goto start_method_dispatch;
                    }
                }

              no_refinement_dispatch:
                /* fall back to the original (pre-refinement) method */
                if (ci->me->def->body.orig_me) {
                    ci->me = ci->me->def->body.orig_me;
                    if (UNDEFINED_METHOD_ENTRY_P(ci->me)) {
                        ci->me = 0;
                    }
                    goto start_method_dispatch;
                }
                else {
                    klass = ci->me->klass;
                    goto zsuper_method_dispatch;
                }
              }
            }
            rb_bug("vm_call_method: unsupported method type (%d)", ci->me->def->type);
        }
        else {
            /* non-public method: enforce visibility / $SAFE before dispatch */
            int noex_safe;
            if (!(ci->flag & VM_CALL_FCALL) && (ci->me->flag & NOEX_MASK) & NOEX_PRIVATE) {
                int stat = NOEX_PRIVATE;

                if (ci->flag & VM_CALL_VCALL) {
                    stat |= NOEX_VCALL;
                }
                ci->aux.missing_reason = stat;
                CI_SET_FASTPATH(ci, vm_call_method_missing, 1);
                return vm_call_method_missing(th, cfp, ci);
            }
            else if (!(ci->flag & VM_CALL_OPT_SEND) && (ci->me->flag & NOEX_MASK) & NOEX_PROTECTED) {
                /* protected check depends on the caller's self: never cache */
                enable_fastpath = 0;
                if (!rb_obj_is_kind_of(cfp->self, ci->defined_class)) {
                    ci->aux.missing_reason = NOEX_PROTECTED;
                    return vm_call_method_missing(th, cfp, ci);
                }
                else {
                    goto normal_method_dispatch;
                }
            }
            else if ((noex_safe = NOEX_SAFE(ci->me->flag)) > th->safe_level && (noex_safe > 2)) {
                rb_raise(rb_eSecurityError, "calling insecure method: %s", rb_id2name(ci->mid));
            }
            else {
                goto normal_method_dispatch;
            }
        }
    }
    else {
        /* no method entry found: method_missing */
        int stat = 0;
        if (ci->flag & VM_CALL_VCALL) {
            stat |= NOEX_VCALL;
        }
        if (ci->flag & VM_CALL_SUPER) {
            stat |= NOEX_SUPER;
        }
        if (ci->mid == idMethodMissing) {
            /* method_missing itself is undefined: raise directly */
            rb_control_frame_t *reg_cfp = cfp;
            VALUE *argv = STACK_ADDR_FROM_TOP(ci->argc);
            rb_raise_method_missing(th, ci->argc, argv, ci->recv, stat);
        }
        else {
            ci->aux.missing_reason = stat;
            CI_SET_FASTPATH(ci, vm_call_method_missing, 1);
            return vm_call_method_missing(th, cfp, ci);
        }
    }

    rb_bug("vm_call_method: unreachable");
}
01948
/* Default (uncached) call-site handler: full method dispatch every time. */
static VALUE
vm_call_general(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
{
    return vm_call_method(th, reg_cfp, ci);
}
01954
/*
 * Call-site handler used for super calls.  Behaves like vm_call_general;
 * the distinct function identity lets vm_call_method recognize super
 * dispatch (ci->call == vm_call_super_method) in the refinement path.
 */
static VALUE
vm_call_super_method(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
{
    return vm_call_method(th, reg_cfp, ci);
}
01960
01961
01962
/*
 * Superclass for normal (non-module-function) super: unwrap a refinement
 * iclass back to the refinement module, skip the origin iclass inserted by
 * prepend, and return the resulting superclass.
 */
static inline VALUE
vm_search_normal_superclass(VALUE klass)
{
    if (BUILTIN_TYPE(klass) == T_ICLASS &&
        FL_TEST(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
        klass = RBASIC(klass)->klass;
    }
    klass = RCLASS_ORIGIN(klass);
    return RCLASS_SUPER(klass);
}
01973
/* Raise NoMethodError for `super` used outside any method body (does not return). */
static void
vm_super_outside(void)
{
    rb_raise(rb_eNoMethodError, "super called outside of method");
}
01979
/*
 * Resolve the method id and lookup class for a super call starting at
 * reg_cfp.  For blocks, walks the frame stack outward along the ep chain to
 * the defining method frame to recover the original method id and class.
 * Returns 0 on success (ci->mid and ci->klass filled in), -1 when super is
 * not legal here, -2 when implicit super args are used inside a
 * define_method block (sigval == 0).
 */
static int
vm_search_superclass(rb_control_frame_t *reg_cfp, rb_iseq_t *iseq, VALUE sigval, rb_call_info_t *ci)
{
    /* climb to the nearest iseq that belongs to a class/method */
    while (iseq && !iseq->klass) {
        iseq = iseq->parent_iseq;
    }

    if (iseq == 0) {
        return -1;
    }

    ci->mid = iseq->defined_method_id;

    if (iseq != iseq->local_iseq) {
        /* inside a block: find the frame executing the defining method */
        rb_control_frame_t *lcfp = GET_CFP();

        if (!sigval) {
            /* implicit super (zsuper) is not supported from define_method */
            return -2;
        }

        while (lcfp->iseq != iseq) {
            rb_thread_t *th = GET_THREAD();
            VALUE *tep = VM_EP_PREV_EP(lcfp->ep);
            /* scan outward for the frame whose ep matches the outer ep */
            while (1) {
                lcfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(lcfp);
                if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, lcfp)) {
                    return -1;
                }
                if (lcfp->ep == tep) {
                    break;
                }
            }
        }

        /* the defining frame must carry a method entry */
        if (!lcfp->me) {
            return -1;
        }

        ci->mid = lcfp->me->def->original_id;
        ci->klass = vm_search_normal_superclass(lcfp->klass);
    }
    else {
        ci->klass = vm_search_normal_superclass(reg_cfp->klass);
    }

    return 0;
}
02030
/*
 * Prepare ci for a super call from the current frame: validate the context
 * (self must be compatible with the defining class), locate the superclass
 * to search, resolve the method entry, and install vm_call_super_method as
 * the call handler.  Falls back to method_missing when no superclass method
 * exists.  A final adjustment skips over the currently-executing definition
 * so super never re-dispatches to itself.
 */
static void
vm_search_super_method(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
{
    VALUE current_defined_class;
    rb_iseq_t *iseq = GET_ISEQ();
    VALUE sigval = TOPN(ci->argc);  /* sentinel distinguishing explicit/implicit args */

    current_defined_class = GET_CFP()->klass;
    if (NIL_P(current_defined_class)) {
        vm_super_outside();
    }

    if (!NIL_P(RCLASS_REFINED_CLASS(current_defined_class))) {
        current_defined_class = RCLASS_REFINED_CLASS(current_defined_class);
    }

    if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
        BUILTIN_TYPE(current_defined_class) != T_ICLASS &&
        !FL_TEST(current_defined_class, RMODULE_INCLUDED_INTO_REFINEMENT) &&
        !rb_obj_is_kind_of(ci->recv, current_defined_class)) {
        VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
            RBASIC(current_defined_class)->klass : current_defined_class;

        rb_raise(rb_eTypeError,
                 "self has wrong type to call super in this context: "
                 "%"PRIsVALUE" (expected %"PRIsVALUE")",
                 rb_obj_class(ci->recv), m);
    }

    switch (vm_search_superclass(GET_CFP(), iseq, sigval, ci)) {
      case -1:
        /* vm_super_outside() raises, so this does not fall through at runtime */
        vm_super_outside();
      case -2:
        rb_raise(rb_eRuntimeError,
                 "implicit argument passing of super from method defined"
                 " by define_method() is not supported."
                 " Specify all arguments explicitly.");
    }
    if (!ci->klass) {
        /* BasicObject (or similar) has no superclass: go to method_missing */
        ci->aux.missing_reason = NOEX_SUPER;
        CI_SET_FASTPATH(ci, vm_call_method_missing, 1);
        return;
    }

    /* resolve the super method and mark the call site as a super call */
    ci->me = rb_method_entry(ci->klass, ci->mid, &ci->defined_class);
    ci->call = vm_call_super_method;

    while (iseq && !iseq->klass) {
        iseq = iseq->parent_iseq;
    }

    /* avoid infinite super: skip past the definition we are executing */
    if (ci->me && ci->me->def->type == VM_METHOD_TYPE_ISEQ && ci->me->def->body.iseq == iseq) {
        ci->klass = RCLASS_SUPER(ci->defined_class);
        ci->me = rb_method_entry(ci->klass, ci->mid, &ci->defined_class);
    }
}
02089
02090
02091
02092 static inline int
02093 block_proc_is_lambda(const VALUE procval)
02094 {
02095 rb_proc_t *proc;
02096
02097 if (procval) {
02098 GetProcPtr(procval, proc);
02099 return proc->is_lambda;
02100 }
02101 else {
02102 return 0;
02103 }
02104 }
02105
/*
 * Yield to a C-implemented block (an IFUNC node stored where an iseq would
 * be).  Lambda blocks receive all args as one array; otherwise the first
 * arg (or nil) is passed.  A block argument, if any, is materialized as a
 * Proc.  An IFUNC frame is pushed around the callback and popped manually.
 */
static inline VALUE
vm_yield_with_cfunc(rb_thread_t *th, const rb_block_t *block,
                    VALUE self, int argc, const VALUE *argv,
                    const rb_block_t *blockargptr)
{
    NODE *ifunc = (NODE *) block->iseq;  /* C block stored in the iseq slot */
    VALUE val, arg, blockarg;
    int lambda = block_proc_is_lambda(block->proc);

    if (lambda) {
        arg = rb_ary_new4(argc, argv);
    }
    else if (argc == 0) {
        arg = Qnil;
    }
    else {
        arg = argv[0];
    }

    if (blockargptr) {
        if (blockargptr->proc) {
            blockarg = blockargptr->proc;
        }
        else {
            blockarg = rb_vm_make_proc(th, blockargptr, rb_cProc);
        }
    }
    else {
        blockarg = Qnil;
    }

    vm_push_frame(th, (rb_iseq_t *)ifunc, VM_FRAME_MAGIC_IFUNC, self,
                  0, VM_ENVVAL_PREV_EP_PTR(block->ep), 0,
                  th->cfp->sp, 1, 0);

    val = (*ifunc->nd_cfnc) (arg, ifunc->nd_tval, argc, argv, blockarg);

    th->cfp++;  /* pop the IFUNC frame */
    return val;
}
02146
02147
02148
02149
02150
02151
/*
 * Arrange block arguments when the block has optional, rest, or post
 * parameters.  Splits the surplus (args beyond the mandatory count) into
 * post args, optional args, and the rest array, moves the post args into
 * their slots, and nil-fills missing post slots.  Returns the opt_pc offset
 * at which execution should start (skipping satisfied optional-arg code).
 */
static inline int
vm_yield_setup_block_args_complex(rb_thread_t *th, const rb_iseq_t *iseq,
                                  int argc, VALUE *argv)
{
    rb_num_t opt_pc = 0;
    int i;
    const int m = iseq->argc;           /* mandatory (leading) arg count */
    const int r = iseq->arg_rest;       /* rest-arg slot index, or -1 */
    int len = iseq->arg_post_len;       /* number of post (trailing) args */
    int start = iseq->arg_post_start;   /* slot index of the first post arg */
    int rsize = argc > m ? argc - m : 0;    /* surplus beyond mandatory */
    int psize = rsize > len ? len : rsize;  /* surplus consumed by post args */
    int osize = 0;                          /* surplus consumed by optionals */
    VALUE ary;

    /* post args are filled first from the tail of the surplus */
    rsize -= psize;

    if (iseq->arg_opts) {
        const int opts = iseq->arg_opts - 1;  /* number of optional args */
        if (rsize > opts) {
            osize = opts;
            opt_pc = iseq->arg_opt_table[opts];
        }
        else {
            osize = rsize;
            opt_pc = iseq->arg_opt_table[rsize];
        }
    }
    rsize -= osize;  /* whatever remains goes to the rest array */

    if (0) {  /* debug dump, disabled */
        printf(" argc: %d\n", argc);
        printf("  len: %d\n", len);
        printf("start: %d\n", start);
        printf("rsize: %d\n", rsize);
    }

    if (r == -1) {
        /* no rest arg: move the post args into place */
        MEMMOVE(&argv[start], &argv[m+osize], VALUE, psize);
    }
    else {
        /* collect the remainder into the rest array, then place post args */
        ary = rb_ary_new4(rsize, &argv[r]);

        MEMMOVE(&argv[start], &argv[m+rsize+osize], VALUE, psize);
        argv[r] = ary;
    }

    /* nil-fill post slots that received no value */
    for (i=psize; i<len; i++) {
        argv[start + i] = Qnil;
    }

    return (int)opt_pc;
}
02208
/*
 * Arrange arguments for yielding to a block with non-lambda semantics:
 * a single array argument is auto-splatted when the block takes multiple
 * parameters, missing args become nil, surplus args are dropped or gathered
 * into the rest array, and keyword/block parameters are filled in.
 * th->mark_stack_len guards the argv region against GC while it is being
 * rearranged.  Returns the opt_pc entry offset.
 */
static inline int
vm_yield_setup_block_args(rb_thread_t *th, const rb_iseq_t * iseq,
                          int orig_argc, VALUE *argv,
                          const rb_block_t *blockptr)
{
    int i;
    int argc = orig_argc;
    const int m = iseq->argc;  /* mandatory parameter count */
    VALUE ary, arg0;
    VALUE keyword_hash = Qnil;
    int opt_pc = 0;

    th->mark_stack_len = argc;  /* protect argv from GC during setup */

    /*
     * yield [1, 2]
     *  => {|a|} => a = [1, 2]
     *  => {|a, b|} => a, b = [1, 2]  (auto-splat)
     */
    arg0 = argv[0];
    if (!(iseq->arg_simple & 0x02) &&          /* block takes multiple params */
        ((m + iseq->arg_post_len) > 0 ||       /* positional params present */
         iseq->arg_opts > 2 ||                 /* >1 optional param */
         iseq->arg_keyword != -1 ||            /* keyword params present */
         0) &&
        argc == 1 && !NIL_P(ary = rb_check_array_type(arg0))) {
        /* single array argument: splat it across the parameters */
        th->mark_stack_len = argc = RARRAY_LENINT(ary);

        CHECK_VM_STACK_OVERFLOW(th->cfp, argc);

        MEMCPY(argv, RARRAY_PTR(ary), VALUE, argc);
    }
    else {
        /* restore arg0: rb_check_array_type may have been attempted above */
        argv[0] = arg0;
    }

    /* extract the trailing hash into keyword arguments if declared */
    if (iseq->arg_keyword != -1) {
        argc = vm_callee_setup_keyword_arg(iseq, argc, m, argv, &keyword_hash);
    }

    /* nil-fill missing mandatory args (blocks don't raise ArgumentError) */
    for (i=argc; i<m; i++) {
        argv[i] = Qnil;
    }

    if (iseq->arg_rest == -1 && iseq->arg_opts == 0) {
        const int arg_size = iseq->arg_size;
        if (arg_size < argc) {
            /*
             * yield 1, 2
             *  => {|a|}  # truncate the surplus
             */
            th->mark_stack_len = argc = arg_size;
        }
    }
    else {
        int r = iseq->arg_rest;

        if (iseq->arg_post_len ||
            iseq->arg_opts) {  /* optional or post args need the full logic */
            opt_pc = vm_yield_setup_block_args_complex(th, iseq, argc, argv);
        }
        else {
            if (argc < r) {
                /* too few args: nil-fill up to the rest slot, empty rest */
                for (i=argc; i<r; i++) {
                    argv[i] = Qnil;
                }
                argv[r] = rb_ary_new();
            }
            else {
                /* gather the surplus into the rest array */
                argv[r] = rb_ary_new4(argc-r, &argv[r]);
            }
        }

        th->mark_stack_len = iseq->arg_size;
    }

    /* keyword argument slot */
    if (iseq->arg_keyword != -1) {
        argv[iseq->arg_keyword] = keyword_hash;
    }

    /* block argument slot: materialize the passed block as a Proc */
    if (iseq->arg_block != -1) {
        VALUE procval = Qnil;

        if (blockptr) {
            if (blockptr->proc == 0) {
                procval = rb_vm_make_proc(th, blockptr, rb_cProc);
            }
            else {
                procval = blockptr->proc;
            }
        }

        argv[iseq->arg_block] = procval;
    }

    th->mark_stack_len = 0;
    return opt_pc;
}
02319
/*
 * Arrange arguments for invoking a block: lambda blocks use strict
 * method-style argument checking (via vm_callee_setup_arg), plain blocks
 * use lenient block semantics (auto-splat, nil-fill, truncation).
 * Returns the opt_pc entry offset.
 */
static inline int
vm_yield_setup_args(rb_thread_t * const th, const rb_iseq_t *iseq,
                    int argc, VALUE *argv, const rb_block_t *blockptr, int lambda)
{
    if (0) {  /* debug dump, disabled */
        printf("     argc: %d\n", argc);
        printf("iseq argc: %d\n", iseq->argc);
        printf("iseq opts: %d\n", iseq->arg_opts);
        printf("iseq rest: %d\n", iseq->arg_rest);
        printf("iseq post: %d\n", iseq->arg_post_len);
        printf("iseq blck: %d\n", iseq->arg_block);
        printf("iseq smpl: %d\n", iseq->arg_simple);
        printf("   lambda: %s\n", lambda ? "true" : "false");
    }

    if (lambda) {
        /* call as a method: strict arity via a temporary call info */
        rb_call_info_t ci_entry;
        ci_entry.flag = 0;
        ci_entry.argc = argc;
        ci_entry.blockptr = (rb_block_t *)blockptr;
        vm_callee_setup_arg(th, &ci_entry, iseq, argv, 1);
        return ci_entry.aux.opt_pc;
    }
    else {
        return vm_yield_setup_block_args(th, iseq, argc, argv, blockptr);
    }
}
02348
/*
 * Execute `yield`: fetch the block attached to the current method frame,
 * raise LocalJumpError if there is none, and either push a BLOCK frame for
 * an ISeq-defined block (returning Qundef so the VM loop runs it) or call a
 * C-implemented block directly and return its value.
 */
static VALUE
vm_invoke_block(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci)
{
    const rb_block_t *block = VM_CF_BLOCK_PTR(reg_cfp);
    rb_iseq_t *iseq;
    VALUE type = GET_ISEQ()->local_iseq->type;

    /* yield is only valid inside a method or class body with a block */
    if ((type != ISEQ_TYPE_METHOD && type != ISEQ_TYPE_CLASS) || block == 0) {
        rb_vm_localjump_error("no block given (yield)", Qnil, 0);
    }
    iseq = block->iseq;

    if (UNLIKELY(ci->flag & VM_CALL_ARGS_SPLAT)) {
        vm_caller_setup_args(th, GET_CFP(), ci);
    }

    if (BUILTIN_TYPE(iseq) != T_NODE) {
        /* Ruby-defined block: set up args in place and push a BLOCK frame */
        int opt_pc;
        const int arg_size = iseq->arg_size;
        VALUE * const rsp = GET_SP() - ci->argc;  /* base of the argument area */
        SET_SP(rsp);

        CHECK_VM_STACK_OVERFLOW(GET_CFP(), iseq->stack_max);
        opt_pc = vm_yield_setup_args(th, iseq, ci->argc, rsp, 0, block_proc_is_lambda(block->proc));

        vm_push_frame(th, iseq, VM_FRAME_MAGIC_BLOCK, block->self,
                      block->klass,
                      VM_ENVVAL_PREV_EP_PTR(block->ep),
                      iseq->iseq_encoded + opt_pc,
                      rsp + arg_size,
                      iseq->local_size - arg_size, 0);

        return Qundef;  /* result produced by executing the new frame */
    }
    else {
        /* C-implemented block (IFUNC): call it directly */
        VALUE val = vm_yield_with_cfunc(th, block, block->self, ci->argc, STACK_ADDR_FROM_TOP(ci->argc), 0);
        POPN(ci->argc);
        return val;
    }
}
02389