00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024 #include "ruby/ruby.h"
00025 #include "ruby/debug.h"
00026 #include "ruby/encoding.h"
00027
00028 #include "internal.h"
00029 #include "vm_core.h"
00030 #include "eval_intern.h"
00031
00032
00033
/* One registered event hook: a callback, the event mask it subscribes to,
 * behavioral flags (e.g. deleted / raw-arg), and an intrusive list link. */
typedef struct rb_event_hook_struct {
    rb_event_hook_flag_t hook_flags;   /* RUBY_EVENT_HOOK_FLAG_* bits */
    rb_event_flag_t events;            /* events this hook fires on */
    rb_event_hook_func_t func;         /* callback (may be a cast raw-arg fn) */
    VALUE data;                        /* user data, GC-marked via rb_vm_trace_mark_event_hooks */
    struct rb_event_hook_struct *next; /* singly-linked list */
} rb_event_hook_t;
00041
/* Alternative callback signature used when RUBY_EVENT_HOOK_FLAG_RAW_ARG is
 * set: receives the whole trace-arg instead of the unpacked fields
 * (see exec_hooks_body). */
typedef void (*rb_event_hook_raw_arg_func_t)(VALUE data, const rb_trace_arg_t *arg);

/* Number of distinct event bits tracked in ruby_event_flag_count. */
#define MAX_EVENT_NUM 32

/* Per-event-bit reference counts; ruby_vm_event_flags is recomputed from
 * these when hooks are added/removed. */
static int ruby_event_flag_count[MAX_EVENT_NUM] = {0};
00047
00048
00049
00050 void
00051 rb_vm_trace_mark_event_hooks(rb_hook_list_t *hooks)
00052 {
00053 rb_event_hook_t *hook = hooks->hooks;
00054
00055 while (hook) {
00056 rb_gc_mark(hook->data);
00057 hook = hook->next;
00058 }
00059 }
00060
00061
00062
00063 static void
00064 recalc_add_ruby_vm_event_flags(rb_event_flag_t events)
00065 {
00066 int i;
00067 ruby_vm_event_flags = 0;
00068
00069 for (i=0; i<MAX_EVENT_NUM; i++) {
00070 if (events & (1 << i)) {
00071 ruby_event_flag_count[i]++;
00072 }
00073 ruby_vm_event_flags |= ruby_event_flag_count[i] ? (1<<i) : 0;
00074 }
00075
00076 rb_objspace_set_event_hook(ruby_vm_event_flags);
00077 }
00078
00079 static void
00080 recalc_remove_ruby_vm_event_flags(rb_event_flag_t events)
00081 {
00082 int i;
00083 ruby_vm_event_flags = 0;
00084
00085 for (i=0; i<MAX_EVENT_NUM; i++) {
00086 if (events & (1 << i)) {
00087 ruby_event_flag_count[i]--;
00088 }
00089 ruby_vm_event_flags |= ruby_event_flag_count[i] ? (1<<i) : 0;
00090 }
00091
00092 rb_objspace_set_event_hook(ruby_vm_event_flags);
00093 }
00094
00095
00096
/* Convert a Thread VALUE into its underlying rb_thread_t pointer. */
static rb_thread_t *
thval2thread_t(VALUE thval)
{
    rb_thread_t *th;
    GetThreadPtr(thval, th);
    return th;
}
00104
00105 static rb_event_hook_t *
00106 alloc_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
00107 {
00108 rb_event_hook_t *hook;
00109
00110 if ((events & RUBY_INTERNAL_EVENT_MASK) && (events & ~RUBY_INTERNAL_EVENT_MASK)) {
00111 rb_raise(rb_eTypeError, "Can not specify normal event and internal event simultaneously.");
00112 }
00113
00114 hook = ALLOC(rb_event_hook_t);
00115 hook->hook_flags = hook_flags;
00116 hook->events = events;
00117 hook->func = func;
00118 hook->data = data;
00119 return hook;
00120 }
00121
/* Push `hook` onto the front of `list` and fold its events into both the
 * list's cached mask and the global VM event flags. */
static void
connect_event_hook(rb_hook_list_t *list, rb_event_hook_t *hook)
{
    hook->next = list->hooks;
    list->hooks = hook;
    recalc_add_ruby_vm_event_flags(hook->events);
    list->events |= hook->events;
}
00130
00131 static void
00132 rb_threadptr_add_event_hook(rb_thread_t *th, rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
00133 {
00134 rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
00135 connect_event_hook(&th->event_hooks, hook);
00136 }
00137
/* Public C API: add a per-thread hook with the default (safe) flags. */
void
rb_thread_add_event_hook(VALUE thval, rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
{
    rb_threadptr_add_event_hook(thval2thread_t(thval), func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
}
00143
00144 void
00145 rb_add_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data)
00146 {
00147 rb_event_hook_t *hook = alloc_event_hook(func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
00148 connect_event_hook(&GET_VM()->event_hooks, hook);
00149 }
00150
/* Public C API: add a per-thread hook with caller-supplied hook flags. */
void
rb_thread_add_event_hook2(VALUE thval, rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
{
    rb_threadptr_add_event_hook(thval2thread_t(thval), func, events, data, hook_flags);
}
00156
00157 void
00158 rb_add_event_hook2(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flags)
00159 {
00160 rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
00161 connect_event_hook(&GET_VM()->event_hooks, hook);
00162 }
00163
00164
00165 static int
00166 remove_event_hook(rb_hook_list_t *list, rb_event_hook_func_t func, VALUE data)
00167 {
00168 int ret = 0;
00169 rb_event_hook_t *hook = list->hooks;
00170
00171 while (hook) {
00172 if (func == 0 || hook->func == func) {
00173 if (data == Qundef || hook->data == data) {
00174 hook->hook_flags |= RUBY_EVENT_HOOK_FLAG_DELETED;
00175 ret+=1;
00176 list->need_clean++;
00177 }
00178 }
00179 hook = hook->next;
00180 }
00181
00182 return ret;
00183 }
00184
/* Mark matching hooks on one thread's list as deleted; returns match count. */
static int
rb_threadptr_remove_event_hook(rb_thread_t *th, rb_event_hook_func_t func, VALUE data)
{
    return remove_event_hook(&th->event_hooks, func, data);
}
00190
/* Public C API: remove all per-thread hooks with callback `func`
 * regardless of their data (Qundef = wildcard). */
int
rb_thread_remove_event_hook(VALUE thval, rb_event_hook_func_t func)
{
    return rb_threadptr_remove_event_hook(thval2thread_t(thval), func, Qundef);
}
00196
/* Public C API: remove per-thread hooks matching both `func` and `data`. */
int
rb_thread_remove_event_hook_with_data(VALUE thval, rb_event_hook_func_t func, VALUE data)
{
    return rb_threadptr_remove_event_hook(thval2thread_t(thval), func, data);
}
00202
/* Public C API: remove all VM-global hooks with callback `func`. */
int
rb_remove_event_hook(rb_event_hook_func_t func)
{
    return remove_event_hook(&GET_VM()->event_hooks, func, Qundef);
}
00208
/* Public C API: remove VM-global hooks matching both `func` and `data`. */
int
rb_remove_event_hook_with_data(rb_event_hook_func_t func, VALUE data)
{
    return remove_event_hook(&GET_VM()->event_hooks, func, data);
}
00214
/* st_foreach callback for rb_clear_trace_func: wipe every hook on one
 * living thread (func == 0 is the wildcard). `flag` is unused. */
static int
clear_trace_func_i(st_data_t key, st_data_t val, st_data_t flag)
{
    rb_thread_t *th;
    GetThreadPtr((VALUE)key, th);
    rb_threadptr_remove_event_hook(th, 0, Qundef);
    return ST_CONTINUE;
}
00223
/* Remove every event hook: per-thread hooks on all living threads,
 * then all VM-global hooks. */
void
rb_clear_trace_func(void)
{
    st_foreach(GET_VM()->living_threads, clear_trace_func_i, (st_data_t) 0);
    rb_remove_event_hook(0);
}
00230
00231
00232
/*
 * Physically unlink and free hooks previously marked DELETED, rebuilding
 * the list's cached event mask from the survivors. Uses a pointer-to-link
 * (`nextp`) so removal works uniformly at the head and in the middle.
 * Only safe to call when no other trace dispatch is walking the list
 * (callers check vm->trace_running — see exec_hooks_precheck).
 */
static void
clean_hooks(rb_hook_list_t *list)
{
    rb_event_hook_t *hook, **nextp = &list->hooks;

    list->events = 0;
    list->need_clean = 0;

    while ((hook = *nextp) != 0) {
        if (hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) {
            *nextp = hook->next;                      /* unlink */
            recalc_remove_ruby_vm_event_flags(hook->events);
            xfree(hook);
        }
        else {
            list->events |= hook->events;             /* keep; re-accumulate mask */
            nextp = &hook->next;
        }
    }
}
00253
/*
 * Invoke every live hook on `list` that subscribes to this event.
 * RAW_ARG hooks were registered with a different signature and their
 * func pointer is cast back to rb_event_hook_raw_arg_func_t here;
 * normal hooks receive the unpacked (event, data, self, id, klass) tuple.
 */
static void
exec_hooks_body(rb_thread_t *th, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
{
    rb_event_hook_t *hook;

    for (hook = list->hooks; hook; hook = hook->next) {
        if (!(hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) && (trace_arg->event & hook->events)) {
            if (!(hook->hook_flags & RUBY_EVENT_HOOK_FLAG_RAW_ARG)) {
                (*hook->func)(trace_arg->event, hook->data, trace_arg->self, trace_arg->id, trace_arg->klass);
            }
            else {
                (*((rb_event_hook_raw_arg_func_t)hook->func))(hook->data, trace_arg);
            }
        }
    }
}
00270
00271 static int
00272 exec_hooks_precheck(rb_thread_t *th, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
00273 {
00274 if ((list->events & trace_arg->event) == 0) return 0;
00275
00276 if (UNLIKELY(list->need_clean > 0)) {
00277 if (th->vm->trace_running <= 1) {
00278 clean_hooks(list);
00279 }
00280 }
00281 return 1;
00282 }
00283
00284 static void
00285 exec_hooks_unprotected(rb_thread_t *th, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
00286 {
00287 if (exec_hooks_precheck(th, list, trace_arg) == 0) return;
00288 exec_hooks_body(th, list, trace_arg);
00289 }
00290
/*
 * Run matching hooks under a VM tag so that a raise/throw inside a hook is
 * captured and returned as the tag state (0 on clean completion) instead of
 * unwinding the caller. The thread's "raised" flag is cleared around the
 * call and restored afterwards. `raised` is volatile because it must
 * survive the longjmp performed by the tag machinery.
 */
static int
exec_hooks_protected(rb_thread_t *th, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
{
    int state;
    volatile int raised;

    if (exec_hooks_precheck(th, list, trace_arg) == 0) return 0;

    raised = rb_threadptr_reset_raised(th);



    TH_PUSH_TAG(th);
    if ((state = TH_EXEC_TAG()) == 0) {
        exec_hooks_body(th, list, trace_arg);
    }
    TH_POP_TAG();

    if (raised) {
        rb_threadptr_set_raised(th);
    }

    return state;
}
00315
/*
 * Core event dispatcher.
 *
 * Internal events (GC etc.) run unprotected and may nest inside a normal
 * trace dispatch, but never inside another internal one (the empty branch
 * below deliberately drops the nested internal event). Normal events run
 * under tag protection and are suppressed while any trace handler is
 * already active on this thread (th->trace_arg != 0) or when self is the
 * frozen-core object.
 *
 * `pop_p`: when a protected hook raised (state != 0), also pop the current
 * control frame (and the tag, for FINISH frames) before re-jumping, so the
 * exception propagates from the caller's frame.
 */
static void
rb_threadptr_exec_event_hooks_orig(rb_trace_arg_t *trace_arg, int pop_p)
{
    rb_thread_t *th = trace_arg->th;

    if (trace_arg->event & RUBY_INTERNAL_EVENT_MASK) {
        if (th->trace_arg && (th->trace_arg->event & RUBY_INTERNAL_EVENT_MASK)) {
            /* internal event inside an internal-event handler: ignore */
        }
        else {
            rb_trace_arg_t *prev_trace_arg = th->trace_arg;
            th->vm->trace_running++;
            th->trace_arg = trace_arg;
            exec_hooks_unprotected(th, &th->event_hooks, trace_arg);
            exec_hooks_unprotected(th, &th->vm->event_hooks, trace_arg);
            th->trace_arg = prev_trace_arg;
            th->vm->trace_running--;
        }
    }
    else {
        if (th->trace_arg == 0 &&
            trace_arg->self != rb_mRubyVMFrozenCore ) {
            /* save interpreter state that hooks may clobber */
            const VALUE errinfo = th->errinfo;
            const int outer_state = th->state;
            const VALUE old_recursive = rb_threadptr_reset_recursive_data(th);
            int state = 0;
            th->state = 0;
            th->errinfo = Qnil;

            th->vm->trace_running++;
            th->trace_arg = trace_arg;
            {
                /* thread-local hooks first, then VM-global hooks */
                state = exec_hooks_protected(th, &th->event_hooks, trace_arg);
                if (state) goto terminate;


                state = exec_hooks_protected(th, &th->vm->event_hooks, trace_arg);
                if (state) goto terminate;

                /* errinfo restored only on the clean path (a raising hook
                 * replaces it) — NOTE(review): presumably intentional */
                th->errinfo = errinfo;
            }
          terminate:
            th->trace_arg = 0;
            th->vm->trace_running--;
            rb_threadptr_restore_recursive_data(th, old_recursive);

            if (state) {
                if (pop_p) {
                    if (VM_FRAME_TYPE_FINISH_P(th->cfp)) {
                        th->tag = th->tag->prev;
                    }
                    th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
                }
                TH_JUMP_TAG(th, state); /* does not return */
            }
            th->state = outer_state;
        }
    }
}
00376
/* Dispatch hooks; if a hook raises, pop the current frame before jumping. */
void
rb_threadptr_exec_event_hooks_and_pop_frame(rb_trace_arg_t *trace_arg)
{
    rb_threadptr_exec_event_hooks_orig(trace_arg, 1);
}
00382
/* Dispatch hooks without frame popping on error. */
void
rb_threadptr_exec_event_hooks(rb_trace_arg_t *trace_arg)
{
    rb_threadptr_exec_event_hooks_orig(trace_arg, 0);
}
00388
/*
 * Call func(arg) with event tracing suppressed on the current thread.
 * Suppression works by installing a dummy trace_arg (the dispatcher skips
 * normal events while th->trace_arg is set) and bumping trace_running so
 * hook-list cleanup is deferred. Runs under a tag; a raise inside func is
 * re-thrown after tracing state is restored. `raised`/`outer_state` are
 * volatile because they must survive the tag longjmp.
 */
VALUE
rb_suppress_tracing(VALUE (*func)(VALUE), VALUE arg)
{
    volatile int raised;
    volatile int outer_state;
    VALUE result = Qnil;
    rb_thread_t *th = GET_THREAD();
    int state;
    const int tracing = th->trace_arg ? 1 : 0;
    rb_trace_arg_t dummy_trace_arg;
    dummy_trace_arg.event = 0;

    if (!tracing) th->vm->trace_running++;
    if (!th->trace_arg) th->trace_arg = &dummy_trace_arg;

    raised = rb_threadptr_reset_raised(th);
    outer_state = th->state;
    th->state = 0;

    TH_PUSH_TAG(th);
    if ((state = TH_EXEC_TAG()) == 0) {
        result = (*func)(arg);
    }
    TH_POP_TAG();

    if (raised) {
        rb_threadptr_set_raised(th);
    }

    /* only undo what this call installed (tracing may have been active) */
    if (th->trace_arg == &dummy_trace_arg) th->trace_arg = 0;
    if (!tracing) th->vm->trace_running--;

    if (state) {
        JUMP_TAG(state); /* re-raise; does not return */
    }

    th->state = outer_state;
    return result;
}
00428
00429 static void call_trace_func(rb_event_flag_t, VALUE data, VALUE self, ID id, VALUE klass);
00430
00431
00432
00433
00434
00435
00436
00437
00438
00439
00440
00441
00442
00443
00444
00445
00446
00447
00448
00449
00450
00451
00452
00453
00454
00455
00456
00457
00458
00459
00460
00461
00462
00463
00464
00465
00466
00467
00468
00469
00470
00471
00472
00473
00474
00475
00476
00477
00478
00479
00480
00481
00482
00483
00484
00485
00486
00487
00488
00489
00490
00491
00492 static VALUE
00493 set_trace_func(VALUE obj, VALUE trace)
00494 {
00495
00496 rb_remove_event_hook(call_trace_func);
00497
00498 if (NIL_P(trace)) {
00499 return Qnil;
00500 }
00501
00502 if (!rb_obj_is_proc(trace)) {
00503 rb_raise(rb_eTypeError, "trace_func needs to be Proc");
00504 }
00505
00506 rb_add_event_hook(call_trace_func, RUBY_EVENT_ALL, trace);
00507 return trace;
00508 }
00509
/* Install `trace` (must be a Proc) as a per-thread trace handler. */
static void
thread_add_trace_func(rb_thread_t *th, VALUE trace)
{
    if (!rb_obj_is_proc(trace)) {
        rb_raise(rb_eTypeError, "trace_func needs to be Proc");
    }

    rb_threadptr_add_event_hook(th, call_trace_func, RUBY_EVENT_ALL, trace, RUBY_EVENT_HOOK_FLAG_SAFE);
}
00519
00520
00521
00522
00523
00524
00525
00526
00527
00528
/* Thread#add_trace_func(proc) — add a trace handler to this thread
 * (does not remove existing ones). Returns the proc. */
static VALUE
thread_add_trace_func_m(VALUE obj, VALUE trace)
{
    rb_thread_t *th;

    GetThreadPtr(obj, th);
    thread_add_trace_func(th, trace);
    return trace;
}
00538
00539
00540
00541
00542
00543
00544
00545
00546
00547
00548
00549
/* Thread#set_trace_func(proc) — replace this thread's trace handler;
 * nil uninstalls and returns nil. */
static VALUE
thread_set_trace_func_m(VALUE obj, VALUE trace)
{
    rb_thread_t *th;

    GetThreadPtr(obj, th);
    /* always remove the previous handler first */
    rb_threadptr_remove_event_hook(th, call_trace_func, Qundef);

    if (NIL_P(trace)) {
        return Qnil;
    }

    thread_add_trace_func(th, trace);
    return trace;
}
00565
/* Map a single event flag to the C-string name passed to legacy
 * set_trace_func handlers. Unknown/compound flags yield "unknown". */
static const char *
get_event_name(rb_event_flag_t event)
{
    switch (event) {
      case RUBY_EVENT_LINE: return "line";
      case RUBY_EVENT_CLASS: return "class";
      case RUBY_EVENT_END: return "end";
      case RUBY_EVENT_CALL: return "call";
      case RUBY_EVENT_RETURN: return "return";
      case RUBY_EVENT_C_CALL: return "c-call";
      case RUBY_EVENT_C_RETURN: return "c-return";
      case RUBY_EVENT_RAISE: return "raise";
      default:
        return "unknown";
    }
}
00582
/* Map a single event flag to the symbol ID used by TracePoint#event;
 * returns 0 for flags with no symbol. LINE|SPECIFIED_LINE collapses
 * to :line. */
static ID
get_event_id(rb_event_flag_t event)
{
    ID id;

    switch (event) {
#define C(name, NAME) case RUBY_EVENT_##NAME: CONST_ID(id, #name); return id;
        C(line, LINE);
        C(class, CLASS);
        C(end, END);
        C(call, CALL);
        C(return, RETURN);
        C(c_call, C_CALL);
        C(c_return, C_RETURN);
        C(raise, RAISE);
        C(b_call, B_CALL);
        C(b_return, B_RETURN);
        C(thread_begin, THREAD_BEGIN);
        C(thread_end, THREAD_END);
        C(specified_line, SPECIFIED_LINE);
      case RUBY_EVENT_LINE | RUBY_EVENT_SPECIFIED_LINE: CONST_ID(id, "line"); return id;
#undef C
      default:
        return 0;
    }
}
00609
/*
 * Legacy set_trace_func bridge: build the 6-element argv
 * (event name, file, line, method symbol, binding, class) and call the
 * user's Proc. ICLASS receivers are unwrapped to their class; singleton
 * classes are replaced by their attached object.
 */
static void
call_trace_func(rb_event_flag_t event, VALUE proc, VALUE self, ID id, VALUE klass)
{
    const char *srcfile = rb_sourcefile();
    VALUE eventname = rb_str_new2(get_event_name(event));
    VALUE filename = srcfile ? rb_str_new2(srcfile) : Qnil;
    VALUE argv[6];
    int line = rb_sourceline();
    rb_thread_t *th = GET_THREAD();

    if (!klass) {
        /* klass not supplied by the dispatcher: resolve from the frame */
        rb_thread_method_id_and_class(th, &id, &klass);
    }

    if (klass) {
        if (RB_TYPE_P(klass, T_ICLASS)) {
            klass = RBASIC(klass)->klass;
        }
        else if (FL_TEST(klass, FL_SINGLETON)) {
            klass = rb_ivar_get(klass, id__attached__);
        }
    }

    argv[0] = eventname;
    argv[1] = filename;
    argv[2] = INT2FIX(line);
    argv[3] = id ? ID2SYM(id) : Qnil;
    /* binding only when there is a self and a known source file */
    argv[4] = (self && srcfile) ? rb_binding_new() : Qnil;
    argv[5] = klass ? klass : Qnil;

    rb_proc_call_with_block(proc, 6, argv, Qnil);
}
00642
00643
00644
/* The TracePoint class object, set up in Init_vm_trace. */
static VALUE rb_cTracePoint;

/* Internal state of one TracePoint object. Exactly one of `func` (C
 * callback, with `data`) or `proc` (Ruby block) is used when it fires. */
typedef struct rb_tp_struct {
    rb_event_flag_t events;        /* subscribed event mask */
    rb_thread_t *target_th;        /* non-NULL: hook only this thread */
    void (*func)(VALUE tpval, void *data); /* C-level callback, or NULL */
    void *data;                    /* opaque argument for func */
    VALUE proc;                    /* Ruby handler block (GC-marked) */
    int tracing;                   /* nonzero while enabled */
    VALUE self;                    /* back-reference to the wrapper object */
} rb_tp_t;
00656
00657 static void
00658 tp_mark(void *ptr)
00659 {
00660 if (ptr) {
00661 rb_tp_t *tp = (rb_tp_t *)ptr;
00662 rb_gc_mark(tp->proc);
00663 if (tp->target_th) rb_gc_mark(tp->target_th->self);
00664 }
00665 }
00666
/* dsize callback for ObjectSpace.memsize_of; `ptr` is unused. */
static size_t
tp_memsize(const void *ptr)
{
    return sizeof(rb_tp_t);
}
00672
/* TypedData table for TracePoint wrappers (no custom free; the struct has
 * no owned pointers). */
static const rb_data_type_t tp_data_type = {
    "tracepoint",
    {tp_mark, RUBY_TYPED_NEVER_FREE, tp_memsize,},
    NULL, NULL, RUBY_TYPED_FREE_IMMEDIATELY
};
00678
/* Allocate a zero-filled TracePoint wrapper of class `klass`. */
static VALUE
tp_alloc(VALUE klass)
{
    rb_tp_t *tp;
    return TypedData_Make_Struct(klass, rb_tp_t, &tp_data_type, tp);
}
00685
/* Convert an event-name symbol (or anything responding to #to_sym) to its
 * event flag. :a_call / :a_return expand to the call/return family.
 * Raises ArgumentError for unknown names. */
static rb_event_flag_t
symbol2event_flag(VALUE v)
{
    static ID id;
    VALUE sym = rb_convert_type(v, T_SYMBOL, "Symbol", "to_sym");

#define C(name, NAME) CONST_ID(id, #name); if (sym == ID2SYM(id)) return RUBY_EVENT_##NAME
    C(line, LINE);
    C(class, CLASS);
    C(end, END);
    C(call, CALL);
    C(return, RETURN);
    C(c_call, C_CALL);
    C(c_return, C_RETURN);
    C(raise, RAISE);
    C(b_call, B_CALL);
    C(b_return, B_RETURN);
    C(thread_begin, THREAD_BEGIN);
    C(thread_end, THREAD_END);
    C(specified_line, SPECIFIED_LINE);
#undef C
    CONST_ID(id, "a_call"); if (sym == ID2SYM(id)) return RUBY_EVENT_CALL | RUBY_EVENT_B_CALL | RUBY_EVENT_C_CALL;
    CONST_ID(id, "a_return"); if (sym == ID2SYM(id)) return RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN | RUBY_EVENT_C_RETURN;
    rb_raise(rb_eArgError, "unknown event: %s", rb_id2name(SYM2ID(sym)));
}
00711
/* Unwrap a TracePoint VALUE to its rb_tp_t (type-checked). */
static rb_tp_t *
tpptr(VALUE tpval)
{
    rb_tp_t *tp;
    TypedData_Get_Struct(tpval, rb_tp_t, &tp_data_type, tp);
    return tp;
}
00719
00720 static rb_trace_arg_t *
00721 get_trace_arg(void)
00722 {
00723 rb_trace_arg_t *trace_arg = GET_THREAD()->trace_arg;
00724 if (trace_arg == 0) {
00725 rb_raise(rb_eRuntimeError, "access from outside");
00726 }
00727 return trace_arg;
00728 }
00729
/* Public C API: trace-arg of the running handler. `tpval` is unused —
 * the arg is thread-local, not stored on the TracePoint. */
struct rb_trace_arg_struct *
rb_tracearg_from_tracepoint(VALUE tpval)
{
    return get_trace_arg();
}
00735
/* Raw event flag of the current event. */
rb_event_flag_t
rb_tracearg_event_flag(rb_trace_arg_t *trace_arg)
{
    return trace_arg->event;
}
00741
/* Current event as a Symbol (via get_event_id). */
VALUE
rb_tracearg_event(rb_trace_arg_t *trace_arg)
{
    return ID2SYM(get_event_id(trace_arg->event));
}
00747
/* Lazily resolve path/lineno from the nearest Ruby-level frame; Qundef in
 * ->path marks "not yet computed". Falls back to nil/0 when there is no
 * Ruby frame. */
static void
fill_path_and_lineno(rb_trace_arg_t *trace_arg)
{
    if (trace_arg->path == Qundef) {
        rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(trace_arg->th, trace_arg->cfp);

        if (cfp) {
            trace_arg->path = cfp->iseq->location.path;
            trace_arg->lineno = rb_vm_get_sourceline(cfp);
        }
        else {
            trace_arg->path = Qnil;
            trace_arg->lineno = 0;
        }
    }
}
00764
/* Source line of the current event (Fixnum; 0 when unknown). */
VALUE
rb_tracearg_lineno(rb_trace_arg_t *trace_arg)
{
    fill_path_and_lineno(trace_arg);
    return INT2FIX(trace_arg->lineno);
}
/* Source path of the current event (String, or nil when unknown). */
VALUE
rb_tracearg_path(rb_trace_arg_t *trace_arg)
{
    fill_path_and_lineno(trace_arg);
    return trace_arg->path;
}
00777
/* Lazily resolve method id and defined class from the control frame;
 * klass_solved caches the result. ICLASS is unwrapped to its class;
 * no class at all becomes nil. */
static void
fill_id_and_klass(rb_trace_arg_t *trace_arg)
{
    if (!trace_arg->klass_solved) {
        if (!trace_arg->klass) {
            rb_vm_control_frame_id_and_class(trace_arg->cfp, &trace_arg->id, &trace_arg->klass);
        }

        if (trace_arg->klass) {
            if (RB_TYPE_P(trace_arg->klass, T_ICLASS)) {
                trace_arg->klass = RBASIC(trace_arg->klass)->klass;
            }
        }
        else {
            trace_arg->klass = Qnil;
        }

        trace_arg->klass_solved = 1;
    }
}
00798
/* Method name Symbol of the current event, or nil when there is none. */
VALUE
rb_tracearg_method_id(rb_trace_arg_t *trace_arg)
{
    fill_id_and_klass(trace_arg);
    return trace_arg->id ? ID2SYM(trace_arg->id) : Qnil;
}
00805
/* Class/module that defines the current method, or nil. */
VALUE
rb_tracearg_defined_class(rb_trace_arg_t *trace_arg)
{
    fill_id_and_klass(trace_arg);
    return trace_arg->klass;
}
00812
00813 VALUE
00814 rb_tracearg_binding(rb_trace_arg_t *trace_arg)
00815 {
00816 rb_control_frame_t *cfp;
00817 cfp = rb_vm_get_binding_creatable_next_cfp(trace_arg->th, trace_arg->cfp);
00818
00819 if (cfp) {
00820 return rb_binding_new_with_cfp(trace_arg->th, cfp);
00821 }
00822 else {
00823 return Qnil;
00824 }
00825 }
00826
/* Receiver (self) at the current event. */
VALUE
rb_tracearg_self(rb_trace_arg_t *trace_arg)
{
    return trace_arg->self;
}
00832
00833 VALUE
00834 rb_tracearg_return_value(rb_trace_arg_t *trace_arg)
00835 {
00836 if (trace_arg->event & (RUBY_EVENT_RETURN | RUBY_EVENT_C_RETURN | RUBY_EVENT_B_RETURN)) {
00837
00838 }
00839 else {
00840 rb_raise(rb_eRuntimeError, "not supported by this event");
00841 }
00842 if (trace_arg->data == Qundef) {
00843 rb_bug("tp_attr_return_value_m: unreachable");
00844 }
00845 return trace_arg->data;
00846 }
00847
00848 VALUE
00849 rb_tracearg_raised_exception(rb_trace_arg_t *trace_arg)
00850 {
00851 if (trace_arg->event & (RUBY_EVENT_RAISE)) {
00852
00853 }
00854 else {
00855 rb_raise(rb_eRuntimeError, "not supported by this event");
00856 }
00857 if (trace_arg->data == Qundef) {
00858 rb_bug("tp_attr_raised_exception_m: unreachable");
00859 }
00860 return trace_arg->data;
00861 }
00862
00863 VALUE
00864 rb_tracearg_object(rb_trace_arg_t *trace_arg)
00865 {
00866 if (trace_arg->event & (RUBY_INTERNAL_EVENT_NEWOBJ | RUBY_INTERNAL_EVENT_FREEOBJ)) {
00867
00868 }
00869 else {
00870 rb_raise(rb_eRuntimeError, "not supported by this event");
00871 }
00872 if (trace_arg->data == Qundef) {
00873 rb_bug("tp_attr_raised_exception_m: unreachable");
00874 }
00875 return trace_arg->data;
00876 }
00877
00878
00879
00880
00881
00882
/* TracePoint#event — event Symbol; raises outside a handler. tpval unused. */
static VALUE
tracepoint_attr_event(VALUE tpval)
{
    return rb_tracearg_event(get_trace_arg());
}
00888
00889
00890
00891
/* TracePoint#lineno — source line; raises outside a handler. */
static VALUE
tracepoint_attr_lineno(VALUE tpval)
{
    return rb_tracearg_lineno(get_trace_arg());
}
00897
00898
00899
00900
/* TracePoint#path — source path; raises outside a handler. */
static VALUE
tracepoint_attr_path(VALUE tpval)
{
    return rb_tracearg_path(get_trace_arg());
}
00906
00907
00908
00909
/* TracePoint#method_id — method name Symbol or nil; raises outside a handler. */
static VALUE
tracepoint_attr_method_id(VALUE tpval)
{
    return rb_tracearg_method_id(get_trace_arg());
}
00915
00916
00917
00918
00919
00920
00921
00922
00923
00924
00925
00926
00927
00928
00929
00930
00931
00932
00933
00934
00935
00936
00937
00938
00939
00940
00941
00942
00943
00944
00945
00946
00947
00948
00949
/* TracePoint#defined_class — defining class/module; raises outside a handler. */
static VALUE
tracepoint_attr_defined_class(VALUE tpval)
{
    return rb_tracearg_defined_class(get_trace_arg());
}
00955
00956
00957
00958
/* TracePoint#binding — Binding of the current frame or nil; raises outside a handler. */
static VALUE
tracepoint_attr_binding(VALUE tpval)
{
    return rb_tracearg_binding(get_trace_arg());
}
00964
00965
00966
00967
00968
00969
00970
/* TracePoint#self — receiver at the event; raises outside a handler. */
static VALUE
tracepoint_attr_self(VALUE tpval)
{
    return rb_tracearg_self(get_trace_arg());
}
00976
00977
00978
00979
/* TracePoint#return_value — only on return-family events; raises otherwise. */
static VALUE
tracepoint_attr_return_value(VALUE tpval)
{
    return rb_tracearg_return_value(get_trace_arg());
}
00985
00986
00987
00988
/* TracePoint#raised_exception — only on the raise event; raises otherwise. */
static VALUE
tracepoint_attr_raised_exception(VALUE tpval)
{
    return rb_tracearg_raised_exception(get_trace_arg());
}
00994
/* RAW_ARG hook entry for TracePoint: invoke the C callback if set,
 * otherwise call the Ruby handler block with the TracePoint object.
 * `trace_arg` is unused here — handlers re-fetch it thread-locally. */
static void
tp_call_trace(VALUE tpval, rb_trace_arg_t *trace_arg)
{
    rb_tp_t *tp = tpptr(tpval);

    if (tp->func) {
        (*tp->func)(tpval, tp->data);
    }
    else {
        rb_proc_call_with_block((VALUE)tp->proc, 1, &tpval, Qnil);
    }
}
01007
/* Public C API: activate a TracePoint by registering tp_call_trace as a
 * RAW_ARG hook — per-thread when target_th is set, otherwise VM-global.
 * Idempotence is NOT checked here; callers guard with tp->tracing.
 * Returns Qundef (callable from rb_ensure). */
VALUE
rb_tracepoint_enable(VALUE tpval)
{
    rb_tp_t *tp;

    tp = tpptr(tpval);

    if (tp->target_th) {
        rb_thread_add_event_hook2(tp->target_th->self, (rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
                                  RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
    }
    else {
        rb_add_event_hook2((rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
                           RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
    }
    tp->tracing = 1;
    return Qundef;
}
01026
/* Public C API: deactivate a TracePoint by removing its hook (matched by
 * func + tpval data). Returns Qundef (callable from rb_ensure). */
VALUE
rb_tracepoint_disable(VALUE tpval)
{
    rb_tp_t *tp;

    tp = tpptr(tpval);

    if (tp->target_th) {
        rb_thread_remove_event_hook_with_data(tp->target_th->self, (rb_event_hook_func_t)tp_call_trace, tpval);
    }
    else {
        rb_remove_event_hook_with_data((rb_event_hook_func_t)tp_call_trace, tpval);
    }
    tp->tracing = 0;
    return Qundef;
}
01043
01044
01045
01046
01047
01048
01049
01050
01051
01052
01053
01054
01055
01056
01057
01058
01059
01060
01061
01062
01063
01064
01065
01066
01067
01068
01069
01070
01071
01072
01073
01074
01075
01076
01077
01078
01079
01080
/* TracePoint#enable — activate; with a block, activate only for the block's
 * duration (restoring the previous state via rb_ensure) and return the
 * block's value; without a block, return the *previous* enabled state. */
static VALUE
tracepoint_enable_m(VALUE tpval)
{
    rb_tp_t *tp = tpptr(tpval);
    int previous_tracing = tp->tracing;
    rb_tracepoint_enable(tpval);

    if (rb_block_given_p()) {
        return rb_ensure(rb_yield, Qnil,
                         previous_tracing ? rb_tracepoint_enable : rb_tracepoint_disable,
                         tpval);
    }
    else {
        return previous_tracing ? Qtrue : Qfalse;
    }
}
01097
01098
01099
01100
01101
01102
01103
01104
01105
01106
01107
01108
01109
01110
01111
01112
01113
01114
01115
01116
01117
01118
01119
01120
01121
01122
01123
01124
01125
01126
01127
01128
01129
01130
01131
/* TracePoint#disable — mirror of #enable: deactivate; with a block,
 * deactivate only for the block's duration; without, return the previous
 * enabled state. */
static VALUE
tracepoint_disable_m(VALUE tpval)
{
    rb_tp_t *tp = tpptr(tpval);
    int previous_tracing = tp->tracing;
    rb_tracepoint_disable(tpval);

    if (rb_block_given_p()) {
        return rb_ensure(rb_yield, Qnil,
                         previous_tracing ? rb_tracepoint_enable : rb_tracepoint_disable,
                         tpval);
    }
    else {
        return previous_tracing ? Qtrue : Qfalse;
    }
}
01148
01149
01150
01151
01152
01153
01154
01155 VALUE
01156 rb_tracepoint_enabled_p(VALUE tpval)
01157 {
01158 rb_tp_t *tp = tpptr(tpval);
01159 return tp->tracing ? Qtrue : Qfalse;
01160 }
01161
01162 static VALUE
01163 tracepoint_new(VALUE klass, rb_thread_t *target_th, rb_event_flag_t events, void (func)(VALUE, void*), void *data, VALUE proc)
01164 {
01165 VALUE tpval = tp_alloc(klass);
01166 rb_tp_t *tp;
01167 TypedData_Get_Struct(tpval, rb_tp_t, &tp_data_type, tp);
01168
01169 tp->proc = proc;
01170 tp->func = func;
01171 tp->data = data;
01172 tp->events = events;
01173 tp->self = tpval;
01174
01175 return tpval;
01176 }
01177
/* Public C API: build a TracePoint with a C callback. A truthy
 * `target_thval` restricts it to that thread; the proc slot is Qundef. */
VALUE
rb_tracepoint_new(VALUE target_thval, rb_event_flag_t events, void (*func)(VALUE, void *), void *data)
{
    rb_thread_t *target_th = 0;
    if (RTEST(target_thval)) {
        GetThreadPtr(target_thval, target_th);



    }
    return tracepoint_new(rb_cTracePoint, target_th, events, func, data, Qundef);
}
01190
01191
01192
01193
01194
01195
01196
01197
01198
01199
01200
01201
01202
01203
01204
01205
01206
01207
01208
01209
01210
01211
01212
01213
01214
01215
01216
01217
01218
01219
01220
01221
01222
01223
01224
01225
01226
01227
01228
01229
01230
01231
01232
01233
01234
01235
01236
01237
01238 static VALUE
01239 tracepoint_new_s(int argc, VALUE *argv, VALUE self)
01240 {
01241 rb_event_flag_t events = 0;
01242 int i;
01243
01244 if (argc > 0) {
01245 for (i=0; i<argc; i++) {
01246 events |= symbol2event_flag(argv[i]);
01247 }
01248 }
01249 else {
01250 events = RUBY_EVENT_TRACEPOINT_ALL;
01251 }
01252
01253 if (!rb_block_given_p()) {
01254 rb_raise(rb_eThreadError, "must be called with a block");
01255 }
01256
01257 return tracepoint_new(self, 0, events, 0, 0, rb_block_proc());
01258 }
01259
/* TracePoint.trace(...) { } — TracePoint.new followed by immediate enable. */
static VALUE
tracepoint_trace_s(int argc, VALUE *argv, VALUE self)
{
    VALUE trace = tracepoint_new_s(argc, argv, self);
    rb_tracepoint_enable(trace);
    return trace;
}
01267
01268
01269
01270
01271
01272
01273
01274
01275
/* TracePoint#inspect — inside a handler, format event-specific details
 * (method/path/lineno or thread); outside, just report enabled/disabled. */
static VALUE
tracepoint_inspect(VALUE self)
{
    rb_tp_t *tp = tpptr(self);
    rb_trace_arg_t *trace_arg = GET_THREAD()->trace_arg;

    if (trace_arg) {
        switch (trace_arg->event) {
          case RUBY_EVENT_LINE:
          case RUBY_EVENT_SPECIFIED_LINE:
            {
                VALUE sym = rb_tracearg_method_id(trace_arg);
                /* line events outside any method fall back to the plain form */
                if (NIL_P(sym))
                    goto default_inspect;
                return rb_sprintf("#<TracePoint:%"PRIsVALUE"@%"PRIsVALUE":%d in `%"PRIsVALUE"'>",
                                  rb_tracearg_event(trace_arg),
                                  rb_tracearg_path(trace_arg),
                                  FIX2INT(rb_tracearg_lineno(trace_arg)),
                                  sym);
            }
          case RUBY_EVENT_CALL:
          case RUBY_EVENT_C_CALL:
          case RUBY_EVENT_RETURN:
          case RUBY_EVENT_C_RETURN:
            return rb_sprintf("#<TracePoint:%"PRIsVALUE" `%"PRIsVALUE"'@%"PRIsVALUE":%d>",
                              rb_tracearg_event(trace_arg),
                              rb_tracearg_method_id(trace_arg),
                              rb_tracearg_path(trace_arg),
                              FIX2INT(rb_tracearg_lineno(trace_arg)));
          case RUBY_EVENT_THREAD_BEGIN:
          case RUBY_EVENT_THREAD_END:
            return rb_sprintf("#<TracePoint:%"PRIsVALUE" %"PRIsVALUE">",
                              rb_tracearg_event(trace_arg),
                              rb_tracearg_self(trace_arg));
          default:
          default_inspect:
            return rb_sprintf("#<TracePoint:%"PRIsVALUE"@%"PRIsVALUE":%d>",
                              rb_tracearg_event(trace_arg),
                              rb_tracearg_path(trace_arg),
                              FIX2INT(rb_tracearg_lineno(trace_arg)));
        }
    }
    else {
        return rb_sprintf("#<TracePoint:%s>", tp->tracing ? "enabled" : "disabled");
    }
}
01322
01323 static void Init_postponed_job(void);
01324
01325
01326 void
01327 Init_vm_trace(void)
01328 {
01329
01330 rb_define_global_function("set_trace_func", set_trace_func, 1);
01331 rb_define_method(rb_cThread, "set_trace_func", thread_set_trace_func_m, 1);
01332 rb_define_method(rb_cThread, "add_trace_func", thread_add_trace_func_m, 1);
01333
01334
01335
01336
01337
01338
01339
01340
01341
01342
01343
01344
01345
01346
01347
01348
01349
01350
01351
01352
01353
01354
01355
01356
01357
01358
01359
01360
01361
01362
01363
01364
01365
01366
01367
01368
01369
01370
01371
01372
01373
01374
01375
01376
01377
01378
01379
01380 rb_cTracePoint = rb_define_class("TracePoint", rb_cObject);
01381 rb_undef_alloc_func(rb_cTracePoint);
01382 rb_undef_method(CLASS_OF(rb_cTracePoint), "new");
01383 rb_define_singleton_method(rb_cTracePoint, "new", tracepoint_new_s, -1);
01384
01385
01386
01387
01388
01389
01390
01391
01392
01393
01394
01395
01396
01397
01398 rb_define_singleton_method(rb_cTracePoint, "trace", tracepoint_trace_s, -1);
01399
01400 rb_define_method(rb_cTracePoint, "enable", tracepoint_enable_m, 0);
01401 rb_define_method(rb_cTracePoint, "disable", tracepoint_disable_m, 0);
01402 rb_define_method(rb_cTracePoint, "enabled?", rb_tracepoint_enabled_p, 0);
01403
01404 rb_define_method(rb_cTracePoint, "inspect", tracepoint_inspect, 0);
01405
01406 rb_define_method(rb_cTracePoint, "event", tracepoint_attr_event, 0);
01407 rb_define_method(rb_cTracePoint, "lineno", tracepoint_attr_lineno, 0);
01408 rb_define_method(rb_cTracePoint, "path", tracepoint_attr_path, 0);
01409 rb_define_method(rb_cTracePoint, "method_id", tracepoint_attr_method_id, 0);
01410 rb_define_method(rb_cTracePoint, "defined_class", tracepoint_attr_defined_class, 0);
01411 rb_define_method(rb_cTracePoint, "binding", tracepoint_attr_binding, 0);
01412 rb_define_method(rb_cTracePoint, "self", tracepoint_attr_self, 0);
01413 rb_define_method(rb_cTracePoint, "return_value", tracepoint_attr_return_value, 0);
01414 rb_define_method(rb_cTracePoint, "raised_exception", tracepoint_attr_raised_exception, 0);
01415
01416
01417
01418 Init_postponed_job();
01419 }
01420
/* One queued postponed job: a callback + data, tagged with the
 * registering thread and flags. */
typedef struct rb_postponed_job_struct {
    unsigned long flags;            /* caller-supplied flags (stored, see register) */
    struct rb_thread_struct *th;    /* thread that registered the job */
    rb_postponed_job_func_t func;   /* callback run by rb_postponed_job_flush */
    void *data;                     /* opaque argument for func */
} rb_postponed_job_t;

/* Buffer capacity; register_one may exceed it by the SPECIAL_ADDITION
 * headroom (see rb_postponed_job_register_one). */
#define MAX_POSTPONED_JOB 1000
#define MAX_POSTPONED_JOB_SPECIAL_ADDITION 24
01430
/* Allocate the VM-wide postponed-job buffer (called from Init_vm_trace). */
static void
Init_postponed_job(void)
{
    rb_vm_t *vm = GET_VM();
    vm->postponed_job_buffer = ALLOC_N(rb_postponed_job_t, MAX_POSTPONED_JOB);
    vm->postponed_job_index = 0;
}
01438
/* Result of one registration attempt. INTERRUPTED means the CAS lost a
 * race and the caller should retry. (NOTE: "SUCESS" is a long-standing
 * misspelling; kept because sibling functions in this file reference it.) */
enum postponed_job_register_result {
    PJRR_SUCESS = 0,
    PJRR_FULL = 1,
    PJRR_INTERRUPTED = 2
};
01444
/*
 * Try to claim slot `expected_index` in the job buffer via compare-and-swap
 * on the shared index, fill it in, and flag the thread so the job runs at
 * the next interrupt check. Lock-free: a lost CAS returns PJRR_INTERRUPTED
 * and the caller retries with a fresh index.
 *
 * NOTE(review): the slot fields are written *after* the CAS publishes the
 * new index — presumably safe against the flush path's ordering, but worth
 * confirming (flush CAS-decrements before reading the slot).
 */
static enum postponed_job_register_result
postponed_job_register(rb_thread_t *th, rb_vm_t *vm,
                       unsigned int flags, rb_postponed_job_func_t func, void *data, int max, int expected_index)
{
    rb_postponed_job_t *pjob;

    if (expected_index >= max) return PJRR_FULL; /* failed to buffer the job */

    if (ATOMIC_CAS(vm->postponed_job_index, expected_index, expected_index+1) == expected_index) {
        pjob = &vm->postponed_job_buffer[expected_index];
    }
    else {
        return PJRR_INTERRUPTED;
    }

    pjob->flags = flags;
    pjob->th = th;
    pjob->func = func;
    pjob->data = data;

    RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(th);

    return PJRR_SUCESS;
}
01469
01470
01471
/* Public C API (async-signal-safe registration): queue `func(data)` to run
 * at the next interrupt check. Returns 1 on success, 0 when the buffer is
 * full; retries transparently on CAS contention. */
int
rb_postponed_job_register(unsigned int flags, rb_postponed_job_func_t func, void *data)
{
    rb_thread_t *th = GET_THREAD();
    rb_vm_t *vm = th->vm;

  begin:
    switch (postponed_job_register(th, vm, flags, func, data, MAX_POSTPONED_JOB, vm->postponed_job_index)) {
      case PJRR_SUCESS     : return 1;
      case PJRR_FULL       : return 0;
      case PJRR_INTERRUPTED: goto begin;
      default: rb_bug("unreachable\n");
    }
}
01486
01487
/*
 * Like rb_postponed_job_register, but deduplicated by `func`: if a job with
 * the same callback is already queued, only re-arm the interrupt flag and
 * return 2. Allowed to spill into the SPECIAL_ADDITION headroom so a
 * "register once" job is not starved by a full buffer.
 *
 * NOTE(review): the dedup scan reads the shared index/buffer without
 * synchronization against concurrent registrations — presumably tolerated
 * (worst case a duplicate entry); confirm against callers' context.
 */
int
rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func, void *data)
{
    rb_thread_t *th = GET_THREAD();
    rb_vm_t *vm = th->vm;
    rb_postponed_job_t *pjob;
    int i, index;

  begin:
    index = vm->postponed_job_index;
    for (i=0; i<index; i++) {
        pjob = &vm->postponed_job_buffer[i];
        if (pjob->func == func) {
            RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(th);
            return 2;
        }
    }
    switch (postponed_job_register(th, vm, flags, func, data, MAX_POSTPONED_JOB + MAX_POSTPONED_JOB_SPECIAL_ADDITION, index)) {
      case PJRR_SUCESS     : return 1;
      case PJRR_FULL       : return 0;
      case PJRR_INTERRUPTED: goto begin;
      default: rb_bug("unreachable\n");
    }
}
01512
/*
 * Drain the postponed-job buffer: pop entries LIFO via CAS on the index and
 * run each callback. Re-entry is prevented by masking the postponed-job
 * interrupt for the duration; errinfo is saved/restored around the run.
 * Runs under a tag so a jump out of a callback stops the drain without
 * unwinding the caller (remaining jobs stay flagged-less in the buffer).
 */
void
rb_postponed_job_flush(rb_vm_t *vm)
{
    rb_thread_t *th = GET_THREAD();
    unsigned long saved_postponed_job_interrupt_mask = th->interrupt_mask & POSTPONED_JOB_INTERRUPT_MASK;
    VALUE saved_errno = th->errinfo;

    th->errinfo = Qnil;
    /* mask postponed-job interrupts so callbacks can't recurse into flush */
    th->interrupt_mask |= POSTPONED_JOB_INTERRUPT_MASK;
    {
        TH_PUSH_TAG(th);
        EXEC_TAG();
        {
            int index;
            while ((index = vm->postponed_job_index) > 0) {
                if (ATOMIC_CAS(vm->postponed_job_index, index, index-1) == index) {
                    rb_postponed_job_t *pjob = &vm->postponed_job_buffer[index-1];
                    (*pjob->func)(pjob->data);
                }
            }
        }
        TH_POP_TAG();
    }
    /* restore the saved mask bit (clears it only if it was clear before) */
    th->interrupt_mask &= ~(saved_postponed_job_interrupt_mask ^ POSTPONED_JOB_INTERRUPT_MASK);
    th->errinfo = saved_errno;
}
01541