00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012 #include "ruby/ruby.h"
00013 #include "internal.h"
00014 #include "vm_core.h"
00015 #include "gc.h"
00016 #include "eval_intern.h"
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
/*
 * Select the fiber implementation strategy.
 *
 * FIBER_USE_NATIVE == 1: switch fibers with native context primitives
 * (getcontext/setcontext + swapcontext on POSIX, the Win32 Fiber API on
 * Windows), so a fiber switch does not copy machine stacks.
 * FIBER_USE_NATIVE == 0: fall back to the setjmp/longjmp + stack-copy
 * mechanism shared with continuations.
 */
#if !defined(FIBER_USE_NATIVE)
# if defined(HAVE_GETCONTEXT) && defined(HAVE_SETCONTEXT)
#   if 0
#   elif defined(__NetBSD__)
      /* ucontext-based switching is disabled on this platform
       * (historically unreliable here -- TODO confirm current status). */
#     define FIBER_USE_NATIVE 0
#   elif defined(__sun)
      /* disabled on Solaris -- TODO confirm current status. */
#     define FIBER_USE_NATIVE 0
#   elif defined(__ia64)
      /* ia64 needs the separate register backing store handled by the
       * copying implementation below, so native switching is disabled. */
#     define FIBER_USE_NATIVE 0
#   elif defined(__GNU__)
      /* disabled on GNU/Hurd -- TODO confirm current status. */
#     define FIBER_USE_NATIVE 0
#   else
#     define FIBER_USE_NATIVE 1
#   endif
# elif defined(_WIN32)
#   if _WIN32_WINNT >= 0x0400
      /* CreateFiberEx/SwitchToFiber require NT 4.0 or later. */
#     define FIBER_USE_NATIVE 1
#   endif
# endif
#endif
#if !defined(FIBER_USE_NATIVE)
#define FIBER_USE_NATIVE 0
#endif

#if FIBER_USE_NATIVE
#ifndef _WIN32
#include <unistd.h>
#include <sys/mman.h>
#include <ucontext.h>
#endif
/* Page size is queried at runtime; RB_PAGE_MASK rounds addresses down to
 * a page boundary (assumes pagesize is a power of two). */
#define RB_PAGE_SIZE (pagesize)
#define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
static long pagesize;
#endif

/* When defined, cont_capture() copies only the live portions of the VM
 * stack (value part + control-frame part) instead of the whole array. */
#define CAPTURE_JUST_VALID_VM_STACK 1
00083
/* What kind of saved execution context a rb_context_t represents. */
enum context_type {
    CONTINUATION_CONTEXT = 0,
    FIBER_CONTEXT = 1,
    ROOT_FIBER_CONTEXT = 2
};

/*
 * A saved execution context: the snapshot needed to later re-enter a
 * continuation or fiber.  It embeds a full copy of the owning
 * rb_thread_t plus copies of the VM stack and the machine stack.
 */
typedef struct rb_context_struct {
    enum context_type type;
    VALUE self;                 /* the wrapping Continuation/Fiber object */
    int argc;                   /* args passed on re-entry; -1 => raise `value' */
    VALUE value;                /* value handed to the context on re-entry */
    VALUE *vm_stack;            /* heap copy of the VM stack */
#ifdef CAPTURE_JUST_VALID_VM_STACK
    size_t vm_stack_slen;       /* length of the value-stack part (from stack bottom to cfp->sp) */
    size_t vm_stack_clen;       /* length of the control-frame part (from cfp to stack top) */
#endif
    struct {
        VALUE *stack;           /* heap copy of the machine stack */
        VALUE *stack_src;       /* original location the copy was taken from */
        size_t stack_size;      /* copy length, in VALUEs */
#ifdef __ia64
        VALUE *register_stack;  /* ia64 register backing store copy */
        VALUE *register_stack_src;
        int register_stack_size;
#endif
    } machine;
    rb_thread_t saved_thread;   /* full snapshot of the owning thread */
    rb_jmpbuf_t jmpbuf;         /* re-entry point for the copying implementation */
    rb_ensure_entry_t *ensure_array; /* ensure handlers active at capture time */
    rb_ensure_list_t *ensure_list;   /* ensure list to restore */
} rb_context_t;

/* Fiber lifecycle states. */
enum fiber_status {
    CREATED,
    RUNNING,
    TERMINATED
};

#if FIBER_USE_NATIVE && !defined(_WIN32)
/*
 * Small LIFO cache of machine stacks from terminated fibers so freshly
 * created fibers can reuse them instead of mmap'ing new ones.
 * `terminated_machine_stack' holds the stack of the most recently
 * terminated fiber until the next switch stores or unmaps it.
 */
#define MAX_MACHINE_STACK_CACHE 10
static int machine_stack_cache_index = 0;
typedef struct machine_stack_cache_struct {
    void *ptr;
    size_t size;
} machine_stack_cache_t;
static machine_stack_cache_t machine_stack_cache[MAX_MACHINE_STACK_CACHE];
static machine_stack_cache_t terminated_machine_stack;
#endif
00132
/*
 * A fiber: a saved context plus scheduling links.  All fibers of a
 * thread form a circular doubly-linked list (prev_fiber/next_fiber).
 */
typedef struct rb_fiber_struct {
    rb_context_t cont;          /* embedded context; must be first (code casts between the two) */
    VALUE prev;                 /* fiber to return to on Fiber.yield; Qnil if none */
    enum fiber_status status;
    struct rb_fiber_struct *prev_fiber; /* circular list of this thread's fibers */
    struct rb_fiber_struct *next_fiber;
    /* non-zero once the fiber has been entered via transfer rather than
     * resume -- presumably checked by the transfer/resume API; the code
     * using it is outside this chunk, so confirm against the full file. */
    int transferred;

#if FIBER_USE_NATIVE
#ifdef _WIN32
    void *fib_handle;           /* Win32 fiber handle from CreateFiberEx() */
#else
    ucontext_t context;         /* native context for swapcontext() */
    void *ss_sp;                /* mmap'ed machine stack (NULL after recycling) */
    size_t ss_size;             /* machine stack size in bytes */
#endif
#endif
} rb_fiber_t;

static const rb_data_type_t cont_data_type, fiber_data_type;
static VALUE rb_cContinuation;
static VALUE rb_cFiber;
static VALUE rb_eFiberError;

/* Unwrap a Continuation object into a rb_context_t pointer. */
#define GetContPtr(obj, ptr) \
    TypedData_Get_Struct((obj), rb_context_t, &cont_data_type, (ptr))

/* Unwrap a Fiber object; raises FiberError if the data pointer is
 * still NULL (allocated but never initialized). */
#define GetFiberPtr(obj, ptr) do {\
    TypedData_Get_Struct((obj), rb_fiber_t, &fiber_data_type, (ptr)); \
    if (!(ptr)) rb_raise(rb_eFiberError, "uninitialized fiber"); \
} while (0)

NOINLINE(static VALUE cont_capture(volatile int *stat));

/* A thread with no tag stack is not executing Ruby code. */
#define THREAD_MUST_BE_RUNNING(th) do { \
    if (!(th)->tag) rb_raise(rb_eThreadError, "not running thread"); \
} while (0)
00178
00179 static void
00180 cont_mark(void *ptr)
00181 {
00182 RUBY_MARK_ENTER("cont");
00183 if (ptr) {
00184 rb_context_t *cont = ptr;
00185 rb_gc_mark(cont->value);
00186 rb_thread_mark(&cont->saved_thread);
00187 rb_gc_mark(cont->saved_thread.self);
00188
00189 if (cont->vm_stack) {
00190 #ifdef CAPTURE_JUST_VALID_VM_STACK
00191 rb_gc_mark_locations(cont->vm_stack,
00192 cont->vm_stack + cont->vm_stack_slen + cont->vm_stack_clen);
00193 #else
00194 rb_gc_mark_localtion(cont->vm_stack,
00195 cont->vm_stack, cont->saved_thread.stack_size);
00196 #endif
00197 }
00198
00199 if (cont->machine.stack) {
00200 if (cont->type == CONTINUATION_CONTEXT) {
00201
00202 rb_gc_mark_locations(cont->machine.stack,
00203 cont->machine.stack + cont->machine.stack_size);
00204 }
00205 else {
00206
00207 rb_thread_t *th;
00208 rb_fiber_t *fib = (rb_fiber_t*)cont;
00209 GetThreadPtr(cont->saved_thread.self, th);
00210 if ((th->fiber != cont->self) && fib->status == RUNNING) {
00211 rb_gc_mark_locations(cont->machine.stack,
00212 cont->machine.stack + cont->machine.stack_size);
00213 }
00214 }
00215 }
00216 #ifdef __ia64
00217 if (cont->machine.register_stack) {
00218 rb_gc_mark_locations(cont->machine.register_stack,
00219 cont->machine.register_stack + cont->machine.register_stack_size);
00220 }
00221 #endif
00222 }
00223 RUBY_MARK_LEAVE("cont");
00224 }
00225
00226 static void
00227 cont_free(void *ptr)
00228 {
00229 RUBY_FREE_ENTER("cont");
00230 if (ptr) {
00231 rb_context_t *cont = ptr;
00232 RUBY_FREE_UNLESS_NULL(cont->saved_thread.stack); fflush(stdout);
00233 #if FIBER_USE_NATIVE
00234 if (cont->type == CONTINUATION_CONTEXT) {
00235
00236 ruby_xfree(cont->ensure_array);
00237 RUBY_FREE_UNLESS_NULL(cont->machine.stack);
00238 }
00239 else {
00240
00241 #ifdef _WIN32
00242 if (GET_THREAD()->fiber != cont->self && cont->type != ROOT_FIBER_CONTEXT) {
00243
00244 rb_fiber_t *fib = (rb_fiber_t*)cont;
00245 if (fib->fib_handle) {
00246 DeleteFiber(fib->fib_handle);
00247 }
00248 }
00249 #else
00250 if (GET_THREAD()->fiber != cont->self) {
00251 rb_fiber_t *fib = (rb_fiber_t*)cont;
00252 if (fib->ss_sp) {
00253 if (cont->type == ROOT_FIBER_CONTEXT) {
00254 rb_bug("Illegal root fiber parameter");
00255 }
00256 munmap((void*)fib->ss_sp, fib->ss_size);
00257 }
00258 }
00259 else {
00260
00261
00262
00263 }
00264 #endif
00265 }
00266 #else
00267 ruby_xfree(cont->ensure_array);
00268 RUBY_FREE_UNLESS_NULL(cont->machine.stack);
00269 #endif
00270 #ifdef __ia64
00271 RUBY_FREE_UNLESS_NULL(cont->machine.register_stack);
00272 #endif
00273 RUBY_FREE_UNLESS_NULL(cont->vm_stack);
00274
00275
00276 ruby_xfree(ptr);
00277 }
00278 RUBY_FREE_LEAVE("cont");
00279 }
00280
00281 static size_t
00282 cont_memsize(const void *ptr)
00283 {
00284 const rb_context_t *cont = ptr;
00285 size_t size = 0;
00286 if (cont) {
00287 size = sizeof(*cont);
00288 if (cont->vm_stack) {
00289 #ifdef CAPTURE_JUST_VALID_VM_STACK
00290 size_t n = (cont->vm_stack_slen + cont->vm_stack_clen);
00291 #else
00292 size_t n = cont->saved_thread.stack_size;
00293 #endif
00294 size += n * sizeof(*cont->vm_stack);
00295 }
00296
00297 if (cont->machine.stack) {
00298 size += cont->machine.stack_size * sizeof(*cont->machine.stack);
00299 }
00300 #ifdef __ia64
00301 if (cont->machine.register_stack) {
00302 size += cont->machine.register_stack_size * sizeof(*cont->machine.register_stack);
00303 }
00304 #endif
00305 }
00306 return size;
00307 }
00308
00309 static void
00310 fiber_mark(void *ptr)
00311 {
00312 RUBY_MARK_ENTER("cont");
00313 if (ptr) {
00314 rb_fiber_t *fib = ptr;
00315 rb_gc_mark(fib->prev);
00316 cont_mark(&fib->cont);
00317 }
00318 RUBY_MARK_LEAVE("cont");
00319 }
00320
00321 static void
00322 fiber_link_join(rb_fiber_t *fib)
00323 {
00324 VALUE current_fibval = rb_fiber_current();
00325 rb_fiber_t *current_fib;
00326 GetFiberPtr(current_fibval, current_fib);
00327
00328
00329 fib->next_fiber = current_fib->next_fiber;
00330 fib->prev_fiber = current_fib;
00331 current_fib->next_fiber->prev_fiber = fib;
00332 current_fib->next_fiber = fib;
00333 }
00334
00335 static void
00336 fiber_link_remove(rb_fiber_t *fib)
00337 {
00338 fib->prev_fiber->next_fiber = fib->next_fiber;
00339 fib->next_fiber->prev_fiber = fib->prev_fiber;
00340 }
00341
/*
 * GC free callback for a fiber: releases its local-storage table (the
 * root fiber's table is owned by the thread itself), unlinks it from the
 * circular fiber list, then frees the embedded context.
 */
static void
fiber_free(void *ptr)
{
    RUBY_FREE_ENTER("fiber");
    if (ptr) {
        rb_fiber_t *fib = ptr;
        if (fib->cont.type != ROOT_FIBER_CONTEXT &&
            fib->cont.saved_thread.local_storage) {
            st_free_table(fib->cont.saved_thread.local_storage);
        }
        fiber_link_remove(fib);

        cont_free(&fib->cont);
    }
    RUBY_FREE_LEAVE("fiber");
}
00358
00359 static size_t
00360 fiber_memsize(const void *ptr)
00361 {
00362 const rb_fiber_t *fib = ptr;
00363 size_t size = 0;
00364 if (ptr) {
00365 size = sizeof(*fib);
00366 if (fib->cont.type != ROOT_FIBER_CONTEXT &&
00367 fib->cont.saved_thread.local_storage != NULL) {
00368 size += st_memsize(fib->cont.saved_thread.local_storage);
00369 }
00370 size += cont_memsize(&fib->cont);
00371 }
00372 return size;
00373 }
00374
00375 VALUE
00376 rb_obj_is_fiber(VALUE obj)
00377 {
00378 if (rb_typeddata_is_kind_of(obj, &fiber_data_type)) {
00379 return Qtrue;
00380 }
00381 else {
00382 return Qfalse;
00383 }
00384 }
00385
/*
 * Copy the thread's current machine stack (and, on ia64, its register
 * backing store) into the context.  Handles both stack-growth
 * directions by comparing stack_start and stack_end.
 */
static void
cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
{
    size_t size;

    SET_MACHINE_STACK_END(&th->machine.stack_end);
#ifdef __ia64
    th->machine.register_stack_end = rb_ia64_bsp();
#endif

    /* stack_src is the lower of the two bounds; size is the live span. */
    if (th->machine.stack_start > th->machine.stack_end) {
        size = cont->machine.stack_size = th->machine.stack_start - th->machine.stack_end;
        cont->machine.stack_src = th->machine.stack_end;
    }
    else {
        size = cont->machine.stack_size = th->machine.stack_end - th->machine.stack_start;
        cont->machine.stack_src = th->machine.stack_start;
    }

    /* Reuse the previous buffer when re-capturing. */
    if (cont->machine.stack) {
        REALLOC_N(cont->machine.stack, VALUE, size);
    }
    else {
        cont->machine.stack = ALLOC_N(VALUE, size);
    }

    /* Spill register windows (SPARC) so the memory copy is complete. */
    FLUSH_REGISTER_WINDOWS;
    MEMCPY(cont->machine.stack, cont->machine.stack_src, VALUE, size);

#ifdef __ia64
    rb_ia64_flushrs();
    size = cont->machine.register_stack_size = th->machine.register_stack_end - th->machine.register_stack_start;
    cont->machine.register_stack_src = th->machine.register_stack_start;
    if (cont->machine.register_stack) {
        REALLOC_N(cont->machine.register_stack, VALUE, size);
    }
    else {
        cont->machine.register_stack = ALLOC_N(VALUE, size);
    }

    MEMCPY(cont->machine.register_stack, cont->machine.register_stack_src, VALUE, size);
#endif
}
00429
/* Typed-data table for Continuation objects. */
static const rb_data_type_t cont_data_type = {
    "continuation",
    {cont_mark, cont_free, cont_memsize,},
    NULL, NULL, RUBY_TYPED_FREE_IMMEDIATELY
};
00435
/*
 * Snapshot the whole thread structure into the context.  Machine-stack
 * bounds are cleared: they describe the *live* stack and must not leak
 * into the saved copy.
 */
static void
cont_save_thread(rb_context_t *cont, rb_thread_t *th)
{
    cont->saved_thread = *th;

    cont->saved_thread.machine.stack_start = 0;
    cont->saved_thread.machine.stack_end = 0;
#ifdef __ia64
    cont->saved_thread.machine.register_stack_start = 0;
    cont->saved_thread.machine.register_stack_end = 0;
#endif
}
00450
/*
 * Initialize a freshly allocated context from the running thread.
 * local_storage is zeroed: fiber-locals are set up separately and the
 * snapshot must not alias the thread's live table.
 */
static void
cont_init(rb_context_t *cont, rb_thread_t *th)
{
    cont_save_thread(cont, th);
    cont->saved_thread.local_storage = 0;
}
00458
/*
 * Allocate and initialize a new continuation context wrapped in a
 * `klass' instance.  `contval' is volatile to keep the wrapper object
 * alive (visible to conservative GC) across the initialization.
 */
static rb_context_t *
cont_new(VALUE klass)
{
    rb_context_t *cont;
    volatile VALUE contval;
    rb_thread_t *th = GET_THREAD();

    THREAD_MUST_BE_RUNNING(th);
    contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
    cont->self = contval;
    cont_init(cont, th);
    return cont;
}
00472
/*
 * Capture the current execution state into a new Continuation.
 *
 * Returns twice, setjmp-style: on the initial call *stat is set to 0 and
 * the continuation object is returned; when the continuation is later
 * invoked, control comes back through ruby_setjmp() below, *stat is set
 * to 1, and the value passed to the continuation is returned.
 */
static VALUE
cont_capture(volatile int *stat)
{
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD(), *sth;
    volatile VALUE contval;

    THREAD_MUST_BE_RUNNING(th);
    rb_vm_stack_to_heap(th);
    cont = cont_new(rb_cContinuation);
    contval = cont->self;
    sth = &cont->saved_thread;

#ifdef CAPTURE_JUST_VALID_VM_STACK
    /* Copy the two live slices of the VM stack: the value part (bottom
     * up to cfp->sp, plus any pending mark-stack entries) and the
     * control-frame part (cfp up to the top of the stack array). */
    cont->vm_stack_slen = th->cfp->sp + th->mark_stack_len - th->stack;
    cont->vm_stack_clen = th->stack + th->stack_size - (VALUE*)th->cfp;
    cont->vm_stack = ALLOC_N(VALUE, cont->vm_stack_slen + cont->vm_stack_clen);
    MEMCPY(cont->vm_stack, th->stack, VALUE, cont->vm_stack_slen);
    MEMCPY(cont->vm_stack + cont->vm_stack_slen, (VALUE*)th->cfp, VALUE, cont->vm_stack_clen);
#else
    cont->vm_stack = ALLOC_N(VALUE, th->stack_size);
    MEMCPY(cont->vm_stack, th->stack, VALUE, th->stack_size);
#endif
    /* The snapshot must not own the thread's live stack array. */
    sth->stack = 0;

    cont_save_machine_stack(th, cont);

    /* Record the active ensure handlers (NULL-marker terminated array)
     * so rb_cont_call() can later unwind/rollback to this point.  Each
     * entry gets a marker object for identity comparison. */
    {
        rb_ensure_list_t *p;
        int size = 0;
        rb_ensure_entry_t *entry;
        for (p=th->ensure_list; p; p=p->next)
            size++;
        entry = cont->ensure_array = ALLOC_N(rb_ensure_entry_t,size+1);
        for (p=th->ensure_list; p; p=p->next) {
            if (!p->entry.marker)
                p->entry.marker = rb_ary_tmp_new(0);
            *entry++ = p->entry;
        }
        entry->marker = 0;
    }

    if (ruby_setjmp(cont->jmpbuf)) {
        /* Second return: the continuation was invoked. */
        volatile VALUE value;

        value = cont->value;
        if (cont->argc == -1) rb_exc_raise(value); /* argc == -1 signals "raise this" */
        cont->value = Qnil;
        *stat = 1;
        return value;
    }
    else {
        /* First return: hand the new continuation to the caller. */
        *stat = 0;
        return contval;
    }
}
00530
/*
 * Restore the thread state recorded in `cont' into the running thread.
 * For continuations the saved VM stack is copied back into the current
 * fiber's stack; for fibers the thread simply adopts the fiber's own
 * stack and local storage.
 */
static void
cont_restore_thread(rb_context_t *cont)
{
    rb_thread_t *th = GET_THREAD(), *sth = &cont->saved_thread;

    if (cont->type == CONTINUATION_CONTEXT) {
        /* Re-point the thread at the fiber that was current when the
         * continuation was captured, and copy the saved VM stack back
         * into that fiber's stack array. */
        VALUE fib;

        th->fiber = sth->fiber;
        fib = th->fiber ? th->fiber : th->root_fiber;

        if (fib) {
            rb_fiber_t *fcont;
            GetFiberPtr(fib, fcont);
            th->stack_size = fcont->cont.saved_thread.stack_size;
            th->stack = fcont->cont.saved_thread.stack;
        }
#ifdef CAPTURE_JUST_VALID_VM_STACK
        /* Value part goes at the bottom, control-frame part at the top. */
        MEMCPY(th->stack, cont->vm_stack, VALUE, cont->vm_stack_slen);
        MEMCPY(th->stack + sth->stack_size - cont->vm_stack_clen,
               cont->vm_stack + cont->vm_stack_slen, VALUE, cont->vm_stack_clen);
#else
        MEMCPY(th->stack, cont->vm_stack, VALUE, sth->stack_size);
#endif
    }
    else {
        /* Fiber context: adopt the fiber's stack and storage wholesale. */
        th->stack = sth->stack;
        th->stack_size = sth->stack_size;
        th->local_storage = sth->local_storage;
        th->fiber = cont->self;
    }

    /* Restore the rest of the interpreter state from the snapshot. */
    th->cfp = sth->cfp;
    th->safe_level = sth->safe_level;
    th->raised_flag = sth->raised_flag;
    th->state = sth->state;
    th->status = sth->status;
    th->tag = sth->tag;
    th->protect_tag = sth->protect_tag;
    th->errinfo = sth->errinfo;
    th->first_proc = sth->first_proc;
    th->root_lep = sth->root_lep;
    th->root_svar = sth->root_svar;
    th->ensure_list = sth->ensure_list;

}
00580
00581 #if FIBER_USE_NATIVE
00582 #ifdef _WIN32
/*
 * (Win32) Record the base of the newly entered fiber's machine stack in
 * the thread, rounding the current stack pointer to a page boundary.
 */
static void
fiber_set_stack_location(void)
{
    rb_thread_t *th = GET_THREAD();
    VALUE *ptr;

    SET_MACHINE_STACK_END(&ptr);
    th->machine.stack_start = (void*)(((VALUE)ptr & RB_PAGE_MASK) + STACK_UPPER((void *)&ptr, 0, RB_PAGE_SIZE));
}
00592
/* (Win32) Entry point handed to CreateFiberEx(): fix up the stack base,
 * then run the fiber body. */
static VOID CALLBACK
fiber_entry(void *arg)
{
    fiber_set_stack_location();
    rb_fiber_start();
}
00599 #else
00600
00601
00602
00603
00604
00605
/* mmap flags for fiber machine stacks.  MAP_STACK is used where
 * available, except on FreeBSD where it is deliberately avoided --
 * presumably due to platform-specific problems; confirm against the
 * upstream history. */
#if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
#else
#define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)
#endif
00611
/*
 * Obtain a machine stack for a new fiber: pop one from the recycle
 * cache when available (all cached stacks must be the canonical size),
 * otherwise mmap a fresh one and place a PROT_NONE guard page at the
 * stack-growth end.
 */
static char*
fiber_machine_stack_alloc(size_t size)
{
    char *ptr;

    if (machine_stack_cache_index > 0) {
        /* Cache sizes are stored in VALUE units (see rb_fiber_terminate). */
        if (machine_stack_cache[machine_stack_cache_index - 1].size == (size / sizeof(VALUE))) {
            ptr = machine_stack_cache[machine_stack_cache_index - 1].ptr;
            machine_stack_cache_index--;
            machine_stack_cache[machine_stack_cache_index].ptr = NULL;
            machine_stack_cache[machine_stack_cache_index].size = 0;
        }
        else{
            /* Every cached stack should have the same, canonical size. */
            rb_bug("machine_stack_cache size is not canonicalized");
        }
    }
    else {
        void *page;
        STACK_GROW_DIR_DETECTION;

        errno = 0;
        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);
        if (ptr == MAP_FAILED) {
            rb_raise(rb_eFiberError, "can't alloc machine stack to fiber: %s", strerror(errno));
        }

        /* Guard page at the far end of the growth direction catches overflow. */
        page = ptr + STACK_DIR_UPPER(size - RB_PAGE_SIZE, 0);
        if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
            rb_raise(rb_eFiberError, "mprotect failed");
        }
    }

    return ptr;
}
00648 #endif
00649
/*
 * Create the native execution context for a fiber: a Win32 fiber handle
 * on Windows (retrying once after GC on failure), or an mmap'ed stack
 * plus a makecontext()-prepared ucontext elsewhere.
 */
static void
fiber_initialize_machine_stack_context(rb_fiber_t *fib, size_t size)
{
    rb_thread_t *sth = &fib->cont.saved_thread;

#ifdef _WIN32
    fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
    if (!fib->fib_handle) {
        /* Failure may be due to memory pressure; GC and retry once. */
        rb_gc();
        fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
        if (!fib->fib_handle) {
            rb_raise(rb_eFiberError, "can't create fiber");
        }
    }
    sth->machine.stack_maxsize = size;
#else
    ucontext_t *context = &fib->context;
    char *ptr;
    STACK_GROW_DIR_DETECTION;

    getcontext(context);
    ptr = fiber_machine_stack_alloc(size);
    context->uc_link = NULL;
    context->uc_stack.ss_sp = ptr;
    context->uc_stack.ss_size = size;
    fib->ss_sp = ptr;
    fib->ss_size = size;
    makecontext(context, rb_fiber_start, 0);
    /* stack_start is the base in the growth direction; maxsize excludes
     * the guard page set up by fiber_machine_stack_alloc(). */
    sth->machine.stack_start = (VALUE*)(ptr + STACK_DIR_UPPER(0, size));
    sth->machine.stack_maxsize = size - RB_PAGE_SIZE;
#endif
#ifdef __ia64
    sth->machine.register_stack_maxsize = sth->machine.stack_maxsize;
#endif
}
00686
00687 NOINLINE(static void fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib));
00688
00689 static void
00690 fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib)
00691 {
00692 rb_thread_t *th = GET_THREAD(), *sth = &newfib->cont.saved_thread;
00693
00694 if (newfib->status != RUNNING) {
00695 fiber_initialize_machine_stack_context(newfib, th->vm->default_params.fiber_machine_stack_size);
00696 }
00697
00698
00699 cont_restore_thread(&newfib->cont);
00700 th->machine.stack_maxsize = sth->machine.stack_maxsize;
00701 if (sth->machine.stack_end && (newfib != oldfib)) {
00702 rb_bug("fiber_setcontext: sth->machine.stack_end has non zero value");
00703 }
00704
00705
00706 if (oldfib->status != TERMINATED) {
00707 STACK_GROW_DIR_DETECTION;
00708 SET_MACHINE_STACK_END(&th->machine.stack_end);
00709 if (STACK_DIR_UPPER(0, 1)) {
00710 oldfib->cont.machine.stack_size = th->machine.stack_start - th->machine.stack_end;
00711 oldfib->cont.machine.stack = th->machine.stack_end;
00712 }
00713 else {
00714 oldfib->cont.machine.stack_size = th->machine.stack_end - th->machine.stack_start;
00715 oldfib->cont.machine.stack = th->machine.stack_start;
00716 }
00717 }
00718
00719 oldfib->cont.saved_thread.machine.stack_start = th->machine.stack_start;
00720 th->machine.stack_start = sth->machine.stack_start;
00721
00722 oldfib->cont.saved_thread.machine.stack_end = 0;
00723 #ifndef _WIN32
00724 if (!newfib->context.uc_stack.ss_sp && th->root_fiber != newfib->cont.self) {
00725 rb_bug("non_root_fiber->context.uc_stac.ss_sp should not be NULL");
00726 }
00727 #endif
00728
00729
00730 #ifdef _WIN32
00731 SwitchToFiber(newfib->fib_handle);
00732 #else
00733 swapcontext(&oldfib->context, &newfib->context);
00734 #endif
00735 }
00736 #endif
00737
NOINLINE(NORETURN(static void cont_restore_1(rb_context_t *)));

/*
 * Second phase of continuation restore: re-install the thread state,
 * copy the saved machine stack back over the live stack, then longjmp
 * into the captured jmpbuf.  Must only run once the current stack
 * frame is below (outside) the region being overwritten -- that is
 * arranged by cont_restore_0().  Never returns.
 */
static void
cont_restore_1(rb_context_t *cont)
{
    cont_restore_thread(cont);

#ifdef _M_AMD64
    {
        /* Win64 longjmp unwinds via the Frame field; patch in the
         * current frame so the jump does not trip stack unwinding. */
        jmp_buf buf;
        setjmp(buf);
        ((_JUMP_BUFFER*)(&cont->jmpbuf))->Frame =
            ((_JUMP_BUFFER*)(&buf))->Frame;
    }
#endif
    if (cont->machine.stack_src) {
        FLUSH_REGISTER_WINDOWS;
        MEMCPY(cont->machine.stack_src, cont->machine.stack,
               VALUE, cont->machine.stack_size);
    }

#ifdef __ia64
    if (cont->machine.register_stack_src) {
        MEMCPY(cont->machine.register_stack_src, cont->machine.register_stack,
               VALUE, cont->machine.register_stack_size);
    }
#endif

    ruby_longjmp(cont->jmpbuf, 1);
}
00770
NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)));

#ifdef __ia64
/*
 * ia64: grow the register backing store (RSE) past the region that will
 * be overwritten, by recursing until the current bsp is beyond the
 * saved register stack.  The volatile dummy variables below exist only
 * to force the compiler to allocate many stacked registers per frame.
 */
#define C(a) rse_##a##0, rse_##a##1, rse_##a##2, rse_##a##3, rse_##a##4
#define E(a) rse_##a##0= rse_##a##1= rse_##a##2= rse_##a##3= rse_##a##4
static volatile int C(a), C(b), C(c), C(d), C(e);
static volatile int C(f), C(g), C(h), C(i), C(j);
static volatile int C(k), C(l), C(m), C(n), C(o);
static volatile int C(p), C(q), C(r), C(s), C(t);
#if 0
{}
#endif
int rb_dummy_false = 0;
NORETURN(NOINLINE(static void register_stack_extend(rb_context_t *, VALUE *, VALUE *)));
static void
register_stack_extend(rb_context_t *cont, VALUE *vp, VALUE *curr_bsp)
{
    if (rb_dummy_false) {
        /* Never executed; keeps the register-hogging variables alive. */
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
    }
    if (curr_bsp < cont->machine.register_stack_src+cont->machine.register_stack_size) {
        register_stack_extend(cont, vp, (VALUE*)rb_ia64_bsp());
    }
    cont_restore_0(cont, vp);
}
#undef C
#undef E
#endif
00807
/*
 * First phase of continuation restore: make sure the current C stack
 * frame lies outside the region that cont_restore_1() will overwrite.
 * If it does not, extend the stack past that region -- via alloca when
 * available, otherwise by recursing with a 1024-VALUE pad -- and only
 * then call cont_restore_1().  Handles both stack growth directions.
 */
static void
cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
{
    if (cont->machine.stack_src) {
#ifdef HAVE_ALLOCA
#define STACK_PAD_SIZE 1
#else
#define STACK_PAD_SIZE 1024
#endif
        VALUE space[STACK_PAD_SIZE];

#if !STACK_GROW_DIRECTION
        if (addr_in_prev_frame > &space[0]) {
            /* stack grows downward on this run */
#endif
#if STACK_GROW_DIRECTION <= 0
            volatile VALUE *const end = cont->machine.stack_src;
            if (&space[0] > end) {
# ifdef HAVE_ALLOCA
                /* One alloca jumps straight past the target region; the
                 * read of *sp stops the allocation being optimized away. */
                volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
                space[0] = *sp;
# else
                cont_restore_0(cont, &space[0]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
        else {
            /* stack grows upward on this run */
#endif
#if STACK_GROW_DIRECTION >= 0
            volatile VALUE *const end = cont->machine.stack_src + cont->machine.stack_size;
            if (&space[STACK_PAD_SIZE] < end) {
# ifdef HAVE_ALLOCA
                volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
                space[0] = *sp;
# else
                cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
#endif
    }
    cont_restore_1(cont);
}
#ifdef __ia64
/* ia64 must also extend the register backing store before restoring. */
#define cont_restore_0(cont, vp) register_stack_extend((cont), (vp), (VALUE*)rb_ia64_bsp())
#endif
00859
00860
00861
00862
00863
00864
00865
00866
00867
00868
00869
00870
00871
00872
00873
00874
00875
00876
00877
00878
00879
00880
00881
00882
00883
00884
00885
00886
00887
00888
00889
00890
00891
00892
00893
00894
00895
00896
00897
00898
00899
00900
00901
00902
00903
00904
00905
00906
00907
00908
00909
00910
00911
00912
00913
00914
00915
00916
00917
00918
00919
00920
00921
00922
00923
00924
00925 static VALUE
00926 rb_callcc(VALUE self)
00927 {
00928 volatile int called;
00929 volatile VALUE val = cont_capture(&called);
00930
00931 if (called) {
00932 return val;
00933 }
00934 else {
00935 return rb_yield(val);
00936 }
00937 }
00938
00939 static VALUE
00940 make_passing_arg(int argc, VALUE *argv)
00941 {
00942 switch (argc) {
00943 case 0:
00944 return Qnil;
00945 case 1:
00946 return argv[0];
00947 default:
00948 return rb_ary_new4(argc, argv);
00949 }
00950 }
00951
00952
00953
00954 void
00955 ruby_register_rollback_func_for_ensure(VALUE (*ensure_func)(ANYARGS), VALUE (*rollback_func)(ANYARGS))
00956 {
00957 st_table **table_p = &GET_VM()->ensure_rollback_table;
00958 if (UNLIKELY(*table_p == NULL)) {
00959 *table_p = st_init_numtable();
00960 }
00961 st_insert(*table_p, (st_data_t)ensure_func, (st_data_t)rollback_func);
00962 }
00963
00964 static inline VALUE
00965 lookup_rollback_func(VALUE (*ensure_func)(ANYARGS))
00966 {
00967 st_table *table = GET_VM()->ensure_rollback_table;
00968 st_data_t val;
00969 if (table && st_lookup(table, (st_data_t)ensure_func, &val))
00970 return (VALUE) val;
00971 return Qundef;
00972 }
00973
00974
00975 static inline void
00976 rollback_ensure_stack(VALUE self,rb_ensure_list_t *current,rb_ensure_entry_t *target)
00977 {
00978 rb_ensure_list_t *p;
00979 rb_ensure_entry_t *entry;
00980 size_t i;
00981 size_t cur_size;
00982 size_t target_size;
00983 size_t base_point;
00984 VALUE (*func)(ANYARGS);
00985
00986 cur_size = 0;
00987 for (p=current; p; p=p->next)
00988 cur_size++;
00989 target_size = 0;
00990 for (entry=target; entry->marker; entry++)
00991 target_size++;
00992
00993
00994 p = current;
00995 base_point = cur_size;
00996 while (base_point) {
00997 if (target_size >= base_point &&
00998 p->entry.marker == target[target_size - base_point].marker)
00999 break;
01000 base_point --;
01001 p = p->next;
01002 }
01003
01004
01005 for (i=0; i < target_size - base_point; i++) {
01006 if (!lookup_rollback_func(target[i].e_proc)) {
01007 rb_raise(rb_eRuntimeError, "continuation called from out of critical rb_ensure scope");
01008 }
01009 }
01010
01011 while (cur_size > base_point) {
01012
01013 (*current->entry.e_proc)(current->entry.data2);
01014 current = current->next;
01015 cur_size--;
01016 }
01017
01018 while (i--) {
01019 func = (VALUE (*)(ANYARGS)) lookup_rollback_func(target[i].e_proc);
01020 if ((VALUE)func != Qundef) {
01021 (*func)(target[i].data2);
01022 }
01023 }
01024 }
01025
01026
01027
01028
01029
01030
01031
01032
01033
01034
01035
01036
01037
01038
01039
01040
01041
/*
 * Continuation#call: validate that the jump stays within the capturing
 * thread, protect-tag scope, and fiber; reconcile the ensure stacks;
 * then restore the captured context.  Does not return normally.
 */
static VALUE
rb_cont_call(int argc, VALUE *argv, VALUE contval)
{
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();
    GetContPtr(contval, cont);

    if (cont->saved_thread.self != th->self) {
        rb_raise(rb_eRuntimeError, "continuation called across threads");
    }
    if (cont->saved_thread.protect_tag != th->protect_tag) {
        rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
    }
    if (cont->saved_thread.fiber) {
        rb_fiber_t *fcont;
        /* GetFiberPtr is used for its uninitialized-fiber check;
         * fcont itself is not needed afterwards. */
        GetFiberPtr(cont->saved_thread.fiber, fcont);

        if (th->fiber != cont->saved_thread.fiber) {
            rb_raise(rb_eRuntimeError, "continuation called across fiber");
        }
    }
    rollback_ensure_stack(contval, th->ensure_list, cont->ensure_array);

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    /* Restore the trace state captured with the continuation. */
    th->trace_arg = cont->saved_thread.trace_arg;

    cont_restore_0(cont, &contval);
    return Qnil; /* unreachable: cont_restore_0 does not return */
}
01074
01075
01076
01077
01078
01079
01080
01081
01082
01083
01084
01085
01086
01087
01088
01089
01090
01091
01092
01093
01094
01095
01096
01097
01098
01099
01100
01101
01102
01103
01104
01105
01106
01107
01108
01109
01110
01111
01112
01113
01114
01115
01116
01117
01118
01119
01120
01121
01122
01123
01124
01125
01126
01127
01128
01129
01130
01131
01132
01133
01134
01135
01136
01137
01138
01139
01140
/* Typed-data table for Fiber objects. */
static const rb_data_type_t fiber_data_type = {
    "fiber",
    {fiber_mark, fiber_free, fiber_memsize,},
    NULL, NULL, RUBY_TYPED_FREE_IMMEDIATELY
};
01146
01147 static VALUE
01148 fiber_alloc(VALUE klass)
01149 {
01150 return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
01151 }
01152
/*
 * Allocate and zero the rb_fiber_t payload for a wrapper produced by
 * fiber_alloc(), and snapshot the creating thread into it.  Raises if
 * the wrapper already has a payload (double initialization).
 */
static rb_fiber_t*
fiber_t_alloc(VALUE fibval)
{
    rb_fiber_t *fib;
    rb_thread_t *th = GET_THREAD();

    if (DATA_PTR(fibval) != 0) {
        rb_raise(rb_eRuntimeError, "cannot initialize twice");
    }

    THREAD_MUST_BE_RUNNING(th);
    fib = ALLOC(rb_fiber_t);
    memset(fib, 0, sizeof(rb_fiber_t));
    fib->cont.self = fibval;
    fib->cont.type = FIBER_CONTEXT;
    cont_init(&fib->cont, th);
    fib->prev = Qnil;
    fib->status = CREATED;

    DATA_PTR(fibval) = fib;

    return fib;
}
01176
/*
 * Finish initializing a Fiber object: give its saved thread its own VM
 * stack, hand-build the initial (dummy) control frame the fiber will
 * start from, and record `proc' as the body to invoke.
 */
static VALUE
fiber_init(VALUE fibval, VALUE proc)
{
    rb_fiber_t *fib = fiber_t_alloc(fibval);
    rb_context_t *cont = &fib->cont;
    /* NOTE: `th' is the fiber's *saved* thread snapshot, not the live thread. */
    rb_thread_t *th = &cont->saved_thread;

    /* A fiber keeps its VM stack in the saved thread, not in cont->vm_stack. */
    cont->vm_stack = 0;

    th->stack = 0;
    th->stack_size = 0;

    fiber_link_join(fib);

    th->stack_size = th->vm->default_params.fiber_vm_stack_size / sizeof(VALUE);
    th->stack = ALLOC_N(VALUE, th->stack_size);

    /* Build the bottom control frame at the top of the new stack. */
    th->cfp = (void *)(th->stack + th->stack_size);
    th->cfp--;
    th->cfp->pc = 0;
    th->cfp->sp = th->stack + 1;
#if VM_DEBUG_BP_CHECK
    th->cfp->bp_check = 0;
#endif
    th->cfp->ep = th->stack;
    *th->cfp->ep = VM_ENVVAL_BLOCK_PTR(0);
    th->cfp->self = Qnil;
    th->cfp->klass = Qnil;
    th->cfp->flag = 0;
    th->cfp->iseq = 0;
    th->cfp->proc = 0;
    th->cfp->block_iseq = 0;
    th->cfp->me = 0;
    th->tag = 0;
    th->local_storage = st_init_numtable();

    th->first_proc = proc;

#if !FIBER_USE_NATIVE
    /* Copying implementation re-enters through the thread's root jmpbuf. */
    MEMCPY(&cont->jmpbuf, &th->root_jmpbuf, rb_jmpbuf_t, 1);
#endif

    return fibval;
}
01222
01223
/* Fiber#initialize: the given block becomes the fiber body. */
static VALUE
rb_fiber_init(VALUE fibval)
{
    return fiber_init(fibval, rb_block_proc());
}
01229
/* C API: create a Fiber whose body is the C function `func' wrapped in
 * a proc carrying `obj'. */
VALUE
rb_fiber_new(VALUE (*func)(ANYARGS), VALUE obj)
{
    return fiber_init(fiber_alloc(rb_cFiber), rb_proc_new(func, obj));
}
01235
01236 static VALUE
01237 return_fiber(void)
01238 {
01239 rb_fiber_t *fib;
01240 VALUE curr = rb_fiber_current();
01241 VALUE prev;
01242 GetFiberPtr(curr, fib);
01243
01244 prev = fib->prev;
01245 if (NIL_P(prev)) {
01246 const VALUE root_fiber = GET_THREAD()->root_fiber;
01247
01248 if (root_fiber == curr) {
01249 rb_raise(rb_eFiberError, "can't yield from root fiber");
01250 }
01251 return root_fiber;
01252 }
01253 else {
01254 fib->prev = Qnil;
01255 return prev;
01256 }
01257 }
01258
01259 VALUE rb_fiber_transfer(VALUE fib, int argc, VALUE *argv);
01260
/*
 * Mark the current fiber TERMINATED and switch away to return_fiber(),
 * passing the fiber's final value.  On native POSIX builds the fiber's
 * machine stack cannot be unmapped while we are still running on it, so
 * it is parked in terminated_machine_stack for the next switch to
 * recycle or unmap.  Does not return.
 */
static void
rb_fiber_terminate(rb_fiber_t *fib)
{
    VALUE value = fib->cont.value;
    fib->status = TERMINATED;
#if FIBER_USE_NATIVE && !defined(_WIN32)
    /* Hand the stack off; size is recorded in VALUE units (matches
     * fiber_machine_stack_alloc's cache check). */
    terminated_machine_stack.ptr = fib->ss_sp;
    terminated_machine_stack.size = fib->ss_size / sizeof(VALUE);
    fib->ss_sp = NULL;
    fib->context.uc_stack.ss_sp = NULL;
    fib->cont.machine.stack = NULL;
    fib->cont.machine.stack_size = 0;
#endif
    rb_fiber_transfer(return_fiber(), 1, &value);
}
01277
/*
 * Entry point executed on a fiber's own stack: invoke the fiber body
 * proc with the arguments passed to the first resume.  Exceptions and
 * non-local jumps escaping the body are converted into pending
 * interrupts on the thread.  Ends by terminating the fiber; control
 * never falls off the end.
 */
void
rb_fiber_start(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;
    rb_context_t *cont;
    rb_proc_t *proc;
    int state;

    GetFiberPtr(th->fiber, fib);
    cont = &fib->cont;

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
        int argc;
        /* argc > 1: cont->value is an Array of the arguments;
         * otherwise it is the single argument itself. */
        const VALUE *argv, args = cont->value;
        GetProcPtr(cont->saved_thread.first_proc, proc);
        argv = (argc = cont->argc) > 1 ? RARRAY_CONST_PTR(args) : &args;
        cont->value = Qnil;
        th->errinfo = Qnil;
        th->root_lep = rb_vm_ep_local_ep(proc->block.ep);
        th->root_svar = Qnil;

        fib->status = RUNNING;
        cont->value = rb_vm_invoke_proc(th, proc, argc, argv, 0);
    }
    TH_POP_TAG();

    if (state) {
        /* Propagate escapes to the resuming context as pending interrupts. */
        if (state == TAG_RAISE || state == TAG_FATAL) {
            rb_threadptr_pending_interrupt_enque(th, th->errinfo);
        }
        else {
            VALUE err = rb_vm_make_jump_tag_but_local_jump(state, th->errinfo);
            if (!NIL_P(err))
                rb_threadptr_pending_interrupt_enque(th, err);
        }
        RUBY_VM_SET_INTERRUPT(th);
    }

    rb_fiber_terminate(fib);
    rb_bug("rb_fiber_start: unreachable");
}
01321
/*
 * Lazily create the fiber object that represents the thread's main
 * execution context.  It is born RUNNING (it is what is executing
 * right now) and typed ROOT_FIBER_CONTEXT so resume-on-root is
 * rejected.  On Windows native fibers, the current thread must be
 * converted to a fiber before SwitchToFiber can be used.
 * The fiber ring (prev_fiber/next_fiber) starts as a self-loop.
 */
static rb_fiber_t *
root_fiber_alloc(rb_thread_t *th)
{
    rb_fiber_t *fib;
    /* no need to allocate vm stack */
    fib = fiber_t_alloc(fiber_alloc(rb_cFiber));
    fib->cont.type = ROOT_FIBER_CONTEXT;
#if FIBER_USE_NATIVE
#ifdef _WIN32
    fib->fib_handle = ConvertThreadToFiber(0);
#endif
#endif
    fib->status = RUNNING;
    fib->prev_fiber = fib->next_fiber = fib;

    return fib;
}
01339
/*
 * Return the fiber currently running on this thread, creating the
 * root fiber on first use (side effect: sets both th->root_fiber and
 * th->fiber).
 */
VALUE
rb_fiber_current(void)
{
    rb_thread_t *th = GET_THREAD();
    if (th->fiber == 0) {
        /* save root */
        rb_fiber_t *fib = root_fiber_alloc(th);
        th->root_fiber = th->fiber = fib->cont.self;
    }
    return th->fiber;
}
01351
/*
 * Save the current context into the running fiber and switch to
 * next_fib.  Allocates the root fiber lazily if this thread has never
 * touched fibers.
 *
 * Native path: fiber_setcontext() switches immediately.  When control
 * eventually comes back here, any machine stack parked by
 * rb_fiber_terminate() is recycled into machine_stack_cache (up to
 * MAX_MACHINE_STACK_CACHE entries) or munmap()ed.
 *
 * Non-native path: the machine stack is copied aside and
 * ruby_setjmp() marks the resume point.  The first pass returns
 * Qundef so the caller performs cont_restore_0(); the longjmp back
 * lands in the true branch with this fiber live again.
 *
 * After the switch, th->fiber has changed, so it is re-fetched; an
 * argc of -1 is the in-band marker that an exception (cont.value) is
 * being delivered to this fiber instead of a normal value.
 */
static VALUE
fiber_store(rb_fiber_t *next_fib)
{
    rb_thread_t *th = GET_THREAD();
    rb_fiber_t *fib;

    if (th->fiber) {
        GetFiberPtr(th->fiber, fib);
        cont_save_thread(&fib->cont, th);
    }
    else {
        /* create root fiber */
        fib = root_fiber_alloc(th);
        th->root_fiber = th->fiber = fib->cont.self;
    }

#if !FIBER_USE_NATIVE
    cont_save_machine_stack(th, &fib->cont);
#endif

    if (FIBER_USE_NATIVE || ruby_setjmp(fib->cont.jmpbuf)) {
#if FIBER_USE_NATIVE
        fiber_setcontext(next_fib, fib);
#ifndef _WIN32
        if (terminated_machine_stack.ptr) {
            if (machine_stack_cache_index < MAX_MACHINE_STACK_CACHE) {
                /* keep the dead fiber's stack for reuse */
                machine_stack_cache[machine_stack_cache_index].ptr = terminated_machine_stack.ptr;
                machine_stack_cache[machine_stack_cache_index].size = terminated_machine_stack.size;
                machine_stack_cache_index++;
            }
            else {
                if (terminated_machine_stack.ptr != fib->cont.machine.stack) {
                    munmap((void*)terminated_machine_stack.ptr, terminated_machine_stack.size * sizeof(VALUE));
                }
                else {
                    /* the parked stack is the one we are running on */
                    rb_bug("terminated fiber resumed");
                }
            }
            terminated_machine_stack.ptr = NULL;
            terminated_machine_stack.size = 0;
        }
#endif
#endif
        /* the switch changed th->fiber; reload before reading cont */
        GetFiberPtr(th->fiber, fib);
        if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
        return fib->cont.value;
    }
#if !FIBER_USE_NATIVE
    else {
        /* first (setjmp) pass: tell caller to do cont_restore_0() */
        return Qundef;
    }
#endif
}
01406
/*
 * Core of resume/transfer/yield: hand control (and the argument
 * values) to fibval's fiber.
 *
 * - Switching to the already-running fiber is a no-op that just
 *   echoes the arguments back.
 * - Fibers are thread-local and may not cross a protect tag
 *   (rb_protect barrier); both cases raise FiberError.
 * - Switching to a TERMINATED fiber raises "dead fiber called" --
 *   but the exception must be raised in a live context, so it is
 *   routed to the root fiber (or this fiber's prev) via the argc=-1
 *   exception-passing convention before switching.
 * - is_resume records the current fiber as fibval's return target
 *   (consumed later by return_fiber()).
 *
 * Returns the value(s) passed back by whichever switch eventually
 * returns control to this fiber.
 */
static inline VALUE
fiber_switch(VALUE fibval, int argc, VALUE *argv, int is_resume)
{
    VALUE value;
    rb_fiber_t *fib;
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();

    GetFiberPtr(fibval, fib);
    cont = &fib->cont;

    if (th->fiber == fibval) {
        /* ignore self-switch: just echo the arguments back, packed
         * the same way a real switch would deliver them */
        return make_passing_arg(argc, argv);
    }

    if (cont->saved_thread.self != th->self) {
        rb_raise(rb_eFiberError, "fiber called across threads");
    }
    else if (cont->saved_thread.protect_tag != th->protect_tag) {
        rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
    }
    else if (fib->status == TERMINATED) {
        value = rb_exc_new2(rb_eFiberError, "dead fiber called");
        /* NOTE(review): th->fiber != fibval always holds here (the
         * equal case returned above); the else arm looks unreachable
         * -- confirm before simplifying */
        if (th->fiber != fibval) {
            GetFiberPtr(th->fiber, fib);
            if (fib->status != TERMINATED) rb_exc_raise(value);
            fibval = th->root_fiber;
        }
        else {
            fibval = fib->prev;
            if (NIL_P(fibval)) fibval = th->root_fiber;
        }
        GetFiberPtr(fibval, fib);
        cont = &fib->cont;
        cont->argc = -1;            /* argc == -1: cont->value is an exception */
        cont->value = value;
#if FIBER_USE_NATIVE
        {
            VALUE oldfibval;
            rb_fiber_t *oldfib;
            oldfibval = rb_fiber_current();
            GetFiberPtr(oldfibval, oldfib);
            fiber_setcontext(fib, oldfib);
        }
#else
        cont_restore_0(cont, &value);
#endif
    }

    if (is_resume) {
        fib->prev = rb_fiber_current();
    }
    else {
        /* restore trace context for transfer (resume keeps current) */
        th->trace_arg = cont->saved_thread.trace_arg;
    }

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    value = fiber_store(fib);
#if !FIBER_USE_NATIVE
    if (value == Qundef) {
        /* first setjmp pass: actually perform the stack restore */
        cont_restore_0(cont, &value);
        rb_bug("rb_fiber_resume: unreachable");
    }
#endif
    RUBY_VM_CHECK_INTS(th);

    return value;
}
01481
01482 VALUE
01483 rb_fiber_transfer(VALUE fib, int argc, VALUE *argv)
01484 {
01485 return fiber_switch(fib, argc, argv, 0);
01486 }
01487
01488 VALUE
01489 rb_fiber_resume(VALUE fibval, int argc, VALUE *argv)
01490 {
01491 rb_fiber_t *fib;
01492 GetFiberPtr(fibval, fib);
01493
01494 if (fib->prev != Qnil || fib->cont.type == ROOT_FIBER_CONTEXT) {
01495 rb_raise(rb_eFiberError, "double resume");
01496 }
01497 if (fib->transferred != 0) {
01498 rb_raise(rb_eFiberError, "cannot resume transferred Fiber");
01499 }
01500
01501 return fiber_switch(fibval, argc, argv, 1);
01502 }
01503
01504 VALUE
01505 rb_fiber_yield(int argc, VALUE *argv)
01506 {
01507 return rb_fiber_transfer(return_fiber(), argc, argv);
01508 }
01509
01510 void
01511 rb_fiber_reset_root_local_storage(VALUE thval)
01512 {
01513 rb_thread_t *th;
01514 rb_fiber_t *fib;
01515
01516 GetThreadPtr(thval, th);
01517 if (th->root_fiber && th->root_fiber != th->fiber) {
01518 GetFiberPtr(th->root_fiber, fib);
01519 th->local_storage = fib->cont.saved_thread.local_storage;
01520 }
01521 }
01522
01523
01524
01525
01526
01527
01528
01529
01530
01531
01532 VALUE
01533 rb_fiber_alive_p(VALUE fibval)
01534 {
01535 rb_fiber_t *fib;
01536 GetFiberPtr(fibval, fib);
01537 return fib->status != TERMINATED ? Qtrue : Qfalse;
01538 }
01539
01540
01541
01542
01543
01544
01545
01546
01547
01548
01549
01550
01551
01552
01553
01554
01555 static VALUE
01556 rb_fiber_m_resume(int argc, VALUE *argv, VALUE fib)
01557 {
01558 return rb_fiber_resume(fib, argc, argv);
01559 }
01560
01561
01562
01563
01564
01565
01566
01567
01568
01569
01570
01571
01572
01573
01574
01575
01576
01577
01578
01579
01580
01581
01582
01583
01584
01585
01586
01587
01588
01589
01590
01591
01592
01593
01594
01595
01596
01597
01598
01599
01600
01601
01602
01603
01604
01605
01606 static VALUE
01607 rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fibval)
01608 {
01609 rb_fiber_t *fib;
01610 GetFiberPtr(fibval, fib);
01611 fib->transferred = 1;
01612 return rb_fiber_transfer(fibval, argc, argv);
01613 }
01614
01615
01616
01617
01618
01619
01620
01621
01622
01623
01624
01625 static VALUE
01626 rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
01627 {
01628 return rb_fiber_yield(argc, argv);
01629 }
01630
01631
01632
01633
01634
01635
01636
01637
01638
01639 static VALUE
01640 rb_fiber_s_current(VALUE klass)
01641 {
01642 return rb_fiber_current();
01643 }
01644
01645
01646
01647
01648
01649
01650
01651
01652
01653
01654
01655
01656
01657
01658
01659
/*
 * Register the Fiber class, its allocator, FiberError, and the always
 * available methods (Fiber.yield, #initialize, #resume).
 *
 * With native fibers, the system page size is cached first (used via
 * RB_PAGE_SIZE for stack sizing/alignment) and the machine stack end
 * is recorded for the main thread.  #transfer, #alive? and .current
 * are registered separately in ruby_Init_Fiber_as_Coroutine().
 */
void
Init_Cont(void)
{
#if FIBER_USE_NATIVE
    rb_thread_t *th = GET_THREAD();

#ifdef _WIN32
    SYSTEM_INFO info;
    GetSystemInfo(&info);
    pagesize = info.dwPageSize;
#else /* not WIN32 */
    pagesize = sysconf(_SC_PAGESIZE);
#endif
    SET_MACHINE_STACK_END(&th->machine.stack_end);
#endif

    rb_cFiber = rb_define_class("Fiber", rb_cObject);
    rb_define_alloc_func(rb_cFiber, fiber_alloc);
    rb_eFiberError = rb_define_class("FiberError", rb_eStandardError);
    rb_define_singleton_method(rb_cFiber, "yield", rb_fiber_s_yield, -1);
    rb_define_method(rb_cFiber, "initialize", rb_fiber_init, 0);
    rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
}
01683
01684 RUBY_SYMBOL_EXPORT_BEGIN
01685
/*
 * Register the Continuation class and Kernel#callcc.  Continuations
 * cannot be allocated directly (alloc and .new are undefined); they
 * are only produced by callcc.  #call and #[] are aliases.
 */
void
ruby_Init_Continuation_body(void)
{
    rb_cContinuation = rb_define_class("Continuation", rb_cObject);
    rb_undef_alloc_func(rb_cContinuation);
    rb_undef_method(CLASS_OF(rb_cContinuation), "new");
    rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
    rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
    rb_define_global_function("callcc", rb_callcc, 0);
}
01696
/*
 * Register the coroutine-style Fiber API (#transfer, #alive?,
 * Fiber.current), kept separate from the core methods set up in
 * Init_Cont().
 */
void
ruby_Init_Fiber_as_Coroutine(void)
{
    rb_define_method(rb_cFiber, "transfer", rb_fiber_m_transfer, -1);
    rb_define_method(rb_cFiber, "alive?", rb_fiber_alive_p, 0);
    rb_define_singleton_method(rb_cFiber, "current", rb_fiber_s_current, 0);
}
01704
01705 RUBY_SYMBOL_EXPORT_END
01706