#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#include "gc.h"

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#ifdef HAVE_THR_STKSEGMENT
#include <thread.h>
#endif
#if HAVE_FCNTL_H
#include <fcntl.h>
#elif HAVE_SYS_FCNTL_H
#include <sys/fcntl.h>
#endif
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif
#if defined(__native_client__) && defined(NACL_NEWLIB)
# include "nacl/select.h"
#endif
#if HAVE_POLL
#include <poll.h>
#endif
#if defined(HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif

static void native_mutex_lock(pthread_mutex_t *lock);
static void native_mutex_unlock(pthread_mutex_t *lock);
static int native_mutex_trylock(pthread_mutex_t *lock);
static void native_mutex_initialize(pthread_mutex_t *lock);
static void native_mutex_destroy(pthread_mutex_t *lock);
static void native_cond_signal(rb_thread_cond_t *cond);
static void native_cond_broadcast(rb_thread_cond_t *cond);
static void native_cond_wait(rb_thread_cond_t *cond, pthread_mutex_t *mutex);
static void native_cond_initialize(rb_thread_cond_t *cond, int flags);
static void native_cond_destroy(rb_thread_cond_t *cond);
static void rb_thread_wakeup_timer_thread_low(void);
static pthread_t timer_thread_id;

#define RB_CONDATTR_CLOCK_MONOTONIC 1

#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined(HAVE_CLOCKID_T) && \
    defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
    defined(HAVE_CLOCK_GETTIME) && defined(HAVE_PTHREAD_CONDATTR_INIT)
#define USE_MONOTONIC_COND 1
#else
#define USE_MONOTONIC_COND 0
#endif

#if defined(HAVE_POLL) && defined(HAVE_FCNTL) && defined(F_GETFL) && defined(F_SETFL) && defined(O_NONBLOCK) && !defined(__native_client__)
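/* the timer thread can sleep on the wakeup pipes while no Ruby thread is waiting on the GVL */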
# define USE_SLEEPY_TIMER_THREAD 1
#else
# define USE_SLEEPY_TIMER_THREAD 0
#endif

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

static void
gvl_acquire_common(rb_vm_t *vm)
{
    if (vm->gvl.acquired) {

        vm->gvl.waiting++;
        if (vm->gvl.waiting == 1) {
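            /*
             * The first waiter wakes the timer thread so that it switches
             * to polling mode and can periodically prompt a GVL switch
             * (see timer_thread_sleep).
             */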
            rb_thread_wakeup_timer_thread_low();
        }

        while (vm->gvl.acquired) {
            native_cond_wait(&vm->gvl.cond, &vm->gvl.lock);
        }

        vm->gvl.waiting--;

        if (vm->gvl.need_yield) {
            vm->gvl.need_yield = 0;
            native_cond_signal(&vm->gvl.switch_cond);
        }
    }

    vm->gvl.acquired = 1;
}

static void
gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
{
    native_mutex_lock(&vm->gvl.lock);
    gvl_acquire_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}

static void
gvl_release_common(rb_vm_t *vm)
{
    vm->gvl.acquired = 0;
    if (vm->gvl.waiting > 0)
        native_cond_signal(&vm->gvl.cond);
}

static void
gvl_release(rb_vm_t *vm)
{
    native_mutex_lock(&vm->gvl.lock);
    gvl_release_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}

static void
gvl_yield(rb_vm_t *vm, rb_thread_t *th)
{
    native_mutex_lock(&vm->gvl.lock);

    gvl_release_common(vm);

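    /* another thread is already yielding the GVL; wait for it to finish, then reacquire */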
    if (UNLIKELY(vm->gvl.wait_yield)) {
        while (vm->gvl.wait_yield)
            native_cond_wait(&vm->gvl.switch_wait_cond, &vm->gvl.lock);
        goto acquire;
    }

    if (vm->gvl.waiting > 0) {
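        /* hand the GVL over and wait until the waiter has taken it */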
        vm->gvl.need_yield = 1;
        vm->gvl.wait_yield = 1;
        while (vm->gvl.need_yield)
            native_cond_wait(&vm->gvl.switch_cond, &vm->gvl.lock);
        vm->gvl.wait_yield = 0;
    }
    else {
        native_mutex_unlock(&vm->gvl.lock);
        sched_yield();
        native_mutex_lock(&vm->gvl.lock);
    }

    native_cond_broadcast(&vm->gvl.switch_wait_cond);
  acquire:
    gvl_acquire_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}

static void
gvl_init(rb_vm_t *vm)
{
    native_mutex_initialize(&vm->gvl.lock);
    native_cond_initialize(&vm->gvl.cond, RB_CONDATTR_CLOCK_MONOTONIC);
    native_cond_initialize(&vm->gvl.switch_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    native_cond_initialize(&vm->gvl.switch_wait_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    vm->gvl.acquired = 0;
    vm->gvl.waiting = 0;
    vm->gvl.need_yield = 0;
    vm->gvl.wait_yield = 0;
}

static void
gvl_destroy(rb_vm_t *vm)
{
    native_cond_destroy(&vm->gvl.switch_wait_cond);
    native_cond_destroy(&vm->gvl.switch_cond);
    native_cond_destroy(&vm->gvl.cond);
    native_mutex_destroy(&vm->gvl.lock);
}

static void
gvl_atfork(rb_vm_t *vm)
{
    gvl_init(vm);
    gvl_acquire(vm, GET_THREAD());
}

#define NATIVE_MUTEX_LOCK_DEBUG 0

static void
mutex_debug(const char *msg, pthread_mutex_t *lock)
{
    if (NATIVE_MUTEX_LOCK_DEBUG) {
        int r;
        static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;

        if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
        fprintf(stdout, "%s: %p\n", msg, (void *)lock);
        if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
    }
}

static void
native_mutex_lock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("lock", lock);
    if ((r = pthread_mutex_lock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_lock", r);
    }
}

static void
native_mutex_unlock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("unlock", lock);
    if ((r = pthread_mutex_unlock(lock)) != 0) {
        rb_bug_errno("pthread_mutex_unlock", r);
    }
}

static inline int
native_mutex_trylock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("trylock", lock);
    if ((r = pthread_mutex_trylock(lock)) != 0) {
        if (r == EBUSY) {
            return EBUSY;
        }
        else {
            rb_bug_errno("pthread_mutex_trylock", r);
        }
    }
    return 0;
}

static void
native_mutex_initialize(pthread_mutex_t *lock)
{
    int r = pthread_mutex_init(lock, 0);
    mutex_debug("init", lock);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_init", r);
    }
}

static void
native_mutex_destroy(pthread_mutex_t *lock)
{
    int r = pthread_mutex_destroy(lock);
    mutex_debug("destroy", lock);
    if (r != 0) {
        rb_bug_errno("pthread_mutex_destroy", r);
    }
}

static void
native_cond_initialize(rb_thread_cond_t *cond, int flags)
{
#ifdef HAVE_PTHREAD_COND_INIT
    int r;
# if USE_MONOTONIC_COND
    pthread_condattr_t attr;

    pthread_condattr_init(&attr);

    cond->clockid = CLOCK_REALTIME;
    if (flags & RB_CONDATTR_CLOCK_MONOTONIC) {
        r = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
        if (r == 0) {
            cond->clockid = CLOCK_MONOTONIC;
        }
    }

    r = pthread_cond_init(&cond->cond, &attr);
    pthread_condattr_destroy(&attr);
# else
    r = pthread_cond_init(&cond->cond, NULL);
# endif
    if (r != 0) {
        rb_bug_errno("pthread_cond_init", r);
    }

    return;
#endif
}

static void
native_cond_destroy(rb_thread_cond_t *cond)
{
#ifdef HAVE_PTHREAD_COND_INIT
    int r = pthread_cond_destroy(&cond->cond);
    if (r != 0) {
        rb_bug_errno("pthread_cond_destroy", r);
    }
#endif
}
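/*
 * pthread_cond_signal() and pthread_cond_broadcast() can fail spuriously
 * with EAGAIN on some platforms (reportedly OS X 10.7), so the wrappers
 * below simply retry on EAGAIN.
 */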
static void
native_cond_signal(rb_thread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_signal(&cond->cond);
    } while (r == EAGAIN);
    if (r != 0) {
        rb_bug_errno("pthread_cond_signal", r);
    }
}

static void
native_cond_broadcast(rb_thread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_broadcast(&cond->cond);
    } while (r == EAGAIN);
    if (r != 0) {
        rb_bug_errno("pthread_cond_broadcast", r);
    }
}

static void
native_cond_wait(rb_thread_cond_t *cond, pthread_mutex_t *mutex)
{
    int r = pthread_cond_wait(&cond->cond, mutex);
    if (r != 0) {
        rb_bug_errno("pthread_cond_wait", r);
    }
}

static int
native_cond_timedwait(rb_thread_cond_t *cond, pthread_mutex_t *mutex, struct timespec *ts)
{
    int r;
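    /*
     * POSIX says pthread_cond_timedwait() shall not fail with EINTR, but
     * some older kernels reportedly do; retry to hide that from the
     * arch-generic code.
     */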
    do {
        r = pthread_cond_timedwait(&cond->cond, mutex, ts);
    } while (r == EINTR);

    if (r != 0 && r != ETIMEDOUT) {
        rb_bug_errno("pthread_cond_timedwait", r);
    }

    return r;
}

#if SIZEOF_TIME_T == SIZEOF_LONG
typedef unsigned long unsigned_time_t;
#elif SIZEOF_TIME_T == SIZEOF_INT
typedef unsigned int unsigned_time_t;
#elif SIZEOF_TIME_T == SIZEOF_LONG_LONG
typedef unsigned LONG_LONG unsigned_time_t;
#else
# error cannot find an integer type of the same size as time_t.
#endif

#define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))

static struct timespec
native_cond_timeout(rb_thread_cond_t *cond, struct timespec timeout_rel)
{
    int ret;
    struct timeval tv;
    struct timespec timeout;
    struct timespec now;

#if USE_MONOTONIC_COND
    if (cond->clockid == CLOCK_MONOTONIC) {
        ret = clock_gettime(cond->clockid, &now);
        if (ret != 0)
            rb_sys_fail("clock_gettime()");
        goto out;
    }

    if (cond->clockid != CLOCK_REALTIME)
        rb_bug("unsupported clockid %"PRIdVALUE, (SIGNED_VALUE)cond->clockid);
#endif

    ret = gettimeofday(&tv, 0);
    if (ret != 0)
        rb_sys_fail(0);
    now.tv_sec = tv.tv_sec;
    now.tv_nsec = tv.tv_usec * 1000;

#if USE_MONOTONIC_COND
  out:
#endif
    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_nsec;
    timeout.tv_sec += timeout_rel.tv_sec;
    timeout.tv_nsec += timeout_rel.tv_nsec;

    if (timeout.tv_nsec >= 1000*1000*1000) {
        timeout.tv_sec++;
        timeout.tv_nsec -= 1000*1000*1000;
    }

    if (timeout.tv_sec < now.tv_sec)
        timeout.tv_sec = TIMET_MAX;

    return timeout;
}

#define native_cleanup_push pthread_cleanup_push
#define native_cleanup_pop pthread_cleanup_pop
#ifdef HAVE_SCHED_YIELD
#define native_thread_yield() (void)sched_yield()
#else
#define native_thread_yield() ((void)0)
#endif

#if defined(SIGVTALRM) && !defined(__CYGWIN__) && !defined(__SYMBIAN32__)
#define USE_SIGNAL_THREAD_LIST 1
#endif
#ifdef USE_SIGNAL_THREAD_LIST
static void add_signal_thread_list(rb_thread_t *th);
static void remove_signal_thread_list(rb_thread_t *th);
static rb_thread_lock_t signal_thread_list_lock;
#endif

static pthread_key_t ruby_native_thread_key;

static void
null_func(int i)
{
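    /* do nothing: receiving SIGVTALRM just interrupts blocking system calls */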
}

static rb_thread_t *
ruby_thread_from_native(void)
{
    return pthread_getspecific(ruby_native_thread_key);
}

static int
ruby_thread_set_native(rb_thread_t *th)
{
    return pthread_setspecific(ruby_native_thread_key, th) == 0;
}

static void native_thread_init(rb_thread_t *th);

void
Init_native_thread(void)
{
    rb_thread_t *th = GET_THREAD();

    pthread_key_create(&ruby_native_thread_key, NULL);
    th->thread_id = pthread_self();
    native_thread_init(th);
#ifdef USE_SIGNAL_THREAD_LIST
    native_mutex_initialize(&signal_thread_list_lock);
#endif
#ifndef __native_client__
    posix_signal(SIGVTALRM, null_func);
#endif
}

static void
native_thread_init(rb_thread_t *th)
{
    native_cond_initialize(&th->native_thread_data.sleep_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    ruby_thread_set_native(th);
}

static void
native_thread_destroy(rb_thread_t *th)
{
    native_cond_destroy(&th->native_thread_data.sleep_cond);
}

#ifndef USE_THREAD_CACHE
#define USE_THREAD_CACHE 0
#endif

#if USE_THREAD_CACHE
static rb_thread_t *register_cached_thread_and_wait(void);
#endif

#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
#define STACKADDR_AVAILABLE 1
#undef MAINSTACKADDR_AVAILABLE
#define MAINSTACKADDR_AVAILABLE 0
void *pthread_get_stackaddr_np(pthread_t);
size_t pthread_get_stacksize_np(pthread_t);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GETTHRDS_NP
#define STACKADDR_AVAILABLE 1
#endif

#ifndef MAINSTACKADDR_AVAILABLE
# ifdef STACKADDR_AVAILABLE
#   define MAINSTACKADDR_AVAILABLE 1
# else
#   define MAINSTACKADDR_AVAILABLE 0
# endif
#endif

#ifdef STACKADDR_AVAILABLE
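/*
 * Get the address and size of the current thread's machine stack.
 */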
static int
get_stack(void **addr, size_t *size)
{
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) return err;}
#ifdef HAVE_PTHREAD_GETATTR_NP
    pthread_attr_t attr;
    size_t guard = 0;
    STACK_GROW_DIR_DETECTION;
    CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
    CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
    *size -= guard;
    pthread_attr_destroy(&attr);
#elif defined HAVE_PTHREAD_ATTR_GET_NP
    pthread_attr_t attr;
    CHECK_ERR(pthread_attr_init(&attr));
    CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
# endif
    pthread_attr_destroy(&attr);
#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP)
    pthread_t th = pthread_self();
    *addr = pthread_get_stackaddr_np(th);
    *size = pthread_get_stacksize_np(th);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
    stack_t stk;
# if defined HAVE_THR_STKSEGMENT
    CHECK_ERR(thr_stksegment(&stk));
# else
    CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
# endif
    *addr = stk.ss_sp;
    *size = stk.ss_size;
#elif defined HAVE_PTHREAD_GETTHRDS_NP
    pthread_t th = pthread_self();
    struct __pthrdsinfo thinfo;
    char reg[256];
    int regsiz = sizeof(reg);
    CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
                                  &thinfo, sizeof(thinfo),
                                  &reg, &regsiz));
    *addr = thinfo.__pi_stackaddr;
    *size = thinfo.__pi_stacksize;
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
#else
#error STACKADDR_AVAILABLE is defined but not implemented.
#endif
    return 0;
#undef CHECK_ERR
}
#endif

static struct {
    rb_thread_id_t id;
    size_t stack_maxsize;
    VALUE *stack_start;
#ifdef __ia64
    VALUE *register_stack_start;
#endif
} native_main_thread;

#ifdef STACK_END_ADDRESS
extern void *STACK_END_ADDRESS;
#endif

enum {
    RUBY_STACK_SPACE_LIMIT = 1024 * 1024,
    RUBY_STACK_SPACE_RATIO = 5
};

static size_t
space_size(size_t stack_size)
{
    size_t space_size = stack_size / RUBY_STACK_SPACE_RATIO;
    if (space_size > RUBY_STACK_SPACE_LIMIT) {
        return RUBY_STACK_SPACE_LIMIT;
    }
    else {
        return space_size;
    }
}

#undef ruby_init_stack

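/* Record the machine stack bottom used by the Ruby implementation.
 * This must run before Ruby allocates any objects, or the conservative
 * GC may miss references that live on the machine stack. */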
void
ruby_init_stack(volatile VALUE *addr
#ifdef __ia64
    , void *bsp
#endif
    )
{
    native_main_thread.id = pthread_self();
#ifdef STACK_END_ADDRESS
    native_main_thread.stack_start = STACK_END_ADDRESS;
#else
    if (!native_main_thread.stack_start ||
        STACK_UPPER((VALUE *)(void *)&addr,
                    native_main_thread.stack_start > addr,
                    native_main_thread.stack_start < addr)) {
        native_main_thread.stack_start = (VALUE *)addr;
    }
#endif
#ifdef __ia64
    if (!native_main_thread.register_stack_start ||
        (VALUE*)bsp < native_main_thread.register_stack_start) {
        native_main_thread.register_stack_start = (VALUE*)bsp;
    }
#endif
    {
#if defined(PTHREAD_STACK_DEFAULT)
# if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
#  error "PTHREAD_STACK_DEFAULT is too small"
# endif
        size_t size = PTHREAD_STACK_DEFAULT;
#else
        size_t size = RUBY_VM_THREAD_VM_STACK_SIZE;
#endif
        size_t space = space_size(size);
#if MAINSTACKADDR_AVAILABLE
        void* stackaddr;
        STACK_GROW_DIR_DETECTION;
        if (get_stack(&stackaddr, &size) == 0) {
            space = STACK_DIR_UPPER((char *)addr - (char *)stackaddr, (char *)stackaddr - (char *)addr);
        }
        native_main_thread.stack_maxsize = size - space;
#elif defined(HAVE_GETRLIMIT)
        int pagesize = getpagesize();
        struct rlimit rlim;
        STACK_GROW_DIR_DETECTION;
        if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
            size = (size_t)rlim.rlim_cur;
        }
        addr = native_main_thread.stack_start;
        if (IS_STACK_DIR_UPPER()) {
            space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
        }
        else {
            space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;
        }
        native_main_thread.stack_maxsize = space;
#endif
    }

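    /* If addr lies outside the estimated main-stack range, we are likely
     * running on an alternative stack (e.g. a coroutine), so fall back to
     * addr itself with an unknown (zero) size. */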
    {
        void *start, *end;
        STACK_GROW_DIR_DETECTION;

        if (IS_STACK_DIR_UPPER()) {
            start = native_main_thread.stack_start;
            end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
        }
        else {
            start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
            end = native_main_thread.stack_start;
        }

        if ((void *)addr < start || (void *)addr > end) {
            native_main_thread.stack_start = (VALUE *)addr;
            native_main_thread.stack_maxsize = 0;
        }
    }
}

#define CHECK_ERR(expr) \
    {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}

static int
native_thread_init_stack(rb_thread_t *th)
{
    rb_thread_id_t curr = pthread_self();

    if (pthread_equal(curr, native_main_thread.id)) {
        th->machine_stack_start = native_main_thread.stack_start;
        th->machine_stack_maxsize = native_main_thread.stack_maxsize;
    }
    else {
#ifdef STACKADDR_AVAILABLE
        void *start;
        size_t size;

        if (get_stack(&start, &size) == 0) {
            th->machine_stack_start = start;
            th->machine_stack_maxsize = size;
        }
#else
        rb_raise(rb_eNotImpError, "the Ruby VM can initialize the machine stack only in the main thread");
#endif
    }
#ifdef __ia64
    th->machine_register_stack_start = native_main_thread.register_stack_start;
    th->machine_stack_maxsize /= 2;
    th->machine_register_stack_maxsize = th->machine_stack_maxsize;
#endif
    return 0;
}

#ifndef __CYGWIN__
#define USE_NATIVE_THREAD_INIT 1
#endif

static void *
thread_start_func_1(void *th_ptr)
{
#if USE_THREAD_CACHE
  thread_start:
#endif
    {
        rb_thread_t *th = th_ptr;
#if !defined USE_NATIVE_THREAD_INIT
        VALUE stack_start;
#endif

#if defined USE_NATIVE_THREAD_INIT
        native_thread_init_stack(th);
#endif
        native_thread_init(th);

#if defined USE_NATIVE_THREAD_INIT
        thread_start_func_2(th, th->machine_stack_start, rb_ia64_bsp());
#else
        thread_start_func_2(th, &stack_start, rb_ia64_bsp());
#endif
    }
#if USE_THREAD_CACHE
    if (1) {
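        /* park this native thread in the cache and wait to be reused */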
        rb_thread_t *th;
        if ((th = register_cached_thread_and_wait()) != 0) {
            th_ptr = (void *)th;
            th->thread_id = pthread_self();
            goto thread_start;
        }
    }
#endif
    return 0;
}

struct cached_thread_entry {
    volatile rb_thread_t **th_area;
    rb_thread_cond_t *cond;
    struct cached_thread_entry *next;
};

#if USE_THREAD_CACHE
static pthread_mutex_t thread_cache_lock = PTHREAD_MUTEX_INITIALIZER;
struct cached_thread_entry *cached_thread_root;

static rb_thread_t *
register_cached_thread_and_wait(void)
{
    rb_thread_cond_t cond = { PTHREAD_COND_INITIALIZER, };
    volatile rb_thread_t *th_area = 0;
    struct timeval tv;
    struct timespec ts;
    struct cached_thread_entry *entry =
        (struct cached_thread_entry *)malloc(sizeof(struct cached_thread_entry));

    if (entry == 0) {
        return 0;
    }

    gettimeofday(&tv, 0);
    ts.tv_sec = tv.tv_sec + 60;
    ts.tv_nsec = tv.tv_usec * 1000;

    pthread_mutex_lock(&thread_cache_lock);
    {
        entry->th_area = &th_area;
        entry->cond = &cond;
        entry->next = cached_thread_root;
        cached_thread_root = entry;

        native_cond_timedwait(&cond, &thread_cache_lock, &ts);

        {
            struct cached_thread_entry *e = cached_thread_root;
            struct cached_thread_entry *prev = cached_thread_root;

            while (e) {
                if (e == entry) {
                    if (prev == cached_thread_root) {
                        cached_thread_root = e->next;
                    }
                    else {
                        prev->next = e->next;
                    }
                    break;
                }
                prev = e;
                e = e->next;
            }
        }

        free(entry);
        native_cond_destroy(&cond);
    }
    pthread_mutex_unlock(&thread_cache_lock);

    return (rb_thread_t *)th_area;
}
#endif

static int
use_cached_thread(rb_thread_t *th)
{
    int result = 0;
#if USE_THREAD_CACHE
    struct cached_thread_entry *entry;

    if (cached_thread_root) {
        pthread_mutex_lock(&thread_cache_lock);
        entry = cached_thread_root;
        {
            if (cached_thread_root) {
                cached_thread_root = entry->next;
                *entry->th_area = th;
                result = 1;
            }
        }
        if (result) {
            native_cond_signal(entry->cond);
        }
        pthread_mutex_unlock(&thread_cache_lock);
    }
#endif
    return result;
}

static int
native_thread_create(rb_thread_t *th)
{
    int err = 0;

    if (use_cached_thread(th)) {
        thread_debug("create (use cached thread): %p\n", (void *)th);
    }
    else {
        pthread_attr_t attr;
        const size_t stack_size = th->vm->default_params.thread_machine_stack_size;
        const size_t space = space_size(stack_size);

        th->machine_stack_maxsize = stack_size - space;
#ifdef __ia64
        th->machine_stack_maxsize /= 2;
        th->machine_register_stack_maxsize = th->machine_stack_maxsize;
#endif

#ifdef HAVE_PTHREAD_ATTR_INIT
        CHECK_ERR(pthread_attr_init(&attr));

# ifdef PTHREAD_STACK_MIN
        thread_debug("create - stack size: %lu\n", (unsigned long)stack_size);
        CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
# endif

# ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
        CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
# endif
        CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));

        err = pthread_create(&th->thread_id, &attr, thread_start_func_1, th);
#else
        err = pthread_create(&th->thread_id, NULL, thread_start_func_1, th);
#endif
        thread_debug("create: %p (%d)\n", (void *)th, err);
#ifdef HAVE_PTHREAD_ATTR_INIT
        CHECK_ERR(pthread_attr_destroy(&attr));
#endif
    }
    return err;
}

static void
native_thread_join(pthread_t th)
{
    int err = pthread_join(th, 0);
    if (err) {
        rb_raise(rb_eThreadError, "native_thread_join() failed (%d)", err);
    }
}


#if USE_NATIVE_THREAD_PRIORITY

static void
native_thread_apply_priority(rb_thread_t *th)
{
#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
    struct sched_param sp;
    int policy;
    int priority = 0 - th->priority;
    int max, min;
    pthread_getschedparam(th->thread_id, &policy, &sp);
    max = sched_get_priority_max(policy);
    min = sched_get_priority_min(policy);

    if (min > priority) {
        priority = min;
    }
    else if (max < priority) {
        priority = max;
    }

    sp.sched_priority = priority;
    pthread_setschedparam(th->thread_id, policy, &sp);
#else

#endif
}

#endif

static int
native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
{
    return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
}

static void
ubf_pthread_cond_signal(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    thread_debug("ubf_pthread_cond_signal (%p)\n", (void *)th);
    native_cond_signal(&th->native_thread_data.sleep_cond);
}

static void
native_sleep(rb_thread_t *th, struct timeval *timeout_tv)
{
    struct timespec timeout;
    pthread_mutex_t *lock = &th->interrupt_lock;
    rb_thread_cond_t *cond = &th->native_thread_data.sleep_cond;

    if (timeout_tv) {
        struct timespec timeout_rel;

        timeout_rel.tv_sec = timeout_tv->tv_sec;
        timeout_rel.tv_nsec = timeout_tv->tv_usec * 1000;

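        /*
         * On some platforms (e.g. Solaris) cond_timedwait() fails with
         * EINVAL when the timeout lies more than 100,000,000 seconds in
         * the future, so clamp it; the early return behaves like a
         * spurious wakeup, which callers must tolerate anyway.
         */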
        if (timeout_rel.tv_sec > 100000000) {
            timeout_rel.tv_sec = 100000000;
            timeout_rel.tv_nsec = 0;
        }

        timeout = native_cond_timeout(cond, timeout_rel);
    }

    GVL_UNLOCK_BEGIN();
    {
        pthread_mutex_lock(lock);
        th->unblock.func = ubf_pthread_cond_signal;
        th->unblock.arg = th;

        if (RUBY_VM_INTERRUPTED(th)) {
            thread_debug("native_sleep: interrupted before sleep\n");
        }
        else {
            if (!timeout_tv)
                native_cond_wait(cond, lock);
            else
                native_cond_timedwait(cond, lock, &timeout);
        }
        th->unblock.func = 0;
        th->unblock.arg = 0;

        pthread_mutex_unlock(lock);
    }
    GVL_UNLOCK_END();

    thread_debug("native_sleep done\n");
}

#ifdef USE_SIGNAL_THREAD_LIST
struct signal_thread_list {
    rb_thread_t *th;
    struct signal_thread_list *prev;
    struct signal_thread_list *next;
};

static struct signal_thread_list signal_thread_list_anchor = {
    0, 0, 0,
};

#define FGLOCK(lock, body) do { \
    native_mutex_lock(lock); \
    { \
        body; \
    } \
    native_mutex_unlock(lock); \
} while (0)

#if 0
static void
print_signal_list(char *str)
{
    struct signal_thread_list *list =
        signal_thread_list_anchor.next;
    thread_debug("list (%s)> ", str);
    while (list) {
        thread_debug("%p (%p), ", list->th, list->th->thread_id);
        list = list->next;
    }
    thread_debug("\n");
}
#endif

static void
add_signal_thread_list(rb_thread_t *th)
{
    if (!th->native_thread_data.signal_thread_list) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list =
                malloc(sizeof(struct signal_thread_list));

            if (list == 0) {
                fprintf(stderr, "[FATAL] failed to allocate memory\n");
                exit(EXIT_FAILURE);
            }

            list->th = th;

            list->prev = &signal_thread_list_anchor;
            list->next = signal_thread_list_anchor.next;
            if (list->next) {
                list->next->prev = list;
            }
            signal_thread_list_anchor.next = list;
            th->native_thread_data.signal_thread_list = list;
        });
    }
}

static void
remove_signal_thread_list(rb_thread_t *th)
{
    if (th->native_thread_data.signal_thread_list) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list =
                (struct signal_thread_list *)
                th->native_thread_data.signal_thread_list;

            list->prev->next = list->next;
            if (list->next) {
                list->next->prev = list->prev;
            }
            th->native_thread_data.signal_thread_list = 0;
            list->th = 0;
            free(list);
        });
    }
}

static void
ubf_select_each(rb_thread_t *th)
{
    thread_debug("ubf_select_each (%p)\n", (void *)th->thread_id);
    if (th) {
        pthread_kill(th->thread_id, SIGVTALRM);
    }
}

static void
ubf_select(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    add_signal_thread_list(th);
    if (pthread_self() != timer_thread_id)
        rb_thread_wakeup_timer_thread();
    ubf_select_each(th);
}

static void
ping_signal_thread_list(void)
{
    if (signal_thread_list_anchor.next) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list;

            list = signal_thread_list_anchor.next;
            while (list) {
                ubf_select_each(list->th);
                list = list->next;
            }
        });
    }
}

static int
check_signal_thread_list(void)
{
    if (signal_thread_list_anchor.next)
        return 1;
    else
        return 0;
}
#else
#define add_signal_thread_list(th) (void)(th)
#define remove_signal_thread_list(th) (void)(th)
#define ubf_select 0
static void ping_signal_thread_list(void) { return; }
static int check_signal_thread_list(void) { return 0; }
#endif

#define TT_DEBUG 0
#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)

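/* timer tick interval: 100 ms.  Much smaller quanta reportedly hurt
 * user-level thread scheduling on Linux. */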
#define TIME_QUANTUM_USEC (100 * 1000)

#if USE_SLEEPY_TIMER_THREAD
static int timer_thread_pipe[2] = {-1, -1};
static int timer_thread_pipe_low[2] = {-1, -1};
static int timer_thread_pipe_owner_process;

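/* This can run in signal-handler context, so only async-signal-safe
 * calls (getpid(2), write(2)) are used below. */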
static void
rb_thread_wakeup_timer_thread_fd(int fd)
{
    ssize_t result;

    if (timer_thread_pipe_owner_process == getpid()) {
        const char *buff = "!";
      retry:
        if ((result = write(fd, buff, 1)) <= 0) {
            switch (errno) {
              case EINTR: goto retry;
              case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
              case EWOULDBLOCK:
#endif
                break;
              default:
                rb_async_bug_errno("rb_thread_wakeup_timer_thread - write", errno);
            }
        }
        if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
    }
    else {
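        /* ignore: this process (e.g. a forked child) does not own the pipe */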
    }
}

void
rb_thread_wakeup_timer_thread(void)
{
    rb_thread_wakeup_timer_thread_fd(timer_thread_pipe[1]);
}

static void
rb_thread_wakeup_timer_thread_low(void)
{
    rb_thread_wakeup_timer_thread_fd(timer_thread_pipe_low[1]);
}

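/* Drain pending wakeup bytes so poll() stops reporting the pipe as readable. */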
static void
consume_communication_pipe(int fd)
{
#define CCP_READ_BUFF_SIZE 1024
    static char buff[CCP_READ_BUFF_SIZE];
    ssize_t result;

    while (1) {
        result = read(fd, buff, sizeof(buff));
        if (result == 0) {
            return;
        }
        else if (result < 0) {
            switch (errno) {
              case EINTR:
                continue;
              case EAGAIN:
                return;
              default:
                rb_async_bug_errno("consume_communication_pipe: read", errno);
            }
        }
    }
}

static void
close_communication_pipe(int pipes[2])
{
    if (close(pipes[0]) < 0) {
        rb_bug_errno("native_stop_timer_thread - close(ttp[0])", errno);
    }
    if (close(pipes[1]) < 0) {
        rb_bug_errno("native_stop_timer_thread - close(ttp[1])", errno);
    }
    pipes[0] = pipes[1] = -1;
}

static void
set_nonblock(int fd)
{
    int oflags;
    int err;

    oflags = fcntl(fd, F_GETFL);
    if (oflags == -1)
        rb_sys_fail(0);
    oflags |= O_NONBLOCK;
    err = fcntl(fd, F_SETFL, oflags);
    if (err == -1)
        rb_sys_fail(0);
}

static void
setup_communication_pipe_internal(int pipes[2])
{
    int err;

    if (pipes[0] != -1) {
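        /* close stale descriptors (e.g. ones inherited across fork) before creating new ones */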
        close_communication_pipe(pipes);
    }

    err = rb_cloexec_pipe(pipes);
    if (err != 0) {
        rb_bug_errno("setup_communication_pipe: Failed to create communication pipe for timer thread", errno);
    }
    rb_update_max_fd(pipes[0]);
    rb_update_max_fd(pipes[1]);
    set_nonblock(pipes[0]);
    set_nonblock(pipes[1]);
}

static void
setup_communication_pipe(void)
{
    if (timer_thread_pipe_owner_process == getpid()) {
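        /* already set up by this process */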
        return;
    }
    setup_communication_pipe_internal(timer_thread_pipe);
    setup_communication_pipe_internal(timer_thread_pipe_low);

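    /* record this process as the owner of the freshly created pipes */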
    timer_thread_pipe_owner_process = getpid();
}

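/*
 * Let the timer thread sleep until the next tick or an explicit wakeup.
 * While any thread waits on the GVL (or signal delivery is pending) it
 * polls with the TIME_QUANTUM_USEC timeout; otherwise it blocks on the
 * wakeup pipes indefinitely.
 */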
static inline void
timer_thread_sleep(rb_global_vm_lock_t* gvl)
{
    int result;
    int need_polling;
    struct pollfd pollfds[2];

    pollfds[0].fd = timer_thread_pipe[0];
    pollfds[0].events = POLLIN;
    pollfds[1].fd = timer_thread_pipe_low[0];
    pollfds[1].events = POLLIN;

    need_polling = check_signal_thread_list();

    if (gvl->waiting > 0 || need_polling) {
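        /* polling mode: wait one time quantum, watching only the high-priority pipe */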
        result = poll(pollfds, 1, TIME_QUANTUM_USEC/1000);
    }
    else {
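        /* sleepy mode: block on both pipes until a wakeup byte arrives */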
        result = poll(pollfds, ARRAY_SIZE(pollfds), -1);
    }

    if (result == 0) {
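        /* timeout: nothing to consume, fall through to the next tick */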
    }
    else if (result > 0) {
        consume_communication_pipe(timer_thread_pipe[0]);
        consume_communication_pipe(timer_thread_pipe_low[0]);
    }
    else {
        switch (errno) {
          case EBADF:
          case EINVAL:
          case ENOMEM:
          case EFAULT:
            rb_async_bug_errno("thread_timer: poll", errno);
          default:
            ;
        }
    }
}

#else
# define PER_NANO 1000000000
void rb_thread_wakeup_timer_thread(void) {}
static void rb_thread_wakeup_timer_thread_low(void) {}

static pthread_mutex_t timer_thread_lock;
static rb_thread_cond_t timer_thread_cond;

static inline void
timer_thread_sleep(rb_global_vm_lock_t* unused)
{
    struct timespec ts;
    ts.tv_sec = 0;
    ts.tv_nsec = TIME_QUANTUM_USEC * 1000;
    ts = native_cond_timeout(&timer_thread_cond, ts);

    native_cond_timedwait(&timer_thread_cond, &timer_thread_lock, &ts);
}
#endif

static void *
thread_timer(void *p)
{
    rb_global_vm_lock_t *gvl = (rb_global_vm_lock_t *)p;

    if (TT_DEBUG) WRITE_CONST(2, "start timer thread\n");

#if defined(__linux__) && defined(PR_SET_NAME)
    prctl(PR_SET_NAME, "ruby-timer-thr");
#endif

#if !USE_SLEEPY_TIMER_THREAD
    native_mutex_initialize(&timer_thread_lock);
    native_cond_initialize(&timer_thread_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    native_mutex_lock(&timer_thread_lock);
#endif
    while (system_working > 0) {
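        /* run the periodic timer duties */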
        ping_signal_thread_list();
        timer_thread_function(0);

        if (TT_DEBUG) WRITE_CONST(2, "tick\n");

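        /* wait for the next tick or an explicit wakeup */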
        timer_thread_sleep(gvl);
    }
#if !USE_SLEEPY_TIMER_THREAD
    native_mutex_unlock(&timer_thread_lock);
    native_cond_destroy(&timer_thread_cond);
    native_mutex_destroy(&timer_thread_lock);
#endif

    if (TT_DEBUG) WRITE_CONST(2, "finish timer thread\n");
    return NULL;
}

static void
rb_thread_create_timer_thread(void)
{
    if (!timer_thread_id) {
        int err;
#ifdef HAVE_PTHREAD_ATTR_INIT
        pthread_attr_t attr;

        err = pthread_attr_init(&attr);
        if (err != 0) {
            fprintf(stderr, "[FATAL] Failed to initialize pthread attr (errno: %d)\n", err);
            exit(EXIT_FAILURE);
        }
# ifdef PTHREAD_STACK_MIN
        {
            const size_t min_size = (4096 * 4);
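            /* Use at least 16 KB: a bare PTHREAD_STACK_MIN stack has
             * reportedly overflowed for the timer thread on some platforms. */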
            size_t stack_size = PTHREAD_STACK_MIN;
            if (stack_size < min_size) stack_size = min_size;
            if (THREAD_DEBUG) stack_size += BUFSIZ;
            pthread_attr_setstacksize(&attr, stack_size);
        }
# endif
#endif

#if USE_SLEEPY_TIMER_THREAD
        setup_communication_pipe();
#endif

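        /* create the timer thread */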
        if (timer_thread_id) {
            rb_bug("rb_thread_create_timer_thread: Timer thread was already created\n");
        }
#ifdef HAVE_PTHREAD_ATTR_INIT
        err = pthread_create(&timer_thread_id, &attr, thread_timer, &GET_VM()->gvl);
#else
        err = pthread_create(&timer_thread_id, NULL, thread_timer, &GET_VM()->gvl);
#endif
        if (err != 0) {
            fprintf(stderr, "[FATAL] Failed to create timer thread (errno: %d)\n", err);
            exit(EXIT_FAILURE);
        }
#ifdef HAVE_PTHREAD_ATTR_INIT
        pthread_attr_destroy(&attr);
#endif
    }
}

static int
native_stop_timer_thread(int close_anyway)
{
    int stopped;
    stopped = --system_working <= 0;

    if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
    if (stopped) {
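        /* wake the timer thread so it can observe system_working <= 0 and exit */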
        rb_thread_wakeup_timer_thread();
        native_thread_join(timer_thread_id);
        if (TT_DEBUG) fprintf(stderr, "joined timer thread\n");
        timer_thread_id = 0;

        if (close_anyway) {
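            /* Note: the pipes are not closed even here.  Another thread may
             * still be writing a wakeup byte, and the owner-process check in
             * rb_thread_wakeup_timer_thread_fd() keeps stale descriptors
             * harmless after fork. */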
        }
    }
    return stopped;
}

static void
native_reset_timer_thread(void)
{
    if (TT_DEBUG) fprintf(stderr, "reset timer thread\n");
}

#ifdef HAVE_SIGALTSTACK
int
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
{
    void *base;
    size_t size;
    const size_t water_mark = 1024 * 1024;
    STACK_GROW_DIR_DETECTION;

    if (th) {
        size = th->machine_stack_maxsize;
        base = (char *)th->machine_stack_start - STACK_DIR_UPPER(0, size);
    }
#ifdef STACKADDR_AVAILABLE
    else if (get_stack(&base, &size) == 0) {
        STACK_DIR_UPPER((void)(base = (char *)base + size), (void)0);
    }
#endif
    else {
        return 0;
    }
    size /= RUBY_STACK_SPACE_RATIO;
    if (size > water_mark) size = water_mark;
    if (IS_STACK_DIR_UPPER()) {
        if (size > ~(size_t)base+1) size = ~(size_t)base+1;
        if (addr > base && addr <= (void *)((char *)base + size)) return 1;
    }
    else {
        if (size > (size_t)base) size = (size_t)base;
        if (addr > (void *)((char *)base - size) && addr <= base) return 1;
    }
    return 0;
}
#endif

int
rb_reserved_fd_p(int fd)
{
#if USE_SLEEPY_TIMER_THREAD
    if (fd == timer_thread_pipe[0] ||
        fd == timer_thread_pipe[1] ||
        fd == timer_thread_pipe_low[0] ||
        fd == timer_thread_pipe_low[1]) {
        return 1;
    }
    else {
        return 0;
    }
#else
    return 0;
#endif
}

#endif