00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012 #ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
00013
00014 #include "gc.h"
00015
00016 #ifdef HAVE_SYS_RESOURCE_H
00017 #include <sys/resource.h>
00018 #endif
00019 #ifdef HAVE_THR_STKSEGMENT
00020 #include <thread.h>
00021 #endif
00022 #if HAVE_FCNTL_H
00023 #include <fcntl.h>
00024 #elif HAVE_SYS_FCNTL_H
00025 #include <sys/fcntl.h>
00026 #endif
00027 #ifdef HAVE_SYS_PRCTL_H
00028 #include <sys/prctl.h>
00029 #endif
00030 #if defined(__native_client__) && defined(NACL_NEWLIB)
00031 # include "nacl/select.h"
00032 #endif
00033 #if defined(HAVE_SYS_TIME_H)
00034 #include <sys/time.h>
00035 #endif
00036
00037 static void native_mutex_lock(pthread_mutex_t *lock);
00038 static void native_mutex_unlock(pthread_mutex_t *lock);
00039 static int native_mutex_trylock(pthread_mutex_t *lock);
00040 static void native_mutex_initialize(pthread_mutex_t *lock);
00041 static void native_mutex_destroy(pthread_mutex_t *lock);
00042 static void native_cond_signal(rb_nativethread_cond_t *cond);
00043 static void native_cond_broadcast(rb_nativethread_cond_t *cond);
00044 static void native_cond_wait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex);
00045 static void native_cond_initialize(rb_nativethread_cond_t *cond, int flags);
00046 static void native_cond_destroy(rb_nativethread_cond_t *cond);
00047 static void rb_thread_wakeup_timer_thread_low(void);
00048 static pthread_t timer_thread_id;
00049
00050 #define RB_CONDATTR_CLOCK_MONOTONIC 1
00051
00052 #if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined(HAVE_CLOCKID_T) && \
00053 defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
00054 defined(HAVE_CLOCK_GETTIME) && defined(HAVE_PTHREAD_CONDATTR_INIT)
00055 #define USE_MONOTONIC_COND 1
00056 #else
00057 #define USE_MONOTONIC_COND 0
00058 #endif
00059
00060 #if defined(HAVE_POLL) && defined(HAVE_FCNTL) && defined(F_GETFL) && defined(F_SETFL) && defined(O_NONBLOCK) && !defined(__native_client__)
00061
00062 # define USE_SLEEPY_TIMER_THREAD 1
00063 #else
00064 # define USE_SLEEPY_TIMER_THREAD 0
00065 #endif
00066
/*
 * Acquire the GVL (Global VM Lock).  Caller must hold vm->gvl.lock.
 *
 * If the GVL is currently held, register as a waiter and block on
 * vm->gvl.cond until the holder releases it.
 */
static void
gvl_acquire_common(rb_vm_t *vm)
{
    if (vm->gvl.acquired) {
        /* GVL is taken; we must wait. */
        vm->gvl.waiting++;
        if (vm->gvl.waiting == 1) {
            /*
             * Only the first waiter pokes the timer thread; the timer
             * thread will then periodically interrupt the GVL holder so
             * that it eventually yields.
             */
            rb_thread_wakeup_timer_thread_low();
        }

        while (vm->gvl.acquired) {
            native_cond_wait(&vm->gvl.cond, &vm->gvl.lock);
        }

        vm->gvl.waiting--;

        /* A releaser blocked in gvl_yield() waits on switch_cond until a
         * waiter acknowledges by clearing need_yield (see gvl_yield). */
        if (vm->gvl.need_yield) {
            vm->gvl.need_yield = 0;
            native_cond_signal(&vm->gvl.switch_cond);
        }
    }

    vm->gvl.acquired = 1;
}
00096
/*
 * Take vm->gvl.lock and acquire the GVL.
 * NOTE: `th` is unused in this implementation (kept for interface
 * parity with the other platform backends).
 */
static void
gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
{
    native_mutex_lock(&vm->gvl.lock);
    gvl_acquire_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}
00104
/*
 * Mark the GVL released and wake one waiter, if any.
 * Caller must hold vm->gvl.lock.
 */
static void
gvl_release_common(rb_vm_t *vm)
{
    vm->gvl.acquired = 0;
    if (vm->gvl.waiting > 0)
        native_cond_signal(&vm->gvl.cond);
}
00112
/* Release the GVL (takes and releases vm->gvl.lock around the state change). */
static void
gvl_release(rb_vm_t *vm)
{
    native_mutex_lock(&vm->gvl.lock);
    gvl_release_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}
00120
/*
 * Voluntarily give up the GVL so another ready thread can run, then
 * re-acquire it.
 *
 * The need_yield/wait_yield flags implement a direct hand-off: the
 * yielder blocks on switch_cond until a woken waiter acknowledges (in
 * gvl_acquire_common), which prevents the yielder from instantly
 * re-taking the GVL before anyone else runs.
 * NOTE: `th` is unused in this implementation.
 */
static void
gvl_yield(rb_vm_t *vm, rb_thread_t *th)
{
    native_mutex_lock(&vm->gvl.lock);

    gvl_release_common(vm);

    /* An other thread is already yielding; let it finish first. */
    if (UNLIKELY(vm->gvl.wait_yield)) {
        while (vm->gvl.wait_yield)
            native_cond_wait(&vm->gvl.switch_wait_cond, &vm->gvl.lock);
        goto acquire;
    }

    if (vm->gvl.waiting > 0) {
        /* Hand the GVL to a waiter and wait for its acknowledgement. */
        vm->gvl.need_yield = 1;
        vm->gvl.wait_yield = 1;
        while (vm->gvl.need_yield)
            native_cond_wait(&vm->gvl.switch_cond, &vm->gvl.lock);
        vm->gvl.wait_yield = 0;
    }
    else {
        /* No Ruby-level waiter: yield the CPU at the OS level instead. */
        native_mutex_unlock(&vm->gvl.lock);
        sched_yield();
        native_mutex_lock(&vm->gvl.lock);
    }

    native_cond_broadcast(&vm->gvl.switch_wait_cond);
  acquire:
    gvl_acquire_common(vm);
    native_mutex_unlock(&vm->gvl.lock);
}
00154
/*
 * Initialize GVL state (lock, condition variables, bookkeeping flags).
 * Also called from gvl_atfork() to reset the GVL in a forked child.
 */
static void
gvl_init(rb_vm_t *vm)
{
    native_mutex_initialize(&vm->gvl.lock);
    native_cond_initialize(&vm->gvl.cond, RB_CONDATTR_CLOCK_MONOTONIC);
    native_cond_initialize(&vm->gvl.switch_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    native_cond_initialize(&vm->gvl.switch_wait_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    vm->gvl.acquired = 0;
    vm->gvl.waiting = 0;
    vm->gvl.need_yield = 0;
    vm->gvl.wait_yield = 0;
}
00167
/* Destroy GVL primitives, in reverse order of gvl_init(). */
static void
gvl_destroy(rb_vm_t *vm)
{
    native_cond_destroy(&vm->gvl.switch_wait_cond);
    native_cond_destroy(&vm->gvl.switch_cond);
    native_cond_destroy(&vm->gvl.cond);
    native_mutex_destroy(&vm->gvl.lock);
}
00176
/*
 * After fork(): re-initialize the GVL from scratch (the pre-fork state
 * may be inconsistent in the child) and take it for the current thread.
 */
static void
gvl_atfork(rb_vm_t *vm)
{
    gvl_init(vm);
    gvl_acquire(vm, GET_THREAD());
}
00183
#define NATIVE_MUTEX_LOCK_DEBUG 0

/*
 * Trace helper for the native_mutex_* wrappers: when
 * NATIVE_MUTEX_LOCK_DEBUG is enabled, print `msg` and the mutex address
 * to stdout, serialized by an internal debug lock.
 */
static void
mutex_debug(const char *msg, pthread_mutex_t *lock)
{
    static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;

    if (!NATIVE_MUTEX_LOCK_DEBUG) return;

    if (pthread_mutex_lock(&dbglock) != 0) exit(EXIT_FAILURE);
    fprintf(stdout, "%s: %p\n", msg, (void *)lock);
    if (pthread_mutex_unlock(&dbglock) != 0) exit(EXIT_FAILURE);
}
00198
/* Lock `lock`; any pthread error is fatal (rb_bug_errno). */
static void
native_mutex_lock(pthread_mutex_t *lock)
{
    int rc;

    mutex_debug("lock", lock);
    rc = pthread_mutex_lock(lock);
    if (rc != 0) {
        rb_bug_errno("pthread_mutex_lock", rc);
    }
}
00208
/* Unlock `lock`; any pthread error is fatal (rb_bug_errno). */
static void
native_mutex_unlock(pthread_mutex_t *lock)
{
    int rc;

    mutex_debug("unlock", lock);
    rc = pthread_mutex_unlock(lock);
    if (rc != 0) {
        rb_bug_errno("pthread_mutex_unlock", rc);
    }
}
00218
/*
 * Try to lock `lock` without blocking.
 * Returns 0 on success, EBUSY if it is already held; any other pthread
 * error is fatal.
 */
static inline int
native_mutex_trylock(pthread_mutex_t *lock)
{
    int rc;

    mutex_debug("trylock", lock);
    rc = pthread_mutex_trylock(lock);
    if (rc != 0 && rc != EBUSY) {
        rb_bug_errno("pthread_mutex_trylock", rc);
    }
    return rc == EBUSY ? EBUSY : 0;
}
00234
/* Initialize `lock` with default attributes; failure is fatal. */
static void
native_mutex_initialize(pthread_mutex_t *lock)
{
    int rc;

    rc = pthread_mutex_init(lock, NULL);
    mutex_debug("init", lock);
    if (rc != 0) {
        rb_bug_errno("pthread_mutex_init", rc);
    }
}
00244
/* Destroy `lock`; failure is fatal. */
static void
native_mutex_destroy(pthread_mutex_t *lock)
{
    int rc;

    rc = pthread_mutex_destroy(lock);
    mutex_debug("destroy", lock);
    if (rc != 0) {
        rb_bug_errno("pthread_mutex_destroy", rc);
    }
}
00254
/*
 * Initialize `cond`.  When RB_CONDATTR_CLOCK_MONOTONIC is set in
 * `flags` and the platform supports it, the condvar uses
 * CLOCK_MONOTONIC; cond->clockid records which clock was actually
 * selected so native_cond_timeout() can build matching deadlines.
 * A no-op when HAVE_PTHREAD_COND_INIT is not defined.
 */
static void
native_cond_initialize(rb_nativethread_cond_t *cond, int flags)
{
#ifdef HAVE_PTHREAD_COND_INIT
    int r;
# if USE_MONOTONIC_COND
    pthread_condattr_t attr;

    pthread_condattr_init(&attr);

    /* Default to the realtime clock; upgrade only if setclock succeeds. */
    cond->clockid = CLOCK_REALTIME;
    if (flags & RB_CONDATTR_CLOCK_MONOTONIC) {
        r = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
        if (r == 0) {
            cond->clockid = CLOCK_MONOTONIC;
        }
    }

    r = pthread_cond_init(&cond->cond, &attr);
    pthread_condattr_destroy(&attr);
# else
    r = pthread_cond_init(&cond->cond, NULL);
# endif
    if (r != 0) {
        rb_bug_errno("pthread_cond_init", r);
    }

    return;
#endif
}
00285
/* Destroy `cond`; failure is fatal.  No-op without HAVE_PTHREAD_COND_INIT. */
static void
native_cond_destroy(rb_nativethread_cond_t *cond)
{
#ifdef HAVE_PTHREAD_COND_INIT
    int r = pthread_cond_destroy(&cond->cond);
    if (r != 0) {
        rb_bug_errno("pthread_cond_destroy", r);
    }
#endif
}
00296
00297
00298
00299
00300
00301
00302
00303
00304
00305
00306
/*
 * Signal `cond`.  POSIX does not document EAGAIN for
 * pthread_cond_signal, but the call is retried on EAGAIN defensively
 * (some implementations have been observed to return it); any other
 * error is fatal.
 */
static void
native_cond_signal(rb_nativethread_cond_t *cond)
{
    int r;
    do {
        r = pthread_cond_signal(&cond->cond);
    } while (r == EAGAIN);
    if (r != 0) {
        rb_bug_errno("pthread_cond_signal", r);
    }
}
00318
00319 static void
00320 native_cond_broadcast(rb_nativethread_cond_t *cond)
00321 {
00322 int r;
00323 do {
00324 r = pthread_cond_broadcast(&cond->cond);
00325 } while (r == EAGAIN);
00326 if (r != 0) {
00327 rb_bug_errno("native_cond_broadcast", r);
00328 }
00329 }
00330
/* Wait on `cond` with `mutex` held (no timeout); failure is fatal. */
static void
native_cond_wait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex)
{
    int r = pthread_cond_wait(&cond->cond, mutex);
    if (r != 0) {
        rb_bug_errno("pthread_cond_wait", r);
    }
}
00339
/*
 * Wait on `cond` until signaled or the absolute deadline `ts` passes.
 * Returns 0 on wakeup or ETIMEDOUT on timeout; any other error is
 * fatal.  `ts` must have been built against the same clock the condvar
 * was initialized with (see native_cond_timeout).
 */
static int
native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, struct timespec *ts)
{
    int r;

    /*
     * POSIX specifies that pthread_cond_timedwait never returns EINTR,
     * but the loop retries on it defensively for non-conforming
     * implementations.
     */
    do {
        r = pthread_cond_timedwait(&cond->cond, mutex, ts);
    } while (r == EINTR);

    if (r != 0 && r != ETIMEDOUT) {
        rb_bug_errno("pthread_cond_timedwait", r);
    }

    return r;
}
00361
/*
 * Convert the relative timeout `timeout_rel` into an absolute deadline
 * on the clock `cond` was initialized with (CLOCK_MONOTONIC when
 * available and selected, otherwise the realtime clock via
 * gettimeofday).  Suitable for passing to native_cond_timedwait().
 */
static struct timespec
native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel)
{
    int ret;
    struct timeval tv;
    struct timespec timeout;
    struct timespec now;

#if USE_MONOTONIC_COND
    if (cond->clockid == CLOCK_MONOTONIC) {
        ret = clock_gettime(cond->clockid, &now);
        if (ret != 0)
            rb_sys_fail("clock_gettime()");
        goto out;
    }

    if (cond->clockid != CLOCK_REALTIME)
        rb_bug("unsupported clockid %"PRIdVALUE, (SIGNED_VALUE)cond->clockid);
#endif

    ret = gettimeofday(&tv, 0);
    if (ret != 0)
        rb_sys_fail(0);
    now.tv_sec = tv.tv_sec;
    now.tv_nsec = tv.tv_usec * 1000;

#if USE_MONOTONIC_COND
  out:
#endif
    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_nsec;
    timeout.tv_sec += timeout_rel.tv_sec;
    timeout.tv_nsec += timeout_rel.tv_nsec;

    /* Normalize the nanosecond field (carry into seconds). */
    if (timeout.tv_nsec >= 1000*1000*1000) {
        timeout.tv_sec++;
        timeout.tv_nsec -= 1000*1000*1000;
    }

    /* On signed time_t overflow, clamp to the far future. */
    if (timeout.tv_sec < now.tv_sec)
        timeout.tv_sec = TIMET_MAX;

    return timeout;
}
00406
00407 #define native_cleanup_push pthread_cleanup_push
00408 #define native_cleanup_pop pthread_cleanup_pop
00409 #ifdef HAVE_SCHED_YIELD
00410 #define native_thread_yield() (void)sched_yield()
00411 #else
00412 #define native_thread_yield() ((void)0)
00413 #endif
00414
00415 #if defined(SIGVTALRM) && !defined(__CYGWIN__) && !defined(__SYMBIAN32__)
00416 #define USE_SIGNAL_THREAD_LIST 1
00417 #endif
00418 #ifdef USE_SIGNAL_THREAD_LIST
00419 static void add_signal_thread_list(rb_thread_t *th);
00420 static void remove_signal_thread_list(rb_thread_t *th);
00421 static rb_nativethread_lock_t signal_thread_list_lock;
00422 #endif
00423
00424 static pthread_key_t ruby_native_thread_key;
00425
/*
 * Empty signal handler installed for SIGVTALRM (see Init_native_thread):
 * the signal is delivered only to interrupt blocking system calls, so
 * the handler itself does nothing.
 */
static void
null_func(int i)
{
    (void)i;  /* unused; silences -Wunused-parameter */
}
00431
/* Return the rb_thread_t stored in this native thread's TLS slot
 * (NULL if none was set via ruby_thread_set_native). */
static rb_thread_t *
ruby_thread_from_native(void)
{
    return pthread_getspecific(ruby_native_thread_key);
}
00437
/* Bind `th` to the current native thread's TLS slot.
 * Returns non-zero on success, 0 on failure. */
static int
ruby_thread_set_native(rb_thread_t *th)
{
    return pthread_setspecific(ruby_native_thread_key, th) == 0;
}
00443
00444 static void native_thread_init(rb_thread_t *th);
00445
00446 void
00447 Init_native_thread(void)
00448 {
00449 rb_thread_t *th = GET_THREAD();
00450
00451 pthread_key_create(&ruby_native_thread_key, NULL);
00452 th->thread_id = pthread_self();
00453 native_thread_init(th);
00454 #ifdef USE_SIGNAL_THREAD_LIST
00455 native_mutex_initialize(&signal_thread_list_lock);
00456 #endif
00457 #ifndef __native_client__
00458 posix_signal(SIGVTALRM, null_func);
00459 #endif
00460 }
00461
/* Per-thread native setup: create the sleep condvar used by
 * native_sleep() and bind `th` to the current native thread's TLS. */
static void
native_thread_init(rb_thread_t *th)
{
    native_cond_initialize(&th->native_thread_data.sleep_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    ruby_thread_set_native(th);
}
00468
/* Tear down what native_thread_init() created. */
static void
native_thread_destroy(rb_thread_t *th)
{
    native_cond_destroy(&th->native_thread_data.sleep_cond);
}
00474
00475 #ifndef USE_THREAD_CACHE
00476 #define USE_THREAD_CACHE 0
00477 #endif
00478
00479 #if USE_THREAD_CACHE
00480 static rb_thread_t *register_cached_thread_and_wait(void);
00481 #endif
00482
00483 #if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
00484 #define STACKADDR_AVAILABLE 1
00485 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
00486 #define STACKADDR_AVAILABLE 1
00487 #undef MAINSTACKADDR_AVAILABLE
00488 #define MAINSTACKADDR_AVAILABLE 1
00489 void *pthread_get_stackaddr_np(pthread_t);
00490 size_t pthread_get_stacksize_np(pthread_t);
00491 #elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
00492 #define STACKADDR_AVAILABLE 1
00493 #elif defined HAVE_PTHREAD_GETTHRDS_NP
00494 #define STACKADDR_AVAILABLE 1
00495 #elif defined __ia64 && defined _HPUX_SOURCE
00496 #include <sys/dyntune.h>
00497
00498 #define STACKADDR_AVAILABLE 1
00499
00500
00501
00502
00503
00504
00505 #undef PTHREAD_STACK_MIN
00506
00507 #define HAVE_PTHREAD_ATTR_GET_NP 1
00508 #undef HAVE_PTHREAD_ATTR_GETSTACK
00509
00510
00511
00512
00513
00514
00515 #define pthread_attr_get_np(thid, attr) 0
00516
00517
00518
00519
00520
00521
00522
/*
 * HP-UX/ia64 substitute for pthread_attr_getstackaddr(): derive the
 * stack base from the current stack pointer minus the attribute's stack
 * size, rounded down to the page size.  The "vps_pagesize" tunable is
 * in kilobytes; 16 KB is assumed when gettune() fails.
 */
static int
hpux_attr_getstackaddr(const pthread_attr_t *attr, void **addr)
{
    static uint64_t pagesize;
    size_t size;

    if (!pagesize) {
        if (gettune("vps_pagesize", &pagesize)) {
            pagesize = 16; /* KB; fallback when the tunable is unreadable */
        }
        pagesize *= 1024;
    }
    pthread_attr_getstacksize(attr, &size);
    *addr = (void *)((size_t)((char *)_Asm_get_sp() - size) & ~(pagesize - 1));
    return 0;
}
00539 #define pthread_attr_getstackaddr(attr, addr) hpux_attr_getstackaddr(attr, addr)
00540 #endif
00541
00542 #ifndef MAINSTACKADDR_AVAILABLE
00543 # ifdef STACKADDR_AVAILABLE
00544 # define MAINSTACKADDR_AVAILABLE 1
00545 # else
00546 # define MAINSTACKADDR_AVAILABLE 0
00547 # endif
00548 #endif
00549 #if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
00550 # define get_main_stack(addr, size) get_stack(addr, size)
00551 #endif
00552
00553 #ifdef STACKADDR_AVAILABLE
00554
00555
00556
00557 static int
00558 get_stack(void **addr, size_t *size)
00559 {
00560 #define CHECK_ERR(expr) \
00561 {int err = (expr); if (err) return err;}
00562 #ifdef HAVE_PTHREAD_GETATTR_NP
00563 pthread_attr_t attr;
00564 size_t guard = 0;
00565 STACK_GROW_DIR_DETECTION;
00566 CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
00567 # ifdef HAVE_PTHREAD_ATTR_GETSTACK
00568 CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
00569 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
00570 # else
00571 CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
00572 CHECK_ERR(pthread_attr_getstacksize(&attr, size));
00573 # endif
00574 CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
00575 *size -= guard;
00576 pthread_attr_destroy(&attr);
00577 #elif defined HAVE_PTHREAD_ATTR_GET_NP
00578 pthread_attr_t attr;
00579 CHECK_ERR(pthread_attr_init(&attr));
00580 CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
00581 # ifdef HAVE_PTHREAD_ATTR_GETSTACK
00582 CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
00583 # else
00584 CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
00585 CHECK_ERR(pthread_attr_getstacksize(&attr, size));
00586 # endif
00587 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
00588 pthread_attr_destroy(&attr);
00589 #elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP)
00590 pthread_t th = pthread_self();
00591 *addr = pthread_get_stackaddr_np(th);
00592 *size = pthread_get_stacksize_np(th);
00593 #elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
00594 stack_t stk;
00595 # if defined HAVE_THR_STKSEGMENT
00596 CHECK_ERR(thr_stksegment(&stk));
00597 # else
00598 CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
00599 # endif
00600 *addr = stk.ss_sp;
00601 *size = stk.ss_size;
00602 #elif defined HAVE_PTHREAD_GETTHRDS_NP
00603 pthread_t th = pthread_self();
00604 struct __pthrdsinfo thinfo;
00605 char reg[256];
00606 int regsiz=sizeof(reg);
00607 CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
00608 &thinfo, sizeof(thinfo),
00609 ®, ®siz));
00610 *addr = thinfo.__pi_stackaddr;
00611
00612
00613
00614 *size = thinfo.__pi_stackend - thinfo.__pi_stackaddr;
00615 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
00616 #else
00617 #error STACKADDR_AVAILABLE is defined but not implemented.
00618 #endif
00619 return 0;
00620 #undef CHECK_ERR
00621 }
00622 #endif
00623
/* Cached description of the main ("initial") thread's machine stack,
 * filled in by ruby_init_stack() and consumed by
 * native_thread_init_stack(). */
static struct {
    rb_nativethread_id_t id;
    size_t stack_maxsize;
    VALUE *stack_start;
#ifdef __ia64
    VALUE *register_stack_start;
#endif
} native_main_thread;
00632
00633 #ifdef STACK_END_ADDRESS
00634 extern void *STACK_END_ADDRESS;
00635 #endif
00636
enum {
    RUBY_STACK_SPACE_LIMIT = 1024 * 1024, /* 1 MB */
    RUBY_STACK_SPACE_RATIO = 5
};

/*
 * Size of the red zone reserved at the end of a machine stack:
 * 1/RUBY_STACK_SPACE_RATIO of the stack, capped at
 * RUBY_STACK_SPACE_LIMIT.
 */
static size_t
space_size(size_t stack_size)
{
    const size_t sz = stack_size / RUBY_STACK_SPACE_RATIO;
    return sz < RUBY_STACK_SPACE_LIMIT ? sz : RUBY_STACK_SPACE_LIMIT;
}
00653
#ifdef __linux__
/*
 * Force the kernel to commit the main thread's stack down to `limit`
 * (`size` bytes from the current frame) by alloca'ing the intermediate
 * region and touching its far end.  This keeps later stack growth from
 * colliding with other mappings.  Skipped when RLIMIT_STACK is
 * unlimited.  NOTE(review): the 0x1000-byte margin presumably guards
 * against compiler stack-probe schemes (-fstack-check) — confirm.
 */
static __attribute__((noinline)) void
reserve_stack(volatile char *limit, size_t size)
{
# ifdef C_ALLOCA
#  error needs alloca()
# endif
    struct rlimit rl;
    volatile char buf[0x100];
    enum {stack_check_margin = 0x1000};

    STACK_GROW_DIR_DETECTION;

    /* Unlimited stack: nothing to reserve. */
    if (!getrlimit(RLIMIT_STACK, &rl) && rl.rlim_cur == RLIM_INFINITY)
        return;

    if (size < stack_check_margin) return;
    size -= stack_check_margin;

    size -= sizeof(buf); /* margin for this frame's own buffer */
    if (IS_STACK_DIR_UPPER()) {
        const volatile char *end = buf + sizeof(buf);
        limit += size;
        if (limit > end) {
            /* alloca the gap and touch its last byte so the pages are
             * actually committed */
            size_t sz = limit - end;
            limit = alloca(sz);
            limit[sz-1] = 0;
        }
    }
    else {
        limit -= size;
        if (buf > limit) {
            /* stack grows downward: alloca the gap and touch its lowest
             * byte */
            size_t sz = buf - limit;
            limit = alloca(sz);
            limit[0] = 0;
        }
    }
}
#else
# define reserve_stack(limit, size) ((void)(limit), (void)(size))
#endif
00709
00710 #undef ruby_init_stack
00711
00712
00713
00714
/*
 * Record the main thread's stack start address and maximum usable size
 * into native_main_thread.  Preferred source is the pthread
 * implementation (get_main_stack); otherwise the address of a local
 * (`addr`) plus RLIMIT_STACK is used as an estimate.
 */
void
ruby_init_stack(volatile VALUE *addr
#ifdef __ia64
    , void *bsp
#endif
    )
{
    native_main_thread.id = pthread_self();
#if MAINSTACKADDR_AVAILABLE
    /* Already initialized (e.g. called twice): keep the first answer. */
    if (native_main_thread.stack_maxsize) return;
    {
        void* stackaddr;
        size_t size;
        if (get_main_stack(&stackaddr, &size) == 0) {
            native_main_thread.stack_maxsize = size;
            native_main_thread.stack_start = stackaddr;
            reserve_stack(stackaddr, size);
            return;
        }
    }
#endif
#ifdef STACK_END_ADDRESS
    native_main_thread.stack_start = STACK_END_ADDRESS;
#else
    /* Keep the outermost (farthest from stack growth) address seen. */
    if (!native_main_thread.stack_start ||
        STACK_UPPER((VALUE *)(void *)&addr,
                    native_main_thread.stack_start > addr,
                    native_main_thread.stack_start < addr)) {
        native_main_thread.stack_start = (VALUE *)addr;
    }
#endif
#ifdef __ia64
    if (!native_main_thread.register_stack_start ||
        (VALUE*)bsp < native_main_thread.register_stack_start) {
        native_main_thread.register_stack_start = (VALUE*)bsp;
    }
#endif
    {
#if defined(HAVE_GETRLIMIT)
#if defined(PTHREAD_STACK_DEFAULT)
# if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
#  error "PTHREAD_STACK_DEFAULT is too small"
# endif
        size_t size = PTHREAD_STACK_DEFAULT;
#else
        size_t size = RUBY_VM_THREAD_VM_STACK_SIZE;
#endif
        size_t space;
        int pagesize = getpagesize();
        struct rlimit rlim;
        STACK_GROW_DIR_DETECTION;
        if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
            size = (size_t)rlim.rlim_cur;
        }
        addr = native_main_thread.stack_start;
        /* Usable space: from stack_start to the nearest page boundary
         * `size` bytes away, in the direction of stack growth. */
        if (IS_STACK_DIR_UPPER()) {
            space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
        }
        else {
            space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;
        }
        native_main_thread.stack_maxsize = space;
#endif
    }

    /* If the current frame is outside the range we just computed, the
     * estimate is wrong (e.g. called on a foreign stack): fall back to
     * the current address and an unknown size. */
    {
        void *start, *end;
        STACK_GROW_DIR_DETECTION;

        if (IS_STACK_DIR_UPPER()) {
            start = native_main_thread.stack_start;
            end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
        }
        else {
            start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
            end = native_main_thread.stack_start;
        }

        if ((void *)addr < start || (void *)addr > end) {
            /* out of range */
            native_main_thread.stack_start = (VALUE *)addr;
            native_main_thread.stack_maxsize = 0; /* unknown */
        }
    }
}
00802
00803 #define CHECK_ERR(expr) \
00804 {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}
00805
/*
 * Fill th->machine with the current native thread's stack boundaries.
 * The main thread reuses the values cached by ruby_init_stack(); other
 * threads query the pthread implementation via get_stack().
 */
static int
native_thread_init_stack(rb_thread_t *th)
{
    rb_nativethread_id_t curr = pthread_self();

    if (pthread_equal(curr, native_main_thread.id)) {
        th->machine.stack_start = native_main_thread.stack_start;
        th->machine.stack_maxsize = native_main_thread.stack_maxsize;
    }
    else {
#ifdef STACKADDR_AVAILABLE
        void *start;
        size_t size;

        if (get_stack(&start, &size) == 0) {
            th->machine.stack_start = start;
            th->machine.stack_maxsize = size;
        }
#elif defined get_stack_of
        if (!th->machine.stack_maxsize) {
            /* The creator fills th->machine via get_stack_of() while
             * holding th->interrupt_lock (see native_thread_create);
             * lock/unlock here to wait until it has finished. */
            native_mutex_lock(&th->interrupt_lock);
            native_mutex_unlock(&th->interrupt_lock);
        }
#else
        rb_raise(rb_eNotImpError, "ruby engine can initialize only in the main thread");
#endif
    }
#ifdef __ia64
    /* ia64 splits the region between the memory stack and the register
     * backing store. */
    th->machine.register_stack_start = native_main_thread.register_stack_start;
    th->machine.stack_maxsize /= 2;
    th->machine.register_stack_maxsize = th->machine.stack_maxsize;
#endif
    return 0;
}
00840
00841 #ifndef __CYGWIN__
00842 #define USE_NATIVE_THREAD_INIT 1
00843 #endif
00844
/*
 * Entry point passed to pthread_create(): set up per-thread state and
 * run the Ruby-level thread body via thread_start_func_2().  With
 * USE_THREAD_CACHE, a finished thread parks itself in
 * register_cached_thread_and_wait() and, when handed a new
 * rb_thread_t, loops back to thread_start instead of exiting.
 */
static void *
thread_start_func_1(void *th_ptr)
{
#if USE_THREAD_CACHE
  thread_start:
#endif
    {
        rb_thread_t *th = th_ptr;
#if !defined USE_NATIVE_THREAD_INIT
        VALUE stack_start;
#endif

#if defined USE_NATIVE_THREAD_INIT
        native_thread_init_stack(th);
#endif
        native_thread_init(th);
        /* run the Ruby thread; the second argument anchors the machine
         * stack start */
#if defined USE_NATIVE_THREAD_INIT
        thread_start_func_2(th, th->machine.stack_start, rb_ia64_bsp());
#else
        thread_start_func_2(th, &stack_start, rb_ia64_bsp());
#endif
    }
#if USE_THREAD_CACHE
    if (1) {
        /* cache this native thread for reuse */
        rb_thread_t *th;
        if ((th = register_cached_thread_and_wait()) != 0) {
            th_ptr = (void *)th;
            th->thread_id = pthread_self();
            goto thread_start;
        }
    }
#endif
    return 0;
}
00881
/* Node of the parked-native-thread list: the parked thread's condvar
 * and the slot the next rb_thread_t is handed over through. */
struct cached_thread_entry {
    volatile rb_thread_t **th_area;
    rb_nativethread_cond_t *cond;
    struct cached_thread_entry *next;
};
00887
00888
00889 #if USE_THREAD_CACHE
00890 static pthread_mutex_t thread_cache_lock = PTHREAD_MUTEX_INITIALIZER;
00891 struct cached_thread_entry *cached_thread_root;
00892
/*
 * Park the calling native thread on the cache list for up to 60 seconds.
 * Returns the rb_thread_t handed over by use_cached_thread(), or NULL
 * on timeout / allocation failure (the caller then lets the native
 * thread exit).
 */
static rb_thread_t *
register_cached_thread_and_wait(void)
{
    rb_nativethread_cond_t cond = { PTHREAD_COND_INITIALIZER, };
    volatile rb_thread_t *th_area = 0;
    struct timeval tv;
    struct timespec ts;
    struct cached_thread_entry *entry =
      (struct cached_thread_entry *)malloc(sizeof(struct cached_thread_entry));

    if (entry == 0) {
        return 0; /* failed to allocate the entry: do not cache */
    }

    /* Absolute realtime deadline: now + 60s (the condvar uses the
     * default clock). */
    gettimeofday(&tv, 0);
    ts.tv_sec = tv.tv_sec + 60;
    ts.tv_nsec = tv.tv_usec * 1000;

    pthread_mutex_lock(&thread_cache_lock);
    {
        entry->th_area = &th_area;
        entry->cond = &cond;
        entry->next = cached_thread_root;
        cached_thread_root = entry;

        native_cond_timedwait(&cond, &thread_cache_lock, &ts);

        /* Unlink our entry if it is still on the list (it is removed by
         * use_cached_thread() when we are handed a thread). */
        {
            struct cached_thread_entry *e, **prev = &cached_thread_root;

            while ((e = *prev) != 0) {
                if (e == entry) {
                    *prev = e->next;
                    break;
                }
                prev = &e->next;
            }
        }

        free(entry);
        native_cond_destroy(&cond);
    }
    pthread_mutex_unlock(&thread_cache_lock);

    return (rb_thread_t *)th_area;
}
00939 #endif
00940
/*
 * Try to hand `th` to a parked (cached) native thread.
 * Returns 1 if a cached thread took it, 0 otherwise.
 * cached_thread_root is re-checked under thread_cache_lock after the
 * unlocked fast-path check (double-checked locking).
 */
static int
use_cached_thread(rb_thread_t *th)
{
    int result = 0;
#if USE_THREAD_CACHE
    struct cached_thread_entry *entry;

    if (cached_thread_root) {
        pthread_mutex_lock(&thread_cache_lock);
        entry = cached_thread_root;
        {
            if (cached_thread_root) {
                cached_thread_root = entry->next;
                *entry->th_area = th;
                result = 1;
            }
        }
        if (result) {
            /* wake the parked thread; it resumes in
             * register_cached_thread_and_wait() */
            native_cond_signal(entry->cond);
        }
        pthread_mutex_unlock(&thread_cache_lock);
    }
#endif
    return result;
}
00966
/*
 * Start a native thread for `th`, either by reusing a cached native
 * thread or by pthread_create().  Returns 0 on success, or the
 * pthread_create() error code.
 */
static int
native_thread_create(rb_thread_t *th)
{
    int err = 0;

    if (use_cached_thread(th)) {
        thread_debug("create (use cached thread): %p\n", (void *)th);
    }
    else {
#ifdef HAVE_PTHREAD_ATTR_INIT
        pthread_attr_t attr;
        pthread_attr_t *const attrp = &attr;
#else
        pthread_attr_t *const attrp = NULL;
#endif
        const size_t stack_size = th->vm->default_params.thread_machine_stack_size;
        const size_t space = space_size(stack_size);

        /* Usable machine stack: requested size minus the red zone. */
        th->machine.stack_maxsize = stack_size - space;
#ifdef __ia64
        th->machine.stack_maxsize /= 2;
        th->machine.register_stack_maxsize = th->machine.stack_maxsize;
#endif

#ifdef HAVE_PTHREAD_ATTR_INIT
        CHECK_ERR(pthread_attr_init(&attr));

# ifdef PTHREAD_STACK_MIN
        thread_debug("create - stack size: %lu\n", (unsigned long)stack_size);
        CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
# endif

# ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
        CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
# endif
        /* Threads are never joined through pthread_join() here. */
        CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
#endif
#ifdef get_stack_of
        /* Hold interrupt_lock while publishing the new thread's stack
         * bounds; the child waits on it in native_thread_init_stack(). */
        native_mutex_lock(&th->interrupt_lock);
#endif
        err = pthread_create(&th->thread_id, attrp, thread_start_func_1, th);
#ifdef get_stack_of
        if (!err) {
            get_stack_of(th->thread_id,
                         &th->machine.stack_start,
                         &th->machine.stack_maxsize);
        }
        native_mutex_unlock(&th->interrupt_lock);
#endif
        thread_debug("create: %p (%d)\n", (void *)th, err);
#ifdef HAVE_PTHREAD_ATTR_INIT
        CHECK_ERR(pthread_attr_destroy(&attr));
#endif
    }
    return err;
}
01023
01024 static void
01025 native_thread_join(pthread_t th)
01026 {
01027 int err = pthread_join(th, 0);
01028 if (err) {
01029 rb_raise(rb_eThreadError, "native_thread_join() failed (%d)", err);
01030 }
01031 }
01032
01033
01034 #if USE_NATIVE_THREAD_PRIORITY
01035
01036 static void
01037 native_thread_apply_priority(rb_thread_t *th)
01038 {
01039 #if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
01040 struct sched_param sp;
01041 int policy;
01042 int priority = 0 - th->priority;
01043 int max, min;
01044 pthread_getschedparam(th->thread_id, &policy, &sp);
01045 max = sched_get_priority_max(policy);
01046 min = sched_get_priority_min(policy);
01047
01048 if (min > priority) {
01049 priority = min;
01050 }
01051 else if (max < priority) {
01052 priority = max;
01053 }
01054
01055 sp.sched_priority = priority;
01056 pthread_setschedparam(th->thread_id, policy, &sp);
01057 #else
01058
01059 #endif
01060 }
01061
01062 #endif
01063
/* select(2) hook for this backend: just delegates to rb_fd_select()
 * (`th` is unused here). */
static int
native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
{
    return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
}
01069
/* Unblock function: wake a thread blocked in native_sleep() by
 * signaling its sleep_cond (registered as th->unblock.func there). */
static void
ubf_pthread_cond_signal(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    thread_debug("ubf_pthread_cond_signal (%p)\n", (void *)th);
    native_cond_signal(&th->native_thread_data.sleep_cond);
}
01077
/*
 * Put `th` to sleep on its sleep_cond, releasing the GVL for the
 * duration.  With timeout_tv == NULL sleeps until signaled; otherwise
 * until the (relative) timeout expires.  The thread's unblock function
 * is set so rb_thread_wakeup() and friends can interrupt the sleep.
 */
static void
native_sleep(rb_thread_t *th, struct timeval *timeout_tv)
{
    struct timespec timeout;
    pthread_mutex_t *lock = &th->interrupt_lock;
    rb_nativethread_cond_t *cond = &th->native_thread_data.sleep_cond;

    if (timeout_tv) {
        struct timespec timeout_rel;

        timeout_rel.tv_sec = timeout_tv->tv_sec;
        timeout_rel.tv_nsec = timeout_tv->tv_usec * 1000;

        /*
         * Clamp huge relative timeouts (over ~3.17 years) so the
         * absolute-deadline arithmetic in native_cond_timeout() cannot
         * overflow time_t.
         */
        if (timeout_rel.tv_sec > 100000000) {
            timeout_rel.tv_sec = 100000000;
            timeout_rel.tv_nsec = 0;
        }

        timeout = native_cond_timeout(cond, timeout_rel);
    }

    GVL_UNLOCK_BEGIN();
    {
        pthread_mutex_lock(lock);
        /* Register the unblock function while holding interrupt_lock so
         * a waker either sees it or we see the pending interrupt. */
        th->unblock.func = ubf_pthread_cond_signal;
        th->unblock.arg = th;

        if (RUBY_VM_INTERRUPTED(th)) {
            /* interrupted before sleeping: return immediately */
            thread_debug("native_sleep: interrupted before sleep\n");
        }
        else {
            if (!timeout_tv)
                native_cond_wait(cond, lock);
            else
                native_cond_timedwait(cond, lock, &timeout);
        }
        th->unblock.func = 0;
        th->unblock.arg = 0;

        pthread_mutex_unlock(lock);
    }
    GVL_UNLOCK_END();

    thread_debug("native_sleep done\n");
}
01132
01133 #ifdef USE_SIGNAL_THREAD_LIST
/* Doubly-linked list node for threads that should be interrupted with
 * SIGVTALRM (see ubf_select / ping_signal_thread_list). */
struct signal_thread_list {
    rb_thread_t *th;
    struct signal_thread_list *prev;
    struct signal_thread_list *next;
};

/* List head sentinel; signal_thread_list_anchor.next is the first real
 * entry. */
static struct signal_thread_list signal_thread_list_anchor = {
    0, 0, 0,
};
01143
/* Execute `body` while holding `lock` (used for the signal thread list). */
#define FGLOCK(lock, body) do { \
    native_mutex_lock(lock); \
    { \
	body; \
    } \
    native_mutex_unlock(lock); \
} while (0)
01151
#if 0
/* Debugging helper (disabled): dump the signal thread list. */
static void
print_signal_list(char *str)
{
    struct signal_thread_list *list =
      signal_thread_list_anchor.next;
    thread_debug("list (%s)> ", str);
    while (list) {
	thread_debug("%p (%p), ", list->th, list->th->thread_id);
	list = list->next;
    }
    thread_debug("\n");
}
#endif
01166
/*
 * Register `th` on the signal thread list (under
 * signal_thread_list_lock) so the timer thread keeps pinging it with
 * SIGVTALRM.  No-op if it is already registered.  Allocation failure is
 * fatal.
 */
static void
add_signal_thread_list(rb_thread_t *th)
{
    if (!th->native_thread_data.signal_thread_list) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list =
              malloc(sizeof(struct signal_thread_list));

            if (list == 0) {
                fprintf(stderr, "[FATAL] failed to allocate memory\n");
                exit(EXIT_FAILURE);
            }

            list->th = th;

            /* insert right after the anchor */
            list->prev = &signal_thread_list_anchor;
            list->next = signal_thread_list_anchor.next;
            if (list->next) {
                list->next->prev = list;
            }
            signal_thread_list_anchor.next = list;
            th->native_thread_data.signal_thread_list = list;
        });
    }
}
01192
/* Unlink and free `th`'s signal thread list entry, if it has one
 * (holds signal_thread_list_lock for the unlink). */
static void
remove_signal_thread_list(rb_thread_t *th)
{
    if (th->native_thread_data.signal_thread_list) {
        FGLOCK(&signal_thread_list_lock, {
            struct signal_thread_list *list =
              (struct signal_thread_list *)
              th->native_thread_data.signal_thread_list;

            list->prev->next = list->next;
            if (list->next) {
                list->next->prev = list->prev;
            }
            th->native_thread_data.signal_thread_list = 0;
            list->th = 0;
            free(list);
        });
    }
}
01212
01213 static void
01214 ubf_select_each(rb_thread_t *th)
01215 {
01216 thread_debug("ubf_select_each (%p)\n", (void *)th->thread_id);
01217 if (th) {
01218 pthread_kill(th->thread_id, SIGVTALRM);
01219 }
01220 }
01221
01222 static void
01223 ubf_select(void *ptr)
01224 {
01225 rb_thread_t *th = (rb_thread_t *)ptr;
01226 add_signal_thread_list(th);
01227
01228
01229
01230
01231
01232
01233
01234
01235 if (pthread_self() != timer_thread_id)
01236 rb_thread_wakeup_timer_thread();
01237 ubf_select_each(th);
01238 }
01239
01240 static void
01241 ping_signal_thread_list(void)
01242 {
01243 if (signal_thread_list_anchor.next) {
01244 FGLOCK(&signal_thread_list_lock, {
01245 struct signal_thread_list *list;
01246
01247 list = signal_thread_list_anchor.next;
01248 while (list) {
01249 ubf_select_each(list->th);
01250 list = list->next;
01251 }
01252 });
01253 }
01254 }
01255
01256 static int
01257 check_signal_thread_list(void)
01258 {
01259 if (signal_thread_list_anchor.next)
01260 return 1;
01261 else
01262 return 0;
01263 }
01264 #else
/* No usable poll()/fcntl() on this platform: interruption support
 * degrades to no-op stubs and the timer thread never needs pinging. */
#define add_signal_thread_list(th) (void)(th)
#define remove_signal_thread_list(th) (void)(th)
#define ubf_select 0
static void ping_signal_thread_list(void) { return; }
static int check_signal_thread_list(void) { return 0; }
01270 #endif
01271
/* Set to 1 for timer-thread lifecycle traces on stderr. */
#define TT_DEBUG 0
/* write() a string literal, discarding the result (async-signal-safe trace). */
#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)

/* Timer thread tick interval: 100ms. */
#define TIME_QUANTUM_USEC (100 * 1000)

#if USE_SLEEPY_TIMER_THREAD
/* Wakeup pipes ([0]=read end, [1]=write end) and the PID that created
 * them; a forked child detects the PID mismatch and rebuilds its own. */
static int timer_thread_pipe[2] = {-1, -1};
static int timer_thread_pipe_low[2] = {-1, -1};
static int timer_thread_pipe_owner_process;
01284
01285
/*
 * Write one byte to `fd` (a wakeup-pipe write end) so the timer thread
 * returns from poll().  Async-signal-safe: only write(2) is used.
 * Retries on EINTR; EAGAIN/EWOULDBLOCK means the pipe is already full,
 * which is harmless — a wakeup byte is already pending.
 */
static void
rb_thread_wakeup_timer_thread_fd(int fd)
{
    ssize_t result;

    /* only touch pipes created by the current process; after fork()
     * the owner PID differs and the inherited fds must not be used */
    if (timer_thread_pipe_owner_process == getpid()) {
	const char *buff = "!";
      retry:
	if ((result = write(fd, buff, 1)) <= 0) {
	    switch (errno) {
	      case EINTR: goto retry;
	      case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
	      case EWOULDBLOCK:
#endif
		break; /* pipe full: timer thread will wake anyway */
	      default:
		rb_async_bug_errno("rb_thread_wakeup_timer_thread - write", errno);
	    }
	}
	if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
    }
    else {
	/* a forked child that has not rebuilt its pipes: do nothing */
    }
}
01313
/* Wake the timer thread via the normal-priority pipe (async-signal-safe). */
void
rb_thread_wakeup_timer_thread(void)
{
    rb_thread_wakeup_timer_thread_fd(timer_thread_pipe[1]);
}
01319
/* Wake the timer thread via the low-priority pipe; this pipe is ignored
 * while the timer thread is in polling mode (see timer_thread_sleep). */
static void
rb_thread_wakeup_timer_thread_low(void)
{
    rb_thread_wakeup_timer_thread_fd(timer_thread_pipe_low[1]);
}
01325
01326
/*
 * Drain all pending wakeup bytes from `fd` so the next poll() does not
 * report it readable again immediately.  Returns when the pipe is empty
 * (EAGAIN on a non-blocking fd) or its write end is closed (read == 0);
 * retries on EINTR; any other error is fatal.
 */
static void
consume_communication_pipe(int fd)
{
#define CCP_READ_BUFF_SIZE 1024
    /* static: keeps the timer thread's stack small; only this thread
     * ever reads the wakeup pipes, so sharing the buffer is safe here */
    static char buff[CCP_READ_BUFF_SIZE];

    for (;;) {
	ssize_t rv = read(fd, buff, sizeof(buff));

	if (rv == 0) {
	    return; /* write end closed */
	}
	if (rv < 0) {
	    if (errno == EINTR) continue;
	    if (errno == EAGAIN) return; /* fully drained */
	    rb_async_bug_errno("consume_communication_pipe: read\n", errno);
	}
	/* rv > 0: more bytes may remain; keep reading */
    }
}
01352
/*
 * Close both ends of a wakeup pipe and mark the slots empty (-1).
 * A failing close() here means the VM's fd bookkeeping is corrupt,
 * so it is reported via rb_bug_errno.
 */
static void
close_communication_pipe(int pipes[2])
{
    if (close(pipes[0]) < 0)
	rb_bug_errno("native_stop_timer_thread - close(ttp[0])", errno);
    if (close(pipes[1]) < 0)
	rb_bug_errno("native_stop_timer_thread - close(ttp[1])", errno);
    pipes[0] = -1;
    pipes[1] = -1;
}
01364
/*
 * Switch `fd` into O_NONBLOCK mode, preserving its other status flags.
 * Raises a Ruby SystemCallError via rb_sys_fail() if fcntl fails.
 */
static void
set_nonblock(int fd)
{
    int flags = fcntl(fd, F_GETFL);

    if (flags == -1 || fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1)
	rb_sys_fail(0);
}
01379
/*
 * (Re)create one non-blocking, close-on-exec wakeup pipe in pipes[2].
 * An already-open pipe in the slots (inherited across fork) is closed
 * first.  Failure to create the pipe is fatal.
 */
static void
setup_communication_pipe_internal(int pipes[2])
{
    if (pipes[0] != -1) {
	/* stale fds from the parent process: close before recreating */
	close_communication_pipe(pipes);
    }

    if (rb_cloexec_pipe(pipes) != 0) {
	rb_bug_errno("setup_communication_pipe: Failed to create communication pipe for timer thread", errno);
    }
    rb_update_max_fd(pipes[0]);
    rb_update_max_fd(pipes[1]);
    set_nonblock(pipes[0]);
    set_nonblock(pipes[1]);
}
01399
01400
/*
 * Create both wakeup pipes for the current process, at most once per
 * PID.  After fork() the recorded owner PID differs from getpid(), so
 * a child rebuilds its own pipes instead of reusing inherited fds.
 */
static void
setup_communication_pipe(void)
{
    if (timer_thread_pipe_owner_process == getpid()) {
	/* already set up for this process */
	return;
    }
    setup_communication_pipe_internal(timer_thread_pipe);
    setup_communication_pipe_internal(timer_thread_pipe_low);

    /* stamp ownership so a forked child knows to recreate the pipes */
    timer_thread_pipe_owner_process = getpid();
}
01414
01421 static inline void
01422 timer_thread_sleep(rb_global_vm_lock_t* gvl)
01423 {
01424 int result;
01425 int need_polling;
01426 struct pollfd pollfds[2];
01427
01428 pollfds[0].fd = timer_thread_pipe[0];
01429 pollfds[0].events = POLLIN;
01430 pollfds[1].fd = timer_thread_pipe_low[0];
01431 pollfds[1].events = POLLIN;
01432
01433 need_polling = check_signal_thread_list();
01434
01435 if (gvl->waiting > 0 || need_polling) {
01436
01437 result = poll(pollfds, 1, TIME_QUANTUM_USEC/1000);
01438 }
01439 else {
01440
01441 result = poll(pollfds, numberof(pollfds), -1);
01442 }
01443
01444 if (result == 0) {
01445
01446 }
01447 else if (result > 0) {
01448 consume_communication_pipe(timer_thread_pipe[0]);
01449 consume_communication_pipe(timer_thread_pipe_low[0]);
01450 }
01451 else {
01452 switch (errno) {
01453 case EBADF:
01454 case EINVAL:
01455 case ENOMEM:
01456 case EFAULT:
01457 rb_async_bug_errno("thread_timer: select", errno);
01458 default:
01459 ;
01460 }
01461 }
01462 }
01463
01464 #else
# define PER_NANO 1000000000
/* No pipe-based wakeup available: the wakeup calls become no-ops and
 * the timer thread just naps on a condition variable (see below). */
void rb_thread_wakeup_timer_thread(void) {}
static void rb_thread_wakeup_timer_thread_low(void) {}

/* Lock/condvar used solely by the fallback timer_thread_sleep. */
static pthread_mutex_t timer_thread_lock;
static rb_nativethread_cond_t timer_thread_cond;
01471
01472 static inline void
01473 timer_thread_sleep(rb_global_vm_lock_t* unused)
01474 {
01475 struct timespec ts;
01476 ts.tv_sec = 0;
01477 ts.tv_nsec = TIME_QUANTUM_USEC * 1000;
01478 ts = native_cond_timeout(&timer_thread_cond, ts);
01479
01480 native_cond_timedwait(&timer_thread_cond, &timer_thread_lock, &ts);
01481 }
01482 #endif
01483
/* Name the timer thread for debuggers/top on platforms that allow it. */
#if defined(__linux__) && defined(PR_SET_NAME)
# undef SET_THREAD_NAME
# define SET_THREAD_NAME(name) prctl(PR_SET_NAME, name)
#elif !defined(SET_THREAD_NAME)
# define SET_THREAD_NAME(name) (void)0
#endif
01490
/*
 * Timer thread entry point.  `p` is the VM's rb_global_vm_lock_t.
 * Loops until native_stop_timer_thread() drops system_working to 0:
 * each iteration pings registered blocked threads, runs
 * timer_thread_function(0) (per-tick VM work, defined elsewhere),
 * then sleeps in timer_thread_sleep().  Returns NULL at shutdown.
 */
static void *
thread_timer(void *p)
{
    rb_global_vm_lock_t *gvl = (rb_global_vm_lock_t *)p;

    if (TT_DEBUG) WRITE_CONST(2, "start timer thread\n");

    SET_THREAD_NAME("ruby-timer-thr");

#if !USE_SLEEPY_TIMER_THREAD
    /* condvar fallback: the lock stays held for the whole loop and is
     * released only inside native_cond_timedwait() */
    native_mutex_initialize(&timer_thread_lock);
    native_cond_initialize(&timer_thread_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    native_mutex_lock(&timer_thread_lock);
#endif
    while (system_working > 0) {
	ping_signal_thread_list();
	timer_thread_function(0);

	if (TT_DEBUG) WRITE_CONST(2, "tick\n");

	timer_thread_sleep(gvl);
    }
#if !USE_SLEEPY_TIMER_THREAD
    native_mutex_unlock(&timer_thread_lock);
    native_cond_destroy(&timer_thread_cond);
    native_mutex_destroy(&timer_thread_lock);
#endif

    if (TT_DEBUG) WRITE_CONST(2, "finish timer thread\n");
    return NULL;
}
01525
/*
 * Create the timer thread (once).  Gives it a small dedicated stack
 * where pthread_attr_setstacksize() is available, sets up the wakeup
 * pipes for the sleepy implementation, then spawns thread_timer()
 * with the VM's GVL as its argument.  Creation failure is fatal.
 */
static void
rb_thread_create_timer_thread(void)
{
    if (!timer_thread_id) {
	int err;
#ifdef HAVE_PTHREAD_ATTR_INIT
	pthread_attr_t attr;

	err = pthread_attr_init(&attr);
	if (err != 0) {
	    fprintf(stderr, "[FATAL] Failed to initialize pthread attr: %s\n", strerror(err));
	    exit(EXIT_FAILURE);
	}
# ifdef PTHREAD_STACK_MIN
	{
	    /* the timer thread needs very little stack: use the
	     * platform minimum, but never less than 16KB since
	     * PTHREAD_STACK_MIN can be tiny on some systems */
	    const size_t min_size = (4096 * 4);
	    size_t stack_size = PTHREAD_STACK_MIN;
	    if (stack_size < min_size) stack_size = min_size;
	    if (THREAD_DEBUG) stack_size += BUFSIZ; /* room for debug I/O */
	    pthread_attr_setstacksize(&attr, stack_size);
	}
# endif
#endif

#if USE_SLEEPY_TIMER_THREAD
	setup_communication_pipe();
#endif

	/* defensive re-check; NOTE(review): looks unreachable given the
	 * guard at the top of this function — confirm before removing */
	if (timer_thread_id) {
	    rb_bug("rb_thread_create_timer_thread: Timer thread was already created\n");
	}
#ifdef HAVE_PTHREAD_ATTR_INIT
	err = pthread_create(&timer_thread_id, &attr, thread_timer, &GET_VM()->gvl);
#else
	err = pthread_create(&timer_thread_id, NULL, thread_timer, &GET_VM()->gvl);
#endif
	if (err != 0) {
	    fprintf(stderr, "[FATAL] Failed to create timer thread: %s\n", strerror(err));
	    exit(EXIT_FAILURE);
	}
#ifdef HAVE_PTHREAD_ATTR_INIT
	pthread_attr_destroy(&attr);
#endif
    }
}
01576
/*
 * Decrement system_working; when it reaches zero, wake the timer
 * thread out of its sleep and join it.  Returns nonzero iff this call
 * actually stopped the timer thread.
 */
static int
native_stop_timer_thread(int close_anyway)
{
    int stopped;
    stopped = --system_working <= 0;

    if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
    if (stopped) {
	/* kick the timer thread out of poll()/cond-wait so join returns */
	rb_thread_wakeup_timer_thread();
	native_thread_join(timer_thread_id);
	if (TT_DEBUG) fprintf(stderr, "joined timer thread\n");
	timer_thread_id = 0;

	if (close_anyway) {
	    /* intentionally empty: the wakeup pipes are NOT closed even
	     * when requested.  NOTE(review): presumably other threads
	     * may still be using the fds during shutdown — confirm
	     * against upstream history before adding close calls. */
	}
    }
    return stopped;
}
01605
01606 static void
01607 native_reset_timer_thread(void)
01608 {
01609 if (TT_DEBUG) fprintf(stderr, "reset timer thread\n");
01610 }
01611
01612 #ifdef HAVE_SIGALTSTACK
/*
 * Return 1 when `addr` falls inside the guard region at the overflow
 * end of `th`'s machine stack, 0 otherwise.  Guarded by
 * HAVE_SIGALTSTACK; presumably consulted by the fault handler to tell
 * stack overflow apart from other faults — confirm with the caller.
 */
int
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
{
    void *base;
    size_t size;
    const size_t water_mark = 1024 * 1024; /* guard region capped at 1MB */
    STACK_GROW_DIR_DETECTION;

#ifdef STACKADDR_AVAILABLE
    if (get_stack(&base, &size) == 0) {
# ifdef __APPLE__
	/* for the main thread, trust the RLIMIT_STACK soft limit when
	 * it is larger than the size get_stack() reported */
	if (pthread_equal(th->thread_id, native_main_thread.id)) {
	    struct rlimit rlim;
	    if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
		size = (size_t)rlim.rlim_cur;
	    }
	}
# endif
	/* move base to the far (overflow) end of the stack */
	base = (char *)base + STACK_DIR_UPPER(+size, -size);
    }
    else
#endif
    if (th) {
	/* fall back to the machine-stack bounds recorded on the thread */
	size = th->machine.stack_maxsize;
	base = (char *)th->machine.stack_start - STACK_DIR_UPPER(0, size);
    }
    else {
	return 0;
    }
    /* only a fraction of the stack near its end counts as "overflowed" */
    size /= RUBY_STACK_SPACE_RATIO;
    if (size > water_mark) size = water_mark;
    if (IS_STACK_DIR_UPPER()) {
	/* clamp so base+size cannot wrap around the address space */
	if (size > ~(size_t)base+1) size = ~(size_t)base+1;
	if (addr > base && addr <= (void *)((char *)base + size)) return 1;
    }
    else {
	/* clamp so base-size cannot underflow below address 0 */
	if (size > (size_t)base) size = (size_t)base;
	if (addr > (void *)((char *)base - size) && addr <= base) return 1;
    }
    return 0;
}
01654 #endif
01655
/*
 * Return 1 when `fd` is one of the timer thread's internal pipe fds,
 * which user code must never close or reuse; 0 otherwise.
 */
int
rb_reserved_fd_p(int fd)
{
#if USE_SLEEPY_TIMER_THREAD
    /* both ends of both wakeup pipes belong to the timer thread */
    return (fd == timer_thread_pipe[0] ||
	    fd == timer_thread_pipe[1] ||
	    fd == timer_thread_pipe_low[0] ||
	    fd == timer_thread_pipe_low[1]);
#else
    /* no sleepy timer thread: no fds are reserved */
    return 0;
#endif
}
01673
/* Return the native (pthread) identifier of the calling thread. */
rb_nativethread_id_t
rb_nativethread_self(void)
{
    return pthread_self();
}
01679
01680 #endif
01681